diff --git a/go.mod b/go.mod
index cb23472ae..c25dcb222 100644
--- a/go.mod
+++ b/go.mod
@@ -3,11 +3,21 @@ module github.com/terraform-providers/terraform-provider-pagerduty
 go 1.18
 
 require (
-	cloud.google.com/go v0.71.0 // indirect
-	github.com/hashicorp/terraform-plugin-sdk/v2 v2.10.1
-	github.com/heimweh/go-pagerduty v0.0.0-20220527195341-4e587aa9b58e
-	golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd // indirect
-	google.golang.org/api v0.35.0 // indirect
-	google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb // indirect
-	google.golang.org/grpc v1.33.2 // indirect
+	github.com/agext/levenshtein v1.2.3 // indirect
+	github.com/hashicorp/errwrap v1.1.0 // indirect
+	github.com/hashicorp/terraform-plugin-sdk/v2 v2.19.0
+	github.com/hashicorp/yamux v0.1.0 // indirect
+	github.com/heimweh/go-pagerduty v0.0.0-20220722230120-5d470ed7c5fc
+	github.com/klauspost/compress v1.15.9 // indirect
+	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
+	github.com/montanaflynn/stats v0.6.6 // indirect
+	github.com/oklog/run v1.1.0 // indirect
+	github.com/vmihailenco/tagparser v0.1.2 // indirect
+	go.mongodb.org/mongo-driver v1.10.0 // indirect
+	golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect
+	golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect
+	golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
+	golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect
+	google.golang.org/appengine v1.6.7 // indirect
+	google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252 // indirect
 )
diff --git a/go.sum b/go.sum
index dd09ff741..8e15f9cd5 100644
--- a/go.sum
+++ b/go.sum
@@ -1,45 +1,6 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.61.0/go.mod h1:XukKJg4Y7QsUu0Hxg3qQKUWR4VuWivmyMK2+rUyxAqw=
-cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.71.0 h1:2ha722Z08cmRa0orJrzBaszYQcLbLFcsZHsGSj/kIF4=
-cloud.google.com/go v0.71.0/go.mod h1:qZfY4Y7AEIQwG/fQYD3xrxLNkQZ0Xzf3HGeqCkA6LVM=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
-github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
-github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
 github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
 github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk=
 github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
@@ -48,13 +9,13 @@ github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/
 github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk=
 github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
 github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
-github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE=
 github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
-github.com/andybalholm/crlf v0.0.0-20171020200849-670099aa064f/go.mod h1:k8feO4+kXDxro6ErPXBRTJ/ro2mf0SsFG8s7doP9kJE=
-github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
+github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
+github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
-github.com/apparentlymart/go-cidr v1.0.1 h1:NmIwLZ/KdsjIUlhf+/Np40atNXm/+lZ5txfTJ/SpF+U=
-github.com/apparentlymart/go-cidr v1.0.1/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU=
+github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
 github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
 github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I=
 github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
@@ -63,22 +24,16 @@ github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/
 github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec=
 github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
 github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
-github.com/aws/aws-sdk-go v1.25.3 h1:uM16hIw9BotjZKMZlX05SN2EFtaWfi/NonPKIARiBLQ=
-github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=
-github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -88,73 +43,35 @@ github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
+github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
+github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
-github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
 github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
 github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
 github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
 github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34=
 github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
-github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8=
 github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0=
 github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4=
 github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
 github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
 github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
-github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
-github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
-github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
-github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
-github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
-github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
-github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
-github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
-github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
-github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
-github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
-github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
-github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
-github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
-github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
-github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
-github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
-github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
-github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
-github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
-github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
-github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
-github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
-github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
 github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
 github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
 github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
 github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
 github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
@@ -162,47 +79,30 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
 github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
 github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
 github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
-github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
 github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
-github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU=
 github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg=
 github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
@@ -211,114 +111,60 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n
 github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI=
 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs=
-github.com/hashicorp/go-getter v1.5.3 h1:NF5+zOlQegim+w/EUhSLh6QhXHmZMEeHLQzllkQ3ROU=
-github.com/hashicorp/go-getter v1.5.3/go.mod h1:BrrV/1clo8cCYu6mxvboYg+KutTiFnXjMEgDD8+i7ZI=
-github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
 github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-hclog v0.15.0 h1:qMuK0wxsoW4D0ddCCYwPSTm4KQv1X1ke3WmPWZ0Mvsk=
-github.com/hashicorp/go-hclog v0.15.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-hclog v0.16.1 h1:IVQwpTGNRRIHafnTs2dQLIk4ENtneRIEEJWOVDqz99o=
-github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-hclog v1.2.1 h1:YQsLlGDJgwhXFpucSPyVbCBviQtjlHv3jLTlp8YmtEw=
+github.com/hashicorp/go-hclog v1.2.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-plugin v1.3.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0=
-github.com/hashicorp/go-plugin v1.4.1 h1:6UltRQlLN9iZO513VveELp5xyaFxVD2+1OVylE+2E+w=
-github.com/hashicorp/go-plugin v1.4.1/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ=
-github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=
-github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
+github.com/hashicorp/go-plugin v1.4.4 h1:NVdrSdFRt3SkZtNckJ6tog7gbpRrcbOjQi/rgF7JYWQ=
+github.com/hashicorp/go-plugin v1.4.4/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s=
 github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
-github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
+github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go-version v1.3.0 h1:McDWVJIU/y+u1BRV06dPaLfLCaT7fUTJLp5r04x7iNw=
-github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/hc-install v0.3.1 h1:VIjllE6KyAI1A244G8kTaHXy+TL5/XYzvrtFi8po/Yk=
-github.com/hashicorp/hc-install v0.3.1/go.mod h1:3LCdWcCDS1gaHC9mhHCGbkYfoY6vdsKohGjugbZdZak=
-github.com/hashicorp/hcl/v2 v2.3.0 h1:iRly8YaMwTBAKhn1Ybk7VSdzbnopghktCD031P8ggUE=
-github.com/hashicorp/hcl/v2 v2.3.0/go.mod h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8=
+github.com/hashicorp/go-version v1.5.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
+github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/hc-install v0.4.0 h1:cZkRFr1WVa0Ty6x5fTvL1TuO1flul231rWkGH92oYYk=
+github.com/hashicorp/hc-install v0.4.0/go.mod h1:5d155H8EC5ewegao9A4PUTMNPZaq+TbOzkJJZ4vrXeI=
+github.com/hashicorp/hcl/v2 v2.13.0 h1:0Apadu1w6M11dyGFxWnmhhcMjkbAiKCv7G1r/2QgCNc=
+github.com/hashicorp/hcl/v2 v2.13.0/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0=
 github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
 github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/terraform-exec v0.14.0 h1:UQoUcxKTZZXhyyK68Cwn4mApT4mnFPmEXPiqaHL9r+w=
-github.com/hashicorp/terraform-exec v0.14.0/go.mod h1:qrAASDq28KZiMPDnQ02sFS9udcqEkRly002EA2izXTA=
-github.com/hashicorp/terraform-exec v0.15.0 h1:cqjh4d8HYNQrDoEmlSGelHmg2DYDh5yayckvJ5bV18E=
-github.com/hashicorp/terraform-exec v0.15.0/go.mod h1:H4IG8ZxanU+NW0ZpDRNsvh9f0ul7C0nHP+rUR/CHs7I=
-github.com/hashicorp/terraform-json v0.12.0 h1:8czPgEEWWPROStjkWPUnTQDXmpmZPlkQAwYYLETaTvw=
-github.com/hashicorp/terraform-json v0.12.0/go.mod h1:pmbq9o4EuL43db5+0ogX10Yofv1nozM+wskr/bGFJpI=
-github.com/hashicorp/terraform-json v0.13.0 h1:Li9L+lKD1FO5RVFRM1mMMIBDoUHslOniyEi5CM+FWGY=
-github.com/hashicorp/terraform-json v0.13.0/go.mod h1:y5OdLBCT+rxbwnpxZs9kGL7R9ExU76+cpdY8zHwoazk=
-github.com/hashicorp/terraform-plugin-go v0.3.0 h1:AJqYzP52JFYl9NABRI7smXI1pNjgR5Q/y2WyVJ/BOZA=
-github.com/hashicorp/terraform-plugin-go v0.3.0/go.mod h1:dFHsQMaTLpON2gWhVWT96fvtlc/MF1vSy3OdMhWBzdM=
-github.com/hashicorp/terraform-plugin-go v0.5.0 h1:+gCDdF0hcYCm0YBTxrP4+K1NGIS5ZKZBKDORBewLJmg=
-github.com/hashicorp/terraform-plugin-go v0.5.0/go.mod h1:PAVN26PNGpkkmsvva1qfriae5Arky3xl3NfzKa8XFVM=
-github.com/hashicorp/terraform-plugin-log v0.2.0 h1:rjflRuBqCnSk3UHOR25MP1G5BDLKktTA6lNjjcAnBfI=
-github.com/hashicorp/terraform-plugin-log v0.2.0/go.mod h1:E1kJmapEHzqu1x6M++gjvhzM2yMQNXPVWZRCB8sgYjg=
-github.com/hashicorp/terraform-plugin-sdk/v2 v2.7.1 h1:vpzKKP2dIFb9n89AG8Wxl758/5JSZWZH0OuKdlq0M38=
-github.com/hashicorp/terraform-plugin-sdk/v2 v2.7.1/go.mod h1:o3pdss6ynDZW9FfiZ+rETUH5LEVufrXdhwLU+5OiRo0=
-github.com/hashicorp/terraform-plugin-sdk/v2 v2.10.1 h1:B9AocC+dxrCqcf4vVhztIkSkt3gpRjUkEka8AmZWGlQ=
-github.com/hashicorp/terraform-plugin-sdk/v2 v2.10.1/go.mod h1:FjM9DXWfP0w/AeOtJoSKHBZ01LqmaO6uP4bXhv3fekw=
-github.com/hashicorp/terraform-registry-address v0.0.0-20210412075316-9b2996cce896 h1:1FGtlkJw87UsTMg5s8jrekrHmUPUJaMcu6ELiVhQrNw=
-github.com/hashicorp/terraform-registry-address v0.0.0-20210412075316-9b2996cce896/go.mod h1:bzBPnUIkI0RxauU8Dqo+2KrZZ28Cf48s8V6IHt3p4co=
+github.com/hashicorp/terraform-exec v0.17.2 h1:EU7i3Fh7vDUI9nNRdMATCEfnm9axzTnad8zszYZ73Go=
+github.com/hashicorp/terraform-exec v0.17.2/go.mod h1:tuIbsL2l4MlwwIZx9HPM+LOV9vVyEfBYu2GsO1uH3/8=
+github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e17dKDpqV7s=
+github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM=
+github.com/hashicorp/terraform-plugin-go v0.12.0 h1:6wW9mT1dSs0Xq4LR6HXj1heQ5ovr5GxXNJwkErZzpJw=
+github.com/hashicorp/terraform-plugin-go v0.12.0/go.mod h1:kwhmaWHNDvT1B3QiSJdAtrB/D4RaKSY/v3r2BuoWK4M=
+github.com/hashicorp/terraform-plugin-log v0.6.0 h1:/Vq78uSIdUSZ3iqDc9PESKtwt8YqNKN6u+khD+lLjuw=
+github.com/hashicorp/terraform-plugin-log v0.6.0/go.mod h1:p4R1jWBXRTvL4odmEkFfDdhUjHf9zcs/BCoNHAc7IK4=
+github.com/hashicorp/terraform-plugin-sdk/v2 v2.19.0 h1:7gDAcfto/C4Cjtf90SdukQshsxdMxJ/P69QxiF3digI=
+github.com/hashicorp/terraform-plugin-sdk/v2 v2.19.0/go.mod h1:/WYikYjhKB7c2j1HmXZhRsAARldRb4M38bLCLOhC3so=
+github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c h1:D8aRO6+mTqHfLsK/BC3j5OAoogv1WLRWzY1AaTo3rBg=
+github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c/go.mod h1:Wn3Na71knbXc1G8Lh+yu/dQWWJeFQEpDeJMtWMtlmNI=
 github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0=
 github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg=
 github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
-github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ=
 github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
-github.com/heimweh/go-pagerduty v0.0.0-20210831220234-54710c5e87d1 h1:yBAqdyzvIpGktzh8EnWqaXAtwhdgWqrP2Zk53MDH89Y=
-github.com/heimweh/go-pagerduty v0.0.0-20210831220234-54710c5e87d1/go.mod h1:6+bccpjQ/PM8uQY9m8avM4MJea+3vo3ta9r8kGQ4XFY=
-github.com/heimweh/go-pagerduty v0.0.0-20210930203304-530eff2acdc6 h1:/D0VtHEOCdotE1vSB9XznceAjIGkUieZ4BF6VKUIqNU=
-github.com/heimweh/go-pagerduty v0.0.0-20210930203304-530eff2acdc6/go.mod h1:JtJGtgN0y9KOCaqFMZFaBCWskpO/KK3Ro9TwjP9ss6w=
-github.com/heimweh/go-pagerduty v0.0.0-20211027200436-ac3c86fd0ce5 h1:lAh9VAHBA09RKMNdbwdeWuzWSDbQ3wnUaa7mzrCKsCI=
-github.com/heimweh/go-pagerduty v0.0.0-20211027200436-ac3c86fd0ce5/go.mod h1:JtJGtgN0y9KOCaqFMZFaBCWskpO/KK3Ro9TwjP9ss6w=
-github.com/heimweh/go-pagerduty v0.0.0-20211110145933-0ab24da93a91 h1:vqBxGv/CFAIyr1HRvG2DjOuMVHw62sP38s7p8uVfi9k=
-github.com/heimweh/go-pagerduty v0.0.0-20211110145933-0ab24da93a91/go.mod h1:JtJGtgN0y9KOCaqFMZFaBCWskpO/KK3Ro9TwjP9ss6w=
-github.com/heimweh/go-pagerduty v0.0.0-20211119212911-31ef1eea0d0f h1:/sgh3L7adJkcpj87qfMjAsr4mDxlIJrbGIGbS1JxxtQ=
-github.com/heimweh/go-pagerduty v0.0.0-20211119212911-31ef1eea0d0f/go.mod h1:JtJGtgN0y9KOCaqFMZFaBCWskpO/KK3Ro9TwjP9ss6w=
-github.com/heimweh/go-pagerduty v0.0.0-20211210233744-b65de43109c1 h1:49zl3n/g+ff/7/CpxiuqnenyxId5iAjbhgoE9iDHULc=
-github.com/heimweh/go-pagerduty v0.0.0-20211210233744-b65de43109c1/go.mod h1:JtJGtgN0y9KOCaqFMZFaBCWskpO/KK3Ro9TwjP9ss6w=
-github.com/heimweh/go-pagerduty v0.0.0-20220208023456-83fe435832fb h1:p3faOVCU8L4wab9mpxN9Cm/VNVkPD8GMEfD0sPHw9nY=
-github.com/heimweh/go-pagerduty v0.0.0-20220208023456-83fe435832fb/go.mod h1:JtJGtgN0y9KOCaqFMZFaBCWskpO/KK3Ro9TwjP9ss6w=
-github.com/heimweh/go-pagerduty v0.0.0-20220422231448-43095fe5ba3f h1:NLk7iDq85F2lz0q1gY32vZR506aYiNcgvV+Us1rX1q4=
-github.com/heimweh/go-pagerduty v0.0.0-20220422231448-43095fe5ba3f/go.mod h1:JtJGtgN0y9KOCaqFMZFaBCWskpO/KK3Ro9TwjP9ss6w=
-github.com/heimweh/go-pagerduty v0.0.0-20220428180718-5a69bb821163 h1:ETKxW+KSjOPaRzZU9f+QrjCkrL7hQqtMPKDv8DnLDO4=
-github.com/heimweh/go-pagerduty v0.0.0-20220428180718-5a69bb821163/go.mod h1:JtJGtgN0y9KOCaqFMZFaBCWskpO/KK3Ro9TwjP9ss6w=
-github.com/heimweh/go-pagerduty v0.0.0-20220527195341-4e587aa9b58e h1:xit0rQWTVlM9ohz4IzrddDKglC7jey+m3GSI/bz3TIw=
-github.com/heimweh/go-pagerduty v0.0.0-20220527195341-4e587aa9b58e/go.mod h1:JtJGtgN0y9KOCaqFMZFaBCWskpO/KK3Ro9TwjP9ss6w=
-github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/hashicorp/yamux v0.1.0 h1:DzDIF6Sd7GD2sX0kDFpHAsJMY4L+OfTvtuaQsOYXxzk=
+github.com/hashicorp/yamux v0.1.0/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
+github.com/heimweh/go-pagerduty v0.0.0-20220722230120-5d470ed7c5fc h1:J5QVjyO15rK1p1a69yov2bqaWG/28ocfFqqo5HIZRYU=
+github.com/heimweh/go-pagerduty v0.0.0-20220722230120-5d470ed7c5fc/go.mod h1:t9vftsO1IjYHGdgJXeemZtomCWnxi2SRgu0PRcRb2oY=
 github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
 github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
 github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
 github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE=
 github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
-github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
-github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
 github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck=
 github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.11.2 h1:MiK62aErc3gIiVEtyzKfeOHgW7atJb5g/KNX5m3c2nQ=
-github.com/klauspost/compress v1.11.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@@ -329,166 +175,116 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
-github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
-github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A=
 github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
 github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
 github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10=
 github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
-github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4=
-github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
 github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/go-testing-interface v1.0.4 h1:ZU1VNC02qyufSZsjjs7+khruk2fKvbQ3TwRV/IBCeFA=
-github.com/mitchellh/go-testing-interface v1.0.4/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
 github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
 github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
 github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
-github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
 github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
-github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
+github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
 github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
 github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/montanaflynn/stats v0.6.6 h1:Duep6KMIDpY4Yo11iFsvyqJDyfzLF9+sndUKT+v64GQ=
+github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
 github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce h1:RPclfga2SEJmgMmz2k+Mg7cowZ8yv4Trqw9UsJby758=
 github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce/go.mod h1:uFMI8w+ref4v2r9jz+c9i1IfIttS/OkmLfrk1jne5hs=
-github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
 github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
-github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
+github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
 github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
 github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
 github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
 github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ=
-github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
 github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
 github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI=
 github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
+github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U=
 github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4=
 github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
+github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc=
+github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
 github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI=
 github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0=
 github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
 github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
-github.com/xdg-go/scram v1.0.2 h1:akYIkZ28e6A96dkWNJQu3nmCzH3YfwMPQExUYDaRv7w=
 github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
-github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc=
+github.com/xdg-go/scram v1.1.1 h1:VOMT+81stJgXW3CpHyqHN3AXDYIMsx56mEFrB37Mb/E=
+github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
 github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
-github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA=
+github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCOIs=
+github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
 github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk=
+github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4=
 github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s=
 github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8=
-github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8=
-github.com/zclconf/go-cty v1.8.4 h1:pwhhz5P+Fjxse7S7UriBrMu6AUJSZM5pKqGem1PjGAs=
-github.com/zclconf/go-cty v1.8.4/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk=
-github.com/zclconf/go-cty v1.9.1 h1:viqrgQwFl5UpSxc046qblj78wZXVDFnSOufaOTER+cc=
-github.com/zclconf/go-cty v1.9.1/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk=
+github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk=
+github.com/zclconf/go-cty v1.10.0 h1:mp9ZXQeIcN8kAwuqorjH+Q+njbJKjLrvB2yIh4q7U+0=
+github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk=
 github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8=
-go.mongodb.org/mongo-driver v1.7.0 h1:hHrvOBWlWB2c7+8Gh/Xi5jj82AgidK/t7KVXBZ+IyUA=
-go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8=
-go.mongodb.org/mongo-driver v1.7.2 h1:pFttQyIiJUHEn50YfZgC9ECjITMT44oiN36uArf/OFg=
-go.mongodb.org/mongo-driver v1.7.2/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+go.mongodb.org/mongo-driver v1.9.1/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY=
+go.mongodb.org/mongo-driver v1.10.0 h1:UtV6N5k14upNp4LTduX0QCufG124fSu25Wz9tu94GLg=
+go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
 golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
-golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
 golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b h1:7mWr3k41Qtv8XlltBkDkl8LoP3mpSgBW8BUoxtEdbXg=
 golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI=
 golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c=
+golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -498,239 +294,111 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
 golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201026091529-146b70c837a4/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210326060303-6b1517762897 h1:KrsHThm5nFk34YtATK1LsThyGhGbGe1olrte/HInHvs=
 golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z31v8HwR2A9FYOuAhWqkwc=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys
v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79 h1:RX8C8PRZc2hTIod4ds8ij+/4RQX3AqhYj3uOHmyaz4E= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= +golang.org/x/sys 
v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201030143252-cf7a54d06671/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd h1:kJP9fbfkpUoA4y03Nxor8be+YbShcXP16fc7G4nlgpw= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.34.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.35.0 h1:TBCmTTxUrRDA1iTctnK/fIeitxIZ+TQuaf0j29fmCGo= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto 
v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200711021454-869866162049/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201030142918-24207fddd1c3/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto 
v0.0.0-20201109203340-2640f1f9cdfb h1:MoNcrN5yaH+35Ge8RUwFbL7ekwq9ED2fiDpgWKrR29w= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252 h1:G5AjFxR+ibe9Taamo0TdW+iylfBYK10DSkHYdx7PZ9w= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -740,32 +408,26 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/vendor/github.com/agext/levenshtein/.travis.yml b/vendor/github.com/agext/levenshtein/.travis.yml index a51a14466..68d38816f 100644 --- a/vendor/github.com/agext/levenshtein/.travis.yml +++ b/vendor/github.com/agext/levenshtein/.travis.yml @@ -3,8 +3,11 @@ sudo: false matrix: fast_finish: true include: - - go: 1.11.x + - go: 1.14.x env: TEST_METHOD=goveralls + - go: 1.13.x + - go: 1.12.x + - go: 1.11.x - go: 1.10.x - go: tip - go: 1.9.x @@ -14,6 +17,8 @@ matrix: - go: 1.5.x allow_failures: - go: tip + - go: 1.11.x + - 
go: 1.10.x - go: 1.9.x - go: 1.8.x - go: 1.7.x diff --git a/vendor/github.com/agext/levenshtein/README.md b/vendor/github.com/agext/levenshtein/README.md index 9e4255879..d9a8ce16d 100644 --- a/vendor/github.com/agext/levenshtein/README.md +++ b/vendor/github.com/agext/levenshtein/README.md @@ -11,7 +11,7 @@ This package implements distance and similarity metrics for strings, based on th ## Project Status -v1.2.2 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis. +v1.2.3 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis. This package is being actively maintained. If you encounter any problems or have any suggestions for improvement, please [open an issue](https://github.com/agext/levenshtein/issues). Pull requests are welcome. diff --git a/vendor/github.com/agext/levenshtein/levenshtein.go b/vendor/github.com/agext/levenshtein/levenshtein.go index df69ce701..56d719b83 100644 --- a/vendor/github.com/agext/levenshtein/levenshtein.go +++ b/vendor/github.com/agext/levenshtein/levenshtein.go @@ -108,7 +108,7 @@ func Calculate(str1, str2 []rune, maxCost, insCost, subCost, delCost int) (dist, for x := 0; x < l2; x++ { dy, d[doff] = d[doff], d[doff]+insCost - for d[doff] > maxCost && dlen > 0 { + for doff < l1 && d[doff] > maxCost && dlen > 0 { if str1[doff] != str2[x] { dy += subCost } diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go index ed749d4c5..20823af04 100644 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go +++ b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go @@ -28,6 +28,16 @@ import ( // For example, 10.3.0.0/16, extended by 8 bits, with a network number // of 5, becomes 10.3.5.0/24 . func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) { + return SubnetBig(base, newBits, big.NewInt(int64(num))) +} + +// SubnetBig takes a parent CIDR range and creates a subnet within it with the +// given number of additional prefix bits and the given network number. It +// differs from Subnet in that it takes a *big.Int for the num, instead of an int. +// +// For example, 10.3.0.0/16, extended by 8 bits, with a network number of 5, +// becomes 10.3.5.0/24 . +func SubnetBig(base *net.IPNet, newBits int, num *big.Int) (*net.IPNet, error) { ip := base.IP mask := base.Mask @@ -39,24 +49,32 @@ func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) { } maxNetNum := uint64(1<<uint64(newBits)) - 1 - if uint64(num) > maxNetNum { + if num.Uint64() > maxNetNum { return nil, fmt.Errorf("prefix extension of %d does not accommodate a subnet numbered %d", newBits, num) } return &net.IPNet{ - IP: insertNumIntoIP(ip, big.NewInt(int64(num)), newPrefixLen), + IP: insertNumIntoIP(ip, num, newPrefixLen), Mask: net.CIDRMask(newPrefixLen, addrLen), }, nil } -// Host takes a parent CIDR range and turns it into a host IP address with -// the given host number. +// Host takes a parent CIDR range and turns it into a host IP address with the +// given host number. // // For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2. func Host(base *net.IPNet, num int) (net.IP, error) { + return HostBig(base, big.NewInt(int64(num))) +} + +// HostBig takes a parent CIDR range and turns it into a host IP address with +// the given host number. It differs from Host in that it takes a *big.Int for +// the num, instead of an int. 
+// +// For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2. +func HostBig(base *net.IPNet, num *big.Int) (net.IP, error) { ip := base.IP mask := base.Mask - bigNum := big.NewInt(int64(num)) parentLen, addrLen := mask.Size() hostLen := addrLen - parentLen @@ -65,11 +83,11 @@ func Host(base *net.IPNet, num int) (net.IP, error) { maxHostNum.Lsh(maxHostNum, uint(hostLen)) maxHostNum.Sub(maxHostNum, big.NewInt(1)) - numUint64 := big.NewInt(int64(bigNum.Uint64())) - if bigNum.Cmp(big.NewInt(0)) == -1 { - numUint64.Neg(bigNum) + numUint64 := big.NewInt(int64(num.Uint64())) + if num.Cmp(big.NewInt(0)) == -1 { + numUint64.Neg(num) numUint64.Sub(numUint64, big.NewInt(int64(1))) - bigNum.Sub(maxHostNum, numUint64) + num.Sub(maxHostNum, numUint64) } if numUint64.Cmp(maxHostNum) == 1 { @@ -81,7 +99,7 @@ func Host(base *net.IPNet, num int) (net.IP, error) { } else { bitlength = 128 } - return insertNumIntoIP(ip, bigNum, bitlength), nil + return insertNumIntoIP(ip, num, bitlength), nil } // AddressRange returns the first and last addresses in the given CIDR range. diff --git a/vendor/github.com/apparentlymart/go-textseg/LICENSE b/vendor/github.com/apparentlymart/go-textseg/LICENSE deleted file mode 100644 index 684b03b4a..000000000 --- a/vendor/github.com/apparentlymart/go-textseg/LICENSE +++ /dev/null @@ -1,95 +0,0 @@ -Copyright (c) 2017 Martin Atkins - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------- - -Unicode table generation programs are under a separate copyright and license: - -Copyright (c) 2014 Couchbase, Inc. -Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed under the -License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, -either express or implied. See the License for the specific language governing permissions -and limitations under the License. - ---------- - -Grapheme break data is provided as part of the Unicode character database, -copright 2016 Unicode, Inc, which is provided with the following license: - -Unicode Data Files include all data files under the directories -http://www.unicode.org/Public/, http://www.unicode.org/reports/, -http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and -http://www.unicode.org/utility/trac/browser/. 
- -Unicode Data Files do not include PDF online code charts under the -directory http://www.unicode.org/Public/. - -Software includes any source code published in the Unicode Standard -or under the directories -http://www.unicode.org/Public/, http://www.unicode.org/reports/, -http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and -http://www.unicode.org/utility/trac/browser/. - -NOTICE TO USER: Carefully read the following legal agreement. -BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S -DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), -YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE -TERMS AND CONDITIONS OF THIS AGREEMENT. -IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE -THE DATA FILES OR SOFTWARE. - -COPYRIGHT AND PERMISSION NOTICE - -Copyright © 1991-2017 Unicode, Inc. All rights reserved. -Distributed under the Terms of Use in http://www.unicode.org/copyright.html. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of the Unicode data files and any associated documentation -(the "Data Files") or Unicode software and any associated documentation -(the "Software") to deal in the Data Files or Software -without restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, and/or sell copies of -the Data Files or Software, and to permit persons to whom the Data Files -or Software are furnished to do so, provided that either -(a) this copyright and permission notice appear with all copies -of the Data Files or Software, or -(b) this copyright and permission notice appear in associated -Documentation. - -THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT OF THIRD PARTY RIGHTS. -IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS -NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL -DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, -DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -PERFORMANCE OF THE DATA FILES OR SOFTWARE. - -Except as contained in this notice, the name of a copyright holder -shall not be used in advertising or otherwise to promote the sale, -use or other dealings in these Data Files or Software without prior -written authorization of the copyright holder. diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go b/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go deleted file mode 100644 index 5752e9ef8..000000000 --- a/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go +++ /dev/null @@ -1,30 +0,0 @@ -package textseg - -import ( - "bufio" - "bytes" -) - -// AllTokens is a utility that uses a bufio.SplitFunc to produce a slice of -// all of the recognized tokens in the given buffer. -func AllTokens(buf []byte, splitFunc bufio.SplitFunc) ([][]byte, error) { - scanner := bufio.NewScanner(bytes.NewReader(buf)) - scanner.Split(splitFunc) - var ret [][]byte - for scanner.Scan() { - ret = append(ret, scanner.Bytes()) - } - return ret, scanner.Err() -} - -// TokenCount is a utility that uses a bufio.SplitFunc to count the number of -// recognized tokens in the given buffer. 
-func TokenCount(buf []byte, splitFunc bufio.SplitFunc) (int, error) { - scanner := bufio.NewScanner(bytes.NewReader(buf)) - scanner.Split(splitFunc) - var ret int - for scanner.Scan() { - ret++ - } - return ret, scanner.Err() -} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go b/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go deleted file mode 100644 index 81f3a7471..000000000 --- a/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go +++ /dev/null @@ -1,7 +0,0 @@ -package textseg - -//go:generate go run make_tables.go -output tables.go -//go:generate go run make_test_tables.go -output tables_test.go -//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -m GraphemeCluster -p "Prepend,CR,LF,Control,Extend,Regional_Indicator,SpacingMark,L,V,T,LV,LVT,E_Base,E_Modifier,ZWJ,Glue_After_Zwj,E_Base_GAZ" -o grapheme_clusters_table.rl -//go:generate ragel -Z grapheme_clusters.rl -//go:generate gofmt -w grapheme_clusters.go diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go deleted file mode 100644 index 012bc690a..000000000 --- a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go +++ /dev/null @@ -1,5276 +0,0 @@ - -// line 1 "grapheme_clusters.rl" -package textseg - -import ( - "errors" - "unicode/utf8" -) - -// Generated from grapheme_clusters.rl. DO NOT EDIT - -// line 13 "grapheme_clusters.go" -var _graphclust_actions []byte = []byte{ - 0, 1, 0, 1, 4, 1, 9, 1, 10, - 1, 11, 1, 12, 1, 13, 1, 14, - 1, 15, 1, 16, 1, 17, 1, 18, - 1, 19, 1, 20, 1, 21, 2, 1, - 7, 2, 1, 8, 2, 2, 3, 2, - 5, 1, 3, 0, 1, 8, 3, 5, - 0, 1, 3, 5, 1, 6, -} - -var _graphclust_key_offsets []int16 = []int16{ - 0, 0, 1, 3, 5, 7, 10, 15, - 17, 20, 28, 31, 33, 35, 37, 67, - 75, 77, 81, 84, 89, 94, 104, 116, - 122, 127, 137, 140, 147, 151, 159, 169, - 173, 181, 183, 191, 194, 196, 201, 203, - 210, 212, 220, 221, 242, 246, 252, 257, - 259, 263, 267, 269, 273, 275, 278, 282, - 284, 291, 293, 297, 301, 305, 307, 309, - 318, 322, 327, 329, 335, 337, 338, 340, - 341, 343, 345, 347, 349, 364, 368, 370, - 372, 377, 381, 385, 387, 389, 393, 397, - 399, 403, 410, 415, 419, 422, 423, 427, - 434, 439, 440, 441, 443, 452, 454, 477, - 481, 483, 487, 491, 492, 496, 500, 503, - 505, 510, 523, 525, 527, 529, 531, 535, - 539, 541, 543, 545, 549, 553, 557, 559, - 561, 563, 565, 566, 568, 574, 580, 586, - 588, 592, 596, 601, 604, 614, 616, 618, - 621, 623, 625, 627, 629, 632, 637, 639, - 642, 650, 653, 655, 657, 659, 690, 698, - 700, 704, 711, 723, 730, 744, 750, 768, - 779, 785, 797, 800, 809, 814, 824, 830, - 844, 850, 862, 874, 878, 880, 886, 888, - 895, 898, 906, 907, 928, 937, 945, 951, - 953, 957, 961, 966, 972, 974, 977, 990, - 995, 1009, 1011, 1020, 1027, 1038, 1048, 1056, - 1067, 1071, 1076, 1078, 1080, 1082, 1083, 1085, - 1087, 1089, 1091, 1106, 1110, 1112, 1114, 1122, - 1130, 1132, 1136, 1147, 1150, 1160, 1164, 1171, - 1179, 1185, 1188, 1189, 1193, 1200, 1205, 1206, - 1207, 1209, 1218, 1220, 1243, 1248, 1250, 1259, - 1264, 1265, 1274, 1280, 1290, 1295, 1302, 1316, - 1320, 1325, 1336, 1339, 1349, 1353, 1362, 1364, - 1372, 1379, 1385, 1392, 1396, 1398, 1400, 1402, - 1403, 1405, 1411, 1419, 1425, 1427, 1431, 1435, - 1440, 1443, 1453, 1455, 1457, 1458, 1460, 1461, - 1467, 1469, 1471, 1471, 1472, 1473, 1474, 1480, - 1482, 1484, 1484, 1490, 1492, 1497, 1502, 1504, - 1506, 1508, 1511, 1516, 1518, 
1521, 1529, 1532, - 1534, 1536, 1538, 1568, 1576, 1578, 1582, 1585, - 1590, 1595, 1605, 1617, 1623, 1628, 1638, 1641, - 1648, 1652, 1660, 1670, 1674, 1682, 1684, 1692, - 1695, 1697, 1702, 1704, 1711, 1713, 1721, 1722, - 1743, 1747, 1753, 1758, 1760, 1764, 1768, 1770, - 1774, 1776, 1779, 1783, 1785, 1792, 1794, 1798, - 1802, 1806, 1808, 1810, 1819, 1823, 1828, 1830, - 1836, 1838, 1839, 1841, 1842, 1844, 1846, 1848, - 1850, 1865, 1869, 1871, 1873, 1878, 1882, 1886, - 1888, 1890, 1894, 1898, 1900, 1904, 1911, 1916, - 1920, 1923, 1924, 1928, 1935, 1940, 1941, 1942, - 1944, 1953, 1955, 1978, 1982, 1984, 1988, 1992, - 1993, 1997, 2001, 2004, 2006, 2011, 2024, 2026, - 2028, 2030, 2032, 2036, 2040, 2042, 2044, 2046, - 2050, 2054, 2058, 2060, 2062, 2064, 2066, 2067, - 2069, 2075, 2081, 2087, 2089, 2093, 2097, 2102, - 2105, 2115, 2117, 2119, 2122, 2124, 2126, 2128, - 2130, 2133, 2138, 2140, 2143, 2151, 2154, 2156, - 2158, 2160, 2191, 2199, 2201, 2205, 2212, 2224, - 2231, 2245, 2251, 2269, 2280, 2286, 2298, 2301, - 2310, 2315, 2325, 2331, 2345, 2351, 2363, 2375, - 2379, 2381, 2387, 2389, 2396, 2399, 2407, 2408, - 2429, 2438, 2446, 2452, 2454, 2458, 2462, 2467, - 2473, 2475, 2478, 2491, 2496, 2510, 2512, 2521, - 2528, 2539, 2549, 2557, 2568, 2572, 2577, 2579, - 2581, 2583, 2584, 2586, 2588, 2590, 2592, 2607, - 2611, 2613, 2615, 2623, 2631, 2633, 2637, 2648, - 2651, 2661, 2665, 2672, 2680, 2686, 2689, 2690, - 2694, 2701, 2706, 2707, 2708, 2710, 2719, 2721, - 2744, 2749, 2751, 2760, 2765, 2766, 2775, 2781, - 2791, 2796, 2803, 2817, 2821, 2826, 2837, 2840, - 2850, 2854, 2863, 2865, 2873, 2880, 2886, 2893, - 2897, 2899, 2901, 2903, 2904, 2906, 2912, 2920, - 2926, 2928, 2932, 2936, 2941, 2944, 2954, 2956, - 2958, 2959, 2961, 2962, 2968, 2970, 2972, 2972, - 2973, 2974, 2975, 2981, 2983, 2985, 2985, 2991, - 2993, 2997, 3003, 3006, 3009, 3013, 3016, 3019, - 3026, 3028, 3052, 3054, 3078, 3080, 3082, 3105, - 3107, 3109, 3110, 3112, 3114, 3116, 3122, 3124, - 3156, 3160, 3165, 3188, 3190, 3192, 3194, 3196, - 3199, 3201, 3203, 3207, 3207, 3263, 3319, 3350, - 3355, 3359, 3366, 3374, 3378, 3381, 3384, 3390, - 3392, 3412, 3418, 3423, 3425, 3427, 3430, 3432, - 3434, 3438, 3494, 3550, 3581, 3586, 3594, 3598, - 3600, 3605, 3611, 3615, 3618, 3624, 3627, 3631, - 3634, 3638, 3651, 3655, 3662, 3663, 3665, 3668, - 3678, 3698, 3705, 3709, 3716, 3726, 3733, 3736, - 3751, 3753, 3756, 3761, 3763, 3766, 3769, 3773, - 3776, 3779, 3786, 3788, 3790, 3792, 3794, 3797, - 3802, 3804, 3807, 3815, 3818, 3820, 3822, 3824, - 3854, 3862, 3864, 3868, 3871, 3876, 3881, 3891, - 3903, 3909, 3914, 3924, 3927, 3934, 3938, 3946, - 3956, 3960, 3968, 3970, 3978, 3981, 3983, 3988, - 3990, 3997, 3999, 4007, 4008, 4029, 4033, 4039, - 4044, 4046, 4050, 4054, 4056, 4060, 4062, 4065, - 4069, 4071, 4078, 4080, 4084, 4088, 4092, 4094, - 4096, 4105, 4109, 4114, 4116, 4122, 4124, 4125, - 4127, 4128, 4130, 4132, 4134, 4136, 4151, 4155, - 4157, 4159, 4164, 4168, 4172, 4174, 4176, 4180, - 4184, 4186, 4190, 4197, 4202, 4206, 4209, 4210, - 4214, 4221, 4226, 4227, 4228, 4230, 4239, 4241, - 4264, 4268, 4270, 4274, 4278, 4279, 4283, 4287, - 4290, 4292, 4297, 4310, 4312, 4314, 4316, 4318, - 4322, 4326, 4328, 4330, 4332, 4336, 4340, 4344, - 4346, 4348, 4350, 4352, 4353, 4355, 4361, 4367, - 4373, 4375, 4379, 4383, 4388, 4391, 4401, 4403, - 4405, 4408, 4410, 4412, 4414, 4416, 4419, 4424, - 4426, 4429, 4437, 4440, 4442, 4444, 4446, 4477, - 4485, 4487, 4491, 4498, 4510, 4517, 4531, 4537, - 4555, 4566, 4572, 4584, 4587, 4596, 4601, 4611, - 4617, 4631, 4637, 4649, 4661, 
4665, 4667, 4673, - 4675, 4682, 4685, 4693, 4694, 4715, 4724, 4732, - 4738, 4740, 4744, 4748, 4753, 4759, 4761, 4764, - 4777, 4782, 4796, 4798, 4807, 4814, 4825, 4835, - 4843, 4854, 4858, 4863, 4865, 4867, 4869, 4870, - 4872, 4874, 4876, 4878, 4893, 4897, 4899, 4901, - 4909, 4917, 4919, 4923, 4934, 4937, 4947, 4951, - 4958, 4966, 4972, 4975, 4976, 4980, 4987, 4992, - 4993, 4994, 4996, 5005, 5007, 5030, 5035, 5037, - 5046, 5051, 5052, 5061, 5067, 5077, 5082, 5089, - 5103, 5107, 5112, 5123, 5126, 5136, 5140, 5149, - 5151, 5159, 5166, 5172, 5179, 5183, 5185, 5187, - 5189, 5190, 5192, 5198, 5206, 5212, 5214, 5218, - 5222, 5227, 5230, 5240, 5242, 5244, 5245, 5247, - 5248, 5254, 5256, 5258, 5258, 5259, 5260, 5261, - 5267, 5269, 5271, 5271, 5277, 5301, 5303, 5327, - 5329, 5331, 5354, 5356, 5358, 5359, 5361, 5363, - 5365, 5371, 5373, 5405, 5409, 5414, 5437, 5439, - 5441, 5443, 5445, 5448, 5450, 5452, 5456, 5456, - 5512, 5568, 5599, 5604, 5607, 5614, 5626, 5628, - 5630, 5632, 5635, 5640, 5642, 5645, 5653, 5656, - 5658, 5660, 5662, 5692, 5700, 5702, 5706, 5709, - 5714, 5719, 5729, 5741, 5747, 5752, 5762, 5765, - 5772, 5776, 5784, 5794, 5798, 5806, 5808, 5816, - 5819, 5821, 5826, 5828, 5835, 5837, 5845, 5846, - 5867, 5871, 5877, 5882, 5884, 5888, 5892, 5894, - 5898, 5900, 5903, 5907, 5909, 5916, 5918, 5922, - 5926, 5930, 5932, 5934, 5943, 5947, 5952, 5954, - 5956, 5958, 5959, 5961, 5963, 5965, 5967, 5982, - 5986, 5988, 5990, 5995, 5999, 6003, 6005, 6007, - 6011, 6015, 6017, 6021, 6028, 6033, 6037, 6040, - 6041, 6045, 6051, 6056, 6057, 6058, 6060, 6069, - 6071, 6094, 6098, 6100, 6104, 6108, 6109, 6113, - 6117, 6120, 6122, 6127, 6140, 6142, 6144, 6146, - 6148, 6152, 6156, 6158, 6160, 6162, 6166, 6170, - 6174, 6176, 6178, 6180, 6182, 6183, 6185, 6191, - 6197, 6203, 6205, 6209, 6213, 6218, 6221, 6231, - 6233, 6235, 6236, 6242, 6244, 6246, 6246, 6252, - 6253, 6260, 6263, 6265, 6267, 6269, 6271, 6274, - 6279, 6281, 6284, 6292, 6295, 6297, 6299, 6301, - 6332, 6340, 6342, 6346, 6353, 6365, 6372, 6386, - 6392, 6410, 6421, 6427, 6439, 6442, 6451, 6456, - 6466, 6472, 6486, 6492, 6504, 6516, 6520, 6522, - 6528, 6530, 6537, 6540, 6548, 6549, 6570, 6579, - 6587, 6593, 6595, 6599, 6603, 6608, 6614, 6616, - 6619, 6632, 6637, 6651, 6653, 6662, 6669, 6680, - 6690, 6698, 6709, 6713, 6718, 6720, 6722, 6724, - 6725, 6727, 6729, 6731, 6733, 6748, 6752, 6754, - 6756, 6764, 6772, 6774, 6778, 6789, 6792, 6802, - 6806, 6813, 6821, 6827, 6830, 6831, 6835, 6842, - 6847, 6848, 6849, 6851, 6860, 6862, 6885, 6890, - 6892, 6901, 6906, 6907, 6916, 6922, 6932, 6937, - 6944, 6958, 6962, 6967, 6978, 6981, 6991, 6995, - 7004, 7006, 7014, 7021, 7027, 7034, 7038, 7040, - 7042, 7044, 7045, 7047, 7053, 7061, 7067, 7069, - 7073, 7077, 7082, 7085, 7095, 7097, 7099, 7100, - 7102, 7103, 7109, 7111, 7113, 7113, 7114, 7115, - 7121, 7124, 7126, 7128, 7130, 7133, 7138, 7140, - 7143, 7151, 7154, 7156, 7158, 7160, 7191, 7199, - 7201, 7205, 7212, 7214, 7216, 7218, 7221, 7226, - 7228, 7231, 7239, 7242, 7244, 7246, 7248, 7278, - 7286, 7288, 7292, 7295, 7300, 7305, 7315, 7327, - 7333, 7338, 7348, 7351, 7358, 7362, 7370, 7380, - 7384, 7392, 7394, 7402, 7405, 7407, 7412, 7414, - 7421, 7423, 7431, 7432, 7453, 7457, 7463, 7468, - 7470, 7474, 7478, 7480, 7484, 7486, 7489, 7493, - 7495, 7502, 7504, 7508, 7512, 7516, 7518, 7520, - 7529, 7533, 7538, 7540, 7546, 7548, 7549, 7551, - 7552, 7554, 7556, 7558, 7560, 7575, 7579, 7581, - 7583, 7588, 7592, 7596, 7598, 7600, 7604, 7608, - 7610, 7614, 7621, 7626, 7630, 7633, 7634, 7638, - 7645, 7650, 7651, 7652, 7654, 
[... generated code elided: the remainder of this hunk removes a few
thousand lines of Ragel-generated state tables from the vendored copy of
github.com/apparentlymart/go-textseg (grapheme_clusters.go), a transitive
dependency via HCL and the Terraform plugin SDK. The removed lines are the
tail of an offsets table (apparently _graphclust_key_offsets) followed by
the _graphclust_trans_keys, _graphclust_single_lengths, and
_graphclust_range_lengths byte tables. Together these tables encode the
UTF-8 state machine that go-textseg uses to split text into Unicode
grapheme clusters; they are regenerated wholesale whenever the dependency
is bumped, so the individual byte values are not meaningful to review. ...]
1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 2, - 1, 1, 4, 1, 1, 1, 1, 2, - 4, 1, 2, 1, 2, 2, 5, 6, - 2, 2, 5, 1, 3, 2, 3, 5, - 2, 3, 1, 3, 1, 1, 2, 1, - 2, 1, 4, 0, 0, 2, 3, 1, - 1, 2, 2, 1, 2, 1, 1, 2, - 1, 2, 1, 2, 2, 2, 1, 1, - 4, 2, 0, 0, 0, 1, 0, 1, - 0, 1, 0, 1, 1, 0, 2, 1, - 1, 1, 2, 2, 1, 1, 2, 2, - 1, 1, 3, 2, 2, 0, 0, 2, - 0, 0, 0, 0, 1, 4, 1, 0, - 2, 1, 2, 2, 0, 2, 2, 1, - 1, 2, 6, 1, 1, 1, 1, 2, - 2, 1, 1, 1, 2, 2, 0, 1, - 1, 1, 1, 0, 1, 0, 3, 3, - 1, 2, 2, 2, 0, 5, 1, 1, - 0, 1, 1, 1, 1, 1, 2, 1, - 1, 4, 1, 1, 1, 1, 1, 4, - 1, 2, 2, 5, 2, 6, 2, 8, - 4, 2, 5, 0, 3, 2, 4, 1, - 6, 2, 4, 4, 1, 1, 2, 1, - 2, 1, 4, 0, 0, 4, 4, 1, - 1, 2, 2, 2, 2, 1, 1, 6, - 2, 5, 1, 3, 3, 4, 4, 4, - 4, 2, 0, 0, 1, 1, 0, 1, - 0, 1, 1, 0, 2, 1, 1, 2, - 4, 1, 2, 4, 1, 5, 0, 3, - 2, 1, 0, 0, 2, 0, 0, 0, - 0, 1, 4, 1, 0, 2, 1, 4, - 2, 0, 4, 3, 4, 2, 2, 6, - 2, 2, 4, 1, 4, 2, 4, 1, - 3, 3, 2, 2, 0, 1, 1, 1, - 0, 1, 0, 3, 3, 1, 2, 2, - 2, 0, 5, 1, 1, 0, 1, 0, - 1, 1, 1, 0, 0, 0, 0, 1, - 1, 1, 0, 0, 0, 1, 0, 1, - 1, 0, 1, 1, 0, 1, 0, 1, - 3, 1, 2, 2, 1, 0, 0, 1, - 0, 0, 0, 0, 0, 1, 0, 1, - 1, 2, 2, 1, 1, 5, 1, 1, - 1, 1, 2, 1, 1, 4, 1, 1, - 1, 1, 2, 4, 1, 2, 1, 2, - 2, 5, 6, 2, 2, 5, 1, 3, - 2, 3, 5, 2, 3, 1, 3, 1, - 1, 2, 1, 2, 1, 4, 0, 0, - 2, 3, 1, 1, 2, 2, 1, 2, - 1, 1, 2, 1, 2, 1, 2, 2, - 2, 1, 1, 4, 2, 0, 0, 1, - 1, 0, 1, 0, 1, 1, 0, 2, - 1, 1, 1, 2, 2, 1, 1, 2, - 2, 1, 1, 3, 2, 2, 0, 0, - 2, 0, 0, 0, 0, 1, 4, 1, - 0, 2, 1, 2, 2, 0, 2, 2, - 1, 1, 2, 6, 1, 1, 1, 1, - 2, 2, 1, 1, 1, 2, 2, 0, - 1, 1, 1, 1, 0, 1, 0, 3, - 3, 1, 2, 2, 2, 0, 5, 1, - 1, 0, 1, 1, 1, 0, 0, 0, - 0, 0, 1, 1, 1, 1, 1, 2, - 1, 1, 4, 1, 1, 1, 1, 1, - 4, 1, 2, 2, 5, 2, 6, 2, - 8, 4, 2, 5, 0, 3, 2, 4, - 1, 6, 2, 4, 4, 1, 1, 2, - 1, 2, 1, 4, 0, 0, 4, 4, - 1, 1, 2, 2, 2, 2, 1, 1, - 6, 2, 5, 1, 3, 3, 4, 4, - 4, 4, 2, 0, 0, 1, 1, 0, - 1, 0, 1, 1, 0, 2, 1, 1, - 2, 4, 1, 2, 4, 1, 5, 0, - 3, 2, 1, 0, 0, 2, 0, 0, - 0, 0, 1, 4, 1, 0, 2, 1, - 4, 2, 0, 4, 3, 4, 2, 2, - 6, 2, 2, 4, 1, 4, 2, 4, - 1, 3, 3, 2, 2, 0, 1, 1, - 1, 0, 1, 0, 3, 3, 1, 2, - 2, 2, 0, 5, 1, 1, 0, 1, - 0, 1, 1, 1, 0, 0, 0, 3, - 1, 1, 1, 1, 1, 2, 1, 1, - 4, 1, 1, 1, 1, 1, 4, 1, - 2, 2, 1, 1, 1, 1, 2, 1, - 1, 4, 1, 1, 1, 1, 2, 4, - 1, 2, 1, 2, 2, 5, 6, 2, - 2, 5, 1, 3, 2, 3, 5, 2, - 3, 1, 3, 1, 1, 2, 1, 2, - 1, 4, 0, 0, 2, 3, 1, 1, - 2, 2, 1, 2, 1, 1, 2, 1, - 2, 1, 2, 2, 2, 1, 1, 4, - 2, 0, 0, 0, 1, 0, 1, 0, - 1, 0, 1, 1, 0, 2, 1, 1, - 1, 2, 2, 1, 1, 2, 2, 1, - 1, 3, 2, 2, 0, 0, 2, 0, - 0, 0, 0, 1, 4, 1, 0, 2, - 1, 2, 2, 0, 2, 2, 1, 1, - 2, 6, 1, 1, 1, 1, 2, 2, - 1, 1, 1, 2, 2, 0, 1, 1, - 1, 1, 0, 1, 0, 3, 3, 1, - 2, 2, 2, 0, 5, 1, 1, 0, - 1, 0, 0, 0, 1, 1, 1, 0, - 0, 5, 2, 6, 2, 8, 4, 2, - 5, 0, 3, 2, 4, 1, 6, 2, - 4, 4, 1, 1, 2, 1, 2, 1, - 4, 0, 0, 4, 4, 1, 1, 2, - 2, 2, 2, 1, 1, 6, 2, 5, - 1, 3, 3, 4, 4, 4, 4, 2, - 0, 0, 1, 1, 0, 1, 0, 1, - 1, 0, 2, 1, 1, 2, 4, 1, - 2, 4, 1, 5, 0, 3, 2, 1, - 0, 0, 2, 0, 0, 0, 0, 1, - 4, 1, 0, 2, 1, 4, 2, 0, - 4, 3, 4, 2, 2, 6, 2, 2, - 4, 1, 4, 2, 4, 1, 3, 3, - 2, 2, 0, 1, 1, 1, 0, 1, - 0, 3, 3, 1, 2, 2, 2, 0, - 5, 1, 1, 0, 1, 0, 1, 1, - 1, 0, 1, 3, 1, 3, 3, 1, - 0, 0, 0, 0, 0, 1, 1, 1, - 3, 2, 4, 1, 0, 1, 1, 1, - 3, 1, 1, 1, 3, 1, 3, 1, - 3, 1, 2, 1, 1, 1, 1, 2, - 1, 1, 4, 1, 1, 1, 1, 2, - 4, 1, 2, 1, 2, 2, 5, 6, - 2, 2, 5, 1, 3, 2, 3, 5, - 2, 3, 1, 3, 1, 1, 2, 1, - 2, 1, 4, 0, 0, 2, 3, 1, - 1, 2, 2, 1, 2, 1, 1, 2, - 1, 2, 1, 2, 2, 2, 1, 1, - 4, 2, 0, 0, 0, 1, 0, 1, - 0, 1, 0, 1, 1, 0, 2, 1, - 1, 1, 2, 2, 1, 1, 2, 2, - 1, 1, 3, 2, 2, 0, 0, 2, - 0, 0, 0, 0, 1, 4, 1, 0, - 2, 1, 2, 2, 0, 2, 2, 1, - 1, 2, 6, 1, 1, 1, 1, 2, - 2, 1, 1, 1, 2, 2, 0, 1, - 
1, 1, 1, 0, 1, 0, 3, 3, - 1, 2, 2, 2, 0, 5, 1, 1, - 0, 1, 1, 1, 1, 1, 2, 1, - 1, 4, 1, 1, 1, 1, 1, 4, - 1, 2, 2, 5, 2, 6, 2, 8, - 4, 2, 5, 0, 3, 2, 4, 1, - 6, 2, 4, 4, 1, 1, 2, 1, - 2, 1, 4, 0, 0, 4, 4, 1, - 1, 2, 2, 2, 2, 1, 1, 6, - 2, 5, 1, 3, 3, 4, 4, 4, - 4, 2, 0, 0, 1, 1, 0, 1, - 0, 1, 1, 0, 2, 1, 1, 2, - 4, 1, 2, 4, 1, 5, 0, 3, - 2, 1, 0, 0, 2, 0, 0, 0, - 0, 1, 4, 1, 0, 2, 1, 4, - 2, 0, 4, 3, 4, 2, 2, 6, - 2, 2, 4, 1, 4, 2, 4, 1, - 3, 3, 2, 2, 0, 1, 1, 1, - 0, 1, 0, 3, 3, 1, 2, 2, - 2, 0, 5, 1, 1, 0, 1, 0, - 1, 1, 1, 0, 0, 0, 0, 1, - 1, 1, 0, 0, 0, 0, 1, 1, - 5, 9, 2, 1, 3, 5, 3, 1, - 6, 1, 1, 2, 2, 2, 6, 0, - 0, 0, 4, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, -} - -var _graphclust_index_offsets []int16 = []int16{ - 0, 0, 2, 4, 6, 8, 11, 15, - 17, 20, 25, 28, 30, 32, 34, 63, - 68, 70, 73, 76, 80, 84, 90, 97, - 102, 106, 112, 115, 120, 123, 129, 135, - 138, 144, 146, 152, 155, 157, 161, 163, - 169, 171, 176, 178, 200, 203, 207, 212, - 214, 217, 220, 222, 225, 227, 230, 233, - 235, 241, 243, 246, 249, 252, 254, 256, - 262, 265, 271, 274, 281, 283, 285, 287, - 289, 291, 294, 296, 298, 314, 317, 319, - 321, 326, 329, 332, 334, 336, 339, 342, - 344, 348, 353, 357, 360, 364, 366, 369, - 377, 383, 385, 387, 389, 395, 397, 421, - 424, 426, 429, 432, 434, 437, 440, 443, - 445, 449, 457, 459, 461, 463, 465, 468, - 471, 473, 475, 477, 480, 483, 488, 490, - 492, 494, 496, 498, 500, 507, 511, 515, - 517, 520, 523, 527, 531, 537, 539, 541, - 545, 547, 549, 551, 553, 556, 560, 562, - 565, 570, 573, 575, 577, 579, 610, 615, - 617, 620, 626, 634, 640, 649, 654, 665, - 673, 678, 686, 690, 697, 701, 708, 714, - 723, 728, 737, 746, 750, 752, 757, 759, - 765, 768, 773, 775, 797, 803, 808, 814, - 816, 819, 822, 826, 831, 833, 836, 844, - 848, 858, 860, 867, 872, 880, 887, 892, - 900, 903, 909, 912, 914, 916, 918, 920, - 923, 925, 927, 943, 946, 948, 950, 957, - 962, 964, 967, 975, 978, 984, 989, 994, - 1001, 1007, 1011, 1013, 1016, 1024, 1030, 1032, - 1034, 1036, 1042, 1044, 1068, 1072, 1074, 1080, - 1084, 1086, 1092, 1096, 1103, 1107, 1113, 1122, - 1125, 1129, 1137, 1140, 1147, 1150, 1156, 1158, - 1164, 1169, 1174, 1180, 1185, 1187, 1189, 1191, - 1193, 1195, 1202, 1208, 1212, 1214, 1217, 1220, - 1224, 1228, 1234, 1236, 1238, 1240, 1242, 1244, - 1250, 1252, 1254, 1255, 1257, 1259, 1261, 1267, - 1269, 1271, 1272, 1279, 1281, 1285, 1289, 1291, - 1293, 1295, 1298, 1302, 1304, 1307, 1312, 1315, - 1317, 1319, 1321, 1350, 1355, 1357, 1360, 1363, - 1367, 1371, 1377, 1384, 1389, 1393, 1399, 1402, - 1407, 1410, 1416, 1422, 1425, 1431, 1433, 1439, - 1442, 1444, 1448, 1450, 1456, 1458, 1463, 1465, - 1487, 1490, 1494, 1499, 1501, 1504, 1507, 1509, - 1512, 1514, 1517, 1520, 1522, 1528, 1530, 1533, - 1536, 1539, 1541, 1543, 1549, 1552, 1558, 1561, - 1568, 1570, 1572, 1574, 1576, 1578, 1581, 1583, - 1585, 1601, 1604, 1606, 1608, 1613, 1616, 1619, - 1621, 1623, 1626, 1629, 1631, 1635, 1640, 1644, - 1647, 1651, 1653, 1656, 1664, 1670, 1672, 1674, - 1676, 1682, 1684, 1708, 1711, 1713, 1716, 1719, - 1721, 1724, 1727, 1730, 1732, 1736, 1744, 1746, - 1748, 1750, 1752, 1755, 1758, 1760, 1762, 1764, - 1767, 1770, 1775, 1777, 1779, 1781, 1783, 1785, - 1787, 1794, 1798, 1802, 1804, 1807, 1810, 1814, - 1818, 1824, 1826, 1828, 1832, 1834, 1836, 1838, - 1840, 1843, 1847, 1849, 1852, 1857, 1860, 1862, - 1864, 1866, 1897, 1902, 1904, 1907, 1913, 1921, - 1927, 1936, 1941, 1952, 1960, 1965, 1973, 1977, - 1984, 1988, 1995, 2001, 2010, 2015, 2024, 2033, - 2037, 2039, 2044, 2046, 2052, 2055, 2060, 
2062, - 2084, 2090, 2095, 2101, 2103, 2106, 2109, 2113, - 2118, 2120, 2123, 2131, 2135, 2145, 2147, 2154, - 2159, 2167, 2174, 2179, 2187, 2190, 2196, 2199, - 2201, 2203, 2205, 2207, 2210, 2212, 2214, 2230, - 2233, 2235, 2237, 2244, 2249, 2251, 2254, 2262, - 2265, 2271, 2276, 2281, 2288, 2294, 2298, 2300, - 2303, 2311, 2317, 2319, 2321, 2323, 2329, 2331, - 2355, 2359, 2361, 2367, 2371, 2373, 2379, 2383, - 2390, 2394, 2400, 2409, 2412, 2416, 2424, 2427, - 2434, 2437, 2443, 2445, 2451, 2456, 2461, 2467, - 2472, 2474, 2476, 2478, 2480, 2482, 2489, 2495, - 2499, 2501, 2504, 2507, 2511, 2515, 2521, 2523, - 2525, 2527, 2529, 2531, 2537, 2539, 2541, 2542, - 2544, 2546, 2548, 2554, 2556, 2558, 2559, 2566, - 2568, 2571, 2575, 2578, 2581, 2585, 2588, 2591, - 2598, 2600, 2625, 2627, 2652, 2654, 2656, 2680, - 2682, 2684, 2686, 2688, 2691, 2693, 2697, 2699, - 2730, 2733, 2738, 2762, 2765, 2767, 2770, 2773, - 2777, 2780, 2783, 2787, 2788, 2844, 2900, 2930, - 2934, 2937, 2944, 2950, 2953, 2956, 2959, 2963, - 2965, 2983, 2987, 2992, 2995, 2998, 3002, 3005, - 3008, 3012, 3068, 3124, 3154, 3158, 3163, 3167, - 3169, 3173, 3179, 3183, 3186, 3190, 3193, 3196, - 3199, 3202, 3215, 3218, 3226, 3228, 3230, 3233, - 3239, 3251, 3257, 3261, 3266, 3272, 3277, 3280, - 3290, 3292, 3295, 3300, 3302, 3305, 3308, 3312, - 3315, 3318, 3325, 3327, 3329, 3331, 3333, 3336, - 3340, 3342, 3345, 3350, 3353, 3355, 3357, 3359, - 3388, 3393, 3395, 3398, 3401, 3405, 3409, 3415, - 3422, 3427, 3431, 3437, 3440, 3445, 3448, 3454, - 3460, 3463, 3469, 3471, 3477, 3480, 3482, 3486, - 3488, 3494, 3496, 3501, 3503, 3525, 3528, 3532, - 3537, 3539, 3542, 3545, 3547, 3550, 3552, 3555, - 3558, 3560, 3566, 3568, 3571, 3574, 3577, 3579, - 3581, 3587, 3590, 3596, 3599, 3606, 3608, 3610, - 3612, 3614, 3616, 3619, 3621, 3623, 3639, 3642, - 3644, 3646, 3651, 3654, 3657, 3659, 3661, 3664, - 3667, 3669, 3673, 3678, 3682, 3685, 3689, 3691, - 3694, 3702, 3708, 3710, 3712, 3714, 3720, 3722, - 3746, 3749, 3751, 3754, 3757, 3759, 3762, 3765, - 3768, 3770, 3774, 3782, 3784, 3786, 3788, 3790, - 3793, 3796, 3798, 3800, 3802, 3805, 3808, 3813, - 3815, 3817, 3819, 3821, 3823, 3825, 3832, 3836, - 3840, 3842, 3845, 3848, 3852, 3856, 3862, 3864, - 3866, 3870, 3872, 3874, 3876, 3878, 3881, 3885, - 3887, 3890, 3895, 3898, 3900, 3902, 3904, 3935, - 3940, 3942, 3945, 3951, 3959, 3965, 3974, 3979, - 3990, 3998, 4003, 4011, 4015, 4022, 4026, 4033, - 4039, 4048, 4053, 4062, 4071, 4075, 4077, 4082, - 4084, 4090, 4093, 4098, 4100, 4122, 4128, 4133, - 4139, 4141, 4144, 4147, 4151, 4156, 4158, 4161, - 4169, 4173, 4183, 4185, 4192, 4197, 4205, 4212, - 4217, 4225, 4228, 4234, 4237, 4239, 4241, 4243, - 4245, 4248, 4250, 4252, 4268, 4271, 4273, 4275, - 4282, 4287, 4289, 4292, 4300, 4303, 4309, 4314, - 4319, 4326, 4332, 4336, 4338, 4341, 4349, 4355, - 4357, 4359, 4361, 4367, 4369, 4393, 4397, 4399, - 4405, 4409, 4411, 4417, 4421, 4428, 4432, 4438, - 4447, 4450, 4454, 4462, 4465, 4472, 4475, 4481, - 4483, 4489, 4494, 4499, 4505, 4510, 4512, 4514, - 4516, 4518, 4520, 4527, 4533, 4537, 4539, 4542, - 4545, 4549, 4553, 4559, 4561, 4563, 4565, 4567, - 4569, 4575, 4577, 4579, 4580, 4582, 4584, 4586, - 4592, 4594, 4596, 4597, 4604, 4629, 4631, 4656, - 4658, 4660, 4684, 4686, 4688, 4690, 4692, 4695, - 4697, 4701, 4703, 4734, 4737, 4742, 4766, 4769, - 4771, 4774, 4777, 4781, 4784, 4787, 4791, 4792, - 4848, 4904, 4934, 4938, 4941, 4948, 4956, 4958, - 4960, 4962, 4965, 4969, 4971, 4974, 4979, 4982, - 4984, 4986, 4988, 5017, 5022, 5024, 5027, 5030, - 5034, 5038, 5044, 5051, 5056, 5060, 5066, 
5069, - 5074, 5077, 5083, 5089, 5092, 5098, 5100, 5106, - 5109, 5111, 5115, 5117, 5123, 5125, 5130, 5132, - 5154, 5157, 5161, 5166, 5168, 5171, 5174, 5176, - 5179, 5181, 5184, 5187, 5189, 5195, 5197, 5200, - 5203, 5206, 5208, 5210, 5216, 5219, 5225, 5228, - 5230, 5232, 5234, 5236, 5239, 5241, 5243, 5259, - 5262, 5264, 5266, 5271, 5274, 5277, 5279, 5281, - 5284, 5287, 5289, 5293, 5298, 5302, 5305, 5309, - 5311, 5314, 5321, 5327, 5329, 5331, 5333, 5339, - 5341, 5365, 5368, 5370, 5373, 5376, 5378, 5381, - 5384, 5387, 5389, 5393, 5401, 5403, 5405, 5407, - 5409, 5412, 5415, 5417, 5419, 5421, 5424, 5427, - 5432, 5434, 5436, 5438, 5440, 5442, 5444, 5451, - 5455, 5459, 5461, 5464, 5467, 5471, 5475, 5481, - 5483, 5485, 5487, 5493, 5495, 5497, 5498, 5505, - 5507, 5515, 5519, 5521, 5523, 5525, 5527, 5530, - 5534, 5536, 5539, 5544, 5547, 5549, 5551, 5553, - 5584, 5589, 5591, 5594, 5600, 5608, 5614, 5623, - 5628, 5639, 5647, 5652, 5660, 5664, 5671, 5675, - 5682, 5688, 5697, 5702, 5711, 5720, 5724, 5726, - 5731, 5733, 5739, 5742, 5747, 5749, 5771, 5777, - 5782, 5788, 5790, 5793, 5796, 5800, 5805, 5807, - 5810, 5818, 5822, 5832, 5834, 5841, 5846, 5854, - 5861, 5866, 5874, 5877, 5883, 5886, 5888, 5890, - 5892, 5894, 5897, 5899, 5901, 5917, 5920, 5922, - 5924, 5931, 5936, 5938, 5941, 5949, 5952, 5958, - 5963, 5968, 5975, 5981, 5985, 5987, 5990, 5998, - 6004, 6006, 6008, 6010, 6016, 6018, 6042, 6046, - 6048, 6054, 6058, 6060, 6066, 6070, 6077, 6081, - 6087, 6096, 6099, 6103, 6111, 6114, 6121, 6124, - 6130, 6132, 6138, 6143, 6148, 6154, 6159, 6161, - 6163, 6165, 6167, 6169, 6176, 6182, 6186, 6188, - 6191, 6194, 6198, 6202, 6208, 6210, 6212, 6214, - 6216, 6218, 6224, 6226, 6228, 6229, 6231, 6233, - 6237, 6240, 6242, 6244, 6246, 6249, 6253, 6255, - 6258, 6263, 6266, 6268, 6270, 6272, 6303, 6308, - 6310, 6313, 6319, 6321, 6323, 6325, 6328, 6332, - 6334, 6337, 6342, 6345, 6347, 6349, 6351, 6380, - 6385, 6387, 6390, 6393, 6397, 6401, 6407, 6414, - 6419, 6423, 6429, 6432, 6437, 6440, 6446, 6452, - 6455, 6461, 6463, 6469, 6472, 6474, 6478, 6480, - 6486, 6488, 6493, 6495, 6517, 6520, 6524, 6529, - 6531, 6534, 6537, 6539, 6542, 6544, 6547, 6550, - 6552, 6558, 6560, 6563, 6566, 6569, 6571, 6573, - 6579, 6582, 6588, 6591, 6598, 6600, 6602, 6604, - 6606, 6608, 6611, 6613, 6615, 6631, 6634, 6636, - 6638, 6643, 6646, 6649, 6651, 6653, 6656, 6659, - 6661, 6665, 6670, 6674, 6677, 6681, 6683, 6686, - 6694, 6700, 6702, 6704, 6706, 6712, 6714, 6738, - 6741, 6743, 6746, 6749, 6751, 6754, 6757, 6760, - 6762, 6766, 6774, 6776, 6778, 6780, 6782, 6785, - 6788, 6790, 6792, 6794, 6797, 6800, 6805, 6807, - 6809, 6811, 6813, 6815, 6817, 6824, 6828, 6832, - 6834, 6837, 6840, 6844, 6848, 6854, 6856, 6858, - 6862, 6864, 6866, 6868, 6870, 6876, 6878, 6880, - 6881, 6888, 6896, 6902, 6911, 6916, 6927, 6935, - 6940, 6948, 6952, 6959, 6963, 6970, 6976, 6985, - 6990, 6999, 7008, 7012, 7014, 7019, 7021, 7027, - 7030, 7035, 7037, 7059, 7065, 7070, 7076, 7078, - 7081, 7084, 7088, 7093, 7095, 7098, 7106, 7110, - 7120, 7122, 7129, 7134, 7142, 7149, 7154, 7162, - 7165, 7171, 7174, 7176, 7178, 7180, 7182, 7185, - 7187, 7189, 7205, 7208, 7210, 7212, 7219, 7224, - 7226, 7229, 7237, 7240, 7246, 7251, 7256, 7263, - 7269, 7273, 7275, 7278, 7286, 7292, 7294, 7296, - 7298, 7304, 7306, 7330, 7334, 7336, 7342, 7346, - 7348, 7354, 7358, 7365, 7369, 7375, 7384, 7387, - 7391, 7399, 7402, 7409, 7412, 7418, 7420, 7426, - 7431, 7436, 7442, 7447, 7449, 7451, 7453, 7455, - 7457, 7464, 7470, 7474, 7476, 7479, 7482, 7486, - 7490, 7496, 7498, 7500, 7502, 7504, 7506, 
7512, - 7514, 7516, 7517, 7520, 7524, 7526, 7544, 7548, - 7553, 7556, 7559, 7563, 7566, 7569, 7573, 7629, - 7685, 7718, 7722, 7727, 7729, 7730, 7732, 7736, - 7739, 7744, 7750, 7754, 7757, 7761, 7764, 7768, - 7771, 7775, 7788, 7791, 7793, 7795, 7797, 7800, - 7804, 7806, 7809, 7814, 7817, 7819, 7821, 7823, - 7852, 7857, 7859, 7862, 7865, 7869, 7873, 7879, - 7886, 7891, 7895, 7901, 7904, 7909, 7912, 7918, - 7924, 7927, 7933, 7935, 7941, 7944, 7946, 7950, - 7952, 7958, 7960, 7965, 7967, 7989, 7992, 7996, - 8001, 8003, 8006, 8009, 8011, 8014, 8016, 8019, - 8022, 8024, 8030, 8032, 8035, 8038, 8041, 8043, - 8045, 8051, 8054, 8060, 8063, 8070, 8072, 8074, - 8076, 8078, 8080, 8083, 8085, 8087, 8103, 8106, - 8108, 8110, 8115, 8118, 8121, 8123, 8125, 8128, - 8131, 8133, 8137, 8142, 8146, 8149, 8153, 8155, - 8158, 8166, 8172, 8174, 8176, 8178, 8184, 8186, - 8210, 8213, 8215, 8218, 8221, 8223, 8226, 8229, - 8232, 8234, 8238, 8246, 8248, 8250, 8252, 8254, - 8257, 8260, 8262, 8264, 8266, 8269, 8272, 8277, - 8279, 8281, 8283, 8285, 8287, 8289, 8296, 8300, - 8304, 8306, 8309, 8312, 8316, 8320, 8326, 8328, - 8330, 8334, 8336, 8338, 8340, 8342, 8345, 8349, - 8351, 8354, 8359, 8362, 8364, 8366, 8368, 8399, - 8404, 8406, 8409, 8415, 8423, 8429, 8438, 8443, - 8454, 8462, 8467, 8475, 8479, 8486, 8490, 8497, - 8503, 8512, 8517, 8526, 8535, 8539, 8541, 8546, - 8548, 8554, 8557, 8562, 8564, 8586, 8592, 8597, - 8603, 8605, 8608, 8611, 8615, 8620, 8622, 8625, - 8633, 8637, 8647, 8649, 8656, 8661, 8669, 8676, - 8681, 8689, 8692, 8698, 8701, 8703, 8705, 8707, - 8709, 8712, 8714, 8716, 8732, 8735, 8737, 8739, - 8746, 8751, 8753, 8756, 8764, 8767, 8773, 8778, - 8783, 8790, 8796, 8800, 8802, 8805, 8813, 8819, - 8821, 8823, 8825, 8831, 8833, 8857, 8861, 8863, - 8869, 8873, 8875, 8881, 8885, 8892, 8896, 8902, - 8911, 8914, 8918, 8926, 8929, 8936, 8939, 8945, - 8947, 8953, 8958, 8963, 8969, 8974, 8976, 8978, - 8980, 8982, 8984, 8991, 8997, 9001, 9003, 9006, - 9009, 9013, 9017, 9023, 9025, 9027, 9029, 9031, - 9033, 9039, 9041, 9043, 9044, 9046, 9048, 9050, - 9056, 9058, 9060, 9061, 9068, 9076, 9078, 9080, - 9083, 9089, 9101, 9107, 9111, 9116, 9122, 9127, - 9130, 9140, 9142, 9145, 9153, 9156, 9159, 9183, - 9204, 9225, 9246, 9265, 9286, 9307, 9328, 9352, - 9374, 9396, 9418, 9439, 9463, 9484, 9505, 9526, - 9548, 9570, 9592, 9613, 9634, 9655, 9676, 9697, - 9718, 9739, 9760, 9781, -} - -var _graphclust_indicies []int16 = []int16{ - 0, 1, 3, 2, 2, 3, 3, 2, - 3, 3, 2, 3, 3, 3, 2, 3, - 2, 3, 3, 2, 3, 3, 3, 3, - 2, 3, 3, 2, 2, 3, 3, 2, - 3, 2, 4, 5, 6, 7, 8, 10, - 11, 12, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 31, 9, 13, 2, 3, - 3, 3, 3, 2, 3, 2, 3, 3, - 2, 2, 2, 3, 2, 2, 2, 3, - 3, 3, 3, 2, 2, 2, 2, 2, - 2, 3, 2, 2, 2, 2, 2, 2, - 3, 2, 2, 2, 2, 3, 3, 3, - 3, 2, 3, 3, 3, 3, 3, 2, - 3, 3, 2, 3, 3, 3, 3, 2, - 3, 3, 2, 2, 2, 2, 2, 2, - 3, 3, 3, 3, 3, 3, 2, 3, - 3, 2, 2, 2, 2, 2, 2, 3, - 3, 2, 3, 3, 3, 3, 3, 2, - 3, 3, 2, 3, 2, 3, 3, 3, - 2, 3, 2, 3, 3, 3, 3, 3, - 2, 3, 2, 3, 3, 3, 3, 2, - 3, 2, 32, 33, 34, 35, 36, 37, - 38, 39, 40, 41, 42, 43, 44, 45, - 46, 47, 48, 49, 50, 51, 52, 2, - 3, 3, 2, 3, 3, 3, 2, 3, - 3, 3, 3, 2, 3, 2, 3, 3, - 2, 3, 3, 2, 3, 2, 2, 2, - 3, 3, 2, 3, 3, 2, 3, 3, - 2, 3, 2, 3, 3, 3, 3, 3, - 2, 3, 2, 3, 3, 2, 2, 2, - 3, 3, 3, 2, 3, 2, 3, 2, - 3, 3, 3, 3, 3, 2, 3, 3, - 2, 53, 54, 55, 56, 57, 2, 3, - 58, 2, 53, 54, 59, 55, 56, 57, - 2, 3, 2, 3, 2, 3, 2, 3, - 2, 3, 2, 60, 61, 2, 3, 2, - 3, 2, 62, 63, 64, 65, 66, 67, - 68, 69, 70, 71, 72, 73, 74, 75, - 76, 2, 3, 3, 2, 3, 2, 3, - 2, 3, 3, 
3, 3, 2, 3, 3, - 2, 2, 2, 3, 3, 2, 3, 2, - 3, 3, 2, 2, 2, 3, 3, 2, - 3, 3, 3, 2, 3, 3, 3, 3, - 2, 3, 3, 3, 2, 3, 3, 2, - 77, 78, 63, 2, 3, 2, 3, 3, - 2, 79, 80, 81, 82, 83, 84, 85, - 2, 86, 87, 88, 89, 90, 2, 3, - 2, 3, 2, 3, 2, 3, 3, 3, - 3, 3, 2, 3, 2, 91, 92, 93, - 94, 95, 96, 97, 98, 99, 100, 101, - 102, 103, 104, 105, 106, 107, 104, 108, - 109, 110, 111, 112, 2, 3, 3, 2, - 2, 3, 2, 2, 3, 3, 3, 2, - 3, 2, 3, 3, 2, 2, 2, 3, - 3, 3, 2, 3, 2, 3, 3, 3, - 2, 3, 3, 3, 3, 3, 3, 3, - 2, 3, 2, 3, 2, 3, 2, 2, - 3, 3, 3, 2, 2, 2, 3, 2, - 3, 3, 2, 3, 2, 3, 3, 2, - 3, 3, 2, 113, 114, 115, 116, 2, - 3, 2, 3, 2, 3, 2, 3, 2, - 117, 2, 3, 2, 118, 119, 120, 121, - 122, 123, 2, 3, 3, 3, 2, 2, - 2, 2, 3, 3, 2, 3, 3, 2, - 2, 2, 3, 3, 3, 3, 2, 124, - 125, 126, 2, 3, 3, 3, 3, 3, - 2, 3, 2, 3, 2, 127, 128, 129, - 2, 130, 2, 2, 130, 2, 130, 130, - 2, 130, 130, 2, 130, 130, 130, 2, - 130, 2, 130, 130, 2, 130, 130, 130, - 130, 2, 130, 130, 2, 2, 130, 130, - 2, 130, 2, 131, 132, 133, 134, 135, - 136, 137, 139, 140, 141, 142, 143, 144, - 145, 146, 147, 148, 149, 150, 22, 151, - 152, 153, 154, 155, 156, 157, 158, 159, - 138, 2, 130, 130, 130, 130, 2, 130, - 2, 130, 130, 2, 3, 3, 2, 2, - 3, 130, 130, 2, 130, 130, 2, 130, - 2, 3, 130, 130, 130, 3, 3, 2, - 130, 130, 130, 2, 2, 2, 130, 2, - 3, 3, 130, 130, 3, 2, 130, 130, - 130, 2, 130, 2, 130, 2, 130, 2, - 3, 2, 2, 130, 130, 2, 130, 2, - 3, 130, 130, 3, 130, 2, 3, 130, - 130, 3, 3, 130, 130, 2, 130, 130, - 3, 2, 130, 130, 130, 3, 3, 3, - 2, 130, 3, 130, 2, 2, 2, 3, - 2, 2, 2, 130, 130, 130, 3, 130, - 3, 2, 130, 130, 3, 3, 3, 130, - 130, 130, 2, 130, 130, 3, 3, 2, - 2, 2, 130, 130, 130, 2, 130, 2, - 3, 130, 130, 130, 130, 3, 130, 3, - 3, 2, 130, 3, 130, 2, 130, 2, - 130, 3, 130, 130, 2, 130, 2, 130, - 130, 130, 130, 3, 2, 3, 130, 2, - 130, 130, 130, 130, 2, 130, 2, 160, - 161, 162, 163, 164, 165, 166, 167, 168, - 169, 170, 171, 172, 173, 174, 175, 176, - 177, 178, 179, 180, 2, 3, 130, 130, - 3, 130, 2, 3, 130, 130, 130, 2, - 130, 3, 130, 130, 130, 2, 130, 2, - 130, 130, 2, 130, 130, 2, 3, 130, - 3, 2, 130, 130, 130, 2, 3, 130, - 2, 130, 130, 2, 130, 130, 3, 130, - 3, 3, 130, 2, 130, 130, 3, 2, - 130, 130, 130, 130, 3, 130, 130, 3, - 130, 2, 130, 2, 3, 3, 3, 130, - 130, 3, 2, 130, 2, 130, 2, 3, - 3, 3, 3, 130, 130, 3, 130, 2, - 3, 130, 130, 3, 130, 3, 2, 3, - 130, 3, 130, 2, 3, 130, 130, 130, - 130, 3, 130, 2, 130, 130, 2, 181, - 182, 183, 184, 185, 2, 130, 58, 2, - 130, 2, 130, 2, 130, 2, 130, 2, - 186, 187, 2, 130, 2, 130, 2, 188, - 189, 190, 191, 66, 192, 193, 194, 195, - 196, 197, 198, 199, 200, 201, 2, 130, - 130, 2, 130, 2, 130, 2, 130, 130, - 130, 3, 3, 130, 2, 130, 2, 130, - 2, 3, 130, 2, 130, 3, 2, 3, - 130, 130, 130, 3, 130, 3, 2, 130, - 2, 3, 130, 3, 130, 3, 130, 2, - 130, 130, 3, 130, 2, 130, 130, 130, - 130, 2, 130, 3, 3, 130, 130, 3, - 2, 130, 130, 3, 130, 3, 2, 202, - 203, 189, 2, 130, 2, 130, 130, 2, - 204, 205, 206, 207, 208, 209, 210, 2, - 211, 212, 213, 214, 215, 2, 130, 2, - 130, 2, 130, 2, 130, 130, 130, 130, - 130, 2, 130, 2, 216, 217, 218, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 228, 229, 230, 231, 232, 233, 234, 235, - 236, 237, 238, 2, 130, 3, 130, 2, - 2, 130, 3, 2, 3, 3, 2, 130, - 3, 130, 130, 2, 130, 2, 3, 130, - 3, 130, 3, 2, 2, 130, 2, 3, - 130, 130, 3, 130, 3, 130, 2, 130, - 3, 130, 2, 130, 130, 3, 130, 3, - 2, 130, 130, 3, 3, 3, 3, 130, - 130, 2, 3, 130, 2, 3, 3, 130, - 2, 130, 3, 130, 3, 130, 3, 130, - 2, 3, 2, 130, 130, 3, 3, 130, - 3, 130, 2, 2, 2, 130, 130, 3, - 130, 3, 130, 2, 2, 130, 3, 3, - 130, 3, 130, 2, 3, 130, 3, 130, 
- 2, 3, 3, 130, 130, 2, 3, 3, - 3, 130, 130, 2, 239, 240, 115, 241, - 2, 130, 2, 130, 2, 130, 2, 242, - 2, 130, 2, 243, 244, 245, 246, 247, - 248, 2, 3, 3, 130, 130, 130, 2, - 2, 2, 2, 130, 130, 2, 130, 130, - 2, 2, 2, 130, 130, 130, 130, 2, - 249, 250, 251, 2, 130, 130, 130, 130, - 130, 2, 130, 2, 130, 2, 252, 2, - 3, 2, 253, 2, 254, 255, 256, 258, - 257, 2, 130, 2, 2, 130, 130, 3, - 2, 3, 2, 259, 2, 260, 261, 262, - 264, 263, 2, 3, 2, 2, 3, 3, - 79, 80, 81, 82, 83, 84, 2, 3, - 1, 265, 265, 3, 1, 265, 266, 3, - 1, 267, 268, 267, 268, 268, 267, 268, - 268, 267, 268, 268, 268, 267, 268, 267, - 268, 268, 267, 268, 268, 268, 268, 267, - 268, 268, 267, 267, 268, 268, 267, 268, - 267, 269, 270, 271, 272, 273, 275, 276, - 277, 279, 280, 281, 282, 283, 284, 285, - 286, 287, 288, 289, 290, 291, 292, 293, - 294, 295, 296, 274, 278, 267, 268, 268, - 268, 268, 267, 268, 267, 268, 268, 267, - 267, 267, 268, 267, 267, 267, 268, 268, - 268, 268, 267, 267, 267, 267, 267, 267, - 268, 267, 267, 267, 267, 267, 267, 268, - 267, 267, 267, 267, 268, 268, 268, 268, - 267, 268, 268, 268, 268, 268, 267, 268, - 268, 267, 268, 268, 268, 268, 267, 268, - 268, 267, 267, 267, 267, 267, 267, 268, - 268, 268, 268, 268, 268, 267, 268, 268, - 267, 267, 267, 267, 267, 267, 268, 268, - 267, 268, 268, 268, 268, 268, 267, 268, - 268, 267, 268, 267, 268, 268, 268, 267, - 268, 267, 268, 268, 268, 268, 268, 267, - 268, 267, 268, 268, 268, 268, 267, 268, - 267, 297, 298, 299, 300, 301, 302, 303, - 304, 305, 306, 307, 308, 309, 310, 311, - 312, 313, 314, 315, 316, 317, 267, 268, - 268, 267, 268, 268, 268, 267, 268, 268, - 268, 268, 267, 268, 267, 268, 268, 267, - 268, 268, 267, 268, 267, 267, 267, 268, - 268, 267, 268, 268, 267, 268, 268, 267, - 268, 267, 268, 268, 268, 268, 268, 267, - 268, 267, 268, 268, 267, 267, 267, 268, - 268, 268, 267, 268, 267, 268, 267, 268, - 268, 268, 268, 268, 267, 268, 268, 267, - 318, 319, 320, 321, 322, 267, 268, 323, - 267, 318, 319, 324, 320, 321, 322, 267, - 268, 267, 268, 267, 268, 267, 268, 267, - 268, 267, 325, 326, 267, 268, 267, 268, - 267, 327, 328, 329, 330, 331, 332, 333, - 334, 335, 336, 337, 338, 339, 340, 341, - 267, 268, 268, 267, 268, 267, 268, 267, - 268, 268, 268, 268, 267, 268, 268, 267, - 267, 267, 268, 268, 267, 268, 267, 268, - 268, 267, 267, 267, 268, 268, 267, 268, - 268, 268, 267, 268, 268, 268, 268, 267, - 268, 268, 268, 267, 268, 268, 267, 342, - 343, 328, 267, 268, 267, 268, 268, 267, - 344, 345, 346, 347, 348, 349, 350, 267, - 351, 352, 353, 354, 355, 267, 268, 267, - 268, 267, 268, 267, 268, 268, 268, 268, - 268, 267, 268, 267, 356, 357, 358, 359, - 360, 361, 362, 363, 364, 365, 366, 367, - 368, 369, 370, 371, 372, 369, 373, 374, - 375, 376, 377, 267, 268, 268, 267, 267, - 268, 267, 267, 268, 268, 268, 267, 268, - 267, 268, 268, 267, 267, 267, 268, 268, - 268, 267, 268, 267, 268, 268, 268, 267, - 268, 268, 268, 268, 268, 268, 268, 267, - 268, 267, 268, 267, 268, 267, 267, 268, - 268, 268, 267, 267, 267, 268, 267, 268, - 268, 267, 268, 267, 268, 268, 267, 268, - 268, 267, 378, 379, 380, 381, 267, 268, - 267, 268, 267, 268, 267, 268, 267, 382, - 267, 268, 267, 383, 384, 385, 386, 387, - 388, 267, 268, 268, 268, 267, 267, 267, - 267, 268, 268, 267, 268, 268, 267, 267, - 267, 268, 268, 268, 268, 267, 389, 390, - 391, 267, 268, 268, 268, 268, 268, 267, - 268, 267, 268, 267, 392, 393, 394, 267, - 395, 267, 395, 267, 267, 395, 395, 267, - 395, 395, 267, 395, 395, 395, 267, 395, - 267, 395, 395, 267, 395, 395, 395, 395, - 267, 395, 395, 267, 267, 395, 395, 267, - 395, 267, 396, 
397, 398, 399, 400, 401, - 402, 404, 405, 406, 407, 408, 409, 410, - 411, 412, 413, 414, 415, 287, 416, 417, - 418, 419, 420, 421, 422, 423, 424, 403, - 267, 395, 395, 395, 395, 267, 395, 267, - 395, 395, 267, 268, 268, 267, 267, 268, - 395, 395, 267, 395, 395, 267, 395, 267, - 268, 395, 395, 395, 268, 268, 267, 395, - 395, 395, 267, 267, 267, 395, 267, 268, - 268, 395, 395, 268, 267, 395, 395, 395, - 267, 395, 267, 395, 267, 395, 267, 268, - 267, 267, 395, 395, 267, 395, 267, 268, - 395, 395, 268, 395, 267, 268, 395, 395, - 268, 268, 395, 395, 267, 395, 395, 268, - 267, 395, 395, 395, 268, 268, 268, 267, - 395, 268, 395, 267, 267, 267, 268, 267, - 267, 267, 395, 395, 395, 268, 395, 268, - 267, 395, 395, 268, 268, 268, 395, 395, - 395, 267, 395, 395, 268, 268, 267, 267, - 267, 395, 395, 395, 267, 395, 267, 268, - 395, 395, 395, 395, 268, 395, 268, 268, - 267, 395, 268, 395, 267, 395, 267, 395, - 268, 395, 395, 267, 395, 267, 395, 395, - 395, 395, 268, 267, 268, 395, 267, 395, - 395, 395, 395, 267, 395, 267, 425, 426, - 427, 428, 429, 430, 431, 432, 433, 434, - 435, 436, 437, 438, 439, 440, 441, 442, - 443, 444, 445, 267, 268, 395, 395, 268, - 395, 267, 268, 395, 395, 395, 267, 395, - 268, 395, 395, 395, 267, 395, 267, 395, - 395, 267, 395, 395, 267, 268, 395, 268, - 267, 395, 395, 395, 267, 268, 395, 267, - 395, 395, 267, 395, 395, 268, 395, 268, - 268, 395, 267, 395, 395, 268, 267, 395, - 395, 395, 395, 268, 395, 395, 268, 395, - 267, 395, 267, 268, 268, 268, 395, 395, - 268, 267, 395, 267, 395, 267, 268, 268, - 268, 268, 395, 395, 268, 395, 267, 268, - 395, 395, 268, 395, 268, 267, 268, 395, - 268, 395, 267, 268, 395, 395, 395, 395, - 268, 395, 267, 395, 395, 267, 446, 447, - 448, 449, 450, 267, 395, 323, 267, 395, - 267, 395, 267, 395, 267, 395, 267, 451, - 452, 267, 395, 267, 395, 267, 453, 454, - 455, 456, 331, 457, 458, 459, 460, 461, - 462, 463, 464, 465, 466, 267, 395, 395, - 267, 395, 267, 395, 267, 395, 395, 395, - 268, 268, 395, 267, 395, 267, 395, 267, - 268, 395, 267, 395, 268, 267, 268, 395, - 395, 395, 268, 395, 268, 267, 395, 267, - 268, 395, 268, 395, 268, 395, 267, 395, - 395, 268, 395, 267, 395, 395, 395, 395, - 267, 395, 268, 268, 395, 395, 268, 267, - 395, 395, 268, 395, 268, 267, 467, 468, - 454, 267, 395, 267, 395, 395, 267, 469, - 470, 471, 472, 473, 474, 475, 267, 476, - 477, 478, 479, 480, 267, 395, 267, 395, - 267, 395, 267, 395, 395, 395, 395, 395, - 267, 395, 267, 481, 482, 483, 484, 485, - 486, 487, 488, 489, 490, 491, 492, 493, - 494, 495, 496, 497, 498, 499, 500, 501, - 502, 503, 267, 395, 268, 395, 267, 267, - 395, 268, 267, 268, 268, 267, 395, 268, - 395, 395, 267, 395, 267, 268, 395, 268, - 395, 268, 267, 267, 395, 267, 268, 395, - 395, 268, 395, 268, 395, 267, 395, 268, - 395, 267, 395, 395, 268, 395, 268, 267, - 395, 395, 268, 268, 268, 268, 395, 395, - 267, 268, 395, 267, 268, 268, 395, 267, - 395, 268, 395, 268, 395, 268, 395, 267, - 268, 267, 395, 395, 268, 268, 395, 268, - 395, 267, 267, 267, 395, 395, 268, 395, - 268, 395, 267, 267, 395, 268, 268, 395, - 268, 395, 267, 268, 395, 268, 395, 267, - 268, 268, 395, 395, 267, 268, 268, 268, - 395, 395, 267, 504, 505, 380, 506, 267, - 395, 267, 395, 267, 395, 267, 507, 267, - 395, 267, 508, 509, 510, 511, 512, 513, - 267, 268, 268, 395, 395, 395, 267, 267, - 267, 267, 395, 395, 267, 395, 395, 267, - 267, 267, 395, 395, 395, 395, 267, 514, - 515, 516, 267, 395, 395, 395, 395, 395, - 267, 395, 267, 395, 267, 517, 267, 268, - 267, 518, 267, 519, 520, 521, 523, 522, - 267, 395, 267, 267, 395, 395, 268, 267, - 
268, 267, 524, 267, 525, 526, 527, 529, - 528, 267, 268, 267, 267, 268, 268, 344, - 345, 346, 347, 348, 349, 267, 268, 267, - 268, 268, 267, 266, 268, 268, 267, 266, - 268, 267, 266, 268, 267, 531, 532, 530, - 267, 266, 268, 267, 266, 268, 267, 533, - 534, 535, 536, 537, 530, 267, 538, 267, - 297, 298, 299, 533, 534, 539, 300, 301, - 302, 303, 304, 305, 306, 307, 308, 309, - 310, 311, 312, 313, 314, 315, 316, 317, - 267, 540, 538, 297, 298, 299, 541, 535, - 536, 300, 301, 302, 303, 304, 305, 306, - 307, 308, 309, 310, 311, 312, 313, 314, - 315, 316, 317, 267, 540, 267, 542, 540, - 297, 298, 299, 543, 536, 300, 301, 302, - 303, 304, 305, 306, 307, 308, 309, 310, - 311, 312, 313, 314, 315, 316, 317, 267, - 542, 267, 267, 542, 544, 267, 542, 267, - 545, 546, 267, 540, 267, 267, 542, 267, - 540, 267, 540, 327, 328, 329, 330, 331, - 332, 333, 547, 335, 336, 337, 338, 339, - 340, 341, 549, 550, 551, 552, 553, 554, - 549, 550, 551, 552, 553, 554, 549, 548, - 555, 267, 268, 538, 267, 556, 556, 556, - 542, 267, 297, 298, 299, 541, 539, 300, - 301, 302, 303, 304, 305, 306, 307, 308, - 309, 310, 311, 312, 313, 314, 315, 316, - 317, 267, 545, 557, 267, 267, 540, 556, - 556, 542, 556, 556, 542, 556, 556, 556, - 542, 556, 556, 542, 556, 556, 542, 556, - 556, 267, 542, 542, 551, 552, 553, 554, - 548, 549, 551, 552, 553, 554, 548, 549, - 551, 552, 553, 554, 548, 549, 551, 552, - 553, 554, 548, 549, 551, 552, 553, 554, - 548, 549, 551, 552, 553, 554, 548, 549, - 551, 552, 553, 554, 548, 549, 551, 552, - 553, 554, 548, 549, 551, 552, 553, 554, - 548, 549, 550, 555, 552, 553, 554, 548, - 549, 550, 552, 553, 554, 548, 549, 550, - 552, 553, 554, 548, 549, 550, 552, 553, - 554, 548, 549, 550, 552, 553, 554, 548, - 549, 550, 552, 553, 554, 548, 549, 550, - 552, 553, 554, 548, 549, 550, 552, 553, - 554, 548, 549, 550, 552, 553, 554, 548, - 549, 550, 551, 555, 553, 554, 548, 549, - 550, 551, 553, 554, 548, 549, 550, 551, - 553, 554, 548, 549, 550, 551, 553, 554, - 548, 549, 550, 551, 553, 558, 557, 552, - 267, 555, 556, 267, 540, 542, 268, 268, - 267, 559, 560, 561, 562, 563, 530, 267, - 268, 323, 268, 268, 268, 267, 268, 268, - 267, 395, 268, 267, 395, 268, 267, 268, - 395, 268, 267, 530, 267, 564, 566, 567, - 568, 569, 570, 571, 566, 567, 568, 569, - 570, 571, 566, 530, 565, 555, 267, 268, - 538, 268, 267, 540, 540, 540, 542, 267, - 540, 540, 542, 540, 540, 542, 540, 540, - 540, 542, 540, 540, 542, 540, 540, 542, - 540, 540, 267, 542, 568, 569, 570, 571, - 565, 566, 568, 569, 570, 571, 565, 566, - 568, 569, 570, 571, 565, 566, 568, 569, - 570, 571, 565, 566, 568, 569, 570, 571, - 565, 566, 568, 569, 570, 571, 565, 566, - 568, 569, 570, 571, 565, 566, 568, 569, - 570, 571, 565, 566, 568, 569, 570, 571, - 565, 566, 567, 555, 569, 570, 571, 565, - 566, 567, 569, 570, 571, 565, 566, 567, - 569, 570, 571, 565, 566, 567, 569, 570, - 571, 565, 566, 567, 569, 570, 571, 565, - 566, 567, 569, 570, 571, 565, 566, 567, - 569, 570, 571, 565, 566, 567, 569, 570, - 571, 565, 566, 567, 569, 570, 571, 565, - 566, 567, 568, 555, 570, 571, 565, 566, - 567, 568, 570, 571, 565, 566, 567, 568, - 570, 571, 565, 566, 567, 568, 570, 571, - 565, 566, 567, 568, 570, 572, 573, 569, - 267, 555, 540, 268, 540, 542, 268, 542, - 268, 267, 540, 574, 575, 530, 267, 268, - 267, 268, 268, 268, 267, 577, 578, 579, - 580, 576, 267, 581, 582, 530, 267, 266, - 268, 267, 268, 266, 268, 267, 583, 530, - 267, 268, 268, 267, 584, 530, 267, 268, - 268, 267, 585, 586, 587, 588, 589, 590, - 591, 592, 593, 594, 595, 530, 267, 268, - 596, 267, 344, 345, 346, 
347, 348, 349, - 597, 267, 598, 267, 268, 267, 395, 268, - 267, 268, 395, 268, 395, 268, 267, 395, - 395, 268, 395, 268, 395, 268, 395, 268, - 395, 268, 267, 268, 268, 395, 395, 268, - 267, 395, 395, 268, 267, 395, 268, 395, - 268, 267, 268, 395, 268, 395, 268, 267, - 395, 268, 395, 268, 267, 395, 268, 267, - 395, 395, 268, 268, 395, 268, 395, 268, - 395, 267, 576, 267, 599, 576, 267, 322, - 530, 600, 530, 267, 268, 267, 266, 3, - 1, 266, 3, 1, 602, 603, 601, 1, - 266, 3, 1, 266, 3, 1, 604, 605, - 606, 607, 608, 601, 1, 609, 610, 612, - 611, 611, 612, 612, 611, 612, 612, 611, - 612, 612, 612, 611, 612, 611, 612, 612, - 611, 612, 612, 612, 612, 611, 612, 612, - 611, 611, 612, 612, 611, 612, 611, 613, - 614, 615, 616, 617, 619, 620, 621, 623, - 624, 625, 626, 627, 628, 629, 630, 631, - 632, 633, 634, 635, 636, 637, 638, 639, - 640, 618, 622, 611, 612, 612, 612, 612, - 611, 612, 611, 612, 612, 611, 611, 611, - 612, 611, 611, 611, 612, 612, 612, 612, - 611, 611, 611, 611, 611, 611, 612, 611, - 611, 611, 611, 611, 611, 612, 611, 611, - 611, 611, 612, 612, 612, 612, 611, 612, - 612, 612, 612, 612, 611, 612, 612, 611, - 612, 612, 612, 612, 611, 612, 612, 611, - 611, 611, 611, 611, 611, 612, 612, 612, - 612, 612, 612, 611, 612, 612, 611, 611, - 611, 611, 611, 611, 612, 612, 611, 612, - 612, 612, 612, 612, 611, 612, 612, 611, - 612, 611, 612, 612, 612, 611, 612, 611, - 612, 612, 612, 612, 612, 611, 612, 611, - 612, 612, 612, 612, 611, 612, 611, 641, - 642, 643, 644, 645, 646, 647, 648, 649, - 650, 651, 652, 653, 654, 655, 656, 657, - 658, 659, 660, 661, 611, 612, 612, 611, - 612, 612, 612, 611, 612, 612, 612, 612, - 611, 612, 611, 612, 612, 611, 612, 612, - 611, 612, 611, 611, 611, 612, 612, 611, - 612, 612, 611, 612, 612, 611, 612, 611, - 612, 612, 612, 612, 612, 611, 612, 611, - 612, 612, 611, 611, 611, 612, 612, 612, - 611, 612, 611, 612, 611, 612, 612, 612, - 612, 612, 611, 612, 612, 611, 662, 663, - 664, 665, 666, 611, 612, 667, 611, 662, - 663, 668, 664, 665, 666, 611, 612, 611, - 612, 611, 612, 611, 612, 611, 612, 611, - 669, 670, 611, 612, 611, 612, 611, 671, - 672, 673, 674, 675, 676, 677, 678, 679, - 680, 681, 682, 683, 684, 685, 611, 612, - 612, 611, 612, 611, 612, 611, 612, 612, - 612, 612, 611, 612, 612, 611, 611, 611, - 612, 612, 611, 612, 611, 612, 612, 611, - 611, 611, 612, 612, 611, 612, 612, 612, - 611, 612, 612, 612, 612, 611, 612, 612, - 612, 611, 612, 612, 611, 686, 687, 672, - 611, 612, 611, 612, 612, 611, 688, 689, - 690, 691, 692, 693, 694, 611, 695, 696, - 697, 698, 699, 611, 612, 611, 612, 611, - 612, 611, 612, 612, 612, 612, 612, 611, - 612, 611, 700, 701, 702, 703, 704, 705, - 706, 707, 708, 709, 710, 711, 712, 713, - 714, 715, 716, 713, 717, 718, 719, 720, - 721, 611, 612, 612, 611, 611, 612, 611, - 611, 612, 612, 612, 611, 612, 611, 612, - 612, 611, 611, 611, 612, 612, 612, 611, - 612, 611, 612, 612, 612, 611, 612, 612, - 612, 612, 612, 612, 612, 611, 612, 611, - 612, 611, 612, 611, 611, 612, 612, 612, - 611, 611, 611, 612, 611, 612, 612, 611, - 612, 611, 612, 612, 611, 612, 612, 611, - 722, 723, 724, 725, 611, 612, 611, 612, - 611, 612, 611, 612, 611, 726, 611, 612, - 611, 727, 728, 729, 730, 731, 732, 611, - 612, 612, 612, 611, 611, 611, 611, 612, - 612, 611, 612, 612, 611, 611, 611, 612, - 612, 612, 612, 611, 733, 734, 735, 611, - 612, 612, 612, 612, 612, 611, 612, 611, - 612, 611, 736, 737, 738, 611, 739, 611, - 739, 611, 611, 739, 739, 611, 739, 739, - 611, 739, 739, 739, 611, 739, 611, 739, - 739, 611, 739, 739, 739, 739, 611, 739, - 739, 611, 611, 739, 739, 611, 
739, 611, - 740, 741, 742, 743, 744, 745, 746, 748, - 749, 750, 751, 752, 753, 754, 755, 756, - 757, 758, 759, 631, 760, 761, 762, 763, - 764, 765, 766, 767, 768, 747, 611, 739, - 739, 739, 739, 611, 739, 611, 739, 739, - 611, 612, 612, 611, 611, 612, 739, 739, - 611, 739, 739, 611, 739, 611, 612, 739, - 739, 739, 612, 612, 611, 739, 739, 739, - 611, 611, 611, 739, 611, 612, 612, 739, - 739, 612, 611, 739, 739, 739, 611, 739, - 611, 739, 611, 739, 611, 612, 611, 611, - 739, 739, 611, 739, 611, 612, 739, 739, - 612, 739, 611, 612, 739, 739, 612, 612, - 739, 739, 611, 739, 739, 612, 611, 739, - 739, 739, 612, 612, 612, 611, 739, 612, - 739, 611, 611, 611, 612, 611, 611, 611, - 739, 739, 739, 612, 739, 612, 611, 739, - 739, 612, 612, 612, 739, 739, 739, 611, - 739, 739, 612, 612, 611, 611, 611, 739, - 739, 739, 611, 739, 611, 612, 739, 739, - 739, 739, 612, 739, 612, 612, 611, 739, - 612, 739, 611, 739, 611, 739, 612, 739, - 739, 611, 739, 611, 739, 739, 739, 739, - 612, 611, 612, 739, 611, 739, 739, 739, - 739, 611, 739, 611, 769, 770, 771, 772, - 773, 774, 775, 776, 777, 778, 779, 780, - 781, 782, 783, 784, 785, 786, 787, 788, - 789, 611, 612, 739, 739, 612, 739, 611, - 612, 739, 739, 739, 611, 739, 612, 739, - 739, 739, 611, 739, 611, 739, 739, 611, - 739, 739, 611, 612, 739, 612, 611, 739, - 739, 739, 611, 612, 739, 611, 739, 739, - 611, 739, 739, 612, 739, 612, 612, 739, - 611, 739, 739, 612, 611, 739, 739, 739, - 739, 612, 739, 739, 612, 739, 611, 739, - 611, 612, 612, 612, 739, 739, 612, 611, - 739, 611, 739, 611, 612, 612, 612, 612, - 739, 739, 612, 739, 611, 612, 739, 739, - 612, 739, 612, 611, 612, 739, 612, 739, - 611, 612, 739, 739, 739, 739, 612, 739, - 611, 739, 739, 611, 790, 791, 792, 793, - 794, 611, 739, 667, 611, 739, 611, 739, - 611, 739, 611, 739, 611, 795, 796, 611, - 739, 611, 739, 611, 797, 798, 799, 800, - 675, 801, 802, 803, 804, 805, 806, 807, - 808, 809, 810, 611, 739, 739, 611, 739, - 611, 739, 611, 739, 739, 739, 612, 612, - 739, 611, 739, 611, 739, 611, 612, 739, - 611, 739, 612, 611, 612, 739, 739, 739, - 612, 739, 612, 611, 739, 611, 612, 739, - 612, 739, 612, 739, 611, 739, 739, 612, - 739, 611, 739, 739, 739, 739, 611, 739, - 612, 612, 739, 739, 612, 611, 739, 739, - 612, 739, 612, 611, 811, 812, 798, 611, - 739, 611, 739, 739, 611, 813, 814, 815, - 816, 817, 818, 819, 611, 820, 821, 822, - 823, 824, 611, 739, 611, 739, 611, 739, - 611, 739, 739, 739, 739, 739, 611, 739, - 611, 825, 826, 827, 828, 829, 830, 831, - 832, 833, 834, 835, 836, 837, 838, 839, - 840, 841, 842, 843, 844, 845, 846, 847, - 611, 739, 612, 739, 611, 611, 739, 612, - 611, 612, 612, 611, 739, 612, 739, 739, - 611, 739, 611, 612, 739, 612, 739, 612, - 611, 611, 739, 611, 612, 739, 739, 612, - 739, 612, 739, 611, 739, 612, 739, 611, - 739, 739, 612, 739, 612, 611, 739, 739, - 612, 612, 612, 612, 739, 739, 611, 612, - 739, 611, 612, 612, 739, 611, 739, 612, - 739, 612, 739, 612, 739, 611, 612, 611, - 739, 739, 612, 612, 739, 612, 739, 611, - 611, 611, 739, 739, 612, 739, 612, 739, - 611, 611, 739, 612, 612, 739, 612, 739, - 611, 612, 739, 612, 739, 611, 612, 612, - 739, 739, 611, 612, 612, 612, 739, 739, - 611, 848, 849, 724, 850, 611, 739, 611, - 739, 611, 739, 611, 851, 611, 739, 611, - 852, 853, 854, 855, 856, 857, 611, 612, - 612, 739, 739, 739, 611, 611, 611, 611, - 739, 739, 611, 739, 739, 611, 611, 611, - 739, 739, 739, 739, 611, 858, 859, 860, - 611, 739, 739, 739, 739, 739, 611, 739, - 611, 739, 611, 861, 611, 612, 611, 862, - 611, 863, 864, 865, 867, 866, 611, 739, - 611, 611, 739, 
739, 612, 611, 612, 611, - 868, 611, 869, 870, 871, 873, 872, 611, - 612, 611, 611, 612, 612, 688, 689, 690, - 691, 692, 693, 611, 641, 642, 643, 604, - 605, 874, 644, 645, 646, 647, 648, 649, - 650, 651, 652, 653, 654, 655, 656, 657, - 658, 659, 660, 661, 611, 875, 610, 641, - 642, 643, 876, 606, 607, 644, 645, 646, - 647, 648, 649, 650, 651, 652, 653, 654, - 655, 656, 657, 658, 659, 660, 661, 611, - 875, 611, 877, 875, 641, 642, 643, 878, - 607, 644, 645, 646, 647, 648, 649, 650, - 651, 652, 653, 654, 655, 656, 657, 658, - 659, 660, 661, 611, 877, 611, 609, 877, - 879, 611, 877, 611, 880, 881, 611, 875, - 611, 611, 877, 611, 875, 611, 875, 671, - 672, 673, 674, 675, 676, 677, 882, 679, - 680, 681, 682, 683, 684, 685, 884, 885, - 886, 887, 888, 889, 884, 885, 886, 887, - 888, 889, 884, 883, 890, 611, 612, 610, - 611, 891, 891, 891, 877, 611, 641, 642, - 643, 876, 874, 644, 645, 646, 647, 648, - 649, 650, 651, 652, 653, 654, 655, 656, - 657, 658, 659, 660, 661, 611, 880, 892, - 611, 611, 875, 891, 891, 877, 891, 891, - 877, 891, 891, 891, 877, 891, 891, 877, - 891, 891, 877, 891, 891, 611, 877, 877, - 886, 887, 888, 889, 883, 884, 886, 887, - 888, 889, 883, 884, 886, 887, 888, 889, - 883, 884, 886, 887, 888, 889, 883, 884, - 886, 887, 888, 889, 883, 884, 886, 887, - 888, 889, 883, 884, 886, 887, 888, 889, - 883, 884, 886, 887, 888, 889, 883, 884, - 886, 887, 888, 889, 883, 884, 885, 890, - 887, 888, 889, 883, 884, 885, 887, 888, - 889, 883, 884, 885, 887, 888, 889, 883, - 884, 885, 887, 888, 889, 883, 884, 885, - 887, 888, 889, 883, 884, 885, 887, 888, - 889, 883, 884, 885, 887, 888, 889, 883, - 884, 885, 887, 888, 889, 883, 884, 885, - 887, 888, 889, 883, 884, 885, 886, 890, - 888, 889, 883, 884, 885, 886, 888, 889, - 883, 884, 885, 886, 888, 889, 883, 884, - 885, 886, 888, 889, 883, 884, 885, 886, - 888, 893, 892, 887, 611, 890, 891, 611, - 875, 877, 265, 3, 1, 894, 895, 896, - 897, 898, 601, 1, 265, 899, 3, 265, - 3, 265, 3, 1, 901, 900, 900, 901, - 901, 900, 901, 901, 900, 901, 901, 901, - 900, 901, 900, 901, 901, 900, 901, 901, - 901, 901, 900, 901, 901, 900, 900, 901, - 901, 900, 901, 900, 902, 903, 904, 905, - 906, 908, 909, 910, 912, 913, 914, 915, - 916, 917, 918, 919, 920, 921, 922, 923, - 924, 925, 926, 927, 928, 929, 907, 911, - 900, 901, 901, 901, 901, 900, 901, 900, - 901, 901, 900, 900, 900, 901, 900, 900, - 900, 901, 901, 901, 901, 900, 900, 900, - 900, 900, 900, 901, 900, 900, 900, 900, - 900, 900, 901, 900, 900, 900, 900, 901, - 901, 901, 901, 900, 901, 901, 901, 901, - 901, 900, 901, 901, 900, 901, 901, 901, - 901, 900, 901, 901, 900, 900, 900, 900, - 900, 900, 901, 901, 901, 901, 901, 901, - 900, 901, 901, 900, 900, 900, 900, 900, - 900, 901, 901, 900, 901, 901, 901, 901, - 901, 900, 901, 901, 900, 901, 900, 901, - 901, 901, 900, 901, 900, 901, 901, 901, - 901, 901, 900, 901, 900, 901, 901, 901, - 901, 900, 901, 900, 930, 931, 932, 933, - 934, 935, 936, 937, 938, 939, 940, 941, - 942, 943, 944, 945, 946, 947, 948, 949, - 950, 900, 901, 901, 900, 901, 901, 901, - 900, 901, 901, 901, 901, 900, 901, 900, - 901, 901, 900, 901, 901, 900, 901, 900, - 900, 900, 901, 901, 900, 901, 901, 900, - 901, 901, 900, 901, 900, 901, 901, 901, - 901, 901, 900, 901, 900, 901, 901, 900, - 900, 900, 901, 901, 901, 900, 901, 900, - 901, 900, 901, 901, 901, 901, 901, 900, - 901, 901, 900, 951, 952, 953, 954, 955, - 900, 901, 899, 900, 901, 900, 901, 900, - 901, 900, 901, 900, 956, 957, 900, 901, - 900, 901, 900, 958, 959, 960, 961, 962, - 963, 964, 965, 966, 967, 968, 969, 970, - 971, 972, 
900, 901, 901, 900, 901, 900, - 901, 900, 901, 901, 901, 901, 900, 901, - 901, 900, 900, 900, 901, 901, 900, 901, - 900, 901, 901, 900, 900, 900, 901, 901, - 900, 901, 901, 901, 900, 901, 901, 901, - 901, 900, 901, 901, 901, 900, 901, 901, - 900, 973, 974, 959, 900, 901, 900, 901, - 901, 900, 975, 976, 977, 978, 979, 980, - 900, 981, 982, 983, 984, 985, 900, 901, - 900, 901, 900, 901, 900, 901, 901, 901, - 901, 901, 900, 901, 900, 986, 987, 988, - 989, 990, 991, 992, 993, 994, 995, 996, - 997, 998, 999, 1000, 1001, 1002, 999, 1003, - 1004, 1005, 1006, 1007, 900, 901, 901, 900, - 900, 901, 900, 900, 901, 901, 901, 900, - 901, 900, 901, 901, 900, 900, 900, 901, - 901, 901, 900, 901, 900, 901, 901, 901, - 900, 901, 901, 901, 901, 901, 901, 901, - 900, 901, 900, 901, 900, 901, 900, 900, - 901, 901, 901, 900, 900, 900, 901, 900, - 901, 901, 900, 901, 900, 901, 901, 900, - 901, 901, 900, 1008, 1009, 1010, 1011, 900, - 901, 900, 901, 900, 901, 900, 901, 900, - 1012, 900, 901, 900, 1013, 1014, 1015, 1016, - 1017, 1018, 900, 901, 901, 901, 900, 900, - 900, 900, 901, 901, 900, 901, 901, 900, - 900, 900, 901, 901, 901, 901, 900, 1019, - 1020, 1021, 900, 901, 901, 901, 901, 901, - 900, 901, 900, 901, 900, 1022, 900, 1023, - 1024, 1025, 1027, 1026, 900, 901, 900, 900, - 901, 901, 951, 952, 1028, 953, 954, 955, - 900, 901, 900, 975, 976, 977, 978, 979, - 980, 1029, 900, 1030, 1031, 1032, 900, 1033, - 900, 1033, 900, 900, 1033, 1033, 900, 1033, - 1033, 900, 1033, 1033, 1033, 900, 1033, 900, - 1033, 1033, 900, 1033, 1033, 1033, 1033, 900, - 1033, 1033, 900, 900, 1033, 1033, 900, 1033, - 900, 1034, 1035, 1036, 1037, 1038, 1039, 1040, - 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, - 1050, 1051, 1052, 1053, 920, 1054, 1055, 1056, - 1057, 1058, 1059, 1060, 1061, 1062, 1041, 900, - 1033, 1033, 1033, 1033, 900, 1033, 900, 1033, - 1033, 900, 901, 901, 900, 900, 901, 1033, - 1033, 900, 1033, 1033, 900, 1033, 900, 901, - 1033, 1033, 1033, 901, 901, 900, 1033, 1033, - 1033, 900, 900, 900, 1033, 900, 901, 901, - 1033, 1033, 901, 900, 1033, 1033, 1033, 900, - 1033, 900, 1033, 900, 1033, 900, 901, 900, - 900, 1033, 1033, 900, 1033, 900, 901, 1033, - 1033, 901, 1033, 900, 901, 1033, 1033, 901, - 901, 1033, 1033, 900, 1033, 1033, 901, 900, - 1033, 1033, 1033, 901, 901, 901, 900, 1033, - 901, 1033, 900, 900, 900, 901, 900, 900, - 900, 1033, 1033, 1033, 901, 1033, 901, 900, - 1033, 1033, 901, 901, 901, 1033, 1033, 1033, - 900, 1033, 1033, 901, 901, 900, 900, 900, - 1033, 1033, 1033, 900, 1033, 900, 901, 1033, - 1033, 1033, 1033, 901, 1033, 901, 901, 900, - 1033, 901, 1033, 900, 1033, 900, 1033, 901, - 1033, 1033, 900, 1033, 900, 1033, 1033, 1033, - 1033, 901, 900, 901, 1033, 900, 1033, 1033, - 1033, 1033, 900, 1033, 900, 1063, 1064, 1065, - 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, - 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, - 1082, 1083, 900, 901, 1033, 1033, 901, 1033, - 900, 901, 1033, 1033, 1033, 900, 1033, 901, - 1033, 1033, 1033, 900, 1033, 900, 1033, 1033, - 900, 1033, 1033, 900, 901, 1033, 901, 900, - 1033, 1033, 1033, 900, 901, 1033, 900, 1033, - 1033, 900, 1033, 1033, 901, 1033, 901, 901, - 1033, 900, 1033, 1033, 901, 900, 1033, 1033, - 1033, 1033, 901, 1033, 1033, 901, 1033, 900, - 1033, 900, 901, 901, 901, 1033, 1033, 901, - 900, 1033, 900, 1033, 900, 901, 901, 901, - 901, 1033, 1033, 901, 1033, 900, 901, 1033, - 1033, 901, 1033, 901, 900, 901, 1033, 901, - 1033, 900, 901, 1033, 1033, 1033, 1033, 901, - 1033, 900, 1033, 1033, 900, 1084, 1085, 1086, - 1087, 1088, 900, 1033, 899, 900, 1033, 900, - 
1033, 900, 1033, 900, 1033, 900, 1089, 1090, - 900, 1033, 900, 1033, 900, 1091, 1092, 1093, - 1094, 962, 1095, 1096, 1097, 1098, 1099, 1100, - 1101, 1102, 1103, 1104, 900, 1033, 1033, 900, - 1033, 900, 1033, 900, 1033, 1033, 1033, 901, - 901, 1033, 900, 1033, 900, 1033, 900, 901, - 1033, 900, 1033, 901, 900, 901, 1033, 1033, - 1033, 901, 1033, 901, 900, 1033, 900, 901, - 1033, 901, 1033, 901, 1033, 900, 1033, 1033, - 901, 1033, 900, 1033, 1033, 1033, 1033, 900, - 1033, 901, 901, 1033, 1033, 901, 900, 1033, - 1033, 901, 1033, 901, 900, 1105, 1106, 1092, - 900, 1033, 900, 1033, 1033, 900, 1107, 1108, - 1109, 1110, 1111, 1112, 1113, 900, 1114, 1115, - 1116, 1117, 1118, 900, 1033, 900, 1033, 900, - 1033, 900, 1033, 1033, 1033, 1033, 1033, 900, - 1033, 900, 1119, 1120, 1121, 1122, 1123, 1124, - 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, - 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, - 1141, 900, 1033, 901, 1033, 900, 900, 1033, - 901, 900, 901, 901, 900, 1033, 901, 1033, - 1033, 900, 1033, 900, 901, 1033, 901, 1033, - 901, 900, 900, 1033, 900, 901, 1033, 1033, - 901, 1033, 901, 1033, 900, 1033, 901, 1033, - 900, 1033, 1033, 901, 1033, 901, 900, 1033, - 1033, 901, 901, 901, 901, 1033, 1033, 900, - 901, 1033, 900, 901, 901, 1033, 900, 1033, - 901, 1033, 901, 1033, 901, 1033, 900, 901, - 900, 1033, 1033, 901, 901, 1033, 901, 1033, - 900, 900, 900, 1033, 1033, 901, 1033, 901, - 1033, 900, 900, 1033, 901, 901, 1033, 901, - 1033, 900, 901, 1033, 901, 1033, 900, 901, - 901, 1033, 1033, 900, 901, 901, 901, 1033, - 1033, 900, 1142, 1143, 1010, 1144, 900, 1033, - 900, 1033, 900, 1033, 900, 1145, 900, 1033, - 900, 1146, 1147, 1148, 1149, 1150, 1151, 900, - 901, 901, 1033, 1033, 1033, 900, 900, 900, - 900, 1033, 1033, 900, 1033, 1033, 900, 900, - 900, 1033, 1033, 1033, 1033, 900, 1152, 1153, - 1154, 900, 1033, 1033, 1033, 1033, 1033, 900, - 1033, 900, 1033, 900, 1155, 900, 901, 900, - 1156, 900, 1157, 1158, 1159, 1161, 1160, 900, - 1033, 900, 900, 1033, 1033, 901, 900, 901, - 900, 3, 265, 3, 1, 1162, 3, 1, - 1162, 1163, 1163, 1162, 1162, 1163, 1162, 1162, - 1163, 1162, 1162, 1162, 1163, 1162, 1163, 1162, - 1162, 1163, 1162, 1162, 1162, 1162, 1163, 1162, - 1162, 1163, 1163, 1162, 1162, 1163, 1162, 1163, - 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1172, - 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, - 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, - 1189, 1190, 1191, 1192, 1193, 1171, 1163, 1162, - 1162, 1162, 1162, 1163, 1162, 1163, 1162, 1162, - 1163, 1194, 1194, 1163, 1163, 1194, 1162, 1194, - 1163, 1163, 1194, 1194, 1163, 1194, 1194, 1163, - 1194, 1194, 1194, 1163, 1194, 1163, 1194, 1194, - 1163, 1194, 1194, 1194, 1194, 1163, 1194, 1194, - 1163, 1163, 1194, 1194, 1163, 1194, 1163, 1195, - 1196, 1197, 1198, 1199, 1201, 1202, 1203, 1205, - 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1184, - 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, - 1221, 1200, 1204, 1163, 1194, 1194, 1194, 1194, - 1163, 1194, 1163, 1194, 1194, 1163, 1163, 1163, - 1194, 1163, 1163, 1163, 1194, 1194, 1194, 1194, - 1163, 1163, 1163, 1163, 1163, 1163, 1194, 1163, - 1163, 1163, 1163, 1163, 1163, 1194, 1163, 1163, - 1163, 1163, 1194, 1194, 1194, 1194, 1163, 1194, - 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, - 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, - 1163, 1163, 1163, 1163, 1163, 1194, 1194, 1194, - 1194, 1194, 1194, 1163, 1194, 1194, 1163, 1163, - 1163, 1163, 1163, 1163, 1194, 1194, 1163, 1194, - 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, - 1194, 1163, 1194, 1194, 1194, 1163, 1194, 1163, - 1194, 1194, 1194, 
1194, 1194, 1163, 1194, 1163, - 1194, 1194, 1194, 1194, 1163, 1194, 1163, 1222, - 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, - 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, - 1239, 1240, 1241, 1242, 1163, 1194, 1194, 1163, - 1194, 1194, 1194, 1163, 1194, 1194, 1194, 1194, - 1163, 1194, 1163, 1194, 1194, 1163, 1194, 1194, - 1163, 1194, 1163, 1163, 1163, 1194, 1194, 1163, - 1194, 1194, 1163, 1194, 1194, 1163, 1194, 1163, - 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, - 1194, 1194, 1163, 1163, 1163, 1194, 1194, 1194, - 1163, 1194, 1163, 1194, 1163, 1194, 1194, 1194, - 1194, 1194, 1163, 1194, 1194, 1163, 1243, 1244, - 1245, 1246, 1247, 1163, 1194, 1248, 1163, 1243, - 1244, 1249, 1245, 1246, 1247, 1163, 1194, 1163, - 1194, 1163, 1194, 1163, 1194, 1163, 1194, 1163, - 1250, 1251, 1163, 1194, 1163, 1194, 1163, 1252, - 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, - 1261, 1262, 1263, 1264, 1265, 1266, 1163, 1194, - 1194, 1163, 1194, 1163, 1194, 1163, 1194, 1194, - 1194, 1194, 1163, 1194, 1194, 1163, 1163, 1163, - 1194, 1194, 1163, 1194, 1163, 1194, 1194, 1163, - 1163, 1163, 1194, 1194, 1163, 1194, 1194, 1194, - 1163, 1194, 1194, 1194, 1194, 1163, 1194, 1194, - 1194, 1163, 1194, 1194, 1163, 1267, 1268, 1253, - 1163, 1194, 1163, 1194, 1194, 1163, 1269, 1270, - 1271, 1272, 1273, 1274, 1275, 1163, 1276, 1277, - 1278, 1279, 1280, 1163, 1194, 1163, 1194, 1163, - 1194, 1163, 1194, 1194, 1194, 1194, 1194, 1163, - 1194, 1163, 1281, 1282, 1283, 1284, 1285, 1286, - 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, - 1295, 1296, 1297, 1294, 1298, 1299, 1300, 1301, - 1302, 1163, 1194, 1194, 1163, 1163, 1194, 1163, - 1163, 1194, 1194, 1194, 1163, 1194, 1163, 1194, - 1194, 1163, 1163, 1163, 1194, 1194, 1194, 1163, - 1194, 1163, 1194, 1194, 1194, 1163, 1194, 1194, - 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, - 1194, 1163, 1194, 1163, 1163, 1194, 1194, 1194, - 1163, 1163, 1163, 1194, 1163, 1194, 1194, 1163, - 1194, 1163, 1194, 1194, 1163, 1194, 1194, 1163, - 1303, 1304, 1305, 1306, 1163, 1194, 1163, 1194, - 1163, 1194, 1163, 1194, 1163, 1307, 1163, 1194, - 1163, 1308, 1309, 1310, 1311, 1312, 1313, 1163, - 1194, 1194, 1194, 1163, 1163, 1163, 1163, 1194, - 1194, 1163, 1194, 1194, 1163, 1163, 1163, 1194, - 1194, 1194, 1194, 1163, 1314, 1315, 1316, 1163, - 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, - 1194, 1163, 1317, 1318, 1319, 1163, 1162, 1163, - 1194, 1163, 1194, 1163, 1320, 1163, 1321, 1322, - 1323, 1325, 1324, 1163, 1194, 1163, 1163, 1194, - 1194, 1269, 1270, 1271, 1272, 1273, 1274, 1163, - 1162, 1163, 1162, 1162, 1163, 1162, 1163, 1194, - 1162, 1162, 1162, 1194, 1194, 1163, 1162, 1162, - 1162, 1163, 1163, 1163, 1162, 1163, 1194, 1194, - 1162, 1162, 1194, 1163, 1162, 1162, 1162, 1163, - 1162, 1163, 1162, 1163, 1162, 1163, 1194, 1163, - 1163, 1162, 1162, 1163, 1162, 1163, 1194, 1162, - 1162, 1194, 1162, 1163, 1194, 1162, 1162, 1194, - 1194, 1162, 1162, 1163, 1162, 1162, 1194, 1163, - 1162, 1162, 1162, 1194, 1194, 1194, 1163, 1162, - 1194, 1162, 1163, 1163, 1163, 1194, 1163, 1163, - 1163, 1162, 1162, 1162, 1194, 1162, 1194, 1163, - 1162, 1162, 1194, 1194, 1194, 1162, 1162, 1162, - 1163, 1162, 1162, 1194, 1194, 1163, 1163, 1163, - 1162, 1162, 1162, 1163, 1162, 1163, 1194, 1162, - 1162, 1162, 1162, 1194, 1162, 1194, 1194, 1163, - 1162, 1194, 1162, 1163, 1162, 1163, 1162, 1194, - 1162, 1162, 1163, 1162, 1163, 1162, 1162, 1162, - 1162, 1194, 1163, 1194, 1162, 1163, 1162, 1162, - 1162, 1162, 1163, 1162, 1163, 1326, 1327, 1328, - 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, - 1337, 1338, 1339, 
1340, 1341, 1342, 1343, 1344, - 1345, 1346, 1163, 1194, 1162, 1162, 1194, 1162, - 1163, 1194, 1162, 1162, 1162, 1163, 1162, 1194, - 1162, 1162, 1162, 1163, 1162, 1163, 1162, 1162, - 1163, 1162, 1162, 1163, 1194, 1162, 1194, 1163, - 1162, 1162, 1162, 1163, 1194, 1162, 1163, 1162, - 1162, 1163, 1162, 1162, 1194, 1162, 1194, 1194, - 1162, 1163, 1162, 1162, 1194, 1163, 1162, 1162, - 1162, 1162, 1194, 1162, 1162, 1194, 1162, 1163, - 1162, 1163, 1194, 1194, 1194, 1162, 1162, 1194, - 1163, 1162, 1163, 1162, 1163, 1194, 1194, 1194, - 1194, 1162, 1162, 1194, 1162, 1163, 1194, 1162, - 1162, 1194, 1162, 1194, 1163, 1194, 1162, 1194, - 1162, 1163, 1194, 1162, 1162, 1162, 1162, 1194, - 1162, 1163, 1162, 1162, 1163, 1347, 1348, 1349, - 1350, 1351, 1163, 1162, 1248, 1163, 1162, 1163, - 1162, 1163, 1162, 1163, 1162, 1163, 1352, 1353, - 1163, 1162, 1163, 1162, 1163, 1354, 1355, 1356, - 1357, 1256, 1358, 1359, 1360, 1361, 1362, 1363, - 1364, 1365, 1366, 1367, 1163, 1162, 1162, 1163, - 1162, 1163, 1162, 1163, 1162, 1162, 1162, 1194, - 1194, 1162, 1163, 1162, 1163, 1162, 1163, 1194, - 1162, 1163, 1162, 1194, 1163, 1194, 1162, 1162, - 1162, 1194, 1162, 1194, 1163, 1162, 1163, 1194, - 1162, 1194, 1162, 1194, 1162, 1163, 1162, 1162, - 1194, 1162, 1163, 1162, 1162, 1162, 1162, 1163, - 1162, 1194, 1194, 1162, 1162, 1194, 1163, 1162, - 1162, 1194, 1162, 1194, 1163, 1368, 1369, 1355, - 1163, 1162, 1163, 1162, 1162, 1163, 1370, 1371, - 1372, 1373, 1374, 1375, 1376, 1163, 1377, 1378, - 1379, 1380, 1381, 1163, 1162, 1163, 1162, 1163, - 1162, 1163, 1162, 1162, 1162, 1162, 1162, 1163, - 1162, 1163, 1382, 1383, 1384, 1385, 1386, 1387, - 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, - 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, - 1404, 1163, 1162, 1194, 1162, 1163, 1163, 1162, - 1194, 1163, 1194, 1194, 1163, 1162, 1194, 1162, - 1162, 1163, 1162, 1163, 1194, 1162, 1194, 1162, - 1194, 1163, 1163, 1162, 1163, 1194, 1162, 1162, - 1194, 1162, 1194, 1162, 1163, 1162, 1194, 1162, - 1163, 1162, 1162, 1194, 1162, 1194, 1163, 1162, - 1162, 1194, 1194, 1194, 1194, 1162, 1162, 1163, - 1194, 1162, 1163, 1194, 1194, 1162, 1163, 1162, - 1194, 1162, 1194, 1162, 1194, 1162, 1163, 1194, - 1163, 1162, 1162, 1194, 1194, 1162, 1194, 1162, - 1163, 1163, 1163, 1162, 1162, 1194, 1162, 1194, - 1162, 1163, 1163, 1162, 1194, 1194, 1162, 1194, - 1162, 1163, 1194, 1162, 1194, 1162, 1163, 1194, - 1194, 1162, 1162, 1163, 1194, 1194, 1194, 1162, - 1162, 1163, 1405, 1406, 1305, 1407, 1163, 1162, - 1163, 1162, 1163, 1162, 1163, 1408, 1163, 1162, - 1163, 1409, 1410, 1411, 1412, 1413, 1414, 1163, - 1194, 1194, 1162, 1162, 1162, 1163, 1163, 1163, - 1163, 1162, 1162, 1163, 1162, 1162, 1163, 1163, - 1163, 1162, 1162, 1162, 1162, 1163, 1415, 1416, - 1417, 1163, 1162, 1162, 1162, 1162, 1162, 1163, - 1162, 1163, 1162, 1163, 1418, 1163, 1194, 1163, - 1419, 1163, 1420, 1421, 1422, 1424, 1423, 1163, - 1162, 1163, 1163, 1162, 1162, 1162, 3, 1, - 3, 1162, 3, 1, 601, 1, 1425, 1427, - 1428, 1429, 1430, 1431, 1432, 1427, 1428, 1429, - 1430, 1431, 1432, 1427, 601, 1426, 890, 1, - 3, 610, 3, 1, 875, 875, 875, 877, - 1, 875, 875, 877, 875, 875, 877, 875, - 875, 875, 877, 875, 875, 877, 875, 875, - 877, 875, 875, 1, 877, 1429, 1430, 1431, - 1432, 1426, 1427, 1429, 1430, 1431, 1432, 1426, - 1427, 1429, 1430, 1431, 1432, 1426, 1427, 1429, - 1430, 1431, 1432, 1426, 1427, 1429, 1430, 1431, - 1432, 1426, 1427, 1429, 1430, 1431, 1432, 1426, - 1427, 1429, 1430, 1431, 1432, 1426, 1427, 1429, - 1430, 1431, 1432, 1426, 1427, 1429, 1430, 1431, - 1432, 1426, 1427, 1428, 890, 1430, 
1431, 1432, - 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427, - 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430, - 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432, - 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427, - 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430, - 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432, - 1426, 1427, 1428, 1429, 890, 1431, 1432, 1426, - 1427, 1428, 1429, 1431, 1432, 1426, 1427, 1428, - 1429, 1431, 1432, 1426, 1427, 1428, 1429, 1431, - 1432, 1426, 1427, 1428, 1429, 1431, 1433, 1434, - 1435, 1437, 1430, 1436, 1, 890, 875, 3, - 875, 877, 3, 877, 3, 1, 875, 1, - 265, 265, 1, 265, 1438, 1439, 601, 1, - 265, 3, 1, 3, 3, 265, 3, 1, - 1441, 1442, 1443, 1444, 1440, 1, 1445, 1446, - 601, 1, 266, 3, 1, 3, 266, 3, - 1, 1447, 601, 1, 3, 265, 3, 1, - 1448, 601, 1, 3, 265, 3, 1, 1449, - 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, - 1458, 1459, 601, 1, 3, 1460, 1, 1462, - 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1461, - 1462, 1462, 1462, 1461, 1462, 1461, 1462, 1462, - 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462, - 1461, 1461, 1462, 1462, 1461, 1462, 1461, 1463, - 1464, 1465, 1466, 1467, 1469, 1470, 1471, 1473, - 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481, - 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, - 1490, 1468, 1472, 1461, 1462, 1462, 1462, 1462, - 1461, 1462, 1461, 1462, 1462, 1461, 1461, 1461, - 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1462, - 1461, 1461, 1461, 1461, 1461, 1461, 1462, 1461, - 1461, 1461, 1461, 1461, 1461, 1462, 1461, 1461, - 1461, 1461, 1462, 1462, 1462, 1462, 1461, 1462, - 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, - 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, - 1461, 1461, 1461, 1461, 1461, 1462, 1462, 1462, - 1462, 1462, 1462, 1461, 1462, 1462, 1461, 1461, - 1461, 1461, 1461, 1461, 1462, 1462, 1461, 1462, - 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, - 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1461, - 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, - 1462, 1462, 1462, 1462, 1461, 1462, 1461, 1491, - 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499, - 1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507, - 1508, 1509, 1510, 1511, 1461, 1462, 1462, 1461, - 1462, 1462, 1462, 1461, 1462, 1462, 1462, 1462, - 1461, 1462, 1461, 1462, 1462, 1461, 1462, 1462, - 1461, 1462, 1461, 1461, 1461, 1462, 1462, 1461, - 1462, 1462, 1461, 1462, 1462, 1461, 1462, 1461, - 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, - 1462, 1462, 1461, 1461, 1461, 1462, 1462, 1462, - 1461, 1462, 1461, 1462, 1461, 1462, 1462, 1462, - 1462, 1462, 1461, 1462, 1462, 1461, 1512, 1513, - 1514, 1515, 1516, 1461, 1462, 1517, 1461, 1512, - 1513, 1518, 1514, 1515, 1516, 1461, 1462, 1461, - 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1461, - 1519, 1520, 1461, 1462, 1461, 1462, 1461, 1521, - 1522, 1523, 1524, 1525, 1526, 1527, 1528, 1529, - 1530, 1531, 1532, 1533, 1534, 1535, 1461, 1462, - 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1462, - 1462, 1462, 1461, 1462, 1462, 1461, 1461, 1461, - 1462, 1462, 1461, 1462, 1461, 1462, 1462, 1461, - 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1462, - 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462, - 1462, 1461, 1462, 1462, 1461, 1536, 1537, 1522, - 1461, 1462, 1461, 1462, 1462, 1461, 1538, 1539, - 1540, 1541, 1542, 1543, 1544, 1461, 1545, 1546, - 1547, 1548, 1549, 1461, 1462, 1461, 1462, 1461, - 1462, 1461, 1462, 1462, 1462, 1462, 1462, 1461, - 1462, 1461, 1550, 1551, 1552, 1553, 1554, 1555, - 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, - 1564, 1565, 1566, 1563, 1567, 1568, 1569, 1570, - 1571, 1461, 1462, 1462, 1461, 1461, 1462, 1461, - 1461, 
1462, 1462, 1462, 1461, 1462, 1461, 1462, - 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1461, - 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, - 1462, 1461, 1462, 1461, 1461, 1462, 1462, 1462, - 1461, 1461, 1461, 1462, 1461, 1462, 1462, 1461, - 1462, 1461, 1462, 1462, 1461, 1462, 1462, 1461, - 1572, 1573, 1574, 1575, 1461, 1462, 1461, 1462, - 1461, 1462, 1461, 1462, 1461, 1576, 1461, 1462, - 1461, 1577, 1578, 1579, 1580, 1581, 1582, 1461, - 1462, 1462, 1462, 1461, 1461, 1461, 1461, 1462, - 1462, 1461, 1462, 1462, 1461, 1461, 1461, 1462, - 1462, 1462, 1462, 1461, 1583, 1584, 1585, 1461, - 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, - 1462, 1461, 1586, 1587, 1588, 1461, 1589, 1461, - 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1589, - 1461, 1589, 1589, 1589, 1461, 1589, 1461, 1589, - 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589, - 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1461, - 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1598, - 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, - 1607, 1608, 1609, 1481, 1610, 1611, 1612, 1613, - 1614, 1615, 1616, 1617, 1618, 1597, 1461, 1589, - 1589, 1589, 1589, 1461, 1589, 1461, 1589, 1589, - 1461, 1462, 1462, 1461, 1461, 1462, 1589, 1589, - 1461, 1589, 1589, 1461, 1589, 1461, 1462, 1589, - 1589, 1589, 1462, 1462, 1461, 1589, 1589, 1589, - 1461, 1461, 1461, 1589, 1461, 1462, 1462, 1589, - 1589, 1462, 1461, 1589, 1589, 1589, 1461, 1589, - 1461, 1589, 1461, 1589, 1461, 1462, 1461, 1461, - 1589, 1589, 1461, 1589, 1461, 1462, 1589, 1589, - 1462, 1589, 1461, 1462, 1589, 1589, 1462, 1462, - 1589, 1589, 1461, 1589, 1589, 1462, 1461, 1589, - 1589, 1589, 1462, 1462, 1462, 1461, 1589, 1462, - 1589, 1461, 1461, 1461, 1462, 1461, 1461, 1461, - 1589, 1589, 1589, 1462, 1589, 1462, 1461, 1589, - 1589, 1462, 1462, 1462, 1589, 1589, 1589, 1461, - 1589, 1589, 1462, 1462, 1461, 1461, 1461, 1589, - 1589, 1589, 1461, 1589, 1461, 1462, 1589, 1589, - 1589, 1589, 1462, 1589, 1462, 1462, 1461, 1589, - 1462, 1589, 1461, 1589, 1461, 1589, 1462, 1589, - 1589, 1461, 1589, 1461, 1589, 1589, 1589, 1589, - 1462, 1461, 1462, 1589, 1461, 1589, 1589, 1589, - 1589, 1461, 1589, 1461, 1619, 1620, 1621, 1622, - 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, - 1631, 1632, 1633, 1634, 1635, 1636, 1637, 1638, - 1639, 1461, 1462, 1589, 1589, 1462, 1589, 1461, - 1462, 1589, 1589, 1589, 1461, 1589, 1462, 1589, - 1589, 1589, 1461, 1589, 1461, 1589, 1589, 1461, - 1589, 1589, 1461, 1462, 1589, 1462, 1461, 1589, - 1589, 1589, 1461, 1462, 1589, 1461, 1589, 1589, - 1461, 1589, 1589, 1462, 1589, 1462, 1462, 1589, - 1461, 1589, 1589, 1462, 1461, 1589, 1589, 1589, - 1589, 1462, 1589, 1589, 1462, 1589, 1461, 1589, - 1461, 1462, 1462, 1462, 1589, 1589, 1462, 1461, - 1589, 1461, 1589, 1461, 1462, 1462, 1462, 1462, - 1589, 1589, 1462, 1589, 1461, 1462, 1589, 1589, - 1462, 1589, 1462, 1461, 1462, 1589, 1462, 1589, - 1461, 1462, 1589, 1589, 1589, 1589, 1462, 1589, - 1461, 1589, 1589, 1461, 1640, 1641, 1642, 1643, - 1644, 1461, 1589, 1517, 1461, 1589, 1461, 1589, - 1461, 1589, 1461, 1589, 1461, 1645, 1646, 1461, - 1589, 1461, 1589, 1461, 1647, 1648, 1649, 1650, - 1525, 1651, 1652, 1653, 1654, 1655, 1656, 1657, - 1658, 1659, 1660, 1461, 1589, 1589, 1461, 1589, - 1461, 1589, 1461, 1589, 1589, 1589, 1462, 1462, - 1589, 1461, 1589, 1461, 1589, 1461, 1462, 1589, - 1461, 1589, 1462, 1461, 1462, 1589, 1589, 1589, - 1462, 1589, 1462, 1461, 1589, 1461, 1462, 1589, - 1462, 1589, 1462, 1589, 1461, 1589, 1589, 1462, - 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589, - 1462, 
1462, 1589, 1589, 1462, 1461, 1589, 1589, - 1462, 1589, 1462, 1461, 1661, 1662, 1648, 1461, - 1589, 1461, 1589, 1589, 1461, 1663, 1664, 1665, - 1666, 1667, 1668, 1669, 1461, 1670, 1671, 1672, - 1673, 1674, 1461, 1589, 1461, 1589, 1461, 1589, - 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589, - 1461, 1675, 1676, 1677, 1678, 1679, 1680, 1681, - 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, - 1690, 1691, 1692, 1693, 1694, 1695, 1696, 1697, - 1461, 1589, 1462, 1589, 1461, 1461, 1589, 1462, - 1461, 1462, 1462, 1461, 1589, 1462, 1589, 1589, - 1461, 1589, 1461, 1462, 1589, 1462, 1589, 1462, - 1461, 1461, 1589, 1461, 1462, 1589, 1589, 1462, - 1589, 1462, 1589, 1461, 1589, 1462, 1589, 1461, - 1589, 1589, 1462, 1589, 1462, 1461, 1589, 1589, - 1462, 1462, 1462, 1462, 1589, 1589, 1461, 1462, - 1589, 1461, 1462, 1462, 1589, 1461, 1589, 1462, - 1589, 1462, 1589, 1462, 1589, 1461, 1462, 1461, - 1589, 1589, 1462, 1462, 1589, 1462, 1589, 1461, - 1461, 1461, 1589, 1589, 1462, 1589, 1462, 1589, - 1461, 1461, 1589, 1462, 1462, 1589, 1462, 1589, - 1461, 1462, 1589, 1462, 1589, 1461, 1462, 1462, - 1589, 1589, 1461, 1462, 1462, 1462, 1589, 1589, - 1461, 1698, 1699, 1574, 1700, 1461, 1589, 1461, - 1589, 1461, 1589, 1461, 1701, 1461, 1589, 1461, - 1702, 1703, 1704, 1705, 1706, 1707, 1461, 1462, - 1462, 1589, 1589, 1589, 1461, 1461, 1461, 1461, - 1589, 1589, 1461, 1589, 1589, 1461, 1461, 1461, - 1589, 1589, 1589, 1589, 1461, 1708, 1709, 1710, - 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589, - 1461, 1589, 1461, 1711, 1461, 1462, 1461, 1712, - 1461, 1713, 1714, 1715, 1717, 1716, 1461, 1589, - 1461, 1461, 1589, 1589, 1462, 1461, 1462, 1461, - 1718, 1461, 1719, 1720, 1721, 1723, 1722, 1461, - 1462, 1461, 1461, 1462, 1462, 1538, 1539, 1540, - 1541, 1542, 1543, 1461, 1538, 1539, 1540, 1541, - 1542, 1543, 1724, 1461, 1725, 1461, 1462, 1461, - 1162, 3, 1, 3, 1162, 3, 1162, 3, - 1, 1162, 1162, 3, 1162, 3, 1162, 3, - 1162, 3, 1162, 3, 1, 3, 3, 1162, - 1162, 3, 1, 1162, 1162, 3, 1, 1162, - 3, 1162, 3, 1, 3, 1162, 3, 1162, - 3, 1, 1162, 3, 1162, 3, 1, 1162, - 3, 1, 1162, 1162, 3, 3, 1162, 3, - 1162, 3, 1162, 1, 1440, 1, 1726, 1440, - 1, 1727, 1435, 1437, 1728, 1437, 601, 1436, - 1, 265, 3, 1, 3, 265, 1, 1, - 1730, 1729, 1733, 1734, 1735, 1736, 1737, 1738, - 1739, 1741, 1742, 1743, 1744, 1745, 1746, 1748, - 1729, 1, 1732, 1740, 1747, 1, 1731, 262, - 264, 1750, 1751, 1752, 1753, 1754, 1755, 1756, - 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, - 1765, 1766, 1767, 1749, 262, 264, 1750, 1751, - 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, - 1760, 1761, 1768, 1763, 1764, 1765, 1769, 1767, - 1749, 256, 258, 1770, 1771, 1772, 1773, 1774, - 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, - 1783, 1784, 1785, 1786, 1787, 1749, 1789, 1790, - 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, - 1799, 1800, 1801, 1803, 268, 530, 576, 1802, - 1788, 527, 529, 1804, 1805, 1806, 1807, 1808, - 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, - 1817, 1818, 1819, 1820, 1821, 1788, 527, 529, - 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, - 1812, 1813, 1814, 1815, 1822, 1817, 1818, 1819, - 1823, 1821, 1788, 521, 523, 1824, 1825, 1826, - 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834, - 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1788, - 527, 529, 1804, 1805, 1806, 1807, 1808, 1809, - 1810, 1811, 1812, 1813, 1814, 1842, 1816, 1817, - 1843, 1844, 1845, 1846, 1819, 1820, 1821, 1788, - 527, 529, 1804, 1805, 1806, 1807, 1808, 1809, - 1810, 1811, 1812, 1813, 1814, 1847, 1816, 1817, - 1818, 1848, 1819, 1820, 1821, 1788, 527, 529, - 1804, 1805, 1806, 
1807, 1808, 1809, 1810, 1811, - 1812, 1813, 1814, 1849, 1816, 1817, 1818, 1850, - 1819, 1820, 1821, 1788, 527, 529, 1804, 1805, - 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, - 1814, 1851, 1816, 1817, 1818, 1852, 1819, 1820, - 1821, 1788, 527, 529, 1804, 1805, 1806, 1807, - 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815, - 1816, 1817, 1818, 1819, 1853, 1821, 1788, 871, - 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861, - 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, - 1870, 1871, 1872, 1873, 1874, 1875, 1854, 871, - 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861, - 1862, 1863, 1864, 1865, 1876, 1867, 1868, 1877, - 1873, 1874, 1875, 1854, 871, 873, 1855, 1856, - 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, - 1865, 1876, 1878, 1868, 1877, 1873, 1879, 1875, - 1854, 865, 867, 1880, 1881, 1882, 1883, 1884, - 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, - 1893, 1894, 1895, 1896, 1897, 1854, 871, 873, - 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, - 1863, 1864, 1865, 1898, 1867, 1868, 1877, 1899, - 1873, 1874, 1875, 1854, 871, 873, 1855, 1856, - 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, - 1865, 1900, 1867, 1868, 1877, 1901, 1873, 1874, - 1875, 1854, 871, 873, 1855, 1856, 1857, 1858, - 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1902, - 1867, 1868, 1877, 1903, 1873, 1874, 1875, 1854, - 1025, 1027, 1905, 1906, 1907, 1908, 1909, 1910, - 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, - 1919, 1920, 1921, 1922, 1904, 1025, 1027, 1905, - 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, - 1914, 1915, 1916, 1923, 1918, 1919, 1920, 1924, - 1922, 1904, 1159, 1161, 1925, 1926, 1927, 1928, - 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, - 1937, 1938, 1939, 1940, 1941, 1942, 1904, 1422, - 1424, 1944, 1945, 1946, 1947, 1948, 1949, 1950, - 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, - 1959, 1960, 1961, 1943, 1323, 1325, 1962, 1963, - 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, - 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, - 1943, 1323, 1325, 1962, 1963, 1964, 1965, 1966, - 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1980, - 1975, 1976, 1977, 1981, 1979, 1943, 1721, 1723, - 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, - 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, - 1999, 2000, 1982, 1721, 1723, 1983, 1984, 1985, - 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, - 1994, 1995, 1996, 1997, 1998, 2001, 2000, 1982, - 1721, 1723, 1983, 1984, 1985, 1986, 1987, 1988, - 1989, 1990, 1991, 1992, 1993, 1994, 2002, 1996, - 1997, 1998, 2003, 2000, 1982, 1715, 1717, 2004, - 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, - 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, - 2021, 1982, -} - -var _graphclust_trans_targs []int16 = []int16{ - 1974, 0, 1974, 1975, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, - 27, 28, 29, 30, 31, 32, 33, 34, - 35, 36, 37, 38, 39, 40, 41, 42, - 44, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 66, 68, 70, - 71, 72, 1976, 69, 74, 75, 77, 78, - 79, 80, 81, 82, 83, 84, 85, 86, - 87, 88, 89, 90, 91, 93, 94, 96, - 102, 125, 130, 132, 139, 143, 97, 98, - 99, 100, 101, 103, 104, 105, 106, 107, - 108, 109, 110, 111, 112, 113, 114, 115, - 116, 117, 118, 119, 120, 121, 122, 123, - 124, 126, 127, 128, 129, 131, 133, 134, - 135, 136, 137, 138, 140, 141, 142, 144, - 291, 292, 1977, 158, 159, 160, 161, 162, - 163, 164, 165, 166, 167, 168, 169, 170, - 171, 172, 173, 174, 175, 176, 177, 178, - 179, 180, 181, 182, 183, 184, 185, 186, - 188, 189, 190, 191, 192, 193, 194, 195, - 196, 197, 198, 199, 200, 201, 202, 203, - 204, 205, 206, 207, 208, 
210, 211, 212, - 213, 214, 216, 217, 219, 220, 221, 222, - 223, 224, 225, 226, 227, 228, 229, 230, - 231, 232, 234, 235, 237, 243, 267, 271, - 273, 280, 284, 238, 239, 240, 241, 242, - 244, 245, 246, 247, 248, 249, 250, 251, - 252, 253, 254, 255, 256, 257, 258, 259, - 260, 261, 262, 263, 264, 265, 266, 268, - 269, 270, 272, 274, 275, 276, 277, 278, - 279, 281, 282, 283, 285, 287, 288, 289, - 145, 290, 146, 294, 295, 296, 2, 297, - 3, 1974, 1978, 1974, 1979, 315, 316, 317, - 318, 319, 320, 321, 322, 323, 324, 325, - 326, 327, 328, 329, 330, 331, 332, 333, - 334, 335, 336, 337, 338, 339, 340, 341, - 342, 344, 345, 346, 347, 348, 349, 350, - 351, 352, 353, 354, 355, 356, 357, 358, - 359, 360, 361, 362, 363, 364, 366, 368, - 370, 371, 372, 1980, 369, 374, 375, 377, - 378, 379, 380, 381, 382, 383, 384, 385, - 386, 387, 388, 389, 390, 391, 393, 394, - 396, 402, 425, 430, 432, 439, 443, 397, - 398, 399, 400, 401, 403, 404, 405, 406, - 407, 408, 409, 410, 411, 412, 413, 414, - 415, 416, 417, 418, 419, 420, 421, 422, - 423, 424, 426, 427, 428, 429, 431, 433, - 434, 435, 436, 437, 438, 440, 441, 442, - 444, 591, 592, 1981, 458, 459, 460, 461, - 462, 463, 464, 465, 466, 467, 468, 469, - 470, 471, 472, 473, 474, 475, 476, 477, - 478, 479, 480, 481, 482, 483, 484, 485, - 486, 488, 489, 490, 491, 492, 493, 494, - 495, 496, 497, 498, 499, 500, 501, 502, - 503, 504, 505, 506, 507, 508, 510, 511, - 512, 513, 514, 516, 517, 519, 520, 521, - 522, 523, 524, 525, 526, 527, 528, 529, - 530, 531, 532, 534, 535, 537, 543, 567, - 571, 573, 580, 584, 538, 539, 540, 541, - 542, 544, 545, 546, 547, 548, 549, 550, - 551, 552, 553, 554, 555, 556, 557, 558, - 559, 560, 561, 562, 563, 564, 565, 566, - 568, 569, 570, 572, 574, 575, 576, 577, - 578, 579, 581, 582, 583, 585, 587, 588, - 589, 445, 590, 446, 594, 595, 596, 302, - 597, 303, 599, 605, 606, 608, 610, 613, - 616, 640, 1982, 622, 1983, 612, 1984, 615, - 618, 620, 621, 624, 625, 629, 630, 631, - 632, 633, 634, 635, 1985, 628, 639, 642, - 643, 644, 645, 646, 649, 650, 651, 652, - 653, 654, 655, 656, 660, 661, 663, 664, - 647, 666, 669, 671, 673, 667, 668, 670, - 672, 674, 678, 679, 680, 681, 682, 683, - 684, 685, 686, 687, 1986, 676, 677, 690, - 691, 299, 695, 696, 698, 997, 1000, 1003, - 1027, 1974, 1987, 1974, 1988, 712, 713, 714, - 715, 716, 717, 718, 719, 720, 721, 722, - 723, 724, 725, 726, 727, 728, 729, 730, - 731, 732, 733, 734, 735, 736, 737, 738, - 739, 741, 742, 743, 744, 745, 746, 747, - 748, 749, 750, 751, 752, 753, 754, 755, - 756, 757, 758, 759, 760, 761, 763, 765, - 767, 768, 769, 1989, 766, 771, 772, 774, - 775, 776, 777, 778, 779, 780, 781, 782, - 783, 784, 785, 786, 787, 788, 790, 791, - 793, 799, 822, 827, 829, 836, 840, 794, - 795, 796, 797, 798, 800, 801, 802, 803, - 804, 805, 806, 807, 808, 809, 810, 811, - 812, 813, 814, 815, 816, 817, 818, 819, - 820, 821, 823, 824, 825, 826, 828, 830, - 831, 832, 833, 834, 835, 837, 838, 839, - 841, 988, 989, 1990, 855, 856, 857, 858, - 859, 860, 861, 862, 863, 864, 865, 866, - 867, 868, 869, 870, 871, 872, 873, 874, - 875, 876, 877, 878, 879, 880, 881, 882, - 883, 885, 886, 887, 888, 889, 890, 891, - 892, 893, 894, 895, 896, 897, 898, 899, - 900, 901, 902, 903, 904, 905, 907, 908, - 909, 910, 911, 913, 914, 916, 917, 918, - 919, 920, 921, 922, 923, 924, 925, 926, - 927, 928, 929, 931, 932, 934, 940, 964, - 968, 970, 977, 981, 935, 936, 937, 938, - 939, 941, 942, 943, 944, 945, 946, 947, - 948, 949, 950, 951, 952, 953, 954, 955, - 956, 957, 958, 959, 960, 961, 962, 963, - 965, 966, 967, 969, 971, 972, 973, 
974, - 975, 976, 978, 979, 980, 982, 984, 985, - 986, 842, 987, 843, 991, 992, 993, 699, - 994, 700, 1009, 1991, 999, 1992, 1002, 1005, - 1007, 1008, 1011, 1012, 1016, 1017, 1018, 1019, - 1020, 1021, 1022, 1993, 1015, 1026, 1029, 1327, - 1328, 1626, 1627, 1994, 1974, 1995, 1043, 1044, - 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, - 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, - 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, - 1069, 1070, 1072, 1073, 1074, 1075, 1076, 1077, - 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, - 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1094, - 1095, 1096, 1097, 1098, 1100, 1101, 1103, 1104, - 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, - 1113, 1114, 1115, 1116, 1117, 1119, 1120, 1122, - 1128, 1151, 1156, 1158, 1165, 1123, 1124, 1125, - 1126, 1127, 1129, 1130, 1131, 1132, 1133, 1134, - 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, - 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, - 1152, 1153, 1154, 1155, 1157, 1159, 1160, 1161, - 1162, 1163, 1164, 1166, 1167, 1168, 1170, 1171, - 1172, 1030, 1173, 1031, 1175, 1177, 1178, 1325, - 1326, 1996, 1192, 1193, 1194, 1195, 1196, 1197, - 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, - 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213, - 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1222, - 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, - 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, - 1239, 1240, 1241, 1242, 1244, 1245, 1246, 1247, - 1248, 1250, 1251, 1253, 1254, 1255, 1256, 1257, - 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, - 1266, 1268, 1269, 1271, 1277, 1301, 1305, 1307, - 1314, 1318, 1272, 1273, 1274, 1275, 1276, 1278, - 1279, 1280, 1281, 1282, 1283, 1284, 1285, 1286, - 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, - 1295, 1296, 1297, 1298, 1299, 1300, 1302, 1303, - 1304, 1306, 1308, 1309, 1310, 1311, 1312, 1313, - 1315, 1316, 1317, 1319, 1321, 1322, 1323, 1179, - 1324, 1180, 1997, 1974, 1342, 1343, 1344, 1345, - 1497, 1498, 1499, 1500, 1501, 1502, 1503, 1504, - 1505, 1506, 1507, 1508, 1509, 1510, 1511, 1512, - 1377, 1513, 1514, 1515, 1516, 1517, 1518, 1519, - 1520, 1521, 1998, 1359, 1360, 1361, 1362, 1363, - 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371, - 1372, 1373, 1374, 1375, 1376, 1378, 1379, 1380, - 1381, 1382, 1383, 1384, 1385, 1386, 1388, 1389, - 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, - 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, - 1406, 1407, 1408, 1410, 1412, 1414, 1415, 1416, - 1999, 1413, 1418, 1419, 1421, 1422, 1423, 1424, - 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, - 1433, 1434, 1435, 1437, 1438, 1440, 1446, 1469, - 1474, 1476, 1483, 1487, 1441, 1442, 1443, 1444, - 1445, 1447, 1448, 1449, 1450, 1451, 1452, 1453, - 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, - 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1470, - 1471, 1472, 1473, 1475, 1477, 1478, 1479, 1480, - 1481, 1482, 1484, 1485, 1486, 1488, 1489, 1490, - 1492, 1493, 1494, 1346, 1495, 1347, 1523, 1524, - 1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532, - 1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540, - 1541, 1542, 1543, 1545, 1546, 1547, 1548, 1549, - 1551, 1552, 1554, 1555, 1556, 1557, 1558, 1559, - 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, - 1569, 1570, 1572, 1578, 1602, 1606, 1608, 1615, - 1619, 1573, 1574, 1575, 1576, 1577, 1579, 1580, - 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, - 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, - 1597, 1598, 1599, 1600, 1601, 1603, 1604, 1605, - 1607, 1609, 1610, 1611, 1612, 1613, 1614, 1616, - 1617, 1618, 1620, 1622, 1623, 1624, 1329, 1625, - 1330, 1630, 
1631, 1632, 1633, 1634, 1635, 1636, - 1637, 1641, 1642, 1643, 1644, 1645, 1647, 1648, - 1628, 1650, 1653, 1655, 1657, 1651, 1652, 1654, - 1656, 1658, 1959, 1960, 1961, 1962, 1963, 1964, - 1965, 1966, 1967, 1968, 2000, 1974, 2001, 1672, - 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, - 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, - 1689, 1690, 1691, 1692, 1693, 1694, 1695, 1696, - 1697, 1698, 1699, 1701, 1702, 1703, 1704, 1705, - 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713, - 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, - 1723, 1725, 1727, 1728, 1729, 2002, 1726, 1731, - 1732, 1734, 1735, 1736, 1737, 1738, 1739, 1740, - 1741, 1742, 1743, 1744, 1745, 1746, 1747, 1748, - 1750, 1751, 1753, 1759, 1782, 1787, 1789, 1796, - 1800, 1754, 1755, 1756, 1757, 1758, 1760, 1761, - 1762, 1763, 1764, 1765, 1766, 1767, 1768, 1769, - 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, - 1778, 1779, 1780, 1781, 1783, 1784, 1785, 1786, - 1788, 1790, 1791, 1792, 1793, 1794, 1795, 1797, - 1798, 1799, 1801, 1948, 1949, 2003, 1815, 1816, - 1817, 1818, 1819, 1820, 1821, 1822, 1823, 1824, - 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, - 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, - 1841, 1842, 1843, 1845, 1846, 1847, 1848, 1849, - 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, - 1858, 1859, 1860, 1861, 1862, 1863, 1864, 1865, - 1867, 1868, 1869, 1870, 1871, 1873, 1874, 1876, - 1877, 1878, 1879, 1880, 1881, 1882, 1883, 1884, - 1885, 1886, 1887, 1888, 1889, 1891, 1892, 1894, - 1900, 1924, 1928, 1930, 1937, 1941, 1895, 1896, - 1897, 1898, 1899, 1901, 1902, 1903, 1904, 1905, - 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, - 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921, - 1922, 1923, 1925, 1926, 1927, 1929, 1931, 1932, - 1933, 1934, 1935, 1936, 1938, 1939, 1940, 1942, - 1944, 1945, 1946, 1802, 1947, 1803, 1951, 1952, - 1953, 1659, 1954, 1660, 1957, 1958, 1971, 1972, - 1973, 1974, 1, 1975, 299, 300, 301, 692, - 693, 694, 697, 1028, 1628, 1629, 1638, 1639, - 1640, 1646, 1649, 1969, 1970, 1974, 4, 5, - 6, 7, 8, 9, 10, 11, 12, 13, - 14, 43, 65, 73, 76, 92, 298, 293, - 67, 95, 147, 148, 149, 150, 151, 152, - 153, 154, 155, 156, 157, 187, 209, 215, - 218, 233, 236, 286, 1974, 600, 601, 602, - 603, 604, 607, 641, 648, 657, 658, 659, - 662, 665, 688, 689, 304, 305, 306, 307, - 308, 309, 310, 311, 312, 313, 314, 343, - 365, 373, 376, 392, 598, 593, 367, 395, - 447, 448, 449, 450, 451, 452, 453, 454, - 455, 456, 457, 487, 509, 515, 518, 533, - 536, 586, 609, 623, 636, 637, 638, 611, - 619, 614, 617, 626, 627, 675, 1974, 701, - 702, 703, 704, 705, 706, 707, 708, 709, - 710, 711, 996, 762, 770, 1010, 1023, 1024, - 1025, 789, 995, 990, 740, 773, 764, 792, - 844, 845, 846, 847, 848, 849, 850, 851, - 852, 853, 854, 884, 906, 912, 915, 930, - 933, 983, 998, 1006, 1001, 1004, 1013, 1014, - 1974, 1032, 1033, 1034, 1035, 1036, 1037, 1038, - 1039, 1040, 1041, 1042, 1071, 1174, 1099, 1102, - 1118, 1176, 1169, 1093, 1121, 1181, 1182, 1183, - 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, - 1221, 1243, 1249, 1252, 1267, 1270, 1320, 1974, - 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, - 1339, 1340, 1341, 1522, 1544, 1550, 1553, 1568, - 1571, 1621, 1348, 1349, 1350, 1351, 1352, 1353, - 1354, 1355, 1356, 1357, 1358, 1387, 1409, 1417, - 1420, 1436, 1496, 1491, 1411, 1439, 1974, 1661, - 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, - 1670, 1671, 1700, 1722, 1730, 1733, 1749, 1956, - 1950, 1955, 1724, 1752, 1804, 1805, 1806, 1807, - 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1844, - 1866, 1872, 1875, 1890, 1893, 1943, -} - 
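The `_graphclust_trans_targs` table that closes above and the `_graphclust_trans_actions` table that follows are parallel arrays in Ragel's table-driven encoding: a computed transition index selects both the target state and the action to run. As a rough sketch of that dispatch pattern — everything below is invented for illustration (a two-state ASCII letter scanner, with the action stored as a direct code, whereas the real generated code stores an offset into `_graphclust_actions`) — consider:

package main

import "fmt"

// Toy table-driven scanner in the style of the generated _graphclust_*
// arrays: one transition index picks a target state (transTargs) and an
// action (transActions) from parallel tables. This machine emits a token
// whenever a run of ASCII letters ends; it is NOT the graphclust machine.
func main() {
	const (
		stStart  = 0 // not inside a letter run
		stLetter = 1 // inside a letter run
	)

	// transIndex[state][byteClass] -> index into the parallel tables.
	// Byte classes: 0 = ASCII letter, 1 = anything else.
	transIndex := [2][2]int{
		{0, 1}, // from stStart
		{2, 3}, // from stLetter
	}
	transTargs := []int{stLetter, stStart, stLetter, stStart}
	transActions := []byte{0, 0, 0, 1} // action 1 = emit pending token

	classify := func(b byte) int {
		if (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') {
			return 0
		}
		return 1
	}

	data := []byte("ab cd!")
	cs, start := stStart, 0
	for p := 0; p < len(data); p++ {
		idx := transIndex[cs][classify(data[p])]
		if transActions[idx] == 1 {
			fmt.Printf("token: %q\n", string(data[start:p])) // prints "ab", then "cd"
			start = p + 1
		}
		cs = transTargs[idx]
	}
}

The real tables work the same way at much larger scale: the binary search over `_graphclust_trans_keys` in the generated exec loop computes the transition index that this sketch reads directly out of `transIndex`.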
-var _graphclust_trans_actions []byte = []byte{ - 31, 0, 27, 40, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 40, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 40, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 34, 40, 25, 40, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 40, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 40, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 40, 0, 40, 0, 40, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 40, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 40, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 29, 51, 17, 40, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 40, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 40, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 51, 0, 51, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 40, 0, 0, 0, 0, - 0, 0, 0, 40, 21, 40, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 40, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 
0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 40, 19, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 40, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 40, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 40, 23, 40, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 40, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 40, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 43, 1, 47, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 15, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 13, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 5, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 9, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 7, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 11, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, -} - -var _graphclust_to_state_actions []byte = []byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 
- 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 
0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 37, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, -} - -var _graphclust_from_state_actions []byte = []byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 
0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 
0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 3, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, -} - -var _graphclust_eof_trans []int16 = []int16{ - 0, 0, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 0, 0, 0, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 
268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 0, 0, 0, 0, - 0, 0, 610, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 610, 612, 612, - 610, 612, 612, 610, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 610, 612, - 612, 612, 612, 0, 0, 0, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 
901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 0, - 0, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 
1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1750, - 1750, 1750, 1789, 1789, 1789, 1789, 1789, 1789, - 1789, 1789, 1789, 1855, 1855, 1855, 1855, 1855, - 1855, 1855, 1905, 1905, 1905, 1944, 1944, 1944, - 1983, 1983, 1983, 1983, -} - -const graphclust_start int = 1974 -const graphclust_first_final int = 1974 -const graphclust_error int = 0 - -const graphclust_en_main int = 1974 - - -// line 14 "grapheme_clusters.rl" - - -var Error = errors.New("invalid UTF8 text") - -// ScanGraphemeClusters is a split function for bufio.Scanner that splits -// on grapheme cluster boundaries. 
-func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) { - if len(data) == 0 { - return 0, nil, nil - } - - // Ragel state - cs := 0 // Current State - p := 0 // "Pointer" into data - pe := len(data) // End-of-data "pointer" - ts := 0 - te := 0 - act := 0 - eof := pe - - // Make Go compiler happy - _ = ts - _ = te - _ = act - _ = eof - - startPos := 0 - endPos := 0 - - -// line 4976 "grapheme_clusters.go" - { - cs = graphclust_start - ts = 0 - te = 0 - act = 0 - } - -// line 4984 "grapheme_clusters.go" - { - var _klen int - var _trans int - var _acts int - var _nacts uint - var _keys int - if p == pe { - goto _test_eof - } - if cs == 0 { - goto _out - } -_resume: - _acts = int(_graphclust_from_state_actions[cs]) - _nacts = uint(_graphclust_actions[_acts]); _acts++ - for ; _nacts > 0; _nacts-- { - _acts++ - switch _graphclust_actions[_acts - 1] { - case 4: -// line 1 "NONE" - -ts = p - -// line 5008 "grapheme_clusters.go" - } - } - - _keys = int(_graphclust_key_offsets[cs]) - _trans = int(_graphclust_index_offsets[cs]) - - _klen = int(_graphclust_single_lengths[cs]) - if _klen > 0 { - _lower := int(_keys) - var _mid int - _upper := int(_keys + _klen - 1) - for { - if _upper < _lower { - break - } - - _mid = _lower + ((_upper - _lower) >> 1) - switch { - case data[p] < _graphclust_trans_keys[_mid]: - _upper = _mid - 1 - case data[p] > _graphclust_trans_keys[_mid]: - _lower = _mid + 1 - default: - _trans += int(_mid - int(_keys)) - goto _match - } - } - _keys += _klen - _trans += _klen - } - - _klen = int(_graphclust_range_lengths[cs]) - if _klen > 0 { - _lower := int(_keys) - var _mid int - _upper := int(_keys + (_klen << 1) - 2) - for { - if _upper < _lower { - break - } - - _mid = _lower + (((_upper - _lower) >> 1) & ^1) - switch { - case data[p] < _graphclust_trans_keys[_mid]: - _upper = _mid - 2 - case data[p] > _graphclust_trans_keys[_mid + 1]: - _lower = _mid + 2 - default: - _trans += int((_mid - int(_keys)) >> 1) - goto _match - } - } - _trans += _klen - } - -_match: - _trans = int(_graphclust_indicies[_trans]) -_eof_trans: - cs = int(_graphclust_trans_targs[_trans]) - - if _graphclust_trans_actions[_trans] == 0 { - goto _again - } - - _acts = int(_graphclust_trans_actions[_trans]) - _nacts = uint(_graphclust_actions[_acts]); _acts++ - for ; _nacts > 0; _nacts-- { - _acts++ - switch _graphclust_actions[_acts-1] { - case 0: -// line 46 "grapheme_clusters.rl" - - - startPos = p - - case 1: -// line 50 "grapheme_clusters.rl" - - - endPos = p - - case 5: -// line 1 "NONE" - -te = p+1 - - case 6: -// line 54 "grapheme_clusters.rl" - -act = 3; - case 7: -// line 54 "grapheme_clusters.rl" - -te = p+1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 8: -// line 54 "grapheme_clusters.rl" - -te = p+1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 9: -// line 54 "grapheme_clusters.rl" - -te = p -p-- -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 10: -// line 54 "grapheme_clusters.rl" - -te = p -p-- -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 11: -// line 54 "grapheme_clusters.rl" - -te = p -p-- -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 12: -// line 54 "grapheme_clusters.rl" - -te = p -p-- -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 13: -// line 54 "grapheme_clusters.rl" - -te = p -p-- -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 14: -// line 54 "grapheme_clusters.rl" - -te = p -p-- -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 15: -// line 54 
"grapheme_clusters.rl" - -p = (te) - 1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 16: -// line 54 "grapheme_clusters.rl" - -p = (te) - 1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 17: -// line 54 "grapheme_clusters.rl" - -p = (te) - 1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 18: -// line 54 "grapheme_clusters.rl" - -p = (te) - 1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 19: -// line 54 "grapheme_clusters.rl" - -p = (te) - 1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 20: -// line 54 "grapheme_clusters.rl" - -p = (te) - 1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 21: -// line 1 "NONE" - - switch act { - case 0: - {cs = 0 -goto _again -} - case 3: - {p = (te) - 1 - - return endPos+1, data[startPos:endPos+1], nil - } - } - -// line 5218 "grapheme_clusters.go" - } - } - -_again: - _acts = int(_graphclust_to_state_actions[cs]) - _nacts = uint(_graphclust_actions[_acts]); _acts++ - for ; _nacts > 0; _nacts-- { - _acts++ - switch _graphclust_actions[_acts-1] { - case 2: -// line 1 "NONE" - -ts = 0 - - case 3: -// line 1 "NONE" - -act = 0 - -// line 5238 "grapheme_clusters.go" - } - } - - if cs == 0 { - goto _out - } - p++ - if p != pe { - goto _resume - } - _test_eof: {} - if p == eof { - if _graphclust_eof_trans[cs] > 0 { - _trans = int(_graphclust_eof_trans[cs] - 1) - goto _eof_trans - } - } - - _out: {} - } - -// line 116 "grapheme_clusters.rl" - - - // If we fall out here then we were unable to complete a sequence. - // If we weren't able to complete a sequence then either we've - // reached the end of a partial buffer (so there's more data to come) - // or we have an isolated symbol that would normally be part of a - // grapheme cluster but has appeared in isolation here. - - if !atEOF { - // Request more - return 0, nil, nil - } - - // Just take the first UTF-8 sequence and return that. - _, seqLen := utf8.DecodeRune(data) - return seqLen, data[:seqLen], nil -} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl deleted file mode 100644 index 003ffbf59..000000000 --- a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl +++ /dev/null @@ -1,132 +0,0 @@ -package textseg - -import ( - "errors" - "unicode/utf8" -) - -// Generated from grapheme_clusters.rl. DO NOT EDIT -%%{ - # (except you are actually in grapheme_clusters.rl here, so edit away!) - - machine graphclust; - write data; -}%% - -var Error = errors.New("invalid UTF8 text") - -// ScanGraphemeClusters is a split function for bufio.Scanner that splits -// on grapheme cluster boundaries. 
-func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) { - if len(data) == 0 { - return 0, nil, nil - } - - // Ragel state - cs := 0 // Current State - p := 0 // "Pointer" into data - pe := len(data) // End-of-data "pointer" - ts := 0 - te := 0 - act := 0 - eof := pe - - // Make Go compiler happy - _ = ts - _ = te - _ = act - _ = eof - - startPos := 0 - endPos := 0 - - %%{ - include GraphemeCluster "grapheme_clusters_table.rl"; - - action start { - startPos = p - } - - action end { - endPos = p - } - - action emit { - return endPos+1, data[startPos:endPos+1], nil - } - - ZWJGlue = ZWJ (Glue_After_Zwj | E_Base_GAZ Extend* E_Modifier?)?; - AnyExtender = Extend | ZWJGlue | SpacingMark; - Extension = AnyExtender*; - ReplacementChar = (0xEF 0xBF 0xBD); - - CRLFSeq = CR LF; - ControlSeq = Control | ReplacementChar; - HangulSeq = ( - L+ (((LV? V+ | LVT) T*)?|LV?) | - LV V* T* | - V+ T* | - LVT T* | - T+ - ) Extension; - EmojiSeq = (E_Base | E_Base_GAZ) Extend* E_Modifier? Extension; - ZWJSeq = ZWJGlue Extension; - EmojiFlagSeq = Regional_Indicator Regional_Indicator? Extension; - - UTF8Cont = 0x80 .. 0xBF; - AnyUTF8 = ( - 0x00..0x7F | - 0xC0..0xDF . UTF8Cont | - 0xE0..0xEF . UTF8Cont . UTF8Cont | - 0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont - ); - - # OtherSeq is any character that isn't at the start of one of the extended sequences above, followed by extension - OtherSeq = (AnyUTF8 - (CR|LF|Control|ReplacementChar|L|LV|V|LVT|T|E_Base|E_Base_GAZ|ZWJ|Regional_Indicator|Prepend)) Extension; - - # PrependSeq is prepend followed by any of the other patterns above, except control characters which explicitly break - PrependSeq = Prepend+ (HangulSeq|EmojiSeq|ZWJSeq|EmojiFlagSeq|OtherSeq)?; - - CRLFTok = CRLFSeq >start @end; - ControlTok = ControlSeq >start @end; - HangulTok = HangulSeq >start @end; - EmojiTok = EmojiSeq >start @end; - ZWJTok = ZWJSeq >start @end; - EmojiFlagTok = EmojiFlagSeq >start @end; - OtherTok = OtherSeq >start @end; - PrependTok = PrependSeq >start @end; - - main := |* - CRLFTok => emit; - ControlTok => emit; - HangulTok => emit; - EmojiTok => emit; - ZWJTok => emit; - EmojiFlagTok => emit; - PrependTok => emit; - OtherTok => emit; - - # any single valid UTF-8 character would also be valid per spec, - # but we'll handle that separately after the loop so we can deal - # with requesting more bytes if we're not at EOF. - *|; - - write init; - write exec; - }%% - - // If we fall out here then we were unable to complete a sequence. - // If we weren't able to complete a sequence then either we've - // reached the end of a partial buffer (so there's more data to come) - // or we have an isolated symbol that would normally be part of a - // grapheme cluster but has appeared in isolation here. - - if !atEOF { - // Request more - return 0, nil, nil - } - - // Just take the first UTF-8 sequence and return that. 
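// (Illustrative note: utf8.DecodeRune reports (RuneError, 1) for an invalid
// leading byte, so even malformed input advances by at least one byte and
// the scanner cannot stall on bad data.)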
- _, seqLen := utf8.DecodeRune(data) - return seqLen, data[:seqLen], nil -} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl deleted file mode 100644 index fb4511825..000000000 --- a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl +++ /dev/null @@ -1,1583 +0,0 @@ -# The following Ragel file was autogenerated with unicode2ragel.rb -# from: http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -# -# It defines ["Prepend", "CR", "LF", "Control", "Extend", "Regional_Indicator", "SpacingMark", "L", "V", "T", "LV", "LVT", "E_Base", "E_Modifier", "ZWJ", "Glue_After_Zwj", "E_Base_GAZ"]. -# -# To use this, make sure that your alphtype is set to byte, -# and that your input is in utf8. - -%%{ - machine GraphemeCluster; - - Prepend = - 0xD8 0x80..0x85 #Cf [6] ARABIC NUMBER SIGN..ARABIC NUMBER ... - | 0xDB 0x9D #Cf ARABIC END OF AYAH - | 0xDC 0x8F #Cf SYRIAC ABBREVIATION MARK - | 0xE0 0xA3 0xA2 #Cf ARABIC DISPUTED END OF AYAH - | 0xE0 0xB5 0x8E #Lo MALAYALAM LETTER DOT REPH - | 0xF0 0x91 0x82 0xBD #Cf KAITHI NUMBER SIGN - | 0xF0 0x91 0x87 0x82..0x83 #Lo [2] SHARADA SIGN JIHVAMULIYA..SHARA... - ; - - CR = - 0x0D #Cc - ; - - LF = - 0x0A #Cc - ; - - Control = - 0x00..0x09 #Cc [10] .. - | 0x0B..0x0C #Cc [2] .. - | 0x0E..0x1F #Cc [18] .. - | 0x7F #Cc [33] .. - | 0xC2 0x80..0x9F # - | 0xC2 0xAD #Cf SOFT HYPHEN - | 0xD8 0x9C #Cf ARABIC LETTER MARK - | 0xE1 0xA0 0x8E #Cf MONGOLIAN VOWEL SEPARATOR - | 0xE2 0x80 0x8B #Cf ZERO WIDTH SPACE - | 0xE2 0x80 0x8E..0x8F #Cf [2] LEFT-TO-RIGHT MARK..RIGHT-TO-LEFT ... - | 0xE2 0x80 0xA8 #Zl LINE SEPARATOR - | 0xE2 0x80 0xA9 #Zp PARAGRAPH SEPARATOR - | 0xE2 0x80 0xAA..0xAE #Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-... - | 0xE2 0x81 0xA0..0xA4 #Cf [5] WORD JOINER..INVISIBLE PLUS - | 0xE2 0x81 0xA5 #Cn - | 0xE2 0x81 0xA6..0xAF #Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIG... - | 0xED 0xA0 0x80..0xFF #Cs [2048] .... - | 0xEF 0xBF 0xB9..0xBB #Cf [3] INTERLINEAR ANNOTATION ANCHOR..INT... - | 0xF0 0x9B 0xB2 0xA0..0xA3 #Cf [4] SHORTHAND FORMAT LETTER OVERLAP... - | 0xF0 0x9D 0x85 0xB3..0xBA #Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSI... - | 0xF3 0xA0 0x80 0x80 #Cn - | 0xF3 0xA0 0x80 0x81 #Cf LANGUAGE TAG - | 0xF3 0xA0 0x80 0x82..0x9F #Cn [30] .. - | 0xF3 0xA0 0x82 0x80..0xFF #Cn [128] .. - | 0xF3 0xA0 0x83 0x00..0xBF # - | 0xF3 0xA0 0x87 0xB0..0xFF #Cn [3600] .. -# -# This script uses the unicode spec to generate a Ragel state machine -# that recognizes unicode alphanumeric characters. It generates 5 -# character classes: uupper, ulower, ualpha, udigit, and ualnum. -# Currently supported encodings are UTF-8 [default] and UCS-4. -# -# Usage: unicode2ragel.rb [options] -# -e, --encoding [ucs4 | utf8] Data encoding -# -h, --help Show this message -# -# This script was originally written as part of the Ferret search -# engine library. 
-# -# Author: Rakan El-Khalil - -require 'optparse' -require 'open-uri' - -ENCODINGS = [ :utf8, :ucs4 ] -ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" } -DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt" -DEFAULT_MACHINE_NAME= "WChar" - -### -# Display vars & default option - -TOTAL_WIDTH = 80 -RANGE_WIDTH = 23 -@encoding = :utf8 -@chart_url = DEFAULT_CHART_URL -machine_name = DEFAULT_MACHINE_NAME -properties = [] -@output = $stdout - -### -# Option parsing - -cli_opts = OptionParser.new do |opts| - opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o| - @encoding = o.downcase.to_sym - end - opts.on("-h", "--help", "Show this message") do - puts opts - exit - end - opts.on("-u", "--url URL", "URL to process") do |o| - @chart_url = o - end - opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o| - machine_name = o - end - opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o| - properties = o - end - opts.on("-o", "--output FILE", "output file") do |o| - @output = File.new(o, "w+") - end -end - -cli_opts.parse(ARGV) -unless ENCODINGS.member? @encoding - puts "Invalid encoding: #{@encoding}" - puts cli_opts - exit -end - -## -# Downloads the document at url and yields every alpha line's hex -# range and description. - -def each_alpha( url, property ) - open( url ) do |file| - file.each_line do |line| - next if line =~ /^#/; - next if line !~ /; #{property} #/; - - range, description = line.split(/;/) - range.strip! - description.gsub!(/.*#/, '').strip! - - if range =~ /\.\./ - start, stop = range.split '..' - else start = stop = range - end - - yield start.hex .. stop.hex, description - end - end -end - -### -# Formats to hex at minimum width - -def to_hex( n ) - r = "%0X" % n - r = "0#{r}" unless (r.length % 2).zero? - r -end - -### -# UCS4 is just a straight hex conversion of the unicode codepoint. - -def to_ucs4( range ) - rangestr = "0x" + to_hex(range.begin) - rangestr << "..0x" + to_hex(range.end) if range.begin != range.end - [ rangestr ] -end - -## -# 0x00 - 0x7f -> 0zzzzzzz[7] -# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6] -# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6] -# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6] - -UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff] - -def to_utf8_enc( n ) - r = 0 - if n <= 0x7f - r = n - elsif n <= 0x7ff - y = 0xc0 | (n >> 6) - z = 0x80 | (n & 0x3f) - r = y << 8 | z - elsif n <= 0xffff - x = 0xe0 | (n >> 12) - y = 0x80 | (n >> 6) & 0x3f - z = 0x80 | n & 0x3f - r = x << 16 | y << 8 | z - elsif n <= 0x10ffff - w = 0xf0 | (n >> 18) - x = 0x80 | (n >> 12) & 0x3f - y = 0x80 | (n >> 6) & 0x3f - z = 0x80 | n & 0x3f - r = w << 24 | x << 16 | y << 8 | z - end - - to_hex(r) -end - -def from_utf8_enc( n ) - n = n.hex - r = 0 - if n <= 0x7f - r = n - elsif n <= 0xdfff - y = (n >> 8) & 0x1f - z = n & 0x3f - r = y << 6 | z - elsif n <= 0xefffff - x = (n >> 16) & 0x0f - y = (n >> 8) & 0x3f - z = n & 0x3f - r = x << 10 | y << 6 | z - elsif n <= 0xf7ffffff - w = (n >> 24) & 0x07 - x = (n >> 16) & 0x3f - y = (n >> 8) & 0x3f - z = n & 0x3f - r = w << 18 | x << 12 | y << 6 | z - end - r -end - -### -# Given a range, splits it up into ranges that can be continuously -# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff] -# This is not strictly needed since the current [5.1] unicode standard -# doesn't have ranges that straddle utf8 boundaries. This is included -# for completeness as there is no telling if that will ever change. 
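The same boundary-splitting idea, sketched in Go rather than the script's Ruby (an approximation of the helper that follows, not a drop-in):

// utf8Ranges splits an inclusive codepoint range at the UTF-8
// encoding-length boundaries (1/2/3/4-byte forms), so each sub-range can
// be expressed as a fixed-width byte pattern.
func utf8Ranges(lo, hi rune) [][2]rune {
	var out [][2]rune
	for _, max := range []rune{0x7F, 0x7FF, 0xFFFF, 0x10FFFF} {
		if lo > max {
			continue // range starts above this encoding-length boundary
		}
		if hi <= max {
			return append(out, [2]rune{lo, hi}) // fits entirely below it
		}
		out = append(out, [2]rune{lo, max}) // straddles: split here
		lo = max + 1
	}
	return out
}

// utf8Ranges(0x00, 0xFF) => [[0x00 0x7F] [0x80 0xFF]], matching the
// example in the comment above.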
- -def utf8_ranges( range ) - ranges = [] - UTF8_BOUNDARIES.each do |max| - if range.begin <= max - if range.end <= max - ranges << range - return ranges - end - - ranges << (range.begin .. max) - range = (max + 1) .. range.end - end - end - ranges -end - -def build_range( start, stop ) - size = start.size/2 - left = size - 1 - return [""] if size < 1 - - a = start[0..1] - b = stop[0..1] - - ### - # Shared prefix - - if a == b - return build_range(start[2..-1], stop[2..-1]).map do |elt| - "0x#{a} " + elt - end - end - - ### - # Unshared prefix, end of run - - return ["0x#{a}..0x#{b} "] if left.zero? - - ### - # Unshared prefix, not end of run - # Range can be 0x123456..0x56789A - # Which is equivalent to: - # 0x123456 .. 0x12FFFF - # 0x130000 .. 0x55FFFF - # 0x560000 .. 0x56789A - - ret = [] - ret << build_range(start, a + "FF" * left) - - ### - # Only generate middle range if need be. - - if a.hex+1 != b.hex - max = to_hex(b.hex - 1) - max = "FF" if b == "FF" - ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left - end - - ### - # Don't generate last range if it is covered by first range - - ret << build_range(b + "00" * left, stop) unless b == "FF" - ret.flatten! -end - -def to_utf8( range ) - utf8_ranges( range ).map do |r| - begin_enc = to_utf8_enc(r.begin) - end_enc = to_utf8_enc(r.end) - build_range begin_enc, end_enc - end.flatten! -end - -## -# Perform a 3-way comparison of the number of codepoints advertised by -# the unicode spec for the given range, the originally parsed range, -# and the resulting utf8 encoded range. - -def count_codepoints( code ) - code.split(' ').inject(1) do |acc, elt| - if elt =~ /0x(.+)\.\.0x(.+)/ - if @encoding == :utf8 - acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1) - else - acc * ($2.hex - $1.hex + 1) - end - else - acc - end - end -end - -def is_valid?( range, desc, codes ) - spec_count = 1 - spec_count = $1.to_i if desc =~ /\[(\d+)\]/ - range_count = range.end - range.begin + 1 - - sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) } - sum == spec_count and sum == range_count -end - -## -# Generate the state maching to stdout - -def generate_machine( name, property ) - pipe = " " - @output.puts " #{name} = " - each_alpha( @chart_url, property ) do |range, desc| - - codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range) - - #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless - # is_valid? range, desc, codes - - range_width = codes.map { |a| a.size }.max - range_width = RANGE_WIDTH if range_width < RANGE_WIDTH - - desc_width = TOTAL_WIDTH - RANGE_WIDTH - 11 - desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH - - if desc.size > desc_width - desc = desc[0..desc_width - 4] + "..." - end - - codes.each_with_index do |r, idx| - desc = "" unless idx.zero? - code = "%-#{range_width}s" % r - @output.puts " #{pipe} #{code} ##{desc}" - pipe = "|" - end - end - @output.puts " ;" - @output.puts "" -end - -@output.puts < 0 { - buf.Write(spaceBytes) - } - fmt.Fprint(&buf, pc) - } - buf.Write(closeBracketBytes) - return buf.Bytes(), nil -} - -// Format implements fmt.Formatter by printing the CallStack as square brackets -// ([, ]) surrounding a space separated list of Calls each formatted with the -// supplied verb and options. 
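A quick usage sketch (assuming these identifiers belong to the github.com/go-stack/stack package, which they match):

package main

import (
	"fmt"

	"github.com/go-stack/stack"
)

func main() {
	// TrimRuntime drops the Go-runtime frames from the top of the stack;
	// %+v renders each Call as a package-qualified file:line.
	cs := stack.Trace().TrimRuntime()
	fmt.Printf("%+v\n", cs)
}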
-func (cs CallStack) Format(s fmt.State, verb rune) { - s.Write(openBracketBytes) - for i, pc := range cs { - if i > 0 { - s.Write(spaceBytes) - } - pc.Format(s, verb) - } - s.Write(closeBracketBytes) -} - -// Trace returns a CallStack for the current goroutine with element 0 -// identifying the calling function. -func Trace() CallStack { - var pcs [512]uintptr - n := runtime.Callers(1, pcs[:]) - - frames := runtime.CallersFrames(pcs[:n]) - cs := make(CallStack, 0, n) - - // Skip extra frame retrieved just to make sure the runtime.sigpanic - // special case is handled. - frame, more := frames.Next() - - for more { - frame, more = frames.Next() - cs = append(cs, Call{frame: frame}) - } - - return cs -} - -// TrimBelow returns a slice of the CallStack with all entries below c -// removed. -func (cs CallStack) TrimBelow(c Call) CallStack { - for len(cs) > 0 && cs[0] != c { - cs = cs[1:] - } - return cs -} - -// TrimAbove returns a slice of the CallStack with all entries above c -// removed. -func (cs CallStack) TrimAbove(c Call) CallStack { - for len(cs) > 0 && cs[len(cs)-1] != c { - cs = cs[:len(cs)-1] - } - return cs -} - -// pkgIndex returns the index that results in file[index:] being the path of -// file relative to the compile time GOPATH, and file[:index] being the -// $GOPATH/src/ portion of file. funcName must be the name of a function in -// file as returned by runtime.Func.Name. -func pkgIndex(file, funcName string) int { - // As of Go 1.6.2 there is no direct way to know the compile time GOPATH - // at runtime, but we can infer the number of path segments in the GOPATH. - // We note that runtime.Func.Name() returns the function name qualified by - // the import path, which does not include the GOPATH. Thus we can trim - // segments from the beginning of the file path until the number of path - // separators remaining is one more than the number of path separators in - // the function name. For example, given: - // - // GOPATH /home/user - // file /home/user/src/pkg/sub/file.go - // fn.Name() pkg/sub.Type.Method - // - // We want to produce: - // - // file[:idx] == /home/user/src/ - // file[idx:] == pkg/sub/file.go - // - // From this we can easily see that fn.Name() has one less path separator - // than our desired result for file[idx:]. We count separators from the - // end of the file path until it finds two more than in the function name - // and then move one character forward to preserve the initial path - // segment without a leading separator. - const sep = "/" - i := len(file) - for n := strings.Count(funcName, sep) + 2; n > 0; n-- { - i = strings.LastIndex(file[:i], sep) - if i == -1 { - i = -len(sep) - break - } - } - // get back to 0 or trim the leading separator - return i + len(sep) -} - -// pkgFilePath returns the frame's filepath relative to the compile-time GOPATH, -// or its module path joined to its path relative to the module root. -// -// As of Go 1.11 there is no direct way to know the compile time GOPATH or -// module paths at runtime, but we can piece together the desired information -// from available information. We note that runtime.Frame.Function contains the -// function name qualified by the package path, which includes the module path -// but not the GOPATH. We can extract the package path from that and append the -// last segments of the file path to arrive at the desired package qualified -// file path. 
For example, given: -// -// GOPATH /home/user -// import path pkg/sub -// frame.File /home/user/src/pkg/sub/file.go -// frame.Function pkg/sub.Type.Method -// Desired return pkg/sub/file.go -// -// It appears that we simply need to trim ".Type.Method" from frame.Function and -// append "/" + path.Base(file). -// -// But there are other wrinkles. Although it is idiomatic to do so, the internal -// name of a package is not required to match the last segment of its import -// path. In addition, the introduction of modules in Go 1.11 allows working -// without a GOPATH. So we also must make these work right: -// -// GOPATH /home/user -// import path pkg/go-sub -// package name sub -// frame.File /home/user/src/pkg/go-sub/file.go -// frame.Function pkg/sub.Type.Method -// Desired return pkg/go-sub/file.go -// -// Module path pkg/v2 -// import path pkg/v2/go-sub -// package name sub -// frame.File /home/user/cloned-pkg/go-sub/file.go -// frame.Function pkg/v2/sub.Type.Method -// Desired return pkg/v2/go-sub/file.go -// -// We can handle all of these situations by using the package path extracted -// from frame.Function up to, but not including, the last segment as the prefix -// and the last two segments of frame.File as the suffix of the returned path. -// This preserves the existing behavior when working in a GOPATH without modules -// and a semantically equivalent behavior when used in module aware project. -func pkgFilePath(frame *runtime.Frame) string { - pre := pkgPrefix(frame.Function) - post := pathSuffix(frame.File) - if pre == "" { - return post - } - return pre + "/" + post -} - -// pkgPrefix returns the import path of the function's package with the final -// segment removed. -func pkgPrefix(funcName string) string { - const pathSep = "/" - end := strings.LastIndex(funcName, pathSep) - if end == -1 { - return "" - } - return funcName[:end] -} - -// pathSuffix returns the last two segments of path. -func pathSuffix(path string) string { - const pathSep = "/" - lastSep := strings.LastIndex(path, pathSep) - if lastSep == -1 { - return path - } - return path[strings.LastIndex(path[:lastSep], pathSep)+1:] -} - -var runtimePath string - -func init() { - var pcs [3]uintptr - runtime.Callers(0, pcs[:]) - frames := runtime.CallersFrames(pcs[:]) - frame, _ := frames.Next() - file := frame.File - - idx := pkgIndex(frame.File, frame.Function) - - runtimePath = file[:idx] - if runtime.GOOS == "windows" { - runtimePath = strings.ToLower(runtimePath) - } -} - -func inGoroot(c Call) bool { - file := c.frame.File - if len(file) == 0 || file[0] == '?' { - return true - } - if runtime.GOOS == "windows" { - file = strings.ToLower(file) - } - return strings.HasPrefix(file, runtimePath) || strings.HasSuffix(file, "/_testmain.go") -} - -// TrimRuntime returns a slice of the CallStack with the topmost entries from -// the go runtime removed. It considers any calls originating from unknown -// files, files under GOROOT, or _testmain.go as part of the runtime. -func (cs CallStack) TrimRuntime() CallStack { - for len(cs) > 0 && inGoroot(cs[len(cs)-1]) { - cs = cs[:len(cs)-1] - } - return cs -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go new file mode 100644 index 000000000..60e82caa9 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/decode.go @@ -0,0 +1,524 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONUnmarshalV2 = false + +// UnmarshalNext unmarshals the next JSON object from d into m. +func UnmarshalNext(d *json.Decoder, m proto.Message) error { + return new(Unmarshaler).UnmarshalNext(d, m) +} + +// Unmarshal unmarshals a JSON object from r into m. +func Unmarshal(r io.Reader, m proto.Message) error { + return new(Unmarshaler).Unmarshal(r, m) +} + +// UnmarshalString unmarshals a JSON object from s into m. +func UnmarshalString(s string, m proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(s), m) +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // AllowUnknownFields specifies whether to allow messages to contain + // unknown JSON fields, as opposed to failing to unmarshal. + AllowUnknownFields bool + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. + AnyResolver AnyResolver +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize the way +// they are unmarshaled from JSON. Messages that implement this should also +// implement JSONPBMarshaler so that the custom format can be produced. +// +// The JSON unmarshaling must follow the JSON to proto specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Unmarshal unmarshals a JSON object from r into m. +func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error { + return u.UnmarshalNext(json.NewDecoder(r), m) +} + +// UnmarshalNext unmarshals the next JSON object from d into m. +func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error { + if m == nil { + return errors.New("invalid nil message") + } + + // Parse the next JSON object from the stream. + raw := json.RawMessage{} + if err := d.Decode(&raw); err != nil { + return err + } + + // Check for custom unmarshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsu, ok := m.(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, raw) + } + + mr := proto.MessageReflect(m) + + // NOTE: For historical reasons, a top-level null is treated as a noop. + // This is incorrect, but kept for compatibility. + if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" { + return nil + } + + if wrapJSONUnmarshalV2 { + // NOTE: If input message is non-empty, we need to preserve merge semantics + // of the old jsonpb implementation. These semantics are not supported by + // the protobuf JSON specification. + isEmpty := true + mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool { + isEmpty = false // at least one iteration implies non-empty + return false + }) + if !isEmpty { + // Perform unmarshaling into a newly allocated, empty message. + mr = mr.New() + + // Use a defer to copy all unmarshaled fields into the original message. 
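// (Illustrative note: the deferred Range below runs when UnmarshalNext
// returns, i.e. after the v2 unmarshal has populated the scratch message
// mr, so its fields are merged back into the caller's original message,
// preserving the legacy merge semantics described above.)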
+ dst := proto.MessageReflect(m) + defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + dst.Set(fd, v) + return true + }) + } + + // Unmarshal using the v2 JSON unmarshaler. + opts := protojson.UnmarshalOptions{ + DiscardUnknown: u.AllowUnknownFields, + } + if u.AnyResolver != nil { + opts.Resolver = anyResolver{u.AnyResolver} + } + return opts.Unmarshal(raw, mr.Interface()) + } else { + if err := u.unmarshalMessage(mr, raw); err != nil { + return err + } + return protoV2.CheckInitialized(mr.Interface()) + } +} + +func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error { + md := m.Descriptor() + fds := md.Fields() + + if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, in) + } + + if string(in) == "null" && md.FullName() != "google.protobuf.Value" { + return nil + } + + switch wellKnownType(md.FullName()) { + case "Any": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + rawTypeURL, ok := jsonObject["@type"] + if !ok { + return errors.New("Any JSON doesn't have '@type'") + } + typeURL, err := unquoteString(string(rawTypeURL)) + if err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL) + } + m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL)) + + var m2 protoreflect.Message + if u.AnyResolver != nil { + mi, err := u.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + if err == protoregistry.NotFound { + return fmt.Errorf("could not resolve Any message type: %v", typeURL) + } + return err + } + m2 = mt.New() + } + + if wellKnownType(m2.Descriptor().FullName()) != "" { + rawValue, ok := jsonObject["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + if err := u.unmarshalMessage(m2, rawValue); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } else { + delete(jsonObject, "@type") + rawJSON, err := json.Marshal(jsonObject) + if err != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) + } + if err = u.unmarshalMessage(m2, rawJSON); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } + + rawWire, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire)) + return nil + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + v, err := u.unmarshalValue(m.NewField(fd), in, fd) + if err != nil { + return err + } + m.Set(fd, v) + return nil + case "Duration": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + d, err := time.ParseDuration(v) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + sec := d.Nanoseconds() / 1e9 + nsec := d.Nanoseconds() % 1e9 + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Timestamp": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + t, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + sec := 
t.Unix() + nsec := t.Nanosecond() + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Value": + switch { + case string(in) == "null": + m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0)) + case string(in) == "true": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true)) + case string(in) == "false": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false)) + case hasPrefixAndSuffix('"', in, '"'): + s, err := unquoteString(string(in)) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s)) + case hasPrefixAndSuffix('[', in, ']'): + v := m.Mutable(fds.ByNumber(6)) + return u.unmarshalMessage(v.Message(), in) + case hasPrefixAndSuffix('{', in, '}'): + v := m.Mutable(fds.ByNumber(5)) + return u.unmarshalMessage(v.Message(), in) + default: + f, err := strconv.ParseFloat(string(in), 0) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f)) + } + return nil + case "ListValue": + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + lv := m.Mutable(fds.ByNumber(1)).List() + for _, raw := range jsonArray { + ve := lv.NewElement() + if err := u.unmarshalMessage(ve.Message(), raw); err != nil { + return err + } + lv.Append(ve) + } + return nil + case "Struct": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + + mv := m.Mutable(fds.ByNumber(1)).Map() + for key, raw := range jsonObject { + kv := protoreflect.ValueOf(key).MapKey() + vv := mv.NewValue() + if err := u.unmarshalMessage(vv.Message(), raw); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", key, err) + } + mv.Set(kv, vv) + } + return nil + } + + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + // Handle known fields. + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if fd.IsWeak() && fd.Message().IsPlaceholder() { + continue // weak reference is not linked in + } + + // Search for any raw JSON value associated with this field. + var raw json.RawMessage + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + name = string(fd.JSONName()) + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + // Handle extension fields. + for name, raw := range jsonObject { + if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") { + continue + } + + // Resolve the extension field by name. 
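// (Illustrative note: extension fields arrive in JSON as bracketed keys,
// e.g. {"[my.pkg.my_ext]": ...} with a hypothetical extension name; the
// brackets are stripped below and the remaining full name is resolved
// against the global type registry.)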
+ xname := protoreflect.FullName(name[len("[") : len(name)-len("]")]) + xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(md) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + continue + } + delete(jsonObject, name) + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName()) + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + if !u.AllowUnknownFields && len(jsonObject) > 0 { + for name := range jsonObject { + return fmt.Errorf("unknown field %q in %v", name, md.FullName()) + } + } + return nil +} + +func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { + if md := fd.Message(); md != nil { + return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated + } + return false +} + +func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool { + if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated { + _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler) + return ok + } + return false +} + +func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch { + case fd.IsList(): + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return v, err + } + lv := v.List() + for _, raw := range jsonArray { + ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd) + if err != nil { + return v, err + } + lv.Append(ve) + } + return v, nil + case fd.IsMap(): + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return v, err + } + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + for key, raw := range jsonObject { + var kv protoreflect.MapKey + if kfd.Kind() == protoreflect.StringKind { + kv = protoreflect.ValueOf(key).MapKey() + } else { + v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd) + if err != nil { + return v, err + } + kv = v.MapKey() + } + + vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd) + if err != nil { + return v, err + } + mv.Set(kv, vv) + } + return v, nil + default: + return u.unmarshalSingularValue(v, in, fd) + } +} + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(+1), + `"-Infinity"`: math.Inf(-1), +} + +func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch fd.Kind() { + case protoreflect.BoolKind: + return unmarshalValue(in, new(bool)) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return unmarshalValue(trimQuote(in), new(int32)) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return unmarshalValue(trimQuote(in), new(int64)) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return unmarshalValue(trimQuote(in), new(uint32)) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return unmarshalValue(trimQuote(in), new(uint64)) + case protoreflect.FloatKind: + if f, ok 
:= nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat32(float32(f)), nil + } + return unmarshalValue(trimQuote(in), new(float32)) + case protoreflect.DoubleKind: + if f, ok := nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat64(float64(f)), nil + } + return unmarshalValue(trimQuote(in), new(float64)) + case protoreflect.StringKind: + return unmarshalValue(in, new(string)) + case protoreflect.BytesKind: + return unmarshalValue(in, new([]byte)) + case protoreflect.EnumKind: + if hasPrefixAndSuffix('"', in, '"') { + vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in))) + if vd == nil { + return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName()) + } + return protoreflect.ValueOfEnum(vd.Number()), nil + } + return unmarshalValue(in, new(protoreflect.EnumNumber)) + case protoreflect.MessageKind, protoreflect.GroupKind: + err := u.unmarshalMessage(v.Message(), in) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } +} + +func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) { + err := json.Unmarshal(in, v) + return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err +} + +func unquoteString(in string) (out string, err error) { + err = json.Unmarshal([]byte(in), &out) + return out, err +} + +func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool { + if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix { + return true + } + return false +} + +// trimQuote is like unquoteString but simply strips surrounding quotes. +// This is incorrect, but is behavior done by the legacy implementation. +func trimQuote(in []byte) []byte { + if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' { + in = in[1 : len(in)-1] + } + return in +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go new file mode 100644 index 000000000..685c80a62 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/encode.go @@ -0,0 +1,559 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONMarshalV2 = false + +// Marshaler is a configurable object for marshaling protocol buffer messages +// to the specified JSON representation. +type Marshaler struct { + // OrigName specifies whether to use the original protobuf name for fields. + OrigName bool + + // EnumsAsInts specifies whether to render enum values as integers, + // as opposed to string values. + EnumsAsInts bool + + // EmitDefaults specifies whether to render fields with zero values. + EmitDefaults bool + + // Indent controls whether the output is compact or not. + // If empty, the output is compact JSON. Otherwise, every JSON object + // entry and JSON array value will be on its own line. + // Each line will be preceded by repeated copies of Indent, where the + // number of copies is the current indentation depth. + Indent string + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. 
+ AnyResolver AnyResolver +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should also +// implement JSONPBUnmarshaler so that the custom format can be parsed. +// +// The JSON marshaling must follow the proto to JSON specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// Marshal serializes a protobuf message as JSON into w. +func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error { + b, err := jm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// MarshalToString serializes a protobuf message as JSON in string form. +func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) { + b, err := jm.marshal(m) + if err != nil { + return "", err + } + return string(b), nil +} + +func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) { + v := reflect.ValueOf(m) + if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, errors.New("Marshal called with nil") + } + + // Check for custom marshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsm, ok := m.(JSONPBMarshaler); ok { + return jsm.MarshalJSONPB(jm) + } + + if wrapJSONMarshalV2 { + opts := protojson.MarshalOptions{ + UseProtoNames: jm.OrigName, + UseEnumNumbers: jm.EnumsAsInts, + EmitUnpopulated: jm.EmitDefaults, + Indent: jm.Indent, + } + if jm.AnyResolver != nil { + opts.Resolver = anyResolver{jm.AnyResolver} + } + return opts.Marshal(proto.MessageReflect(m).Interface()) + } else { + // Check for unpopulated required fields first. + m2 := proto.MessageReflect(m) + if err := protoV2.CheckInitialized(m2.Interface()); err != nil { + return nil, err + } + + w := jsonWriter{Marshaler: jm} + err := w.marshalMessage(m2, "", "") + return w.buf, err + } +} + +type jsonWriter struct { + *Marshaler + buf []byte +} + +func (w *jsonWriter) write(s string) { + w.buf = append(w.buf, s...) +} + +func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error { + if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(w.Marshaler) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if b, err = json.Marshal(js); err != nil { + return err + } + } + w.write(string(b)) + return nil + } + + md := m.Descriptor() + fds := md.Fields() + + // Handle well-known types. 
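// (Illustrative examples of the well-known-type encodings implemented
// below: a Duration of 90s plus 1.5ms marshals as "90.001500s", trimmed
// to 0/3/6/9 fractional digits; a Timestamp is Z-normalized RFC 3339
// with the same digit rule, e.g. "2006-01-02T15:04:05.001Z".)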
+ const secondInNanos = int64(time.Second / time.Nanosecond) + switch wellKnownType(md.FullName()) { + case "Any": + return w.marshalAny(m, indent) + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + case "Duration": + const maxSecondsInDuration = 315576000000 + // "Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + var sign string + if s < 0 || ns < 0 { + sign, s, ns = "-", -1*s, -1*ns + } + x := fmt.Sprintf("%s%d.%09d", sign, s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vs"`, x)) + return nil + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vZ"`, x)) + return nil + case "Value": + // JSON value; which is a null, number, string, bool, object, or array. + od := md.Oneofs().Get(0) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("nil Value") + } + return w.marshalValue(fd, m.Get(fd), indent) + case "Struct", "ListValue": + // JSON object or array. + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + + firstField := true + if typeURL != "" { + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + if fd == nil { + continue + } + } else { + i++ + } + + v := m.Get(fd) + + if !m.Has(fd) { + if !w.EmitDefaults || fd.ContainingOneof() != nil { + continue + } + if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) { + v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars + } + } + + if !firstField { + w.writeComma() + } + if err := w.marshalField(fd, v, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if md.ExtensionRanges().Len() > 0 { + // Collect a sorted list of all extension descriptor and values. 
+ type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + if !firstField { + w.writeComma() + } + if err := w.marshalField(ext.desc, ext.val, indent); err != nil { + return err + } + firstField = false + } + } + + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) writeComma() { + if w.Indent != "" { + w.write(",\n") + } else { + w.write(",") + } +} + +func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." + md := m.Descriptor() + typeURL := m.Get(md.Fields().ByNumber(1)).String() + rawVal := m.Get(md.Fields().ByNumber(2)).Bytes() + + var m2 protoreflect.Message + if w.AnyResolver != nil { + mi, err := w.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + return err + } + m2 = mt.New() + } + + if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil { + return err + } + + if wellKnownType(m2.Descriptor().FullName()) == "" { + return w.marshalMessage(m2, indent, typeURL) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + w.writeComma() + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + w.write(`"value": `) + } else { + w.write(`"value":`) + } + if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil { + return err + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"@type":`) + if w.Indent != "" { + w.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + w.write(string(b)) + return nil +} + +// marshalField writes field description and value to the Writer. +func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"`) + switch { + case fd.IsExtension(): + // For message set, use the fname of the message as the extension name. 
+ name := string(fd.FullName()) + if isMessageSet(fd.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + w.write("[" + name + "]") + case w.OrigName: + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + w.write(name) + default: + w.write(string(fd.JSONName())) + } + w.write(`":`) + if w.Indent != "" { + w.write(" ") + } + return w.marshalValue(fd, v, indent) +} + +func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case fd.IsList(): + w.write("[") + comma := "" + lv := v.List() + for i := 0; i < lv.Len(); i++ { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write("]") + return nil + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + + // Collect a sorted list of all map keys and values. + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + + w.write(`{`) + comma := "" + for _, entry := range entries { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + + s := fmt.Sprint(entry.key.Interface()) + b, err := json.Marshal(s) + if err != nil { + return err + } + w.write(string(b)) + + w.write(`:`) + if w.Indent != "" { + w.write(` `) + } + + if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write(`}`) + return nil + default: + return w.marshalSingularValue(fd, v, indent) + } +} + +func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case !v.IsValid(): + w.write("null") + return nil + case fd.Message() != nil: + return w.marshalMessage(v.Message(), indent+w.Indent, "") + case fd.Enum() != nil: + if fd.Enum().FullName() == "google.protobuf.NullValue" { + w.write("null") + return nil + } + + vd := fd.Enum().Values().ByNumber(v.Enum()) + if vd == nil || w.EnumsAsInts { + w.write(strconv.Itoa(int(v.Enum()))) + } else { + w.write(`"` + string(vd.Name()) + `"`) + } + return nil + default: + switch v.Interface().(type) { + case float32, float64: + switch { + case math.IsInf(v.Float(), +1): + w.write(`"Infinity"`) + return nil + case math.IsInf(v.Float(), -1): + w.write(`"-Infinity"`) + return nil + case math.IsNaN(v.Float()): + w.write(`"NaN"`) + 
return nil + } + case int64, uint64: + w.write(fmt.Sprintf(`"%d"`, v.Interface())) + return nil + } + + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + w.write(string(b)) + return nil + } +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/json.go b/vendor/github.com/golang/protobuf/jsonpb/json.go new file mode 100644 index 000000000..480e2448d --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/json.go @@ -0,0 +1,69 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jsonpb provides functionality to marshal and unmarshal between a +// protocol buffer message and JSON. It follows the specification at +// https://developers.google.com/protocol-buffers/docs/proto3#json. +// +// Do not rely on the default behavior of the standard encoding/json package +// when called on generated message types as it does not operate correctly. +// +// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson" +// package instead. +package jsonpb + +import ( + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// AnyResolver takes a type URL, present in an Any message, +// and resolves it into an instance of the associated message. +type AnyResolver interface { + Resolve(typeURL string) (proto.Message, error) +} + +type anyResolver struct{ AnyResolver } + +func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + return r.FindMessageByURL(string(message)) +} + +func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { + m, err := r.Resolve(url) + if err != nil { + return nil, err + } + return protoimpl.X.MessageTypeOf(m), nil +} + +func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByName(field) +} + +func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) +} + +func wellKnownType(s protoreflect.FullName) string { + if s.Parent() == "google.protobuf" { + switch s.Name() { + case "Empty", "Any", + "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue", + "Duration", "Timestamp", + "NullValue", "Struct", "Value", "ListValue": + return string(s.Name()) + } + } + return "" +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go index 1e7ff6420..066b4323b 100644 --- a/vendor/github.com/golang/protobuf/proto/registry.go +++ b/vendor/github.com/golang/protobuf/proto/registry.go @@ -13,6 +13,7 @@ import ( "strings" "sync" + "google.golang.org/protobuf/reflect/protodesc" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/runtime/protoimpl" @@ -62,14 +63,7 @@ func FileDescriptor(s filePath) fileDescGZIP { // Find the descriptor in the v2 registry. 
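// (Illustrative note on the hunk below: rather than relying on the legacy
// ProtoLegacyRawDesc hook, the file descriptor is now converted to a
// descriptorpb.FileDescriptorProto via protodesc and re-marshaled, which
// yields an equivalent, though not necessarily byte-identical, raw form.)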
var b []byte if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { - if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok { - b = fd.ProtoLegacyRawDesc() - } else { - // TODO: Use protodesc.ToFileDescriptorProto to construct - // a descriptorpb.FileDescriptorProto and marshal it. - // However, doing so causes the proto package to have a dependency - // on descriptorpb, leading to cyclic dependency issues. - } + b, _ = Marshal(protodesc.ToFileDescriptorProto(fd)) } // Locally cache the raw descriptor form for the file. diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go deleted file mode 100644 index 63dc05785..000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go +++ /dev/null @@ -1,200 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto - -package descriptor - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - descriptorpb "google.golang.org/protobuf/types/descriptorpb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/descriptor.proto. - -type FieldDescriptorProto_Type = descriptorpb.FieldDescriptorProto_Type - -const FieldDescriptorProto_TYPE_DOUBLE = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE -const FieldDescriptorProto_TYPE_FLOAT = descriptorpb.FieldDescriptorProto_TYPE_FLOAT -const FieldDescriptorProto_TYPE_INT64 = descriptorpb.FieldDescriptorProto_TYPE_INT64 -const FieldDescriptorProto_TYPE_UINT64 = descriptorpb.FieldDescriptorProto_TYPE_UINT64 -const FieldDescriptorProto_TYPE_INT32 = descriptorpb.FieldDescriptorProto_TYPE_INT32 -const FieldDescriptorProto_TYPE_FIXED64 = descriptorpb.FieldDescriptorProto_TYPE_FIXED64 -const FieldDescriptorProto_TYPE_FIXED32 = descriptorpb.FieldDescriptorProto_TYPE_FIXED32 -const FieldDescriptorProto_TYPE_BOOL = descriptorpb.FieldDescriptorProto_TYPE_BOOL -const FieldDescriptorProto_TYPE_STRING = descriptorpb.FieldDescriptorProto_TYPE_STRING -const FieldDescriptorProto_TYPE_GROUP = descriptorpb.FieldDescriptorProto_TYPE_GROUP -const FieldDescriptorProto_TYPE_MESSAGE = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE -const FieldDescriptorProto_TYPE_BYTES = descriptorpb.FieldDescriptorProto_TYPE_BYTES -const FieldDescriptorProto_TYPE_UINT32 = descriptorpb.FieldDescriptorProto_TYPE_UINT32 -const FieldDescriptorProto_TYPE_ENUM = descriptorpb.FieldDescriptorProto_TYPE_ENUM -const FieldDescriptorProto_TYPE_SFIXED32 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED32 -const FieldDescriptorProto_TYPE_SFIXED64 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED64 -const FieldDescriptorProto_TYPE_SINT32 = descriptorpb.FieldDescriptorProto_TYPE_SINT32 -const FieldDescriptorProto_TYPE_SINT64 = descriptorpb.FieldDescriptorProto_TYPE_SINT64 - -var FieldDescriptorProto_Type_name = descriptorpb.FieldDescriptorProto_Type_name -var FieldDescriptorProto_Type_value = descriptorpb.FieldDescriptorProto_Type_value - -type FieldDescriptorProto_Label = descriptorpb.FieldDescriptorProto_Label - -const FieldDescriptorProto_LABEL_OPTIONAL = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL -const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED -const FieldDescriptorProto_LABEL_REPEATED = descriptorpb.FieldDescriptorProto_LABEL_REPEATED - -var 
FieldDescriptorProto_Label_name = descriptorpb.FieldDescriptorProto_Label_name -var FieldDescriptorProto_Label_value = descriptorpb.FieldDescriptorProto_Label_value - -type FileOptions_OptimizeMode = descriptorpb.FileOptions_OptimizeMode - -const FileOptions_SPEED = descriptorpb.FileOptions_SPEED -const FileOptions_CODE_SIZE = descriptorpb.FileOptions_CODE_SIZE -const FileOptions_LITE_RUNTIME = descriptorpb.FileOptions_LITE_RUNTIME - -var FileOptions_OptimizeMode_name = descriptorpb.FileOptions_OptimizeMode_name -var FileOptions_OptimizeMode_value = descriptorpb.FileOptions_OptimizeMode_value - -type FieldOptions_CType = descriptorpb.FieldOptions_CType - -const FieldOptions_STRING = descriptorpb.FieldOptions_STRING -const FieldOptions_CORD = descriptorpb.FieldOptions_CORD -const FieldOptions_STRING_PIECE = descriptorpb.FieldOptions_STRING_PIECE - -var FieldOptions_CType_name = descriptorpb.FieldOptions_CType_name -var FieldOptions_CType_value = descriptorpb.FieldOptions_CType_value - -type FieldOptions_JSType = descriptorpb.FieldOptions_JSType - -const FieldOptions_JS_NORMAL = descriptorpb.FieldOptions_JS_NORMAL -const FieldOptions_JS_STRING = descriptorpb.FieldOptions_JS_STRING -const FieldOptions_JS_NUMBER = descriptorpb.FieldOptions_JS_NUMBER - -var FieldOptions_JSType_name = descriptorpb.FieldOptions_JSType_name -var FieldOptions_JSType_value = descriptorpb.FieldOptions_JSType_value - -type MethodOptions_IdempotencyLevel = descriptorpb.MethodOptions_IdempotencyLevel - -const MethodOptions_IDEMPOTENCY_UNKNOWN = descriptorpb.MethodOptions_IDEMPOTENCY_UNKNOWN -const MethodOptions_NO_SIDE_EFFECTS = descriptorpb.MethodOptions_NO_SIDE_EFFECTS -const MethodOptions_IDEMPOTENT = descriptorpb.MethodOptions_IDEMPOTENT - -var MethodOptions_IdempotencyLevel_name = descriptorpb.MethodOptions_IdempotencyLevel_name -var MethodOptions_IdempotencyLevel_value = descriptorpb.MethodOptions_IdempotencyLevel_value - -type FileDescriptorSet = descriptorpb.FileDescriptorSet -type FileDescriptorProto = descriptorpb.FileDescriptorProto -type DescriptorProto = descriptorpb.DescriptorProto -type ExtensionRangeOptions = descriptorpb.ExtensionRangeOptions -type FieldDescriptorProto = descriptorpb.FieldDescriptorProto -type OneofDescriptorProto = descriptorpb.OneofDescriptorProto -type EnumDescriptorProto = descriptorpb.EnumDescriptorProto -type EnumValueDescriptorProto = descriptorpb.EnumValueDescriptorProto -type ServiceDescriptorProto = descriptorpb.ServiceDescriptorProto -type MethodDescriptorProto = descriptorpb.MethodDescriptorProto - -const Default_MethodDescriptorProto_ClientStreaming = descriptorpb.Default_MethodDescriptorProto_ClientStreaming -const Default_MethodDescriptorProto_ServerStreaming = descriptorpb.Default_MethodDescriptorProto_ServerStreaming - -type FileOptions = descriptorpb.FileOptions - -const Default_FileOptions_JavaMultipleFiles = descriptorpb.Default_FileOptions_JavaMultipleFiles -const Default_FileOptions_JavaStringCheckUtf8 = descriptorpb.Default_FileOptions_JavaStringCheckUtf8 -const Default_FileOptions_OptimizeFor = descriptorpb.Default_FileOptions_OptimizeFor -const Default_FileOptions_CcGenericServices = descriptorpb.Default_FileOptions_CcGenericServices -const Default_FileOptions_JavaGenericServices = descriptorpb.Default_FileOptions_JavaGenericServices -const Default_FileOptions_PyGenericServices = descriptorpb.Default_FileOptions_PyGenericServices -const Default_FileOptions_PhpGenericServices = descriptorpb.Default_FileOptions_PhpGenericServices -const 
Default_FileOptions_Deprecated = descriptorpb.Default_FileOptions_Deprecated -const Default_FileOptions_CcEnableArenas = descriptorpb.Default_FileOptions_CcEnableArenas - -type MessageOptions = descriptorpb.MessageOptions - -const Default_MessageOptions_MessageSetWireFormat = descriptorpb.Default_MessageOptions_MessageSetWireFormat -const Default_MessageOptions_NoStandardDescriptorAccessor = descriptorpb.Default_MessageOptions_NoStandardDescriptorAccessor -const Default_MessageOptions_Deprecated = descriptorpb.Default_MessageOptions_Deprecated - -type FieldOptions = descriptorpb.FieldOptions - -const Default_FieldOptions_Ctype = descriptorpb.Default_FieldOptions_Ctype -const Default_FieldOptions_Jstype = descriptorpb.Default_FieldOptions_Jstype -const Default_FieldOptions_Lazy = descriptorpb.Default_FieldOptions_Lazy -const Default_FieldOptions_Deprecated = descriptorpb.Default_FieldOptions_Deprecated -const Default_FieldOptions_Weak = descriptorpb.Default_FieldOptions_Weak - -type OneofOptions = descriptorpb.OneofOptions -type EnumOptions = descriptorpb.EnumOptions - -const Default_EnumOptions_Deprecated = descriptorpb.Default_EnumOptions_Deprecated - -type EnumValueOptions = descriptorpb.EnumValueOptions - -const Default_EnumValueOptions_Deprecated = descriptorpb.Default_EnumValueOptions_Deprecated - -type ServiceOptions = descriptorpb.ServiceOptions - -const Default_ServiceOptions_Deprecated = descriptorpb.Default_ServiceOptions_Deprecated - -type MethodOptions = descriptorpb.MethodOptions - -const Default_MethodOptions_Deprecated = descriptorpb.Default_MethodOptions_Deprecated -const Default_MethodOptions_IdempotencyLevel = descriptorpb.Default_MethodOptions_IdempotencyLevel - -type UninterpretedOption = descriptorpb.UninterpretedOption -type SourceCodeInfo = descriptorpb.SourceCodeInfo -type GeneratedCodeInfo = descriptorpb.GeneratedCodeInfo -type DescriptorProto_ExtensionRange = descriptorpb.DescriptorProto_ExtensionRange -type DescriptorProto_ReservedRange = descriptorpb.DescriptorProto_ReservedRange -type EnumDescriptorProto_EnumReservedRange = descriptorpb.EnumDescriptorProto_EnumReservedRange -type UninterpretedOption_NamePart = descriptorpb.UninterpretedOption_NamePart -type SourceCodeInfo_Location = descriptorpb.SourceCodeInfo_Location -type GeneratedCodeInfo_Annotation = descriptorpb.GeneratedCodeInfo_Annotation - -var File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = []byte{ - 0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, - 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3b, 
- 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x32, -} - -var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() } -func file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() { - if File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto = out.File - file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = nil - file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = nil - file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index e729dcff1..85f9f5736 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -19,6 +19,8 @@ const urlPrefix = "type.googleapis.com/" // AnyMessageName returns the message name contained in an anypb.Any message. // Most type assertions should use the Is function instead. +// +// Deprecated: Call the any.MessageName method instead. func AnyMessageName(any *anypb.Any) (string, error) { name, err := anyMessageName(any) return string(name), err @@ -38,6 +40,8 @@ func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { } // MarshalAny marshals the given message m into an anypb.Any message. +// +// Deprecated: Call the anypb.New function instead. func MarshalAny(m proto.Message) (*anypb.Any, error) { switch dm := m.(type) { case DynamicAny: @@ -58,6 +62,9 @@ func MarshalAny(m proto.Message) (*anypb.Any, error) { // Empty returns a new message of the type specified in an anypb.Any message. // It returns protoregistry.NotFound if the corresponding message type could not // be resolved in the global registry. +// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead +// to resolve the message name and create a new instance of it. func Empty(any *anypb.Any) (proto.Message, error) { name, err := anyMessageName(any) if err != nil { @@ -76,6 +83,8 @@ func Empty(any *anypb.Any) (proto.Message, error) { // // The target message m may be a *DynamicAny message. If the underlying message // type could not be resolved, then this returns protoregistry.NotFound. +// +// Deprecated: Call the any.UnmarshalTo method instead. 
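The ptypes/any.go deprecation notices each name their replacement on the anypb type itself. A short sketch of the suggested calls, using the real anypb and durationpb packages:

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/protobuf/types/known/anypb"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        // anypb.New replaces ptypes.MarshalAny.
        a, err := anypb.New(durationpb.New(3 * time.Second))
        if err != nil {
            panic(err)
        }

        // a.MessageIs replaces ptypes.Is.
        fmt.Println(a.MessageIs(&durationpb.Duration{})) // true

        // a.UnmarshalTo replaces ptypes.UnmarshalAny.
        var d durationpb.Duration
        if err := a.UnmarshalTo(&d); err != nil {
            panic(err)
        }
        fmt.Println(d.AsDuration()) // 3s
    }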
func UnmarshalAny(any *anypb.Any, m proto.Message) error { if dm, ok := m.(*DynamicAny); ok { if dm.Message == nil { @@ -100,6 +109,8 @@ func UnmarshalAny(any *anypb.Any, m proto.Message) error { } // Is reports whether the Any message contains a message of the specified type. +// +// Deprecated: Call the any.MessageIs method instead. func Is(any *anypb.Any, m proto.Message) bool { if any == nil || m == nil { return false @@ -119,6 +130,9 @@ func Is(any *anypb.Any, m proto.Message) bool { // var x ptypes.DynamicAny // if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } // fmt.Printf("unmarshaled message: %v", x.Message) +// +// Deprecated: Use the any.UnmarshalNew method instead to unmarshal +// the any message contents into a new instance of the underlying message. type DynamicAny struct{ proto.Message } func (m DynamicAny) String() string { diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go index fb9edd5c6..d3c33259d 100644 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -3,4 +3,8 @@ // license that can be found in the LICENSE file. // Package ptypes provides functionality for interacting with well-known types. +// +// Deprecated: Well-known types have specialized functionality directly +// injected into the generated packages for each message type. +// See the deprecation notice for each function for the suggested alternative. package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go index 6110ae8a4..b2b55dd85 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -21,6 +21,8 @@ const ( // Duration converts a durationpb.Duration to a time.Duration. // Duration returns an error if dur is invalid or overflows a time.Duration. +// +// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead. func Duration(dur *durationpb.Duration) (time.Duration, error) { if err := validateDuration(dur); err != nil { return 0, err @@ -39,6 +41,8 @@ func Duration(dur *durationpb.Duration) (time.Duration, error) { } // DurationProto converts a time.Duration to a durationpb.Duration. +// +// Deprecated: Call the durationpb.New function instead. func DurationProto(d time.Duration) *durationpb.Duration { nanos := d.Nanoseconds() secs := nanos / 1e9 diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go index 026d0d491..8368a3f70 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -33,6 +33,8 @@ const ( // // A nil Timestamp returns an error. The first return value in that case is // undefined. +// +// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead. func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { // Don't return the zero value on error, because corresponds to a valid // timestamp. Instead return whatever time.Unix gives us. @@ -46,6 +48,8 @@ func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { } // TimestampNow returns a google.protobuf.Timestamp for the current time. +// +// Deprecated: Call the timestamppb.Now function instead. 
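As with any.go, the duration.go and timestamp.go notices point at methods and constructors on the well-known types themselves. The replacements in one sketch:

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/protobuf/types/known/durationpb"
        "google.golang.org/protobuf/types/known/timestamppb"
    )

    func main() {
        // timestamppb.Now replaces ptypes.TimestampNow.
        ts := timestamppb.Now()

        // ts.CheckValid plus ts.AsTime replace ptypes.Timestamp.
        if err := ts.CheckValid(); err != nil {
            panic(err)
        }
        // Formatting the time.Time replaces ptypes.TimestampString.
        fmt.Println(ts.AsTime().Format(time.RFC3339))

        // durationpb.New and d.AsDuration replace ptypes.DurationProto
        // and ptypes.Duration.
        d := durationpb.New(90 * time.Second)
        fmt.Println(d.AsDuration()) // 1m30s
    }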
func TimestampNow() *timestamppb.Timestamp { ts, err := TimestampProto(time.Now()) if err != nil { @@ -56,6 +60,8 @@ func TimestampNow() *timestamppb.Timestamp { // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. +// +// Deprecated: Call the timestamppb.New function instead. func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { ts := &timestamppb.Timestamp{ Seconds: t.Unix(), @@ -69,6 +75,9 @@ func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { // TimestampString returns the RFC 3339 string for valid Timestamps. // For invalid Timestamps, it returns an error message in parentheses. +// +// Deprecated: Call the ts.AsTime method instead, +// followed by a call to the Format method on the time.Time value. func TimestampString(ts *timestamppb.Timestamp) string { t, err := Timestamp(ts) if err != nil { diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS index bcfa19520..52ccb5a93 100644 --- a/vendor/github.com/golang/snappy/AUTHORS +++ b/vendor/github.com/golang/snappy/AUTHORS @@ -8,8 +8,11 @@ # Please keep the list sorted. +Amazon.com, Inc Damian Gryski +Eric Buth Google Inc. Jan Mercl <0xjnml@gmail.com> +Klaus Post Rodolfo Carvalho Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS index 931ae3160..ea6524ddd 100644 --- a/vendor/github.com/golang/snappy/CONTRIBUTORS +++ b/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -26,9 +26,13 @@ # Please keep the list sorted. +Alex Legg Damian Gryski +Eric Buth Jan Mercl <0xjnml@gmail.com> +Jonathan Swinney Kai Backman +Klaus Post Marc-Antoine Ruel Nigel Tao Rob Pike diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go index 72efb0353..23c6e26c6 100644 --- a/vendor/github.com/golang/snappy/decode.go +++ b/vendor/github.com/golang/snappy/decode.go @@ -52,6 +52,8 @@ const ( // Otherwise, a newly allocated slice will be returned. // // The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. func Decode(dst, src []byte) ([]byte, error) { dLen, s, err := decodedLen(src) if err != nil { @@ -83,6 +85,8 @@ func NewReader(r io.Reader) *Reader { } // Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. type Reader struct { r io.Reader err error @@ -114,32 +118,23 @@ func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { return true } -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - for { - if r.i < r.j { - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil - } +func (r *Reader) fill() error { + for r.i >= r.j { if !r.readFull(r.buf[:4], true) { - return 0, r.err + return r.err } chunkType := r.buf[0] if !r.readHeader { if chunkType != chunkTypeStreamIdentifier { r.err = ErrCorrupt - return 0, r.err + return r.err } r.readHeader = true } chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 if chunkLen > len(r.buf) { r.err = ErrUnsupported - return 0, r.err + return r.err } // The chunk types are specified at @@ -149,11 +144,11 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.2. Compressed data (chunk type 0x00).
if chunkLen < checksumSize { r.err = ErrCorrupt - return 0, r.err + return r.err } buf := r.buf[:chunkLen] if !r.readFull(buf, false) { - return 0, r.err + return r.err } checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 buf = buf[checksumSize:] @@ -161,19 +156,19 @@ func (r *Reader) Read(p []byte) (int, error) { n, err := DecodedLen(buf) if err != nil { r.err = err - return 0, r.err + return r.err } if n > len(r.decoded) { r.err = ErrCorrupt - return 0, r.err + return r.err } if _, err := Decode(r.decoded, buf); err != nil { r.err = err - return 0, r.err + return r.err } if crc(r.decoded[:n]) != checksum { r.err = ErrCorrupt - return 0, r.err + return r.err } r.i, r.j = 0, n continue @@ -182,25 +177,25 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.3. Uncompressed data (chunk type 0x01). if chunkLen < checksumSize { r.err = ErrCorrupt - return 0, r.err + return r.err } buf := r.buf[:checksumSize] if !r.readFull(buf, false) { - return 0, r.err + return r.err } checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 // Read directly into r.decoded instead of via r.buf. n := chunkLen - checksumSize if n > len(r.decoded) { r.err = ErrCorrupt - return 0, r.err + return r.err } if !r.readFull(r.decoded[:n], false) { - return 0, r.err + return r.err } if crc(r.decoded[:n]) != checksum { r.err = ErrCorrupt - return 0, r.err + return r.err } r.i, r.j = 0, n continue @@ -209,15 +204,15 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.1. Stream identifier (chunk type 0xff). if chunkLen != len(magicBody) { r.err = ErrCorrupt - return 0, r.err + return r.err } if !r.readFull(r.buf[:len(magicBody)], false) { - return 0, r.err + return r.err } for i := 0; i < len(magicBody); i++ { if r.buf[i] != magicBody[i] { r.err = ErrCorrupt - return 0, r.err + return r.err } } continue @@ -226,12 +221,44 @@ func (r *Reader) Read(p []byte) (int, error) { if chunkType <= 0x7f { // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). r.err = ErrUnsupported - return 0, r.err + return r.err } // Section 4.4 Padding (chunk type 0xfe). // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). if !r.readFull(r.buf[:chunkLen], false) { - return 0, r.err + return r.err } } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. +func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil } diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go deleted file mode 100644 index fcd192b84..000000000 --- a/vendor/github.com/golang/snappy/decode_amd64.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -package snappy - -// decode has the same semantics as in decode_other.go. 
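With the old Read loop factored into fill, Reader gains a ReadByte method and now satisfies io.ByteReader in addition to io.Reader. A small usage sketch of the stream-format API (the doc comments added above stress that Encode/Decode handle the block format, Reader/Writer the stream format):

    package main

    import (
        "bytes"
        "fmt"
        "io"

        "github.com/golang/snappy"
    )

    func main() {
        var buf bytes.Buffer

        // Writer emits the framed Snappy stream format.
        w := snappy.NewBufferedWriter(&buf)
        if _, err := w.Write([]byte("hello, snappy")); err != nil {
            panic(err)
        }
        if err := w.Close(); err != nil {
            panic(err)
        }

        // Reader consumes it, and can now also be passed anywhere an
        // io.ByteReader is required.
        r := snappy.NewReader(&buf)
        out, err := io.ReadAll(r)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out))

        // Encode/Decode, by contrast, work on raw blocks.
        block := snappy.Encode(nil, []byte("hello, snappy"))
        orig, _ := snappy.Decode(nil, block)
        fmt.Println(string(orig))
    }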
-// -//go:noescape -func decode(dst, src []byte) int diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_arm64.s similarity index 67% rename from vendor/github.com/klauspost/compress/snappy/decode_amd64.s rename to vendor/github.com/golang/snappy/decode_arm64.s index 1c66e3723..7a3ead17e 100644 --- a/vendor/github.com/klauspost/compress/snappy/decode_amd64.s +++ b/vendor/github.com/golang/snappy/decode_arm64.s @@ -1,4 +1,4 @@ -// Copyright 2016 The Go Authors. All rights reserved. +// Copyright 2020 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -15,12 +15,12 @@ // // All local variables fit into registers. The non-zero stack size is only to // spill registers and push args when issuing a CALL. The register allocation: -// - AX scratch -// - BX scratch -// - CX length or x -// - DX offset -// - SI &src[s] -// - DI &dst[d] +// - R2 scratch +// - R3 scratch +// - R4 length or x +// - R5 offset +// - R6 &src[s] +// - R7 &dst[d] // + R8 dst_base // + R9 dst_len // + R10 dst_base + dst_len @@ -33,34 +33,35 @@ // The registers R8-R13 (marked with a "+") are set at the start of the // function, and after a CALL returns, and are not otherwise modified. // -// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. -// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. -TEXT ·decode(SB), NOSPLIT, $48-56 - // Initialize SI, DI and R8-R13. - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, DI - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, SI - MOVQ R11, R13 - ADDQ R12, R13 +// The d variable is implicitly R7 - R8, and len(dst)-d is R10 - R7. +// The s variable is implicitly R6 - R11, and len(src)-s is R13 - R6. +TEXT ·decode(SB), NOSPLIT, $56-56 + // Initialize R6, R7 and R8-R13. + MOVD dst_base+0(FP), R8 + MOVD dst_len+8(FP), R9 + MOVD R8, R7 + MOVD R8, R10 + ADD R9, R10, R10 + MOVD src_base+24(FP), R11 + MOVD src_len+32(FP), R12 + MOVD R11, R6 + MOVD R11, R13 + ADD R12, R13, R13 loop: // for s < len(src) - CMPQ SI, R13 - JEQ end + CMP R13, R6 + BEQ end - // CX = uint32(src[s]) + // R4 = uint32(src[s]) // // switch src[s] & 0x03 - MOVBLZX (SI), CX - MOVL CX, BX - ANDL $3, BX - CMPL BX, $1 - JAE tagCopy + MOVBU (R6), R4 + MOVW R4, R3 + ANDW $3, R3 + MOVW $1, R1 + CMPW R1, R3 + BGE tagCopy // ---------------------------------------- // The code below handles literal tags. @@ -68,35 +69,36 @@ loop: // case tagLiteral: // x := uint32(src[s] >> 2) // switch - SHRL $2, CX - CMPL CX, $60 - JAE tagLit60Plus + MOVW $60, R1 + LSRW $2, R4, R4 + CMPW R4, R1 + BLS tagLit60Plus // case x < 60: // s++ - INCQ SI + ADD $1, R6, R6 doLit: // This is the end of the inner "switch", when we have a literal tag. // - // We assume that CX == x and x fits in a uint32, where x is the variable + // We assume that R4 == x and x fits in a uint32, where x is the variable // used in the pure Go decode_other.go code. // length = int(x) + 1 // // Unlike the pure Go code, we don't need to check if length <= 0 because - // CX can hold 64 bits, so the increment cannot overflow. - INCQ CX + // R4 can hold 64 bits, so the increment cannot overflow. + ADD $1, R4, R4 // Prepare to check if copying length bytes will run past the end of dst or // src. 
// - // AX = len(dst) - d - // BX = len(src) - s - MOVQ R10, AX - SUBQ DI, AX - MOVQ R13, BX - SUBQ SI, BX + // R2 = len(dst) - d + // R3 = len(src) - s + MOVD R10, R2 + SUB R7, R2, R2 + MOVD R13, R3 + SUB R6, R3, R3 // !!! Try a faster technique for short (16 or fewer bytes) copies. // @@ -109,12 +111,12 @@ doLit: // is contiguous in memory and so it needs to leave enough source bytes to // read the next tag without refilling buffers, but Go's Decode assumes // contiguousness (the src argument is a []byte). - CMPQ CX, $16 - JGT callMemmove - CMPQ AX, $16 - JLT callMemmove - CMPQ BX, $16 - JLT callMemmove + CMP $16, R4 + BGT callMemmove + CMP $16, R2 + BLT callMemmove + CMP $16, R3 + BLT callMemmove // !!! Implement the copy from src to dst as a 16-byte load and store. // (Decode's documentation says that dst and src must not overlap.) @@ -124,57 +126,57 @@ doLit: // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a // non-nil error), so the overrun will be ignored. // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or // 16-byte loads and stores. This technique probably wouldn't be as // effective on architectures that are fussier about alignment. - MOVOU 0(SI), X0 - MOVOU X0, 0(DI) + LDP 0(R6), (R14, R15) + STP (R14, R15), 0(R7) // d += length // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop + ADD R4, R7, R7 + ADD R4, R6, R6 + B loop callMemmove: // if length > len(dst)-d || length > len(src)-s { etc } - CMPQ CX, AX - JGT errCorrupt - CMPQ CX, BX - JGT errCorrupt + CMP R2, R4 + BGT errCorrupt + CMP R3, R4 + BGT errCorrupt // copy(dst[d:], src[s:s+length]) // // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // R7, R6 and R4 as arguments. Coincidentally, we also need to spill those // three registers to the stack, to save local variables across the CALL. - MOVQ DI, 0(SP) - MOVQ SI, 8(SP) - MOVQ CX, 16(SP) - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - MOVQ CX, 40(SP) + MOVD R7, 8(RSP) + MOVD R6, 16(RSP) + MOVD R4, 24(RSP) + MOVD R7, 32(RSP) + MOVD R6, 40(RSP) + MOVD R4, 48(RSP) CALL runtime·memmove(SB) // Restore local variables: unspill registers from the stack and // re-calculate R8-R13. - MOVQ 24(SP), DI - MOVQ 32(SP), SI - MOVQ 40(SP), CX - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, R13 - ADDQ R12, R13 + MOVD 32(RSP), R7 + MOVD 40(RSP), R6 + MOVD 48(RSP), R4 + MOVD dst_base+0(FP), R8 + MOVD dst_len+8(FP), R9 + MOVD R8, R10 + ADD R9, R10, R10 + MOVD src_base+24(FP), R11 + MOVD src_len+32(FP), R12 + MOVD R11, R13 + ADD R12, R13, R13 // d += length // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop + ADD R4, R7, R7 + ADD R4, R6, R6 + B loop tagLit60Plus: // !!! This fragment does the @@ -182,142 +184,150 @@ tagLit60Plus: // s += x - 58; if uint(s) > uint(len(src)) { etc } // // checks. In the asm version, we code it once instead of once per switch case. 
- ADDQ CX, SI - SUBQ $58, SI - CMPQ SI, R13 - JA errCorrupt + ADD R4, R6, R6 + SUB $58, R6, R6 + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt // case x == 60: - CMPL CX, $61 - JEQ tagLit61 - JA tagLit62Plus + MOVW $61, R1 + CMPW R1, R4 + BEQ tagLit61 + BGT tagLit62Plus // x = uint32(src[s-1]) - MOVBLZX -1(SI), CX - JMP doLit + MOVBU -1(R6), R4 + B doLit tagLit61: // case x == 61: // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVWLZX -2(SI), CX - JMP doLit + MOVHU -2(R6), R4 + B doLit tagLit62Plus: - CMPL CX, $62 - JA tagLit63 + CMPW $62, R4 + BHI tagLit63 // case x == 62: // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVWLZX -3(SI), CX - MOVBLZX -1(SI), BX - SHLL $16, BX - ORL BX, CX - JMP doLit + MOVHU -3(R6), R4 + MOVBU -1(R6), R3 + ORR R3<<16, R4 + B doLit tagLit63: // case x == 63: // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVL -4(SI), CX - JMP doLit + MOVWU -4(R6), R4 + B doLit -// The code above handles literal tags. -// ---------------------------------------- -// The code below handles copy tags. + // The code above handles literal tags. + // ---------------------------------------- + // The code below handles copy tags. tagCopy4: // case tagCopy4: // s += 5 - ADDQ $5, SI + ADD $5, R6, R6 // if uint(s) > uint(len(src)) { etc } - CMPQ SI, R13 - JA errCorrupt + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt // length = 1 + int(src[s-5])>>2 - SHRQ $2, CX - INCQ CX + MOVD $1, R1 + ADD R4>>2, R1, R4 // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVLQZX -4(SI), DX - JMP doCopy + MOVWU -4(R6), R5 + B doCopy tagCopy2: // case tagCopy2: // s += 3 - ADDQ $3, SI + ADD $3, R6, R6 // if uint(s) > uint(len(src)) { etc } - CMPQ SI, R13 - JA errCorrupt + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt // length = 1 + int(src[s-3])>>2 - SHRQ $2, CX - INCQ CX + MOVD $1, R1 + ADD R4>>2, R1, R4 // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVWQZX -2(SI), DX - JMP doCopy + MOVHU -2(R6), R5 + B doCopy tagCopy: // We have a copy tag. We assume that: - // - BX == src[s] & 0x03 - // - CX == src[s] - CMPQ BX, $2 - JEQ tagCopy2 - JA tagCopy4 + // - R3 == src[s] & 0x03 + // - R4 == src[s] + CMP $2, R3 + BEQ tagCopy2 + BGT tagCopy4 // case tagCopy1: // s += 2 - ADDQ $2, SI + ADD $2, R6, R6 // if uint(s) > uint(len(src)) { etc } - CMPQ SI, R13 - JA errCorrupt + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - MOVQ CX, DX - ANDQ $0xe0, DX - SHLQ $3, DX - MOVBQZX -1(SI), BX - ORQ BX, DX + MOVD R4, R5 + AND $0xe0, R5 + MOVBU -1(R6), R3 + ORR R5<<3, R3, R5 // length = 4 + int(src[s-2])>>2&0x7 - SHRQ $2, CX - ANDQ $7, CX - ADDQ $4, CX + MOVD $7, R1 + AND R4>>2, R1, R4 + ADD $4, R4, R4 doCopy: // This is the end of the outer "switch", when we have a copy tag. 
// // We assume that: - // - CX == length && CX > 0 - // - DX == offset + // - R4 == length && R4 > 0 + // - R5 == offset // if offset <= 0 { etc } - CMPQ DX, $0 - JLE errCorrupt + MOVD $0, R1 + CMP R1, R5 + BLE errCorrupt // if d < offset { etc } - MOVQ DI, BX - SUBQ R8, BX - CMPQ BX, DX - JLT errCorrupt + MOVD R7, R3 + SUB R8, R3, R3 + CMP R5, R3 + BLT errCorrupt // if length > len(dst)-d { etc } - MOVQ R10, BX - SUBQ DI, BX - CMPQ CX, BX - JGT errCorrupt + MOVD R10, R3 + SUB R7, R3, R3 + CMP R3, R4 + BGT errCorrupt // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length // // Set: // - R14 = len(dst)-d // - R15 = &dst[d-offset] - MOVQ R10, R14 - SUBQ DI, R14 - MOVQ DI, R15 - SUBQ DX, R15 + MOVD R10, R14 + SUB R7, R14, R14 + MOVD R7, R15 + SUB R5, R15, R15 // !!! Try a faster technique for short (16 or fewer bytes) forward copies. // @@ -332,18 +342,18 @@ doCopy: // } // copy 16 bytes // d += length - CMPQ CX, $16 - JGT slowForwardCopy - CMPQ DX, $8 - JLT slowForwardCopy - CMPQ R14, $16 - JLT slowForwardCopy - MOVQ 0(R15), AX - MOVQ AX, 0(DI) - MOVQ 8(R15), BX - MOVQ BX, 8(DI) - ADDQ CX, DI - JMP loop + CMP $16, R4 + BGT slowForwardCopy + CMP $8, R5 + BLT slowForwardCopy + CMP $16, R14 + BLT slowForwardCopy + MOVD 0(R15), R2 + MOVD R2, 0(R7) + MOVD 8(R15), R3 + MOVD R3, 8(R7) + ADD R4, R7, R7 + B loop slowForwardCopy: // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we @@ -394,9 +404,9 @@ slowForwardCopy: // if length > len(dst)-d-10 { // goto verySlowForwardCopy // } - SUBQ $10, R14 - CMPQ CX, R14 - JGT verySlowForwardCopy + SUB $10, R14, R14 + CMP R14, R4 + BGT verySlowForwardCopy makeOffsetAtLeast8: // !!! As above, expand the pattern so that offset >= 8 and we can use @@ -410,36 +420,37 @@ makeOffsetAtLeast8: // // The two previous lines together means that d-offset, and therefore // // R15, is unchanged. // } - CMPQ DX, $8 - JGE fixUpSlowForwardCopy - MOVQ (R15), BX - MOVQ BX, (DI) - SUBQ DX, CX - ADDQ DX, DI - ADDQ DX, DX - JMP makeOffsetAtLeast8 + CMP $8, R5 + BGE fixUpSlowForwardCopy + MOVD (R15), R3 + MOVD R3, (R7) + SUB R5, R4, R4 + ADD R5, R7, R7 + ADD R5, R5, R5 + B makeOffsetAtLeast8 fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by DI being + // !!! Add length (which might be negative now) to d (implied by R7 being // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save DI to AX so that, if + // top of the loop. Before we do that, though, we save R7 to R2 so that, if // length is positive, copying the remaining length bytes will write to the // right place. - MOVQ DI, AX - ADDQ CX, DI + MOVD R7, R2 + ADD R4, R7, R7 finishSlowForwardCopy: // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative // length means that we overrun, but as above, that will be fixed up by // subsequent iterations of the outermost loop. - CMPQ CX, $0 - JLE loop - MOVQ (R15), BX - MOVQ BX, (AX) - ADDQ $8, R15 - ADDQ $8, AX - SUBQ $8, CX - JMP finishSlowForwardCopy + MOVD $0, R1 + CMP R1, R4 + BLE loop + MOVD (R15), R3 + MOVD R3, (R2) + ADD $8, R15, R15 + ADD $8, R2, R2 + SUB $8, R4, R4 + B finishSlowForwardCopy verySlowForwardCopy: // verySlowForwardCopy is a simple implementation of forward copy. In C @@ -454,29 +465,30 @@ verySlowForwardCopy: // break // } // } - MOVB (R15), BX - MOVB BX, (DI) - INCQ R15 - INCQ DI - DECQ CX - JNZ verySlowForwardCopy - JMP loop - -// The code above handles copy tags. 
-// ---------------------------------------- + MOVB (R15), R3 + MOVB R3, (R7) + ADD $1, R15, R15 + ADD $1, R7, R7 + SUB $1, R4, R4 + CBNZ R4, verySlowForwardCopy + B loop + + // The code above handles copy tags. + // ---------------------------------------- end: // This is the end of the "for s < len(src)". // // if d != len(dst) { etc } - CMPQ DI, R10 - JNE errCorrupt + CMP R10, R7 + BNE errCorrupt // return 0 - MOVQ $0, ret+48(FP) + MOVD $0, ret+48(FP) RET errCorrupt: // return decodeErrCodeCorrupt - MOVQ $1, ret+48(FP) + MOVD $1, R2 + MOVD R2, ret+48(FP) RET diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_asm.go similarity index 93% rename from vendor/github.com/klauspost/compress/snappy/decode_amd64.go rename to vendor/github.com/golang/snappy/decode_asm.go index fcd192b84..7082b3491 100644 --- a/vendor/github.com/klauspost/compress/snappy/decode_amd64.go +++ b/vendor/github.com/golang/snappy/decode_asm.go @@ -5,6 +5,7 @@ // +build !appengine // +build gc // +build !noasm +// +build amd64 arm64 package snappy diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go index 8c9f2049b..2f672be55 100644 --- a/vendor/github.com/golang/snappy/decode_other.go +++ b/vendor/github.com/golang/snappy/decode_other.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64 appengine !gc noasm +// +build !amd64,!arm64 appengine !gc noasm package snappy @@ -85,14 +85,28 @@ func decode(dst, src []byte) int { if offset <= 0 || d < offset || length > len(dst)-d { return decodeErrCodeCorrupt } - // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike - // the built-in copy function, this byte-by-byte copy always runs + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs // forwards, even if the slices overlap. Conceptually, this is: // // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - for end := d + length; d != end; d++ { - dst[d] = dst[d-offset] + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] } + d += length } if d != len(dst) { return decodeErrCodeCorrupt diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go index 8d393e904..7f2365707 100644 --- a/vendor/github.com/golang/snappy/encode.go +++ b/vendor/github.com/golang/snappy/encode.go @@ -15,6 +15,8 @@ import ( // Otherwise, a newly allocated slice will be returned. // // The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. func Encode(dst, src []byte) []byte { if n := MaxEncodedLen(len(src)); n < 0 { panic(ErrTooLarge) @@ -139,6 +141,8 @@ func NewBufferedWriter(w io.Writer) *Writer { } // Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. 
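The decode_other.go hunk above splits the history copy into two paths: the built-in copy when source and destination do not overlap, and a forward byte loop, with both slices cut to the same length so the compiler can elide bounds checks, when they do. The same idea in isolation:

    // forwardCopy copies length bytes within dst from d-offset to d.
    // It runs strictly forwards, so an overlapping source repeats its
    // pattern (offset 1, for example, smears a single byte).
    func forwardCopy(dst []byte, d, offset, length int) {
        if offset >= length {
            // No overlap: the built-in copy is correct and fast.
            copy(dst[d:d+length], dst[d-offset:])
            return
        }
        // Overlap: byte-by-byte. Re-slicing a and b to equal lengths
        // lets the loop body run without bounds checks.
        a := dst[d : d+length]
        b := dst[d-offset:]
        b = b[:len(a)]
        for i := range a {
            a[i] = b[i]
        }
    }

For instance, with dst = []byte("ab......"), forwardCopy(dst, 2, 2, 6) yields "abababab".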
type Writer struct { w io.Writer err error diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s new file mode 100644 index 000000000..f8d54adfc --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_arm64.s @@ -0,0 +1,722 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - R3 len(lit) +// - R4 n +// - R6 return value +// - R8 &dst[i] +// - R10 &lit[0] +// +// The 32 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $32-56 + MOVD dst_base+0(FP), R8 + MOVD lit_base+24(FP), R10 + MOVD lit_len+32(FP), R3 + MOVD R3, R6 + MOVW R3, R4 + SUBW $1, R4, R4 + + CMPW $60, R4 + BLT oneByte + CMPW $256, R4 + BLT twoBytes + +threeBytes: + MOVD $0xf4, R2 + MOVB R2, 0(R8) + MOVW R4, 1(R8) + ADD $3, R8, R8 + ADD $3, R6, R6 + B memmove + +twoBytes: + MOVD $0xf0, R2 + MOVB R2, 0(R8) + MOVB R4, 1(R8) + ADD $2, R8, R8 + ADD $2, R6, R6 + B memmove + +oneByte: + LSLW $2, R4, R4 + MOVB R4, 0(R8) + ADD $1, R8, R8 + ADD $1, R6, R6 + +memmove: + MOVD R6, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // R8, R10 and R3 as arguments. + MOVD R8, 8(RSP) + MOVD R10, 16(RSP) + MOVD R3, 24(RSP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - R3 length +// - R7 &dst[0] +// - R8 &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVD dst_base+0(FP), R8 + MOVD R8, R7 + MOVD offset+24(FP), R11 + MOVD length+32(FP), R3 + +loop0: + // for length >= 68 { etc } + CMPW $68, R3 + BLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVD $0xfe, R2 + MOVB R2, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUB $64, R3, R3 + B loop0 + +step1: + // if length > 64 { etc } + CMP $64, R3 + BLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVD $0xee, R2 + MOVB R2, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUB $60, R3, R3 + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMP $12, R3 + BGE step3 + CMPW $2048, R11 + BGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(R8) + LSRW $3, R11, R11 + AND $0xe0, R11, R11 + SUB $4, R3, R3 + LSLW $2, R3 + AND $0xff, R3, R3 + ORRW R3, R11, R11 + ORRW $1, R11, R11 + MOVB R11, 0(R8) + ADD $2, R8, R8 + + // Return the number of bytes written. + SUB R7, R8, R8 + MOVD R8, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. 
+ SUB $1, R3, R3 + AND $0xff, R3, R3 + LSLW $2, R3, R3 + ORRW $2, R3, R3 + MOVB R3, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + + // Return the number of bytes written. + SUB R7, R8, R8 + MOVD R8, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - R6 &src[0] +// - R7 &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVD src_base+0(FP), R6 + MOVD src_len+8(FP), R14 + MOVD i+24(FP), R15 + MOVD j+32(FP), R7 + ADD R6, R14, R14 + ADD R6, R15, R15 + ADD R6, R7, R7 + MOVD R14, R13 + SUB $8, R13, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMP R13, R7 + BHI cmp1 + MOVD (R15), R3 + MOVD (R7), R4 + CMP R4, R3 + BNE bsf + ADD $8, R15, R15 + ADD $8, R7, R7 + B cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. + // RBIT reverses the bit order, then CLZ counts the leading zeros, the + // combination of which finds the least significant bit which is set. + // The arm64 architecture is little-endian, and the shift by 3 converts + // a bit index to a byte index. + EOR R3, R4, R4 + RBIT R4, R4 + CLZ R4, R4 + ADD R4>>3, R7, R7 + + // Convert from &src[ret] to ret. + SUB R6, R7, R7 + MOVD R7, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMP R7, R14 + BLS extendMatchEnd + MOVB (R15), R3 + MOVB (R7), R4 + CMP R4, R3 + BNE extendMatchEnd + ADD $1, R15, R15 + ADD $1, R7, R7 + B cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUB R6, R7, R7 + MOVD R7, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - R3 . . +// - R4 . . +// - R5 64 shift +// - R6 72 &src[0], tableSize +// - R7 80 &src[s] +// - R8 88 &dst[d] +// - R9 96 sLimit +// - R10 . &src[nextEmit] +// - R11 104 prevHash, currHash, nextHash, offset +// - R12 112 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 120 candidate +// - R16 . hash constant, 0x1e35a7bd +// - R17 . &table +// - . 128 table +// +// The second column (64, 72, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 64 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 64 + 64 = 32896. 
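The RBIT+CLZ sequence in extendMatch's bsf block is the arm64 spelling of: XOR the two 8-byte words, locate the least significant set bit, and shift the bit index right by 3 to get a byte index (valid because the loads are little-endian). A pure-Go equivalent using math/bits:

    package main

    import (
        "encoding/binary"
        "fmt"
        "math/bits"
    )

    // firstDiff returns the index of the first byte at which two
    // 8-byte groups differ; the groups must not be equal.
    func firstDiff(p, q []byte) int {
        x := binary.LittleEndian.Uint64(p)
        y := binary.LittleEndian.Uint64(q)
        // The lowest set bit of x^y falls inside the first differing
        // byte; bit index >> 3 is that byte's index.
        return bits.TrailingZeros64(x^y) >> 3
    }

    func main() {
        fmt.Println(firstDiff([]byte("snappy!!"), []byte("snapPY!!"))) // 4
    }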
+TEXT ·encodeBlock(SB), 0, $32896-56 + MOVD dst_base+0(FP), R8 + MOVD src_base+24(FP), R7 + MOVD src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVD $24, R5 + MOVD $256, R6 + MOVW $0xa7bd, R16 + MOVKW $(0x1e35<<16), R16 + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + MOVD $16384, R2 + CMP R2, R6 + BGE varTable + CMP R14, R6 + BGE varTable + SUB $1, R5, R5 + LSL $1, R6, R6 + B calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each + // iterations writes 64 bytes, so we can do only tableSize/32 writes + // instead of the 2048 writes that would zero-initialize all of table's + // 32768 bytes. This clear could overrun the first tableSize elements, but + // it won't overrun the allocated stack size. + ADD $128, RSP, R17 + MOVD R17, R4 + + // !!! R6 = &src[tableSize] + ADD R6<<1, R17, R6 + +memclr: + STP.P (ZR, ZR), 64(R4) + STP (ZR, ZR), -48(R4) + STP (ZR, ZR), -32(R4) + STP (ZR, ZR), -16(R4) + CMP R4, R6 + BHI memclr + + // !!! R6 = &src[0] + MOVD R7, R6 + + // sLimit := len(src) - inputMargin + MOVD R14, R9 + SUB $15, R9, R9 + + // !!! Pre-emptively spill R5, R6 and R9 to the stack. Their values don't + // change for the rest of the function. + MOVD R5, 64(RSP) + MOVD R6, 72(RSP) + MOVD R9, 96(RSP) + + // nextEmit := 0 + MOVD R6, R10 + + // s := 1 + ADD $1, R7, R7 + + // nextHash := hash(load32(src, s), shift) + MOVW 0(R7), R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + +outer: + // for { etc } + + // skip := 32 + MOVD $32, R12 + + // nextS := s + MOVD R7, R13 + + // candidate := 0 + MOVD $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVD R13, R7 + + // bytesBetweenHashLookups := skip >> 5 + MOVD R12, R14 + LSR $5, R14, R14 + + // nextS = s + bytesBetweenHashLookups + ADD R14, R13, R13 + + // skip += bytesBetweenHashLookups + ADD R14, R12, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVD R13, R3 + SUB R6, R3, R3 + CMP R9, R3 + BHI emitRemainder + + // candidate = int(table[nextHash]) + MOVHU 0(R17)(R11<<1), R15 + + // table[nextHash] = uint16(s) + MOVD R7, R3 + SUB R6, R3, R3 + + MOVH R3, 0(R17)(R11<<1) + + // nextHash = hash(load32(src, nextS), shift) + MOVW 0(R13), R11 + MULW R16, R11 + LSRW R5, R11, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVW 0(R7), R3 + MOVW (R6)(R15), R4 + CMPW R4, R3 + BNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVD R7, R3 + SUB R10, R3, R3 + CMP $16, R3 + BLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. 
+ // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVW R3, R4 + SUBW $1, R4, R4 + + MOVW $60, R2 + CMPW R2, R4 + BLT inlineEmitLiteralOneByte + MOVW $256, R2 + CMPW R2, R4 + BLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVD $0xf4, R1 + MOVB R1, 0(R8) + MOVW R4, 1(R8) + ADD $3, R8, R8 + B inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVD $0xf0, R1 + MOVB R1, 0(R8) + MOVB R4, 1(R8) + ADD $2, R8, R8 + B inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + LSLW $2, R4, R4 + MOVB R4, 0(R8) + ADD $1, R8, R8 + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // R8, R10 and R3 as arguments. + MOVD R8, 8(RSP) + MOVD R10, 16(RSP) + MOVD R3, 24(RSP) + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADD R3, R8, R8 + MOVD R7, 80(RSP) + MOVD R8, 88(RSP) + MOVD R15, 120(RSP) + CALL runtime·memmove(SB) + MOVD 64(RSP), R5 + MOVD 72(RSP), R6 + MOVD 80(RSP), R7 + MOVD 88(RSP), R8 + MOVD 96(RSP), R9 + MOVD 120(RSP), R15 + ADD $128, RSP, R17 + MOVW $0xa7bd, R16 + MOVKW $(0x1e35<<16), R16 + B inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB R3, R4 + SUBW $1, R4, R4 + AND $0xff, R4, R4 + LSLW $2, R4, R4 + MOVB R4, (R8) + ADD $1, R8, R8 + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + LDP 0(R10), (R0, R1) + STP (R0, R1), 0(R8) + ADD R3, R8, R8 + +inner1: + // for { etc } + + // base := s + MOVD R7, R12 + + // !!! offset := base - candidate + MOVD R12, R11 + SUB R15, R11, R11 + SUB R6, R11, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVD src_len+32(FP), R14 + ADD R6, R14, R14 + + // !!! R13 = &src[len(src) - 8] + MOVD R14, R13 + SUB $8, R13, R13 + + // !!! R15 = &src[candidate + 4] + ADD $4, R15, R15 + ADD R6, R15, R15 + + // !!! s += 4 + ADD $4, R7, R7 + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMP R13, R7 + BHI inlineExtendMatchCmp1 + MOVD (R15), R3 + MOVD (R7), R4 + CMP R4, R3 + BNE inlineExtendMatchBSF + ADD $8, R15, R15 + ADD $8, R7, R7 + B inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. + // RBIT reverses the bit order, then CLZ counts the leading zeros, the + // combination of which finds the least significant bit which is set. + // The arm64 architecture is little-endian, and the shift by 3 converts + // a bit index to a byte index. + EOR R3, R4, R4 + RBIT R4, R4 + CLZ R4, R4 + ADD R4>>3, R7, R7 + B inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. 
+ CMP R7, R14 + BLS inlineExtendMatchEnd + MOVB (R15), R3 + MOVB (R7), R4 + CMP R4, R3 + BNE inlineExtendMatchEnd + ADD $1, R15, R15 + ADD $1, R7, R7 + B inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVD R7, R3 + SUB R12, R3, R3 + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + MOVW $68, R2 + CMPW R2, R3 + BLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVD $0xfe, R1 + MOVB R1, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUBW $64, R3, R3 + B inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + MOVW $64, R2 + CMPW R2, R3 + BLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVD $0xee, R1 + MOVB R1, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUBW $60, R3, R3 + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + MOVW $12, R2 + CMPW R2, R3 + BGE inlineEmitCopyStep3 + MOVW $2048, R2 + CMPW R2, R11 + BGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(R8) + LSRW $8, R11, R11 + LSLW $5, R11, R11 + SUBW $4, R3, R3 + AND $0xff, R3, R3 + LSLW $2, R3, R3 + ORRW R3, R11, R11 + ORRW $1, R11, R11 + MOVB R11, 0(R8) + ADD $2, R8, R8 + B inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBW $1, R3, R3 + LSLW $2, R3, R3 + ORRW $2, R3, R3 + MOVB R3, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVD R7, R10 + + // if s >= sLimit { goto emitRemainder } + MOVD R7, R3 + SUB R6, R3, R3 + CMP R3, R9 + BLS emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. + + // x := load64(src, s-1) + MOVD -1(R7), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // table[prevHash] = uint16(s-1) + MOVD R7, R3 + SUB R6, R3, R3 + SUB $1, R3, R3 + + MOVHU R3, 0(R17)(R11<<1) + + // currHash := hash(uint32(x>>8), shift) + LSR $8, R14, R14 + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // candidate = int(table[currHash]) + MOVHU 0(R17)(R11<<1), R15 + + // table[currHash] = uint16(s) + ADD $1, R3, R3 + MOVHU R3, 0(R17)(R11<<1) + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVW (R6)(R15), R4 + CMPW R4, R14 + BEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + LSR $8, R14, R14 + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // s++ + ADD $1, R7, R7 + + // break out of the inner1 for loop, i.e. continue the outer loop. + B outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVD src_len+32(FP), R3 + ADD R6, R3, R3 + CMP R3, R10 + BEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVD R8, 8(RSP) + MOVD $0, 16(RSP) // Unnecessary, as the callee ignores it, but conservative. + MOVD $0, 24(RSP) // Unnecessary, as the callee ignores it, but conservative. + MOVD R10, 32(RSP) + SUB R10, R3, R3 + MOVD R3, 40(RSP) + MOVD R3, 48(RSP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. 
+ MOVD R8, 88(RSP) + CALL ·emitLiteral(SB) + MOVD 88(RSP), R8 + + // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVD 56(RSP), R1 + ADD R1, R8, R8 + +encodeBlockEnd: + MOVD dst_base+0(FP), R3 + SUB R3, R8, R8 + MOVD R8, d+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_asm.go similarity index 97% rename from vendor/github.com/golang/snappy/encode_amd64.go rename to vendor/github.com/golang/snappy/encode_asm.go index 150d91bc8..107c1e714 100644 --- a/vendor/github.com/golang/snappy/encode_amd64.go +++ b/vendor/github.com/golang/snappy/encode_asm.go @@ -5,6 +5,7 @@ // +build !appengine // +build gc // +build !noasm +// +build amd64 arm64 package snappy diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go index dbcae905e..296d7f0be 100644 --- a/vendor/github.com/golang/snappy/encode_other.go +++ b/vendor/github.com/golang/snappy/encode_other.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64 appengine !gc noasm +// +build !amd64,!arm64 appengine !gc noasm package snappy diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index 86d0903b8..fd2b3a42b 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -36,11 +36,12 @@ import ( "strings" "github.com/google/go-cmp/cmp/internal/diff" - "github.com/google/go-cmp/cmp/internal/flags" "github.com/google/go-cmp/cmp/internal/function" "github.com/google/go-cmp/cmp/internal/value" ) +// TODO(≥go1.18): Use any instead of interface{}. + // Equal reports whether x and y are equal by recursively applying the // following rules in the given order to x and y and all of their sub-values: // @@ -319,7 +320,6 @@ func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { } func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { - v = sanitizeValue(v, f.Type().In(0)) if !s.dynChecker.Next() { return f.Call([]reflect.Value{v})[0] } @@ -343,8 +343,6 @@ func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { } func (s *state) callTTBFunc(f, x, y reflect.Value) bool { - x = sanitizeValue(x, f.Type().In(0)) - y = sanitizeValue(y, f.Type().In(1)) if !s.dynChecker.Next() { return f.Call([]reflect.Value{x, y})[0].Bool() } @@ -372,19 +370,6 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { ret = f.Call(vs)[0] } -// sanitizeValue converts nil interfaces of type T to those of type R, -// assuming that T is assignable to R. -// Otherwise, it returns the input value as is. -func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { - // TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143). 
- if !flags.AtLeastGo110 { - if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { - return reflect.New(t).Elem() - } - } - return v -} - func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { var addr bool var vax, vay reflect.Value // Addressable versions of vx and vy diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go index 5ff0b4218..ae851fe53 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_panic.go +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego // +build purego package cmp diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go index 21eb54858..e2c0f74e8 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego // +build !purego package cmp diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go index 1daaaacc5..36062a604 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !cmp_debug // +build !cmp_debug package diff diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go index 4b91dbcac..a3b97a1ad 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build cmp_debug // +build cmp_debug package diff diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go deleted file mode 100644 index 82d1d7fbf..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.10 - -package flags - -// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. -const AtLeastGo110 = false diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go deleted file mode 100644 index 8646f0529..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.10 - -package flags - -// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. 
-const AtLeastGo110 = true diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go index b6c12cefb..7b498bb2c 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go @@ -9,6 +9,8 @@ import ( "strconv" ) +var anyType = reflect.TypeOf((*interface{})(nil)).Elem() + // TypeString is nearly identical to reflect.Type.String, // but has an additional option to specify that full type names be used. func TypeString(t reflect.Type, qualified bool) string { @@ -20,6 +22,11 @@ func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte // of the same name and within the same package, // but declared within the namespace of different functions. + // Use the "any" alias instead of "interface{}" for better readability. + if t == anyType { + return append(b, "any"...) + } + // Named type. if t.Name() != "" { if qualified && t.PkgPath() != "" { diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go index 44f4a5afd..1a71bfcbd 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego // +build purego package value diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go index a605953d4..16e6860af 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego // +build !purego package value diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index f01eff318..c71003463 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -178,7 +178,7 @@ type structField struct { unexported bool mayForce bool // Forcibly allow visibility paddr bool // Was parent addressable? - pvx, pvy reflect.Value // Parent values (always addressible) + pvx, pvy reflect.Value // Parent values (always addressable) field reflect.StructField // Field information } diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go index 104bb3053..1ef65ac1d 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -116,7 +116,10 @@ func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out } // For leaf nodes, format the value based on the reflect.Values alone. - if v.MaxDepth == 0 { + // As a special case, treat equal []byte as a leaf nodes. + isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == reflect.TypeOf(byte(0)) + isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0 + if v.MaxDepth == 0 || isEqualBytes { switch opts.DiffMode { case diffUnknown, diffIdentical: // Format Equal. 
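
The go-cmp changes above remove the pre-Go 1.10 `sanitizeValue` workaround along with its `AtLeastGo110` toolchain flags, and teach the reporter to render the empty interface type as `any`. A minimal sketch of the new rendering, purely illustrative (the exact diff output shape is not reproduced here):

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

func main() {
	x := map[string]any{"a": 1}
	y := map[string]any{"a": 2}

	// With this go-cmp version the reporter prints the element type
	// as "any" rather than "interface {}" in the diff output.
	fmt.Println(cmp.Diff(x, y))
}
```
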
diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 33f03577f..287b89358 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -207,10 +207,11 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, // Check whether this is a []byte of text data. if t.Elem() == reflect.TypeOf(byte(0)) { b := v.Bytes() - isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) && unicode.IsSpace(r) } + isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) } if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { out = opts.formatString("", string(b)) - return opts.WithTypeMode(emitType).FormatType(t, out) + skipType = true + return opts.FormatType(t, out) } } @@ -281,7 +282,12 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } defer ptrs.Pop() - skipType = true // Let the underlying value print the type instead + // Skip the name only if this is an unnamed pointer type. + // Otherwise taking the address of a value does not reproduce + // the named pointer type. + if v.Type().Name() == "" { + skipType = true // Let the underlying value print the type instead + } out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) out = &textWrap{Prefix: "&", Value: out} @@ -292,7 +298,6 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } // Interfaces accept different concrete types, // so configure the underlying value to explicitly print the type. - skipType = true // Print the concrete type instead return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) default: panic(fmt.Sprintf("%v kind not handled", v.Kind())) diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index 2ad3bc85b..68b5c1ae1 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -80,7 +80,7 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { } // Use specialized string diffing for longer slices or strings. - const minLength = 64 + const minLength = 32 return vx.Len() >= minLength && vy.Len() >= minLength } @@ -563,10 +563,10 @@ func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []d nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified ny := ds.NumIdentical + ds.NumInserted + ds.NumModified var numLeadingIdentical, numTrailingIdentical int - for i := 0; i < nx && i < ny && eq(ix+i, iy+i); i++ { + for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ { numLeadingIdentical++ } - for i := 0; i < nx && i < ny && eq(ix+nx-1-i, iy+ny-1-i); i++ { + for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ { numTrailingIdentical++ } if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 { diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go index a733bef18..44e368e56 100644 --- a/vendor/github.com/hashicorp/errwrap/errwrap.go +++ b/vendor/github.com/hashicorp/errwrap/errwrap.go @@ -44,6 +44,8 @@ func Wrap(outer, inner error) error { // // format is the format of the error message. The string '{{err}}' will // be replaced with the original error message. 
+// +// Deprecated: Use fmt.Errorf() func Wrapf(format string, err error) error { outerMsg := "" if err != nil { @@ -148,6 +150,9 @@ func Walk(err error, cb WalkFunc) { for _, err := range e.WrappedErrors() { Walk(err, cb) } + case interface{ Unwrap() error }: + cb(err) + Walk(e.Unwrap(), cb) default: cb(err) } @@ -167,3 +172,7 @@ func (w *wrappedError) Error() string { func (w *wrappedError) WrappedErrors() []error { return []error{w.Outer, w.Inner} } + +func (w *wrappedError) Unwrap() error { + return w.Inner +} diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md index 5d56f4b59..21a17c5af 100644 --- a/vendor/github.com/hashicorp/go-hclog/README.md +++ b/vendor/github.com/hashicorp/go-hclog/README.md @@ -17,11 +17,8 @@ JSON output mode for production. ## Stability Note -While this library is fully open source and HashiCorp will be maintaining it -(since we are and will be making extensive use of it), the API and output -format is subject to minor changes as we fully bake and vet it in our projects. -This notice will be removed once it's fully integrated into our major projects -and no further changes are anticipated. +This library has reached 1.0 stability. Its API can be considered solidified +and promised through future versions. ## Installation and Docs @@ -102,7 +99,7 @@ into all the callers. ### Using `hclog.Fmt()` ```go -var int totalBandwidth = 200 +totalBandwidth := 200 appLogger.Info("total bandwidth exceeded", "bandwidth", hclog.Fmt("%d GB/s", totalBandwidth)) ``` @@ -146,3 +143,6 @@ log.Printf("[DEBUG] %d", 42) Notice that if `appLogger` is initialized with the `INFO` log level _and_ you specify `InferLevels: true`, you will not see any output here. You must change `appLogger` to `DEBUG` to see output. See the docs for more information. + +If the log lines start with a timestamp you can use the +`InferLevelsWithTimestamp` option to try and ignore them. 
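
The `InferLevelsWithTimestamp` option mentioned in the README hunk above is wired through `StandardLoggerOptions` later in this diff. A minimal sketch of how a standard-library logger opts in; the logger name and message are illustrative:

```go
package main

import (
	"github.com/hashicorp/go-hclog"
)

func main() {
	appLogger := hclog.New(&hclog.LoggerOptions{
		Name:  "my-app",
		Level: hclog.Debug,
	})

	// Route a stdlib *log.Logger through hclog. With the new option,
	// a leading timestamp is stripped before the [DEBUG]/[INFO]/...
	// prefix is detected and re-applied at the inferred level.
	stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{
		InferLevels:              true,
		InferLevelsWithTimestamp: true,
	})

	stdLogger.Println("2022/07/22 12:00:00 [DEBUG] connection established")
}
```
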
diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go index 44aa9bf2c..9635c838b 100644 --- a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go +++ b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package hclog @@ -21,6 +22,7 @@ func (l *intLogger) setColorization(opts *LoggerOptions) { isCygwinTerm := isatty.IsCygwinTerminal(fi.Fd()) isTerm := isUnixTerm || isCygwinTerm if !isTerm { + l.headerColor = ColorOff l.writer.color = ColorOff } } diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go index 23486b6d7..30859168e 100644 --- a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go +++ b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package hclog @@ -26,8 +27,12 @@ func (l *intLogger) setColorization(opts *LoggerOptions) { isTerm := isUnixTerm || isCygwinTerm if !isTerm { l.writer.color = ColorOff + l.headerColor = ColorOff return } - l.writer.w = colorable.NewColorable(fi) + + if l.headerColor == ColorOff { + l.writer.w = colorable.NewColorable(fi) + } } } diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go index 22ebc57d8..b9f00217c 100644 --- a/vendor/github.com/hashicorp/go-hclog/global.go +++ b/vendor/github.com/hashicorp/go-hclog/global.go @@ -2,6 +2,7 @@ package hclog import ( "sync" + "time" ) var ( @@ -14,6 +15,7 @@ var ( DefaultOptions = &LoggerOptions{ Level: DefaultLevel, Output: DefaultOutput, + TimeFn: time.Now, } ) diff --git a/vendor/github.com/hashicorp/go-hclog/go.mod b/vendor/github.com/hashicorp/go-hclog/go.mod index b6698c083..3d02298c8 100644 --- a/vendor/github.com/hashicorp/go-hclog/go.mod +++ b/vendor/github.com/hashicorp/go-hclog/go.mod @@ -2,11 +2,11 @@ module github.com/hashicorp/go-hclog require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/fatih/color v1.7.0 - github.com/mattn/go-colorable v0.1.4 - github.com/mattn/go-isatty v0.0.10 - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 + github.com/fatih/color v1.13.0 + github.com/mattn/go-colorable v0.1.12 + github.com/mattn/go-isatty v0.0.14 + github.com/stretchr/testify v1.7.2 + golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 // indirect ) go 1.13 diff --git a/vendor/github.com/hashicorp/go-hclog/go.sum b/vendor/github.com/hashicorp/go-hclog/go.sum index 3a656dfd9..a8dadb218 100644 --- a/vendor/github.com/hashicorp/go-hclog/go.sum +++ b/vendor/github.com/hashicorp/go-hclog/go.sum @@ -1,18 +1,26 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10 
h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 h1:nonptSpoQ4vQjyraW20DXPAglgQfVnM9ZC6MmNLMR60= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go index 631baf2f0..ff42f1bfc 100644 --- a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go @@ -180,9 +180,10 @@ func (i *interceptLogger) StandardWriterIntercept(opts *StandardLoggerOptions) i func (i *interceptLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { return &stdlogAdapter{ - log: i, - inferLevels: opts.InferLevels, - forceLevel: opts.ForceLevel, + log: i, + inferLevels: opts.InferLevels, + inferLevelsWithTimestamp: opts.InferLevelsWithTimestamp, + 
forceLevel: opts.ForceLevel, } } diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go index 6099e6726..83232f7a6 100644 --- a/vendor/github.com/hashicorp/go-hclog/intlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -60,6 +60,7 @@ type intLogger struct { callerOffset int name string timeFormat string + timeFn TimeFunction disableTime bool // This is an interface so that it's shared by any derived loggers, since @@ -68,6 +69,8 @@ type intLogger struct { writer *writer level *int32 + headerColor ColorOption + implied []interface{} exclude func(level Level, msg string, args ...interface{}) bool @@ -112,16 +115,28 @@ func newLogger(opts *LoggerOptions) *intLogger { mutex = new(sync.Mutex) } + var primaryColor, headerColor ColorOption + + if opts.ColorHeaderOnly { + primaryColor = ColorOff + headerColor = opts.Color + } else { + primaryColor = opts.Color + headerColor = ColorOff + } + l := &intLogger{ json: opts.JSONFormat, name: opts.Name, timeFormat: TimeFormat, + timeFn: time.Now, disableTime: opts.DisableTime, mutex: mutex, - writer: newWriter(output, opts.Color), + writer: newWriter(output, primaryColor), level: new(int32), exclude: opts.Exclude, independentLevels: opts.IndependentLevels, + headerColor: headerColor, } if opts.IncludeLocation { l.callerOffset = offsetIntLogger + opts.AdditionalLocationOffset @@ -130,6 +145,9 @@ func newLogger(opts *LoggerOptions) *intLogger { if l.json { l.timeFormat = TimeFormatJSON } + if opts.TimeFn != nil { + l.timeFn = opts.TimeFn + } if opts.TimeFormat != "" { l.timeFormat = opts.TimeFormat } @@ -152,7 +170,7 @@ func (l *intLogger) log(name string, level Level, msg string, args ...interface{ return } - t := time.Now() + t := l.timeFn() l.mutex.Lock() defer l.mutex.Unlock() @@ -199,6 +217,24 @@ func trimCallerPath(path string) string { return path[idx+1:] } +// isNormal indicates if the rune is one allowed to exist as an unquoted +// string value. This is a subset of ASCII, `-` through `~`. +func isNormal(r rune) bool { + return 0x2D <= r && r <= 0x7E // - through ~ +} + +// needsQuoting returns false if all the runes in string are normal, according +// to isNormal +func needsQuoting(str string) bool { + for _, r := range str { + if !isNormal(r) { + return true + } + } + + return false +} + // Non-JSON logging format function func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) { @@ -209,7 +245,12 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, s, ok := _levelToBracket[level] if ok { - l.writer.WriteString(s) + if l.headerColor != ColorOff { + color := _levelToColor[level] + color.Fprint(l.writer, s) + } else { + l.writer.WriteString(s) + } } else { l.writer.WriteString("[?????]") } @@ -263,6 +304,7 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, val = st if st == "" { val = `""` + raw = true } case int: val = strconv.FormatInt(int64(st), 10) @@ -295,6 +337,9 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, continue FOR case Format: val = fmt.Sprintf(st[0].(string), st[1:]...) 
+ case Quote: + raw = true + val = strconv.Quote(string(st)) default: v := reflect.ValueOf(st) if v.Kind() == reflect.Slice { @@ -320,13 +365,11 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, l.writer.WriteString("=\n") writeIndent(l.writer, val, " | ") l.writer.WriteString(" ") - } else if !raw && strings.ContainsAny(val, " \t") { + } else if !raw && needsQuoting(val) { l.writer.WriteByte(' ') l.writer.WriteString(key) l.writer.WriteByte('=') - l.writer.WriteByte('"') - l.writer.WriteString(val) - l.writer.WriteByte('"') + l.writer.WriteString(strconv.Quote(val)) } else { l.writer.WriteByte(' ') l.writer.WriteString(key) @@ -684,9 +727,10 @@ func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { newLog.callerOffset = l.callerOffset + 4 } return &stdlogAdapter{ - log: &newLog, - inferLevels: opts.InferLevels, - forceLevel: opts.ForceLevel, + log: &newLog, + inferLevels: opts.InferLevels, + inferLevelsWithTimestamp: opts.InferLevelsWithTimestamp, + forceLevel: opts.ForceLevel, } } diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go index 7f36b1fd2..858143028 100644 --- a/vendor/github.com/hashicorp/go-hclog/logger.go +++ b/vendor/github.com/hashicorp/go-hclog/logger.go @@ -5,6 +5,7 @@ import ( "log" "os" "strings" + "time" ) var ( @@ -67,6 +68,12 @@ type Octal int // text output. For example: L.Info("bits", Binary(17)) type Binary int +// A simple shortcut to format strings with Go quoting. Control and +// non-printable characters will be escaped with their backslash equivalents in +// output. Intended for untrusted or multiline strings which should be logged +// as concisely as possible. +type Quote string + // ColorOption expresses how the output should be colored, if at all. type ColorOption uint8 @@ -206,6 +213,15 @@ type StandardLoggerOptions struct { // [DEBUG] and strip it off before reapplying it. InferLevels bool + // Indicate that some minimal parsing should be done on strings to try + // and detect their level and re-emit them while ignoring possible + // timestamp values in the beginning of the string. + // This supports the strings like [ERROR], [ERR] [TRACE], [WARN], [INFO], + // [DEBUG] and strip it off before reapplying it. + // The timestamp detection may result in false positives and incomplete + // string outputs. + InferLevelsWithTimestamp bool + // ForceLevel is used to force all output from the standard logger to be at // the specified level. Similar to InferLevels, this will strip any level // prefix contained in the logged string before applying the forced level. @@ -213,6 +229,8 @@ type StandardLoggerOptions struct { ForceLevel Level } +type TimeFunction = func() time.Time + // LoggerOptions can be used to configure a new logger. type LoggerOptions struct { // Name of the subsystem to prefix logs with @@ -242,6 +260,9 @@ type LoggerOptions struct { // The time format to use instead of the default TimeFormat string + // A function which is called to get the time object that is formatted using `TimeFormat` + TimeFn TimeFunction + // Control whether or not to display the time at all. This is required // because setting TimeFormat to empty assumes the default format. DisableTime bool @@ -250,6 +271,9 @@ type LoggerOptions struct { // are concretely instances of *os.File. Color ColorOption + // Only color the header, not the body. This can help with readability of long messages. 
+ ColorHeaderOnly bool + // A function which is called with the log information and if it returns true the value // should not be logged. // This is useful when interacting with a system that you wish to suppress the log diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go index f35d875d3..641f20ccb 100644 --- a/vendor/github.com/hashicorp/go-hclog/stdlog.go +++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go @@ -3,16 +3,22 @@ package hclog import ( "bytes" "log" + "regexp" "strings" ) +// Regex to ignore characters commonly found in timestamp formats from the +// beginning of inputs. +var logTimestampRegexp = regexp.MustCompile(`^[\d\s\:\/\.\+-TZ]*`) + // Provides a io.Writer to shim the data out of *log.Logger // and back into our Logger. This is basically the only way to // build upon *log.Logger. type stdlogAdapter struct { - log Logger - inferLevels bool - forceLevel Level + log Logger + inferLevels bool + inferLevelsWithTimestamp bool + forceLevel Level } // Take the data, infer the levels if configured, and send it through @@ -28,6 +34,10 @@ func (s *stdlogAdapter) Write(data []byte) (int, error) { // Log at the forced level s.dispatch(str, s.forceLevel) } else if s.inferLevels { + if s.inferLevelsWithTimestamp { + str = s.trimTimestamp(str) + } + level, str := s.pickLevel(str) s.dispatch(str, level) } else { @@ -64,7 +74,7 @@ func (s *stdlogAdapter) pickLevel(str string) (Level, string) { case strings.HasPrefix(str, "[INFO]"): return Info, strings.TrimSpace(str[6:]) case strings.HasPrefix(str, "[WARN]"): - return Warn, strings.TrimSpace(str[7:]) + return Warn, strings.TrimSpace(str[6:]) case strings.HasPrefix(str, "[ERROR]"): return Error, strings.TrimSpace(str[7:]) case strings.HasPrefix(str, "[ERR]"): @@ -74,6 +84,11 @@ func (s *stdlogAdapter) pickLevel(str string) (Level, string) { } } +func (s *stdlogAdapter) trimTimestamp(str string) string { + idx := logTimestampRegexp.FindStringIndex(str) + return str[idx[1]:] +} + type logWriter struct { l *log.Logger } diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md index 46ee09fc0..39391f24f 100644 --- a/vendor/github.com/hashicorp/go-plugin/README.md +++ b/vendor/github.com/hashicorp/go-plugin/README.md @@ -3,8 +3,9 @@ `go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system that has been in use by HashiCorp tooling for over 4 years. While initially created for [Packer](https://www.packer.io), it is additionally in use by -[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), and -[Vault](https://www.vaultproject.io). +[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), +[Vault](https://www.vaultproject.io), and +[Boundary](https://www.boundaryproject.io). While the plugin system is over RPC, it is currently only designed to work over a local [reliable] network. Plugins over a real network are not supported diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go index 6b42975b3..e0bee88a1 100644 --- a/vendor/github.com/hashicorp/go-plugin/client.go +++ b/vendor/github.com/hashicorp/go-plugin/client.go @@ -22,7 +22,8 @@ import ( "sync/atomic" "time" - hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-hclog" + "google.golang.org/grpc" ) // If this is 1, then we've called CleanupClients. 
This can be used @@ -203,6 +204,11 @@ type ClientConfig struct { // // You cannot Reattach to a server with this option enabled. AutoMTLS bool + + // GRPCDialOptions allows plugin users to pass custom grpc.DialOption + // to create gRPC connections. This only affects plugins using the gRPC + // protocol. + GRPCDialOptions []grpc.DialOption } // ReattachConfig is used to configure a client to reattach to an @@ -568,6 +574,8 @@ func (c *Client) Start() (addr net.Addr, err error) { c.config.TLSConfig = &tls.Config{ Certificates: []tls.Certificate{cert}, + ClientAuth: tls.RequireAndVerifyClientCert, + MinVersion: tls.VersionTLS12, ServerName: "localhost", } } @@ -623,17 +631,19 @@ func (c *Client) Start() (addr net.Addr, err error) { // Wait for the command to end. err := cmd.Wait() - debugMsgArgs := []interface{}{ + msgArgs := []interface{}{ "path", path, "pid", pid, } if err != nil { - debugMsgArgs = append(debugMsgArgs, + msgArgs = append(msgArgs, []interface{}{"error", err.Error()}...) + c.logger.Error("plugin process exited", msgArgs...) + } else { + // Log and make sure to flush the logs right away + c.logger.Info("plugin process exited", msgArgs...) } - // Log and make sure to flush the logs write away - c.logger.Debug("plugin process exited", debugMsgArgs...) os.Stderr.Sync() // Set that we exited, which takes a lock @@ -768,7 +778,7 @@ func (c *Client) Start() (addr net.Addr, err error) { } // loadServerCert is used by AutoMTLS to read an x.509 cert returned by the -// server, and load it as the RootCA for the client TLSConfig. +// server, and load it as the RootCA and ClientCA for the client TLSConfig. func (c *Client) loadServerCert(cert string) error { certPool := x509.NewCertPool() @@ -785,6 +795,7 @@ func (c *Client) loadServerCert(cert string) error { certPool.AddCert(x509Cert) c.config.TLSConfig.RootCAs = certPool + c.config.TLSConfig.ClientCAs = certPool return nil } diff --git a/vendor/github.com/hashicorp/go-plugin/go.mod b/vendor/github.com/hashicorp/go-plugin/go.mod index 4e182e625..f28852215 100644 --- a/vendor/github.com/hashicorp/go-plugin/go.mod +++ b/vendor/github.com/hashicorp/go-plugin/go.mod @@ -1,6 +1,6 @@ module github.com/hashicorp/go-plugin -go 1.13 +go 1.17 require ( github.com/golang/protobuf v1.3.4 @@ -9,7 +9,16 @@ require ( github.com/jhump/protoreflect v1.6.0 github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 github.com/oklog/run v1.0.0 - github.com/stretchr/testify v1.3.0 // indirect golang.org/x/net v0.0.0-20190311183353-d8887717615a google.golang.org/grpc v1.27.1 ) + +require ( + github.com/fatih/color v1.7.0 // indirect + github.com/mattn/go-colorable v0.1.4 // indirect + github.com/mattn/go-isatty v0.0.10 // indirect + github.com/stretchr/testify v1.3.0 // indirect + golang.org/x/sys v0.0.0-20191008105621-543471e840be // indirect + golang.org/x/text v0.3.0 // indirect + google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 // indirect +) diff --git a/vendor/github.com/hashicorp/go-plugin/go.sum b/vendor/github.com/hashicorp/go-plugin/go.sum index 56062044e..93d2905cd 100644 --- a/vendor/github.com/hashicorp/go-plugin/go.sum +++ b/vendor/github.com/hashicorp/go-plugin/go.sum @@ -2,7 +2,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod 
h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -13,7 +12,6 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -50,7 +48,6 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= @@ -60,7 +57,6 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= @@ -74,7 +70,6 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= 
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go index 978121913..842903c92 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_client.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go @@ -14,9 +14,9 @@ import ( "google.golang.org/grpc/health/grpc_health_v1" ) -func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error)) (*grpc.ClientConn, error) { +func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error), dialOpts ...grpc.DialOption) (*grpc.ClientConn, error) { // Build dialing options. - opts := make([]grpc.DialOption, 0, 5) + opts := make([]grpc.DialOption, 0) // We use a custom dialer so that we can connect over unix domain sockets. opts = append(opts, grpc.WithDialer(dialer)) @@ -37,6 +37,9 @@ func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)), grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(math.MaxInt32))) + // Add our custom options if we have any + opts = append(opts, dialOpts...) + // Connect. Note the first parameter is unused because we use a custom // dialer that has the state to see the address. conn, err := grpc.Dial("unused", opts...) @@ -50,7 +53,7 @@ func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, // newGRPCClient creates a new GRPCClient. The Client argument is expected // to be successfully started already with a lock held. func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) { - conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer) + conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer, c.config.GRPCDialOptions...) 
if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/go-plugin/process_posix.go b/vendor/github.com/hashicorp/go-plugin/process_posix.go index 70ba546bf..185957f8d 100644 --- a/vendor/github.com/hashicorp/go-plugin/process_posix.go +++ b/vendor/github.com/hashicorp/go-plugin/process_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package plugin diff --git a/vendor/github.com/hashicorp/go-plugin/process_windows.go b/vendor/github.com/hashicorp/go-plugin/process_windows.go index 9f7b01809..0eaa7705d 100644 --- a/vendor/github.com/hashicorp/go-plugin/process_windows.go +++ b/vendor/github.com/hashicorp/go-plugin/process_windows.go @@ -19,6 +19,7 @@ func _pidAlive(pid int) bool { if err != nil { return false } + defer syscall.CloseHandle(h) var ec uint32 if e := syscall.GetExitCodeProcess(h, &ec); e != nil { diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go index 5bb18dd5d..449ba6cc1 100644 --- a/vendor/github.com/hashicorp/go-plugin/rpc_server.go +++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go @@ -45,7 +45,11 @@ func (s *RPCServer) Serve(lis net.Listener) { for { conn, err := lis.Accept() if err != nil { - log.Printf("[ERR] plugin: plugin server: %s", err) + severity := "ERR" + if errors.Is(err, net.ErrClosed) { + severity = "DEBUG" + } + log.Printf("[%s] plugin: plugin server: %s", severity, err) return } diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go index 7a58cc391..e13499910 100644 --- a/vendor/github.com/hashicorp/go-plugin/server.go +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -304,13 +304,13 @@ func Serve(opts *ServeConfig) { certPEM, keyPEM, err := generateCert() if err != nil { - logger.Error("failed to generate client certificate", "error", err) + logger.Error("failed to generate server certificate", "error", err) panic(err) } cert, err := tls.X509KeyPair(certPEM, keyPEM) if err != nil { - logger.Error("failed to parse client certificate", "error", err) + logger.Error("failed to parse server certificate", "error", err) panic(err) } @@ -319,6 +319,8 @@ func Serve(opts *ServeConfig) { ClientAuth: tls.RequireAndVerifyClientCert, ClientCAs: clientCertPool, MinVersion: tls.VersionTLS12, + RootCAs: clientCertPool, + ServerName: "localhost", } // We send back the raw leaf cert data for the client rather than the diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE index e87a115e4..a320b309c 100644 --- a/vendor/github.com/hashicorp/go-uuid/LICENSE +++ b/vendor/github.com/hashicorp/go-uuid/LICENSE @@ -1,3 +1,5 @@ +Copyright © 2015-2022 HashiCorp, Inc. + Mozilla Public License, version 2.0 1. 
Definitions diff --git a/vendor/github.com/hashicorp/go-version/CHANGELOG.md b/vendor/github.com/hashicorp/go-version/CHANGELOG.md index dbae7f7be..5f16dd140 100644 --- a/vendor/github.com/hashicorp/go-version/CHANGELOG.md +++ b/vendor/github.com/hashicorp/go-version/CHANGELOG.md @@ -1,3 +1,23 @@ +# 1.6.0 (June 28, 2022) + +FEATURES: + +- Add `Prerelease` function to `Constraint` to return true if the version includes a prerelease field ([#100](https://github.com/hashicorp/go-version/pull/100)) + +# 1.5.0 (May 18, 2022) + +FEATURES: + +- Use `encoding` `TextMarshaler` & `TextUnmarshaler` instead of JSON equivalents ([#95](https://github.com/hashicorp/go-version/pull/95)) +- Add JSON handlers to allow parsing from/to JSON ([#93](https://github.com/hashicorp/go-version/pull/93)) + +# 1.4.0 (January 5, 2022) + +FEATURES: + + - Introduce `MustConstraints()` ([#87](https://github.com/hashicorp/go-version/pull/87)) + - `Constraints`: Introduce `Equals()` and `sort.Interface` methods ([#88](https://github.com/hashicorp/go-version/pull/88)) + # 1.3.0 (March 31, 2021) Please note that CHANGELOG.md does not exist in the source code prior to this release. diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md index 851a337be..4d2505090 100644 --- a/vendor/github.com/hashicorp/go-version/README.md +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -1,5 +1,5 @@ # Versioning Library for Go -[![Build Status](https://circleci.com/gh/hashicorp/go-version/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-version/tree/master) +[![Build Status](https://circleci.com/gh/hashicorp/go-version/tree/main.svg?style=svg)](https://circleci.com/gh/hashicorp/go-version/tree/main) [![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version) go-version is a library for parsing versions and version constraints, diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go index d05575961..da5d1aca1 100644 --- a/vendor/github.com/hashicorp/go-version/constraint.go +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -4,6 +4,7 @@ import ( "fmt" "reflect" "regexp" + "sort" "strings" ) @@ -11,30 +12,40 @@ import ( // ">= 1.0". type Constraint struct { f constraintFunc + op operator check *Version original string } +func (c *Constraint) Equals(con *Constraint) bool { + return c.op == con.op && c.check.Equal(con.check) +} + // Constraints is a slice of constraints. We make a custom type so that // we can add methods to it. 
type Constraints []*Constraint type constraintFunc func(v, c *Version) bool -var constraintOperators map[string]constraintFunc +var constraintOperators map[string]constraintOperation + +type constraintOperation struct { + op operator + f constraintFunc +} var constraintRegexp *regexp.Regexp func init() { - constraintOperators = map[string]constraintFunc{ - "": constraintEqual, - "=": constraintEqual, - "!=": constraintNotEqual, - ">": constraintGreaterThan, - "<": constraintLessThan, - ">=": constraintGreaterThanEqual, - "<=": constraintLessThanEqual, - "~>": constraintPessimistic, + constraintOperators = map[string]constraintOperation{ + "": {op: equal, f: constraintEqual}, + "=": {op: equal, f: constraintEqual}, + "!=": {op: notEqual, f: constraintNotEqual}, + ">": {op: greaterThan, f: constraintGreaterThan}, + "<": {op: lessThan, f: constraintLessThan}, + ">=": {op: greaterThanEqual, f: constraintGreaterThanEqual}, + "<=": {op: lessThanEqual, f: constraintLessThanEqual}, + "~>": {op: pessimistic, f: constraintPessimistic}, } ops := make([]string, 0, len(constraintOperators)) @@ -66,6 +77,16 @@ func NewConstraint(v string) (Constraints, error) { return Constraints(result), nil } +// MustConstraints is a helper that wraps a call to a function +// returning (Constraints, error) and panics if error is non-nil. +func MustConstraints(c Constraints, err error) Constraints { + if err != nil { + panic(err) + } + + return c +} + // Check tests if a version satisfies all the constraints. func (cs Constraints) Check(v *Version) bool { for _, c := range cs { @@ -77,6 +98,56 @@ func (cs Constraints) Check(v *Version) bool { return true } +// Equals compares Constraints with other Constraints +// for equality. This may not represent logical equivalence +// of compared constraints. +// e.g. even though '>0.1,>0.2' is logically equivalent +// to '>0.2' it is *NOT* treated as equal. +// +// Missing operator is treated as equal to '=', whitespaces +// are ignored and constraints are sorted before comaparison. +func (cs Constraints) Equals(c Constraints) bool { + if len(cs) != len(c) { + return false + } + + // make copies to retain order of the original slices + left := make(Constraints, len(cs)) + copy(left, cs) + sort.Stable(left) + right := make(Constraints, len(c)) + copy(right, c) + sort.Stable(right) + + // compare sorted slices + for i, con := range left { + if !con.Equals(right[i]) { + return false + } + } + + return true +} + +func (cs Constraints) Len() int { + return len(cs) +} + +func (cs Constraints) Less(i, j int) bool { + if cs[i].op < cs[j].op { + return true + } + if cs[i].op > cs[j].op { + return false + } + + return cs[i].check.LessThan(cs[j].check) +} + +func (cs Constraints) Swap(i, j int) { + cs[i], cs[j] = cs[j], cs[i] +} + // Returns the string format of the constraints func (cs Constraints) String() string { csStr := make([]string, len(cs)) @@ -92,6 +163,12 @@ func (c *Constraint) Check(v *Version) bool { return c.f(v, c.check) } +// Prerelease returns true if the version underlying this constraint +// contains a prerelease field. 
+func (c *Constraint) Prerelease() bool { + return len(c.check.Prerelease()) > 0 +} + func (c *Constraint) String() string { return c.original } @@ -107,8 +184,11 @@ func parseSingle(v string) (*Constraint, error) { return nil, err } + cop := constraintOperators[matches[1]] + return &Constraint{ - f: constraintOperators[matches[1]], + f: cop.f, + op: cop.op, check: check, original: v, }, nil @@ -138,6 +218,18 @@ func prereleaseCheck(v, c *Version) bool { // Constraint functions //------------------------------------------------------------------- +type operator rune + +const ( + equal operator = '=' + notEqual operator = '≠' + greaterThan operator = '>' + lessThan operator = '<' + greaterThanEqual operator = '≥' + lessThanEqual operator = '≤' + pessimistic operator = '~' +) + func constraintEqual(v, c *Version) bool { return v.Equal(c) } diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go index 8068834ec..e87df6990 100644 --- a/vendor/github.com/hashicorp/go-version/version.go +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -64,7 +64,6 @@ func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { } segmentsStr := strings.Split(matches[1], ".") segments := make([]int64, len(segmentsStr)) - si := 0 for i, str := range segmentsStr { val, err := strconv.ParseInt(str, 10, 64) if err != nil { @@ -72,8 +71,7 @@ func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { "Error parsing version: %s", err) } - segments[i] = int64(val) - si++ + segments[i] = val } // Even though we could support more than three segments, if we @@ -92,7 +90,7 @@ func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { metadata: matches[10], pre: pre, segments: segments, - si: si, + si: len(segmentsStr), original: v, }, nil } @@ -390,3 +388,20 @@ func (v *Version) String() string { func (v *Version) Original() string { return v.original } + +// UnmarshalText implements encoding.TextUnmarshaler interface. +func (v *Version) UnmarshalText(b []byte) error { + temp, err := NewVersion(string(b)) + if err != nil { + return err + } + + *v = *temp + + return nil +} + +// MarshalText implements encoding.TextMarshaler interface. +func (v *Version) MarshalText() ([]byte, error) { + return []byte(v.String()), nil +} diff --git a/vendor/github.com/hashicorp/hc-install/README.md b/vendor/github.com/hashicorp/hc-install/README.md index 87c06a203..eb287ff0f 100644 --- a/vendor/github.com/hashicorp/hc-install/README.md +++ b/vendor/github.com/hashicorp/hc-install/README.md @@ -31,7 +31,7 @@ The `Installer` offers a few high-level methods: The `Installer` methods accept number of different `Source` types. Each comes with different trade-offs described below. - - `fs.{AnyVersion,ExactVersion}` - Finds a binary in `$PATH` (or additional paths) + - `fs.{AnyVersion,ExactVersion,Version}` - Finds a binary in `$PATH` (or additional paths) - **Pros:** - This is most convenient when you already have the product installed on your system which you already manage. 
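
The new `fs.Version` source listed in the README hunk above (and added as a new file just below) can also be driven directly through its `Find` method, outside the `Installer`. A minimal sketch, assuming a consul binary is on `$PATH`; the constraint value is illustrative and uses the `MustConstraints` helper introduced by the go-version upgrade earlier in this diff:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/go-version"
	"github.com/hashicorp/hc-install/fs"
	"github.com/hashicorp/hc-install/product"
)

func main() {
	// Locate the first consul binary on $PATH whose reported version
	// satisfies the constraint.
	v := &fs.Version{
		Product:     product.Consul,
		Constraints: version.MustConstraints(version.NewConstraint(">= 1.12")),
	}

	execPath, err := v.Find(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("found binary at", execPath)
}
```
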
diff --git a/vendor/github.com/hashicorp/hc-install/fs/version.go b/vendor/github.com/hashicorp/hc-install/fs/version.go new file mode 100644 index 000000000..26633b8af --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/fs/version.go @@ -0,0 +1,97 @@ +package fs + +import ( + "context" + "fmt" + "log" + "path/filepath" + "time" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/hc-install/errors" + "github.com/hashicorp/hc-install/internal/src" + "github.com/hashicorp/hc-install/internal/validators" + "github.com/hashicorp/hc-install/product" +) + +// Version finds the first executable binary of the product name +// which matches the version constraint within system $PATH and any declared ExtraPaths +// (which are *appended* to any directories in $PATH) +type Version struct { + Product product.Product + Constraints version.Constraints + ExtraPaths []string + Timeout time.Duration + + logger *log.Logger +} + +func (*Version) IsSourceImpl() src.InstallSrcSigil { + return src.InstallSrcSigil{} +} + +func (v *Version) SetLogger(logger *log.Logger) { + v.logger = logger +} + +func (v *Version) log() *log.Logger { + if v.logger == nil { + return discardLogger + } + return v.logger +} + +func (v *Version) Validate() error { + if !validators.IsBinaryNameValid(v.Product.BinaryName()) { + return fmt.Errorf("invalid binary name: %q", v.Product.BinaryName()) + } + if len(v.Constraints) == 0 { + return fmt.Errorf("undeclared version constraints") + } + if v.Product.GetVersion == nil { + return fmt.Errorf("undeclared version getter") + } + return nil +} + +func (v *Version) Find(ctx context.Context) (string, error) { + timeout := defaultTimeout + if v.Timeout > 0 { + timeout = v.Timeout + } + ctx, cancelFunc := context.WithTimeout(ctx, timeout) + defer cancelFunc() + + execPath, err := findFile(lookupDirs(v.ExtraPaths), v.Product.BinaryName(), func(file string) error { + err := checkExecutable(file) + if err != nil { + return err + } + + ver, err := v.Product.GetVersion(ctx, file) + if err != nil { + return err + } + + for _, vc := range v.Constraints { + if !vc.Check(ver) { + return fmt.Errorf("version (%s) doesn't meet constraints %s", ver, vc.String()) + } + } + + return nil + }) + if err != nil { + return "", errors.SkippableErr(err) + } + + if !filepath.IsAbs(execPath) { + var err error + execPath, err = filepath.Abs(execPath) + if err != nil { + return "", errors.SkippableErr(err) + } + } + + return execPath, nil +} diff --git a/vendor/github.com/hashicorp/hc-install/go.mod b/vendor/github.com/hashicorp/hc-install/go.mod index a460fcc66..fe8ec92d7 100644 --- a/vendor/github.com/hashicorp/hc-install/go.mod +++ b/vendor/github.com/hashicorp/hc-install/go.mod @@ -4,10 +4,10 @@ go 1.16 require ( github.com/go-git/go-git/v5 v5.4.2 - github.com/google/go-cmp v0.5.6 + github.com/google/go-cmp v0.5.8 github.com/hashicorp/go-checkpoint v0.5.0 github.com/hashicorp/go-cleanhttp v0.5.2 github.com/hashicorp/go-multierror v1.1.1 - github.com/hashicorp/go-version v1.3.0 + github.com/hashicorp/go-version v1.5.0 golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e ) diff --git a/vendor/github.com/hashicorp/hc-install/go.sum b/vendor/github.com/hashicorp/hc-install/go.sum index 276292194..abba1c6a4 100644 --- a/vendor/github.com/hashicorp/hc-install/go.sum +++ b/vendor/github.com/hashicorp/hc-install/go.sum @@ -28,8 +28,8 @@ github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6 github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4= 
github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= @@ -41,8 +41,8 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.3.0 h1:McDWVJIU/y+u1BRV06dPaLfLCaT7fUTJLp5r04x7iNw= -github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.5.0 h1:O293SZ2Eg+AAYijkVK3jR786Am1bhDEh2GHT0tIVE5E= +github.com/hashicorp/go-version v1.5.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= @@ -102,8 +102,6 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go index 4936dda2f..2b85ea344 100644 --- a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go +++ b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go @@ -121,12 +121,23 @@ func fileMapFromChecksums(checksums strings.Builder) (ChecksumFileMap, error) { return csMap, nil } -func compareChecksum(logger *log.Logger, r io.Reader, verifiedHashSum HashSum) error { +func compareChecksum(logger *log.Logger, r io.Reader, verifiedHashSum HashSum, filename string, expectedSize int64) error { h := sha256.New() - _, err := io.Copy(h, r) + + // This may take a while depending on network connection as the 
io.Reader + // is expected to be http.Response.Body which streams the bytes + // on demand over the network. + logger.Printf("copying %q (%d bytes) to calculate checksum", filename, expectedSize) + bytesCopied, err := io.Copy(h, r) if err != nil { return err } + logger.Printf("copied %d bytes of %q", bytesCopied, filename) + + if expectedSize != 0 && bytesCopied != int64(expectedSize) { + return fmt.Errorf("unexpected size (downloaded: %d, expected: %d)", + bytesCopied, expectedSize) + } calculatedSum := h.Sum(nil) if !bytes.Equal(calculatedSum, verifiedHashSum) { diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go index 62ed6afd0..e9cd94e43 100644 --- a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go +++ b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go @@ -12,7 +12,6 @@ import ( "os" "path/filepath" "runtime" - "strconv" "github.com/hashicorp/hc-install/internal/httpclient" ) @@ -95,14 +94,16 @@ func (d *Downloader) DownloadAndUnpack(ctx context.Context, pv *ProductVersion, contentType, zipMimeTypes) } + expectedSize := resp.ContentLength + if d.VerifyChecksum { - d.Logger.Printf("calculating checksum of %q", pb.Filename) + d.Logger.Printf("verifying checksum of %q", pb.Filename) // provide extra reader to calculate & compare checksum var buf bytes.Buffer r := io.TeeReader(resp.Body, &buf) pkgReader = &buf - err := compareChecksum(d.Logger, r, verifiedChecksum) + err := compareChecksum(d.Logger, r, verifiedChecksum, pb.Filename, expectedSize) if err != nil { return err } @@ -114,21 +115,17 @@ func (d *Downloader) DownloadAndUnpack(ctx context.Context, pv *ProductVersion, } defer pkgFile.Close() - d.Logger.Printf("copying downloaded file to %s", pkgFile.Name()) + d.Logger.Printf("copying %q (%d bytes) to %s", pb.Filename, expectedSize, pkgFile.Name()) + // Unless the bytes were already downloaded above for checksum verification + // this may take a while depending on network connection as the io.Reader + // is expected to be http.Response.Body which streams the bytes + // on demand over the network. 
bytesCopied, err := io.Copy(pkgFile, pkgReader) if err != nil { return err } d.Logger.Printf("copied %d bytes to %s", bytesCopied, pkgFile.Name()) - expectedSize := 0 - if length := resp.Header.Get("content-length"); length != "" { - var err error - expectedSize, err = strconv.Atoi(length) - if err != nil { - return err - } - } if expectedSize != 0 && bytesCopied != int64(expectedSize) { return fmt.Errorf("unexpected size (downloaded: %d, expected: %d)", bytesCopied, expectedSize) diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go index 24e32ffc4..849f16a52 100644 --- a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go +++ b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go @@ -79,7 +79,7 @@ func (r *Releases) ListProductVersions(ctx context.Context, productName string) } contentType := resp.Header.Get("content-type") - if contentType != "application/json" { + if contentType != "application/json" && contentType != "application/vnd+hashicorp.releases-api.v0+json" { return nil, fmt.Errorf("unexpected Content-Type: %q", contentType) } @@ -144,7 +144,7 @@ func (r *Releases) GetProductVersion(ctx context.Context, product string, versio } contentType := resp.Header.Get("content-type") - if contentType != "application/json" { + if contentType != "application/json" && contentType != "application/vnd+hashicorp.releases-api.v0+json" { return nil, fmt.Errorf("unexpected Content-Type: %q", contentType) } diff --git a/vendor/github.com/hashicorp/hc-install/product/consul.go b/vendor/github.com/hashicorp/hc-install/product/consul.go index b619d89ed..aeeac9469 100644 --- a/vendor/github.com/hashicorp/hc-install/product/consul.go +++ b/vendor/github.com/hashicorp/hc-install/product/consul.go @@ -15,9 +15,7 @@ import ( var consulVersionOutputRe = regexp.MustCompile(`Consul ` + simpleVersionRe) var ( - v1_16 = version.Must(version.NewVersion("1.16")) - // TODO: version.MustConstraint() ? 
- v1_16c, _ = version.NewConstraint("1.16") + v1_18 = version.Must(version.NewVersion("1.18")) ) var Consul = Product{ @@ -52,6 +50,6 @@ var Consul = Product{ BuildInstructions: &BuildInstructions{ GitRepoURL: "https://github.com/hashicorp/consul.git", PreCloneCheck: &build.GoIsInstalled{}, - Build: &build.GoBuild{Version: v1_16}, + Build: &build.GoBuild{Version: v1_18}, }, } diff --git a/vendor/github.com/hashicorp/hc-install/product/vault.go b/vendor/github.com/hashicorp/hc-install/product/vault.go new file mode 100644 index 000000000..d03bc4fab --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/product/vault.go @@ -0,0 +1,54 @@ +package product + +import ( + "context" + "fmt" + "os/exec" + "regexp" + "runtime" + "strings" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/hc-install/internal/build" +) + +var ( + vaultVersionOutputRe = regexp.MustCompile(`Vault ` + simpleVersionRe) + v1_17 = version.Must(version.NewVersion("1.17")) +) + +var Vault = Product{ + Name: "vault", + BinaryName: func() string { + if runtime.GOOS == "windows" { + return "vault.exe" + } + return "vault" + }, + GetVersion: func(ctx context.Context, path string) (*version.Version, error) { + cmd := exec.CommandContext(ctx, path, "version") + + out, err := cmd.Output() + if err != nil { + return nil, err + } + + stdout := strings.TrimSpace(string(out)) + + submatches := vaultVersionOutputRe.FindStringSubmatch(stdout) + if len(submatches) != 2 { + return nil, fmt.Errorf("unexpected number of version matches %d for %s", len(submatches), stdout) + } + v, err := version.NewVersion(submatches[1]) + if err != nil { + return nil, fmt.Errorf("unable to parse version %q: %w", submatches[1], err) + } + + return v, err + }, + BuildInstructions: &BuildInstructions{ + GitRepoURL: "https://github.com/hashicorp/vault.git", + PreCloneCheck: &build.GoIsInstalled{}, + Build: &build.GoBuild{Version: v1_17}, + }, +} diff --git a/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md b/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md index 4c644fcfb..9f6c23b1c 100644 --- a/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md +++ b/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md @@ -1,5 +1,161 @@ # HCL Changelog +## v2.13.0 (June 22, 2022) + +### Enhancements + +* hcl: `hcl.Diagnostic` now has an additional field `Extra` which is intended for carrying arbitrary supporting data ("extra information") related to the diagnostic message, intended to allow diagnostic renderers to optionally tailor the presentation of messages for particular situations. ([#539](https://github.com/hashicorp/hcl/pull/539)) +* hclsyntax: When an error occurs during a function call, the returned diagnostics will include _extra information_ (as described in the previous point) about which function was being called and, if the message is about an error returned by the function itself, that raw `error` value without any post-processing. ([#539](https://github.com/hashicorp/hcl/pull/539)) + +### Bugs Fixed + +* hclwrite: Fixed a potential data race for any situation where `hclwrite.Format` runs concurrently with itself. ([#534](https://github.com/hashicorp/hcl/pull/534)) + +## v2.12.0 (April 22, 2022) + +### Enhancements + +* hclsyntax: Evaluation of conditional expressions will now produce more precise error messages about inconsistencies between the types of the true and false result expressions, particularly in cases where both are of the same structural type kind but differ in their nested elements. ([#530](https://github.com/hashicorp/hcl/pull/530))
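To make the v2.12.0 entry above concrete, a minimal sketch of the kind of mismatch it describes (a hypothetical standalone program, not part of this diff; the exact diagnostic wording may vary by version):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Both arms are objects, so the old message would unhelpfully say
	// "object and object"; the new one names the disagreeing attribute.
	expr, diags := hclsyntax.ParseExpression([]byte(`flag ? { a = 1 } : { b = 2 }`), "example.hcl", hcl.InitialPos)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	_, diags = expr.Value(&hcl.EvalContext{
		Variables: map[string]cty.Value{"flag": cty.True},
	})
	for _, d := range diags {
		fmt.Println(d.Detail) // e.g. mentions attribute "a" being absent in the 'false' value
	}
}
```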
+* hclsyntax: The lexer will no longer allocate a small object on the heap for each token. Instead, in that situation it will allocate only when needed to return a diagnostic message with source location information. ([#490](https://github.com/hashicorp/hcl/pull/490)) +* hclwrite: New functions `TokensForTuple`, `TokensForObject`, and `TokensForFunctionCall` allow for more easily constructing the three constructs which are supported for static analysis and which HCL-based languages typically use in contexts where an expression is used only for its syntax, and not evaluated to produce a real value. For example, these new functions together are sufficient to construct all valid type constraint expressions from [the Type Expressions Extension](./ext/typeexpr/), which is the basis of variable type constraints in the Terraform language at the time of writing. ([#502](https://github.com/hashicorp/hcl/pull/502)) +* json: New functions `IsJSONExpression` and `IsJSONBody` to determine if a given expression or body was created by the JSON syntax parser. In normal situations it's better not to worry about what syntax a particular expression/body originated in, but this can be useful in some trickier cases where an application needs to shim for backwards-compatibility or for static analysis that needs to have special handling of the JSON syntax's embedded expression/template conventions. ([#524](https://github.com/hashicorp/hcl/pull/524)) + +### Bugs Fixed + +* gohcl: Fix docs about supported types for blocks. ([#507](https://github.com/hashicorp/hcl/pull/507)) + +## v2.11.1 (December 1, 2021) + +### Bugs Fixed + +* hclsyntax: The type for an upgraded unknown value with a splat expression cannot be known ([#495](https://github.com/hashicorp/hcl/pull/495)) + +## v2.11.0 (December 1, 2021) + +### Enhancements + +* hclsyntax: Various error messages related to unexpectedly reaching end of file while parsing a delimited subtree will now return specialized messages describing the opening tokens as "unclosed", instead of returning a generic diagnostic that just happens to refer to the empty source range at the end of the file. This gives better feedback when error messages are being presented alongside a source code snippet, as is common in HCL-based applications, because it shows which innermost container the parser was working on when it encountered the error. ([#492](https://github.com/hashicorp/hcl/pull/492)) + +### Bugs Fixed + +* hclsyntax: Upgrading an unknown single value to a list using a splat expression must return unknown ([#493](https://github.com/hashicorp/hcl/pull/493)) + +## v2.10.1 (July 21, 2021) + +* dynblock: Decode unknown dynamic blocks in order to obtain any diagnostics even though the decoded value is not used ([#476](https://github.com/hashicorp/hcl/pull/476)) +* hclsyntax: Calling functions is now more robust in the face of an incorrectly-implemented function which returns a `function.ArgError` whose argument index is out of range for the length of the arguments. Previously this would often lead to a panic, but now it'll return a less-precise error message instead. Functions that return out-of-bounds argument indices still ought to be fixed so that the resulting error diagnostics can be as precise as possible. ([#472](https://github.com/hashicorp/hcl/pull/472)) +* hclsyntax: Ensure marks on unknown values are maintained when processing string templates. ([#478](https://github.com/hashicorp/hcl/pull/478))
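A small sketch of what "maintaining marks" means in practice (a hypothetical program; `"sensitive"` is an arbitrary mark value chosen for illustration):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	expr, diags := hclsyntax.ParseTemplate([]byte(`Hello, ${name}!`), "greeting.tmpl", hcl.InitialPos)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	// Mark the input value; the mark must survive template evaluation.
	secret := cty.StringVal("world").Mark("sensitive")
	val, _ := expr.Value(&hcl.EvalContext{
		Variables: map[string]cty.Value{"name": secret},
	})
	fmt.Println(val.HasMark("sensitive")) // true: the result string is still marked
}
```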
+* hcl: Improved error messages for various common error situations in `hcl.Index` and `hcl.GetAttr`. These are part of the implementation of indexing and attribute lookup in the native syntax expression language too, so the new error messages will apply to problems using those operators. ([#474](https://github.com/hashicorp/hcl/pull/474)) + +## v2.10.0 (April 20, 2021) + +### Enhancements + +* dynblock,hcldec: Using dynblock in conjunction with hcldec can now decode blocks with unknown dynamic for_each arguments as entirely unknown values ([#461](https://github.com/hashicorp/hcl/pull/461)) +* hclsyntax: Some syntax errors during parsing of the inside of `${` ... `}` template interpolation sequences will now produce an extra hint message about the need to escape as `$${` when trying to include interpolation syntax for other languages like shell scripting, AWS IAM policies, etc. ([#462](https://github.com/hashicorp/hcl/pull/462)) + +## v2.9.1 (March 10, 2021) + +### Bugs Fixed + +* hclsyntax: Fix panic for marked index value. ([#451](https://github.com/hashicorp/hcl/pull/451)) + +## v2.9.0 (February 23, 2021) + +### Enhancements + +* HCL's native syntax and JSON scanners -- and thus all of the other parsing components that build on top of them -- are now using Unicode 13 rules for text segmentation when counting text characters for the purpose of reporting source location columns. Previously HCL was using Unicode 12. Unicode 13 still uses the same algorithm but includes some additions to the character tables the algorithm is defined in terms of, to properly categorize new characters defined in Unicode 13. + +## v2.8.2 (January 6, 2021) + +### Bugs Fixed + +* hclsyntax: Fix panic for marked collection splat. ([#436](https://github.com/hashicorp/hcl/pull/436)) +* hclsyntax: Fix panic for marked template loops. ([#437](https://github.com/hashicorp/hcl/pull/437)) +* hclsyntax: Fix `for` expression marked conditional. ([#438](https://github.com/hashicorp/hcl/pull/438)) +* hclsyntax: Mark objects with keys that are sensitive. ([#440](https://github.com/hashicorp/hcl/pull/440)) + +## v2.8.1 (December 17, 2020) + +### Bugs Fixed + +* hclsyntax: Fix panic when expanding marked function arguments. ([#429](https://github.com/hashicorp/hcl/pull/429)) +* hclsyntax: Error when attempting to use a marked value as an object key. ([#434](https://github.com/hashicorp/hcl/pull/434)) +* hclsyntax: Error when attempting to use a marked value as an object key in expressions. ([#433](https://github.com/hashicorp/hcl/pull/433)) + +## v2.8.0 (December 7, 2020) + +### Enhancements + +* hclsyntax: Expression grouping parentheses will now be reflected by an explicit node in the AST, whereas before they were only considered during parsing. ([#426](https://github.com/hashicorp/hcl/pull/426)) + +### Bugs Fixed + +* hclwrite: The parser will now correctly include the `(` and `)` tokens when an expression is surrounded by parentheses. Previously it would incorrectly recognize those tokens as being extraneous tokens outside of the expression. ([#426](https://github.com/hashicorp/hcl/pull/426)) +* hclwrite: The formatter will now remove (rather than insert) spaces between the `!` (unary boolean "not") operator and its subsequent operand. ([#403](https://github.com/hashicorp/hcl/pull/403))
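The `$${` escape mentioned in the v2.10.0 entry above works like this (a hypothetical program; the ARN string is illustrative only):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
	// $${ escapes interpolation: the template renders a literal
	// ${aws:username} rather than trying to evaluate an aws:username symbol.
	src := []byte(`arn:aws:iam::123456789012:user/$${aws:username}`)
	expr, diags := hclsyntax.ParseTemplate(src, "policy.tmpl", hcl.InitialPos)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	val, _ := expr.Value(nil)
	fmt.Println(val.AsString()) // arn:aws:iam::123456789012:user/${aws:username}
}
```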
+* hclsyntax: Unmark conditional values in expressions before checking their truthfulness ([#427](https://github.com/hashicorp/hcl/pull/427)) + +## v2.7.2 (November 30, 2020) + +### Bugs Fixed + +* gohcl: Fix panic when decoding into type containing value slices. ([#335](https://github.com/hashicorp/hcl/pull/335)) +* hclsyntax: The unusual expression `null[*]` was previously always returning an unknown value, even though the rules for `[*]` normally call for it to return an empty tuple when applied to a null. As well as being a surprising result, it was particularly problematic because it violated the rule that a calling application may assume that an expression result will always be known unless the application itself introduces unknown values via the evaluation context. `null[*]` will now produce an empty tuple. ([#416](https://github.com/hashicorp/hcl/pull/416)) +* hclsyntax: Fix panic when traversing a list, tuple, or map with cty "marks" ([#424](https://github.com/hashicorp/hcl/pull/424)) + +## v2.7.1 (November 18, 2020) + +### Bugs Fixed + +* hclwrite: Correctly handle blank quoted string block labels, instead of dropping them ([#422](https://github.com/hashicorp/hcl/pull/422)) + +## v2.7.0 (October 14, 2020) + +### Enhancements + +* json: There is a new function `ParseWithStartPos`, which allows overriding the starting position for parsing in case the given JSON bytes are a fragment of a larger document, such as might happen when decoding with `encoding/json` into a `json.RawMessage`. ([#389](https://github.com/hashicorp/hcl/pull/389)) +* json: There is a new function `ParseExpression`, which allows parsing a JSON string directly in expression mode, whereas previously it was only possible to parse a JSON string in body mode. ([#381](https://github.com/hashicorp/hcl/pull/381)) +* hclwrite: `Block` type now supports `SetType` and `SetLabels`, allowing surgical changes to the type and labels of an existing block without having to reconstruct the entire block. ([#340](https://github.com/hashicorp/hcl/pull/340)) + +### Bugs Fixed + +* hclsyntax: Fix confusing error message for bitwise OR operator ([#380](https://github.com/hashicorp/hcl/pull/380)) +* hclsyntax: Several bug fixes for using HCL with values containing cty "marks" ([#404](https://github.com/hashicorp/hcl/pull/404), [#406](https://github.com/hashicorp/hcl/pull/406), [#407](https://github.com/hashicorp/hcl/pull/407)) + +## v2.6.0 (June 4, 2020) + +### Enhancements + +* hcldec: Add a new `Spec`, `ValidateSpec`, which allows custom validation of values at decode-time. ([#387](https://github.com/hashicorp/hcl/pull/387)) + +### Bugs Fixed + +* hclsyntax: Fix panic with combination of sequences and null arguments ([#386](https://github.com/hashicorp/hcl/pull/386)) +* hclsyntax: Fix handling of unknown values and sequences ([#386](https://github.com/hashicorp/hcl/pull/386)) + +## v2.5.1 (May 14, 2020) + +### Bugs Fixed + +* hclwrite: handle legacy dot access of numeric indexes. ([#369](https://github.com/hashicorp/hcl/pull/369)) +* hclwrite: Fix panic for dotted full splat (`foo.*`) ([#374](https://github.com/hashicorp/hcl/pull/374)) + +## v2.5.0 (May 6, 2020) + +### Enhancements + +* hclwrite: Generate multi-line objects and maps. ([#372](https://github.com/hashicorp/hcl/pull/372))
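The v2.5.0 entry above is easiest to see with a small `hclwrite` generation sketch (a hypothetical program; attribute names are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/hclwrite"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	f := hclwrite.NewEmptyFile()
	// Object values are rendered across multiple lines, one attribute
	// per line, rather than packed onto a single line.
	f.Body().SetAttributeValue("labels", cty.ObjectVal(map[string]cty.Value{
		"env":  cty.StringVal("prod"),
		"team": cty.StringVal("sre"),
	}))
	fmt.Print(string(f.Bytes()))
}
```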
+ +## v2.4.0 (Apr 13, 2020) + +### Enhancements + +* The Unicode data tables that HCL uses to produce user-perceived "column" positions in diagnostics and other source ranges are now updated to Unicode 12.0.0, which will cause HCL to produce more accurate column numbers for combining characters introduced to Unicode since Unicode 9.0.0. + +### Bugs Fixed + +* json: Fix panic when parsing malformed JSON. ([#358](https://github.com/hashicorp/hcl/pull/358)) + ## v2.3.0 (Jan 3, 2020) ### Enhancements diff --git a/vendor/github.com/hashicorp/hcl/v2/README.md b/vendor/github.com/hashicorp/hcl/v2/README.md index 3d0d509d5..9af736c9d 100644 --- a/vendor/github.com/hashicorp/hcl/v2/README.md +++ b/vendor/github.com/hashicorp/hcl/v2/README.md @@ -33,11 +33,25 @@ package main import ( "log" + "github.com/hashicorp/hcl/v2/hclsimple" ) type Config struct { - LogLevel string `hcl:"log_level"` + IOMode string `hcl:"io_mode"` + Service ServiceConfig `hcl:"service,block"` +} + +type ServiceConfig struct { + Protocol string `hcl:"protocol,label"` + Type string `hcl:"type,label"` + ListenAddr string `hcl:"listen_addr"` + Processes []ProcessConfig `hcl:"process,block"` +} + +type ProcessConfig struct { + Type string `hcl:"type,label"` + Command []string `hcl:"command"` } func main() { diff --git a/vendor/github.com/hashicorp/hcl/v2/appveyor.yml b/vendor/github.com/hashicorp/hcl/v2/appveyor.yml deleted file mode 100644 index e382f8f57..000000000 --- a/vendor/github.com/hashicorp/hcl/v2/appveyor.yml +++ /dev/null @@ -1,13 +0,0 @@ -build: off - -clone_folder: c:\gopath\src\github.com\hashicorp\hcl - -environment: - GOPATH: c:\gopath - GO111MODULE: on - GOPROXY: https://goproxy.io - -stack: go 1.12 - -test_script: - - go test ./... diff --git a/vendor/github.com/hashicorp/hcl/v2/diagnostic.go b/vendor/github.com/hashicorp/hcl/v2/diagnostic.go index c320961e1..bcf4eb39c 100644 --- a/vendor/github.com/hashicorp/hcl/v2/diagnostic.go +++ b/vendor/github.com/hashicorp/hcl/v2/diagnostic.go @@ -22,14 +22,14 @@ const ( ) // Diagnostic represents information to be presented to a user about an -// error or anomoly in parsing or evaluating configuration. +// error or anomaly in parsing or evaluating configuration. type Diagnostic struct { Severity DiagnosticSeverity // Summary and Detail contain the English-language description of the // problem. Summary is a terse description of the general problem and // detail is a more elaborate, often-multi-sentence description of - // the probem and what might be done to solve it. + // the problem and what might be done to solve it. Summary string Detail string @@ -63,6 +63,28 @@ type Diagnostic struct { // case of colliding names. Expression Expression EvalContext *EvalContext + + // Extra is an extension point for additional machine-readable information + // about this problem. + // + // Recipients of diagnostic objects may type-assert this value with + // specific interface types they know about to discover if any additional + // information is available that is interesting for their use-case. + + // + // Extra is always considered to be optional extra information and so a + // diagnostic message should still always be fully described (from the + // perspective of a human who understands the language the messages are + // written in) by the other fields in case a particular recipient does not understand the extra information.
+ // + // Functions that return diagnostics with Extra populated should typically + // document that they place values implementing a particular interface, + // rather than a concrete type, and define that interface such that its + // methods can dynamically indicate a lack of support at runtime even + // if the interface happens to be statically available. An Extra + // type that wraps other Extra values should additionally implement + // interface DiagnosticExtraUnwrapper to return the value it is wrapping + // so that callers can access inner values to type-assert against. + Extra interface{} } // Diagnostics is a list of Diagnostic instances. @@ -141,3 +163,24 @@ type DiagnosticWriter interface { WriteDiagnostic(*Diagnostic) error WriteDiagnostics(Diagnostics) error } + +// DiagnosticExtraUnwrapper is an interface implemented by values in the +// Extra field of Diagnostic when they are wrapping another "Extra" value that +// was generated downstream. +// +// Diagnostic recipients which want to examine "Extra" values to sniff for +// particular types of extra data can either type-assert this interface +// directly and repeatedly unwrap until they receive nil, or can use the +// helper function DiagnosticExtra. +type DiagnosticExtraUnwrapper interface { + // If the receiver is wrapping another "diagnostic extra" value, returns + // that value. Otherwise returns nil to indicate dynamically that nothing + // is wrapped. + // + // The "nothing is wrapped" condition can be signalled either by this + // method returning nil or by a type not implementing this interface at all. + // + // Implementers should never create unwrap "cycles" where a nested extra + // value returns a value that was also wrapping it. + UnwrapDiagnosticExtra() interface{} +} diff --git a/vendor/github.com/hashicorp/hcl/v2/diagnostic_typeparams.go b/vendor/github.com/hashicorp/hcl/v2/diagnostic_typeparams.go new file mode 100644 index 000000000..6994e2336 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/diagnostic_typeparams.go @@ -0,0 +1,39 @@ +//go:build go1.18 +// +build go1.18 + +package hcl + +// This file contains additional diagnostics-related symbols that use the +// Go 1.18 type parameters syntax and would therefore be incompatible with +// Go 1.17 and earlier. + +// DiagnosticExtra attempts to retrieve an "extra value" of type T from the +// given diagnostic, if either the diag.Extra field directly contains a value +// of that type or the value implements DiagnosticExtraUnwrapper and directly +// or indirectly returns a value of that type. +// +// Type T should typically be an interface type, so that code which generates +// diagnostics can potentially return different implementations of the same +// interface dynamically as needed. +// +// If a value of type T is found, returns that value and true to indicate +// success. Otherwise, returns the zero value of T and false to indicate +// failure. +func DiagnosticExtra[T any](diag *Diagnostic) (T, bool) { + extra := diag.Extra + var zero T + + for { + if ret, ok := extra.(T); ok { + return ret, true + } + + if unwrap, ok := extra.(DiagnosticExtraUnwrapper); ok { + // If our "extra" implements DiagnosticExtraUnwrapper then we'll + // unwrap one level and try this again.
+ extra = unwrap.UnwrapDiagnosticExtra() + } else { + return zero, false + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/v2/go.mod b/vendor/github.com/hashicorp/hcl/v2/go.mod index d80c99d9b..8f7c79fc2 100644 --- a/vendor/github.com/hashicorp/hcl/v2/go.mod +++ b/vendor/github.com/hashicorp/hcl/v2/go.mod @@ -1,21 +1,29 @@ module github.com/hashicorp/hcl/v2 +go 1.18 + require ( github.com/agext/levenshtein v1.2.1 github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3 - github.com/apparentlymart/go-textseg v1.0.0 + github.com/apparentlymart/go-textseg/v13 v13.0.0 github.com/davecgh/go-spew v1.1.1 github.com/go-test/deep v1.0.3 github.com/google/go-cmp v0.3.1 github.com/kr/pretty v0.1.0 github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 - github.com/pmezard/go-difflib v1.0.0 // indirect github.com/sergi/go-diff v1.0.0 github.com/spf13/pflag v1.0.2 + github.com/zclconf/go-cty v1.8.0 + github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b + golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167 +) + +require ( + github.com/kr/text v0.1.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/testify v1.2.2 // indirect - github.com/zclconf/go-cty v1.2.0 - golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 - golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82 // indirect - golang.org/x/text v0.3.2 // indirect + golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect + golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect + golang.org/x/text v0.3.6 // indirect ) diff --git a/vendor/github.com/hashicorp/hcl/v2/go.sum b/vendor/github.com/hashicorp/hcl/v2/go.sum index 76b135fb4..2d2ddb33b 100644 --- a/vendor/github.com/hashicorp/hcl/v2/go.sum +++ b/vendor/github.com/hashicorp/hcl/v2/go.sum @@ -4,11 +4,15 @@ github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3 h1:ZSTrOEhi github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0= github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= +github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= +github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= @@ -29,23 +33,34 @@ github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify 
v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/zclconf/go-cty v1.2.0 h1:sPHsy7ADcIZQP3vILvTjrh74ZA175TFP5vqiNK1UmlI= +github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= +github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= +github.com/zclconf/go-cty v1.8.0 h1:s4AvqaeQzJIu3ndv4gVIhplVD0krU+bgrcLSVUnaWuA= +github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b h1:FosyBZYxY34Wul7O/MSKey3txpPYyCqVO5ZyceuQJEI= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 h1:p/H982KKEjUnLJkM3tt/LemDnOc1GiZL5FCVlORJ5zo= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167 h1:O8uGbHCqlTp2P6QJSLmCojM4mN6UemYv8K+dCnmHmu0= +golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82 h1:vsphBvatvfbhlb4PO1BYSr9dzugGxJ/SQHoNufZJq1w= -golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.5/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go index 3fe84ddc3..358fd5d51 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go @@ -2,6 +2,7 @@ package hclsyntax import ( "fmt" + "sort" "sync" "github.com/hashicorp/hcl/v2" @@ -25,7 +26,33 @@ type Expression interface { } // Assert that Expression implements hcl.Expression -var assertExprImplExpr hcl.Expression = Expression(nil) +var _ hcl.Expression = Expression(nil) + +// ParenthesesExpr represents an expression written in grouping +// parentheses. +// +// The parser takes care of the precedence effect of the parentheses, so the +// only purpose of this separate expression node is to capture the source range +// of the parentheses themselves, rather than the source range of the +// expression within. All of the other expression operations just pass through +// to the underlying expression. +type ParenthesesExpr struct { + Expression + SrcRange hcl.Range +} + +var _ hcl.Expression = (*ParenthesesExpr)(nil) + +func (e *ParenthesesExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *ParenthesesExpr) walkChildNodes(w internalWalkFunc) { + // We override the walkChildNodes from the embedded Expression to + // ensure that both the parentheses _and_ the content are visible + // in a walk. + w(e.Expression) +} // LiteralValueExpr is an expression that just always returns a given value. type LiteralValueExpr struct { @@ -243,6 +270,10 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti } } + diagExtra := functionCallDiagExtra{ + calledFunctionName: e.Name, + } + params := f.Params() varParam := f.VarParam() @@ -260,6 +291,21 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti } switch { + case expandVal.Type().Equals(cty.DynamicPseudoType): + if expandVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expanding argument value", + Detail: "The expanding argument (indicated by ...) 
must not be null.", + Subject: expandExpr.Range().Ptr(), + Context: e.Range().Ptr(), + Expression: expandExpr, + EvalContext: ctx, + Extra: &diagExtra, + }) + return cty.DynamicVal, diags + } + return cty.DynamicVal, diags case expandVal.Type().IsTupleType() || expandVal.Type().IsListType() || expandVal.Type().IsSetType(): if expandVal.IsNull() { diags = append(diags, &hcl.Diagnostic{ @@ -270,6 +316,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: expandExpr, EvalContext: ctx, + Extra: &diagExtra, }) return cty.DynamicVal, diags } @@ -277,13 +324,17 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti return cty.DynamicVal, diags } + // When expanding arguments from a collection, we must first unmark + // the collection itself, and apply any marks directly to the + // elements. This ensures that marks propagate correctly. + expandVal, marks := expandVal.Unmark() newArgs := make([]Expression, 0, (len(args)-1)+expandVal.LengthInt()) newArgs = append(newArgs, args[:len(args)-1]...) it := expandVal.ElementIterator() for it.Next() { _, val := it.Element() newArgs = append(newArgs, &LiteralValueExpr{ - Val: val, + Val: val.WithMarks(marks), SrcRange: expandExpr.Range(), }) } @@ -297,6 +348,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: expandExpr, EvalContext: ctx, + Extra: &diagExtra, }) return cty.DynamicVal, diags } @@ -320,6 +372,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: e, EvalContext: ctx, + Extra: &diagExtra, }, } } @@ -337,6 +390,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: e, EvalContext: ctx, + Extra: &diagExtra, }, } } @@ -381,6 +435,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: argExpr, EvalContext: ctx, + Extra: &diagExtra, }) } } @@ -397,6 +452,10 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti resultVal, err := f.Call(argVals) if err != nil { + // For errors in the underlying call itself we also return the raw + // call error via an extra method on our "diagnostic extra" value. + diagExtra.functionCallError = err + switch terr := err.(type) { case function.ArgError: i := terr.Index @@ -406,22 +465,75 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti } else { param = varParam } - argExpr := e.Args[i] - // TODO: we should also unpick a PathError here and show the - // path to the deep value where the error was detected. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid function argument", - Detail: fmt.Sprintf( - "Invalid value for %q parameter: %s.", - param.Name, err, - ), - Subject: argExpr.StartRange().Ptr(), - Context: e.Range().Ptr(), - Expression: argExpr, - EvalContext: ctx, - }) + if param == nil || i > len(args)-1 { + // Getting here means that the function we called has a bug: + // it returned an arg error that refers to an argument index + // that wasn't present in the call. For that situation + // we'll degrade to a less specific error just to give + // some sort of answer, but best to still fix the buggy + // function so that it only returns argument indices that + // are in range. 
+ switch { + case param != nil: + // In this case we'll assume that the function was trying + // to talk about a final variadic parameter but the caller + // didn't actually provide any arguments for it. That means + // we can at least still name the parameter in the + // error message, but our source range will be the call + // as a whole because we don't have an argument expression + // to highlight specifically. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid function argument", + Detail: fmt.Sprintf( + "Invalid value for %q parameter: %s.", + param.Name, err, + ), + Subject: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + Extra: &diagExtra, + }) + default: + // This is the most degenerate case of all, where the + // index is out of range even for the declared parameters, + // and so we can't tell which parameter the function is + // trying to report an error for. Just a generic error + // report in that case. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Error in function call", + Detail: fmt.Sprintf( + "Call to function %q failed: %s.", + e.Name, err, + ), + Subject: e.StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + Extra: &diagExtra, + }) + } + } else { + argExpr := args[i] + + // TODO: we should also unpick a PathError here and show the + // path to the deep value where the error was detected. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid function argument", + Detail: fmt.Sprintf( + "Invalid value for %q parameter: %s.", + param.Name, err, + ), + Subject: argExpr.StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: argExpr, + EvalContext: ctx, + Extra: &diagExtra, + }) + } default: diags = append(diags, &hcl.Diagnostic{ @@ -435,6 +547,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: e, EvalContext: ctx, + Extra: &diagExtra, }) } @@ -467,6 +580,39 @@ func (e *FunctionCallExpr) ExprCall() *hcl.StaticCall { return ret } +// FunctionCallDiagExtra is an interface implemented by the value in the "Extra" +// field of some diagnostics returned by FunctionCallExpr.Value, giving +// cooperating callers access to some machine-readable information about the +// call that a diagnostic relates to. +type FunctionCallDiagExtra interface { + // CalledFunctionName returns the name of the function being called at + // the time the diagnostic was generated, if any. Returns an empty string + // if there is no known called function. + CalledFunctionName() string + + // FunctionCallError returns the error value returned by the implementation + // of the function being called, if any. Returns nil if the diagnostic was + // not returned in response to a call error. + // + // Some errors related to calling functions are generated by HCL itself + // rather than by the underlying function, in which case this method + // will return nil. 
+ FunctionCallError() error +} + +type functionCallDiagExtra struct { + calledFunctionName string + functionCallError error +} + +func (e *functionCallDiagExtra) CalledFunctionName() string { + return e.calledFunctionName +} + +func (e *functionCallDiagExtra) FunctionCallError() error { + return e.functionCallError +} + type ConditionalExpr struct { Condition Expression TrueResult Expression @@ -521,12 +667,8 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic Severity: hcl.DiagError, Summary: "Inconsistent conditional result types", Detail: fmt.Sprintf( - // FIXME: Need a helper function for showing natural-language type diffs, - // since this will generate some useless messages in some cases, like - // "These expressions are object and object respectively" if the - // object types don't exactly match. - "The true and false result expressions must have consistent types. The given expressions are %s and %s, respectively.", - trueResult.Type().FriendlyName(), falseResult.Type().FriendlyName(), + "The true and false result expressions must have consistent types. %s.", + describeConditionalTypeMismatch(trueResult.Type(), falseResult.Type()), ), Subject: hcl.RangeBetween(e.TrueResult.Range(), e.FalseResult.Range()).Ptr(), Context: &e.SrcRange, @@ -558,7 +700,7 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Incorrect condition type", - Detail: fmt.Sprintf("The condition expression must be of type bool."), + Detail: "The condition expression must be of type bool.", Subject: e.Condition.Range().Ptr(), Context: &e.SrcRange, Expression: e.Condition, @@ -567,6 +709,8 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic return cty.UnknownVal(resultType), diags } + // Unmark result before testing for truthiness + condResult, _ = condResult.UnmarkDeep() if condResult.True() { diags = append(diags, trueDiags...) if convs[0] != nil { @@ -616,6 +760,144 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic } } +// describeConditionalTypeMismatch makes a best effort to describe the +// difference between types in the true and false arms of a conditional +// expression in a way that would be useful to someone trying to understand +// why their conditional expression isn't valid. +// +// NOTE: This function is only designed to deal with situations +// where trueTy and falseTy are different. Calling it with two equal +// types will produce a nonsense result. This function also only really +// deals with situations that type unification can't resolve, so we should +// call this function only after trying type unification first. +func describeConditionalTypeMismatch(trueTy, falseTy cty.Type) string { + // The main tricky cases here are when both trueTy and falseTy are + // of the same structural type kind, such as both being object types + // or both being tuple types. In that case the "FriendlyName" method + // returns only "object" or "tuple" and so we need to do some more + // work to describe what's different inside them. + + switch { + case trueTy.IsObjectType() && falseTy.IsObjectType(): + // We'll first gather up the attribute names and sort them. In the + // event that there are multiple attributes that disagree across + // the two types, we'll prefer to report the one that sorts lexically + // least just so that our error message is consistent between + // evaluations. 
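For the FunctionCallDiagExtra interface above, a sketch of how a caller might consume it together with the generic hcl.DiagnosticExtra helper from earlier in this diff (a hypothetical program requiring Go 1.18+; the empty EvalContext simply forces a function-call diagnostic):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
	expr, _ := hclsyntax.ParseExpression([]byte(`trim("a")`), "x.hcl", hcl.InitialPos)
	_, diags := expr.Value(&hcl.EvalContext{}) // no functions defined: the call fails
	for _, diag := range diags {
		// DiagnosticExtra walks diag.Extra, unwrapping as needed, and returns
		// the first value implementing the requested interface.
		if extra, ok := hcl.DiagnosticExtra[hclsyntax.FunctionCallDiagExtra](diag); ok {
			fmt.Println(extra.CalledFunctionName()) // "trim"
			// FunctionCallError() is nil here because HCL itself rejected the
			// call; it is non-nil only when the function ran and returned an error.
			fmt.Println(extra.FunctionCallError())
		}
	}
}
```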
+ var trueAttrs, falseAttrs []string + for name := range trueTy.AttributeTypes() { + trueAttrs = append(trueAttrs, name) + } + sort.Strings(trueAttrs) + for name := range falseTy.AttributeTypes() { + falseAttrs = append(falseAttrs, name) + } + sort.Strings(falseAttrs) + + for _, name := range trueAttrs { + if !falseTy.HasAttribute(name) { + return fmt.Sprintf("The 'true' value includes object attribute %q, which is absent in the 'false' value", name) + } + trueAty := trueTy.AttributeType(name) + falseAty := falseTy.AttributeType(name) + if !trueAty.Equals(falseAty) { + // For deeply-nested differences this will likely get very + // clunky quickly by nesting these messages inside one another, + // but we'll accept that for now in the interests of producing + // _some_ useful feedback, even if it isn't as concise as + // we'd prefer it to be. Deeply-nested structures in + // conditionals are thankfully not super common. + return fmt.Sprintf( + "Type mismatch for object attribute %q: %s", + name, describeConditionalTypeMismatch(trueAty, falseAty), + ) + } + } + for _, name := range falseAttrs { + if !trueTy.HasAttribute(name) { + return fmt.Sprintf("The 'false' value includes object attribute %q, which is absent in the 'true' value", name) + } + // NOTE: We don't need to check the attribute types again, because + // any attribute that both types have in common would already have + // been checked in the previous loop. + } + case trueTy.IsTupleType() && falseTy.IsTupleType(): + trueEtys := trueTy.TupleElementTypes() + falseEtys := falseTy.TupleElementTypes() + + if trueCount, falseCount := len(trueEtys), len(falseEtys); trueCount != falseCount { + return fmt.Sprintf("The 'true' tuple has length %d, but the 'false' tuple has length %d", trueCount, falseCount) + } + + // NOTE: Thanks to the condition above, we know that both tuples are + // of the same length and so they must have some differing types + // instead. + for i := range trueEtys { + trueEty := trueEtys[i] + falseEty := falseEtys[i] + + if !trueEty.Equals(falseEty) { + // For deeply-nested differences this will likely get very + // clunky quickly by nesting these messages inside one another, + // but we'll accept that for now in the interests of producing + // _some_ useful feedback, even if it isn't as concise as + // we'd prefer it to be. Deeply-nested structures in + // conditionals are thankfully not super common. + return fmt.Sprintf( + "Type mismatch for tuple element %d: %s", + i, describeConditionalTypeMismatch(trueEty, falseEty), + ) + } + } + case trueTy.IsCollectionType() && falseTy.IsCollectionType(): + // For this case we're specifically interested in the situation where: + // - both collections are of the same kind, AND + // - the element types of both are either object or tuple types. + // This is just to avoid writing a useless statement like + // "The 'true' value is list of object, but the 'false' value is list of object". + // This still doesn't account for more awkward cases like collections + // of collections of structural types, but we won't let perfect be + // the enemy of the good. 
+ trueEty := trueTy.ElementType() + falseEty := falseTy.ElementType() + if (trueTy.IsListType() && falseTy.IsListType()) || (trueTy.IsMapType() && falseTy.IsMapType()) || (trueTy.IsSetType() && falseTy.IsSetType()) { + if (trueEty.IsObjectType() && falseEty.IsObjectType()) || (trueEty.IsTupleType() && falseEty.IsTupleType()) { + noun := "collection" + switch { // NOTE: We now know that trueTy and falseTy have the same collection kind + case trueTy.IsListType(): + noun = "list" + case trueTy.IsSetType(): + noun = "set" + case trueTy.IsMapType(): + noun = "map" + } + return fmt.Sprintf( + "Mismatched %s element types: %s", + noun, describeConditionalTypeMismatch(trueEty, falseEty), + ) + } + } + } + + // If we don't manage any more specialized message, we'll just report + // what the two types are. + trueName := trueTy.FriendlyName() + falseName := falseTy.FriendlyName() + if trueName == falseName { + // Absolute last resort for when we have no special rule above but + // we have two types with the same friendly name anyway. This is + // the most vague of all possible messages but is reserved for + // particularly awkward cases, like lists of lists of differing tuple + // types. + return "At least one deeply-nested attribute or element is not compatible across both the 'true' and the 'false' value" + } + return fmt.Sprintf( + "The 'true' value is %s, but the 'false' value is %s", + trueTy.FriendlyName(), falseTy.FriendlyName(), + ) + +} + func (e *ConditionalExpr) Range() hcl.Range { return e.SrcRange } @@ -725,6 +1007,7 @@ func (e *ObjectConsExpr) walkChildNodes(w internalWalkFunc) { func (e *ObjectConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { var vals map[string]cty.Value var diags hcl.Diagnostics + var marks []cty.ValueMarks // This will get set to true if we fail to produce any of our keys, // either because they are actually unknown or if the evaluation produces @@ -762,6 +1045,9 @@ func (e *ObjectConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics continue } + key, keyMarks := key.Unmark() + marks = append(marks, keyMarks) + var err error key, err = convert.Convert(key, cty.String) if err != nil { @@ -791,7 +1077,7 @@ func (e *ObjectConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics return cty.DynamicVal, diags } - return cty.ObjectVal(vals), diags + return cty.ObjectVal(vals).WithMarks(marks...), diags } func (e *ObjectConsExpr) Range() hcl.Range { @@ -921,6 +1207,7 @@ type ForExpr struct { func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { var diags hcl.Diagnostics + var marks []cty.ValueMarks collVal, collDiags := e.CollExpr.Value(ctx) diags = append(diags, collDiags...) 
@@ -940,6 +1227,10 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { if collVal.Type() == cty.DynamicPseudoType { return cty.DynamicVal, diags } + // Unmark collection before checking for iterability, because marked + // values cannot be iterated + collVal, collMarks := collVal.Unmark() + marks = append(marks, collMarks) if !collVal.CanIterateElements() { diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, @@ -1064,7 +1355,11 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { continue } - if include.False() { + // Extract and merge marks from the include expression into the + // main set of marks + includeUnmarked, includeMarks := include.Unmark() + marks = append(marks, includeMarks) + if includeUnmarked.False() { // Skip this element continue } @@ -1109,6 +1404,9 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { continue } + key, keyMarks := key.Unmark() + marks = append(marks, keyMarks) + val, valDiags := e.ValExpr.Value(childCtx) diags = append(diags, valDiags...) @@ -1147,7 +1445,7 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { } } - return cty.ObjectVal(vals), diags + return cty.ObjectVal(vals).WithMarks(marks...), diags } else { // Producing a tuple @@ -1208,7 +1506,11 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { continue } - if include.False() { + // Extract and merge marks from the include expression into the + // main set of marks + includeUnmarked, includeMarks := include.Unmark() + marks = append(marks, includeMarks) + if includeUnmarked.False() { // Skip this element continue } @@ -1223,7 +1525,7 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { return cty.DynamicVal, diags } - return cty.TupleVal(vals), diags + return cty.TupleVal(vals).WithMarks(marks...), diags } } @@ -1286,12 +1588,6 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { } sourceTy := sourceVal.Type() - if sourceTy == cty.DynamicPseudoType { - // If we don't even know the _type_ of our source value yet then - // we'll need to defer all processing, since we can't decide our - // result type either. - return cty.DynamicVal, diags - } // A "special power" of splat expressions is that they can be applied // both to tuples/lists and to other values, and in the latter case @@ -1315,9 +1611,29 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { return cty.DynamicVal, diags } + if sourceTy == cty.DynamicPseudoType { + // If we don't even know the _type_ of our source value yet then + // we'll need to defer all processing, since we can't decide our + // result type either. + return cty.DynamicVal, diags + } + + upgradedUnknown := false if autoUpgrade { + // If we're upgrading an unknown value to a tuple/list, the result + // cannot be known. Otherwise a tuple containing an unknown value will + // upgrade to a different number of elements depending on whether + // sourceVal becomes null or not. + // We record this condition here so we can process any remaining + // expression after the * to verify the result of the traversal. For + // example, it is valid to use a splat on a single object to retrieve a + // list of a single attribute, but we still need to check if that + // attribute actually exists. 
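The comment above walks through why an unknown splat source must stay wholly unknown while a known single object is still auto-upgraded; a sketch of both behaviors (a hypothetical program):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	expr, _ := hclsyntax.ParseExpression([]byte(`obj[*].name`), "x.hcl", hcl.InitialPos)

	// Known single object: auto-upgraded to a one-element tuple, ["a"].
	known, _ := expr.Value(&hcl.EvalContext{Variables: map[string]cty.Value{
		"obj": cty.ObjectVal(map[string]cty.Value{"name": cty.StringVal("a")}),
	}})
	fmt.Println(known.GoString())

	// Unknown source: the upgraded element count cannot be predicted, so the
	// result must be wholly unknown (the v2.11.x fix recorded as upgradedUnknown).
	unknown, _ := expr.Value(&hcl.EvalContext{Variables: map[string]cty.Value{
		"obj": cty.UnknownVal(cty.Object(map[string]cty.Type{"name": cty.String})),
	}})
	fmt.Println(unknown.IsKnown()) // false
}
```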
+ upgradedUnknown = !sourceVal.IsKnown() + + sourceVal = cty.TupleVal([]cty.Value{sourceVal}) sourceTy = sourceVal.Type() + } // We'll compute our result type lazily if we need it. In the normal case @@ -1359,6 +1675,9 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { return cty.UnknownVal(ty), diags } + // Unmark the collection, and save the marks to apply to the returned + // collection result + sourceVal, marks := sourceVal.Unmark() vals := make([]cty.Value, 0, sourceVal.LengthInt()) it := sourceVal.ElementIterator() if ctx == nil { @@ -1379,6 +1698,10 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { } e.Item.clearValue(ctx) // clean up our temporary value + if upgradedUnknown { + return cty.DynamicVal, diags + } + if !isKnown { // We'll ignore the resultTy diagnostics in this case since they // will just be the same errors we saw while iterating above. @@ -1393,9 +1716,9 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { diags = append(diags, tyDiags...) return cty.ListValEmpty(ty.ElementType()), diags } - return cty.ListVal(vals), diags + return cty.ListVal(vals).WithMarks(marks), diags default: - return cty.TupleVal(vals), diags + return cty.TupleVal(vals).WithMarks(marks), diags } } diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go index 9d425115f..c3f96943d 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go @@ -26,6 +26,9 @@ func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) var diags hcl.Diagnostics isKnown := true + // Maintain a set of marks for values used in the template + marks := make(cty.ValueMarks) + for _, part := range e.Parts { partVal, partDiags := part.Value(ctx) diags = append(diags, partDiags...) @@ -45,6 +48,12 @@ func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) continue } + // Unmark the part and merge its marks into the set + unmarkedVal, partMarks := partVal.Unmark() + for k, v := range partMarks { + marks[k] = v + } + if !partVal.IsKnown() { // If any part is unknown then the result as a whole must be // unknown too.
We'll keep on processing the rest of the parts @@ -54,7 +63,7 @@ func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) continue } - strVal, err := convert.Convert(partVal, cty.String) + strVal, err := convert.Convert(unmarkedVal, cty.String) if err != nil { diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, @@ -74,11 +83,15 @@ func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) buf.WriteString(strVal.AsString()) } + var ret cty.Value if !isKnown { - return cty.UnknownVal(cty.String), diags + ret = cty.UnknownVal(cty.String) + } else { + ret = cty.StringVal(buf.String()) } - return cty.StringVal(buf.String()), diags + // Apply the full set of marks to the returned value + return ret.WithMarks(marks), diags } func (e *TemplateExpr) Range() hcl.Range { @@ -139,6 +152,8 @@ func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti return cty.UnknownVal(cty.String), diags } + tuple, marks := tuple.Unmark() + allMarks := []cty.ValueMarks{marks} buf := &bytes.Buffer{} it := tuple.ElementIterator() for it.Next() { @@ -158,7 +173,7 @@ func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti continue } if val.Type() == cty.DynamicPseudoType { - return cty.UnknownVal(cty.String), diags + return cty.UnknownVal(cty.String).WithMarks(marks), diags } strVal, err := convert.Convert(val, cty.String) if err != nil { @@ -176,13 +191,17 @@ func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti continue } if !val.IsKnown() { - return cty.UnknownVal(cty.String), diags + return cty.UnknownVal(cty.String).WithMarks(marks), diags } + strVal, strValMarks := strVal.Unmark() + if len(strValMarks) > 0 { + allMarks = append(allMarks, strValMarks) + } buf.WriteString(strVal.AsString()) } - return cty.StringVal(buf.String()), diags + return cty.StringVal(buf.String()).WithMarks(allMarks...), diags } func (e *TemplateJoinExpr) Range() hcl.Range { diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go index f67d989e5..ec83d3dc2 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go @@ -6,7 +6,7 @@ import ( "strconv" "unicode/utf8" - "github.com/apparentlymart/go-textseg/textseg" + "github.com/apparentlymart/go-textseg/v13/textseg" "github.com/hashicorp/hcl/v2" "github.com/zclconf/go-cty/cty" ) @@ -76,14 +76,37 @@ Token: default: bad := p.Read() if !p.recovery { - if bad.Type == TokenOQuote { + switch bad.Type { + case TokenOQuote: diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Invalid argument name", Detail: "Argument names must not be quoted.", Subject: &bad.Range, }) - } else { + case TokenEOF: + switch end { + case TokenCBrace: + // If we're looking for a closing brace then we're parsing a block + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed configuration block", + Detail: "There is no closing brace for this block before the end of the file. This may be caused by incorrect brace nesting elsewhere in this file.", + Subject: &startRange, + }) + default: + // The only other "end" should itself be TokenEOF (for + // the top-level body) and so we shouldn't get here, + // but we'll return a generic error message anyway to + // be resilient. 
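The new TokenEOF branches above are easiest to see from the caller's side; a sketch of the improved diagnostic for a block that is never closed (a hypothetical program; the block and attribute names are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
	// The closing brace is missing, so parsing reaches end of file
	// while still inside the block.
	src := []byte("service \"web\" {\n  port = 8080\n")
	_, diags := hclsyntax.ParseConfig(src, "main.hcl", hcl.InitialPos)
	for _, d := range diags {
		// With this change the summary names the unclosed container,
		// e.g. "Unclosed configuration block", instead of a generic error.
		fmt.Println(d.Summary)
	}
}
```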
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed configuration body", + Detail: "Found end of file before the end of this configuration body.", + Subject: &startRange, + }) + } + default: diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Argument or block definition required", @@ -144,8 +167,6 @@ func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) { }, } } - - return nil, nil } // parseSingleAttrBody is a weird variant of ParseBody that deals with the @@ -388,12 +409,23 @@ Token: // user intent for this one, we'll skip it if we're already in // recovery mode. if !p.recovery { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid single-argument block definition", - Detail: "A single-line block definition must end with a closing brace immediately after its single argument definition.", - Subject: p.Peek().Range.Ptr(), - }) + switch p.Peek().Type { + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed configuration block", + Detail: "There is no closing brace for this block before the end of the file. This may be caused by incorrect brace nesting elsewhere in this file.", + Subject: oBrace.Range.Ptr(), + Context: hcl.RangeBetween(ident.Range, oBrace.Range).Ptr(), + }) + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid single-argument block definition", + Detail: "A single-line block definition must end with a closing brace immediately after its single argument definition.", + Subject: p.Peek().Range.Ptr(), + }) + } } p.recover(TokenCBrace) } @@ -670,6 +702,7 @@ Traversal: trav := make(hcl.Traversal, 0, 1) var firstRange, lastRange hcl.Range firstRange = p.NextRange() + lastRange = marker.Range for p.Peek().Type == TokenDot { dot := p.Read() @@ -910,7 +943,7 @@ func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) { switch start.Type { case TokenOParen: - p.Read() // eat open paren + oParen := p.Read() // eat open paren p.PushIncludeNewlines(false) @@ -936,9 +969,19 @@ func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) { p.setRecovery() } - p.Read() // eat closing paren + cParen := p.Read() // eat closing paren p.PopIncludeNewlines() + // Our parser's already taken care of the precedence effect of the + // parentheses by considering them to be a kind of "term", but we + // still need to include the parentheses in our AST so we can give + // an accurate representation of the source range that includes the + // open and closing parentheses. 
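Since grouping parentheses now survive into the AST, callers can observe them directly; a sketch (a hypothetical program):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
	expr, _ := hclsyntax.ParseExpression([]byte(`(1 + 2)`), "x.hcl", hcl.InitialPos)

	// The grouping parens are an explicit node whose range covers the
	// parentheses themselves; evaluation just passes through to the inner
	// expression.
	if pe, ok := expr.(*hclsyntax.ParenthesesExpr); ok {
		fmt.Println(pe.Range().String()) // x.hcl:1,1-8
	}
	val, _ := expr.Value(nil)
	fmt.Println(val.AsBigFloat()) // 3
}
```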
+ expr = &ParenthesesExpr{
+ Expression: expr,
+ SrcRange: hcl.RangeBetween(oParen.Range, cParen.Range),
+ }
+
 return expr, diags
 
 case TokenNumberLit:
@@ -1048,12 +1091,22 @@ func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) {
 default:
 var diags hcl.Diagnostics
 if !p.recovery {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid expression",
- Detail: "Expected the start of an expression, but found an invalid expression token.",
- Subject: &start.Range,
- })
+ switch start.Type {
+ case TokenEOF:
+ diags = append(diags, &hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: "Missing expression",
+ Detail: "Expected the start of an expression, but found the end of the file.",
+ Subject: &start.Range,
+ })
+ default:
+ diags = append(diags, &hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: "Invalid expression",
+ Detail: "Expected the start of an expression, but found an invalid expression token.",
+ Subject: &start.Range,
+ })
+ }
 }
 
 p.setRecovery()
@@ -1152,13 +1205,23 @@ Token:
 }
 
 if sep.Type != TokenComma {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Missing argument separator",
- Detail: "A comma is required to separate each function argument from the next.",
- Subject: &sep.Range,
- Context: hcl.RangeBetween(name.Range, sep.Range).Ptr(),
- })
+ switch sep.Type {
+ case TokenEOF:
+ diags = append(diags, &hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: "Unterminated function call",
+ Detail: "There is no closing parenthesis for this function call before the end of the file. This may be caused by incorrect parenthesis nesting elsewhere in this file.",
+ Subject: hcl.RangeBetween(name.Range, openTok.Range).Ptr(),
+ })
+ default:
+ diags = append(diags, &hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: "Missing argument separator",
+ Detail: "A comma is required to separate each function argument from the next.",
+ Subject: &sep.Range,
+ Context: hcl.RangeBetween(name.Range, sep.Range).Ptr(),
+ })
+ }
 closeTok = p.recover(TokenCParen)
 break Token
 }
@@ -1231,13 +1294,23 @@ func (p *parser) parseTupleCons() (Expression, hcl.Diagnostics) {
 
 if next.Type != TokenComma {
 if !p.recovery {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Missing item separator",
- Detail: "Expected a comma to mark the beginning of the next item.",
- Subject: &next.Range,
- Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
- })
+ switch next.Type {
+ case TokenEOF:
+ diags = append(diags, &hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: "Unterminated tuple constructor expression",
+ Detail: "There is no corresponding closing bracket before the end of the file. This may be caused by incorrect bracket nesting elsewhere in this file.",
+ Subject: open.Range.Ptr(),
+ })
+ default:
+ diags = append(diags, &hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: "Missing item separator",
+ Detail: "Expected a comma to mark the beginning of the next item.",
+ Subject: &next.Range,
+ Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
+ })
+ }
 }
 close = p.recover(TokenCBrack)
 break
@@ -1348,6 +1421,13 @@ func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) {
 Subject: &next.Range,
 Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
 })
+ case TokenEOF:
+ diags = append(diags, &hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: "Unterminated object constructor expression",
+ Detail: "There is no corresponding closing brace before the end of the file.
This may be caused by incorrect brace nesting elsewhere in this file.", + Subject: open.Range.Ptr(), + }) default: diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, @@ -1388,13 +1468,23 @@ func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) { if next.Type != TokenComma && next.Type != TokenNewline { if !p.recovery { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing attribute separator", - Detail: "Expected a newline or comma to mark the beginning of the next attribute.", - Subject: &next.Range, - Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), - }) + switch next.Type { + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unterminated object constructor expression", + Detail: "There is no corresponding closing brace before the end of the file. This may be caused by incorrect brace nesting elsewhere in this file.", + Subject: open.Range.Ptr(), + }) + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing attribute separator", + Detail: "Expected a newline or comma to mark the beginning of the next attribute.", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + } } close = p.recover(TokenCBrace) break @@ -1650,7 +1740,7 @@ func (p *parser) parseQuotedStringLiteral() (string, hcl.Range, hcl.Diagnostics) var diags hcl.Diagnostics ret := &bytes.Buffer{} - var cQuote Token + var endRange hcl.Range Token: for { @@ -1658,7 +1748,7 @@ Token: switch tok.Type { case TokenCQuote: - cQuote = tok + endRange = tok.Range break Token case TokenQuotedLit: @@ -1701,6 +1791,7 @@ Token: Subject: &tok.Range, Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), }) + endRange = tok.Range break Token default: @@ -1713,13 +1804,14 @@ Token: Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), }) p.recover(TokenCQuote) + endRange = tok.Range break Token } } - return ret.String(), hcl.RangeBetween(oQuote.Range, cQuote.Range), diags + return ret.String(), hcl.RangeBetween(oQuote.Range, endRange), diags } // ParseStringLiteralToken processes the given token, which must be either a diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go index eb8f7ea38..ae8805856 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go @@ -5,7 +5,7 @@ import ( "strings" "unicode" - "github.com/apparentlymart/go-textseg/textseg" + "github.com/apparentlymart/go-textseg/v13/textseg" "github.com/hashicorp/hcl/v2" "github.com/zclconf/go-cty/cty" ) @@ -413,13 +413,44 @@ Token: close := p.Peek() if close.Type != TokenTemplateSeqEnd { if !p.recovery { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Extra characters after interpolation expression", - Detail: "Expected a closing brace to end the interpolation expression, but found extra characters.", - Subject: &close.Range, - Context: hcl.RangeBetween(startRange, close.Range).Ptr(), - }) + switch close.Type { + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed template interpolation sequence", + Detail: "There is no closing brace for this interpolation sequence before the end of the file. 
This might be caused by incorrect nesting inside the given expression.", + Subject: &startRange, + }) + case TokenColon: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extra characters after interpolation expression", + Detail: "Template interpolation doesn't expect a colon at this location. Did you intend this to be a literal sequence to be processed as part of another language? If so, you can escape it by starting with \"$${\" instead of just \"${\".", + Subject: &close.Range, + Context: hcl.RangeBetween(startRange, close.Range).Ptr(), + }) + default: + if (close.Type == TokenCQuote || close.Type == TokenOQuote) && end == TokenCQuote { + // We'll get here if we're processing a _quoted_ + // template and we find an errant quote inside an + // interpolation sequence, which suggests that + // the interpolation sequence is missing its terminator. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed template interpolation sequence", + Detail: "There is no closing brace for this interpolation sequence before the end of the quoted template. This might be caused by incorrect nesting inside the given expression.", + Subject: &startRange, + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extra characters after interpolation expression", + Detail: "Expected a closing brace to end the interpolation expression, but found extra characters.\n\nThis can happen when you include interpolation syntax for another language, such as shell scripting, but forget to escape the interpolation start token. If this is an embedded sequence for another language, escape it by starting with \"$${\" instead of just \"${\".", + Subject: &close.Range, + Context: hcl.RangeBetween(startRange, close.Range).Ptr(), + }) + } + } } p.recover(TokenTemplateSeqEnd) } else { diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md index 3fc5f5f1b..33c152daa 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md @@ -273,7 +273,7 @@ tuple = "[" ( object = "{" ( (objectelem ("," objectelem)* ","?)? ) "}"; -objectelem = (Identifier | Expression) "=" Expression; +objectelem = (Identifier | Expression) ("=" | ":") Expression; ``` Only tuple and object values can be directly constructed via native syntax. @@ -293,18 +293,20 @@ Between the open and closing delimiters of these sequences, newline sequences are ignored as whitespace. There is a syntax ambiguity between _for expressions_ and collection values -whose first element is a reference to a variable named `for`. The -_for expression_ interpretation has priority, so to produce a tuple whose -first element is the value of a variable named `for`, or an object with a -key named `for`, use parentheses to disambiguate: +whose first element starts with an identifier named `for`. The _for expression_ +interpretation has priority, so to write a key literally named `for` +or an expression derived from a variable named `for` you must use parentheses +or quotes to disambiguate: - `[for, foo, baz]` is a syntax error. - `[(for), foo, baz]` is a tuple whose first element is the value of variable `for`. -- `{for: 1, baz: 2}` is a syntax error. -- `{(for): 1, baz: 2}` is an object with an attribute literally named `for`. -- `{baz: 2, for: 1}` is equivalent to the previous example, and resolves the +- `{for = 1, baz = 2}` is a syntax error. 
+- `{"for" = 1, baz = 2}` is an object with an attribute literally named `for`. +- `{baz = 2, for = 1}` is equivalent to the previous example, and resolves the ambiguity by reordering. +- `{(for) = 1, baz = 2}` is an object with a key with the same value as the + variable `for`. ### Template Expressions @@ -489,7 +491,7 @@ that were produced against each distinct key. - `[for v in ["a", "b"]: v]` returns `["a", "b"]`. - `[for i, v in ["a", "b"]: i]` returns `[0, 1]`. - `{for i, v in ["a", "b"]: v => i}` returns `{a = 0, b = 1}`. -- `{for i, v in ["a", "a", "b"]: k => v}` produces an error, because attribute +- `{for i, v in ["a", "a", "b"]: v => i}` produces an error, because attribute `a` is defined twice. - `{for i, v in ["a", "a", "b"]: v => i...}` returns `{a = [0, 1], b = [2]}`. @@ -888,7 +890,7 @@ as templates. - `hello ${true}` produces the string `"hello true"` - `${""}${true}` produces the string `"true"` because there are two interpolation sequences, even though one produces an empty result. -- `%{ for v in [true] }${v}%{ endif }` produces the string `true` because +- `%{ for v in [true] }${v}%{ endfor }` produces the string `true` because the presence of the `for` directive circumvents the unwrapping even though the final result is a single value. diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go index 2f7470c77..f42ae918e 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go @@ -13,17 +13,12 @@ func (b *Block) AsHCLBlock() *hcl.Block { return nil } - lastHeaderRange := b.TypeRange - if len(b.LabelRanges) > 0 { - lastHeaderRange = b.LabelRanges[len(b.LabelRanges)-1] - } - return &hcl.Block{ Type: b.Type, Labels: b.Labels, Body: b.Body, - DefRange: hcl.RangeBetween(b.TypeRange, lastHeaderRange), + DefRange: b.DefRange(), TypeRange: b.TypeRange, LabelRanges: b.LabelRanges, } @@ -40,7 +35,7 @@ type Body struct { hiddenBlocks map[string]struct{} SrcRange hcl.Range - EndRange hcl.Range // Final token of the body, for reporting missing items + EndRange hcl.Range // Final token of the body (zero-length range) } // Assert that *Body implements hcl.Body @@ -390,5 +385,9 @@ func (b *Block) Range() hcl.Range { } func (b *Block) DefRange() hcl.Range { - return hcl.RangeBetween(b.TypeRange, b.OpenBraceRange) + lastHeaderRange := b.TypeRange + if len(b.LabelRanges) > 0 { + lastHeaderRange = b.LabelRanges[len(b.LabelRanges)-1] + } + return hcl.RangeBetween(b.TypeRange, lastHeaderRange) } diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go index c7ffe2073..5ef093f9a 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go @@ -4,7 +4,7 @@ import ( "bytes" "fmt" - "github.com/apparentlymart/go-textseg/textseg" + "github.com/apparentlymart/go-textseg/v13/textseg" "github.com/hashicorp/hcl/v2" ) @@ -191,8 +191,10 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics { toldBadUTF8 := 0 for _, tok := range tokens { - // copy token so it's safe to point to it - tok := tok + tokRange := func() *hcl.Range { + r := tok.Range + return &r + } switch tok.Type { case TokenBitwiseAnd, TokenBitwiseOr, TokenBitwiseXor, TokenBitwiseNot: @@ -202,7 +204,7 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics { case TokenBitwiseAnd: suggestion = " Did you mean boolean AND (\"&&\")?" 
case TokenBitwiseOr:
- suggestion = " Did you mean boolean OR (\"&&\")?"
+ suggestion = " Did you mean boolean OR (\"||\")?"
 case TokenBitwiseNot:
 suggestion = " Did you mean boolean NOT (\"!\")?"
 }
@@ -211,7 +213,7 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics {
 Severity: hcl.DiagError,
 Summary: "Unsupported operator",
 Detail: fmt.Sprintf("Bitwise operators are not supported.%s", suggestion),
- Subject: &tok.Range,
+ Subject: tokRange(),
 })
 toldBitwise++
 }
@@ -221,7 +223,7 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics {
 Severity: hcl.DiagError,
 Summary: "Unsupported operator",
 Detail: "\"**\" is not a supported operator. Exponentiation is not supported as an operator.",
- Subject: &tok.Range,
+ Subject: tokRange(),
 })
 toldExponent++
@@ -234,7 +236,7 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics {
 Severity: hcl.DiagError,
 Summary: "Invalid character",
 Detail: "The \"`\" character is not valid. To create a multi-line string, use the \"heredoc\" syntax, like \"<<EOT\".",
+ if _, err := w.Write(line); err != nil {
+ return err
+ }
+ }
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ return nil
+ }
+
+ return err
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go
index 2e88dd3e6..6d7b768ee 100644
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go
+++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go
@@ -1,3 +1,4 @@
+//go:build !linux
 // +build !linux
 
 package tfexec
@@ -6,26 +7,12 @@ import (
 "context"
 "os/exec"
 "strings"
+ "sync"
 )
 
 func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error {
 var errBuf strings.Builder
 
- cmd.Stdout = mergeWriters(cmd.Stdout, tf.stdout)
- cmd.Stderr = mergeWriters(cmd.Stderr, tf.stderr, &errBuf)
-
- go func() {
- <-ctx.Done()
- if ctx.Err() == context.DeadlineExceeded || ctx.Err() == context.Canceled {
- if cmd != nil && cmd.Process != nil && cmd.ProcessState != nil {
- err := cmd.Process.Kill()
- if err != nil {
- tf.logger.Printf("error from kill: %s", err)
- }
- }
- }
- }()
-
 // check for early cancellation
 select {
 case <-ctx.Done():
@@ -33,7 +20,52 @@ func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error {
 default:
 }
 
- err := cmd.Run()
+ // Read stdout / stderr logs from pipe instead of setting cmd.Stdout and
+ // cmd.Stderr because it can cause hanging when killing the command
+ // https://github.com/golang/go/issues/23019
+ stdoutWriter := mergeWriters(cmd.Stdout, tf.stdout)
+ stderrWriter := mergeWriters(tf.stderr, &errBuf)
+
+ cmd.Stderr = nil
+ cmd.Stdout = nil
+
+ stdoutPipe, err := cmd.StdoutPipe()
+ if err != nil {
+ return err
+ }
+
+ stderrPipe, err := cmd.StderrPipe()
+ if err != nil {
+ return err
+ }
+
+ err = cmd.Start()
+ if err == nil && ctx.Err() != nil {
+ err = ctx.Err()
+ }
+ if err != nil {
+ return tf.wrapExitError(ctx, err, "")
+ }
+
+ var errStdout, errStderr error
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ errStdout = writeOutput(ctx, stdoutPipe, stdoutWriter)
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ errStderr = writeOutput(ctx, stderrPipe, stderrWriter)
+ }()
+
+ // Reads from pipes must be completed before calling cmd.Wait().
Otherwise + // can cause a race condition + wg.Wait() + + err = cmd.Wait() if err == nil && ctx.Err() != nil { err = ctx.Err() } @@ -41,5 +73,13 @@ func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { return tf.wrapExitError(ctx, err, errBuf.String()) } + // Return error if there was an issue reading the std out/err + if errStdout != nil && ctx.Err() != nil { + return tf.wrapExitError(ctx, errStdout, errBuf.String()) + } + if errStderr != nil && ctx.Err() != nil { + return tf.wrapExitError(ctx, errStderr, errBuf.String()) + } + return nil } diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go index 7cbdcb96f..6fa40e0aa 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go @@ -4,15 +4,13 @@ import ( "context" "os/exec" "strings" + "sync" "syscall" ) func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { var errBuf strings.Builder - cmd.Stdout = mergeWriters(cmd.Stdout, tf.stdout) - cmd.Stderr = mergeWriters(cmd.Stderr, tf.stderr, &errBuf) - cmd.SysProcAttr = &syscall.SysProcAttr{ // kill children if parent is dead Pdeathsig: syscall.SIGKILL, @@ -20,21 +18,6 @@ func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { Setpgid: true, } - go func() { - <-ctx.Done() - if ctx.Err() == context.DeadlineExceeded || ctx.Err() == context.Canceled { - if cmd != nil && cmd.Process != nil && cmd.ProcessState != nil { - // send SIGINT to process group - err := syscall.Kill(-cmd.Process.Pid, syscall.SIGINT) - if err != nil { - tf.logger.Printf("error from SIGINT: %s", err) - } - } - - // TODO: send a kill if it doesn't respond for a bit? - } - }() - // check for early cancellation select { case <-ctx.Done(): @@ -42,7 +25,52 @@ func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { default: } - err := cmd.Run() + // Read stdout / stderr logs from pipe instead of setting cmd.Stdout and + // cmd.Stderr because it can cause hanging when killing the command + // https://github.com/golang/go/issues/23019 + stdoutWriter := mergeWriters(cmd.Stdout, tf.stdout) + stderrWriter := mergeWriters(tf.stderr, &errBuf) + + cmd.Stderr = nil + cmd.Stdout = nil + + stdoutPipe, err := cmd.StdoutPipe() + if err != nil { + return err + } + + stderrPipe, err := cmd.StderrPipe() + if err != nil { + return err + } + + err = cmd.Start() + if err == nil && ctx.Err() != nil { + err = ctx.Err() + } + if err != nil { + return tf.wrapExitError(ctx, err, "") + } + + var errStdout, errStderr error + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + errStdout = writeOutput(ctx, stdoutPipe, stdoutWriter) + }() + + wg.Add(1) + go func() { + defer wg.Done() + errStderr = writeOutput(ctx, stderrPipe, stderrWriter) + }() + + // Reads from pipes must be completed before calling cmd.Wait(). 
Otherwise + // can cause a race condition + wg.Wait() + + err = cmd.Wait() if err == nil && ctx.Err() != nil { err = ctx.Err() } @@ -50,5 +78,13 @@ func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { return tf.wrapExitError(ctx, err, errBuf.String()) } + // Return error if there was an issue reading the std out/err + if errStdout != nil && ctx.Err() != nil { + return tf.wrapExitError(ctx, errStdout, errBuf.String()) + } + if errStderr != nil && ctx.Err() != nil { + return tf.wrapExitError(ctx, errStderr, errBuf.String()) + } + return nil } diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmdstring.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmdstring.go deleted file mode 100644 index 4f81d1148..000000000 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmdstring.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build go1.13 - -package tfexec - -import ( - "os/exec" -) - -// cmdString handles go 1.12 as stringer was only added to exec.Cmd in 1.13 -func cmdString(c *exec.Cmd) string { - return c.String() -} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmdstring_go112.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmdstring_go112.go deleted file mode 100644 index 75614dbf6..000000000 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmdstring_go112.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !go1.13 - -package tfexec - -import ( - "os/exec" - "strings" -) - -// cmdString handles go 1.12 as stringer was only added to exec.Cmd in 1.13 -func cmdString(c *exec.Cmd) string { - b := new(strings.Builder) - b.WriteString(c.Path) - for _, a := range c.Args[1:] { - b.WriteByte(' ') - b.WriteString(a) - } - return b.String() -} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/exit_errors.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/exit_errors.go index b2b0ada62..ea25b2a56 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/exit_errors.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/exit_errors.go @@ -18,10 +18,18 @@ var ( usageRegexp = regexp.MustCompile(`Too many command line arguments|^Usage: .*Options:.*|Error: Invalid -\d+ option`) - noInitErrRegexp = regexp.MustCompile(`Error: Could not satisfy plugin requirements|` + - `Error: Could not load plugin|` + // v0.13 - `Please run \"terraform init\"|` + // v1.1.0 early alpha versions (ref 89b05050) - `run:\s+terraform init`) // v1.1.0 (ref df578afd) + noInitErrRegexp = regexp.MustCompile( + // UNINITIALISED PROVIDERS/MODULES + `Error: Could not satisfy plugin requirements|` + + `Error: Could not load plugin|` + // v0.13 + `Please run \"terraform init\"|` + // v1.1.0 early alpha versions (ref 89b05050) + `run:\s+terraform init|` + // v1.1.0 (ref df578afd) + `Run\s+\"terraform init\"|` + // v1.2.0 + + // UNINITIALISED BACKENDS + `Error: Initialization required.|` + // v0.13 + `Error: Backend initialization required, please run \"terraform init\"`, // v0.15 + ) noConfigErrRegexp = regexp.MustCompile(`Error: No configuration files`) @@ -33,8 +41,11 @@ var ( tfVersionMismatchConstraintRegexp = regexp.MustCompile(`required_version = "(.+)"|Required version: (.+)\b`) configInvalidErrRegexp = regexp.MustCompile(`There are some problems with the configuration, described below.`) - stateLockErrRegexp = regexp.MustCompile(`Error acquiring the state lock`) - stateLockInfoRegexp = regexp.MustCompile(`Lock 
Info:\n\s*ID:\s*([^\n]+)\n\s*Path:\s*([^\n]+)\n\s*Operation:\s*([^\n]+)\n\s*Who:\s*([^\n]+)\n\s*Version:\s*([^\n]+)\n\s*Created:\s*([^\n]+)\n`) + stateLockErrRegexp = regexp.MustCompile(`Error acquiring the state lock`) + stateLockInfoRegexp = regexp.MustCompile(`Lock Info:\n\s*ID:\s*([^\n]+)\n\s*Path:\s*([^\n]+)\n\s*Operation:\s*([^\n]+)\n\s*Who:\s*([^\n]+)\n\s*Version:\s*([^\n]+)\n\s*Created:\s*([^\n]+)\n`) + statePlanReadErrRegexp = regexp.MustCompile( + `Terraform couldn't read the given file as a state or plan file.|` + + `Error: Failed to read the given file as a state or plan file`) ) func (tf *Terraform) wrapExitError(ctx context.Context, err error, stderr string) error { @@ -147,6 +158,8 @@ func (tf *Terraform) wrapExitError(ctx context.Context, err error, stderr string Created: submatches[6], } } + case statePlanReadErrRegexp.MatchString(stderr): + return &ErrStatePlanRead{stderr: stderr} } return fmt.Errorf("%w\n%s", &unwrapper{exitErr, ctxErr}, stderr) @@ -223,6 +236,16 @@ func (e *ErrNoInit) Error() string { return e.stderr } +type ErrStatePlanRead struct { + unwrapper + + stderr string +} + +func (e *ErrStatePlanRead) Error() string { + return e.stderr +} + type ErrNoConfig struct { unwrapper diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/force_unlock.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/force_unlock.go new file mode 100644 index 000000000..c8dddffa1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/force_unlock.go @@ -0,0 +1,50 @@ +package tfexec + +import ( + "context" + "os/exec" +) + +type forceUnlockConfig struct { + dir string +} + +var defaultForceUnlockOptions = forceUnlockConfig{} + +type ForceUnlockOption interface { + configureForceUnlock(*forceUnlockConfig) +} + +func (opt *DirOption) configureForceUnlock(conf *forceUnlockConfig) { + conf.dir = opt.path +} + +// ForceUnlock represents the `terraform force-unlock` command +func (tf *Terraform) ForceUnlock(ctx context.Context, lockID string, opts ...ForceUnlockOption) error { + unlockCmd := tf.forceUnlockCmd(ctx, lockID, opts...) + + if err := tf.runTerraformCmd(ctx, unlockCmd); err != nil { + return err + } + + return nil +} + +func (tf *Terraform) forceUnlockCmd(ctx context.Context, lockID string, opts ...ForceUnlockOption) *exec.Cmd { + c := defaultForceUnlockOptions + + for _, o := range opts { + o.configureForceUnlock(&c) + } + args := []string{"force-unlock", "-force"} + + // positional arguments + args = append(args, lockID) + + // optional positional arguments + if c.dir != "" { + args = append(args, c.dir) + } + + return tf.buildTerraformCmd(ctx, nil, args...) 
+}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/graph.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/graph.go
new file mode 100644
index 000000000..73396280b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/graph.go
@@ -0,0 +1,85 @@
+package tfexec
+
+import (
+ "context"
+ "fmt"
+ "os/exec"
+ "strings"
+)
+
+type graphConfig struct {
+ plan string
+ drawCycles bool
+ graphType string
+}
+
+var defaultGraphOptions = graphConfig{}
+
+type GraphOption interface {
+ configureGraph(*graphConfig)
+}
+
+func (opt *GraphPlanOption) configureGraph(conf *graphConfig) {
+ conf.plan = opt.file
+}
+
+func (opt *DrawCyclesOption) configureGraph(conf *graphConfig) {
+ conf.drawCycles = opt.drawCycles
+}
+
+func (opt *GraphTypeOption) configureGraph(conf *graphConfig) {
+ conf.graphType = opt.graphType
+}
+
+func (tf *Terraform) Graph(ctx context.Context, opts ...GraphOption) (string, error) {
+ graphCmd, err := tf.graphCmd(ctx, opts...)
+ if err != nil {
+ return "", err
+ }
+ var outBuf strings.Builder
+ graphCmd.Stdout = &outBuf
+ err = tf.runTerraformCmd(ctx, graphCmd)
+ if err != nil {
+ return "", err
+ }
+
+ return outBuf.String(), nil
+
+}
+
+func (tf *Terraform) graphCmd(ctx context.Context, opts ...GraphOption) (*exec.Cmd, error) {
+ c := defaultGraphOptions
+
+ for _, o := range opts {
+ o.configureGraph(&c)
+ }
+
+ args := []string{"graph"}
+
+ if c.plan != "" {
+ // plan was a positional argument prior to Terraform 0.15.0. Ensure proper use by checking version.
+ if err := tf.compatible(ctx, tf0_15_0, nil); err == nil {
+ args = append(args, "-plan="+c.plan)
+ } else {
+ args = append(args, c.plan)
+ }
+ }
+
+ if c.drawCycles {
+ err := tf.compatible(ctx, tf0_5_0, nil)
+ if err != nil {
+ return nil, fmt.Errorf("-draw-cycles was first introduced in Terraform 0.5.0: %w", err)
+ }
+ args = append(args, "-draw-cycles")
+ }
+
+ if c.graphType != "" {
+ err := tf.compatible(ctx, tf0_8_0, nil)
+ if err != nil {
+ return nil, fmt.Errorf("-graph-type was first introduced in Terraform 0.8.0: %w", err)
+ }
+ args = append(args, "-type="+c.graphType)
+ }
+
+ return tf.buildTerraformCmd(ctx, nil, args...), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go
index fb2d5bd74..ad3cc65c6 100644
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go
+++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go
@@ -14,6 +14,16 @@ func AllowMissingConfig(allowMissingConfig bool) *AllowMissingConfigOption {
 return &AllowMissingConfigOption{allowMissingConfig}
 }
 
+// AllowMissingOption represents the -allow-missing flag.
+type AllowMissingOption struct {
+ allowMissing bool
+}
+
+// AllowMissing represents the -allow-missing flag.
+func AllowMissing(allowMissing bool) *AllowMissingOption {
+ return &AllowMissingOption{allowMissing}
+}
+
 // BackendOption represents the -backend flag.
 type BackendOption struct {
 backend bool
@@ -108,6 +118,15 @@ func Destroy(destroy bool) *DestroyFlagOption {
 return &DestroyFlagOption{destroy}
 }
 
+type DrawCyclesOption struct {
+ drawCycles bool
+}
+
+// DrawCycles represents the -draw-cycles flag.
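
A hedged usage sketch (not part of the vendored change) for the Graph command added above; the DrawCycles option it passes is the constructor defined just below, and the working directory and binary path are assumptions.

package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-exec/tfexec"
)

func main() {
	tf, err := tfexec.NewTerraform("/tmp/workdir", "/usr/local/bin/terraform")
	if err != nil {
		panic(err)
	}

	// DrawCycles requires Terraform 0.5.0+ and GraphType 0.8.0+; graphCmd
	// returns an error from tf.compatible when the binary is older.
	dot, err := tf.Graph(context.Background(),
		tfexec.DrawCycles(true),
		tfexec.GraphType("plan"),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(dot) // the graph in DOT format, captured from stdout
}
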
+func DrawCycles(drawCycles bool) *DrawCyclesOption {
+ return &DrawCyclesOption{drawCycles}
+}
+
 type DryRunOption struct {
 dryRun bool
 }
@@ -212,6 +231,15 @@ func Parallelism(n int) *ParallelismOption {
 return &ParallelismOption{n}
 }
 
+type GraphPlanOption struct {
+ file string
+}
+
+// GraphPlan represents the -plan flag, which specifies a plan file to graph.
+func GraphPlan(file string) *GraphPlanOption {
+ return &GraphPlanOption{file}
+}
+
 type PlatformOption struct {
 platform string
 }
@@ -334,6 +362,14 @@ func Target(resource string) *TargetOption {
 return &TargetOption{resource}
 }
 
+type GraphTypeOption struct {
+ graphType string
+}
+
+func GraphType(graphType string) *GraphTypeOption {
+ return &GraphTypeOption{graphType}
+}
+
 type UpdateOption struct {
 update bool
 }
@@ -373,21 +409,3 @@ type VerifyPluginsOption struct {
 func VerifyPlugins(verifyPlugins bool) *VerifyPluginsOption {
 return &VerifyPluginsOption{verifyPlugins}
 }
-
-// FromStateOption represents the -from-state option of the "terraform add" command.
-type FromStateOption struct {
- fromState bool
-}
-
-func FromState(fromState bool) *FromStateOption {
- return &FromStateOption{fromState}
-}
-
-// IncludeOptionalOption represents the -optional option of the "terraform add" command.
-type IncludeOptionalOption struct {
- includeOptional bool
-}
-
-func IncludeOptional(includeOptional bool) *IncludeOptionalOption {
- return &IncludeOptionalOption{includeOptional}
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/state_pull.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_pull.go
new file mode 100644
index 000000000..11b6b9c77
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_pull.go
@@ -0,0 +1,55 @@
+package tfexec
+
+import (
+ "bytes"
+ "context"
+ "os/exec"
+)
+
+type statePullConfig struct {
+ reattachInfo ReattachInfo
+}
+
+var defaultStatePullConfig = statePullConfig{}
+
+type StatePullOption interface {
+ configureStatePull(*statePullConfig)
+}
+
+func (opt *ReattachOption) configureStatePull(conf *statePullConfig) {
+ conf.reattachInfo = opt.info
+}
+
+func (tf *Terraform) StatePull(ctx context.Context, opts ...StatePullOption) (string, error) {
+ c := defaultStatePullConfig
+
+ for _, o := range opts {
+ o.configureStatePull(&c)
+ }
+
+ mergeEnv := map[string]string{}
+ if c.reattachInfo != nil {
+ reattachStr, err := c.reattachInfo.marshalString()
+ if err != nil {
+ return "", err
+ }
+ mergeEnv[reattachEnvVar] = reattachStr
+ }
+
+ cmd := tf.statePullCmd(ctx, mergeEnv)
+
+ var ret bytes.Buffer
+ cmd.Stdout = &ret
+ err := tf.runTerraformCmd(ctx, cmd)
+ if err != nil {
+ return "", err
+ }
+
+ return ret.String(), nil
+}
+
+func (tf *Terraform) statePullCmd(ctx context.Context, mergeEnv map[string]string) *exec.Cmd {
+ args := []string{"state", "pull"}
+
+ return tf.buildTerraformCmd(ctx, mergeEnv, args...)
+}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/state_push.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_push.go
new file mode 100644
index 000000000..14e55a2eb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_push.go
@@ -0,0 +1,67 @@
+package tfexec
+
+import (
+ "context"
+ "os/exec"
+ "strconv"
+)
+
+type statePushConfig struct {
+ force bool
+ lock bool
+ lockTimeout string
+}
+
+var defaultStatePushOptions = statePushConfig{
+ lock: false,
+ lockTimeout: "0s",
+}
+
+// StatePushCmdOption represents options used in the StatePush method.
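
A hedged sketch (not vendored code) of driving the new StatePull/StatePush helpers end to end: pull the current state, write it to a file, and push it back while holding the state lock. The paths and binary location are assumptions; the StatePushCmdOption interface continues just below.

package main

import (
	"context"
	"os"

	"github.com/hashicorp/terraform-exec/tfexec"
)

func main() {
	tf, err := tfexec.NewTerraform("/tmp/workdir", "/usr/local/bin/terraform")
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// Pull the current state as a raw JSON string.
	state, err := tf.StatePull(ctx)
	if err != nil {
		panic(err)
	}
	if err := os.WriteFile("/tmp/pulled.tfstate", []byte(state), 0o600); err != nil {
		panic(err)
	}

	// Push it back; Lock(true) overrides the command's lock=false default.
	if err := tf.StatePush(ctx, "/tmp/pulled.tfstate", tfexec.Lock(true)); err != nil {
		panic(err)
	}
}
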
+type StatePushCmdOption interface { + configureStatePush(*statePushConfig) +} + +func (opt *ForceOption) configureStatePush(conf *statePushConfig) { + conf.force = opt.force +} + +func (opt *LockOption) configureStatePush(conf *statePushConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureStatePush(conf *statePushConfig) { + conf.lockTimeout = opt.timeout +} + +func (tf *Terraform) StatePush(ctx context.Context, path string, opts ...StatePushCmdOption) error { + cmd, err := tf.statePushCmd(ctx, path, opts...) + if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) statePushCmd(ctx context.Context, path string, opts ...StatePushCmdOption) (*exec.Cmd, error) { + c := defaultStatePushOptions + + for _, o := range opts { + o.configureStatePush(&c) + } + + args := []string{"state", "push"} + + if c.force { + args = append(args, "-force") + } + + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + + args = append(args, path) + + return tf.buildTerraformCmd(ctx, nil, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/taint.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/taint.go new file mode 100644 index 000000000..cd69df308 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/taint.go @@ -0,0 +1,78 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "strconv" +) + +type taintConfig struct { + state string + allowMissing bool + lock bool + lockTimeout string +} + +var defaultTaintOptions = taintConfig{ + allowMissing: false, + lock: true, +} + +// TaintOption represents options used in the Taint method. +type TaintOption interface { + configureTaint(*taintConfig) +} + +func (opt *StateOption) configureTaint(conf *taintConfig) { + conf.state = opt.path +} + +func (opt *AllowMissingOption) configureTaint(conf *taintConfig) { + conf.allowMissing = opt.allowMissing +} + +func (opt *LockOption) configureTaint(conf *taintConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureTaint(conf *taintConfig) { + conf.lockTimeout = opt.timeout +} + +// Taint represents the terraform taint subcommand. +func (tf *Terraform) Taint(ctx context.Context, address string, opts ...TaintOption) error { + err := tf.compatible(ctx, tf0_4_1, nil) + if err != nil { + return fmt.Errorf("taint was first introduced in Terraform 0.4.1: %w", err) + } + taintCmd := tf.taintCmd(ctx, address, opts...) + return tf.runTerraformCmd(ctx, taintCmd) +} + +func (tf *Terraform) taintCmd(ctx context.Context, address string, opts ...TaintOption) *exec.Cmd { + c := defaultTaintOptions + + for _, o := range opts { + o.configureTaint(&c) + } + + args := []string{"taint", "-no-color"} + + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + + // string opts: only pass if set + if c.state != "" { + args = append(args, "-state="+c.state) + } + + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + if c.allowMissing { + args = append(args, "-allow-missing") + } + args = append(args, address) + + return tf.buildTerraformCmd(ctx, nil, args...) 
+}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go
index 74787af0c..bb8be17d5 100644
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go
+++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go
@@ -48,19 +48,30 @@ type Terraform struct {
 skipProviderVerify bool
 env map[string]string
 
- stdout io.Writer
- stderr io.Writer
- logger printfer
+ stdout io.Writer
+ stderr io.Writer
+ logger printfer
+
+ // TF_LOG environment variable, defaults to TRACE if logPath is set.
+ log string
+
+ // TF_LOG_CORE environment variable
+ logCore string
+
+ // TF_LOG_PATH environment variable
 logPath string
 
+ // TF_LOG_PROVIDER environment variable
+ logProvider string
+
 versionLock sync.Mutex
 execVersion *version.Version
 provVersions map[string]*version.Version
 }
 
 // NewTerraform returns a Terraform struct with default values for all fields.
-// If a blank execPath is supplied, NewTerraform will attempt to locate an
-// appropriate binary on the system PATH.
+// If a blank execPath is supplied, NewTerraform will error.
+// Use hc-install or output from exec.LookPath to get a desirable execPath.
 func NewTerraform(workingDir string, execPath string) (*Terraform, error) {
 if workingDir == "" {
 return nil, fmt.Errorf("Terraform cannot be initialised with empty workdir")
 }
@@ -71,7 +82,7 @@ func NewTerraform(workingDir string, execPath string) (*Terraform, error) {
 }
 
 if execPath == "" {
- err := fmt.Errorf("NewTerraform: please supply the path to a Terraform executable using execPath, e.g. using the tfinstall package.")
+ err := fmt.Errorf("NewTerraform: please supply the path to a Terraform executable using execPath, e.g. using the github.com/hashicorp/hc-install module.")
 return nil, &ErrNoSuitableBinary{
 err: err,
 }
@@ -122,10 +133,58 @@ func (tf *Terraform) SetStderr(w io.Writer) {
 tf.stderr = w
 }
 
+// SetLog sets the TF_LOG environment variable for Terraform CLI execution.
+// This must be combined with a call to SetLogPath to take effect.
+//
+// This is only compatible with Terraform CLI 0.15.0 or later as setting the
+// log level was unreliable in earlier versions. It will default to TRACE when
+// SetLogPath is called on versions 0.14.11 and earlier, or if SetLogCore and
+// SetLogProvider have not been called before SetLogPath on versions 0.15.0 and
+// later.
+func (tf *Terraform) SetLog(log string) error {
+ err := tf.compatible(context.Background(), tf0_15_0, nil)
+ if err != nil {
+ return err
+ }
+ tf.log = log
+ return nil
+}
+
+// SetLogCore sets the TF_LOG_CORE environment variable for Terraform CLI
+// execution. This must be combined with a call to SetLogPath to take effect.
+//
+// This is only compatible with Terraform CLI 0.15.0 or later.
+func (tf *Terraform) SetLogCore(logCore string) error {
+ err := tf.compatible(context.Background(), tf0_15_0, nil)
+ if err != nil {
+ return err
+ }
+ tf.logCore = logCore
+ return nil
+}
+
 // SetLogPath sets the TF_LOG_PATH environment variable for Terraform CLI
 // execution.
 func (tf *Terraform) SetLogPath(path string) error {
 tf.logPath = path
+ // Prevent setting the log path without enabling logging
+ if tf.log == "" && tf.logCore == "" && tf.logProvider == "" {
+ tf.log = "TRACE"
+ }
 return nil
 }
+
+// SetLogProvider sets the TF_LOG_PROVIDER environment variable for Terraform
+// CLI execution. This must be combined with a call to SetLogPath to take
+// effect.
+//
+// This is only compatible with Terraform CLI 0.15.0 or later.
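
A hedged usage sketch (not part of the vendored change) for the logging setters introduced above: enable DEBUG logging to a file before running commands. The paths and installed binary are assumptions; SetLogProvider itself continues just below.

package main

import (
	"context"

	"github.com/hashicorp/terraform-exec/tfexec"
)

func main() {
	tf, err := tfexec.NewTerraform("/tmp/workdir", "/usr/local/bin/terraform")
	if err != nil {
		panic(err)
	}

	// Order matters: choose a level first, then the path. If no level has
	// been chosen, SetLogPath defaults TF_LOG to TRACE as shown above.
	if err := tf.SetLog("DEBUG"); err != nil {
		panic(err) // requires Terraform CLI 0.15.0 or later
	}
	if err := tf.SetLogPath("/tmp/terraform.log"); err != nil {
		panic(err)
	}

	if err := tf.Init(context.Background()); err != nil {
		panic(err)
	}
}
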
+func (tf *Terraform) SetLogProvider(logProvider string) error {
+ err := tf.compatible(context.Background(), tf0_15_0, nil)
+ if err != nil {
+ return err
+ }
+ tf.logProvider = logProvider
 return nil
 }
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/untaint.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/untaint.go
new file mode 100644
index 000000000..bda127277
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/untaint.go
@@ -0,0 +1,78 @@
+package tfexec
+
+import (
+ "context"
+ "fmt"
+ "os/exec"
+ "strconv"
+)
+
+type untaintConfig struct {
+ state string
+ allowMissing bool
+ lock bool
+ lockTimeout string
+}
+
+var defaultUntaintOptions = untaintConfig{
+ allowMissing: false,
+ lock: true,
+}
+
+// UntaintOption represents options used in the Untaint method.
+type UntaintOption interface {
+ configureUntaint(*untaintConfig)
+}
+
+func (opt *StateOption) configureUntaint(conf *untaintConfig) {
+ conf.state = opt.path
+}
+
+func (opt *AllowMissingOption) configureUntaint(conf *untaintConfig) {
+ conf.allowMissing = opt.allowMissing
+}
+
+func (opt *LockOption) configureUntaint(conf *untaintConfig) {
+ conf.lock = opt.lock
+}
+
+func (opt *LockTimeoutOption) configureUntaint(conf *untaintConfig) {
+ conf.lockTimeout = opt.timeout
+}
+
+// Untaint represents the terraform untaint subcommand.
+func (tf *Terraform) Untaint(ctx context.Context, address string, opts ...UntaintOption) error {
+ err := tf.compatible(ctx, tf0_6_13, nil)
+ if err != nil {
+ return fmt.Errorf("untaint was first introduced in Terraform 0.6.13: %w", err)
+ }
+ untaintCmd := tf.untaintCmd(ctx, address, opts...)
+ return tf.runTerraformCmd(ctx, untaintCmd)
+}
+
+func (tf *Terraform) untaintCmd(ctx context.Context, address string, opts ...UntaintOption) *exec.Cmd {
+ c := defaultUntaintOptions
+
+ for _, o := range opts {
+ o.configureUntaint(&c)
+ }
+
+ args := []string{"untaint", "-no-color"}
+
+ if c.lockTimeout != "" {
+ args = append(args, "-lock-timeout="+c.lockTimeout)
+ }
+
+ // string opts: only pass if set
+ if c.state != "" {
+ args = append(args, "-state="+c.state)
+ }
+
+ args = append(args, "-lock="+strconv.FormatBool(c.lock))
+ if c.allowMissing {
+ args = append(args, "-allow-missing")
+ }
+ args = append(args, address)
+
+ return tf.buildTerraformCmd(ctx, nil, args...)
+} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go index d9c57cd07..9978ae28d 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go @@ -14,7 +14,12 @@ import ( ) var ( + tf0_4_1 = version.Must(version.NewVersion("0.4.1")) + tf0_5_0 = version.Must(version.NewVersion("0.5.0")) + tf0_6_13 = version.Must(version.NewVersion("0.6.13")) tf0_7_7 = version.Must(version.NewVersion("0.7.7")) + tf0_8_0 = version.Must(version.NewVersion("0.8.0")) + tf0_10_0 = version.Must(version.NewVersion("0.10.0")) tf0_12_0 = version.Must(version.NewVersion("0.12.0")) tf0_13_0 = version.Must(version.NewVersion("0.13.0")) tf0_14_0 = version.Must(version.NewVersion("0.14.0")) diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_delete.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_delete.go new file mode 100644 index 000000000..526772079 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_delete.go @@ -0,0 +1,81 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "strconv" +) + +type workspaceDeleteConfig struct { + lock bool + lockTimeout string + force bool +} + +var defaultWorkspaceDeleteOptions = workspaceDeleteConfig{ + lock: true, + lockTimeout: "0s", +} + +// WorkspaceDeleteCmdOption represents options that are applicable to the WorkspaceDelete method. +type WorkspaceDeleteCmdOption interface { + configureWorkspaceDelete(*workspaceDeleteConfig) +} + +func (opt *LockOption) configureWorkspaceDelete(conf *workspaceDeleteConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureWorkspaceDelete(conf *workspaceDeleteConfig) { + conf.lockTimeout = opt.timeout +} + +func (opt *ForceOption) configureWorkspaceDelete(conf *workspaceDeleteConfig) { + conf.force = opt.force +} + +// WorkspaceDelete represents the workspace delete subcommand to the Terraform CLI. +func (tf *Terraform) WorkspaceDelete(ctx context.Context, workspace string, opts ...WorkspaceDeleteCmdOption) error { + cmd, err := tf.workspaceDeleteCmd(ctx, workspace, opts...) + if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) workspaceDeleteCmd(ctx context.Context, workspace string, opts ...WorkspaceDeleteCmdOption) (*exec.Cmd, error) { + c := defaultWorkspaceDeleteOptions + + for _, o := range opts { + switch o.(type) { + case *LockOption, *LockTimeoutOption: + err := tf.compatible(ctx, tf0_12_0, nil) + if err != nil { + return nil, fmt.Errorf("-lock and -lock-timeout were added to workspace delete in Terraform 0.12: %w", err) + } + } + + o.configureWorkspaceDelete(&c) + } + + args := []string{"workspace", "delete", "-no-color"} + + if c.force { + args = append(args, "-force") + } + if c.lockTimeout != "" && c.lockTimeout != defaultWorkspaceDeleteOptions.lockTimeout { + // only pass if not default, so we don't need to worry about the 0.11 version check + args = append(args, "-lock-timeout="+c.lockTimeout) + } + if !c.lock { + // only pass if false, so we don't need to worry about the 0.11 version check + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + } + + args = append(args, workspace) + + cmd := tf.buildTerraformCmd(ctx, nil, args...) 
+ + return cmd, nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_show.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_show.go new file mode 100644 index 000000000..7d5a267f1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_show.go @@ -0,0 +1,35 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "strings" +) + +// WorkspaceShow represents the workspace show subcommand to the Terraform CLI. +func (tf *Terraform) WorkspaceShow(ctx context.Context) (string, error) { + workspaceShowCmd, err := tf.workspaceShowCmd(ctx) + if err != nil { + return "", err + } + + var outBuffer strings.Builder + workspaceShowCmd.Stdout = &outBuffer + + err = tf.runTerraformCmd(ctx, workspaceShowCmd) + if err != nil { + return "", err + } + + return strings.TrimSpace(outBuffer.String()), nil +} + +func (tf *Terraform) workspaceShowCmd(ctx context.Context) (*exec.Cmd, error) { + err := tf.compatible(ctx, tf0_10_0, nil) + if err != nil { + return nil, fmt.Errorf("workspace show was first introduced in Terraform 0.10.0: %w", err) + } + + return tf.buildTerraformCmd(ctx, nil, "workspace", "show", "-no-color"), nil +} diff --git a/vendor/github.com/hashicorp/terraform-json/config.go b/vendor/github.com/hashicorp/terraform-json/config.go index e093cfa8b..5ebe4bc84 100644 --- a/vendor/github.com/hashicorp/terraform-json/config.go +++ b/vendor/github.com/hashicorp/terraform-json/config.go @@ -48,6 +48,9 @@ type ProviderConfig struct { // The name of the provider, ie: "aws". Name string `json:"name,omitempty"` + // The fully-specified name of the provider, ie: "registry.terraform.io/hashicorp/aws". + FullName string `json:"full_name,omitempty"` + // The alias of the provider, ie: "us-east-1". 
Alias string `json:"alias,omitempty"` diff --git a/vendor/github.com/hashicorp/terraform-json/go.mod b/vendor/github.com/hashicorp/terraform-json/go.mod index a04aeeac9..76514d474 100644 --- a/vendor/github.com/hashicorp/terraform-json/go.mod +++ b/vendor/github.com/hashicorp/terraform-json/go.mod @@ -4,9 +4,10 @@ go 1.13 require ( github.com/davecgh/go-spew v1.1.1 - github.com/google/go-cmp v0.5.6 - github.com/hashicorp/go-version v1.3.0 + github.com/google/go-cmp v0.5.8 + github.com/hashicorp/go-version v1.5.0 github.com/mitchellh/copystructure v1.2.0 github.com/sebdah/goldie v1.0.0 - github.com/zclconf/go-cty v1.9.1 + github.com/zclconf/go-cty v1.10.0 + github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b ) diff --git a/vendor/github.com/hashicorp/terraform-json/go.sum b/vendor/github.com/hashicorp/terraform-json/go.sum index 51d9bd152..cf933f240 100644 --- a/vendor/github.com/hashicorp/terraform-json/go.sum +++ b/vendor/github.com/hashicorp/terraform-json/go.sum @@ -1,17 +1,20 @@ +github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/hashicorp/go-version v1.3.0 h1:McDWVJIU/y+u1BRV06dPaLfLCaT7fUTJLp5r04x7iNw= -github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/hashicorp/go-version v1.5.0 h1:O293SZ2Eg+AAYijkVK3jR786Am1bhDEh2GHT0tIVE5E= +github.com/hashicorp/go-version v1.5.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= @@ -23,20 +26,25 @@ github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdk github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 
+github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/zclconf/go-cty v1.9.1 h1:viqrgQwFl5UpSxc046qblj78wZXVDFnSOufaOTER+cc= -github.com/zclconf/go-cty v1.9.1/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= +github.com/zclconf/go-cty v1.10.0 h1:mp9ZXQeIcN8kAwuqorjH+Q+njbJKjLrvB2yIh4q7U+0= +github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b h1:FosyBZYxY34Wul7O/MSKey3txpPYyCqVO5ZyceuQJEI= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/hashicorp/terraform-json/plan.go b/vendor/github.com/hashicorp/terraform-json/plan.go index 1de5bc82a..274006a01 100644 --- a/vendor/github.com/hashicorp/terraform-json/plan.go +++ b/vendor/github.com/hashicorp/terraform-json/plan.go @@ -55,6 +55,19 @@ type Plan struct { // The Terraform configuration used to make the plan. Config *Config `json:"configuration,omitempty"` + + // RelevantAttributes represents any resource instances and their + // attributes which may have contributed to the planned changes + RelevantAttributes []ResourceAttribute `json:"relevant_attributes,omitempty"` +} + +// ResourceAttribute describes a full path to a resource attribute +type ResourceAttribute struct { + // Resource describes resource instance address (e.g. null_resource.foo) + Resource string `json:"resource"` + // Attribute describes the attribute path using a lossy representation + // of cty.Path. (e.g. ["id"] or ["objects", 0, "val"]). 
+ Attribute []json.RawMessage `json:"attribute"`
 }
 
 // Validate checks to ensure that the plan is present, and the
diff --git a/vendor/github.com/hashicorp/terraform-json/schemas.go b/vendor/github.com/hashicorp/terraform-json/schemas.go
index 2360231d0..027224b62 100644
--- a/vendor/github.com/hashicorp/terraform-json/schemas.go
+++ b/vendor/github.com/hashicorp/terraform-json/schemas.go
@@ -223,6 +223,13 @@ type SchemaAttribute struct {
 Sensitive bool `json:"sensitive,omitempty"`
 }
 
+// jsonSchemaAttribute describes an attribute within a schema block
+// in a middle-step internal representation before being marshaled into
+// a more useful SchemaAttribute with cty.Type.
+//
+// This avoids a panic on marshalling cty.NilType (from cty upstream),
+// which the default Go marshaller cannot ignore because it's not a
+// nil-able struct.
 type jsonSchemaAttribute struct {
 AttributeType json.RawMessage `json:"type,omitempty"`
 AttributeNestedType *SchemaNestedAttributeType `json:"nested_type,omitempty"`
diff --git a/vendor/github.com/hashicorp/terraform-json/state.go b/vendor/github.com/hashicorp/terraform-json/state.go
index bece5c220..3c3f6a4b0 100644
--- a/vendor/github.com/hashicorp/terraform-json/state.go
+++ b/vendor/github.com/hashicorp/terraform-json/state.go
@@ -7,6 +7,7 @@ import (
 "fmt"
 
 "github.com/hashicorp/go-version"
+ "github.com/zclconf/go-cty/cty"
 )
 
 // StateFormatVersionConstraints defines the versions of the JSON state format
@@ -175,4 +176,31 @@ type StateOutput struct {
 
 // The value of the output.
 Value interface{} `json:"value,omitempty"`
+
+ // The type of the output.
+ Type cty.Type `json:"type,omitempty"`
+}
+
+// jsonStateOutput describes an output value in a middle-step internal
+// representation before being marshaled into a more useful StateOutput with cty.Type.
+//
+// This avoids a panic on marshalling cty.NilType (from cty upstream),
+// which the default Go marshaller cannot ignore because it's not a
+// nil-able struct.
+type jsonStateOutput struct {
+ Sensitive bool `json:"sensitive"`
+ Value interface{} `json:"value,omitempty"`
+ Type json.RawMessage `json:"type,omitempty"`
+}
+
+func (so *StateOutput) MarshalJSON() ([]byte, error) {
+ jsonSa := &jsonStateOutput{
+ Sensitive: so.Sensitive,
+ Value: so.Value,
+ }
+ if so.Type != cty.NilType {
+ outputType, _ := so.Type.MarshalJSON()
+ jsonSa.Type = outputType
+ }
+ return json.Marshal(jsonSa)
 }
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/context.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/context.go
new file mode 100644
index 000000000..385283bb9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/context.go
@@ -0,0 +1,89 @@
+package logging
+
+import (
+ "context"
+
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/hashicorp/terraform-plugin-log/tfsdklog"
+)
+
+// DataSourceContext injects the data source type into logger contexts.
+func DataSourceContext(ctx context.Context, dataSource string) context.Context {
+ ctx = tfsdklog.SetField(ctx, KeyDataSourceType, dataSource)
+ ctx = tfsdklog.SubsystemSetField(ctx, SubsystemProto, KeyDataSourceType, dataSource)
+ ctx = tflog.SetField(ctx, KeyDataSourceType, dataSource)
+
+ return ctx
+}
+
+// InitContext creates SDK and provider logger contexts.
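
Back in terraform-json for a moment: a hedged sketch (not vendored code) of what the StateOutput.MarshalJSON above buys. A StateOutput with an unset Type simply omits the "type" key instead of panicking on cty.NilType; InitContext itself continues just below.

package main

import (
	"encoding/json"
	"fmt"

	tfjson "github.com/hashicorp/terraform-json"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	typed := &tfjson.StateOutput{Value: "ok", Type: cty.String}
	untyped := &tfjson.StateOutput{Value: "ok"} // Type left as cty.NilType

	a, _ := json.Marshal(typed)
	b, _ := json.Marshal(untyped)
	fmt.Println(string(a)) // {"sensitive":false,"value":"ok","type":"string"}
	fmt.Println(string(b)) // {"sensitive":false,"value":"ok"} - no panic
}
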
+func InitContext(ctx context.Context, sdkOpts tfsdklog.Options, providerOpts tflog.Options) context.Context { + ctx = tfsdklog.NewRootSDKLogger(ctx, append(tfsdklog.Options{ + tfsdklog.WithLevelFromEnv(EnvTfLogSdk), + }, sdkOpts...)...) + ctx = ProtoSubsystemContext(ctx, sdkOpts) + ctx = tfsdklog.NewRootProviderLogger(ctx, providerOpts...) + + return ctx +} + +// ProtoSubsystemContext adds the proto subsystem to the SDK logger context. +func ProtoSubsystemContext(ctx context.Context, sdkOpts tfsdklog.Options) context.Context { + ctx = tfsdklog.NewSubsystem(ctx, SubsystemProto, append(tfsdklog.Options{ + // All calls are through the Protocol* helper functions + tfsdklog.WithAdditionalLocationOffset(1), + tfsdklog.WithLevelFromEnv(EnvTfLogSdkProto), + }, sdkOpts...)...) + + return ctx +} + +// ProtocolVersionContext injects the protocol version into logger contexts. +func ProtocolVersionContext(ctx context.Context, protocolVersion string) context.Context { + ctx = tfsdklog.SubsystemSetField(ctx, SubsystemProto, KeyProtocolVersion, protocolVersion) + + return ctx +} + +// ProviderAddressContext injects the provider address into logger contexts. +func ProviderAddressContext(ctx context.Context, providerAddress string) context.Context { + ctx = tfsdklog.SetField(ctx, KeyProviderAddress, providerAddress) + ctx = tfsdklog.SubsystemSetField(ctx, SubsystemProto, KeyProviderAddress, providerAddress) + ctx = tflog.SetField(ctx, KeyProviderAddress, providerAddress) + + return ctx +} + +// RequestIdContext injects a unique request ID into logger contexts. +func RequestIdContext(ctx context.Context) context.Context { + reqID, err := uuid.GenerateUUID() + + if err != nil { + reqID = "unable to assign request ID: " + err.Error() + } + + ctx = tfsdklog.SetField(ctx, KeyRequestID, reqID) + ctx = tfsdklog.SubsystemSetField(ctx, SubsystemProto, KeyRequestID, reqID) + ctx = tflog.SetField(ctx, KeyRequestID, reqID) + + return ctx +} + +// ResourceContext injects the resource type into logger contexts. +func ResourceContext(ctx context.Context, resource string) context.Context { + ctx = tfsdklog.SetField(ctx, KeyResourceType, resource) + ctx = tfsdklog.SubsystemSetField(ctx, SubsystemProto, KeyResourceType, resource) + ctx = tflog.SetField(ctx, KeyResourceType, resource) + + return ctx +} + +// RpcContext injects the RPC name into logger contexts. +func RpcContext(ctx context.Context, rpc string) context.Context { + ctx = tfsdklog.SetField(ctx, KeyRPC, rpc) + ctx = tfsdklog.SubsystemSetField(ctx, SubsystemProto, KeyRPC, rpc) + ctx = tflog.SetField(ctx, KeyRPC, rpc) + + return ctx +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/doc.go new file mode 100644 index 000000000..cc1a033e6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/doc.go @@ -0,0 +1,2 @@ +// Package logging contains shared environment variable and log functionality. +package logging diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/environment_variables.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/environment_variables.go new file mode 100644 index 000000000..49b8072c8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/environment_variables.go @@ -0,0 +1,22 @@ +package logging + +// Environment variables. 
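Every helper in context.go follows the same shape: take a context, set one field on the root SDK logger, the proto subsystem logger, and the provider logger, then hand back the derived context, so a server can stack them at the top of each RPC. Below is a self-contained toy of that layering, with a plain map standing in for the hclog-backed loggers the real helpers write to:

package main

import (
	"context"
	"fmt"
)

type logFieldsKey struct{}

// setField mimics the shape of RpcContext/ResourceContext/etc.: copy the
// fields accumulated so far, add one, and return a derived context.
func setField(ctx context.Context, key, value string) context.Context {
	fields := map[string]string{}
	if prev, ok := ctx.Value(logFieldsKey{}).(map[string]string); ok {
		for k, v := range prev {
			fields[k] = v
		}
	}
	fields[key] = value
	return context.WithValue(ctx, logFieldsKey{}, fields)
}

func main() {
	ctx := context.Background()
	ctx = setField(ctx, "tf_rpc", "PlanResourceChange")
	ctx = setField(ctx, "tf_resource_type", "pagerduty_service")
	// Both fields now ride along on every log emitted with this context.
	fmt.Println(ctx.Value(logFieldsKey{}))
}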
+const ( + // EnvTfLogProvider is the prefix of the environment variable that sets the + // logging level of the root provider logger for the provider being served. + // The suffix is an underscore and the parsed provider name. For example, + // registry.terraform.io/hashicorp/example becomes TF_LOG_PROVIDER_EXAMPLE. + EnvTfLogProvider = "TF_LOG_PROVIDER" + + // EnvTfLogSdk is an environment variable that sets the root logging level + // of SDK loggers. + EnvTfLogSdk = "TF_LOG_SDK" + + // EnvTfLogSdkProto is an environment variable that sets the logging level + // of SDK protocol loggers. Infers root SDK logging level, if unset. + EnvTfLogSdkProto = "TF_LOG_SDK_PROTO" + + // EnvTfLogSdkProtoDataDir is an environment variable that sets the + // directory to write raw protocol data files for debugging purposes. + EnvTfLogSdkProtoDataDir = "TF_LOG_SDK_PROTO_DATA_DIR" +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/keys.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/keys.go new file mode 100644 index 000000000..ce803d752 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/keys.go @@ -0,0 +1,53 @@ +package logging + +// Global logging keys attached to all requests. +// +// Practitioners or tooling reading logs may be depending on these keys, so be +// conscious of that when changing them. +const ( + // Attribute of the diagnostic being logged. + KeyDiagnosticAttribute = "diagnostic_attribute" + + // Number of the error diagnostics. + KeyDiagnosticErrorCount = "diagnostic_error_count" + + // Severity of the diagnostic being logged. + KeyDiagnosticSeverity = "diagnostic_severity" + + // Detail of the diagnostic being logged. + KeyDiagnosticDetail = "diagnostic_detail" + + // Summary of the diagnostic being logged. + KeyDiagnosticSummary = "diagnostic_summary" + + // Number of the warning diagnostics. + KeyDiagnosticWarningCount = "diagnostic_warning_count" + + // Underlying error string + KeyError = "error" + + // Duration in milliseconds for the RPC request + KeyRequestDurationMs = "tf_req_duration_ms" + + // A unique ID for the RPC request + KeyRequestID = "tf_req_id" + + // The full address of the provider, such as + // registry.terraform.io/hashicorp/random + KeyProviderAddress = "tf_provider_addr" + + // The RPC being run, such as "ApplyResourceChange" + KeyRPC = "tf_rpc" + + // The type of resource being operated on, such as "random_pet" + KeyResourceType = "tf_resource_type" + + // The type of data source being operated on, such as "archive_file" + KeyDataSourceType = "tf_data_source_type" + + // Path to protocol data file, such as "/tmp/example.json" + KeyProtocolDataFile = "tf_proto_data_file" + + // The protocol version being used, as a string, such as "6" + KeyProtocolVersion = "tf_proto_version" +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol.go new file mode 100644 index 000000000..21a392a3c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol.go @@ -0,0 +1,27 @@ +package logging + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-log/tfsdklog" +) + +const ( + // SubsystemProto is the tfsdklog subsystem name for protocol logging. + SubsystemProto = "proto" +) + +// ProtocolError emits a protocol subsystem log at ERROR level. 
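As the EnvTfLogProvider comment states, the per-provider level variable is derived from the provider address: registry.terraform.io/hashicorp/example becomes TF_LOG_PROVIDER_EXAMPLE. A sketch of that derivation, assuming the parsed name is the last address segment with hyphens mapped to underscores (mirroring ProviderLoggerName later in this diff); providerLogLevelEnvVar is hypothetical:

package main

import (
	"fmt"
	"strings"
)

// providerLogLevelEnvVar derives the per-provider log-level variable name
// described above from a full provider source address.
func providerLogLevelEnvVar(providerAddress string) string {
	segments := strings.Split(providerAddress, "/")
	name := segments[len(segments)-1]
	name = strings.ReplaceAll(name, "-", "_")
	return "TF_LOG_PROVIDER_" + strings.ToUpper(name)
}

func main() {
	fmt.Println(providerLogLevelEnvVar("registry.terraform.io/hashicorp/example"))
	// TF_LOG_PROVIDER_EXAMPLE
}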
+func ProtocolError(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemError(ctx, SubsystemProto, msg, additionalFields...) +} + +// ProtocolWarn emits a protocol subsystem log at WARN level. +func ProtocolWarn(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemWarn(ctx, SubsystemProto, msg, additionalFields...) +} + +// ProtocolTrace emits a protocol subsystem log at TRACE level. +func ProtocolTrace(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemTrace(ctx, SubsystemProto, msg, additionalFields...) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol_data.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol_data.go new file mode 100644 index 000000000..630358232 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol_data.go @@ -0,0 +1,108 @@ +package logging + +import ( + "context" + "fmt" + "os" + "path" + "sync" + "time" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +const ( + // fileExtEmpty is the file extension for empty data. + // Empty data may be expected, depending on the RPC. + fileExtEmpty = "empty" + + // fileExtJson is the file extension for JSON data. + fileExtJson = "json" + + // fileExtMsgpack is the file extension for MessagePack data. + fileExtMsgpack = "msgpack" +) + +var protocolDataSkippedLog sync.Once + +// ProtocolData emits raw protocol data to a file, if given a directory. +// +// The directory must exist and be writable, prior to invoking this function. +// +// File names are in the format: {TIME}_{RPC}_{MESSAGE}_{FIELD}.{EXT} +func ProtocolData(ctx context.Context, dataDir string, rpc string, message string, field string, data interface{}) { + if dataDir == "" { + // Write a log, only once, that explains how to enable this functionality. + protocolDataSkippedLog.Do(func() { + ProtocolTrace(ctx, "Skipping protocol data file writing because no data directory is set. "+ + fmt.Sprintf("Use the %s environment variable to enable this functionality.", EnvTfLogSdkProtoDataDir)) + }) + + return + } + + var fileContents []byte + var fileExtension string + + switch data := data.(type) { + case *tfprotov5.DynamicValue: + fileExtension, fileContents = protocolDataDynamicValue5(ctx, data) + case *tfprotov6.DynamicValue: + fileExtension, fileContents = protocolDataDynamicValue6(ctx, data) + default: + ProtocolError(ctx, fmt.Sprintf("Skipping unknown protocol data type: %T", data)) + return + } + + fileName := fmt.Sprintf("%d_%s_%s_%s.%s", time.Now().Unix(), rpc, message, field, fileExtension) + filePath := path.Join(dataDir, fileName) + logFields := map[string]interface{}{KeyProtocolDataFile: filePath} // should not be persisted using With() + + ProtocolTrace(ctx, "Writing protocol data file", logFields) + + err := os.WriteFile(filePath, fileContents, 0644) + + if err != nil { + ProtocolError(ctx, fmt.Sprintf("Unable to write protocol data file: %s", err), logFields) + return + } + + ProtocolTrace(ctx, "Wrote protocol data file", logFields) +} + +func protocolDataDynamicValue5(_ context.Context, value *tfprotov5.DynamicValue) (string, []byte) { + if value == nil { + return fileExtEmpty, nil + } + + // (tfprotov5.DynamicValue).Unmarshal() prefers JSON first, so prefer to + // output JSON if found. 
+ if len(value.JSON) > 0 { + return fileExtJson, value.JSON + } + + if len(value.MsgPack) > 0 { + return fileExtMsgpack, value.MsgPack + } + + return fileExtEmpty, nil +} + +func protocolDataDynamicValue6(_ context.Context, value *tfprotov6.DynamicValue) (string, []byte) { + if value == nil { + return fileExtEmpty, nil + } + + // (tfprotov6.DynamicValue).Unmarshal() prefers JSON first, so prefer to + // output JSON if found. + if len(value.JSON) > 0 { + return fileExtJson, value.JSON + } + + if len(value.MsgPack) > 0 { + return fileExtMsgpack, value.MsgPack + } + + return fileExtEmpty, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/provider.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/provider.go new file mode 100644 index 000000000..b40763c6e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/provider.go @@ -0,0 +1,18 @@ +package logging + +import ( + "log" + "strings" + + tfaddr "github.com/hashicorp/terraform-registry-address" +) + +func ProviderLoggerName(providerAddress string) string { + provider, err := tfaddr.ParseProviderSource(providerAddress) + if err != nil { + log.Printf("[ERROR] Error parsing provider name %q: %s", providerAddress, err) + return "" + } + + return strings.ReplaceAll(provider.Type, "-", "_") +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/diagnostics.go new file mode 100644 index 000000000..1032f7d4f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/diagnostics.go @@ -0,0 +1,82 @@ +package diag + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// Diagnostics is a collection of Diagnostic. +type Diagnostics []*tfprotov5.Diagnostic + +// ErrorCount returns the number of error severity diagnostics. +func (d Diagnostics) ErrorCount() int { + var result int + + for _, diagnostic := range d { + if diagnostic == nil { + continue + } + + if diagnostic.Severity != tfprotov5.DiagnosticSeverityError { + continue + } + + result++ + } + + return result +} + +// Log will log every diagnostic: +// +// - Error severity at ERROR level +// - Warning severity at WARN level +// - Invalid/Unknown severity at WARN level +// +func (d Diagnostics) Log(ctx context.Context) { + for _, diagnostic := range d { + if diagnostic == nil { + continue + } + + diagnosticFields := map[string]interface{}{ + logging.KeyDiagnosticDetail: diagnostic.Detail, + logging.KeyDiagnosticSeverity: diagnostic.Severity.String(), + logging.KeyDiagnosticSummary: diagnostic.Summary, + } + + if diagnostic.Attribute != nil { + diagnosticFields[logging.KeyDiagnosticAttribute] = diagnostic.Attribute.String() + } + + switch diagnostic.Severity { + case tfprotov5.DiagnosticSeverityError: + logging.ProtocolError(ctx, "Response contains error diagnostic", diagnosticFields) + case tfprotov5.DiagnosticSeverityWarning: + logging.ProtocolWarn(ctx, "Response contains warning diagnostic", diagnosticFields) + default: + logging.ProtocolWarn(ctx, "Response contains unknown diagnostic", diagnosticFields) + } + } +} + +// WarningCount returns the number of warning severity diagnostics. 
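Tying the pieces of protocol_data.go together: ProtocolData names each dump {TIME}_{RPC}_{MESSAGE}_{FIELD}.{EXT}, where TIME is a Unix timestamp and the extension reflects which encoding the DynamicValue carried, preferring JSON over MessagePack and falling back to "empty". A sketch of the resulting path; protocolDataFileName is a hypothetical stand-alone rendition:

package main

import (
	"fmt"
	"path"
	"time"
)

// protocolDataFileName mirrors the naming scheme used by ProtocolData:
// {TIME}_{RPC}_{MESSAGE}_{FIELD}.{EXT}, joined onto the data directory.
func protocolDataFileName(dataDir, rpc, message, field string, hasJSON, hasMsgpack bool) string {
	ext := "empty"
	switch {
	case hasJSON: // JSON is preferred when both encodings are present
		ext = "json"
	case hasMsgpack:
		ext = "msgpack"
	}
	name := fmt.Sprintf("%d_%s_%s_%s.%s", time.Now().Unix(), rpc, message, field, ext)
	return path.Join(dataDir, name)
}

func main() {
	fmt.Println(protocolDataFileName("/tmp/tfdata", "PlanResourceChange", "Request", "Config", true, false))
	// e.g. /tmp/tfdata/1658500000_PlanResourceChange_Request_Config.json
}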
+func (d Diagnostics) WarningCount() int { + var result int + + for _, diagnostic := range d { + if diagnostic == nil { + continue + } + + if diagnostic.Severity != tfprotov5.DiagnosticSeverityWarning { + continue + } + + result++ + } + + return result +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/doc.go new file mode 100644 index 000000000..0c73dab12 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/doc.go @@ -0,0 +1,3 @@ +// Package diag contains diagnostics helpers. These implementations are +// intentionally outside the public API. +package diag diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/context_keys.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/context_keys.go new file mode 100644 index 000000000..cc72fe4bb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/context_keys.go @@ -0,0 +1,8 @@ +package tf5serverlogging + +// Context key types. +// Reference: https://staticcheck.io/docs/checks/#SA1029 + +// ContextKeyDownstreamRequestStartTime is a context.Context key to store the +// time.Time when the server began a downstream request. +type ContextKeyDownstreamRequestStartTime struct{} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/doc.go new file mode 100644 index 000000000..e77a831c0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/doc.go @@ -0,0 +1,3 @@ +// Package tf5serverlogging contains logging functionality specific to +// tf5server and tfprotov5 types. +package tf5serverlogging diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/downstream_request.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/downstream_request.go new file mode 100644 index 000000000..eeec76e8c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/downstream_request.go @@ -0,0 +1,40 @@ +package tf5serverlogging + +import ( + "context" + "time" + + "github.com/hashicorp/terraform-plugin-go/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag" +) + +// DownstreamRequest sets a request duration start time context key and +// generates a TRACE "Sending request downstream" log. 
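The SA1029 reference in context_keys.go is about key collisions: an unexported empty-struct type is unique to its defining package, so a value stored under it can never be clobbered by another package's key, which a plain string key cannot guarantee. A minimal demonstration:

package main

import (
	"context"
	"fmt"
)

// Distinct unexported struct types make distinct keys, even if two packages
// pick the same type name; string keys like "id" would silently collide.
type keyA struct{}
type keyB struct{}

func main() {
	ctx := context.Background()
	ctx = context.WithValue(ctx, keyA{}, "request start")
	ctx = context.WithValue(ctx, keyB{}, "request id")
	fmt.Println(ctx.Value(keyA{}), ctx.Value(keyB{})) // request start request id
}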
+func DownstreamRequest(ctx context.Context) context.Context { + requestStart := time.Now() + ctx = context.WithValue(ctx, ContextKeyDownstreamRequestStartTime{}, requestStart) + + logging.ProtocolTrace(ctx, "Sending request downstream") + + return ctx +} + +// DownstreamResponse generates the following logging: +// +// - TRACE "Received downstream response" log with request duration and +// diagnostic severity counts +// - Per-diagnostic logs +// +func DownstreamResponse(ctx context.Context, diagnostics diag.Diagnostics) { + responseFields := map[string]interface{}{ + logging.KeyDiagnosticErrorCount: diagnostics.ErrorCount(), + logging.KeyDiagnosticWarningCount: diagnostics.WarningCount(), + } + + if requestStart, ok := ctx.Value(ContextKeyDownstreamRequestStartTime{}).(time.Time); ok { + responseFields[logging.KeyRequestDurationMs] = time.Since(requestStart).Milliseconds() + } + + logging.ProtocolTrace(ctx, "Received downstream response", responseFields) + diagnostics.Log(ctx) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/generate.sh b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/generate.sh deleted file mode 100644 index ac9ec74b3..000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/generate.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -# We do not run protoc under go:generate because we want to ensure that all -# dependencies of go:generate are "go get"-able for general dev environment -# usability. To compile all protobuf files in this repository, run -# "make protobuf" at the top-level. - -set -eu - -SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done -DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" - -cd "$DIR" - -protoc --proto_path=. --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative tfplugin5.proto diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go index 793eed7a8..2d73459c4 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go @@ -1,6 +1,6 @@ -// Terraform Plugin RPC protocol version 5.2 +// Terraform Plugin RPC protocol version 5.3 // -// This file defines version 5.2 of the RPC protocol. To implement a plugin +// This file defines version 5.3 of the RPC protocol. To implement a plugin // against this protocol, copy this definition into your own codebase and // use protoc to generate stubs for your target language. // @@ -13,20 +13,19 @@ // official protocol releases. Proto files taken from other commits may include // incomplete changes or features that did not make it into a final release. // In all reasonable cases, plugin developers should take the proto file from -// the tag of the most recent release of Terraform, and not from the master +// the tag of the most recent release of Terraform, and not from the main // branch or any other development branch. // // Code generated by protoc-gen-go. DO NOT EDIT. 
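DownstreamRequest and DownstreamResponse bracket the provider call: the first stashes a start time under the context key above, the second reads it back to compute tf_req_duration_ms alongside the diagnostic counts. Since the package is internal and not importable, here is a self-contained rendition of the same bracket, with local stand-ins (downstreamRequest/downstreamResponse) and fmt.Println in place of the TRACE log:

package main

import (
	"context"
	"fmt"
	"time"
)

type downstreamStartKey struct{}

// downstreamRequest records when the downstream call began.
func downstreamRequest(ctx context.Context) context.Context {
	return context.WithValue(ctx, downstreamStartKey{}, time.Now())
}

// downstreamResponse reports duration plus severity counts, using the
// field keys documented in keys.go.
func downstreamResponse(ctx context.Context, errCount, warnCount int) {
	fields := map[string]interface{}{
		"diagnostic_error_count":   errCount,
		"diagnostic_warning_count": warnCount,
	}
	if start, ok := ctx.Value(downstreamStartKey{}).(time.Time); ok {
		fields["tf_req_duration_ms"] = time.Since(start).Milliseconds()
	}
	fmt.Println("Received downstream response", fields)
}

func main() {
	ctx := downstreamRequest(context.Background())
	time.Sleep(25 * time.Millisecond) // stand-in for the provider doing work
	downstreamResponse(ctx, 0, 1)
}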
// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 +// protoc-gen-go v1.28.0 +// protoc v3.19.4 // source: tfplugin5.proto package tfplugin5 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -40,10 +39,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type StringKind int32 const ( @@ -1563,11 +1558,12 @@ type GetProviderSchema_Response struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Provider *Schema `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider,omitempty"` - ResourceSchemas map[string]*Schema `protobuf:"bytes,2,rep,name=resource_schemas,json=resourceSchemas,proto3" json:"resource_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - DataSourceSchemas map[string]*Schema `protobuf:"bytes,3,rep,name=data_source_schemas,json=dataSourceSchemas,proto3" json:"data_source_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` - ProviderMeta *Schema `protobuf:"bytes,5,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + Provider *Schema `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider,omitempty"` + ResourceSchemas map[string]*Schema `protobuf:"bytes,2,rep,name=resource_schemas,json=resourceSchemas,proto3" json:"resource_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + DataSourceSchemas map[string]*Schema `protobuf:"bytes,3,rep,name=data_source_schemas,json=dataSourceSchemas,proto3" json:"data_source_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + ProviderMeta *Schema `protobuf:"bytes,5,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + ServerCapabilities *GetProviderSchema_ServerCapabilities `protobuf:"bytes,6,opt,name=server_capabilities,json=serverCapabilities,proto3" json:"server_capabilities,omitempty"` } func (x *GetProviderSchema_Response) Reset() { @@ -1637,6 +1633,66 @@ func (x *GetProviderSchema_Response) GetProviderMeta() *Schema { return nil } +func (x *GetProviderSchema_Response) GetServerCapabilities() *GetProviderSchema_ServerCapabilities { + if x != nil { + return x.ServerCapabilities + } + return nil +} + +// ServerCapabilities allows providers to communicate extra information +// regarding supported protocol features. This is used to indicate +// availability of certain forward-compatible changes which may be optional +// in a major protocol version, but cannot be tested for directly. +type GetProviderSchema_ServerCapabilities struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The plan_destroy capability signals that a provider expects a call + // to PlanResourceChange when a resource is going to be destroyed. 
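The ServerCapabilities comment above explains the forward-compatibility contract: because server_capabilities is a message field, a client talking to an older provider simply sees a nil ServerCapabilities and must treat every capability, including plan_destroy, as false. A sketch of that nil-safe read, using local stand-ins for the generated types (the real ones live in the internal tfplugin5 package):

package main

import "fmt"

// Local stand-ins for the generated types: nil-safe getters mean callers of
// older providers, which never set field 6 on the wire, simply see false.
type serverCapabilities struct{ PlanDestroy bool }

type getProviderSchemaResponse struct {
	ServerCapabilities *serverCapabilities
}

func (r *getProviderSchemaResponse) GetServerCapabilities() *serverCapabilities {
	if r != nil {
		return r.ServerCapabilities
	}
	return nil
}

func main() {
	oldProvider := &getProviderSchemaResponse{} // capability message absent
	fmt.Println(oldProvider.GetServerCapabilities() != nil &&
		oldProvider.GetServerCapabilities().PlanDestroy) // false: safe default

	newProvider := &getProviderSchemaResponse{
		ServerCapabilities: &serverCapabilities{PlanDestroy: true},
	}
	fmt.Println(newProvider.GetServerCapabilities().PlanDestroy) // true
}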
+ PlanDestroy bool `protobuf:"varint,1,opt,name=plan_destroy,json=planDestroy,proto3" json:"plan_destroy,omitempty"` +} + +func (x *GetProviderSchema_ServerCapabilities) Reset() { + *x = GetProviderSchema_ServerCapabilities{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProviderSchema_ServerCapabilities) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProviderSchema_ServerCapabilities) ProtoMessage() {} + +func (x *GetProviderSchema_ServerCapabilities) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProviderSchema_ServerCapabilities.ProtoReflect.Descriptor instead. +func (*GetProviderSchema_ServerCapabilities) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{6, 2} +} + +func (x *GetProviderSchema_ServerCapabilities) GetPlanDestroy() bool { + if x != nil { + return x.PlanDestroy + } + return false +} + type PrepareProviderConfig_Request struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1648,7 +1704,7 @@ type PrepareProviderConfig_Request struct { func (x *PrepareProviderConfig_Request) Reset() { *x = PrepareProviderConfig_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[31] + mi := &file_tfplugin5_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1661,7 +1717,7 @@ func (x *PrepareProviderConfig_Request) String() string { func (*PrepareProviderConfig_Request) ProtoMessage() {} func (x *PrepareProviderConfig_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[31] + mi := &file_tfplugin5_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1696,7 +1752,7 @@ type PrepareProviderConfig_Response struct { func (x *PrepareProviderConfig_Response) Reset() { *x = PrepareProviderConfig_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[32] + mi := &file_tfplugin5_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1709,7 +1765,7 @@ func (x *PrepareProviderConfig_Response) String() string { func (*PrepareProviderConfig_Response) ProtoMessage() {} func (x *PrepareProviderConfig_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[32] + mi := &file_tfplugin5_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1758,7 +1814,7 @@ type UpgradeResourceState_Request struct { func (x *UpgradeResourceState_Request) Reset() { *x = UpgradeResourceState_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[33] + mi := &file_tfplugin5_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1771,7 +1827,7 @@ func (x *UpgradeResourceState_Request) String() string { func (*UpgradeResourceState_Request) ProtoMessage() {} func (x *UpgradeResourceState_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[33] + mi := 
&file_tfplugin5_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1826,7 +1882,7 @@ type UpgradeResourceState_Response struct { func (x *UpgradeResourceState_Response) Reset() { *x = UpgradeResourceState_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[34] + mi := &file_tfplugin5_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1839,7 +1895,7 @@ func (x *UpgradeResourceState_Response) String() string { func (*UpgradeResourceState_Response) ProtoMessage() {} func (x *UpgradeResourceState_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[34] + mi := &file_tfplugin5_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1881,7 +1937,7 @@ type ValidateResourceTypeConfig_Request struct { func (x *ValidateResourceTypeConfig_Request) Reset() { *x = ValidateResourceTypeConfig_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[35] + mi := &file_tfplugin5_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1894,7 +1950,7 @@ func (x *ValidateResourceTypeConfig_Request) String() string { func (*ValidateResourceTypeConfig_Request) ProtoMessage() {} func (x *ValidateResourceTypeConfig_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[35] + mi := &file_tfplugin5_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1935,7 +1991,7 @@ type ValidateResourceTypeConfig_Response struct { func (x *ValidateResourceTypeConfig_Response) Reset() { *x = ValidateResourceTypeConfig_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[36] + mi := &file_tfplugin5_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1948,7 +2004,7 @@ func (x *ValidateResourceTypeConfig_Response) String() string { func (*ValidateResourceTypeConfig_Response) ProtoMessage() {} func (x *ValidateResourceTypeConfig_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[36] + mi := &file_tfplugin5_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1983,7 +2039,7 @@ type ValidateDataSourceConfig_Request struct { func (x *ValidateDataSourceConfig_Request) Reset() { *x = ValidateDataSourceConfig_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[37] + mi := &file_tfplugin5_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1996,7 +2052,7 @@ func (x *ValidateDataSourceConfig_Request) String() string { func (*ValidateDataSourceConfig_Request) ProtoMessage() {} func (x *ValidateDataSourceConfig_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[37] + mi := &file_tfplugin5_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2037,7 +2093,7 @@ type ValidateDataSourceConfig_Response struct { func (x *ValidateDataSourceConfig_Response) Reset() { *x = ValidateDataSourceConfig_Response{} if protoimpl.UnsafeEnabled { 
- mi := &file_tfplugin5_proto_msgTypes[38] + mi := &file_tfplugin5_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2050,7 +2106,7 @@ func (x *ValidateDataSourceConfig_Response) String() string { func (*ValidateDataSourceConfig_Response) ProtoMessage() {} func (x *ValidateDataSourceConfig_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[38] + mi := &file_tfplugin5_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2085,7 +2141,7 @@ type Configure_Request struct { func (x *Configure_Request) Reset() { *x = Configure_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[39] + mi := &file_tfplugin5_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2098,7 +2154,7 @@ func (x *Configure_Request) String() string { func (*Configure_Request) ProtoMessage() {} func (x *Configure_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[39] + mi := &file_tfplugin5_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2139,7 +2195,7 @@ type Configure_Response struct { func (x *Configure_Response) Reset() { *x = Configure_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[40] + mi := &file_tfplugin5_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2152,7 +2208,7 @@ func (x *Configure_Response) String() string { func (*Configure_Response) ProtoMessage() {} func (x *Configure_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[40] + mi := &file_tfplugin5_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2189,7 +2245,7 @@ type ReadResource_Request struct { func (x *ReadResource_Request) Reset() { *x = ReadResource_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[41] + mi := &file_tfplugin5_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2202,7 +2258,7 @@ func (x *ReadResource_Request) String() string { func (*ReadResource_Request) ProtoMessage() {} func (x *ReadResource_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[41] + mi := &file_tfplugin5_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2259,7 +2315,7 @@ type ReadResource_Response struct { func (x *ReadResource_Response) Reset() { *x = ReadResource_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[42] + mi := &file_tfplugin5_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2272,7 +2328,7 @@ func (x *ReadResource_Response) String() string { func (*ReadResource_Response) ProtoMessage() {} func (x *ReadResource_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[42] + mi := &file_tfplugin5_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2325,7 +2381,7 @@ type PlanResourceChange_Request struct { func (x 
*PlanResourceChange_Request) Reset() { *x = PlanResourceChange_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[43] + mi := &file_tfplugin5_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2338,7 +2394,7 @@ func (x *PlanResourceChange_Request) String() string { func (*PlanResourceChange_Request) ProtoMessage() {} func (x *PlanResourceChange_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[43] + mi := &file_tfplugin5_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2422,7 +2478,7 @@ type PlanResourceChange_Response struct { func (x *PlanResourceChange_Response) Reset() { *x = PlanResourceChange_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[44] + mi := &file_tfplugin5_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2435,7 +2491,7 @@ func (x *PlanResourceChange_Response) String() string { func (*PlanResourceChange_Response) ProtoMessage() {} func (x *PlanResourceChange_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[44] + mi := &file_tfplugin5_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2502,7 +2558,7 @@ type ApplyResourceChange_Request struct { func (x *ApplyResourceChange_Request) Reset() { *x = ApplyResourceChange_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[45] + mi := &file_tfplugin5_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2515,7 +2571,7 @@ func (x *ApplyResourceChange_Request) String() string { func (*ApplyResourceChange_Request) ProtoMessage() {} func (x *ApplyResourceChange_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[45] + mi := &file_tfplugin5_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2598,7 +2654,7 @@ type ApplyResourceChange_Response struct { func (x *ApplyResourceChange_Response) Reset() { *x = ApplyResourceChange_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[46] + mi := &file_tfplugin5_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2611,7 +2667,7 @@ func (x *ApplyResourceChange_Response) String() string { func (*ApplyResourceChange_Response) ProtoMessage() {} func (x *ApplyResourceChange_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[46] + mi := &file_tfplugin5_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2667,7 +2723,7 @@ type ImportResourceState_Request struct { func (x *ImportResourceState_Request) Reset() { *x = ImportResourceState_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[47] + mi := &file_tfplugin5_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2680,7 +2736,7 @@ func (x *ImportResourceState_Request) String() string { func (*ImportResourceState_Request) ProtoMessage() {} func (x *ImportResourceState_Request) ProtoReflect() protoreflect.Message { - mi := 
&file_tfplugin5_proto_msgTypes[47] + mi := &file_tfplugin5_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2723,7 +2779,7 @@ type ImportResourceState_ImportedResource struct { func (x *ImportResourceState_ImportedResource) Reset() { *x = ImportResourceState_ImportedResource{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[48] + mi := &file_tfplugin5_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2736,7 +2792,7 @@ func (x *ImportResourceState_ImportedResource) String() string { func (*ImportResourceState_ImportedResource) ProtoMessage() {} func (x *ImportResourceState_ImportedResource) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[48] + mi := &file_tfplugin5_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2785,7 +2841,7 @@ type ImportResourceState_Response struct { func (x *ImportResourceState_Response) Reset() { *x = ImportResourceState_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[49] + mi := &file_tfplugin5_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2798,7 +2854,7 @@ func (x *ImportResourceState_Response) String() string { func (*ImportResourceState_Response) ProtoMessage() {} func (x *ImportResourceState_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[49] + mi := &file_tfplugin5_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2841,7 +2897,7 @@ type ReadDataSource_Request struct { func (x *ReadDataSource_Request) Reset() { *x = ReadDataSource_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[50] + mi := &file_tfplugin5_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2854,7 +2910,7 @@ func (x *ReadDataSource_Request) String() string { func (*ReadDataSource_Request) ProtoMessage() {} func (x *ReadDataSource_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[50] + mi := &file_tfplugin5_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2903,7 +2959,7 @@ type ReadDataSource_Response struct { func (x *ReadDataSource_Response) Reset() { *x = ReadDataSource_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[51] + mi := &file_tfplugin5_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2916,7 +2972,7 @@ func (x *ReadDataSource_Response) String() string { func (*ReadDataSource_Response) ProtoMessage() {} func (x *ReadDataSource_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[51] + mi := &file_tfplugin5_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2955,7 +3011,7 @@ type GetProvisionerSchema_Request struct { func (x *GetProvisionerSchema_Request) Reset() { *x = GetProvisionerSchema_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[52] + mi := &file_tfplugin5_proto_msgTypes[53] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2968,7 +3024,7 @@ func (x *GetProvisionerSchema_Request) String() string { func (*GetProvisionerSchema_Request) ProtoMessage() {} func (x *GetProvisionerSchema_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[52] + mi := &file_tfplugin5_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2996,7 +3052,7 @@ type GetProvisionerSchema_Response struct { func (x *GetProvisionerSchema_Response) Reset() { *x = GetProvisionerSchema_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[53] + mi := &file_tfplugin5_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3009,7 +3065,7 @@ func (x *GetProvisionerSchema_Response) String() string { func (*GetProvisionerSchema_Response) ProtoMessage() {} func (x *GetProvisionerSchema_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[53] + mi := &file_tfplugin5_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3050,7 +3106,7 @@ type ValidateProvisionerConfig_Request struct { func (x *ValidateProvisionerConfig_Request) Reset() { *x = ValidateProvisionerConfig_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[54] + mi := &file_tfplugin5_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3063,7 +3119,7 @@ func (x *ValidateProvisionerConfig_Request) String() string { func (*ValidateProvisionerConfig_Request) ProtoMessage() {} func (x *ValidateProvisionerConfig_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[54] + mi := &file_tfplugin5_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3097,7 +3153,7 @@ type ValidateProvisionerConfig_Response struct { func (x *ValidateProvisionerConfig_Response) Reset() { *x = ValidateProvisionerConfig_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[55] + mi := &file_tfplugin5_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3110,7 +3166,7 @@ func (x *ValidateProvisionerConfig_Response) String() string { func (*ValidateProvisionerConfig_Response) ProtoMessage() {} func (x *ValidateProvisionerConfig_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[55] + mi := &file_tfplugin5_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3145,7 +3201,7 @@ type ProvisionResource_Request struct { func (x *ProvisionResource_Request) Reset() { *x = ProvisionResource_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[56] + mi := &file_tfplugin5_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3158,7 +3214,7 @@ func (x *ProvisionResource_Request) String() string { func (*ProvisionResource_Request) ProtoMessage() {} func (x *ProvisionResource_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[56] + mi := &file_tfplugin5_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { 
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3200,7 +3256,7 @@ type ProvisionResource_Response struct { func (x *ProvisionResource_Response) Reset() { *x = ProvisionResource_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[57] + mi := &file_tfplugin5_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3213,7 +3269,7 @@ func (x *ProvisionResource_Response) String() string { func (*ProvisionResource_Response) ProtoMessage() {} func (x *ProvisionResource_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[57] + mi := &file_tfplugin5_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3353,9 +3409,9 @@ var file_tfplugin5_proto_rawDesc = []byte{ 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x4d, 0x41, 0x50, 0x10, - 0x04, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x05, 0x22, 0xd0, 0x04, 0x0a, + 0x04, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x05, 0x22, 0xeb, 0x05, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xaf, 0x04, + 0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x91, 0x05, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, @@ -3379,63 +3435,61 @@ var file_tfplugin5_proto_rawDesc = []byte{ 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x36, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, - 0x55, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x57, 0x0a, 0x16, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, - 0xdb, 0x01, 0x0a, 0x15, 0x50, 0x72, 0x65, 
0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, - 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, - 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x85, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x40, 0x0a, 0x0f, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, - 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x90, 0x02, - 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x72, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, - 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x08, 0x72, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x83, 0x01, 0x0a, 0x08, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, - 0x64, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x61, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x12, + 0x60, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, + 0x73, 0x1a, 0x55, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 
0x6d, 0x61, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x57, 0x0a, 0x16, 0x44, 0x61, 0x74, 0x61, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x1a, 0x37, 0x0a, 0x12, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, + 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x5f, + 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, + 0x6c, 0x61, 0x6e, 0x44, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x22, 0xdb, 0x01, 0x0a, 0x15, 0x50, + 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, - 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, - 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, - 0x22, 0xba, 0x01, 0x0a, 0x1a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, - 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, - 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, - 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, - 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xb8, 0x01, - 0x0a, 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, + 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x1a, 0x85, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, + 0x0f, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 
0x67, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0e, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x90, 0x02, 0x0a, 0x14, 0x55, 0x70, 0x67, + 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x1a, 0x72, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x08, 0x72, 0x61, 0x77, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x83, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, + 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xba, 0x01, 0x0a, 0x1a, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, @@ -3445,295 +3499,306 @@ var file_tfplugin5_proto_rawDesc = []byte{ 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, - 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xb9, 0x01, 0x0a, 0x09, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x1a, 0x67, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x10, 0x74, 0x65, - 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, - 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, - 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, - 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, - 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, - 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0xbc, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3c, - 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, - 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, - 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, - 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xb8, 0x01, 0x0a, 0x18, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, - 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x93, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, - 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, - 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, - 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 
0x01, 0x28, - 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x22, 0xf2, 0x04, 0x0a, 0x12, 0x50, - 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, - 0x65, 0x1a, 0xbb, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, - 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, - 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, - 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x12, 0x45, 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, - 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x22, 0xb9, 0x01, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x65, 0x1a, 0x67, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x11, + 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, + 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, + 0xe3, 0x02, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x1a, 0xbc, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x63, 0x75, 0x72, + 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, - 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x70, 0x6f, - 0x73, 0x65, 0x64, 0x4e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 
0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, - 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, - 0x9d, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, - 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, + 0x93, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x09, + 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, + 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, + 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, + 0x69, 0x76, 0x61, 0x74, 0x65, 0x22, 0xf2, 0x04, 0x0a, 0x12, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xbb, 0x02, 0x0a, + 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x45, 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x5f, 0x6e, 0x65, 0x77, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x4e, 0x65, + 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x69, 0x6f, 0x72, + 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, + 0x70, 0x72, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x9d, 0x02, 0x0a, 0x08, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, 0x6e, 0x6e, + 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x74, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x69, + 0x72, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, + 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, + 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0x92, 0x04, 0x0a, 0x13, 0x41, + 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x1a, 0xb6, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, + 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 
0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, - 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, - 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x72, 0x65, - 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, - 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x0f, - 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, - 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, - 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, - 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, - 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, + 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, + 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, + 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0xc1, 0x01, 0x0a, 0x08, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, + 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, - 0x92, 0x04, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xb6, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, - 
0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0xed, 0x02, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x36, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x1a, + 0x78, 0x0a, 0x10, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, - 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, - 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x1a, 0xa3, 0x01, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x12, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, + 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x49, + 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x11, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, + 0x9c, 0x02, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x1a, 0x95, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3c, 0x0a, 0x0d, + 0x70, 0x72, 0x6f, 0x76, 
0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x72, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x9b, + 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, + 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x78, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, + 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, + 0x6e, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x9c, 0x01, 0x0a, + 0x19, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, + 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, + 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x11, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x1a, 0x73, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, + 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 
0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, - 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, - 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, - 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, - 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, - 0x1a, 0xc1, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, - 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, - 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, - 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, - 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, - 0x73, 0x74, 0x65, 0x6d, 0x22, 0xed, 0x02, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x36, 0x0a, 0x07, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x02, 0x69, 0x64, 0x1a, 0x78, 0x0a, 0x10, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, - 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 
0x61, 0x74, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x1a, 0xa3, - 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x12, 0x69, - 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x35, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x11, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, - 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, - 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, - 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x73, 0x22, 0x9c, 0x02, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, - 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x95, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, - 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, - 0x72, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5b, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, - 0x69, 0x63, 0x73, 0x22, 0x9b, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, - 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x78, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 
0x73, 0x65, 0x12, 0x33, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0b, 0x70, 0x72, 0x6f, - 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, - 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x19, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, - 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, - 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, - 0x22, 0xe5, 0x01, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x73, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, - 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x37, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5b, 0x0a, 0x08, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, - 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, - 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, - 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x2a, 0x25, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x10, - 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4d, 0x41, 0x52, 0x4b, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x32, - 0x97, 0x09, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x58, 0x0a, 0x09, - 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x35, 
0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, 0x15, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, - 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x65, 0x70, - 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, - 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x1a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x2d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x2e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, - 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x75, 0x0a, 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, - 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2b, 0x2e, + 0x69, 0x63, 0x73, 0x2a, 0x25, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, + 0x64, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, + 0x4d, 0x41, 0x52, 0x4b, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x32, 0x97, 0x09, 0x0a, 0x08, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x58, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x6c, 0x0a, 0x15, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x28, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x7b, 0x0a, 0x1a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, - 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, - 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x55, 0x70, 0x67, - 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, - 0x12, 0x1c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, - 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x18, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2b, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, + 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 
0x65, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x27, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, + 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x1c, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x12, 0x50, + 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x6c, + 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x66, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x70, 0x70, 0x6c, + 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x49, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x57, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 
0x65, 0x12, 0x21, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, + 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x04, 0x53, 0x74, 0x6f, + 0x70, 0x12, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, + 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x86, 0x03, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, + 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x5e, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, + 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, + 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x19, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x2c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x62, + 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x63, 0x0a, 0x12, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x35, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 
0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x26, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, - 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, - 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, - 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x12, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, - 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x57, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, - 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x21, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, - 0x0a, 0x04, 0x53, 0x74, 0x6f, 0x70, 0x12, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, - 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x86, 0x03, 0x0a, 0x0b, 0x50, 0x72, - 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x5e, 0x0a, 0x09, 0x47, 0x65, 0x74, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, - 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x19, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, - 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, - 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, - 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x62, 0x0a, 0x11, 0x50, 0x72, 
0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x39, 0x0a, 0x04, 0x53, 0x74, 0x6f, 0x70, 0x12, - 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, - 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x42, 0x47, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x74, 0x65, 0x72, 0x72, 0x61, - 0x66, 0x6f, 0x72, 0x6d, 0x2d, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x74, - 0x66, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x76, 0x35, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2f, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x30, 0x01, 0x12, 0x39, 0x0a, 0x04, 0x53, 0x74, 0x6f, 0x70, 0x12, 0x17, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x47, 0x5a, + 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x2d, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x74, 0x66, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x76, 0x35, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -3749,7 +3814,7 @@ func file_tfplugin5_proto_rawDescGZIP() []byte { } var file_tfplugin5_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_tfplugin5_proto_msgTypes = make([]protoimpl.MessageInfo, 58) +var file_tfplugin5_proto_msgTypes = make([]protoimpl.MessageInfo, 59) var file_tfplugin5_proto_goTypes = []interface{}{ (StringKind)(0), // 0: tfplugin5.StringKind (Diagnostic_Severity)(0), // 1: tfplugin5.Diagnostic.Severity @@ -3783,35 +3848,36 @@ var file_tfplugin5_proto_goTypes = []interface{}{ (*Schema_NestedBlock)(nil), // 29: tfplugin5.Schema.NestedBlock (*GetProviderSchema_Request)(nil), // 30: tfplugin5.GetProviderSchema.Request (*GetProviderSchema_Response)(nil), // 31: tfplugin5.GetProviderSchema.Response - nil, // 32: tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry - nil, // 33: tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry - (*PrepareProviderConfig_Request)(nil), // 34: tfplugin5.PrepareProviderConfig.Request - (*PrepareProviderConfig_Response)(nil), // 35: tfplugin5.PrepareProviderConfig.Response - (*UpgradeResourceState_Request)(nil), // 36: tfplugin5.UpgradeResourceState.Request - (*UpgradeResourceState_Response)(nil), // 37: tfplugin5.UpgradeResourceState.Response - 
(*ValidateResourceTypeConfig_Request)(nil), // 38: tfplugin5.ValidateResourceTypeConfig.Request - (*ValidateResourceTypeConfig_Response)(nil), // 39: tfplugin5.ValidateResourceTypeConfig.Response - (*ValidateDataSourceConfig_Request)(nil), // 40: tfplugin5.ValidateDataSourceConfig.Request - (*ValidateDataSourceConfig_Response)(nil), // 41: tfplugin5.ValidateDataSourceConfig.Response - (*Configure_Request)(nil), // 42: tfplugin5.Configure.Request - (*Configure_Response)(nil), // 43: tfplugin5.Configure.Response - (*ReadResource_Request)(nil), // 44: tfplugin5.ReadResource.Request - (*ReadResource_Response)(nil), // 45: tfplugin5.ReadResource.Response - (*PlanResourceChange_Request)(nil), // 46: tfplugin5.PlanResourceChange.Request - (*PlanResourceChange_Response)(nil), // 47: tfplugin5.PlanResourceChange.Response - (*ApplyResourceChange_Request)(nil), // 48: tfplugin5.ApplyResourceChange.Request - (*ApplyResourceChange_Response)(nil), // 49: tfplugin5.ApplyResourceChange.Response - (*ImportResourceState_Request)(nil), // 50: tfplugin5.ImportResourceState.Request - (*ImportResourceState_ImportedResource)(nil), // 51: tfplugin5.ImportResourceState.ImportedResource - (*ImportResourceState_Response)(nil), // 52: tfplugin5.ImportResourceState.Response - (*ReadDataSource_Request)(nil), // 53: tfplugin5.ReadDataSource.Request - (*ReadDataSource_Response)(nil), // 54: tfplugin5.ReadDataSource.Response - (*GetProvisionerSchema_Request)(nil), // 55: tfplugin5.GetProvisionerSchema.Request - (*GetProvisionerSchema_Response)(nil), // 56: tfplugin5.GetProvisionerSchema.Response - (*ValidateProvisionerConfig_Request)(nil), // 57: tfplugin5.ValidateProvisionerConfig.Request - (*ValidateProvisionerConfig_Response)(nil), // 58: tfplugin5.ValidateProvisionerConfig.Response - (*ProvisionResource_Request)(nil), // 59: tfplugin5.ProvisionResource.Request - (*ProvisionResource_Response)(nil), // 60: tfplugin5.ProvisionResource.Response + (*GetProviderSchema_ServerCapabilities)(nil), // 32: tfplugin5.GetProviderSchema.ServerCapabilities + nil, // 33: tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry + nil, // 34: tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry + (*PrepareProviderConfig_Request)(nil), // 35: tfplugin5.PrepareProviderConfig.Request + (*PrepareProviderConfig_Response)(nil), // 36: tfplugin5.PrepareProviderConfig.Response + (*UpgradeResourceState_Request)(nil), // 37: tfplugin5.UpgradeResourceState.Request + (*UpgradeResourceState_Response)(nil), // 38: tfplugin5.UpgradeResourceState.Response + (*ValidateResourceTypeConfig_Request)(nil), // 39: tfplugin5.ValidateResourceTypeConfig.Request + (*ValidateResourceTypeConfig_Response)(nil), // 40: tfplugin5.ValidateResourceTypeConfig.Response + (*ValidateDataSourceConfig_Request)(nil), // 41: tfplugin5.ValidateDataSourceConfig.Request + (*ValidateDataSourceConfig_Response)(nil), // 42: tfplugin5.ValidateDataSourceConfig.Response + (*Configure_Request)(nil), // 43: tfplugin5.Configure.Request + (*Configure_Response)(nil), // 44: tfplugin5.Configure.Response + (*ReadResource_Request)(nil), // 45: tfplugin5.ReadResource.Request + (*ReadResource_Response)(nil), // 46: tfplugin5.ReadResource.Response + (*PlanResourceChange_Request)(nil), // 47: tfplugin5.PlanResourceChange.Request + (*PlanResourceChange_Response)(nil), // 48: tfplugin5.PlanResourceChange.Response + (*ApplyResourceChange_Request)(nil), // 49: tfplugin5.ApplyResourceChange.Request + (*ApplyResourceChange_Response)(nil), // 50: tfplugin5.ApplyResourceChange.Response + 
(*ImportResourceState_Request)(nil), // 51: tfplugin5.ImportResourceState.Request + (*ImportResourceState_ImportedResource)(nil), // 52: tfplugin5.ImportResourceState.ImportedResource + (*ImportResourceState_Response)(nil), // 53: tfplugin5.ImportResourceState.Response + (*ReadDataSource_Request)(nil), // 54: tfplugin5.ReadDataSource.Request + (*ReadDataSource_Response)(nil), // 55: tfplugin5.ReadDataSource.Response + (*GetProvisionerSchema_Request)(nil), // 56: tfplugin5.GetProvisionerSchema.Request + (*GetProvisionerSchema_Response)(nil), // 57: tfplugin5.GetProvisionerSchema.Response + (*ValidateProvisionerConfig_Request)(nil), // 58: tfplugin5.ValidateProvisionerConfig.Request + (*ValidateProvisionerConfig_Response)(nil), // 59: tfplugin5.ValidateProvisionerConfig.Response + (*ProvisionResource_Request)(nil), // 60: tfplugin5.ProvisionResource.Request + (*ProvisionResource_Response)(nil), // 61: tfplugin5.ProvisionResource.Response } var file_tfplugin5_proto_depIdxs = []int32{ 1, // 0: tfplugin5.Diagnostic.severity:type_name -> tfplugin5.Diagnostic.Severity @@ -3826,92 +3892,93 @@ var file_tfplugin5_proto_depIdxs = []int32{ 27, // 9: tfplugin5.Schema.NestedBlock.block:type_name -> tfplugin5.Schema.Block 2, // 10: tfplugin5.Schema.NestedBlock.nesting:type_name -> tfplugin5.Schema.NestedBlock.NestingMode 8, // 11: tfplugin5.GetProviderSchema.Response.provider:type_name -> tfplugin5.Schema - 32, // 12: tfplugin5.GetProviderSchema.Response.resource_schemas:type_name -> tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry - 33, // 13: tfplugin5.GetProviderSchema.Response.data_source_schemas:type_name -> tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry + 33, // 12: tfplugin5.GetProviderSchema.Response.resource_schemas:type_name -> tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry + 34, // 13: tfplugin5.GetProviderSchema.Response.data_source_schemas:type_name -> tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry 4, // 14: tfplugin5.GetProviderSchema.Response.diagnostics:type_name -> tfplugin5.Diagnostic 8, // 15: tfplugin5.GetProviderSchema.Response.provider_meta:type_name -> tfplugin5.Schema - 8, // 16: tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry.value:type_name -> tfplugin5.Schema - 8, // 17: tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry.value:type_name -> tfplugin5.Schema - 3, // 18: tfplugin5.PrepareProviderConfig.Request.config:type_name -> tfplugin5.DynamicValue - 3, // 19: tfplugin5.PrepareProviderConfig.Response.prepared_config:type_name -> tfplugin5.DynamicValue - 4, // 20: tfplugin5.PrepareProviderConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 7, // 21: tfplugin5.UpgradeResourceState.Request.raw_state:type_name -> tfplugin5.RawState - 3, // 22: tfplugin5.UpgradeResourceState.Response.upgraded_state:type_name -> tfplugin5.DynamicValue - 4, // 23: tfplugin5.UpgradeResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 24: tfplugin5.ValidateResourceTypeConfig.Request.config:type_name -> tfplugin5.DynamicValue - 4, // 25: tfplugin5.ValidateResourceTypeConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 26: tfplugin5.ValidateDataSourceConfig.Request.config:type_name -> tfplugin5.DynamicValue - 4, // 27: tfplugin5.ValidateDataSourceConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 28: tfplugin5.Configure.Request.config:type_name -> tfplugin5.DynamicValue - 4, // 29: tfplugin5.Configure.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 30: 
tfplugin5.ReadResource.Request.current_state:type_name -> tfplugin5.DynamicValue - 3, // 31: tfplugin5.ReadResource.Request.provider_meta:type_name -> tfplugin5.DynamicValue - 3, // 32: tfplugin5.ReadResource.Response.new_state:type_name -> tfplugin5.DynamicValue - 4, // 33: tfplugin5.ReadResource.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 34: tfplugin5.PlanResourceChange.Request.prior_state:type_name -> tfplugin5.DynamicValue - 3, // 35: tfplugin5.PlanResourceChange.Request.proposed_new_state:type_name -> tfplugin5.DynamicValue - 3, // 36: tfplugin5.PlanResourceChange.Request.config:type_name -> tfplugin5.DynamicValue - 3, // 37: tfplugin5.PlanResourceChange.Request.provider_meta:type_name -> tfplugin5.DynamicValue - 3, // 38: tfplugin5.PlanResourceChange.Response.planned_state:type_name -> tfplugin5.DynamicValue - 5, // 39: tfplugin5.PlanResourceChange.Response.requires_replace:type_name -> tfplugin5.AttributePath - 4, // 40: tfplugin5.PlanResourceChange.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 41: tfplugin5.ApplyResourceChange.Request.prior_state:type_name -> tfplugin5.DynamicValue - 3, // 42: tfplugin5.ApplyResourceChange.Request.planned_state:type_name -> tfplugin5.DynamicValue - 3, // 43: tfplugin5.ApplyResourceChange.Request.config:type_name -> tfplugin5.DynamicValue - 3, // 44: tfplugin5.ApplyResourceChange.Request.provider_meta:type_name -> tfplugin5.DynamicValue - 3, // 45: tfplugin5.ApplyResourceChange.Response.new_state:type_name -> tfplugin5.DynamicValue - 4, // 46: tfplugin5.ApplyResourceChange.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 47: tfplugin5.ImportResourceState.ImportedResource.state:type_name -> tfplugin5.DynamicValue - 51, // 48: tfplugin5.ImportResourceState.Response.imported_resources:type_name -> tfplugin5.ImportResourceState.ImportedResource - 4, // 49: tfplugin5.ImportResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 50: tfplugin5.ReadDataSource.Request.config:type_name -> tfplugin5.DynamicValue - 3, // 51: tfplugin5.ReadDataSource.Request.provider_meta:type_name -> tfplugin5.DynamicValue - 3, // 52: tfplugin5.ReadDataSource.Response.state:type_name -> tfplugin5.DynamicValue - 4, // 53: tfplugin5.ReadDataSource.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 8, // 54: tfplugin5.GetProvisionerSchema.Response.provisioner:type_name -> tfplugin5.Schema - 4, // 55: tfplugin5.GetProvisionerSchema.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 56: tfplugin5.ValidateProvisionerConfig.Request.config:type_name -> tfplugin5.DynamicValue - 4, // 57: tfplugin5.ValidateProvisionerConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 58: tfplugin5.ProvisionResource.Request.config:type_name -> tfplugin5.DynamicValue - 3, // 59: tfplugin5.ProvisionResource.Request.connection:type_name -> tfplugin5.DynamicValue - 4, // 60: tfplugin5.ProvisionResource.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 30, // 61: tfplugin5.Provider.GetSchema:input_type -> tfplugin5.GetProviderSchema.Request - 34, // 62: tfplugin5.Provider.PrepareProviderConfig:input_type -> tfplugin5.PrepareProviderConfig.Request - 38, // 63: tfplugin5.Provider.ValidateResourceTypeConfig:input_type -> tfplugin5.ValidateResourceTypeConfig.Request - 40, // 64: tfplugin5.Provider.ValidateDataSourceConfig:input_type -> tfplugin5.ValidateDataSourceConfig.Request - 36, // 65: tfplugin5.Provider.UpgradeResourceState:input_type -> tfplugin5.UpgradeResourceState.Request - 42, // 66: 
tfplugin5.Provider.Configure:input_type -> tfplugin5.Configure.Request - 44, // 67: tfplugin5.Provider.ReadResource:input_type -> tfplugin5.ReadResource.Request - 46, // 68: tfplugin5.Provider.PlanResourceChange:input_type -> tfplugin5.PlanResourceChange.Request - 48, // 69: tfplugin5.Provider.ApplyResourceChange:input_type -> tfplugin5.ApplyResourceChange.Request - 50, // 70: tfplugin5.Provider.ImportResourceState:input_type -> tfplugin5.ImportResourceState.Request - 53, // 71: tfplugin5.Provider.ReadDataSource:input_type -> tfplugin5.ReadDataSource.Request - 24, // 72: tfplugin5.Provider.Stop:input_type -> tfplugin5.Stop.Request - 55, // 73: tfplugin5.Provisioner.GetSchema:input_type -> tfplugin5.GetProvisionerSchema.Request - 57, // 74: tfplugin5.Provisioner.ValidateProvisionerConfig:input_type -> tfplugin5.ValidateProvisionerConfig.Request - 59, // 75: tfplugin5.Provisioner.ProvisionResource:input_type -> tfplugin5.ProvisionResource.Request - 24, // 76: tfplugin5.Provisioner.Stop:input_type -> tfplugin5.Stop.Request - 31, // 77: tfplugin5.Provider.GetSchema:output_type -> tfplugin5.GetProviderSchema.Response - 35, // 78: tfplugin5.Provider.PrepareProviderConfig:output_type -> tfplugin5.PrepareProviderConfig.Response - 39, // 79: tfplugin5.Provider.ValidateResourceTypeConfig:output_type -> tfplugin5.ValidateResourceTypeConfig.Response - 41, // 80: tfplugin5.Provider.ValidateDataSourceConfig:output_type -> tfplugin5.ValidateDataSourceConfig.Response - 37, // 81: tfplugin5.Provider.UpgradeResourceState:output_type -> tfplugin5.UpgradeResourceState.Response - 43, // 82: tfplugin5.Provider.Configure:output_type -> tfplugin5.Configure.Response - 45, // 83: tfplugin5.Provider.ReadResource:output_type -> tfplugin5.ReadResource.Response - 47, // 84: tfplugin5.Provider.PlanResourceChange:output_type -> tfplugin5.PlanResourceChange.Response - 49, // 85: tfplugin5.Provider.ApplyResourceChange:output_type -> tfplugin5.ApplyResourceChange.Response - 52, // 86: tfplugin5.Provider.ImportResourceState:output_type -> tfplugin5.ImportResourceState.Response - 54, // 87: tfplugin5.Provider.ReadDataSource:output_type -> tfplugin5.ReadDataSource.Response - 25, // 88: tfplugin5.Provider.Stop:output_type -> tfplugin5.Stop.Response - 56, // 89: tfplugin5.Provisioner.GetSchema:output_type -> tfplugin5.GetProvisionerSchema.Response - 58, // 90: tfplugin5.Provisioner.ValidateProvisionerConfig:output_type -> tfplugin5.ValidateProvisionerConfig.Response - 60, // 91: tfplugin5.Provisioner.ProvisionResource:output_type -> tfplugin5.ProvisionResource.Response - 25, // 92: tfplugin5.Provisioner.Stop:output_type -> tfplugin5.Stop.Response - 77, // [77:93] is the sub-list for method output_type - 61, // [61:77] is the sub-list for method input_type - 61, // [61:61] is the sub-list for extension type_name - 61, // [61:61] is the sub-list for extension extendee - 0, // [0:61] is the sub-list for field type_name + 32, // 16: tfplugin5.GetProviderSchema.Response.server_capabilities:type_name -> tfplugin5.GetProviderSchema.ServerCapabilities + 8, // 17: tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry.value:type_name -> tfplugin5.Schema + 8, // 18: tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry.value:type_name -> tfplugin5.Schema + 3, // 19: tfplugin5.PrepareProviderConfig.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 20: tfplugin5.PrepareProviderConfig.Response.prepared_config:type_name -> tfplugin5.DynamicValue + 4, // 21: tfplugin5.PrepareProviderConfig.Response.diagnostics:type_name -> 
tfplugin5.Diagnostic + 7, // 22: tfplugin5.UpgradeResourceState.Request.raw_state:type_name -> tfplugin5.RawState + 3, // 23: tfplugin5.UpgradeResourceState.Response.upgraded_state:type_name -> tfplugin5.DynamicValue + 4, // 24: tfplugin5.UpgradeResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 25: tfplugin5.ValidateResourceTypeConfig.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 26: tfplugin5.ValidateResourceTypeConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 27: tfplugin5.ValidateDataSourceConfig.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 28: tfplugin5.ValidateDataSourceConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 29: tfplugin5.Configure.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 30: tfplugin5.Configure.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 31: tfplugin5.ReadResource.Request.current_state:type_name -> tfplugin5.DynamicValue + 3, // 32: tfplugin5.ReadResource.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 33: tfplugin5.ReadResource.Response.new_state:type_name -> tfplugin5.DynamicValue + 4, // 34: tfplugin5.ReadResource.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 35: tfplugin5.PlanResourceChange.Request.prior_state:type_name -> tfplugin5.DynamicValue + 3, // 36: tfplugin5.PlanResourceChange.Request.proposed_new_state:type_name -> tfplugin5.DynamicValue + 3, // 37: tfplugin5.PlanResourceChange.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 38: tfplugin5.PlanResourceChange.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 39: tfplugin5.PlanResourceChange.Response.planned_state:type_name -> tfplugin5.DynamicValue + 5, // 40: tfplugin5.PlanResourceChange.Response.requires_replace:type_name -> tfplugin5.AttributePath + 4, // 41: tfplugin5.PlanResourceChange.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 42: tfplugin5.ApplyResourceChange.Request.prior_state:type_name -> tfplugin5.DynamicValue + 3, // 43: tfplugin5.ApplyResourceChange.Request.planned_state:type_name -> tfplugin5.DynamicValue + 3, // 44: tfplugin5.ApplyResourceChange.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 45: tfplugin5.ApplyResourceChange.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 46: tfplugin5.ApplyResourceChange.Response.new_state:type_name -> tfplugin5.DynamicValue + 4, // 47: tfplugin5.ApplyResourceChange.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 48: tfplugin5.ImportResourceState.ImportedResource.state:type_name -> tfplugin5.DynamicValue + 52, // 49: tfplugin5.ImportResourceState.Response.imported_resources:type_name -> tfplugin5.ImportResourceState.ImportedResource + 4, // 50: tfplugin5.ImportResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 51: tfplugin5.ReadDataSource.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 52: tfplugin5.ReadDataSource.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 53: tfplugin5.ReadDataSource.Response.state:type_name -> tfplugin5.DynamicValue + 4, // 54: tfplugin5.ReadDataSource.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 8, // 55: tfplugin5.GetProvisionerSchema.Response.provisioner:type_name -> tfplugin5.Schema + 4, // 56: tfplugin5.GetProvisionerSchema.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 57: tfplugin5.ValidateProvisionerConfig.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 58: 
tfplugin5.ValidateProvisionerConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 59: tfplugin5.ProvisionResource.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 60: tfplugin5.ProvisionResource.Request.connection:type_name -> tfplugin5.DynamicValue + 4, // 61: tfplugin5.ProvisionResource.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 30, // 62: tfplugin5.Provider.GetSchema:input_type -> tfplugin5.GetProviderSchema.Request + 35, // 63: tfplugin5.Provider.PrepareProviderConfig:input_type -> tfplugin5.PrepareProviderConfig.Request + 39, // 64: tfplugin5.Provider.ValidateResourceTypeConfig:input_type -> tfplugin5.ValidateResourceTypeConfig.Request + 41, // 65: tfplugin5.Provider.ValidateDataSourceConfig:input_type -> tfplugin5.ValidateDataSourceConfig.Request + 37, // 66: tfplugin5.Provider.UpgradeResourceState:input_type -> tfplugin5.UpgradeResourceState.Request + 43, // 67: tfplugin5.Provider.Configure:input_type -> tfplugin5.Configure.Request + 45, // 68: tfplugin5.Provider.ReadResource:input_type -> tfplugin5.ReadResource.Request + 47, // 69: tfplugin5.Provider.PlanResourceChange:input_type -> tfplugin5.PlanResourceChange.Request + 49, // 70: tfplugin5.Provider.ApplyResourceChange:input_type -> tfplugin5.ApplyResourceChange.Request + 51, // 71: tfplugin5.Provider.ImportResourceState:input_type -> tfplugin5.ImportResourceState.Request + 54, // 72: tfplugin5.Provider.ReadDataSource:input_type -> tfplugin5.ReadDataSource.Request + 24, // 73: tfplugin5.Provider.Stop:input_type -> tfplugin5.Stop.Request + 56, // 74: tfplugin5.Provisioner.GetSchema:input_type -> tfplugin5.GetProvisionerSchema.Request + 58, // 75: tfplugin5.Provisioner.ValidateProvisionerConfig:input_type -> tfplugin5.ValidateProvisionerConfig.Request + 60, // 76: tfplugin5.Provisioner.ProvisionResource:input_type -> tfplugin5.ProvisionResource.Request + 24, // 77: tfplugin5.Provisioner.Stop:input_type -> tfplugin5.Stop.Request + 31, // 78: tfplugin5.Provider.GetSchema:output_type -> tfplugin5.GetProviderSchema.Response + 36, // 79: tfplugin5.Provider.PrepareProviderConfig:output_type -> tfplugin5.PrepareProviderConfig.Response + 40, // 80: tfplugin5.Provider.ValidateResourceTypeConfig:output_type -> tfplugin5.ValidateResourceTypeConfig.Response + 42, // 81: tfplugin5.Provider.ValidateDataSourceConfig:output_type -> tfplugin5.ValidateDataSourceConfig.Response + 38, // 82: tfplugin5.Provider.UpgradeResourceState:output_type -> tfplugin5.UpgradeResourceState.Response + 44, // 83: tfplugin5.Provider.Configure:output_type -> tfplugin5.Configure.Response + 46, // 84: tfplugin5.Provider.ReadResource:output_type -> tfplugin5.ReadResource.Response + 48, // 85: tfplugin5.Provider.PlanResourceChange:output_type -> tfplugin5.PlanResourceChange.Response + 50, // 86: tfplugin5.Provider.ApplyResourceChange:output_type -> tfplugin5.ApplyResourceChange.Response + 53, // 87: tfplugin5.Provider.ImportResourceState:output_type -> tfplugin5.ImportResourceState.Response + 55, // 88: tfplugin5.Provider.ReadDataSource:output_type -> tfplugin5.ReadDataSource.Response + 25, // 89: tfplugin5.Provider.Stop:output_type -> tfplugin5.Stop.Response + 57, // 90: tfplugin5.Provisioner.GetSchema:output_type -> tfplugin5.GetProvisionerSchema.Response + 59, // 91: tfplugin5.Provisioner.ValidateProvisionerConfig:output_type -> tfplugin5.ValidateProvisionerConfig.Response + 61, // 92: tfplugin5.Provisioner.ProvisionResource:output_type -> tfplugin5.ProvisionResource.Response + 25, // 93: tfplugin5.Provisioner.Stop:output_type -> 
tfplugin5.Stop.Response + 78, // [78:94] is the sub-list for method output_type + 62, // [62:78] is the sub-list for method input_type + 62, // [62:62] is the sub-list for extension type_name + 62, // [62:62] is the sub-list for extension extendee + 0, // [0:62] is the sub-list for field type_name } func init() { file_tfplugin5_proto_init() } @@ -4256,8 +4323,8 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PrepareProviderConfig_Request); i { + file_tfplugin5_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProviderSchema_ServerCapabilities); i { case 0: return &v.state case 1: @@ -4269,7 +4336,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PrepareProviderConfig_Response); i { + switch v := v.(*PrepareProviderConfig_Request); i { case 0: return &v.state case 1: @@ -4281,7 +4348,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpgradeResourceState_Request); i { + switch v := v.(*PrepareProviderConfig_Response); i { case 0: return &v.state case 1: @@ -4293,7 +4360,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpgradeResourceState_Response); i { + switch v := v.(*UpgradeResourceState_Request); i { case 0: return &v.state case 1: @@ -4305,7 +4372,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateResourceTypeConfig_Request); i { + switch v := v.(*UpgradeResourceState_Response); i { case 0: return &v.state case 1: @@ -4317,7 +4384,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateResourceTypeConfig_Response); i { + switch v := v.(*ValidateResourceTypeConfig_Request); i { case 0: return &v.state case 1: @@ -4329,7 +4396,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateDataSourceConfig_Request); i { + switch v := v.(*ValidateResourceTypeConfig_Response); i { case 0: return &v.state case 1: @@ -4341,7 +4408,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateDataSourceConfig_Response); i { + switch v := v.(*ValidateDataSourceConfig_Request); i { case 0: return &v.state case 1: @@ -4353,7 +4420,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Configure_Request); i { + switch v := v.(*ValidateDataSourceConfig_Response); i { case 0: return &v.state case 1: @@ -4365,7 +4432,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Configure_Response); i { + switch v := v.(*Configure_Request); i { case 0: return &v.state case 1: @@ -4377,7 +4444,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadResource_Request); i { + switch v := v.(*Configure_Response); 
i { case 0: return &v.state case 1: @@ -4389,7 +4456,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadResource_Response); i { + switch v := v.(*ReadResource_Request); i { case 0: return &v.state case 1: @@ -4401,7 +4468,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PlanResourceChange_Request); i { + switch v := v.(*ReadResource_Response); i { case 0: return &v.state case 1: @@ -4413,7 +4480,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PlanResourceChange_Response); i { + switch v := v.(*PlanResourceChange_Request); i { case 0: return &v.state case 1: @@ -4425,7 +4492,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyResourceChange_Request); i { + switch v := v.(*PlanResourceChange_Response); i { case 0: return &v.state case 1: @@ -4437,7 +4504,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyResourceChange_Response); i { + switch v := v.(*ApplyResourceChange_Request); i { case 0: return &v.state case 1: @@ -4449,7 +4516,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ImportResourceState_Request); i { + switch v := v.(*ApplyResourceChange_Response); i { case 0: return &v.state case 1: @@ -4461,7 +4528,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ImportResourceState_ImportedResource); i { + switch v := v.(*ImportResourceState_Request); i { case 0: return &v.state case 1: @@ -4473,7 +4540,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ImportResourceState_Response); i { + switch v := v.(*ImportResourceState_ImportedResource); i { case 0: return &v.state case 1: @@ -4485,7 +4552,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadDataSource_Request); i { + switch v := v.(*ImportResourceState_Response); i { case 0: return &v.state case 1: @@ -4497,7 +4564,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadDataSource_Response); i { + switch v := v.(*ReadDataSource_Request); i { case 0: return &v.state case 1: @@ -4509,7 +4576,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetProvisionerSchema_Request); i { + switch v := v.(*ReadDataSource_Response); i { case 0: return &v.state case 1: @@ -4521,7 +4588,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetProvisionerSchema_Response); i { + switch v := v.(*GetProvisionerSchema_Request); i { case 0: return &v.state case 1: @@ -4533,7 +4600,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[54].Exporter = func(v interface{}, i int) 
interface{} { - switch v := v.(*ValidateProvisionerConfig_Request); i { + switch v := v.(*GetProvisionerSchema_Response); i { case 0: return &v.state case 1: @@ -4545,7 +4612,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateProvisionerConfig_Response); i { + switch v := v.(*ValidateProvisionerConfig_Request); i { case 0: return &v.state case 1: @@ -4557,7 +4624,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProvisionResource_Request); i { + switch v := v.(*ValidateProvisionerConfig_Response); i { case 0: return &v.state case 1: @@ -4569,6 +4636,18 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProvisionResource_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ProvisionResource_Response); i { case 0: return &v.state @@ -4592,7 +4671,7 @@ func file_tfplugin5_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_tfplugin5_proto_rawDesc, NumEnums: 3, - NumMessages: 58, + NumMessages: 59, NumExtensions: 0, NumServices: 2, }, diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.proto b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.proto index 341bd2597..fdc123a85 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.proto +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.proto @@ -1,6 +1,6 @@ -// Terraform Plugin RPC protocol version 5.2 +// Terraform Plugin RPC protocol version 5.3 // -// This file defines version 5.2 of the RPC protocol. To implement a plugin +// This file defines version 5.3 of the RPC protocol. To implement a plugin // against this protocol, copy this definition into your own codebase and // use protoc to generate stubs for your target language. // @@ -13,7 +13,7 @@ // official protocol releases. Proto files taken from other commits may include // incomplete changes or features that did not make it into a final release. // In all reasonable cases, plugin developers should take the proto file from -// the tag of the most recent release of Terraform, and not from the master +// the tag of the most recent release of Terraform, and not from the main // branch or any other development branch. // syntax = "proto3"; @@ -118,7 +118,7 @@ message Schema { // The version of the schema. // Schemas are versioned, so that providers can upgrade a saved resource - // state when the schema is changed. + // state when the schema is changed. int64 version = 1; // Block is the top level configuration block for this schema. @@ -157,6 +157,18 @@ message GetProviderSchema { map data_source_schemas = 3; repeated Diagnostic diagnostics = 4; Schema provider_meta = 5; + ServerCapabilities server_capabilities = 6; + } + + + // ServerCapabilities allows providers to communicate extra information + // regarding supported protocol features. This is used to indicate + // availability of certain forward-compatible changes which may be optional + // in a major protocol version, but cannot be tested for directly. 
+ message ServerCapabilities { + // The plan_destroy capability signals that a provider expects a call + // to PlanResourceChange when a resource is going to be destroyed. + bool plan_destroy = 1; } } @@ -247,14 +259,14 @@ message PlanResourceChange { DynamicValue prior_state = 2; DynamicValue proposed_new_state = 3; DynamicValue config = 4; - bytes prior_private = 5; + bytes prior_private = 5; DynamicValue provider_meta = 6; } message Response { DynamicValue planned_state = 1; repeated AttributePath requires_replace = 2; - bytes planned_private = 3; + bytes planned_private = 3; repeated Diagnostic diagnostics = 4; @@ -279,12 +291,12 @@ message ApplyResourceChange { DynamicValue prior_state = 2; DynamicValue planned_state = 3; DynamicValue config = 4; - bytes planned_private = 5; + bytes planned_private = 5; DynamicValue provider_meta = 6; } message Response { DynamicValue new_state = 1; - bytes private = 2; + bytes private = 2; repeated Diagnostic diagnostics = 3; // This may be set only by the helper/schema "SDK" in the main Terraform @@ -365,5 +377,5 @@ message ProvisionResource { message Response { string output = 1; repeated Diagnostic diagnostics = 2; - } + } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go index 2a1c19ad1..ddbdea31e 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.19.4 +// source: tfplugin5.proto package tfplugin5 @@ -11,6 +15,7 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 // ProviderClient is the client API for Provider service. @@ -223,8 +228,8 @@ type UnsafeProviderServer interface { mustEmbedUnimplementedProviderServer() } -func RegisterProviderServer(s *grpc.Server, srv ProviderServer) { - s.RegisterService(&_Provider_serviceDesc, srv) +func RegisterProviderServer(s grpc.ServiceRegistrar, srv ProviderServer) { + s.RegisterService(&Provider_ServiceDesc, srv) } func _Provider_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { @@ -443,7 +448,10 @@ func _Provider_Stop_Handler(srv interface{}, ctx context.Context, dec func(inter return interceptor(ctx, in, info, handler) } -var _Provider_serviceDesc = grpc.ServiceDesc{ +// Provider_ServiceDesc is the grpc.ServiceDesc for Provider service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Provider_ServiceDesc = grpc.ServiceDesc{ ServiceName: "tfplugin5.Provider", HandlerType: (*ProviderServer)(nil), Methods: []grpc.MethodDesc{ @@ -537,7 +545,7 @@ func (c *provisionerClient) ValidateProvisionerConfig(ctx context.Context, in *V } func (c *provisionerClient) ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) { - stream, err := c.cc.NewStream(ctx, &_Provisioner_serviceDesc.Streams[0], "/tfplugin5.Provisioner/ProvisionResource", opts...) + stream, err := c.cc.NewStream(ctx, &Provisioner_ServiceDesc.Streams[0], "/tfplugin5.Provisioner/ProvisionResource", opts...) if err != nil { return nil, err } @@ -613,8 +621,8 @@ type UnsafeProvisionerServer interface { mustEmbedUnimplementedProvisionerServer() } -func RegisterProvisionerServer(s *grpc.Server, srv ProvisionerServer) { - s.RegisterService(&_Provisioner_serviceDesc, srv) +func RegisterProvisionerServer(s grpc.ServiceRegistrar, srv ProvisionerServer) { + s.RegisterService(&Provisioner_ServiceDesc, srv) } func _Provisioner_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { @@ -692,7 +700,10 @@ func _Provisioner_Stop_Handler(srv interface{}, ctx context.Context, dec func(in return interceptor(ctx, in, info, handler) } -var _Provisioner_serviceDesc = grpc.ServiceDesc{ +// Provisioner_ServiceDesc is the grpc.ServiceDesc for Provisioner service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Provisioner_ServiceDesc = grpc.ServiceDesc{ ServiceName: "tfplugin5.Provisioner", HandlerType: (*ProvisionerServer)(nil), Methods: []grpc.MethodDesc{ diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/provider.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/provider.go index a18430f5c..9b0d9ac25 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/provider.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/provider.go @@ -12,7 +12,12 @@ func GetProviderSchema_Request(in *tfprotov5.GetProviderSchemaRequest) (*tfplugi } func GetProviderSchema_Response(in *tfprotov5.GetProviderSchemaResponse) (*tfplugin5.GetProviderSchema_Response, error) { - var resp tfplugin5.GetProviderSchema_Response + if in == nil { + return nil, nil + } + resp := tfplugin5.GetProviderSchema_Response{ + ServerCapabilities: GetProviderSchema_ServerCapabilities(in.ServerCapabilities), + } if in.Provider != nil { schema, err := Schema(in.Provider) if err != nil { diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/server_capabilities.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/server_capabilities.go new file mode 100644 index 000000000..c1ff45ae4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/server_capabilities.go @@ -0,0 +1,16 @@ +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func GetProviderSchema_ServerCapabilities(in *tfprotov5.ServerCapabilities) *tfplugin5.GetProviderSchema_ServerCapabilities { + if in == nil { + 
return nil + } + + return &tfplugin5.GetProviderSchema_ServerCapabilities{ + PlanDestroy: in.PlanDestroy, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/provider.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/provider.go index c4b91cdcf..c7b95a5d1 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/provider.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/provider.go @@ -46,6 +46,10 @@ type GetProviderSchemaRequest struct{} // GetProviderSchemaResponse represents a Terraform RPC response containing the // provider's schemas. type GetProviderSchemaResponse struct { + // ServerCapabilities defines optionally supported protocol features, + // such as forward-compatible Terraform behavior changes. + ServerCapabilities *ServerCapabilities + // Provider defines the schema for the provider configuration, which // will be specified in the provider block of the user's configuration. Provider *Schema diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/resource.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/resource.go index c3fdd6ce4..b86a045e9 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/resource.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/resource.go @@ -98,6 +98,9 @@ type UpgradeResourceStateResponse struct { // // The state should be represented as a tftypes.Object, with each // attribute and nested block getting its own key and value. + // + // Terraform CLI 0.12 through 0.14 require the Msgpack field to be + // populated or an EOF error will be returned. UpgradedState *DynamicValue // Diagnostics report errors or warnings related to upgrading the @@ -125,6 +128,9 @@ type ReadResourceRequest struct { // Private is any provider-defined private state stored with the // resource. It is used for keeping state with the resource that is not // meant to be included when calculating diffs. + // + // To ensure private state data is preserved, copy any necessary data to + // the ReadResourceResponse type Private field. Private []byte // ProviderMeta supplies the provider metadata configuration for the @@ -212,6 +218,9 @@ type PlanResourceChangeRequest struct { // PriorPrivate is any provider-defined private state stored with the // resource. It is used for keeping state with the resource that is not // meant to be included when calculating diffs. + // + // To ensure private state data is preserved, copy any necessary data to + // the PlanResourceChangeResponse type PlannedPrivate field. PriorPrivate []byte // ProviderMeta supplies the provider metadata configuration for the @@ -280,6 +289,10 @@ type PlanResourceChangeResponse struct { // like sent with requests for this resource. This state will be // associated with the resource, but will not be considered when // calculating diffs. + // + // This private state data will be sent in the ApplyResourceChange RPC, in + // relation to the types of this package, the ApplyResourceChangeRequest + // type PlannedPrivate field. PlannedPrivate []byte // Diagnostics report errors or warnings related to determining the @@ -341,6 +354,13 @@ type ApplyResourceChangeRequest struct { // PlannedPrivate is any provider-defined private state stored with the // resource. It is used for keeping state with the resource that is not // meant to be included when calculating diffs. 
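The new doc comments above all describe the same contract: private state does not flow between RPCs automatically, so a provider must copy it forward at each step (request Private/PriorPrivate/PlannedPrivate into the matching response field). A minimal sketch of that round-trip, using only the tfprotov5 request and response fields shown in this diff — the exampleResource type is invented, and real plan/apply logic and diagnostics are omitted:

package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-go/tfprotov5"
)

type exampleResource struct{}

// PlanResourceChange copies PriorPrivate into PlannedPrivate so the data
// survives into the ApplyResourceChange RPC.
func (r exampleResource) PlanResourceChange(ctx context.Context, req *tfprotov5.PlanResourceChangeRequest) (*tfprotov5.PlanResourceChangeResponse, error) {
	return &tfprotov5.PlanResourceChangeResponse{
		PlannedState:   req.ProposedNewState,
		PlannedPrivate: req.PriorPrivate,
	}, nil
}

// ApplyResourceChange copies PlannedPrivate into Private so the data is
// stored alongside the resource state.
func (r exampleResource) ApplyResourceChange(ctx context.Context, req *tfprotov5.ApplyResourceChangeRequest) (*tfprotov5.ApplyResourceChangeResponse, error) {
	return &tfprotov5.ApplyResourceChangeResponse{
		NewState: req.PlannedState,
		Private:  req.PlannedPrivate,
	}, nil
}

// ReadResource echoes Private back so a refresh does not drop it.
func (r exampleResource) ReadResource(ctx context.Context, req *tfprotov5.ReadResourceRequest) (*tfprotov5.ReadResourceResponse, error) {
	return &tfprotov5.ReadResourceResponse{
		NewState: req.CurrentState,
		Private:  req.Private,
	}, nil
}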
+ // + // This private state data is sourced from the PlanResourceChange RPC, in + // relation to the types in this package, the PlanResourceChangeResponse + // type PlannedPrivate field. + // + // To ensure private state data is preserved, copy any necessary data to + // the ApplyResourceChangeResponse type Private field. PlannedPrivate []byte // ProviderMeta supplies the provider metadata configuration for the diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/schema.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/schema.go index 5f1acc34d..b0b6dff28 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/schema.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/schema.go @@ -68,6 +68,19 @@ type Schema struct { Block *SchemaBlock } +// ValueType returns the tftypes.Type for a Schema. +// +// If Schema is missing, an empty Object is returned. +func (s *Schema) ValueType() tftypes.Type { + if s == nil { + return tftypes.Object{ + AttributeTypes: map[string]tftypes.Type{}, + } + } + + return s.Block.ValueType() +} + // SchemaBlock represents a block in a schema. Blocks are how Terraform creates // groupings of attributes. In configurations, they don't use the equals sign // and use dynamic instead of list comprehensions. @@ -105,6 +118,51 @@ type SchemaBlock struct { Deprecated bool } +// ValueType returns the tftypes.Type for a SchemaBlock. +// +// If SchemaBlock is missing, an empty Object is returned. +func (s *SchemaBlock) ValueType() tftypes.Type { + if s == nil { + return tftypes.Object{ + AttributeTypes: map[string]tftypes.Type{}, + } + } + + attributeTypes := map[string]tftypes.Type{} + + for _, attribute := range s.Attributes { + if attribute == nil { + continue + } + + attributeType := attribute.ValueType() + + if attributeType == nil { + continue + } + + attributeTypes[attribute.Name] = attributeType + } + + for _, block := range s.BlockTypes { + if block == nil { + continue + } + + blockType := block.ValueType() + + if blockType == nil { + continue + } + + attributeTypes[block.TypeName] = blockType + } + + return tftypes.Object{ + AttributeTypes: attributeTypes, + } +} + // SchemaAttribute represents a single attribute within a schema block. // Attributes are the fields users can set in configuration using the equals // sign, can assign to variables, can interpolate, and can use list @@ -162,6 +220,17 @@ type SchemaAttribute struct { Deprecated bool } +// ValueType returns the tftypes.Type for a SchemaAttribute. +// +// If SchemaAttribute is missing, nil is returned. +func (s *SchemaAttribute) ValueType() tftypes.Type { + if s == nil { + return nil + } + + return s.Type +} + // SchemaNestedBlock is a nested block within another block. See SchemaBlock // for more information on blocks. type SchemaNestedBlock struct { @@ -198,6 +267,39 @@ type SchemaNestedBlock struct { MaxItems int64 } +// ValueType returns the tftypes.Type for a SchemaNestedBlock. +// +// If SchemaNestedBlock is missing or the Nesting mode is invalid, nil is +// returned. 
+func (s *SchemaNestedBlock) ValueType() tftypes.Type { + if s == nil { + return nil + } + + blockType := s.Block.ValueType() + + switch s.Nesting { + case SchemaNestedBlockNestingModeGroup: + return blockType + case SchemaNestedBlockNestingModeList: + return tftypes.List{ + ElementType: blockType, + } + case SchemaNestedBlockNestingModeMap: + return tftypes.Map{ + ElementType: blockType, + } + case SchemaNestedBlockNestingModeSet: + return tftypes.Set{ + ElementType: blockType, + } + case SchemaNestedBlockNestingModeSingle: + return blockType + default: + return nil + } +} + // SchemaNestedBlockNestingMode indicates the nesting mode for // SchemaNestedBlocks. The nesting mode determines the number of instances of // the block allowed, how many labels the block expects, and the data structure diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/server_capabilities.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/server_capabilities.go new file mode 100644 index 000000000..f87cf2f4a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/server_capabilities.go @@ -0,0 +1,14 @@ +package tfprotov5 + +// ServerCapabilities allows providers to communicate optionally supported +// protocol features, such as forward-compatible Terraform behavior changes. +// +// This information is used in GetProviderSchemaResponse as capabilities are +// static features which must be known upfront in the provider server. +type ServerCapabilities struct { + // PlanDestroy signals that a provider expects a call to + // PlanResourceChange when a resource is going to be destroyed. This is + // opt-in to prevent unexpected errors or panics since the + // ProposedNewState in PlanResourceChangeRequest will be a null value. + PlanDestroy bool +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go index 47426ada4..c45cfd5fa 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go @@ -2,51 +2,83 @@ package tf5server import ( "context" + "encoding/json" "errors" - "log" + "fmt" + "os" + "os/signal" "regexp" + "runtime" "strings" "sync" + "time" + "github.com/hashicorp/terraform-plugin-go/internal/logging" "github.com/hashicorp/terraform-plugin-go/tfprotov5" "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging" "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto" + "google.golang.org/grpc" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" - "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-log/tfsdklog" - tfaddr "github.com/hashicorp/terraform-registry-address" testing "github.com/mitchellh/go-testing-interface" ) -const tflogSubsystemName = "proto" - -// Global logging keys attached to all requests. -// -// Practitioners or tooling reading logs may be depending on these keys, so be -// conscious of that when changing them. 
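With the ServerCapabilities struct above and the new GetProviderSchemaResponse field added in this diff, opting in to destroy planning is a one-field change in the provider's schema response. A hedged sketch — the exampleProvider type is invented, and a real GetProviderSchema would also populate resource and data source schemas:

package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-go/tfprotov5"
)

type exampleProvider struct{}

func (p exampleProvider) GetProviderSchema(ctx context.Context, req *tfprotov5.GetProviderSchemaRequest) (*tfprotov5.GetProviderSchemaResponse, error) {
	return &tfprotov5.GetProviderSchemaResponse{
		// Opt in to PlanResourceChange being called for destroy plans.
		// Per the warning above, ProposedNewState will be a null value
		// in those calls, so plan logic must handle it.
		ServerCapabilities: &tfprotov5.ServerCapabilities{
			PlanDestroy: true,
		},
		Provider: &tfprotov5.Schema{
			Block: &tfprotov5.SchemaBlock{},
		},
	}, nil
}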
const ( - // A unique ID for the RPC request - logKeyRequestID = "tf_req_id" - - // The full address of the provider, such as - // registry.terraform.io/hashicorp/random - logKeyProviderAddress = "tf_provider_addr" - - // The RPC being run, such as "ApplyResourceChange" - logKeyRPC = "tf_rpc" + // protocolVersionMajor represents the major version number of the protocol + // being served. This is used during the plugin handshake to validate the + // server and client are compatible. + // + // In the future, it may be possible to include this information directly + // in the protocol buffers rather than recreating a constant here. + protocolVersionMajor uint = 5 + + // protocolVersionMinor represents the minor version number of the protocol + // being served. Backwards compatible additions are possible in the + // protocol definitions, which is when this may be increased. While it is + // not used in plugin negotiation, it can be helpful to include this value + // for debugging, such as in logs. + // + // In the future, it may be possible to include this information directly + // in the protocol buffers rather than recreating a constant here. + protocolVersionMinor uint = 3 +) - // The type of resource being operated on, such as "random_pet" - logKeyResourceType = "tf_resource_type" +// protocolVersion represents the combined major and minor version numbers of +// the protocol being served. +var protocolVersion string = fmt.Sprintf("%d.%d", protocolVersionMajor, protocolVersionMinor) - // The type of data source being operated on, such as "archive_file" - logKeyDataSourceType = "tf_data_source_type" +const ( + // envTfReattachProviders is the environment variable used by Terraform CLI + // to directly connect to already running provider processes, such as those + // being inspected by debugging processes. When connecting to providers in + // this manner, Terraform CLI disables certain plugin handshake checks and + // will not stop the provider process. + envTfReattachProviders = "TF_REATTACH_PROVIDERS" +) - // The protocol version being used, as a string, such as "5" - logKeyProtocolVersion = "tf_proto_version" +const ( + // grpcMaxMessageSize is the maximum gRPC send and receive message sizes + // for the server. + // + // This 256MB value is arbitrarily raised from the default message sizes of + // 4MB to account for advanced use cases, but arbitrarily lowered from + // MaxInt32 (or similar) to prevent incorrect server implementations from + // exhausting resources in common execution environments. Receiving a gRPC + // message size error is preferable for troubleshooting over determining + // why an execution environment may have terminated the process via its + // memory management processes, such as oom-killer on Linux. + // + // This value is kept as constant over allowing server configurability + // since there are many factors that influence message size, such as + // Terraform configuration and state data. If larger message size use + // cases appear, other gRPC options should be explored, such as + // implementing streaming RPCs and messages. 
+ grpcMaxMessageSize = 256 << 20 ) // ServeOpt is an interface for defining options that can be passed to the @@ -65,6 +97,10 @@ type ServeConfig struct { debugCh chan *plugin.ReattachConfig debugCloseCh chan struct{} + managedDebug bool + managedDebugReattachConfigTimeout time.Duration + managedDebugStopSignals []os.Signal + disableLogInitStderr bool disableLogLocation bool useLoggingSink testing.T @@ -79,8 +115,17 @@ func (s serveConfigFunc) ApplyServeOpt(in *ServeConfig) error { // WithDebug returns a ServeOpt that will set the server into debug mode, using // the passed options to populate the go-plugin ServeTestConfig. +// +// This is an advanced ServeOpt that assumes the caller will fully manage the +// reattach configuration and server lifecycle. Refer to WithManagedDebug for a +// ServeOpt that handles common use cases, such as implementing provider main +// functions. func WithDebug(ctx context.Context, config chan *plugin.ReattachConfig, closeCh chan struct{}) ServeOpt { return serveConfigFunc(func(in *ServeConfig) error { + if in.managedDebug { + return errors.New("cannot set both WithDebug and WithManagedDebug") + } + in.debugCtx = ctx in.debugCh = config in.debugCloseCh = closeCh @@ -88,6 +133,47 @@ func WithDebug(ctx context.Context, config chan *plugin.ReattachConfig, closeCh }) } +// WithManagedDebug returns a ServeOpt that will start the server in debug +// mode, managing the reattach configuration handling and server lifecycle. +// Reattach configuration is output to stdout with human friendly instructions. +// By default, the server can be stopped with os.Interrupt (SIGINT; ctrl-c). +// +// Refer to the optional WithManagedDebugStopSignals and +// WithManagedDebugReattachConfigTimeout ServeOpt for additional configuration. +// +// The reattach configuration output of this handling is not protected by +// compatibility guarantees. Use the WithDebug ServeOpt for advanced use cases. +func WithManagedDebug() ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + if in.debugCh != nil { + return errors.New("cannot set both WithDebug and WithManagedDebug") + } + + in.managedDebug = true + return nil + }) +} + +// WithManagedDebugStopSignals returns a ServeOpt that will set the stop signals for a +// debug managed process (WithManagedDebug). When not configured, os.Interrupt +// (SIGINT; Ctrl-c) will stop the process. +func WithManagedDebugStopSignals(signals []os.Signal) ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + in.managedDebugStopSignals = signals + return nil + }) +} + +// WithManagedDebugReattachConfigTimeout returns a ServeOpt that will set the timeout +// for a debug managed process to start and return its reattach configuration. +// When not configured, 2 seconds is the default. +func WithManagedDebugReattachConfigTimeout(timeout time.Duration) ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + in.managedDebugReattachConfigTimeout = timeout + return nil + }) +} + // WithGoPluginLogger returns a ServeOpt that will set the logger that // go-plugin should use to log messages. func WithGoPluginLogger(logger hclog.Logger) ServeOpt { @@ -152,29 +238,68 @@ func WithLogEnvVarName(name string) ServeOpt { // modify the logger that go-plugin is using, ServeOpts can be specified to // support that. 
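The managed debug options above replace the reattach-handling boilerplate that provider main functions previously had to build around WithDebug. A sketch of the intended entry-point usage, assuming this diff's tf5server API — the --debug flag and the nil-returning constructor are placeholders, not part of this change:

package main

import (
	"flag"
	"log"

	"github.com/hashicorp/terraform-plugin-go/tfprotov5"
	"github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server"
)

func newExampleProviderServer() tfprotov5.ProviderServer {
	// A real provider returns its tfprotov5.ProviderServer implementation here.
	return nil
}

func main() {
	debug := flag.Bool("debug", false, "start the provider in managed debug mode")
	flag.Parse()

	var opts []tf5server.ServeOpt
	if *debug {
		// Prints a TF_REATTACH_PROVIDERS value to stdout and keeps the
		// process alive until os.Interrupt, per the Serve logic below.
		opts = append(opts, tf5server.WithManagedDebug())
	}

	err := tf5server.Serve(
		"registry.terraform.io/example/example", // provider address (placeholder)
		newExampleProviderServer,
		opts...,
	)
	if err != nil {
		log.Fatal(err)
	}
}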
func Serve(name string, serverFactory func() tfprotov5.ProviderServer, opts ...ServeOpt) error { - var conf ServeConfig + // Defaults + conf := ServeConfig{ + managedDebugReattachConfigTimeout: 2 * time.Second, + managedDebugStopSignals: []os.Signal{os.Interrupt}, + } + for _, opt := range opts { err := opt.ApplyServeOpt(&conf) if err != nil { return err } } + serveConfig := &plugin.ServeConfig{ HandshakeConfig: plugin.HandshakeConfig{ - ProtocolVersion: 5, + ProtocolVersion: protocolVersionMajor, MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", }, Plugins: plugin.PluginSet{ "provider": &GRPCProviderPlugin{ + Name: name, + Opts: opts, GRPCProvider: serverFactory, }, }, - GRPCServer: plugin.DefaultGRPCServer, + GRPCServer: func(opts []grpc.ServerOption) *grpc.Server { + opts = append(opts, grpc.MaxRecvMsgSize(grpcMaxMessageSize)) + opts = append(opts, grpc.MaxSendMsgSize(grpcMaxMessageSize)) + + return grpc.NewServer(opts...) + }, } + if conf.logger != nil { serveConfig.Logger = conf.logger } + + if conf.managedDebug { + ctx, cancel := context.WithCancel(context.Background()) + signalCh := make(chan os.Signal, len(conf.managedDebugStopSignals)) + + signal.Notify(signalCh, conf.managedDebugStopSignals...) + + defer func() { + signal.Stop(signalCh) + cancel() + }() + + go func() { + select { + case <-signalCh: + cancel() + case <-ctx.Done(): + } + }() + + conf.debugCh = make(chan *plugin.ReattachConfig) + conf.debugCloseCh = make(chan struct{}) + conf.debugCtx = ctx + } + if conf.debugCh != nil { serveConfig.Test = &plugin.ServeTestConfig{ Context: conf.debugCtx, @@ -182,7 +307,78 @@ func Serve(name string, serverFactory func() tfprotov5.ProviderServer, opts ...S CloseCh: conf.debugCloseCh, } } - plugin.Serve(serveConfig) + + if !conf.managedDebug { + plugin.Serve(serveConfig) + return nil + } + + go plugin.Serve(serveConfig) + + var pluginReattachConfig *plugin.ReattachConfig + + select { + case pluginReattachConfig = <-conf.debugCh: + case <-time.After(conf.managedDebugReattachConfigTimeout): + return errors.New("timeout waiting on reattach configuration") + } + + if pluginReattachConfig == nil { + return errors.New("nil reattach configuration received") + } + + // Duplicate implementation is required because the go-plugin + // ReattachConfig.Addr implementation is not friendly for JSON encoding + // and to avoid importing terraform-exec. + type reattachConfigAddr struct { + Network string + String string + } + + type reattachConfig struct { + Protocol string + ProtocolVersion int + Pid int + Test bool + Addr reattachConfigAddr + } + + reattachBytes, err := json.Marshal(map[string]reattachConfig{ + name: { + Protocol: string(pluginReattachConfig.Protocol), + ProtocolVersion: pluginReattachConfig.ProtocolVersion, + Pid: pluginReattachConfig.Pid, + Test: pluginReattachConfig.Test, + Addr: reattachConfigAddr{ + Network: pluginReattachConfig.Addr.Network(), + String: pluginReattachConfig.Addr.String(), + }, + }, + }) + + if err != nil { + return fmt.Errorf("Error building reattach string: %w", err) + } + + reattachStr := string(reattachBytes) + + // This is currently intended to be executed via provider main function and + // human friendly, so output directly to stdout. + fmt.Printf("Provider started. 
To attach Terraform CLI, set the %s environment variable with the following:\n\n", envTfReattachProviders) + + switch runtime.GOOS { + case "windows": + fmt.Printf("\tCommand Prompt:\tset \"%s=%s\"\n", envTfReattachProviders, reattachStr) + fmt.Printf("\tPowerShell:\t$env:%s='%s'\n", envTfReattachProviders, strings.ReplaceAll(reattachStr, `'`, `''`)) + default: + fmt.Printf("\t%s='%s'\n", envTfReattachProviders, strings.ReplaceAll(reattachStr, `'`, `'"'"'`)) + } + + fmt.Println("") + + // Wait for the server to be done. + <-conf.debugCloseCh + return nil } @@ -198,6 +394,13 @@ type server struct { useTFLogSink bool testHandle testing.T name string + + // protocolDataDir is a directory to store raw protocol data files for + // debugging purposes. + protocolDataDir string + + // protocolVersion is the protocol version for the server. + protocolVersion string } func mergeStop(ctx context.Context, cancel context.CancelFunc, stopCh chan struct{}) { @@ -230,50 +433,11 @@ func (s *server) loggingContext(ctx context.Context) context.Context { ctx = tfsdklog.RegisterTestSink(ctx, s.testHandle) } - // generate a request ID - reqID, err := uuid.GenerateUUID() - if err != nil { - reqID = "unable to assign request ID: " + err.Error() - } - - // set up the logger SDK loggers are derived from - ctx = tfsdklog.NewRootSDKLogger(ctx, append(tfsdklog.Options{ - tfsdklog.WithLevelFromEnv("TF_LOG_SDK"), - }, s.tflogSDKOpts...)...) - ctx = tfsdklog.With(ctx, logKeyRequestID, reqID) - ctx = tfsdklog.With(ctx, logKeyProviderAddress, s.name) - - // set up our protocol-level subsystem logger - ctx = tfsdklog.NewSubsystem(ctx, tflogSubsystemName, append(tfsdklog.Options{ - tfsdklog.WithLevelFromEnv("TF_LOG_SDK_PROTO"), - }, s.tflogSDKOpts...)...) - ctx = tfsdklog.SubsystemWith(ctx, tflogSubsystemName, logKeyProtocolVersion, "5") - - // set up the provider logger - ctx = tfsdklog.NewRootProviderLogger(ctx, s.tflogOpts...) 
- ctx = tflog.With(ctx, logKeyRequestID, reqID) - ctx = tflog.With(ctx, logKeyProviderAddress, s.name) - return ctx -} - -func rpcLoggingContext(ctx context.Context, rpc string) context.Context { - ctx = tfsdklog.With(ctx, logKeyRPC, rpc) - ctx = tfsdklog.SubsystemWith(ctx, tflogSubsystemName, logKeyRPC, rpc) - ctx = tflog.With(ctx, logKeyRPC, rpc) - return ctx -} + ctx = logging.InitContext(ctx, s.tflogSDKOpts, s.tflogOpts) + ctx = logging.RequestIdContext(ctx) + ctx = logging.ProviderAddressContext(ctx, s.name) + ctx = logging.ProtocolVersionContext(ctx, s.protocolVersion) -func resourceLoggingContext(ctx context.Context, resource string) context.Context { - ctx = tfsdklog.With(ctx, logKeyResourceType, resource) - ctx = tfsdklog.SubsystemWith(ctx, tflogSubsystemName, logKeyResourceType, resource) - ctx = tflog.With(ctx, logKeyResourceType, resource) - return ctx -} - -func dataSourceLoggingContext(ctx context.Context, dataSource string) context.Context { - ctx = tfsdklog.With(ctx, logKeyDataSourceType, dataSource) - ctx = tfsdklog.SubsystemWith(ctx, tflogSubsystemName, logKeyDataSourceType, dataSource) - ctx = tflog.With(ctx, logKeyDataSourceType, dataSource) return ctx } @@ -301,98 +465,103 @@ func New(name string, serve tfprotov5.ProviderServer, opts ...ServeOpt) tfplugin } envVar := conf.envVar if envVar == "" { - addr, err := tfaddr.ParseRawProviderSourceString(name) - if err != nil { - log.Printf("[ERROR] Error parsing provider name %q: %s", name, err) - } else { - envVar = addr.Type - } + envVar = logging.ProviderLoggerName(name) } - envVar = strings.ReplaceAll(envVar, "-", "_") if envVar != "" { - options = append(options, tfsdklog.WithLogName(envVar), tflog.WithLevelFromEnv("TF_LOG_PROVIDER", envVar)) + options = append(options, tfsdklog.WithLogName(envVar), tflog.WithLevelFromEnv(logging.EnvTfLogProvider, envVar)) } return &server{ - downstream: serve, - stopCh: make(chan struct{}), - tflogOpts: options, - tflogSDKOpts: sdkOptions, - name: name, - useTFLogSink: conf.useLoggingSink != nil, - testHandle: conf.useLoggingSink, + downstream: serve, + stopCh: make(chan struct{}), + tflogOpts: options, + tflogSDKOpts: sdkOptions, + name: name, + useTFLogSink: conf.useLoggingSink != nil, + testHandle: conf.useLoggingSink, + protocolDataDir: os.Getenv(logging.EnvTfLogSdkProtoDataDir), + protocolVersion: protocolVersion, } } func (s *server) GetSchema(ctx context.Context, req *tfplugin5.GetProviderSchema_Request) (*tfplugin5.GetProviderSchema_Response, error) { - ctx = rpcLoggingContext(s.loggingContext(ctx), "GetSchema") + rpc := "GetProviderSchema" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.GetProviderSchemaRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.GetProviderSchema(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + 
logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) ret, err := toproto.GetProviderSchema_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) PrepareProviderConfig(ctx context.Context, req *tfplugin5.PrepareProviderConfig_Request) (*tfplugin5.PrepareProviderConfig_Response, error) { - ctx = rpcLoggingContext(s.loggingContext(ctx), "PrepareProviderConfig") + rpc := "PrepareProviderConfig" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.PrepareProviderConfigRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.PrepareProviderConfig(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "PreparedConfig", resp.PreparedConfig) ret, err := toproto.PrepareProviderConfig_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) Configure(ctx context.Context, req *tfplugin5.Configure_Request) (*tfplugin5.Configure_Response, error) { - ctx = rpcLoggingContext(s.loggingContext(ctx), "Configure") + rpc := "Configure" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ConfigureProviderRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", 
"Config", r.Config) + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ConfigureProvider(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) ret, err := toproto.Configure_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil @@ -412,228 +581,276 @@ func (s *server) stop() { } func (s *server) Stop(ctx context.Context, req *tfplugin5.Stop_Request) (*tfplugin5.Stop_Response, error) { - ctx = rpcLoggingContext(s.loggingContext(ctx), "Stop") + rpc := "Stop" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.StopProviderRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.StopProvider(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Closing all our contexts") + tf5serverlogging.DownstreamResponse(ctx, nil) + logging.ProtocolTrace(ctx, "Closing all our contexts") s.stop() - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Closed all our contexts") + logging.ProtocolTrace(ctx, "Closed all our contexts") ret, err := toproto.Stop_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ValidateDataSourceConfig(ctx context.Context, req *tfplugin5.ValidateDataSourceConfig_Request) (*tfplugin5.ValidateDataSourceConfig_Response, error) { - ctx = dataSourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ValidateDataSourceConfig"), req.TypeName) + rpc := "ValidateDataSourceConfig" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.DataSourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := 
fromproto.ValidateDataSourceConfigRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ValidateDataSourceConfig(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) ret, err := toproto.ValidateDataSourceConfig_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ReadDataSource(ctx context.Context, req *tfplugin5.ReadDataSource_Request) (*tfplugin5.ReadDataSource_Response, error) { - ctx = dataSourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ReadDataSource"), req.TypeName) + rpc := "ReadDataSource" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.DataSourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ReadDataSourceRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", r.ProviderMeta) + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ReadDataSource(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "State", resp.State) ret, err := toproto.ReadDataSource_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ValidateResourceTypeConfig(ctx context.Context, req *tfplugin5.ValidateResourceTypeConfig_Request) (*tfplugin5.ValidateResourceTypeConfig_Response, error) { - ctx = 
resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ValidateResourceTypeConfig"), req.TypeName) + rpc := "ValidateResourceTypeConfig" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ValidateResourceTypeConfigRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ValidateResourceTypeConfig(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) ret, err := toproto.ValidateResourceTypeConfig_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) UpgradeResourceState(ctx context.Context, req *tfplugin5.UpgradeResourceState_Request) (*tfplugin5.UpgradeResourceState_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "UpgradeResourceState"), req.TypeName) + rpc := "UpgradeResourceState" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.UpgradeResourceStateRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.UpgradeResourceState(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "UpgradedState", resp.UpgradedState) ret, err := toproto.UpgradeResourceState_Response(resp) if err != nil { - 
tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ReadResource(ctx context.Context, req *tfplugin5.ReadResource_Request) (*tfplugin5.ReadResource_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ReadResource"), req.TypeName) + rpc := "ReadResource" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ReadResourceRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "CurrentState", r.CurrentState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", r.ProviderMeta) + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ReadResource(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "NewState", resp.NewState) ret, err := toproto.ReadResource_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) PlanResourceChange(ctx context.Context, req *tfplugin5.PlanResourceChange_Request) (*tfplugin5.PlanResourceChange_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "PlanResourceChange"), req.TypeName) + rpc := "PlanResourceChange" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.PlanResourceChangeRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, 
"Request", "PriorState", r.PriorState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProposedNewState", r.ProposedNewState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", r.ProviderMeta) + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.PlanResourceChange(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "PlannedState", resp.PlannedState) ret, err := toproto.PlanResourceChange_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ApplyResourceChange(ctx context.Context, req *tfplugin5.ApplyResourceChange_Request) (*tfplugin5.ApplyResourceChange_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ApplyResourceChange"), req.TypeName) + rpc := "ApplyResourceChange" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ApplyResourceChangeRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "PlannedState", r.PlannedState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ApplyResourceChange(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "NewState", resp.NewState) ret, err := toproto.ApplyResourceChange_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ImportResourceState(ctx context.Context, req *tfplugin5.ImportResourceState_Request) 
(*tfplugin5.ImportResourceState_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ImportResourceState"), req.TypeName) + rpc := "ImportResourceState" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ImportResourceStateRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ImportResourceState(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + for _, importedResource := range resp.ImportedResources { + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response_ImportedResource", "State", importedResource.State) + } ret, err := toproto.ImportResourceState_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag/diagnostics.go new file mode 100644 index 000000000..543a36232 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag/diagnostics.go @@ -0,0 +1,82 @@ +package diag + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// Diagnostics is a collection of Diagnostic. +type Diagnostics []*tfprotov6.Diagnostic + +// ErrorCount returns the number of error severity diagnostics. 
+func (d Diagnostics) ErrorCount() int { + var result int + + for _, diagnostic := range d { + if diagnostic == nil { + continue + } + + if diagnostic.Severity != tfprotov6.DiagnosticSeverityError { + continue + } + + result++ + } + + return result +} + +// Log will log every diagnostic: +// +// - Error severity at ERROR level +// - Warning severity at WARN level +// - Invalid/Unknown severity at WARN level +// +func (d Diagnostics) Log(ctx context.Context) { + for _, diagnostic := range d { + if diagnostic == nil { + continue + } + + diagnosticFields := map[string]interface{}{ + logging.KeyDiagnosticDetail: diagnostic.Detail, + logging.KeyDiagnosticSeverity: diagnostic.Severity.String(), + logging.KeyDiagnosticSummary: diagnostic.Summary, + } + + if diagnostic.Attribute != nil { + diagnosticFields[logging.KeyDiagnosticAttribute] = diagnostic.Attribute.String() + } + + switch diagnostic.Severity { + case tfprotov6.DiagnosticSeverityError: + logging.ProtocolError(ctx, "Response contains error diagnostic", diagnosticFields) + case tfprotov6.DiagnosticSeverityWarning: + logging.ProtocolWarn(ctx, "Response contains warning diagnostic", diagnosticFields) + default: + logging.ProtocolWarn(ctx, "Response contains unknown diagnostic", diagnosticFields) + } + } +} + +// WarningCount returns the number of warning severity diagnostics. +func (d Diagnostics) WarningCount() int { + var result int + + for _, diagnostic := range d { + if diagnostic == nil { + continue + } + + if diagnostic.Severity != tfprotov6.DiagnosticSeverityWarning { + continue + } + + result++ + } + + return result +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag/doc.go new file mode 100644 index 000000000..0c73dab12 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag/doc.go @@ -0,0 +1,3 @@ +// Package diag contains diagnostics helpers. These implementations are +// intentionally outside the public API. 
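Since the diag package above is internal, its counting behavior can be illustrated against the public tfprotov6 types; this standalone example mirrors the nil-skipping severity buckets of ErrorCount and WarningCount:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tfprotov6"
)

func main() {
	diags := []*tfprotov6.Diagnostic{
		{Severity: tfprotov6.DiagnosticSeverityError, Summary: "invalid config"},
		{Severity: tfprotov6.DiagnosticSeverityWarning, Summary: "deprecated attribute"},
		nil, // nil entries are skipped, as in ErrorCount/WarningCount above
	}

	var errs, warns int
	for _, d := range diags {
		if d == nil {
			continue
		}
		switch d.Severity {
		case tfprotov6.DiagnosticSeverityError:
			errs++
		case tfprotov6.DiagnosticSeverityWarning:
			warns++
		}
	}
	fmt.Printf("errors=%d warnings=%d\n", errs, warns) // errors=1 warnings=1
}
```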
+package diag diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/resource.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/resource.go index 1f8deed34..94addd801 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/resource.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/resource.go @@ -103,7 +103,8 @@ func PlanResourceChangeRequest(in *tfplugin6.PlanResourceChange_Request) (*tfpro func PlanResourceChangeResponse(in *tfplugin6.PlanResourceChange_Response) (*tfprotov6.PlanResourceChangeResponse, error) { resp := &tfprotov6.PlanResourceChangeResponse{ - PlannedPrivate: in.PlannedPrivate, + PlannedPrivate: in.PlannedPrivate, + UnsafeToUseLegacyTypeSystem: in.LegacyTypeSystem, } attributePaths, err := AttributePaths(in.RequiresReplace) if err != nil { @@ -143,7 +144,8 @@ func ApplyResourceChangeRequest(in *tfplugin6.ApplyResourceChange_Request) (*tfp func ApplyResourceChangeResponse(in *tfplugin6.ApplyResourceChange_Response) (*tfprotov6.ApplyResourceChangeResponse, error) { resp := &tfprotov6.ApplyResourceChangeResponse{ - Private: in.Private, + Private: in.Private, + UnsafeToUseLegacyTypeSystem: in.LegacyTypeSystem, } diags, err := Diagnostics(in.Diagnostics) if err != nil { diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/schema.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/schema.go index 388a0193c..db720246f 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/schema.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/schema.go @@ -131,9 +131,7 @@ func SchemaObjectNestingMode(in tfplugin6.Schema_Object_NestingMode) tfprotov6.S func SchemaObject(in *tfplugin6.Schema_Object) (*tfprotov6.SchemaObject, error) { resp := &tfprotov6.SchemaObject{ - Nesting: SchemaObjectNestingMode(in.Nesting), - MinItems: in.MinItems, - MaxItems: in.MaxItems, + Nesting: SchemaObjectNestingMode(in.Nesting), } attrs, err := SchemaAttributes(in.Attributes) diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/context_keys.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/context_keys.go new file mode 100644 index 000000000..15386cd2c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/context_keys.go @@ -0,0 +1,8 @@ +package tf6serverlogging + +// Context key types. +// Reference: https://staticcheck.io/docs/checks/#SA1029 + +// ContextKeyDownstreamRequestStartTime is a context.Context key to store the +// time.Time when the server began a downstream request. +type ContextKeyDownstreamRequestStartTime struct{} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/doc.go new file mode 100644 index 000000000..167a61825 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/doc.go @@ -0,0 +1,3 @@ +// Package tf6serverlogging contains logging functionality specific to +// tf6server and tfprotov6 types.
+package tf6serverlogging diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/downstream_request.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/downstream_request.go new file mode 100644 index 000000000..c47df9b45 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/downstream_request.go @@ -0,0 +1,40 @@ +package tf6serverlogging + +import ( + "context" + "time" + + "github.com/hashicorp/terraform-plugin-go/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag" +) + +// DownstreamRequest sets a request duration start time context key and +// generates a TRACE "Sending request downstream" log. +func DownstreamRequest(ctx context.Context) context.Context { + requestStart := time.Now() + ctx = context.WithValue(ctx, ContextKeyDownstreamRequestStartTime{}, requestStart) + + logging.ProtocolTrace(ctx, "Sending request downstream") + + return ctx +} + +// DownstreamResponse generates the following logging: +// +// - TRACE "Received downstream response" log with request duration and +// diagnostic severity counts +// - Per-diagnostic logs +// +func DownstreamResponse(ctx context.Context, diagnostics diag.Diagnostics) { + responseFields := map[string]interface{}{ + logging.KeyDiagnosticErrorCount: diagnostics.ErrorCount(), + logging.KeyDiagnosticWarningCount: diagnostics.WarningCount(), + } + + if requestStart, ok := ctx.Value(ContextKeyDownstreamRequestStartTime{}).(time.Time); ok { + responseFields[logging.KeyRequestDurationMs] = time.Since(requestStart).Milliseconds() + } + + logging.ProtocolTrace(ctx, "Received downstream response", responseFields) + diagnostics.Log(ctx) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/generate.sh b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/generate.sh deleted file mode 100644 index ca2a04688..000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/generate.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -# We do not run protoc under go:generate because we want to ensure that all -# dependencies of go:generate are "go get"-able for general dev environment -# usability. To compile all protobuf files in this repository, run -# "make protobuf" at the top-level. - -set -eu - -SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done -DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" - -cd "$DIR" - -protoc --proto_path=. --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative tfplugin6.proto diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.pb.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.pb.go index 332ece40e..d67b61e11 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.pb.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.pb.go @@ -1,6 +1,6 @@ -// Terraform Plugin RPC protocol version 6.0 +// Terraform Plugin RPC protocol version 6.3 // -// This file defines version 6.0 of the RPC protocol. To implement a plugin +// This file defines version 6.3 of the RPC protocol. To implement a plugin // against this protocol, copy this definition into your own codebase and // use protoc to generate stubs for your target language. 
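DownstreamRequest and DownstreamResponse above pass the start time through the struct{}-typed context key from context_keys.go (the SA1029-safe pattern: an unexported struct type makes collisions with other packages' keys impossible). The same round trip, reduced to the standard library:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// startTimeKey mirrors ContextKeyDownstreamRequestStartTime.
type startTimeKey struct{}

// downstreamRequest stamps the context with the current time.
func downstreamRequest(ctx context.Context) context.Context {
	return context.WithValue(ctx, startTimeKey{}, time.Now())
}

// downstreamResponse reads the stamp back and reports the elapsed time,
// silently skipping the duration if the stamp was never set.
func downstreamResponse(ctx context.Context) {
	if start, ok := ctx.Value(startTimeKey{}).(time.Time); ok {
		fmt.Printf("tf_req_duration_ms=%d\n", time.Since(start).Milliseconds())
	}
}

func main() {
	ctx := downstreamRequest(context.Background())
	time.Sleep(25 * time.Millisecond)
	downstreamResponse(ctx) // tf_req_duration_ms=25 (approximately)
}
```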
// @@ -19,14 +19,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.23.0 -// protoc v3.15.6 +// protoc-gen-go v1.28.0 +// protoc v3.19.4 // source: tfplugin6.proto package tfplugin6 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -40,10 +39,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type StringKind int32 const ( @@ -1476,8 +1471,13 @@ type Schema_Object struct { Attributes []*Schema_Attribute `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` Nesting Schema_Object_NestingMode `protobuf:"varint,3,opt,name=nesting,proto3,enum=tfplugin6.Schema_Object_NestingMode" json:"nesting,omitempty"` - MinItems int64 `protobuf:"varint,4,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - MaxItems int64 `protobuf:"varint,5,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + // MinItems and MaxItems were never used in the protocol, and have no + // effect on validation. + // + // Deprecated: Do not use. + MinItems int64 `protobuf:"varint,4,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + // Deprecated: Do not use. + MaxItems int64 `protobuf:"varint,5,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` } func (x *Schema_Object) Reset() { @@ -1526,6 +1526,7 @@ func (x *Schema_Object) GetNesting() Schema_Object_NestingMode { return Schema_Object_INVALID } +// Deprecated: Do not use. func (x *Schema_Object) GetMinItems() int64 { if x != nil { return x.MinItems @@ -1533,6 +1534,7 @@ func (x *Schema_Object) GetMinItems() int64 { return 0 } +// Deprecated: Do not use. 
func (x *Schema_Object) GetMaxItems() int64 { if x != nil { return x.MaxItems @@ -1583,11 +1585,12 @@ type GetProviderSchema_Response struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Provider *Schema `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider,omitempty"` - ResourceSchemas map[string]*Schema `protobuf:"bytes,2,rep,name=resource_schemas,json=resourceSchemas,proto3" json:"resource_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - DataSourceSchemas map[string]*Schema `protobuf:"bytes,3,rep,name=data_source_schemas,json=dataSourceSchemas,proto3" json:"data_source_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` - ProviderMeta *Schema `protobuf:"bytes,5,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + Provider *Schema `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider,omitempty"` + ResourceSchemas map[string]*Schema `protobuf:"bytes,2,rep,name=resource_schemas,json=resourceSchemas,proto3" json:"resource_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + DataSourceSchemas map[string]*Schema `protobuf:"bytes,3,rep,name=data_source_schemas,json=dataSourceSchemas,proto3" json:"data_source_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + ProviderMeta *Schema `protobuf:"bytes,5,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + ServerCapabilities *GetProviderSchema_ServerCapabilities `protobuf:"bytes,6,opt,name=server_capabilities,json=serverCapabilities,proto3" json:"server_capabilities,omitempty"` } func (x *GetProviderSchema_Response) Reset() { @@ -1657,6 +1660,66 @@ func (x *GetProviderSchema_Response) GetProviderMeta() *Schema { return nil } +func (x *GetProviderSchema_Response) GetServerCapabilities() *GetProviderSchema_ServerCapabilities { + if x != nil { + return x.ServerCapabilities + } + return nil +} + +// ServerCapabilities allows providers to communicate extra information +// regarding supported protocol features. This is used to indicate +// availability of certain forward-compatible changes which may be optional +// in a major protocol version, but cannot be tested for directly. +type GetProviderSchema_ServerCapabilities struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The plan_destroy capability signals that a provider expects a call + // to PlanResourceChange when a resource is going to be destroyed. 
+ PlanDestroy bool `protobuf:"varint,1,opt,name=plan_destroy,json=planDestroy,proto3" json:"plan_destroy,omitempty"` +} + +func (x *GetProviderSchema_ServerCapabilities) Reset() { + *x = GetProviderSchema_ServerCapabilities{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProviderSchema_ServerCapabilities) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProviderSchema_ServerCapabilities) ProtoMessage() {} + +func (x *GetProviderSchema_ServerCapabilities) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProviderSchema_ServerCapabilities.ProtoReflect.Descriptor instead. +func (*GetProviderSchema_ServerCapabilities) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{6, 2} +} + +func (x *GetProviderSchema_ServerCapabilities) GetPlanDestroy() bool { + if x != nil { + return x.PlanDestroy + } + return false +} + type ValidateProviderConfig_Request struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1668,7 +1731,7 @@ type ValidateProviderConfig_Request struct { func (x *ValidateProviderConfig_Request) Reset() { *x = ValidateProviderConfig_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[29] + mi := &file_tfplugin6_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1681,7 +1744,7 @@ func (x *ValidateProviderConfig_Request) String() string { func (*ValidateProviderConfig_Request) ProtoMessage() {} func (x *ValidateProviderConfig_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[29] + mi := &file_tfplugin6_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1715,7 +1778,7 @@ type ValidateProviderConfig_Response struct { func (x *ValidateProviderConfig_Response) Reset() { *x = ValidateProviderConfig_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[30] + mi := &file_tfplugin6_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1728,7 +1791,7 @@ func (x *ValidateProviderConfig_Response) String() string { func (*ValidateProviderConfig_Response) ProtoMessage() {} func (x *ValidateProviderConfig_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[30] + mi := &file_tfplugin6_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1770,7 +1833,7 @@ type UpgradeResourceState_Request struct { func (x *UpgradeResourceState_Request) Reset() { *x = UpgradeResourceState_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[31] + mi := &file_tfplugin6_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1783,7 +1846,7 @@ func (x *UpgradeResourceState_Request) String() string { func (*UpgradeResourceState_Request) ProtoMessage() {} func (x *UpgradeResourceState_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[31] + 
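ServerCapabilities is the opt-in mechanism for protocol 6.3 features such as plan_destroy. Assuming the public tfprotov6 wrapper mirrors this generated message with a ServerCapabilities field on GetProviderSchemaResponse, a provider would advertise the capability roughly like this (schemas elided, and only this one method of the ProviderServer interface shown):

```go
package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-go/tfprotov6"
)

type provider struct{}

// GetProviderSchema advertises that this server expects PlanResourceChange
// to also be called for destroy plans (protocol 6.3's plan_destroy).
func (p *provider) GetProviderSchema(ctx context.Context, req *tfprotov6.GetProviderSchemaRequest) (*tfprotov6.GetProviderSchemaResponse, error) {
	return &tfprotov6.GetProviderSchemaResponse{
		ServerCapabilities: &tfprotov6.ServerCapabilities{
			PlanDestroy: true,
		},
		Provider: &tfprotov6.Schema{}, // real schemas elided
	}, nil
}
```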
mi := &file_tfplugin6_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1838,7 +1901,7 @@ type UpgradeResourceState_Response struct { func (x *UpgradeResourceState_Response) Reset() { *x = UpgradeResourceState_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[32] + mi := &file_tfplugin6_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1851,7 +1914,7 @@ func (x *UpgradeResourceState_Response) String() string { func (*UpgradeResourceState_Response) ProtoMessage() {} func (x *UpgradeResourceState_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[32] + mi := &file_tfplugin6_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1893,7 +1956,7 @@ type ValidateResourceConfig_Request struct { func (x *ValidateResourceConfig_Request) Reset() { *x = ValidateResourceConfig_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[33] + mi := &file_tfplugin6_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1906,7 +1969,7 @@ func (x *ValidateResourceConfig_Request) String() string { func (*ValidateResourceConfig_Request) ProtoMessage() {} func (x *ValidateResourceConfig_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[33] + mi := &file_tfplugin6_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1947,7 +2010,7 @@ type ValidateResourceConfig_Response struct { func (x *ValidateResourceConfig_Response) Reset() { *x = ValidateResourceConfig_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[34] + mi := &file_tfplugin6_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1960,7 +2023,7 @@ func (x *ValidateResourceConfig_Response) String() string { func (*ValidateResourceConfig_Response) ProtoMessage() {} func (x *ValidateResourceConfig_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[34] + mi := &file_tfplugin6_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1995,7 +2058,7 @@ type ValidateDataResourceConfig_Request struct { func (x *ValidateDataResourceConfig_Request) Reset() { *x = ValidateDataResourceConfig_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[35] + mi := &file_tfplugin6_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2008,7 +2071,7 @@ func (x *ValidateDataResourceConfig_Request) String() string { func (*ValidateDataResourceConfig_Request) ProtoMessage() {} func (x *ValidateDataResourceConfig_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[35] + mi := &file_tfplugin6_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2049,7 +2112,7 @@ type ValidateDataResourceConfig_Response struct { func (x *ValidateDataResourceConfig_Response) Reset() { *x = ValidateDataResourceConfig_Response{} if protoimpl.UnsafeEnabled { - mi := 
&file_tfplugin6_proto_msgTypes[36] + mi := &file_tfplugin6_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2062,7 +2125,7 @@ func (x *ValidateDataResourceConfig_Response) String() string { func (*ValidateDataResourceConfig_Response) ProtoMessage() {} func (x *ValidateDataResourceConfig_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[36] + mi := &file_tfplugin6_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2097,7 +2160,7 @@ type ConfigureProvider_Request struct { func (x *ConfigureProvider_Request) Reset() { *x = ConfigureProvider_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[37] + mi := &file_tfplugin6_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2110,7 +2173,7 @@ func (x *ConfigureProvider_Request) String() string { func (*ConfigureProvider_Request) ProtoMessage() {} func (x *ConfigureProvider_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[37] + mi := &file_tfplugin6_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2151,7 +2214,7 @@ type ConfigureProvider_Response struct { func (x *ConfigureProvider_Response) Reset() { *x = ConfigureProvider_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[38] + mi := &file_tfplugin6_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2164,7 +2227,7 @@ func (x *ConfigureProvider_Response) String() string { func (*ConfigureProvider_Response) ProtoMessage() {} func (x *ConfigureProvider_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[38] + mi := &file_tfplugin6_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2201,7 +2264,7 @@ type ReadResource_Request struct { func (x *ReadResource_Request) Reset() { *x = ReadResource_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[39] + mi := &file_tfplugin6_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2214,7 +2277,7 @@ func (x *ReadResource_Request) String() string { func (*ReadResource_Request) ProtoMessage() {} func (x *ReadResource_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[39] + mi := &file_tfplugin6_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2271,7 +2334,7 @@ type ReadResource_Response struct { func (x *ReadResource_Response) Reset() { *x = ReadResource_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[40] + mi := &file_tfplugin6_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2284,7 +2347,7 @@ func (x *ReadResource_Response) String() string { func (*ReadResource_Response) ProtoMessage() {} func (x *ReadResource_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[40] + mi := &file_tfplugin6_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { @@ -2337,7 +2400,7 @@ type PlanResourceChange_Request struct { func (x *PlanResourceChange_Request) Reset() { *x = PlanResourceChange_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[41] + mi := &file_tfplugin6_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2350,7 +2413,7 @@ func (x *PlanResourceChange_Request) String() string { func (*PlanResourceChange_Request) ProtoMessage() {} func (x *PlanResourceChange_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[41] + mi := &file_tfplugin6_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2417,12 +2480,24 @@ type PlanResourceChange_Response struct { RequiresReplace []*AttributePath `protobuf:"bytes,2,rep,name=requires_replace,json=requiresReplace,proto3" json:"requires_replace,omitempty"` PlannedPrivate []byte `protobuf:"bytes,3,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"` Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. + // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + LegacyTypeSystem bool `protobuf:"varint,5,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` } func (x *PlanResourceChange_Response) Reset() { *x = PlanResourceChange_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[42] + mi := &file_tfplugin6_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2435,7 +2510,7 @@ func (x *PlanResourceChange_Response) String() string { func (*PlanResourceChange_Response) ProtoMessage() {} func (x *PlanResourceChange_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[42] + mi := &file_tfplugin6_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2479,6 +2554,13 @@ func (x *PlanResourceChange_Response) GetDiagnostics() []*Diagnostic { return nil } +func (x *PlanResourceChange_Response) GetLegacyTypeSystem() bool { + if x != nil { + return x.LegacyTypeSystem + } + return false +} + type ApplyResourceChange_Request struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2495,7 +2577,7 @@ type ApplyResourceChange_Request struct { func (x *ApplyResourceChange_Request) Reset() { *x = ApplyResourceChange_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[43] + mi := &file_tfplugin6_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2508,7 +2590,7 @@ func (x *ApplyResourceChange_Request) String() string { func (*ApplyResourceChange_Request) ProtoMessage() {} func (x *ApplyResourceChange_Request) ProtoReflect() 
protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[43] + mi := &file_tfplugin6_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2574,12 +2656,24 @@ type ApplyResourceChange_Response struct { NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` Private []byte `protobuf:"bytes,2,opt,name=private,proto3" json:"private,omitempty"` Diagnostics []*Diagnostic `protobuf:"bytes,3,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. + // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + LegacyTypeSystem bool `protobuf:"varint,4,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` } func (x *ApplyResourceChange_Response) Reset() { *x = ApplyResourceChange_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[44] + mi := &file_tfplugin6_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2592,7 +2686,7 @@ func (x *ApplyResourceChange_Response) String() string { func (*ApplyResourceChange_Response) ProtoMessage() {} func (x *ApplyResourceChange_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[44] + mi := &file_tfplugin6_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2629,6 +2723,13 @@ func (x *ApplyResourceChange_Response) GetDiagnostics() []*Diagnostic { return nil } +func (x *ApplyResourceChange_Response) GetLegacyTypeSystem() bool { + if x != nil { + return x.LegacyTypeSystem + } + return false +} + type ImportResourceState_Request struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2641,7 +2742,7 @@ type ImportResourceState_Request struct { func (x *ImportResourceState_Request) Reset() { *x = ImportResourceState_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[45] + mi := &file_tfplugin6_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2654,7 +2755,7 @@ func (x *ImportResourceState_Request) String() string { func (*ImportResourceState_Request) ProtoMessage() {} func (x *ImportResourceState_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[45] + mi := &file_tfplugin6_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2697,7 +2798,7 @@ type ImportResourceState_ImportedResource struct { func (x *ImportResourceState_ImportedResource) Reset() { *x = ImportResourceState_ImportedResource{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[46] + mi := &file_tfplugin6_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2710,7 
+2811,7 @@ func (x *ImportResourceState_ImportedResource) String() string { func (*ImportResourceState_ImportedResource) ProtoMessage() {} func (x *ImportResourceState_ImportedResource) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[46] + mi := &file_tfplugin6_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2759,7 +2860,7 @@ type ImportResourceState_Response struct { func (x *ImportResourceState_Response) Reset() { *x = ImportResourceState_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[47] + mi := &file_tfplugin6_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2772,7 +2873,7 @@ func (x *ImportResourceState_Response) String() string { func (*ImportResourceState_Response) ProtoMessage() {} func (x *ImportResourceState_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[47] + mi := &file_tfplugin6_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2815,7 +2916,7 @@ type ReadDataSource_Request struct { func (x *ReadDataSource_Request) Reset() { *x = ReadDataSource_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[48] + mi := &file_tfplugin6_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2828,7 +2929,7 @@ func (x *ReadDataSource_Request) String() string { func (*ReadDataSource_Request) ProtoMessage() {} func (x *ReadDataSource_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[48] + mi := &file_tfplugin6_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2877,7 +2978,7 @@ type ReadDataSource_Response struct { func (x *ReadDataSource_Response) Reset() { *x = ReadDataSource_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[49] + mi := &file_tfplugin6_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2890,7 +2991,7 @@ func (x *ReadDataSource_Response) String() string { func (*ReadDataSource_Response) ProtoMessage() {} func (x *ReadDataSource_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[49] + mi := &file_tfplugin6_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2970,7 +3071,7 @@ var file_tfplugin6_proto_rawDesc = []byte{ 0x74, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x8d, 0x0a, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x95, 0x0a, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, @@ -3035,7 +3136,7 @@ var 
file_tfplugin6_proto_rawDesc = []byte{ 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x4d, 0x41, 0x50, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x05, 0x1a, - 0x83, 0x02, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x61, 0x74, + 0x8b, 0x02, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, @@ -3043,80 +3144,102 @@ var file_tfplugin6_proto_rawDesc = []byte{ 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x07, - 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, - 0x74, 0x65, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, - 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, - 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, - 0x73, 0x22, 0x42, 0x0a, 0x0b, 0x4e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, - 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, - 0x06, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x53, - 0x54, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, - 0x4d, 0x41, 0x50, 0x10, 0x04, 0x22, 0xd0, 0x04, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, - 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xaf, 0x04, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, - 0x65, 0x72, 0x12, 0x65, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x6c, 0x0a, 0x13, 0x64, 0x61, 0x74, - 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x2e, 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, - 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, - 0x12, 0x36, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x55, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x6f, + 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, + 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1f, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, + 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x42, 0x0a, 0x0b, 0x4e, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, + 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x10, + 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x53, + 0x45, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x4d, 0x41, 0x50, 0x10, 0x04, 0x22, 0xeb, 0x05, + 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x91, + 0x05, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x08, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x65, 0x0a, 0x10, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, - 0x57, 0x0a, 0x16, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 
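// --- Editor's note (not part of the vendored diff) ---------------------------
// In the replacement descriptor bytes just above, min_items and max_items each
// gain the bytes 0x42, 0x02, 0x18, 0x01 (and their length prefixes change
// 0x1b -> 0x1f): that is a serialized google.protobuf.FieldOptions with
// deprecated=true, i.e. Schema.Object.min_items/max_items are now marked
// deprecated. A hypothetical in-package check, assuming descriptorpb
// (google.golang.org/protobuf/types/descriptorpb) is imported; kept in comment
// form so the surrounding raw descriptor literal stays intact:
//
//	fd := File_tfplugin6_proto.
//		Messages().ByName("Schema").
//		Messages().ByName("Object").
//		Fields().ByName("min_items")
//	opts, ok := fd.Options().(*descriptorpb.FieldOptions)
//	fmt.Println(ok && opts.GetDeprecated()) // expected: true after this change
// -----------------------------------------------------------------------------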
0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x99, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, - 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, - 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, - 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, - 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, - 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x73, 0x22, 0x90, 0x02, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x72, 0x0a, - 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x30, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, - 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x08, 0x72, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x1a, 0x83, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, - 0x0a, 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x0d, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, + 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x73, 0x12, 0x6c, 0x0a, 0x13, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x64, 0x61, + 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, + 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 
0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x36, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, + 0x12, 0x60, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, + 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, + 0x65, 0x73, 0x1a, 0x55, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x57, 0x0a, 0x16, 0x44, 0x61, 0x74, + 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x1a, 0x37, 0x0a, 0x12, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x6c, 0x61, 0x6e, + 0x5f, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, + 0x70, 0x6c, 0x61, 0x6e, 0x44, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x22, 0x99, 0x01, 0x0a, 0x16, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, - 0x6e, 0x6f, 
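// --- Editor's note (not part of the vendored diff) ---------------------------
// The bytes added just above declare a new nested message,
// tfplugin6.GetProviderSchema.ServerCapabilities, with a single bool
// plan_destroy (field 1), plus a server_capabilities field (field 6) on
// GetProviderSchema.Response. A hypothetical in-package construction, kept in
// comment form to avoid interrupting the raw descriptor literal:
//
//	resp := &GetProviderSchema_Response{
//		ServerCapabilities: &GetProviderSchema_ServerCapabilities{
//			// The zero value (false) keeps pre-existing behavior; a server
//			// that can plan destroy operations would set this to true.
//			PlanDestroy: false,
//		},
//	}
//	_ = resp.GetServerCapabilities().GetPlanDestroy()
// -----------------------------------------------------------------------------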
0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x90, 0x02, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, + 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x1a, 0x72, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x08, 0x72, 0x61, 0x77, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x1a, 0x83, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x3e, 0x0a, 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x0d, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x16, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x22, 0xba, 0x01, 0x0a, 0x1a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, @@ -3127,244 +3250,239 @@ var 
file_tfplugin6_proto_rawDesc = []byte{ 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, - 0x22, 0xba, 0x01, 0x0a, 0x1a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, - 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, - 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, - 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, - 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, - 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xc1, 0x01, - 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, - 0x64, 0x65, 0x72, 0x1a, 0x67, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, - 0x0a, 0x11, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x65, 0x72, 0x72, 0x61, - 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x06, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, - 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x1a, 0xbc, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, - 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, - 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x63, 0x75, 0x72, - 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, - 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 
0x76, - 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, - 0x61, 0x1a, 0x93, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, - 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, - 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, - 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x18, 0x0a, - 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, - 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x22, 0xc4, 0x04, 0x0a, 0x12, 0x50, 0x6c, 0x61, 0x6e, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xbb, - 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, - 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, - 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x45, 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x5f, 0x6e, 0x65, - 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, - 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, - 0x4e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x69, - 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x0c, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, - 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x22, 0xc1, 0x01, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x1a, 0x67, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x76, + 0x65, 0x72, 0x73, 
0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x65, + 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, + 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, + 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0xbc, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3c, + 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, - 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0xef, 0x01, 0x0a, - 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, - 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, - 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, - 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x69, - 0x72, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x0f, 0x72, 0x65, 0x71, - 0x75, 0x69, 0x72, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, - 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, - 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xe4, - 0x03, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xb6, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 
0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, + 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, + 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x93, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, - 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, - 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, - 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, + 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, + 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x22, 0xf2, 0x04, 0x0a, 0x12, 0x50, + 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x1a, 0xbb, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, + 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, + 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x45, 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, + 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, - 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, - 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, - 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 
0x69, 0x76, 0x61, 0x74, + 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x70, 0x6f, + 0x73, 0x65, 0x64, 0x4e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, + 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, - 0x93, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x09, - 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, - 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, - 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, - 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xed, 0x02, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x36, 0x0a, - 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x02, 0x69, 0x64, 0x1a, 0x78, 0x0a, 0x10, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, - 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x9d, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, + 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, + 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 
0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x0f, + 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, + 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, + 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, + 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, + 0x92, 0x04, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xb6, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x1a, - 0xa3, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x12, - 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x11, 0x69, 0x6d, 0x70, 0x6f, 0x72, - 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x0b, - 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, - 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x9c, 0x02, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, - 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x95, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, + 0x70, 0x72, 0x69, 0x6f, 0x72, 
0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, + 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, - 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, + 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, + 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, - 0x1a, 0x72, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, + 0x1a, 0xc1, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, + 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, + 0x73, 0x74, 0x65, 0x6d, 0x22, 0xed, 0x02, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x36, 0x0a, 0x07, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x08, 0x74, 0x79, 0x70, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x02, 0x69, 0x64, 0x1a, 0x78, 0x0a, 0x10, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x1a, 0xa3, + 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x12, 0x69, + 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x36, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x11, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, + 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x73, 0x2a, 0x25, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, - 0x6e, 0x64, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, - 0x08, 0x4d, 0x41, 0x52, 0x4b, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x32, 0xcc, 0x09, 0x0a, 0x08, - 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x60, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, - 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, - 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x16, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, + 0x74, 0x69, 0x63, 0x73, 0x22, 0x9c, 0x02, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, + 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x95, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 
0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, + 0x72, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x2a, 0x25, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, + 0x64, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, + 0x4d, 0x41, 0x52, 0x4b, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x32, 0xcc, 0x09, 0x0a, 0x08, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x60, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, + 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x16, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x16, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, - 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 
0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x16, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x2a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x1a, - 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2d, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, - 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x1a, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x14, 0x55, 0x70, 0x67, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, + 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, + 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x55, 0x70, - 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, - 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x12, 0x50, 0x6c, 0x61, - 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x50, 0x6c, 0x61, 0x6e, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x36, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, - 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x36, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x12, 0x50, 0x6c, 0x61, 0x6e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x25, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 
0x2e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x26, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x36, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x57, - 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x12, 0x21, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x65, 0x61, - 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, - 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x53, 0x74, 0x6f, 0x70, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, + 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x26, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x57, 0x0a, + 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, + 0x21, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x65, 0x61, 0x64, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, + 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x72, + 0x6f, 
0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, - 0x72, 0x70, 0x2f, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x2f, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x47, 0x5a, 0x45, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x70, 0x2f, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x2d, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x74, 0x66, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x76, 0x36, 0x2f, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -3380,7 +3498,7 @@ func file_tfplugin6_proto_rawDescGZIP() []byte { } var file_tfplugin6_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_tfplugin6_proto_msgTypes = make([]protoimpl.MessageInfo, 50) +var file_tfplugin6_proto_msgTypes = make([]protoimpl.MessageInfo, 51) var file_tfplugin6_proto_goTypes = []interface{}{ (StringKind)(0), // 0: tfplugin6.StringKind (Diagnostic_Severity)(0), // 1: tfplugin6.Diagnostic.Severity @@ -3413,29 +3531,30 @@ var file_tfplugin6_proto_goTypes = []interface{}{ (*Schema_Object)(nil), // 28: tfplugin6.Schema.Object (*GetProviderSchema_Request)(nil), // 29: tfplugin6.GetProviderSchema.Request (*GetProviderSchema_Response)(nil), // 30: tfplugin6.GetProviderSchema.Response - nil, // 31: tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry - nil, // 32: tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry - (*ValidateProviderConfig_Request)(nil), // 33: tfplugin6.ValidateProviderConfig.Request - (*ValidateProviderConfig_Response)(nil), // 34: tfplugin6.ValidateProviderConfig.Response - (*UpgradeResourceState_Request)(nil), // 35: tfplugin6.UpgradeResourceState.Request - (*UpgradeResourceState_Response)(nil), // 36: tfplugin6.UpgradeResourceState.Response - (*ValidateResourceConfig_Request)(nil), // 37: tfplugin6.ValidateResourceConfig.Request - (*ValidateResourceConfig_Response)(nil), // 38: tfplugin6.ValidateResourceConfig.Response - (*ValidateDataResourceConfig_Request)(nil), // 39: tfplugin6.ValidateDataResourceConfig.Request - (*ValidateDataResourceConfig_Response)(nil), // 40: tfplugin6.ValidateDataResourceConfig.Response - (*ConfigureProvider_Request)(nil), // 41: tfplugin6.ConfigureProvider.Request - (*ConfigureProvider_Response)(nil), // 42: tfplugin6.ConfigureProvider.Response - (*ReadResource_Request)(nil), // 43: tfplugin6.ReadResource.Request - (*ReadResource_Response)(nil), // 44: tfplugin6.ReadResource.Response - (*PlanResourceChange_Request)(nil), // 45: 
tfplugin6.PlanResourceChange.Request - (*PlanResourceChange_Response)(nil), // 46: tfplugin6.PlanResourceChange.Response - (*ApplyResourceChange_Request)(nil), // 47: tfplugin6.ApplyResourceChange.Request - (*ApplyResourceChange_Response)(nil), // 48: tfplugin6.ApplyResourceChange.Response - (*ImportResourceState_Request)(nil), // 49: tfplugin6.ImportResourceState.Request - (*ImportResourceState_ImportedResource)(nil), // 50: tfplugin6.ImportResourceState.ImportedResource - (*ImportResourceState_Response)(nil), // 51: tfplugin6.ImportResourceState.Response - (*ReadDataSource_Request)(nil), // 52: tfplugin6.ReadDataSource.Request - (*ReadDataSource_Response)(nil), // 53: tfplugin6.ReadDataSource.Response + (*GetProviderSchema_ServerCapabilities)(nil), // 31: tfplugin6.GetProviderSchema.ServerCapabilities + nil, // 32: tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry + nil, // 33: tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry + (*ValidateProviderConfig_Request)(nil), // 34: tfplugin6.ValidateProviderConfig.Request + (*ValidateProviderConfig_Response)(nil), // 35: tfplugin6.ValidateProviderConfig.Response + (*UpgradeResourceState_Request)(nil), // 36: tfplugin6.UpgradeResourceState.Request + (*UpgradeResourceState_Response)(nil), // 37: tfplugin6.UpgradeResourceState.Response + (*ValidateResourceConfig_Request)(nil), // 38: tfplugin6.ValidateResourceConfig.Request + (*ValidateResourceConfig_Response)(nil), // 39: tfplugin6.ValidateResourceConfig.Response + (*ValidateDataResourceConfig_Request)(nil), // 40: tfplugin6.ValidateDataResourceConfig.Request + (*ValidateDataResourceConfig_Response)(nil), // 41: tfplugin6.ValidateDataResourceConfig.Response + (*ConfigureProvider_Request)(nil), // 42: tfplugin6.ConfigureProvider.Request + (*ConfigureProvider_Response)(nil), // 43: tfplugin6.ConfigureProvider.Response + (*ReadResource_Request)(nil), // 44: tfplugin6.ReadResource.Request + (*ReadResource_Response)(nil), // 45: tfplugin6.ReadResource.Response + (*PlanResourceChange_Request)(nil), // 46: tfplugin6.PlanResourceChange.Request + (*PlanResourceChange_Response)(nil), // 47: tfplugin6.PlanResourceChange.Response + (*ApplyResourceChange_Request)(nil), // 48: tfplugin6.ApplyResourceChange.Request + (*ApplyResourceChange_Response)(nil), // 49: tfplugin6.ApplyResourceChange.Response + (*ImportResourceState_Request)(nil), // 50: tfplugin6.ImportResourceState.Request + (*ImportResourceState_ImportedResource)(nil), // 51: tfplugin6.ImportResourceState.ImportedResource + (*ImportResourceState_Response)(nil), // 52: tfplugin6.ImportResourceState.Response + (*ReadDataSource_Request)(nil), // 53: tfplugin6.ReadDataSource.Request + (*ReadDataSource_Response)(nil), // 54: tfplugin6.ReadDataSource.Response } var file_tfplugin6_proto_depIdxs = []int32{ 1, // 0: tfplugin6.Diagnostic.severity:type_name -> tfplugin6.Diagnostic.Severity @@ -3453,76 +3572,77 @@ var file_tfplugin6_proto_depIdxs = []int32{ 26, // 12: tfplugin6.Schema.Object.attributes:type_name -> tfplugin6.Schema.Attribute 3, // 13: tfplugin6.Schema.Object.nesting:type_name -> tfplugin6.Schema.Object.NestingMode 9, // 14: tfplugin6.GetProviderSchema.Response.provider:type_name -> tfplugin6.Schema - 31, // 15: tfplugin6.GetProviderSchema.Response.resource_schemas:type_name -> tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry - 32, // 16: tfplugin6.GetProviderSchema.Response.data_source_schemas:type_name -> tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry + 32, // 15: 
tfplugin6.GetProviderSchema.Response.resource_schemas:type_name -> tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry + 33, // 16: tfplugin6.GetProviderSchema.Response.data_source_schemas:type_name -> tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry 5, // 17: tfplugin6.GetProviderSchema.Response.diagnostics:type_name -> tfplugin6.Diagnostic 9, // 18: tfplugin6.GetProviderSchema.Response.provider_meta:type_name -> tfplugin6.Schema - 9, // 19: tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry.value:type_name -> tfplugin6.Schema - 9, // 20: tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry.value:type_name -> tfplugin6.Schema - 4, // 21: tfplugin6.ValidateProviderConfig.Request.config:type_name -> tfplugin6.DynamicValue - 5, // 22: tfplugin6.ValidateProviderConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 8, // 23: tfplugin6.UpgradeResourceState.Request.raw_state:type_name -> tfplugin6.RawState - 4, // 24: tfplugin6.UpgradeResourceState.Response.upgraded_state:type_name -> tfplugin6.DynamicValue - 5, // 25: tfplugin6.UpgradeResourceState.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 26: tfplugin6.ValidateResourceConfig.Request.config:type_name -> tfplugin6.DynamicValue - 5, // 27: tfplugin6.ValidateResourceConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 28: tfplugin6.ValidateDataResourceConfig.Request.config:type_name -> tfplugin6.DynamicValue - 5, // 29: tfplugin6.ValidateDataResourceConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 30: tfplugin6.ConfigureProvider.Request.config:type_name -> tfplugin6.DynamicValue - 5, // 31: tfplugin6.ConfigureProvider.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 32: tfplugin6.ReadResource.Request.current_state:type_name -> tfplugin6.DynamicValue - 4, // 33: tfplugin6.ReadResource.Request.provider_meta:type_name -> tfplugin6.DynamicValue - 4, // 34: tfplugin6.ReadResource.Response.new_state:type_name -> tfplugin6.DynamicValue - 5, // 35: tfplugin6.ReadResource.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 36: tfplugin6.PlanResourceChange.Request.prior_state:type_name -> tfplugin6.DynamicValue - 4, // 37: tfplugin6.PlanResourceChange.Request.proposed_new_state:type_name -> tfplugin6.DynamicValue - 4, // 38: tfplugin6.PlanResourceChange.Request.config:type_name -> tfplugin6.DynamicValue - 4, // 39: tfplugin6.PlanResourceChange.Request.provider_meta:type_name -> tfplugin6.DynamicValue - 4, // 40: tfplugin6.PlanResourceChange.Response.planned_state:type_name -> tfplugin6.DynamicValue - 6, // 41: tfplugin6.PlanResourceChange.Response.requires_replace:type_name -> tfplugin6.AttributePath - 5, // 42: tfplugin6.PlanResourceChange.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 43: tfplugin6.ApplyResourceChange.Request.prior_state:type_name -> tfplugin6.DynamicValue - 4, // 44: tfplugin6.ApplyResourceChange.Request.planned_state:type_name -> tfplugin6.DynamicValue - 4, // 45: tfplugin6.ApplyResourceChange.Request.config:type_name -> tfplugin6.DynamicValue - 4, // 46: tfplugin6.ApplyResourceChange.Request.provider_meta:type_name -> tfplugin6.DynamicValue - 4, // 47: tfplugin6.ApplyResourceChange.Response.new_state:type_name -> tfplugin6.DynamicValue - 5, // 48: tfplugin6.ApplyResourceChange.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 49: tfplugin6.ImportResourceState.ImportedResource.state:type_name -> tfplugin6.DynamicValue - 50, // 50: 
tfplugin6.ImportResourceState.Response.imported_resources:type_name -> tfplugin6.ImportResourceState.ImportedResource - 5, // 51: tfplugin6.ImportResourceState.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 52: tfplugin6.ReadDataSource.Request.config:type_name -> tfplugin6.DynamicValue - 4, // 53: tfplugin6.ReadDataSource.Request.provider_meta:type_name -> tfplugin6.DynamicValue - 4, // 54: tfplugin6.ReadDataSource.Response.state:type_name -> tfplugin6.DynamicValue - 5, // 55: tfplugin6.ReadDataSource.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 29, // 56: tfplugin6.Provider.GetProviderSchema:input_type -> tfplugin6.GetProviderSchema.Request - 33, // 57: tfplugin6.Provider.ValidateProviderConfig:input_type -> tfplugin6.ValidateProviderConfig.Request - 37, // 58: tfplugin6.Provider.ValidateResourceConfig:input_type -> tfplugin6.ValidateResourceConfig.Request - 39, // 59: tfplugin6.Provider.ValidateDataResourceConfig:input_type -> tfplugin6.ValidateDataResourceConfig.Request - 35, // 60: tfplugin6.Provider.UpgradeResourceState:input_type -> tfplugin6.UpgradeResourceState.Request - 41, // 61: tfplugin6.Provider.ConfigureProvider:input_type -> tfplugin6.ConfigureProvider.Request - 43, // 62: tfplugin6.Provider.ReadResource:input_type -> tfplugin6.ReadResource.Request - 45, // 63: tfplugin6.Provider.PlanResourceChange:input_type -> tfplugin6.PlanResourceChange.Request - 47, // 64: tfplugin6.Provider.ApplyResourceChange:input_type -> tfplugin6.ApplyResourceChange.Request - 49, // 65: tfplugin6.Provider.ImportResourceState:input_type -> tfplugin6.ImportResourceState.Request - 52, // 66: tfplugin6.Provider.ReadDataSource:input_type -> tfplugin6.ReadDataSource.Request - 22, // 67: tfplugin6.Provider.StopProvider:input_type -> tfplugin6.StopProvider.Request - 30, // 68: tfplugin6.Provider.GetProviderSchema:output_type -> tfplugin6.GetProviderSchema.Response - 34, // 69: tfplugin6.Provider.ValidateProviderConfig:output_type -> tfplugin6.ValidateProviderConfig.Response - 38, // 70: tfplugin6.Provider.ValidateResourceConfig:output_type -> tfplugin6.ValidateResourceConfig.Response - 40, // 71: tfplugin6.Provider.ValidateDataResourceConfig:output_type -> tfplugin6.ValidateDataResourceConfig.Response - 36, // 72: tfplugin6.Provider.UpgradeResourceState:output_type -> tfplugin6.UpgradeResourceState.Response - 42, // 73: tfplugin6.Provider.ConfigureProvider:output_type -> tfplugin6.ConfigureProvider.Response - 44, // 74: tfplugin6.Provider.ReadResource:output_type -> tfplugin6.ReadResource.Response - 46, // 75: tfplugin6.Provider.PlanResourceChange:output_type -> tfplugin6.PlanResourceChange.Response - 48, // 76: tfplugin6.Provider.ApplyResourceChange:output_type -> tfplugin6.ApplyResourceChange.Response - 51, // 77: tfplugin6.Provider.ImportResourceState:output_type -> tfplugin6.ImportResourceState.Response - 53, // 78: tfplugin6.Provider.ReadDataSource:output_type -> tfplugin6.ReadDataSource.Response - 23, // 79: tfplugin6.Provider.StopProvider:output_type -> tfplugin6.StopProvider.Response - 68, // [68:80] is the sub-list for method output_type - 56, // [56:68] is the sub-list for method input_type - 56, // [56:56] is the sub-list for extension type_name - 56, // [56:56] is the sub-list for extension extendee - 0, // [0:56] is the sub-list for field type_name + 31, // 19: tfplugin6.GetProviderSchema.Response.server_capabilities:type_name -> tfplugin6.GetProviderSchema.ServerCapabilities + 9, // 20: tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry.value:type_name -> 
tfplugin6.Schema + 9, // 21: tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry.value:type_name -> tfplugin6.Schema + 4, // 22: tfplugin6.ValidateProviderConfig.Request.config:type_name -> tfplugin6.DynamicValue + 5, // 23: tfplugin6.ValidateProviderConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 8, // 24: tfplugin6.UpgradeResourceState.Request.raw_state:type_name -> tfplugin6.RawState + 4, // 25: tfplugin6.UpgradeResourceState.Response.upgraded_state:type_name -> tfplugin6.DynamicValue + 5, // 26: tfplugin6.UpgradeResourceState.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 27: tfplugin6.ValidateResourceConfig.Request.config:type_name -> tfplugin6.DynamicValue + 5, // 28: tfplugin6.ValidateResourceConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 29: tfplugin6.ValidateDataResourceConfig.Request.config:type_name -> tfplugin6.DynamicValue + 5, // 30: tfplugin6.ValidateDataResourceConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 31: tfplugin6.ConfigureProvider.Request.config:type_name -> tfplugin6.DynamicValue + 5, // 32: tfplugin6.ConfigureProvider.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 33: tfplugin6.ReadResource.Request.current_state:type_name -> tfplugin6.DynamicValue + 4, // 34: tfplugin6.ReadResource.Request.provider_meta:type_name -> tfplugin6.DynamicValue + 4, // 35: tfplugin6.ReadResource.Response.new_state:type_name -> tfplugin6.DynamicValue + 5, // 36: tfplugin6.ReadResource.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 37: tfplugin6.PlanResourceChange.Request.prior_state:type_name -> tfplugin6.DynamicValue + 4, // 38: tfplugin6.PlanResourceChange.Request.proposed_new_state:type_name -> tfplugin6.DynamicValue + 4, // 39: tfplugin6.PlanResourceChange.Request.config:type_name -> tfplugin6.DynamicValue + 4, // 40: tfplugin6.PlanResourceChange.Request.provider_meta:type_name -> tfplugin6.DynamicValue + 4, // 41: tfplugin6.PlanResourceChange.Response.planned_state:type_name -> tfplugin6.DynamicValue + 6, // 42: tfplugin6.PlanResourceChange.Response.requires_replace:type_name -> tfplugin6.AttributePath + 5, // 43: tfplugin6.PlanResourceChange.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 44: tfplugin6.ApplyResourceChange.Request.prior_state:type_name -> tfplugin6.DynamicValue + 4, // 45: tfplugin6.ApplyResourceChange.Request.planned_state:type_name -> tfplugin6.DynamicValue + 4, // 46: tfplugin6.ApplyResourceChange.Request.config:type_name -> tfplugin6.DynamicValue + 4, // 47: tfplugin6.ApplyResourceChange.Request.provider_meta:type_name -> tfplugin6.DynamicValue + 4, // 48: tfplugin6.ApplyResourceChange.Response.new_state:type_name -> tfplugin6.DynamicValue + 5, // 49: tfplugin6.ApplyResourceChange.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 50: tfplugin6.ImportResourceState.ImportedResource.state:type_name -> tfplugin6.DynamicValue + 51, // 51: tfplugin6.ImportResourceState.Response.imported_resources:type_name -> tfplugin6.ImportResourceState.ImportedResource + 5, // 52: tfplugin6.ImportResourceState.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 53: tfplugin6.ReadDataSource.Request.config:type_name -> tfplugin6.DynamicValue + 4, // 54: tfplugin6.ReadDataSource.Request.provider_meta:type_name -> tfplugin6.DynamicValue + 4, // 55: tfplugin6.ReadDataSource.Response.state:type_name -> tfplugin6.DynamicValue + 5, // 56: tfplugin6.ReadDataSource.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 29, // 57: 
tfplugin6.Provider.GetProviderSchema:input_type -> tfplugin6.GetProviderSchema.Request + 34, // 58: tfplugin6.Provider.ValidateProviderConfig:input_type -> tfplugin6.ValidateProviderConfig.Request + 38, // 59: tfplugin6.Provider.ValidateResourceConfig:input_type -> tfplugin6.ValidateResourceConfig.Request + 40, // 60: tfplugin6.Provider.ValidateDataResourceConfig:input_type -> tfplugin6.ValidateDataResourceConfig.Request + 36, // 61: tfplugin6.Provider.UpgradeResourceState:input_type -> tfplugin6.UpgradeResourceState.Request + 42, // 62: tfplugin6.Provider.ConfigureProvider:input_type -> tfplugin6.ConfigureProvider.Request + 44, // 63: tfplugin6.Provider.ReadResource:input_type -> tfplugin6.ReadResource.Request + 46, // 64: tfplugin6.Provider.PlanResourceChange:input_type -> tfplugin6.PlanResourceChange.Request + 48, // 65: tfplugin6.Provider.ApplyResourceChange:input_type -> tfplugin6.ApplyResourceChange.Request + 50, // 66: tfplugin6.Provider.ImportResourceState:input_type -> tfplugin6.ImportResourceState.Request + 53, // 67: tfplugin6.Provider.ReadDataSource:input_type -> tfplugin6.ReadDataSource.Request + 22, // 68: tfplugin6.Provider.StopProvider:input_type -> tfplugin6.StopProvider.Request + 30, // 69: tfplugin6.Provider.GetProviderSchema:output_type -> tfplugin6.GetProviderSchema.Response + 35, // 70: tfplugin6.Provider.ValidateProviderConfig:output_type -> tfplugin6.ValidateProviderConfig.Response + 39, // 71: tfplugin6.Provider.ValidateResourceConfig:output_type -> tfplugin6.ValidateResourceConfig.Response + 41, // 72: tfplugin6.Provider.ValidateDataResourceConfig:output_type -> tfplugin6.ValidateDataResourceConfig.Response + 37, // 73: tfplugin6.Provider.UpgradeResourceState:output_type -> tfplugin6.UpgradeResourceState.Response + 43, // 74: tfplugin6.Provider.ConfigureProvider:output_type -> tfplugin6.ConfigureProvider.Response + 45, // 75: tfplugin6.Provider.ReadResource:output_type -> tfplugin6.ReadResource.Response + 47, // 76: tfplugin6.Provider.PlanResourceChange:output_type -> tfplugin6.PlanResourceChange.Response + 49, // 77: tfplugin6.Provider.ApplyResourceChange:output_type -> tfplugin6.ApplyResourceChange.Response + 52, // 78: tfplugin6.Provider.ImportResourceState:output_type -> tfplugin6.ImportResourceState.Response + 54, // 79: tfplugin6.Provider.ReadDataSource:output_type -> tfplugin6.ReadDataSource.Response + 23, // 80: tfplugin6.Provider.StopProvider:output_type -> tfplugin6.StopProvider.Response + 69, // [69:81] is the sub-list for method output_type + 57, // [57:69] is the sub-list for method input_type + 57, // [57:57] is the sub-list for extension type_name + 57, // [57:57] is the sub-list for extension extendee + 0, // [0:57] is the sub-list for field type_name } func init() { file_tfplugin6_proto_init() } @@ -3843,8 +3963,8 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateProviderConfig_Request); i { + file_tfplugin6_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProviderSchema_ServerCapabilities); i { case 0: return &v.state case 1: @@ -3856,7 +3976,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateProviderConfig_Response); i { + switch v := v.(*ValidateProviderConfig_Request); i { case 0: return &v.state case 1: @@ -3868,7 +3988,7 @@ func file_tfplugin6_proto_init() { } } 
file_tfplugin6_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpgradeResourceState_Request); i { + switch v := v.(*ValidateProviderConfig_Response); i { case 0: return &v.state case 1: @@ -3880,7 +4000,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpgradeResourceState_Response); i { + switch v := v.(*UpgradeResourceState_Request); i { case 0: return &v.state case 1: @@ -3892,7 +4012,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateResourceConfig_Request); i { + switch v := v.(*UpgradeResourceState_Response); i { case 0: return &v.state case 1: @@ -3904,7 +4024,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateResourceConfig_Response); i { + switch v := v.(*ValidateResourceConfig_Request); i { case 0: return &v.state case 1: @@ -3916,7 +4036,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateDataResourceConfig_Request); i { + switch v := v.(*ValidateResourceConfig_Response); i { case 0: return &v.state case 1: @@ -3928,7 +4048,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateDataResourceConfig_Response); i { + switch v := v.(*ValidateDataResourceConfig_Request); i { case 0: return &v.state case 1: @@ -3940,7 +4060,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ConfigureProvider_Request); i { + switch v := v.(*ValidateDataResourceConfig_Response); i { case 0: return &v.state case 1: @@ -3952,7 +4072,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ConfigureProvider_Response); i { + switch v := v.(*ConfigureProvider_Request); i { case 0: return &v.state case 1: @@ -3964,7 +4084,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadResource_Request); i { + switch v := v.(*ConfigureProvider_Response); i { case 0: return &v.state case 1: @@ -3976,7 +4096,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadResource_Response); i { + switch v := v.(*ReadResource_Request); i { case 0: return &v.state case 1: @@ -3988,7 +4108,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PlanResourceChange_Request); i { + switch v := v.(*ReadResource_Response); i { case 0: return &v.state case 1: @@ -4000,7 +4120,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PlanResourceChange_Response); i { + switch v := v.(*PlanResourceChange_Request); i { case 0: return &v.state case 1: @@ -4012,7 +4132,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyResourceChange_Request); i { 
+ switch v := v.(*PlanResourceChange_Response); i { case 0: return &v.state case 1: @@ -4024,7 +4144,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyResourceChange_Response); i { + switch v := v.(*ApplyResourceChange_Request); i { case 0: return &v.state case 1: @@ -4036,7 +4156,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ImportResourceState_Request); i { + switch v := v.(*ApplyResourceChange_Response); i { case 0: return &v.state case 1: @@ -4048,7 +4168,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ImportResourceState_ImportedResource); i { + switch v := v.(*ImportResourceState_Request); i { case 0: return &v.state case 1: @@ -4060,7 +4180,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ImportResourceState_Response); i { + switch v := v.(*ImportResourceState_ImportedResource); i { case 0: return &v.state case 1: @@ -4072,7 +4192,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadDataSource_Request); i { + switch v := v.(*ImportResourceState_Response); i { case 0: return &v.state case 1: @@ -4084,6 +4204,18 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadDataSource_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadDataSource_Response); i { case 0: return &v.state @@ -4107,7 +4239,7 @@ func file_tfplugin6_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_tfplugin6_proto_rawDesc, NumEnums: 4, - NumMessages: 50, + NumMessages: 51, NumExtensions: 0, NumServices: 1, }, diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.proto b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.proto index 9986f780a..b6d05bb74 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.proto +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.proto @@ -1,6 +1,6 @@ -// Terraform Plugin RPC protocol version 6.0 +// Terraform Plugin RPC protocol version 6.3 // -// This file defines version 6.0 of the RPC protocol. To implement a plugin +// This file defines version 6.3 of the RPC protocol. To implement a plugin // against this protocol, copy this definition into your own codebase and // use protoc to generate stubs for your target language. // @@ -17,7 +17,7 @@ // branch or any other development branch. 
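// As a concrete, hedged illustration of the "copy this definition and use
// protoc" instruction in the header above: assuming the standard
// protoc-gen-go and protoc-gen-go-grpc plugins are installed (the regenerated
// gRPC stubs later in this diff record protoc-gen-go-grpc v1.2.0 and protoc
// v3.19.4), a go:generate directive along these lines could produce the Go
// bindings; the output paths here are illustrative:
//
//	//go:generate protoc --go_out=. --go-grpc_out=. tfplugin6.proto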
// syntax = "proto3"; -option go_package = "github.com/hashicorp/terraform/internal/tfplugin6"; +option go_package = "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6"; package tfplugin6; @@ -128,13 +128,16 @@ message Schema { repeated Attribute attributes = 1; NestingMode nesting = 3; - int64 min_items = 4; - int64 max_items = 5; + + // MinItems and MaxItems were never used in the protocol, and have no + // effect on validation. + int64 min_items = 4 [deprecated = true]; + int64 max_items = 5 [deprecated = true]; } // The version of the schema. // Schemas are versioned, so that providers can upgrade a saved resource - // state when the schema is changed. + // state when the schema is changed. int64 version = 1; // Block is the top level configuration block for this schema. @@ -173,6 +176,18 @@ message GetProviderSchema { map data_source_schemas = 3; repeated Diagnostic diagnostics = 4; Schema provider_meta = 5; + ServerCapabilities server_capabilities = 6; + } + + + // ServerCapabilities allows providers to communicate extra information + // regarding supported protocol features. This is used to indicate + // availability of certain forward-compatible changes which may be optional + // in a major protocol version, but cannot be tested for directly. + message ServerCapabilities { + // The plan_destroy capability signals that a provider expects a call + // to PlanResourceChange when a resource is going to be destroyed. + bool plan_destroy = 1; } } @@ -262,15 +277,28 @@ message PlanResourceChange { DynamicValue prior_state = 2; DynamicValue proposed_new_state = 3; DynamicValue config = 4; - bytes prior_private = 5; + bytes prior_private = 5; DynamicValue provider_meta = 6; } message Response { DynamicValue planned_state = 1; repeated AttributePath requires_replace = 2; - bytes planned_private = 3; + bytes planned_private = 3; repeated Diagnostic diagnostics = 4; + + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. + // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + bool legacy_type_system = 5; } } @@ -280,13 +308,26 @@ message ApplyResourceChange { DynamicValue prior_state = 2; DynamicValue planned_state = 3; DynamicValue config = 4; - bytes planned_private = 5; + bytes planned_private = 5; DynamicValue provider_meta = 6; } message Response { DynamicValue new_state = 1; - bytes private = 2; + bytes private = 2; repeated Diagnostic diagnostics = 3; + + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. 
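// The ServerCapabilities message above surfaces in Go as the new
// tfprotov6.ServerCapabilities type added later in this diff. A minimal
// sketch of a provider opting in to plan_destroy when answering
// GetProviderSchema — only the tfprotov6 field names come from this change;
// the function name and empty schema are illustrative:

package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-go/tfprotov6"
)

func getProviderSchema(ctx context.Context, req *tfprotov6.GetProviderSchemaRequest) (*tfprotov6.GetProviderSchemaResponse, error) {
	return &tfprotov6.GetProviderSchemaResponse{
		// Opt in to PlanResourceChange calls for destroy plans; in those
		// calls ProposedNewState will be a null value.
		ServerCapabilities: &tfprotov6.ServerCapabilities{
			PlanDestroy: true,
		},
		Provider: &tfprotov6.Schema{Block: &tfprotov6.SchemaBlock{}},
	}, nil
}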
+ // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + bool legacy_type_system = 4; } } @@ -318,4 +359,4 @@ message ReadDataSource { DynamicValue state = 1; repeated Diagnostic diagnostics = 2; } -} \ No newline at end of file +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6_grpc.pb.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6_grpc.pb.go index 957e9d239..a22d32dc9 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6_grpc.pb.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.19.4 +// source: tfplugin6.proto package tfplugin6 diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/provider.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/provider.go index 503ba4e2b..ce37207f9 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/provider.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/provider.go @@ -12,7 +12,12 @@ func GetProviderSchema_Request(in *tfprotov6.GetProviderSchemaRequest) (*tfplugi } func GetProviderSchema_Response(in *tfprotov6.GetProviderSchemaResponse) (*tfplugin6.GetProviderSchema_Response, error) { - var resp tfplugin6.GetProviderSchema_Response + if in == nil { + return nil, nil + } + resp := tfplugin6.GetProviderSchema_Response{ + ServerCapabilities: GetProviderSchema_ServerCapabilities(in.ServerCapabilities), + } if in.Provider != nil { schema, err := Schema(in.Provider) if err != nil { diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/resource.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/resource.go index e86988849..bb0968185 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/resource.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/resource.go @@ -101,7 +101,8 @@ func PlanResourceChange_Request(in *tfprotov6.PlanResourceChangeRequest) (*tfplu func PlanResourceChange_Response(in *tfprotov6.PlanResourceChangeResponse) (*tfplugin6.PlanResourceChange_Response, error) { resp := &tfplugin6.PlanResourceChange_Response{ - PlannedPrivate: in.PlannedPrivate, + PlannedPrivate: in.PlannedPrivate, + LegacyTypeSystem: in.UnsafeToUseLegacyTypeSystem, //nolint:staticcheck } requiresReplace, err := AttributePaths(in.RequiresReplace) if err != nil { @@ -141,7 +142,8 @@ func ApplyResourceChange_Request(in *tfprotov6.ApplyResourceChangeRequest) (*tfp func ApplyResourceChange_Response(in *tfprotov6.ApplyResourceChangeResponse) (*tfplugin6.ApplyResourceChange_Response, error) { resp := &tfplugin6.ApplyResourceChange_Response{ - Private: in.Private, + Private: in.Private, + LegacyTypeSystem: in.UnsafeToUseLegacyTypeSystem, //nolint:staticcheck } diags, err := Diagnostics(in.Diagnostics) if err != nil { diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/schema.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/schema.go index f9f721a15..f3dfcb97d 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/schema.go +++ 
b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/schema.go @@ -127,9 +127,7 @@ func Schema_Object_NestingMode(in tfprotov6.SchemaObjectNestingMode) tfplugin6.S func Schema_Object(in *tfprotov6.SchemaObject) (*tfplugin6.Schema_Object, error) { resp := &tfplugin6.Schema_Object{ - Nesting: Schema_Object_NestingMode(in.Nesting), - MinItems: in.MinItems, - MaxItems: in.MaxItems, + Nesting: Schema_Object_NestingMode(in.Nesting), } attrs, err := Schema_Attributes(in.Attributes) if err != nil { diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/server_capabilities.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/server_capabilities.go new file mode 100644 index 000000000..fa335d691 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/server_capabilities.go @@ -0,0 +1,16 @@ +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" +) + +func GetProviderSchema_ServerCapabilities(in *tfprotov6.ServerCapabilities) *tfplugin6.GetProviderSchema_ServerCapabilities { + if in == nil { + return nil + } + + return &tfplugin6.GetProviderSchema_ServerCapabilities{ + PlanDestroy: in.PlanDestroy, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/provider.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/provider.go index f2b8b3add..21f81f7a1 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/provider.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/provider.go @@ -46,6 +46,10 @@ type GetProviderSchemaRequest struct{} // GetProviderSchemaResponse represents a Terraform RPC response containing the // provider's schemas. type GetProviderSchemaResponse struct { + // ServerCapabilities defines optionally supported protocol features, + // such as forward-compatible Terraform behavior changes. + ServerCapabilities *ServerCapabilities + // Provider defines the schema for the provider configuration, which // will be specified in the provider block of the user's configuration. Provider *Schema diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/resource.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/resource.go index 3250f1954..2768bb526 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/resource.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/resource.go @@ -125,6 +125,9 @@ type ReadResourceRequest struct { // Private is any provider-defined private state stored with the // resource. It is used for keeping state with the resource that is not // meant to be included when calculating diffs. + // + // To ensure private state data is preserved, copy any necessary data to + // the ReadResourceResponse type Private field. Private []byte // ProviderMeta supplies the provider metadata configuration for the @@ -212,6 +215,9 @@ type PlanResourceChangeRequest struct { // PriorPrivate is any provider-defined private state stored with the // resource. It is used for keeping state with the resource that is not // meant to be included when calculating diffs. + // + // To ensure private state data is preserved, copy any necessary data to + // the PlanResourceChangeResponse type PlannedPrivate field. 
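// The comments being added here all make the same point: private state only
// survives if the provider copies it forward on each response. A minimal
// sketch of that copy during planning — the function body is illustrative;
// the PriorPrivate and PlannedPrivate fields are the ones documented above:

package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-go/tfprotov6"
)

func planResourceChange(ctx context.Context, req *tfprotov6.PlanResourceChangeRequest) (*tfprotov6.PlanResourceChangeResponse, error) {
	return &tfprotov6.PlanResourceChangeResponse{
		// Plan no changes in this trivial sketch.
		PlannedState: req.ProposedNewState,
		// Copy private state forward; Terraform returns it on the
		// subsequent ApplyResourceChangeRequest as PlannedPrivate.
		PlannedPrivate: req.PriorPrivate,
	}, nil
}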
PriorPrivate []byte // ProviderMeta supplies the provider metadata configuration for the @@ -280,6 +286,10 @@ type PlanResourceChangeResponse struct { // like sent with requests for this resource. This state will be // associated with the resource, but will not be considered when // calculating diffs. + // + // This private state data will be sent in the ApplyResourceChange RPC, in + // relation to the types of this package, the ApplyResourceChangeRequest + // type PlannedPrivate field. PlannedPrivate []byte // Diagnostics report errors or warnings related to determining the @@ -341,6 +351,13 @@ type ApplyResourceChangeRequest struct { // PlannedPrivate is any provider-defined private state stored with the // resource. It is used for keeping state with the resource that is not // meant to be included when calculating diffs. + // + // This private state data is sourced from the PlanResourceChange RPC, in + // relation to the types in this package, the PlanResourceChangeResponse + // type PlannedPrivate field. + // + // To ensure private state data is preserved, copy any necessary data to + // the ApplyResourceChangeResponse type Private field. PlannedPrivate []byte // ProviderMeta supplies the provider metadata configuration for the diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/schema.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/schema.go index a7e139fed..c17e2ccee 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/schema.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/schema.go @@ -96,6 +96,19 @@ type Schema struct { Block *SchemaBlock } +// ValueType returns the tftypes.Type for a Schema. +// +// If Schema is missing, an empty Object is returned. +func (s *Schema) ValueType() tftypes.Type { + if s == nil { + return tftypes.Object{ + AttributeTypes: map[string]tftypes.Type{}, + } + } + + return s.Block.ValueType() +} + // SchemaBlock represents a block in a schema. Blocks are how Terraform creates // groupings of attributes. In configurations, they don't use the equals sign // and use dynamic instead of list comprehensions. @@ -133,6 +146,51 @@ type SchemaBlock struct { Deprecated bool } +// ValueType returns the tftypes.Type for a SchemaBlock. +// +// If SchemaBlock is missing, an empty Object is returned. +func (s *SchemaBlock) ValueType() tftypes.Type { + if s == nil { + return tftypes.Object{ + AttributeTypes: map[string]tftypes.Type{}, + } + } + + attributeTypes := map[string]tftypes.Type{} + + for _, attribute := range s.Attributes { + if attribute == nil { + continue + } + + attributeType := attribute.ValueType() + + if attributeType == nil { + continue + } + + attributeTypes[attribute.Name] = attributeType + } + + for _, block := range s.BlockTypes { + if block == nil { + continue + } + + blockType := block.ValueType() + + if blockType == nil { + continue + } + + attributeTypes[block.TypeName] = blockType + } + + return tftypes.Object{ + AttributeTypes: attributeTypes, + } +} + // SchemaAttribute represents a single attribute within a schema block. // Attributes are the fields users can set in configuration using the equals // sign, can assign to variables, can interpolate, and can use list @@ -194,6 +252,22 @@ type SchemaAttribute struct { Deprecated bool } +// ValueType returns the tftypes.Type for a SchemaAttribute. +// +// If SchemaAttribute is missing, nil is returned. 
+func (s *SchemaAttribute) ValueType() tftypes.Type { + if s == nil { + return nil + } + + // It is not valid to set both NestedType and Type. + if s.NestedType != nil { + return s.NestedType.ValueType() + } + + return s.Type +} + // SchemaNestedBlock is a nested block within another block. See SchemaBlock // for more information on blocks. type SchemaNestedBlock struct { @@ -230,6 +304,39 @@ type SchemaNestedBlock struct { MaxItems int64 } +// ValueType returns the tftypes.Type for a SchemaNestedBlock. +// +// If SchemaNestedBlock is missing or the Nesting mode is invalid, nil is +// returned. +func (s *SchemaNestedBlock) ValueType() tftypes.Type { + if s == nil { + return nil + } + + blockType := s.Block.ValueType() + + switch s.Nesting { + case SchemaNestedBlockNestingModeGroup: + return blockType + case SchemaNestedBlockNestingModeList: + return tftypes.List{ + ElementType: blockType, + } + case SchemaNestedBlockNestingModeMap: + return tftypes.Map{ + ElementType: blockType, + } + case SchemaNestedBlockNestingModeSet: + return tftypes.Set{ + ElementType: blockType, + } + case SchemaNestedBlockNestingModeSingle: + return blockType + default: + return nil + } +} + // SchemaNestedBlockNestingMode indicates the nesting mode for // SchemaNestedBlocks. The nesting mode determines the number of instances of // the block allowed, how many labels the block expects, and the data structure @@ -260,14 +367,54 @@ type SchemaObject struct { Attributes []*SchemaAttribute Nesting SchemaObjectNestingMode +} - // MinItems is the minimum number of instances of this type that a - // user must specify or Terraform will return an error. - MinItems int64 +// ValueType returns the tftypes.Type for a SchemaObject. +// +// If SchemaObject is missing or the Nesting mode is invalid, nil is returned. +func (s *SchemaObject) ValueType() tftypes.Type { + if s == nil { + return nil + } - // MaxItems is the maximum number of instances of this type that a - // user may specify before Terraform returns an error. - MaxItems int64 + attributeTypes := map[string]tftypes.Type{} + + for _, attribute := range s.Attributes { + if attribute == nil { + continue + } + + attributeType := attribute.ValueType() + + if attributeType == nil { + continue + } + + attributeTypes[attribute.Name] = attributeType + } + + objectType := tftypes.Object{ + AttributeTypes: attributeTypes, + } + + switch s.Nesting { + case SchemaObjectNestingModeList: + return tftypes.List{ + ElementType: objectType, + } + case SchemaObjectNestingModeMap: + return tftypes.Map{ + ElementType: objectType, + } + case SchemaObjectNestingModeSet: + return tftypes.Set{ + ElementType: objectType, + } + case SchemaObjectNestingModeSingle: + return objectType + default: + return nil + } } // SchemaObjectNestingMode indicates the nesting mode for diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/server_capabilities.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/server_capabilities.go new file mode 100644 index 000000000..eed59219d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/server_capabilities.go @@ -0,0 +1,14 @@ +package tfprotov6 + +// ServerCapabilities allows providers to communicate optionally supported +// protocol features, such as forward-compatible Terraform behavior changes. +// +// This information is used in GetProviderSchemaResponse as capabilities are +// static features which must be known upfront in the provider server. 
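// Taken together, the ValueType helpers let a server derive the tftypes type
// for an entire schema. A small usage sketch — the schema contents are
// illustrative; Schema.ValueType and the tftypes types are the real parts of
// this package:

package example

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tfprotov6"
	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func exampleValueType() {
	schema := &tfprotov6.Schema{
		Block: &tfprotov6.SchemaBlock{
			Attributes: []*tfprotov6.SchemaAttribute{
				{Name: "name", Type: tftypes.String, Required: true},
			},
		},
	}

	// Yields an object type with a single "name" string attribute.
	fmt.Println(schema.ValueType())
}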
+type ServerCapabilities struct { + // PlanDestroy signals that a provider expects a call to + // PlanResourceChange when a resource is going to be destroyed. This is + // opt-in to prevent unexpected errors or panics since the + // ProposedNewState in PlanResourceChangeRequest will be a null value. + PlanDestroy bool +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go index 4fd0b6eba..bdfe3d0f7 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go @@ -2,51 +2,83 @@ package tf6server import ( "context" + "encoding/json" "errors" - "log" + "fmt" + "os" + "os/signal" "regexp" + "runtime" "strings" "sync" + "time" + "github.com/hashicorp/terraform-plugin-go/internal/logging" "github.com/hashicorp/terraform-plugin-go/tfprotov6" "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging" "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto" + "google.golang.org/grpc" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" - "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-log/tfsdklog" - tfaddr "github.com/hashicorp/terraform-registry-address" testing "github.com/mitchellh/go-testing-interface" ) -const tflogSubsystemName = "proto" - -// Global logging keys attached to all requests. -// -// Practitioners or tooling reading logs may be depending on these keys, so be -// conscious of that when changing them. const ( - // A unique ID for the RPC request - logKeyRequestID = "tf_req_id" - - // The full address of the provider, such as - // registry.terraform.io/hashicorp/random - logKeyProviderAddress = "tf_provider_addr" - - // The RPC being run, such as "ApplyResourceChange" - logKeyRPC = "tf_rpc" + // protocolVersionMajor represents the major version number of the protocol + // being served. This is used during the plugin handshake to validate the + // server and client are compatible. + // + // In the future, it may be possible to include this information directly + // in the protocol buffers rather than recreating a constant here. + protocolVersionMajor uint = 6 + + // protocolVersionMinor represents the minor version number of the protocol + // being served. Backwards compatible additions are possible in the + // protocol definitions, which is when this may be increased. While it is + // not used in plugin negotiation, it can be helpful to include this value + // for debugging, such as in logs. + // + // In the future, it may be possible to include this information directly + // in the protocol buffers rather than recreating a constant here. + protocolVersionMinor uint = 3 +) - // The type of resource being operated on, such as "random_pet" - logKeyResourceType = "tf_resource_type" +// protocolVersion represents the combined major and minor version numbers of +// the protocol being served. 
+var protocolVersion string = fmt.Sprintf("%d.%d", protocolVersionMajor, protocolVersionMinor) - // The type of data source being operated on, such as "archive_file" - logKeyDataSourceType = "tf_data_source_type" +const ( + // envTfReattachProviders is the environment variable used by Terraform CLI + // to directly connect to already running provider processes, such as those + // being inspected by debugging processes. When connecting to providers in + // this manner, Terraform CLI disables certain plugin handshake checks and + // will not stop the provider process. + envTfReattachProviders = "TF_REATTACH_PROVIDERS" +) - // The protocol version being used, as a string, such as "6" - logKeyProtocolVersion = "tf_proto_version" +const ( + // grpcMaxMessageSize is the maximum gRPC send and receive message sizes + // for the server. + // + // This 256MB value is arbitrarily raised from the default message sizes of + // 4MB to account for advanced use cases, but arbitrarily lowered from + // MaxInt32 (or similar) to prevent incorrect server implementations from + // exhausting resources in common execution environments. Receiving a gRPC + // message size error is preferable for troubleshooting over determining + // why an execution environment may have terminated the process via its + // memory management processes, such as oom-killer on Linux. + // + // This value is kept as constant over allowing server configurability + // since there are many factors that influence message size, such as + // Terraform configuration and state data. If larger message size use + // cases appear, other gRPC options should be explored, such as + // implementing streaming RPCs and messages. + grpcMaxMessageSize = 256 << 20 ) // ServeOpt is an interface for defining options that can be passed to the @@ -65,6 +97,10 @@ type ServeConfig struct { debugCh chan *plugin.ReattachConfig debugCloseCh chan struct{} + managedDebug bool + managedDebugReattachConfigTimeout time.Duration + managedDebugStopSignals []os.Signal + disableLogInitStderr bool disableLogLocation bool useLoggingSink testing.T @@ -79,8 +115,17 @@ func (s serveConfigFunc) ApplyServeOpt(in *ServeConfig) error { // WithDebug returns a ServeOpt that will set the server into debug mode, using // the passed options to populate the go-plugin ServeTestConfig. +// +// This is an advanced ServeOpt that assumes the caller will fully manage the +// reattach configuration and server lifecycle. Refer to WithManagedDebug for a +// ServeOpt that handles common use cases, such as implementing provider main +// functions. func WithDebug(ctx context.Context, config chan *plugin.ReattachConfig, closeCh chan struct{}) ServeOpt { return serveConfigFunc(func(in *ServeConfig) error { + if in.managedDebug { + return errors.New("cannot set both WithDebug and WithManagedDebug") + } + in.debugCtx = ctx in.debugCh = config in.debugCloseCh = closeCh @@ -88,6 +133,47 @@ func WithDebug(ctx context.Context, config chan *plugin.ReattachConfig, closeCh }) } +// WithManagedDebug returns a ServeOpt that will start the server in debug +// mode, managing the reattach configuration handling and server lifecycle. +// Reattach configuration is output to stdout with human friendly instructions. +// By default, the server can be stopped with os.Interrupt (SIGINT; ctrl-c). +// +// Refer to the optional WithManagedDebugStopSignals and +// WithManagedDebugReattachConfigTimeout ServeOpt for additional configuration. 
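// A sketch of the provider main function this managed debug mode is designed
// for — the provider address and stub constructor are placeholders; Serve,
// ServeOpt, and WithManagedDebug are the real entry points from this diff:

package main

import (
	"flag"
	"log"

	"github.com/hashicorp/terraform-plugin-go/tfprotov6"
	"github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server"
)

// newExampleServer is a stand-in; a real provider returns a full
// tfprotov6.ProviderServer implementation here.
func newExampleServer() tfprotov6.ProviderServer { return nil }

func main() {
	debug := flag.Bool("debug", false, "start in managed debug mode")
	flag.Parse()

	var opts []tf6server.ServeOpt
	if *debug {
		// Prints TF_REATTACH_PROVIDERS instructions to stdout and runs
		// until stopped (os.Interrupt by default).
		opts = append(opts, tf6server.WithManagedDebug())
	}

	if err := tf6server.Serve("registry.terraform.io/example/example", func() tfprotov6.ProviderServer {
		return newExampleServer()
	}, opts...); err != nil {
		log.Fatal(err)
	}
}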
+// +// The reattach configuration output of this handling is not protected by +// compatibility guarantees. Use the WithDebug ServeOpt for advanced use cases. +func WithManagedDebug() ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + if in.debugCh != nil { + return errors.New("cannot set both WithDebug and WithManagedDebug") + } + + in.managedDebug = true + return nil + }) +} + +// WithManagedDebugStopSignals returns a ServeOpt that will set the stop signals for a +// debug managed process (WithManagedDebug). When not configured, os.Interrupt +// (SIGINT; Ctrl-c) will stop the process. +func WithManagedDebugStopSignals(signals []os.Signal) ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + in.managedDebugStopSignals = signals + return nil + }) +} + +// WithManagedDebugReattachConfigTimeout returns a ServeOpt that will set the timeout +// for a debug managed process to start and return its reattach configuration. +// When not configured, 2 seconds is the default. +func WithManagedDebugReattachConfigTimeout(timeout time.Duration) ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + in.managedDebugReattachConfigTimeout = timeout + return nil + }) +} + // WithGoPluginLogger returns a ServeOpt that will set the logger that // go-plugin should use to log messages. func WithGoPluginLogger(logger hclog.Logger) ServeOpt { @@ -152,16 +238,22 @@ func WithLogEnvVarName(name string) ServeOpt { // modify the logger that go-plugin is using, ServeOpts can be specified to // support that. func Serve(name string, serverFactory func() tfprotov6.ProviderServer, opts ...ServeOpt) error { - var conf ServeConfig + // Defaults + conf := ServeConfig{ + managedDebugReattachConfigTimeout: 2 * time.Second, + managedDebugStopSignals: []os.Signal{os.Interrupt}, + } + for _, opt := range opts { err := opt.ApplyServeOpt(&conf) if err != nil { return err } } + serveConfig := &plugin.ServeConfig{ HandshakeConfig: plugin.HandshakeConfig{ - ProtocolVersion: 6, + ProtocolVersion: protocolVersionMajor, MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", }, @@ -172,11 +264,42 @@ func Serve(name string, serverFactory func() tfprotov6.ProviderServer, opts ...S Name: name, }, }, - GRPCServer: plugin.DefaultGRPCServer, + GRPCServer: func(opts []grpc.ServerOption) *grpc.Server { + opts = append(opts, grpc.MaxRecvMsgSize(grpcMaxMessageSize)) + opts = append(opts, grpc.MaxSendMsgSize(grpcMaxMessageSize)) + + return grpc.NewServer(opts...) + }, } + if conf.logger != nil { serveConfig.Logger = conf.logger } + + if conf.managedDebug { + ctx, cancel := context.WithCancel(context.Background()) + signalCh := make(chan os.Signal, len(conf.managedDebugStopSignals)) + + signal.Notify(signalCh, conf.managedDebugStopSignals...) 
+ + defer func() { + signal.Stop(signalCh) + cancel() + }() + + go func() { + select { + case <-signalCh: + cancel() + case <-ctx.Done(): + } + }() + + conf.debugCh = make(chan *plugin.ReattachConfig) + conf.debugCloseCh = make(chan struct{}) + conf.debugCtx = ctx + } + if conf.debugCh != nil { serveConfig.Test = &plugin.ServeTestConfig{ Context: conf.debugCtx, @@ -184,7 +307,78 @@ func Serve(name string, serverFactory func() tfprotov6.ProviderServer, opts ...S CloseCh: conf.debugCloseCh, } } - plugin.Serve(serveConfig) + + if !conf.managedDebug { + plugin.Serve(serveConfig) + return nil + } + + go plugin.Serve(serveConfig) + + var pluginReattachConfig *plugin.ReattachConfig + + select { + case pluginReattachConfig = <-conf.debugCh: + case <-time.After(conf.managedDebugReattachConfigTimeout): + return errors.New("timeout waiting on reattach configuration") + } + + if pluginReattachConfig == nil { + return errors.New("nil reattach configuration received") + } + + // Duplicate implementation is required because the go-plugin + // ReattachConfig.Addr implementation is not friendly for JSON encoding + // and to avoid importing terraform-exec. + type reattachConfigAddr struct { + Network string + String string + } + + type reattachConfig struct { + Protocol string + ProtocolVersion int + Pid int + Test bool + Addr reattachConfigAddr + } + + reattachBytes, err := json.Marshal(map[string]reattachConfig{ + name: { + Protocol: string(pluginReattachConfig.Protocol), + ProtocolVersion: pluginReattachConfig.ProtocolVersion, + Pid: pluginReattachConfig.Pid, + Test: pluginReattachConfig.Test, + Addr: reattachConfigAddr{ + Network: pluginReattachConfig.Addr.Network(), + String: pluginReattachConfig.Addr.String(), + }, + }, + }) + + if err != nil { + return fmt.Errorf("Error building reattach string: %w", err) + } + + reattachStr := string(reattachBytes) + + // This is currently intended to be executed via provider main function and + // human friendly, so output directly to stdout. + fmt.Printf("Provider started. To attach Terraform CLI, set the %s environment variable with the following:\n\n", envTfReattachProviders) + + switch runtime.GOOS { + case "windows": + fmt.Printf("\tCommand Prompt:\tset \"%s=%s\"\n", envTfReattachProviders, reattachStr) + fmt.Printf("\tPowerShell:\t$env:%s='%s'\n", envTfReattachProviders, strings.ReplaceAll(reattachStr, `'`, `''`)) + default: + fmt.Printf("\t%s='%s'\n", envTfReattachProviders, strings.ReplaceAll(reattachStr, `'`, `'"'"'`)) + } + + fmt.Println("") + + // Wait for the server to be done. + <-conf.debugCloseCh + return nil } @@ -200,6 +394,13 @@ type server struct { useTFLogSink bool testHandle testing.T name string + + // protocolDataDir is a directory to store raw protocol data files for + // debugging purposes. + protocolDataDir string + + // protocolVersion is the protocol version for the server. + protocolVersion string } func mergeStop(ctx context.Context, cancel context.CancelFunc, stopCh chan struct{}) { @@ -232,50 +433,11 @@ func (s *server) loggingContext(ctx context.Context) context.Context { ctx = tfsdklog.RegisterTestSink(ctx, s.testHandle) } - // generate a request ID - reqID, err := uuid.GenerateUUID() - if err != nil { - reqID = "unable to assign request ID: " + err.Error() - } - - // set up the logger SDK loggers are derived from - ctx = tfsdklog.NewRootSDKLogger(ctx, append(tfsdklog.Options{ - tfsdklog.WithLevelFromEnv("TF_LOG_SDK"), - }, s.tflogSDKOpts...)...) 
- ctx = tfsdklog.With(ctx, logKeyRequestID, reqID) - ctx = tfsdklog.With(ctx, logKeyProviderAddress, s.name) - - // set up our protocol-level subsystem logger - ctx = tfsdklog.NewSubsystem(ctx, tflogSubsystemName, append(tfsdklog.Options{ - tfsdklog.WithLevelFromEnv("TF_LOG_SDK_PROTO"), - }, s.tflogSDKOpts...)...) - ctx = tfsdklog.SubsystemWith(ctx, tflogSubsystemName, logKeyProtocolVersion, "6") - - // set up the provider logger - ctx = tfsdklog.NewRootProviderLogger(ctx, s.tflogOpts...) - ctx = tflog.With(ctx, logKeyRequestID, reqID) - ctx = tflog.With(ctx, logKeyProviderAddress, s.name) - return ctx -} - -func rpcLoggingContext(ctx context.Context, rpc string) context.Context { - ctx = tfsdklog.With(ctx, logKeyRPC, rpc) - ctx = tfsdklog.SubsystemWith(ctx, tflogSubsystemName, logKeyRPC, rpc) - ctx = tflog.With(ctx, logKeyRPC, rpc) - return ctx -} + ctx = logging.InitContext(ctx, s.tflogSDKOpts, s.tflogOpts) + ctx = logging.RequestIdContext(ctx) + ctx = logging.ProviderAddressContext(ctx, s.name) + ctx = logging.ProtocolVersionContext(ctx, s.protocolVersion) -func resourceLoggingContext(ctx context.Context, resource string) context.Context { - ctx = tfsdklog.With(ctx, logKeyResourceType, resource) - ctx = tfsdklog.SubsystemWith(ctx, tflogSubsystemName, logKeyResourceType, resource) - ctx = tflog.With(ctx, logKeyResourceType, resource) - return ctx -} - -func dataSourceLoggingContext(ctx context.Context, dataSource string) context.Context { - ctx = tfsdklog.With(ctx, logKeyDataSourceType, dataSource) - ctx = tfsdklog.SubsystemWith(ctx, tflogSubsystemName, logKeyDataSourceType, dataSource) - ctx = tflog.With(ctx, logKeyDataSourceType, dataSource) return ctx } @@ -303,97 +465,101 @@ func New(name string, serve tfprotov6.ProviderServer, opts ...ServeOpt) tfplugin } envVar := conf.envVar if envVar == "" { - addr, err := tfaddr.ParseRawProviderSourceString(name) - if err != nil { - log.Printf("[ERROR] Error parsing provider name %q: %s", name, err) - } else { - envVar = addr.Type - } + envVar = logging.ProviderLoggerName(name) } - envVar = strings.ReplaceAll(envVar, "-", "_") if envVar != "" { - options = append(options, tfsdklog.WithLogName(envVar), tflog.WithLevelFromEnv("TF_LOG_PROVIDER", envVar)) + options = append(options, tfsdklog.WithLogName(envVar), tflog.WithLevelFromEnv(logging.EnvTfLogProvider, envVar)) } return &server{ - downstream: serve, - stopCh: make(chan struct{}), - tflogOpts: options, - tflogSDKOpts: sdkOptions, - name: name, - useTFLogSink: conf.useLoggingSink != nil, - testHandle: conf.useLoggingSink, + downstream: serve, + stopCh: make(chan struct{}), + tflogOpts: options, + tflogSDKOpts: sdkOptions, + name: name, + useTFLogSink: conf.useLoggingSink != nil, + testHandle: conf.useLoggingSink, + protocolDataDir: os.Getenv(logging.EnvTfLogSdkProtoDataDir), + protocolVersion: protocolVersion, } } func (s *server) GetProviderSchema(ctx context.Context, req *tfplugin6.GetProviderSchema_Request) (*tfplugin6.GetProviderSchema_Response, error) { - ctx = rpcLoggingContext(s.loggingContext(ctx), "GetProviderSchema") + rpc := "GetProviderSchema" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.GetProviderSchemaRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, 
tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.GetProviderSchema(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) ret, err := toproto.GetProviderSchema_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ConfigureProvider(ctx context.Context, req *tfplugin6.ConfigureProvider_Request) (*tfplugin6.ConfigureProvider_Response, error) { - ctx = rpcLoggingContext(s.loggingContext(ctx), "ConfigureProvider") + rpc := "ConfigureProvider" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ConfigureProviderRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ConfigureProvider(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) ret, err := toproto.Configure_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ValidateProviderConfig(ctx context.Context, req *tfplugin6.ValidateProviderConfig_Request) (*tfplugin6.ValidateProviderConfig_Response, error) { - ctx = rpcLoggingContext(s.loggingContext(ctx), "ValidateProviderConfig") - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + rpc := "ValidateProviderConfig" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ValidateProviderConfigRequest(req) if err 
!= nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ValidateProviderConfig(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) ret, err := toproto.ValidateProviderConfig_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil @@ -413,228 +579,276 @@ func (s *server) stop() { } func (s *server) Stop(ctx context.Context, req *tfplugin6.StopProvider_Request) (*tfplugin6.StopProvider_Response, error) { - ctx = rpcLoggingContext(s.loggingContext(ctx), "Stop") + rpc := "StopProvider" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.StopProviderRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.StopProvider(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Closing all our contexts") + tf6serverlogging.DownstreamResponse(ctx, nil) + logging.ProtocolTrace(ctx, "Closing all our contexts") s.stop() - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Closed all our contexts") + logging.ProtocolTrace(ctx, "Closed all our contexts") ret, err := toproto.Stop_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ValidateDataResourceConfig(ctx context.Context, req *tfplugin6.ValidateDataResourceConfig_Request) (*tfplugin6.ValidateDataResourceConfig_Response, error) { - ctx = dataSourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ValidateDataResourceConfig"), req.TypeName) + rpc := "ValidateDataResourceConfig" + ctx = 
s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.DataSourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ValidateDataResourceConfigRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ValidateDataResourceConfig(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) ret, err := toproto.ValidateDataResourceConfig_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ReadDataSource(ctx context.Context, req *tfplugin6.ReadDataSource_Request) (*tfplugin6.ReadDataSource_Response, error) { - ctx = dataSourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ReadDataSource"), req.TypeName) + rpc := "ReadDataSource" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.DataSourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ReadDataSourceRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", r.ProviderMeta) + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ReadDataSource(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "State", resp.State) ret, err := toproto.ReadDataSource_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, 
"Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ValidateResourceConfig(ctx context.Context, req *tfplugin6.ValidateResourceConfig_Request) (*tfplugin6.ValidateResourceConfig_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ValidateResourceConfig"), req.TypeName) + rpc := "ValidateResourceConfig" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ValidateResourceConfigRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ValidateResourceConfig(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) ret, err := toproto.ValidateResourceConfig_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) UpgradeResourceState(ctx context.Context, req *tfplugin6.UpgradeResourceState_Request) (*tfplugin6.UpgradeResourceState_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "UpgradeResourceState"), req.TypeName) + rpc := "UpgradeResourceState" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.UpgradeResourceStateRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.UpgradeResourceState(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", 
map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "UpgradedState", resp.UpgradedState) ret, err := toproto.UpgradeResourceState_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ReadResource(ctx context.Context, req *tfplugin6.ReadResource_Request) (*tfplugin6.ReadResource_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ReadResource"), req.TypeName) + rpc := "ReadResource" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ReadResourceRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "CurrentState", r.CurrentState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", r.ProviderMeta) + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ReadResource(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "NewState", resp.NewState) ret, err := toproto.ReadResource_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) PlanResourceChange(ctx context.Context, req *tfplugin6.PlanResourceChange_Request) (*tfplugin6.PlanResourceChange_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "PlanResourceChange"), req.TypeName) + rpc := "PlanResourceChange" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.PlanResourceChangeRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from 
protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "PriorState", r.PriorState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProposedNewState", r.ProposedNewState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", r.ProviderMeta) + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.PlanResourceChange(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "PlannedState", resp.PlannedState) ret, err := toproto.PlanResourceChange_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ApplyResourceChange(ctx context.Context, req *tfplugin6.ApplyResourceChange_Request) (*tfplugin6.ApplyResourceChange_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ApplyResourceChange"), req.TypeName) + rpc := "ApplyResourceChange" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ApplyResourceChangeRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "PlannedState", r.PlannedState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ApplyResourceChange(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "NewState", resp.NewState) ret, err := toproto.ApplyResourceChange_Response(resp) if err != nil { - 
tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil } func (s *server) ImportResourceState(ctx context.Context, req *tfplugin6.ImportResourceState_Request) (*tfplugin6.ImportResourceState_Response, error) { - ctx = resourceLoggingContext(rpcLoggingContext(s.loggingContext(ctx), "ImportResourceState"), req.TypeName) + rpc := "ImportResourceState" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, req.TypeName) ctx = s.stoppableContext(ctx) - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Received request") - defer tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Served request") + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") r, err := fromproto.ImportResourceStateRequest(req) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting request from protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Calling downstream") + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ImportResourceState(ctx, r) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error from downstream", "error", err) + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - tfsdklog.SubsystemTrace(ctx, tflogSubsystemName, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + for _, importedResource := range resp.ImportedResources { + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response_ImportedResource", "State", importedResource.State) + } ret, err := toproto.ImportResourceState_Response(resp) if err != nil { - tfsdklog.SubsystemError(ctx, tflogSubsystemName, "Error converting response to protobuf", "error", err) + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } return ret, nil diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/attribute_path.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/attribute_path.go index fd91f96a5..d6108d4ab 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/attribute_path.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/attribute_path.go @@ -284,9 +284,9 @@ type AttributePathStepper interface { ApplyTerraform5AttributePathStep(AttributePathStep) (interface{}, error) } -// WalkAttributePath will return the value that `path` is pointing to, using -// `in` as the root. If an error is returned, the AttributePath returned will -// indicate the steps that remained to be applied when the error was +// WalkAttributePath will return the Type or Value that `path` is pointing to, +// using `in` as the root. If an error is returned, the AttributePath returned +// will indicate the steps that remained to be applied when the error was // encountered. // // map[string]interface{} and []interface{} types have built-in support. 
Other diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/diff.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/diff.go index dc89026c5..328b1adf6 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/diff.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/diff.go @@ -283,6 +283,9 @@ func (val1 Value) Diff(val2 Value) ([]ValueDiff, error) { // if we have the same keys, we can just let recursion // from the walk check the sub-values match return true, nil + case value1.Type().Is(DynamicPseudoType): + // Let recursion from the walk check the sub-values match + return true, nil } return false, fmt.Errorf("unexpected type %v in Diff at %s", value1.Type(), path) }) diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/list.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/list.go index a633ed1e0..a0bc2f370 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/list.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/list.go @@ -15,6 +15,23 @@ type List struct { _ []struct{} } +// ApplyTerraform5AttributePathStep applies an AttributePathStep to a List, +// returning the Type found at that AttributePath within the List. If the +// AttributePathStep cannot be applied to the List, an ErrInvalidStep error +// will be returned. +func (l List) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + switch s := step.(type) { + case ElementKeyInt: + if int64(s) < 0 { + return nil, ErrInvalidStep + } + + return l.ElementType, nil + default: + return nil, ErrInvalidStep + } +} + // Equal returns true if the two Lists are exactly equal. Unlike Is, passing in // a List with no ElementType will always return false. func (l List) Equal(o Type) bool { @@ -70,9 +87,7 @@ func valueFromList(typ Type, in interface{}) (Value, error) { case []Value: var valType Type for pos, v := range value { - if v.Type().Is(DynamicPseudoType) && v.IsKnown() { - return Value{}, NewAttributePath().WithElementKeyInt(pos).NewErrorf("invalid value %s for %s", v, v.Type()) - } else if !v.Type().Is(DynamicPseudoType) && !v.Type().UsableAs(typ) { + if !v.Type().UsableAs(typ) { return Value{}, NewAttributePath().WithElementKeyInt(pos).NewErrorf("can't use %s as %s", v.Type(), typ) } if valType == nil { diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/map.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/map.go index 9188df88f..7c1e7d05a 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/map.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/map.go @@ -16,6 +16,19 @@ type Map struct { _ []struct{} } +// ApplyTerraform5AttributePathStep applies an AttributePathStep to a Map, +// returning the Type found at that AttributePath within the Map. If the +// AttributePathStep cannot be applied to the Map, an ErrInvalidStep error +// will be returned. +func (m Map) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + switch step.(type) { + case ElementKeyString: + return m.ElementType, nil + default: + return nil, ErrInvalidStep + } +} + // Equal returns true if the two Maps are exactly equal. Unlike Is, passing in // a Map with no ElementType will always return false. 
func (m Map) Equal(o Type) bool { @@ -89,9 +102,7 @@ func valueFromMap(typ Type, in interface{}) (Value, error) { var elType Type for _, k := range keys { v := value[k] - if v.Type().Is(DynamicPseudoType) && v.IsKnown() { - return Value{}, NewAttributePath().WithElementKeyString(k).NewErrorf("invalid value %s for %s", v, v.Type()) - } else if !v.Type().Is(DynamicPseudoType) && !v.Type().UsableAs(typ) { + if !v.Type().UsableAs(typ) { return Value{}, NewAttributePath().WithElementKeyString(k).NewErrorf("can't use %s as %s", v.Type(), typ) } if elType == nil { diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/object.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/object.go index 2c9dde76d..ce2bf1325 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/object.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/object.go @@ -24,6 +24,12 @@ type Object struct { // are considered part of the type signature, and their absence means a // value is no longer of that type. // + // OptionalAttributes is only valid when declaring a type constraint + // (e.g. Schema) and should not be used as part of a Type when creating + // a Value (e.g. NewValue()). When creating a Value, all OptionalAttributes + // must still be defined in the Object by setting each attribute to a null + // or known value for its attribute type. + // // The key of OptionalAttributes should be the name of the attribute // that is optional. The value should be an empty struct, used only to // indicate presence. @@ -38,6 +44,29 @@ type Object struct { _ []struct{} } +// ApplyTerraform5AttributePathStep applies an AttributePathStep to an Object, +// returning the Type found at that AttributePath within the Object. If the +// AttributePathStep cannot be applied to the Object, an ErrInvalidStep error +// will be returned. +func (o Object) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + switch s := step.(type) { + case AttributeName: + if len(o.AttributeTypes) == 0 { + return nil, ErrInvalidStep + } + + attrType, ok := o.AttributeTypes[string(s)] + + if !ok { + return nil, ErrInvalidStep + } + + return attrType, nil + default: + return nil, ErrInvalidStep + } +} + // Equal returns true if the two Objects are exactly equal. Unlike Is, passing // in an Object with no AttributeTypes will always return false. func (o Object) Equal(other Type) bool { @@ -183,9 +212,7 @@ func valueFromObject(types map[string]Type, optionalAttrs map[string]struct{}, i if v.Type() == nil { return Value{}, NewAttributePath().WithAttributeName(k).NewErrorf("missing value type") } - if v.Type().Is(DynamicPseudoType) && v.IsKnown() && !v.IsNull() { - return Value{}, NewAttributePath().WithAttributeName(k).NewErrorf("invalid value %s for %s", v, v.Type()) - } else if !v.Type().Is(DynamicPseudoType) && !v.Type().UsableAs(typ) { + if !v.Type().UsableAs(typ) { return Value{}, NewAttributePath().WithAttributeName(k).NewErrorf("can't use %s as %s", v.Type(), typ) } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/primitive.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/primitive.go index ff0f14e60..0349f4145 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/primitive.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/primitive.go @@ -37,6 +37,12 @@ type primitive struct { _ []struct{} } +// ApplyTerraform5AttributePathStep always returns an ErrInvalidStep error +// as it is invalid to step into a primitive. 
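+// The step argument is ignored: the primitives (String, Number, Bool, and
+// DynamicPseudoType) have no element or attribute types to descend into.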
+func (p primitive) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + return nil, ErrInvalidStep +} + func (p primitive) Equal(o Type) bool { v, ok := o.(primitive) if !ok { @@ -61,7 +67,7 @@ func (p primitive) UsableAs(t Type) bool { } func (p primitive) String() string { - return "tftypes." + string(p.name) + return "tftypes." + p.name } func (p primitive) private() {} @@ -102,7 +108,16 @@ func (p primitive) supportedGoTypes() []string { case Bool.name: return []string{"bool", "*bool"} case DynamicPseudoType.name: - return []string{"nil", "UnknownValue"} + // List/Set is covered by Tuple, Map is covered by Object + possibleTypes := []Type{ + String, Bool, Number, + Tuple{}, Object{}, + } + results := []string{} + for _, t := range possibleTypes { + results = append(results, t.supportedGoTypes()...) + } + return results } panic(fmt.Sprintf("unknown primitive type %q", p.name)) } @@ -346,3 +361,45 @@ func valueFromNumber(in interface{}) (Value, error) { return Value{}, fmt.Errorf("tftypes.NewValue can't use %T as a tftypes.Number; expected types are: %s", in, formattedSupportedGoTypes(Number)) } } + +func valueFromDynamicPseudoType(val interface{}) (Value, error) { + switch val := val.(type) { + case string, *string: + v, err := valueFromString(val) + if err != nil { + return Value{}, err + } + v.typ = DynamicPseudoType + return v, nil + case *big.Float, float64, *float64, int, *int, int8, *int8, int16, *int16, int32, *int32, int64, *int64, uint, *uint, uint8, *uint8, uint16, *uint16, uint32, *uint32, uint64, *uint64: + v, err := valueFromNumber(val) + if err != nil { + return Value{}, err + } + v.typ = DynamicPseudoType + return v, nil + case bool, *bool: + v, err := valueFromBool(val) + if err != nil { + return Value{}, err + } + v.typ = DynamicPseudoType + return v, nil + case map[string]Value: + v, err := valueFromObject(nil, nil, val) + if err != nil { + return Value{}, err + } + v.typ = DynamicPseudoType + return v, nil + case []Value: + v, err := valueFromTuple(nil, val) + if err != nil { + return Value{}, err + } + v.typ = DynamicPseudoType + return v, nil + default: + return Value{}, fmt.Errorf("tftypes.NewValue can't use %T as a tftypes.DynamicPseudoType; expected types are: %s", val, formattedSupportedGoTypes(DynamicPseudoType)) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/set.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/set.go index 3bd3e8a67..00163067e 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/set.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/set.go @@ -15,6 +15,19 @@ type Set struct { _ []struct{} } +// ApplyTerraform5AttributePathStep applies an AttributePathStep to a Set, +// returning the Type found at that AttributePath within the Set. If the +// AttributePathStep cannot be applied to the Set, an ErrInvalidStep error +// will be returned. +func (s Set) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + switch step.(type) { + case ElementKeyValue: + return s.ElementType, nil + default: + return nil, ErrInvalidStep + } +} + // Equal returns true if the two Sets are exactly equal. Unlike Is, passing in // a Set with no ElementType will always return false. 
func (s Set) Equal(o Type) bool { @@ -70,9 +83,7 @@ func valueFromSet(typ Type, in interface{}) (Value, error) { case []Value: var elType Type for _, v := range value { - if v.Type().Is(DynamicPseudoType) && v.IsKnown() { - return Value{}, NewAttributePath().WithElementKeyValue(v).NewErrorf("invalid value %s for %s", v, v.Type()) - } else if !v.Type().Is(DynamicPseudoType) && !v.Type().UsableAs(typ) { + if !v.Type().UsableAs(typ) { return Value{}, NewAttributePath().WithElementKeyValue(v).NewErrorf("can't use %s as %s", v.Type(), typ) } if elType == nil { diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/tuple.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/tuple.go index 088243bbd..5ca5709a3 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/tuple.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/tuple.go @@ -19,6 +19,23 @@ type Tuple struct { _ []struct{} } +// ApplyTerraform5AttributePathStep applies an AttributePathStep to a Tuple, +// returning the Type found at that AttributePath within the Tuple. If the +// AttributePathStep cannot be applied to the Tuple, an ErrInvalidStep error +// will be returned. +func (tu Tuple) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + switch s := step.(type) { + case ElementKeyInt: + if int64(s) < 0 || int64(s) >= int64(len(tu.ElementTypes)) { + return nil, ErrInvalidStep + } + + return tu.ElementTypes[int64(s)], nil + default: + return nil, ErrInvalidStep + } +} + // Equal returns true if the two Tuples are exactly equal. Unlike Is, passing // in a Tuple with no ElementTypes will always return false. func (tu Tuple) Equal(o Type) bool { @@ -109,9 +126,7 @@ func valueFromTuple(types []Type, in interface{}) (Value, error) { } for pos, v := range value { typ := types[pos] - if v.Type().Is(DynamicPseudoType) && v.IsKnown() { - return Value{}, NewAttributePath().WithElementKeyInt(pos).NewErrorf("invalid value %s for %s", v, v.Type()) - } else if !v.Type().Is(DynamicPseudoType) && !v.Type().UsableAs(typ) { + if !v.Type().UsableAs(typ) { return Value{}, NewAttributePath().WithElementKeyInt(pos).NewErrorf("can't use %s as %s", v.Type(), typ) } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/type.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/type.go index 955e322fc..4f0e4af7c 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/type.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/type.go @@ -12,6 +12,12 @@ import ( // implemented by the tftypes package. Types define the shape and // characteristics of data coming from or being sent to Terraform. type Type interface { + // AttributePathStepper requires each Type to implement the + // ApplyTerraform5AttributePathStep method, so Type is compatible with + // WalkAttributePath. The method should return the Type found at that + // AttributePath within the Type or ErrInvalidStep. + AttributePathStepper + // Is is used to determine what type a Type implementation is. It is // the recommended method for determining whether two types are // equivalent or not. 
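Reviewer note (not part of the patch): with Type now embedding AttributePathStepper, WalkAttributePath can resolve an AttributePath against a bare Type, with no Value in hand. A minimal sketch of that usage follows; it is illustrative only, and the attribute names are invented rather than taken from this patch.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	// An object type with a map attribute, mirroring a typical schema shape.
	objType := tftypes.Object{
		AttributeTypes: map[string]tftypes.Type{
			"tags": tftypes.Map{ElementType: tftypes.String},
		},
	}

	// Step into the "tags" attribute, then into an arbitrary map element.
	path := tftypes.NewAttributePath().
		WithAttributeName("tags").
		WithElementKeyString("environment")

	// WalkAttributePath returns the Type found at the path, or the steps
	// that remained when a step could not be applied, alongside the error.
	typeAtPath, remaining, err := tftypes.WalkAttributePath(objType, path)
	if err != nil {
		fmt.Println("error:", err, "remaining steps:", remaining)
		return
	}

	fmt.Printf("type at path: %s\n", typeAtPath) // tftypes.String
}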
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value.go index 6166cb01b..4d7b74989 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value.go @@ -2,14 +2,13 @@ package tftypes import ( "bytes" - "errors" "fmt" "math/big" "sort" "strconv" "strings" - "github.com/vmihailenco/msgpack" + msgpack "github.com/vmihailenco/msgpack/v4" ) // ValueConverter is an interface that provider-defined types can implement to @@ -296,10 +295,6 @@ func newValue(t Type, val interface{}) (Value, error) { }, nil } - if t.Is(DynamicPseudoType) { - return Value{}, errors.New("cannot have DynamicPseudoType with known value, DynamicPseudoType can only contain null or unknown values") - } - if creator, ok := val.(ValueCreator); ok { var err error val, err = creator.ToTerraform5Value() @@ -357,6 +352,12 @@ func newValue(t Type, val interface{}) (Value, error) { return Value{}, err } return v, nil + case t.Is(DynamicPseudoType): + v, err := valueFromDynamicPseudoType(val) + if err != nil { + return Value{}, err + } + return v, nil default: return Value{}, fmt.Errorf("unknown type %s passed to tftypes.NewValue", t) } @@ -439,7 +440,7 @@ func (val Value) As(dst interface{}) error { if !ok { return fmt.Errorf("can't unmarshal %s into %T, expected *big.Float", val.Type(), dst) } - target.Set(v) + target.Copy(v) return nil case **big.Float: if val.IsNull() { diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_json.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_json.go index c96401adf..308434776 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_json.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_json.go @@ -62,7 +62,7 @@ func jsonUnmarshal(buf []byte, typ Type, p *AttributePath) (Value, error) { return Value{}, p.NewErrorf("unknown type %s", typ) } -func jsonUnmarshalString(buf []byte, typ Type, p *AttributePath) (Value, error) { +func jsonUnmarshalString(buf []byte, _ Type, p *AttributePath) (Value, error) { dec := jsonByteDecoder(buf) tok, err := dec.Token() @@ -98,7 +98,7 @@ func jsonUnmarshalNumber(buf []byte, typ Type, p *AttributePath) (Value, error) } return NewValue(typ, f), nil case string: - f, _, err := big.ParseFloat(string(numTok), 10, 512, big.ToNearestEven) + f, _, err := big.ParseFloat(numTok, 10, 512, big.ToNearestEven) if err != nil { return Value{}, p.NewErrorf("error parsing number: %w", err) } @@ -107,7 +107,7 @@ func jsonUnmarshalNumber(buf []byte, typ Type, p *AttributePath) (Value, error) return Value{}, p.NewErrorf("unsupported type %T sent as %s", tok, Number) } -func jsonUnmarshalBool(buf []byte, typ Type, p *AttributePath) (Value, error) { +func jsonUnmarshalBool(buf []byte, _ Type, p *AttributePath) (Value, error) { dec := jsonByteDecoder(buf) tok, err := dec.Token() if err != nil { @@ -123,7 +123,7 @@ func jsonUnmarshalBool(buf []byte, typ Type, p *AttributePath) (Value, error) { case "false", "0": return NewValue(Bool, false), nil } - switch strings.ToLower(string(v)) { + switch strings.ToLower(v) { case "true": return Value{}, p.NewErrorf("to convert from string, use lowercase \"true\"") case "false": @@ -140,7 +140,7 @@ func jsonUnmarshalBool(buf []byte, typ Type, p *AttributePath) (Value, error) { return Value{}, p.NewErrorf("unsupported type %T sent as %s", tok, Bool) } -func jsonUnmarshalDynamicPseudoType(buf []byte, typ 
Type, p *AttributePath) (Value, error) { +func jsonUnmarshalDynamicPseudoType(buf []byte, _ Type, p *AttributePath) (Value, error) { dec := jsonByteDecoder(buf) tok, err := dec.Token() if err != nil { @@ -355,20 +355,8 @@ func jsonUnmarshalMap(buf []byte, attrType Type, p *AttributePath) (Value, error return Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim('}'), tok) } - elTyp := attrType - if attrType.Is(DynamicPseudoType) { - var elements []Value - for _, val := range vals { - elements = append(elements, val) - } - elTyp, err = TypeFromElements(elements) - if err != nil { - return Value{}, p.NewErrorf("invalid elements for map: %w", err) - } - } - return NewValue(Map{ - ElementType: elTyp, + ElementType: attrType, }, vals), nil } @@ -393,7 +381,6 @@ func jsonUnmarshalTuple(buf []byte, elementTypes []Type, p *AttributePath) (Valu // while generally in Go it's undesirable to treat empty and nil slices // separately, in this case we're surfacing a non-Go-in-origin // distinction, so we'll allow it. - types := []Type{} vals := []Value{} var idx int @@ -415,7 +402,6 @@ func jsonUnmarshalTuple(buf []byte, elementTypes []Type, p *AttributePath) (Valu if err != nil { return Value{}, err } - types = append(types, val.Type()) vals = append(vals, val) } @@ -432,7 +418,7 @@ func jsonUnmarshalTuple(buf []byte, elementTypes []Type, p *AttributePath) (Valu } return NewValue(Tuple{ - ElementTypes: types, + ElementTypes: elementTypes, }, vals), nil } @@ -447,7 +433,6 @@ func jsonUnmarshalObject(buf []byte, attrTypes map[string]Type, p *AttributePath return Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim('{'), tok) } - types := map[string]Type{} vals := map[string]Value{} for dec.More() { innerPath := p.WithElementKeyValue(NewValue(String, UnknownValue)) @@ -474,7 +459,6 @@ func jsonUnmarshalObject(buf []byte, attrTypes map[string]Type, p *AttributePath if err != nil { return Value{}, err } - types[key] = val.Type() vals[key] = val } @@ -494,6 +478,6 @@ func jsonUnmarshalObject(buf []byte, attrTypes map[string]Type, p *AttributePath } return NewValue(Object{ - AttributeTypes: types, + AttributeTypes: attrTypes, }, vals), nil } diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_msgpack.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_msgpack.go index 3f3db12ef..c4047aeab 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_msgpack.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_msgpack.go @@ -7,8 +7,8 @@ import ( "math/big" "sort" - "github.com/vmihailenco/msgpack" - msgpackCodes "github.com/vmihailenco/msgpack/codes" + msgpack "github.com/vmihailenco/msgpack/v4" + msgpackCodes "github.com/vmihailenco/msgpack/v4/codes" ) type msgPackUnknownType struct{} @@ -74,7 +74,7 @@ func msgpackUnmarshal(dec *msgpack.Decoder, typ Type, path *AttributePath) (Valu if err != nil { return Value{}, path.NewErrorf("couldn't decode number as int64: %w", err) } - return NewValue(Number, big.NewFloat(float64(rv))), nil + return NewValue(Number, new(big.Float).SetInt64(rv)), nil } switch peek { case msgpackCodes.Int8, msgpackCodes.Int16, msgpackCodes.Int32, msgpackCodes.Int64: @@ -82,19 +82,19 @@ func msgpackUnmarshal(dec *msgpack.Decoder, typ Type, path *AttributePath) (Valu if err != nil { return Value{}, path.NewErrorf("couldn't decode number as int64: %w", err) } - return NewValue(Number, big.NewFloat(float64(rv))), nil + return NewValue(Number, new(big.Float).SetInt64(rv)), nil case 
msgpackCodes.Uint8, msgpackCodes.Uint16, msgpackCodes.Uint32, msgpackCodes.Uint64: rv, err := dec.DecodeUint64() if err != nil { return Value{}, path.NewErrorf("couldn't decode number as uint64: %w", err) } - return NewValue(Number, big.NewFloat(float64(rv))), nil + return NewValue(Number, new(big.Float).SetUint64(rv)), nil case msgpackCodes.Float, msgpackCodes.Double: rv, err := dec.DecodeFloat64() if err != nil { return Value{}, path.NewErrorf("couldn't decode number as float64: %w", err) } - return NewValue(Number, big.NewFloat(float64(rv))), nil + return NewValue(Number, big.NewFloat(rv)), nil default: rv, err := dec.DecodeString() if err != nil { @@ -239,24 +239,8 @@ func msgpackUnmarshalMap(dec *msgpack.Decoder, typ Type, path *AttributePath) (V vals[key] = val } - elTyp := typ - - if typ.Is(DynamicPseudoType) { - var elements []Value - - for _, val := range vals { - elements = append(elements, val) - } - - elTyp, err = TypeFromElements(elements) - - if err != nil { - return Value{}, path.NewErrorf("invalid elements for map: %w", err) - } - } - return NewValue(Map{ - ElementType: elTyp, + ElementType: typ, }, vals), nil } @@ -275,7 +259,6 @@ func msgpackUnmarshalTuple(dec *msgpack.Decoder, types []Type, path *AttributePa return Value{}, path.NewErrorf("error decoding tuple; expected %d items, got %d", len(types), length) } - elTypes := make([]Type, 0, length) vals := make([]Value, 0, length) for i := 0; i < length; i++ { innerPath := path.WithElementKeyInt(i) @@ -284,12 +267,11 @@ func msgpackUnmarshalTuple(dec *msgpack.Decoder, types []Type, path *AttributePa if err != nil { return Value{}, err } - elTypes = append(elTypes, val.Type()) vals = append(vals, val) } return NewValue(Tuple{ - ElementTypes: elTypes, + ElementTypes: types, }, vals), nil } @@ -308,7 +290,6 @@ func msgpackUnmarshalObject(dec *msgpack.Decoder, types map[string]Type, path *A return Value{}, path.NewErrorf("error decoding object; expected %d attributes, got %d", len(types), length) } - attrTypes := make(map[string]Type, length) vals := make(map[string]Value, length) for i := 0; i < length; i++ { key, err := dec.DecodeString() @@ -324,12 +305,11 @@ func msgpackUnmarshalObject(dec *msgpack.Decoder, types map[string]Type, path *A if err != nil { return Value{}, err } - attrTypes[key] = val.Type() vals[key] = val } return NewValue(Object{ - AttributeTypes: attrTypes, + AttributeTypes: types, }, vals), nil } @@ -397,7 +377,7 @@ func marshalMsgPack(val Value, typ Type, p *AttributePath, enc *msgpack.Encoder) return fmt.Errorf("unknown type %s", typ) } -func marshalMsgPackDynamicPseudoType(val Value, typ Type, p *AttributePath, enc *msgpack.Encoder) error { +func marshalMsgPackDynamicPseudoType(val Value, _ Type, p *AttributePath, enc *msgpack.Encoder) error { typeJSON, err := val.Type().MarshalJSON() if err != nil { return p.NewErrorf("error generating JSON for type %s: %w", val.Type(), err) diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/fieldutils/field_maps.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/fieldutils/field_maps.go new file mode 100644 index 000000000..ac78d6da7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/fieldutils/field_maps.go @@ -0,0 +1,45 @@ +package fieldutils + +// MergeFieldMaps takes a slice of field maps, +// and merges all the key/value pairs into a new single field map. 
+// +// Input order matters: in case two or more maps use the same key, +// the last one to set that key will have the corresponding value +// persisted. +func MergeFieldMaps(maps ...map[string]interface{}) map[string]interface{} { + // Pre-allocate a map to merge all the maps into, + // that has at least the capacity equivalent to the number + // of maps to merge + result := make(map[string]interface{}, len(maps)) + + // Merge all the maps into one; + // in case of clash, only the last key is preserved + for _, m := range maps { + for k, v := range m { + result[k] = v + } + } + + return result +} + +// FieldMapsToKeys will extract all the field maps keys, avoiding repetitions +// in case two or more maps contained the same key. +func FieldMapsToKeys(maps ...map[string]interface{}) []string { + switch len(maps) { + case 0: + return nil + case 1: + result := make([]string, 0, len(maps[0])) + + for k := range maps[0] { + result = append(result, k) + } + + return result + default: + // As we merge all maps into one, we can use this + // same function recursively, falling back on the `switch case 1`. + return FieldMapsToKeys(MergeFieldMaps(maps...)) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/hclogutils/args.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/hclogutils/args.go new file mode 100644 index 000000000..44c81ab8e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/hclogutils/args.go @@ -0,0 +1,26 @@ +package hclogutils + +import ( + "github.com/hashicorp/terraform-plugin-log/internal/fieldutils" +) + +// FieldMapsToArgs will shallow merge field maps into a slice of key/value pairs +// arguments (i.e. `[k1, v1, k2, v2, ...]`) expected by hc-log.Logger methods. +func FieldMapsToArgs(maps ...map[string]interface{}) []interface{} { + switch len(maps) { + case 0: + return nil + case 1: + result := make([]interface{}, 0, len(maps[0])*2) + + for k, v := range maps[0] { + result = append(result, k, v) + } + + return result + default: + // As we merge all maps into one, we can use this + // same function recursively, falling back on the `switch case 1`. + return FieldMapsToArgs(fieldutils.MergeFieldMaps(maps...)) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/hclogutils/logger_options.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/hclogutils/logger_options.go new file mode 100644 index 000000000..a0ec34e20 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/hclogutils/logger_options.go @@ -0,0 +1,29 @@ +package hclogutils + +import ( + "github.com/hashicorp/go-hclog" +) + +// LoggerOptionsCopy will safely copy LoggerOptions. Manually implemented +// to save importing a dependency such as github.com/mitchellh/copystructure. 
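+//
+// Note that this is a shallow copy: pointer- and interface-valued fields
+// such as Mutex and Output still refer to the same underlying objects as src.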
+func LoggerOptionsCopy(src *hclog.LoggerOptions) *hclog.LoggerOptions { + if src == nil { + return nil + } + + return &hclog.LoggerOptions{ + AdditionalLocationOffset: src.AdditionalLocationOffset, + Color: src.Color, + DisableTime: src.DisableTime, + Exclude: src.Exclude, + IncludeLocation: src.IncludeLocation, + IndependentLevels: src.IndependentLevels, + JSONFormat: src.JSONFormat, + Level: src.Level, + Mutex: src.Mutex, + Name: src.Name, + Output: src.Output, + TimeFormat: src.TimeFormat, + TimeFn: src.TimeFn, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/filtering.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/filtering.go new file mode 100644 index 000000000..9e7867f9e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/filtering.go @@ -0,0 +1,104 @@ +package logging + +import ( + "strings" + + "github.com/hashicorp/terraform-plugin-log/internal/fieldutils" + "github.com/hashicorp/terraform-plugin-log/internal/hclogutils" +) + +const logMaskingReplacementString = "***" + +// ShouldOmit takes a log's *string message and slices of fields, +// and determines, based on the LoggerOpts configuration, if the +// log should be omitted (i.e. prevent it to be printed on the final writer). +func (lo LoggerOpts) ShouldOmit(msg *string, fieldMaps ...map[string]interface{}) bool { + // Omit log if any of the configured keys is found in the given fields + if len(lo.OmitLogWithFieldKeys) > 0 { + fieldsKeys := fieldutils.FieldMapsToKeys(fieldMaps...) + if argKeysContain(fieldsKeys, lo.OmitLogWithFieldKeys) { + return true + } + } + + // Omit log if any of the configured regexp matches the log message + if len(lo.OmitLogWithMessageRegexes) > 0 { + for _, r := range lo.OmitLogWithMessageRegexes { + if r.MatchString(*msg) { + return true + } + } + } + + // Omit log if any of the configured strings is contained in the log message + if len(lo.OmitLogWithMessageStrings) > 0 { + for _, s := range lo.OmitLogWithMessageStrings { + if strings.Contains(*msg, s) { + return true + } + } + } + + return false +} + +// ApplyMask takes a log's *string message and slices of fields, +// and applies masking to fields keys' values and/or to log message, +// based on the LoggerOpts configuration. +// +// Note that the given input is changed-in-place by this method. +func (lo LoggerOpts) ApplyMask(msg *string, fieldMaps ...map[string]interface{}) { + if len(lo.MaskFieldValuesWithFieldKeys) > 0 { + for _, k := range lo.MaskFieldValuesWithFieldKeys { + for _, f := range fieldMaps { + for fk := range f { + if k == fk { + f[k] = logMaskingReplacementString + } + } + } + } + } + + // Replace any part of the log message matching any of the configured regexp, + // with a masking replacement string + if len(lo.MaskMessageRegexes) > 0 { + for _, r := range lo.MaskMessageRegexes { + *msg = r.ReplaceAllString(*msg, logMaskingReplacementString) + } + } + + // Replace any part of the log message equal to any of the configured strings, + // with a masking replacement string + if len(lo.MaskMessageStrings) > 0 { + for _, s := range lo.MaskMessageStrings { + *msg = strings.ReplaceAll(*msg, s, logMaskingReplacementString) + } + } +} + +func OmitOrMask(tfLoggerOpts LoggerOpts, msg *string, additionalFields []map[string]interface{}) ([]interface{}, bool) { + additionalFieldsMap := fieldutils.MergeFieldMaps(additionalFields...) 
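+	// Call-site fields are merged once up front so the omission and masking
+	// checks below see the same effective key/value set that the final log
+	// line will carry; later maps win on key clashes (see MergeFieldMaps).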
+ + // Apply the provider root LoggerOpts to determine if this log should be omitted + if tfLoggerOpts.ShouldOmit(msg, tfLoggerOpts.Fields, additionalFieldsMap) { + return nil, true + } + + // Apply the provider root LoggerOpts to apply masking to this log + tfLoggerOpts.ApplyMask(msg, tfLoggerOpts.Fields, additionalFieldsMap) + + return hclogutils.FieldMapsToArgs(tfLoggerOpts.Fields, additionalFieldsMap), false +} + +func argKeysContain(haystack []string, needles []string) bool { + for _, h := range haystack { + for _, n := range needles { + if n == h { + return true + } + } + } + + return false +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/log.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/log.go index 8c0ef20f4..4aa80bb98 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/log.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/log.go @@ -5,6 +5,14 @@ import ( "os" ) +const ( + // DefaultProviderRootLoggerName is the default provider root logger name. + DefaultProviderRootLoggerName string = "provider" + + // DefaultSDKRootLoggerName is the default SDK root logger name. + DefaultSDKRootLoggerName string = "sdk" +) + // loggerKey defines context keys for locating loggers in context.Context // it's a private type to make sure no other packages can override the key type loggerKey string @@ -14,15 +22,77 @@ const ( // logger for writing logs from within provider code. ProviderRootLoggerKey loggerKey = "provider" + // ProviderRootLoggerOptionsKey is the loggerKey that will hold the root + // logger options when the root provider logger is created. This is to + // assist creating subsystem loggers, as most options cannot be fetched and + // a logger does not provide set methods for these options. + ProviderRootLoggerOptionsKey loggerKey = "provider-options" + // SDKRootLoggerKey is the loggerKey that will hold the root logger for // writing logs from within SDKs. SDKRootLoggerKey loggerKey = "sdk" + // SDKRootLoggerOptionsKey is the loggerKey that will hold the root + // logger options when the SDK root logger is created. This is to + // assist creating subsystem loggers, as most options cannot be fetched and + // a logger does not provide set methods for these options. + SDKRootLoggerOptionsKey loggerKey = "sdk-options" + // SinkKey is the loggerKey that will hold the logging sink used for // test frameworks. SinkKey loggerKey = "" + + // SinkOptionsKey is the loggerKey that will hold the sink + // logger options when the sink logger is created. This is to + // assist creating subsystem loggers, as most options cannot be fetched and + // a logger does not provide set methods for these options. + SinkOptionsKey loggerKey = "sink-options" + + // TFLoggerOpts is the loggerKey that will hold the LoggerOpts associated + // with the provider root logger (at `provider.tf-logger-opts`), and the + // provider sub-system logger (at `provider.SUBSYSTEM.tf-logger-opts`), + // in the context.Context. + // Note that only some LoggerOpts need to be stored this way, + // while others use the underlying *hclog.LoggerOptions of hclog.Logger. + TFLoggerOpts loggerKey = "tf-logger-opts" )
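// For orientation, the helpers below derive composite context keys by
// concatenating the constants above. A sketch of the values produced for a
// hypothetical "http" subsystem:
//
//   providerSubsystemLoggerKey("http")       == loggerKey("provider.http")
//   providerRootTFLoggerOptsKey()            == loggerKey("provider.tf-logger-opts")
//   providerSubsystemTFLoggerOptsKey("http") == loggerKey("provider.http.tf-logger-opts")
//   sdkSubsystemTFLoggerOptsKey("http")      == loggerKey("sdk.http.tf-logger-opts")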
+// providerSubsystemLoggerKey is the loggerKey that will hold the subsystem logger +// for writing logs from within a provider subsystem. +func providerSubsystemLoggerKey(subsystem string) loggerKey { + return ProviderRootLoggerKey + loggerKey("."+subsystem) +} + +// providerRootTFLoggerOptsKey is the loggerKey that will hold +// the LoggerOpts of the provider. +func providerRootTFLoggerOptsKey() loggerKey { + return ProviderRootLoggerKey + "." + TFLoggerOpts +} + +// providerSubsystemTFLoggerOptsKey is the loggerKey that will hold +// the LoggerOpts of a provider subsystem. +func providerSubsystemTFLoggerOptsKey(subsystem string) loggerKey { + return providerSubsystemLoggerKey(subsystem) + "." + TFLoggerOpts +} + +// sdkSubsystemLoggerKey is the loggerKey that will hold the subsystem logger +// for writing logs from within an SDK subsystem. +func sdkSubsystemLoggerKey(subsystem string) loggerKey { + return SDKRootLoggerKey + loggerKey("."+subsystem) +} + +// sdkRootTFLoggerOptsKey is the loggerKey that will hold +// the LoggerOpts of the SDK. +func sdkRootTFLoggerOptsKey() loggerKey { + return SDKRootLoggerKey + "." + TFLoggerOpts +} + +// sdkSubsystemTFLoggerOptsKey is the loggerKey that will hold +// the LoggerOpts of an SDK subsystem. +func sdkSubsystemTFLoggerOptsKey(subsystem string) loggerKey { + return sdkSubsystemLoggerKey(subsystem) + "." + TFLoggerOpts +} + var ( // Stderr caches the original os.Stderr when the process is started. // diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/options.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/options.go index fe1e0762d..1f6ef0326 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/options.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/options.go @@ -3,6 +3,7 @@ package logging import ( "io" "os" + "regexp" "github.com/hashicorp/go-hclog" ) @@ -22,6 +23,11 @@ type LoggerOpts struct { // of the logging statement or not. IncludeLocation bool + // AdditionalLocationOffset is the number of additional stack levels to + // skip when finding the file and line information for the log line. + // Defaults to 1 to account for the tflog and tfsdklog logging functions. + AdditionalLocationOffset int + // Output dictates where logs are written to. Output should only ever // be set by tflog or tfsdklog, never by SDK authors or provider // developers. Where logs get written to is complex and delicate and @@ -29,22 +35,109 @@ type LoggerOpts struct { // easy to mess up on accident. Output io.Writer - // IncludeTime indicates whether logs should incude the time they were + // IncludeTime indicates whether logs should include the time they were // written or not. It should only be set to true when testing tflog or // tfsdklog; providers and SDKs should always include the time logs // were written as part of the log. IncludeTime bool + + // Fields indicates the key/value pairs to be added to each log output. + Fields map[string]interface{} + + // IncludeRootFields indicates whether a new subsystem logger should + // copy existing fields from the root logger. This is only performed + // at the time of new subsystem creation. + IncludeRootFields bool + + // OmitLogWithFieldKeys indicates that the logger should omit writing + // any log when any of the given keys is found within the fields. + // + // Example: + // + // OmitLogWithFieldKeys = `['foo', 'baz']` + // + // log1 = `{ msg = "...", fields = { 'foo', '...', 'bar', '...' }` -> omitted + // log2 = `{ msg = "...", fields = { 'bar', '...' }` -> printed
+ // log3 = `{ msg = "...", fields = { 'baz', '...', 'boo', '...' }` -> omitted + // + OmitLogWithFieldKeys []string + + // OmitLogWithMessageRegexes indicates that the logger should omit writing + // any log that matches any of the given *regexp.Regexp. + // + // Example: + // + // OmitLogWithMessageRegexes = `[regexp.MustCompile("(foo|bar)")]` + // + // log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted + // log2 = `{ msg = "pineapple mango", fields = {...}` -> printed + // log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted + // + OmitLogWithMessageRegexes []*regexp.Regexp + + // OmitLogWithMessageStrings indicates that the logger should omit writing + // any log that matches any of the given strings. + // + // Example: + // + // OmitLogWithMessageStrings = `['foo', 'bar']` + // + // log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted + // log2 = `{ msg = "pineapple mango", fields = {...}` -> printed + // log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted + // + OmitLogWithMessageStrings []string + + // MaskFieldValuesWithFieldKeys indicates that the logger should mask with asterisks (`***`) + // any field value where the key matches one of the given keys. + // + // Example: + // + // MaskFieldValuesWithFieldKeys = `['foo', 'baz']` + // + // log1 = `{ msg = "...", fields = { 'foo', '***', 'bar', '...' }` -> masked value + // log2 = `{ msg = "...", fields = { 'bar', '...' }` -> as-is value + // log3 = `{ msg = "...", fields = { 'baz', '***', 'boo', '...' }` -> masked value + // + MaskFieldValuesWithFieldKeys []string + + // MaskMessageRegexes indicates that the logger should replace, within + // a log message, the portion matching one of the given *regexp.Regexp. + // + // Example: + // + // MaskMessageRegexes = `[regexp.MustCompile("(foo|bar)")]` + // + // log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion + // log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is + // log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion + // + MaskMessageRegexes []*regexp.Regexp + + // MaskMessageStrings indicates that the logger should replace, within + // a log message, the portion matching one of the given strings. + // + // Example: + // + // MaskMessageStrings = `['foo', 'bar']` + // + // log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion + // log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is + // log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion + // + MaskMessageStrings []string } // ApplyLoggerOpts generates a LoggerOpts out of a list of Option -// implementations. By default, IncludeLocation is true, IncludeTime is true, -// and Output is os.Stderr. +// implementations. By default, AdditionalLocationOffset is 1, IncludeLocation +// is true, IncludeTime is true, and Output is os.Stderr. func ApplyLoggerOpts(opts ...Option) LoggerOpts { // set some defaults l := LoggerOpts{ - IncludeLocation: true, - IncludeTime: true, - Output: os.Stderr, + AdditionalLocationOffset: 1, + IncludeLocation: true, + IncludeTime: true, + Output: os.Stderr, } for _, opt := range opts { l = opt(l) @@ -52,6 +145,18 @@ func ApplyLoggerOpts(opts ...Option) LoggerOpts { return l }
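// To illustrate how ApplyLoggerOpts composes the Option implementations
// defined below, a small sketch (the field key and value are invented):
//
//   opts := ApplyLoggerOpts(
//       WithoutLocation(),
//       WithField("hypothetical_key", 42),
//   )
//   // opts.IncludeLocation == false   (overridden by WithoutLocation)
//   // opts.IncludeTime == true        (default preserved)
//   // opts.Output == os.Stderr        (default preserved)
//   // opts.Fields["hypothetical_key"] == 42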
+// WithAdditionalLocationOffset sets the WithAdditionalLocationOffset +// configuration option, allowing implementations to fix location information +// when implementing helper functions. The default offset of 1 is automatically +// added to the provided value to account for the tflog and tfsdklog logging +// functions. +func WithAdditionalLocationOffset(additionalLocationOffset int) Option { + return func(l LoggerOpts) LoggerOpts { + l.AdditionalLocationOffset = additionalLocationOffset + 1 + return l + } +} + // WithOutput sets the Output configuration option, controlling where logs get // written to. This is mostly used for testing (to write to os.Stdout, so the // test framework can compare it against the example output) and as a helper @@ -63,6 +168,62 @@ func WithOutput(output io.Writer) Option { } } +// WithField sets the provided key/value pair onto the LoggerOpts.Fields field. +// +// Behind the scenes, fields are stored in a map[string]interface{}: +// this means that in case the same key is used multiple times (key collision), +// the last one set is the one that gets persisted and then outputted with the logs. +func WithField(key string, value interface{}) Option { + return func(l LoggerOpts) LoggerOpts { + // Lazily create this map, on first assignment + if l.Fields == nil { + l.Fields = make(map[string]interface{}) + } + + l.Fields[key] = value + return l + } +} + +// WithFields sets all the provided key/value pairs onto the LoggerOpts.Fields field. +// +// Behind the scenes, fields are stored in a map[string]interface{}: +// this means that in case the same key is used multiple times (key collision), +// the last one set is the one that gets persisted and then outputted with the logs. +func WithFields(fields map[string]interface{}) Option { + return func(l LoggerOpts) LoggerOpts { + // Lazily create this map, on first assignment + if l.Fields == nil { + l.Fields = make(map[string]interface{}) + } + + for k, v := range fields { + l.Fields[k] = v + } + + return l + } +} + +// WithRootFields enables the copying of root logger fields to a new subsystem +// logger during creation. +func WithRootFields() Option { + return func(l LoggerOpts) LoggerOpts { + l.IncludeRootFields = true + return l + } +} + +// WithoutLocation disables the location included with logging statements. It +// should only ever be used to make log output deterministic when testing +// terraform-plugin-log. +func WithoutLocation() Option { + return func(l LoggerOpts) LoggerOpts { + l.IncludeLocation = false + return l + } +} + // WithoutTimestamp disables the timestamp included with logging statements. It // should only ever be used to make log output deterministic when testing // terraform-plugin-log. @@ -72,3 +233,51 @@ func WithoutTimestamp() Option { return l } } + +// WithOmitLogWithFieldKeys appends keys to the LoggerOpts.OmitLogWithFieldKeys field. +func WithOmitLogWithFieldKeys(keys ...string) Option { + return func(l LoggerOpts) LoggerOpts { + l.OmitLogWithFieldKeys = append(l.OmitLogWithFieldKeys, keys...) + return l + } +} + +// WithOmitLogWithMessageRegexes appends *regexp.Regexp to the LoggerOpts.OmitLogWithMessageRegexes field. +func WithOmitLogWithMessageRegexes(expressions ...*regexp.Regexp) Option { + return func(l LoggerOpts) LoggerOpts { + l.OmitLogWithMessageRegexes = append(l.OmitLogWithMessageRegexes, expressions...) + return l + } +} + +// WithOmitLogWithMessageStrings appends strings to the LoggerOpts.OmitLogWithMessageStrings field. +func WithOmitLogWithMessageStrings(matchingStrings ...string) Option { + return func(l LoggerOpts) LoggerOpts { + l.OmitLogWithMessageStrings = append(l.OmitLogWithMessageStrings, matchingStrings...) + return l + } +}
+ +// WithMaskFieldValuesWithFieldKeys appends keys to the LoggerOpts.MaskFieldValuesWithFieldKeys field. +func WithMaskFieldValuesWithFieldKeys(keys ...string) Option { + return func(l LoggerOpts) LoggerOpts { + l.MaskFieldValuesWithFieldKeys = append(l.MaskFieldValuesWithFieldKeys, keys...) + return l + } +} + +// WithMaskMessageRegexes appends *regexp.Regexp to the LoggerOpts.MaskMessageRegexes field. +func WithMaskMessageRegexes(expressions ...*regexp.Regexp) Option { + return func(l LoggerOpts) LoggerOpts { + l.MaskMessageRegexes = append(l.MaskMessageRegexes, expressions...) + return l + } +} + +// WithMaskMessageStrings appends strings to the LoggerOpts.MaskMessageStrings field. +func WithMaskMessageStrings(matchingStrings ...string) Option { + return func(l LoggerOpts) LoggerOpts { + l.MaskMessageStrings = append(l.MaskMessageStrings, matchingStrings...) + return l + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/provider.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/provider.go index 897393c5d..0e6e8f50f 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/provider.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/provider.go @@ -16,12 +16,41 @@ func GetProviderRootLogger(ctx context.Context) hclog.Logger { return logger.(hclog.Logger) } +// GetProviderRootLoggerOptions returns the root logger options used for +// creating the root provider logger. If the root logger has not been created +// or the options are not present, it will return nil. +func GetProviderRootLoggerOptions(ctx context.Context) *hclog.LoggerOptions { + if GetProviderRootLogger(ctx) == nil { + return nil + } + + loggerOptionsRaw := ctx.Value(ProviderRootLoggerOptionsKey) + + if loggerOptionsRaw == nil { + return nil + } + + loggerOptions, ok := loggerOptionsRaw.(*hclog.LoggerOptions) + + if !ok { + return nil + } + + return loggerOptions +} + // SetProviderRootLogger sets `logger` as the root logger used for writing // logs from a provider. func SetProviderRootLogger(ctx context.Context, logger hclog.Logger) context.Context { return context.WithValue(ctx, ProviderRootLoggerKey, logger) } +// SetProviderRootLoggerOptions sets `loggerOptions` as the root logger options +// used for creating the provider root logger. +func SetProviderRootLoggerOptions(ctx context.Context, loggerOptions *hclog.LoggerOptions) context.Context { + return context.WithValue(ctx, ProviderRootLoggerOptionsKey, loggerOptions) +} + // NewProviderSubsystemLoggerWarning is the text included in log output when a // subsystem is auto-generated by terraform-plugin-log because it was used // before the provider instantiated it. @@ -31,7 +60,7 @@ const NewProviderSubsystemLoggerWarning = "This log was generated by a subsystem // subsystem in provider space. If no such subsystem logger has been created, // it will return nil. func GetProviderSubsystemLogger(ctx context.Context, subsystem string) hclog.Logger { - logger := ctx.Value(ProviderRootLoggerKey + loggerKey("."+subsystem)) + logger := ctx.Value(providerSubsystemLoggerKey(subsystem)) if logger == nil { return nil } @@ -41,5 +70,37 @@ func GetProviderSubsystemLogger(ctx context.Context, subsystem string) hclog.Log // SetProviderSubsystemLogger sets `logger` as the logger for the named // subsystem in provider space.
func SetProviderSubsystemLogger(ctx context.Context, subsystem string, logger hclog.Logger) context.Context { - return context.WithValue(ctx, ProviderRootLoggerKey+loggerKey("."+subsystem), logger) + return context.WithValue(ctx, providerSubsystemLoggerKey(subsystem), logger) +} + +// GetProviderRootTFLoggerOpts retrieves the LoggerOpts of the provider root logger. +// The value is stored in the context.Context: if none is found, a new one will be created. +func GetProviderRootTFLoggerOpts(ctx context.Context) LoggerOpts { + lOpts, ok := ctx.Value(providerRootTFLoggerOptsKey()).(LoggerOpts) + if !ok { + lOpts = LoggerOpts{} + } + + return lOpts +} + +// SetProviderRootTFLoggerOpts sets the LoggerOpts of the provider root logger, in the context. +func SetProviderRootTFLoggerOpts(ctx context.Context, lOpts LoggerOpts) context.Context { + return context.WithValue(ctx, providerRootTFLoggerOptsKey(), lOpts) +} + +// GetProviderSubsystemTFLoggerOpts retrieves the LoggerOpts of the logger for the named provider subsystem. +// The value is stored in the context.Context: if none is found, a new one will be created. +func GetProviderSubsystemTFLoggerOpts(ctx context.Context, subsystem string) LoggerOpts { + lOpts, ok := ctx.Value(providerSubsystemTFLoggerOptsKey(subsystem)).(LoggerOpts) + if !ok { + lOpts = LoggerOpts{} + } + + return lOpts +} + +// SetProviderSubsystemTFLoggerOpts sets the LoggerOpts of the logger for the named provider subsystem, in the context. +func SetProviderSubsystemTFLoggerOpts(ctx context.Context, subsystem string, lOpts LoggerOpts) context.Context { + return context.WithValue(ctx, providerSubsystemTFLoggerOptsKey(subsystem), lOpts) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sdk.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sdk.go index 99c577a9d..034e92827 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sdk.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sdk.go @@ -16,12 +16,57 @@ func GetSDKRootLogger(ctx context.Context) hclog.Logger { return logger.(hclog.Logger) } +// GetSDKRootLoggerOptions returns the root logger options used for +// creating the root SDK logger. If the root logger has not been created or +// the options are not present, it will return nil. +func GetSDKRootLoggerOptions(ctx context.Context) *hclog.LoggerOptions { + if GetSDKRootLogger(ctx) == nil { + return nil + } + + loggerOptionsRaw := ctx.Value(SDKRootLoggerOptionsKey) + + if loggerOptionsRaw == nil { + return nil + } + + loggerOptions, ok := loggerOptionsRaw.(*hclog.LoggerOptions) + + if !ok { + return nil + } + + return loggerOptions +} + // SetSDKRootLogger sets `logger` as the root logger used for writing logs from // an SDK. func SetSDKRootLogger(ctx context.Context, logger hclog.Logger) context.Context { return context.WithValue(ctx, SDKRootLoggerKey, logger) } +// SetSDKRootLoggerOptions sets `loggerOptions` as the root logger options +// used for creating the SDK root logger. +func SetSDKRootLoggerOptions(ctx context.Context, loggerOptions *hclog.LoggerOptions) context.Context { + return context.WithValue(ctx, SDKRootLoggerOptionsKey, loggerOptions) +} + +// GetSDKRootTFLoggerOpts retrieves the LoggerOpts of the SDK root logger. +// The value is stored in the context.Context: if none is found, a new one will be created. 
+func GetSDKRootTFLoggerOpts(ctx context.Context) LoggerOpts { + lOpts, ok := ctx.Value(sdkRootTFLoggerOptsKey()).(LoggerOpts) + if !ok { + lOpts = LoggerOpts{} + } + + return lOpts +} + +// SetSDKRootTFLoggerOpts sets the LoggerOpts of the SDK root logger, in the context. +func SetSDKRootTFLoggerOpts(ctx context.Context, lOpts LoggerOpts) context.Context { + return context.WithValue(ctx, sdkRootTFLoggerOptsKey(), lOpts) +} + // NewSDKSubsystemLoggerWarning is the text included in log output when a // subsystem is auto-generated by terraform-plugin-log because it was used // before the SDK instantiated it. @@ -31,7 +76,7 @@ const NewSDKSubsystemLoggerWarning = "This log was generated by an SDK subsystem // in SDK space. If no such subsystem logger has been created, it will return // nil. func GetSDKSubsystemLogger(ctx context.Context, subsystem string) hclog.Logger { - logger := ctx.Value(SDKRootLoggerKey + loggerKey("."+subsystem)) + logger := ctx.Value(sdkSubsystemLoggerKey(subsystem)) if logger == nil { return nil } @@ -41,5 +86,21 @@ func GetSDKSubsystemLogger(ctx context.Context, subsystem string) hclog.Logger { // SetSDKSubsystemLogger sets `logger` as the logger for the named subsystem in // SDK space. func SetSDKSubsystemLogger(ctx context.Context, subsystem string, logger hclog.Logger) context.Context { - return context.WithValue(ctx, SDKRootLoggerKey+loggerKey("."+subsystem), logger) + return context.WithValue(ctx, sdkSubsystemLoggerKey(subsystem), logger) +} + +// GetSDKSubsystemTFLoggerOpts retrieves the LoggerOpts of the logger for the named SDK subsystem. +// The value is stored in the context.Context: if none is found, a new one will be created. +func GetSDKSubsystemTFLoggerOpts(ctx context.Context, subsystem string) LoggerOpts { + lOpts, ok := ctx.Value(sdkSubsystemTFLoggerOptsKey(subsystem)).(LoggerOpts) + if !ok { + lOpts = LoggerOpts{} + } + + return lOpts +} + +// SetSDKSubsystemTFLoggerOpts sets the LoggerOpts of the logger for the named SDK subsystem, in the context. +func SetSDKSubsystemTFLoggerOpts(ctx context.Context, subsystem string, lOpts LoggerOpts) context.Context { + return context.WithValue(ctx, sdkSubsystemTFLoggerOptsKey(subsystem), lOpts) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sink.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sink.go new file mode 100644 index 000000000..d48f3bf4c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sink.go @@ -0,0 +1,51 @@ +package logging + +import ( + "context" + + "github.com/hashicorp/go-hclog" +) + +// GetSink returns the sink logger used for writing logs. +// If no sink logger has been created, it will return nil. +func GetSink(ctx context.Context) hclog.Logger { + logger := ctx.Value(SinkKey) + if logger == nil { + return nil + } + return logger.(hclog.Logger) +} + +// GetSinkOptions returns the sink logger options used for +// creating the sink logger. If the sink logger has not been created or +// the options are not present, it will return nil. +func GetSinkOptions(ctx context.Context) *hclog.LoggerOptions { + if GetSink(ctx) == nil { + return nil + } + + loggerOptionsRaw := ctx.Value(SinkOptionsKey) + + if loggerOptionsRaw == nil { + return nil + } + + loggerOptions, ok := loggerOptionsRaw.(*hclog.LoggerOptions) + + if !ok { + return nil + } + + return loggerOptions +}
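// A sketch of the intended round trip (the option values are illustrative):
// GetSinkOptions deliberately returns nil unless the sink logger itself has
// also been stored via SetSink.
//
//   opts := &hclog.LoggerOptions{Name: "sink"}
//   ctx = SetSink(ctx, hclog.New(opts))
//   ctx = SetSinkOptions(ctx, opts)
//   _ = GetSinkOptions(ctx) // now returns opts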
+ +// SetSink sets `logger` as the sink logger used for writing logs. +func SetSink(ctx context.Context, logger hclog.Logger) context.Context { + return context.WithValue(ctx, SinkKey, logger) +} + +// SetSinkOptions sets `loggerOptions` as the sink logger options +// used for creating the sink logger. +func SetSinkOptions(ctx context.Context, loggerOptions *hclog.LoggerOptions) context.Context { + return context.WithValue(ctx, SinkOptionsKey, loggerOptions) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tflog/options.go b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/options.go index 1b61eb55f..750177812 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/tflog/options.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/options.go @@ -12,6 +12,14 @@ import ( // to NewSubsystem prior to calling it. type Options []logging.Option +// WithAdditionalLocationOffset returns an option that allows implementations +// to fix location information when implementing helper functions. The default +// offset of 1 is automatically added to the provided value to account for the +// tflog logging functions. +func WithAdditionalLocationOffset(additionalLocationOffset int) logging.Option { + return logging.WithAdditionalLocationOffset(additionalLocationOffset) +} + // WithLevelFromEnv returns an option that will set the level of the logger // based on the string in an environment variable. The environment variable // checked will be `name` and `subsystems`, joined by _ and in all caps. @@ -35,6 +43,12 @@ func WithLevel(level hclog.Level) logging.Option { } } +// WithRootFields enables the copying of root logger fields to a new subsystem +// logger during creation. +func WithRootFields() logging.Option { + return logging.WithRootFields() +} + // WithoutLocation returns an option that disables including the location of // the log line in the log output, which is on by default. This has no effect // when used with NewSubsystem. diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tflog/provider.go b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/provider.go index b05856df0..0be81b5e4 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/tflog/provider.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/provider.go @@ -2,29 +2,29 @@ package tflog import ( "context" + "regexp" "github.com/hashicorp/terraform-plugin-log/internal/logging" ) -// With returns a new context.Context that has a modified logger in it which -// will include key and value as arguments in all its log output. -func With(ctx context.Context, key string, value interface{}) context.Context { - logger := logging.GetProviderRootLogger(ctx) - if logger == nil { - // this essentially should never happen in production - // the root logger for provider code should be injected - // by whatever SDK the provider developer is using, so - // really this is only likely in unit tests, at most - // so just making this a no-op is fine - return ctx - } - return logging.SetProviderRootLogger(ctx, logger.With(key, value)) +// SetField returns a new context.Context that has a modified logger in it which +// will include key and value as fields in all its log output. +// +// In case the same key is used multiple times (i.e. key collision), +// the last one set is the one that gets persisted and then outputted with the logs.
+func SetField(ctx context.Context, key string, value interface{}) context.Context { + lOpts := logging.GetProviderRootTFLoggerOpts(ctx) + + lOpts = logging.WithField(key, value)(lOpts) + + return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) } -// Trace logs `msg` at the trace level to the logger in `ctx`, with `args` as -// structured arguments in the log output. `args` is expected to be pairs of -// key and value. -func Trace(ctx context.Context, msg string, args ...interface{}) { +// Trace logs `msg` at the trace level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `SetField()` function, +// and across multiple maps. +func Trace(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetProviderRootLogger(ctx) if logger == nil { // this essentially should never happen in production @@ -34,13 +34,20 @@ func Trace(ctx context.Context, msg string, args ...interface{}) { // so just making this a no-op is fine return } - logger.Trace(msg, args...) + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetProviderRootTFLoggerOpts(ctx), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Trace(msg, additionalArgs...) } -// Debug logs `msg` at the debug level to the logger in `ctx`, with `args` as -// structured arguments in the log output. `args` is expected to be pairs of -// key and value. -func Debug(ctx context.Context, msg string, args ...interface{}) { +// Debug logs `msg` at the debug level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `SetField()` function, +// and across multiple maps. +func Debug(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetProviderRootLogger(ctx) if logger == nil { // this essentially should never happen in production @@ -50,13 +57,20 @@ func Debug(ctx context.Context, msg string, args ...interface{}) { // so just making this a no-op is fine return } - logger.Debug(msg, args...) + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetProviderRootTFLoggerOpts(ctx), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Debug(msg, additionalArgs...) } -// Info logs `msg` at the info level to the logger in `ctx`, with `args` as -// structured arguments in the log output. `args` is expected to be pairs of -// key and value. -func Info(ctx context.Context, msg string, args ...interface{}) { +// Info logs `msg` at the info level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `SetField()` function, +// and across multiple maps. +func Info(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetProviderRootLogger(ctx) if logger == nil { // this essentially should never happen in production @@ -66,13 +80,20 @@ func Info(ctx context.Context, msg string, args ...interface{}) { // so just making this a no-op is fine return } - logger.Info(msg, args...) + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetProviderRootTFLoggerOpts(ctx), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Info(msg, additionalArgs...) 
} -// Warn logs `msg` at the warn level to the logger in `ctx`, with `args` as -// structured arguments in the log output. `args` is expected to be pairs of -// key and value. -func Warn(ctx context.Context, msg string, args ...interface{}) { +// Warn logs `msg` at the warn level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `SetField()` function, +// and across multiple maps. +func Warn(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetProviderRootLogger(ctx) if logger == nil { // this essentially should never happen in production @@ -82,13 +103,20 @@ func Warn(ctx context.Context, msg string, args ...interface{}) { // so just making this a no-op is fine return } - logger.Warn(msg, args...) + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetProviderRootTFLoggerOpts(ctx), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Warn(msg, additionalArgs...) } -// Error logs `msg` at the error level to the logger in `ctx`, with `args` as -// structured arguments in the log output. `args` is expected to be pairs of -// key and value. -func Error(ctx context.Context, msg string, args ...interface{}) { +// Error logs `msg` at the error level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `SetField()` function, +// and across multiple maps. +func Error(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetProviderRootLogger(ctx) if logger == nil { // this essentially should never happen in production @@ -98,5 +126,148 @@ func Error(ctx context.Context, msg string, args ...interface{}) { // so just making this a no-op is fine return } - logger.Error(msg, args...) + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetProviderRootTFLoggerOpts(ctx), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Error(msg, additionalArgs...) +} + +// OmitLogWithFieldKeys returns a new context.Context that has a modified logger +// that will omit writing any log when any of the given keys is found +// within its fields. +// +// Each call to this function is additive: +// the keys to omit by are added to the existing configuration. +// +// Example: +// +// configuration = `['foo', 'baz']` +// +// log1 = `{ msg = "...", fields = { 'foo', '...', 'bar', '...' }` -> omitted +// log2 = `{ msg = "...", fields = { 'bar', '...' }` -> printed +// log3 = `{ msg = "...", fields = { 'baz', '...', 'boo', '...' }` -> omitted +// +func OmitLogWithFieldKeys(ctx context.Context, keys ...string) context.Context { + lOpts := logging.GetProviderRootTFLoggerOpts(ctx) + + lOpts = logging.WithOmitLogWithFieldKeys(keys...)(lOpts) + + return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) +} + +// OmitLogWithMessageRegexes returns a new context.Context that has a modified logger +// that will omit writing any log that has a message matching any of the +// given *regexp.Regexp. +// +// Each call to this function is additive: +// the regular expressions to omit by are added to the existing configuration.
+// +// Example: +// +// configuration = `[regexp.MustCompile("(foo|bar)")]` +// +// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted +// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed +// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted +// +func OmitLogWithMessageRegexes(ctx context.Context, expressions ...*regexp.Regexp) context.Context { + lOpts := logging.GetProviderRootTFLoggerOpts(ctx) + + lOpts = logging.WithOmitLogWithMessageRegexes(expressions...)(lOpts) + + return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) +} + +// OmitLogWithMessageStrings returns a new context.Context that has a modified logger +// that will omit writing any log that matches any of the given strings. +// +// Each call to this function is additive: +// the strings to omit by are added to the existing configuration. +// +// Example: +// +// configuration = `['foo', 'bar']` +// +// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted +// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed +// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted +// +func OmitLogWithMessageStrings(ctx context.Context, matchingStrings ...string) context.Context { + lOpts := logging.GetProviderRootTFLoggerOpts(ctx) + + lOpts = logging.WithOmitLogWithMessageStrings(matchingStrings...)(lOpts) + + return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) +} + +// MaskFieldValuesWithFieldKeys returns a new context.Context that has a modified logger +// that masks (replaces) with asterisks (`***`) any field value where the +// key matches one of the given keys. +// +// Each call to this function is additive: +// the keys to mask by are added to the existing configuration. +// +// Example: +// +// configuration = `['foo', 'baz']` +// +// log1 = `{ msg = "...", fields = { 'foo', '***', 'bar', '...' }` -> masked value +// log2 = `{ msg = "...", fields = { 'bar', '...' }` -> as-is value +// log3 = `{ msg = "...", fields = { 'baz', '***', 'boo', '...' }` -> masked value +// +func MaskFieldValuesWithFieldKeys(ctx context.Context, keys ...string) context.Context { + lOpts := logging.GetProviderRootTFLoggerOpts(ctx) + + lOpts = logging.WithMaskFieldValuesWithFieldKeys(keys...)(lOpts) + + return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) +} + +// MaskMessageRegexes returns a new context.Context that has a modified logger +// that masks (replaces) with asterisks (`***`) all message substrings matching one +// of the given regular expressions. +// +// Each call to this function is additive: +// the regular expressions to mask by are added to the existing configuration. +// +// Example: +// +// configuration = `[regexp.MustCompile("(foo|bar)")]` +// +// log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion +// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is +// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion +// +func MaskMessageRegexes(ctx context.Context, expressions ...*regexp.Regexp) context.Context { + lOpts := logging.GetProviderRootTFLoggerOpts(ctx) + + lOpts = logging.WithMaskMessageRegexes(expressions...)(lOpts) + + return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) +}
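// A sketch of the masking behaviour (message and pattern are invented):
//
//   ctx = MaskMessageRegexes(ctx, regexp.MustCompile(`token=\S+`))
//   Info(ctx, "retrying request token=abc123")
//   // emitted message: "retrying request ***"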
+ +// MaskMessageStrings returns a new context.Context that has a modified logger +// that masks (replaces) with asterisks (`***`) all message substrings equal to one +// of the given strings. +// +// Each call to this function is additive: +// the strings to mask by are added to the existing configuration. +// +// Example: +// +// configuration = `['foo', 'bar']` +// +// log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion +// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is +// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion +// +func MaskMessageStrings(ctx context.Context, matchingStrings ...string) context.Context { + lOpts := logging.GetProviderRootTFLoggerOpts(ctx) + + lOpts = logging.WithMaskMessageStrings(matchingStrings...)(lOpts) + + return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) +}
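Taken together, the provider-root functions above compose as in the following self-contained sketch. In real provider code the root logger is injected by the SDK; here it is bootstrapped with tfsdklog.NewRootProviderLogger purely so the example runs on its own, and all field names, messages, and patterns are invented:

package main

import (
	"context"
	"regexp"

	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	"github.com/hashicorp/terraform-plugin-log/tfsdklog"
)

func main() {
	// Bootstrap a provider root logger (normally done by the SDK).
	ctx := tfsdklog.NewRootProviderLogger(context.Background(), tfsdklog.WithLevel(hclog.Trace))

	ctx = tflog.SetField(ctx, "request_id", "r-123")
	ctx = tflog.OmitLogWithFieldKeys(ctx, "secret")
	ctx = tflog.MaskMessageRegexes(ctx, regexp.MustCompile(`token=\S+`))

	// Printed with request_id as a field and the token masked to "***".
	tflog.Info(ctx, "starting sync token=abc123")

	// Omitted entirely: its fields contain the "secret" key.
	tflog.Debug(ctx, "credentials loaded", map[string]interface{}{"secret": "hunter2"})
}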
diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tflog/subsystem.go b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/subsystem.go index fbc564e47..b63ed3631 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/tflog/subsystem.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/subsystem.go @@ -2,8 +2,10 @@ package tflog import ( "context" + "regexp" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/terraform-plugin-log/internal/hclogutils" "github.com/hashicorp/terraform-plugin-log/internal/logging" ) @@ -17,9 +19,10 @@ import ( // at other times. // // The only Options supported for subsystems are the Options for setting the -// level of the logger. +// level and additional location offset of the logger. func NewSubsystem(ctx context.Context, subsystem string, options ...logging.Option) context.Context { logger := logging.GetProviderRootLogger(ctx) + if logger == nil { // this essentially should never happen in production // the root logger for provider code should be injected // by whatever SDK the provider developer is using, so @@ -28,82 +31,322 @@ func NewSubsystem(ctx context.Context, subsystem string, options ...logging.Opti // so just making this a no-op is fine return ctx } - subLogger := logger.Named(subsystem) - opts := logging.ApplyLoggerOpts(options...) - if opts.Level != hclog.NoLevel { - subLogger.SetLevel(opts.Level) + + rootLoggerOptions := logging.GetProviderRootLoggerOptions(ctx) + subLoggerTFLoggerOpts := logging.ApplyLoggerOpts(options...) + + // If root logger options are not available, + // fall back to creating a logger named like the given subsystem. + // This will preserve the root logger options, + // but cannot make changes beyond setting the level + // due to limitations with the hclog.Logger interface. + var subLogger hclog.Logger + if rootLoggerOptions == nil { + subLogger = logger.Named(subsystem) + + if subLoggerTFLoggerOpts.AdditionalLocationOffset != 1 { + logger.Warn("Unable to create logging subsystem with AdditionalLocationOffset due to missing root logger options") + } + } else { + subLoggerOptions := hclogutils.LoggerOptionsCopy(rootLoggerOptions) + subLoggerOptions.Name = subLoggerOptions.Name + "." + subsystem + + if subLoggerTFLoggerOpts.AdditionalLocationOffset != 1 { + subLoggerOptions.AdditionalLocationOffset = subLoggerTFLoggerOpts.AdditionalLocationOffset + } + + subLogger = hclog.New(subLoggerOptions) + } + + // Set the configured log level + if subLoggerTFLoggerOpts.Level != hclog.NoLevel { + subLogger.SetLevel(subLoggerTFLoggerOpts.Level) + } + + // Propagate root fields to the subsystem logger + if subLoggerTFLoggerOpts.IncludeRootFields { + loggerTFOpts := logging.GetProviderRootTFLoggerOpts(ctx) + subLoggerTFLoggerOpts = logging.WithFields(loggerTFOpts.Fields)(subLoggerTFLoggerOpts) } + + // Set the subsystem LoggerOpts in the context + ctx = logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, subLoggerTFLoggerOpts) + return logging.SetProviderSubsystemLogger(ctx, subsystem, subLogger) } -// SubsystemWith returns a new context.Context that has a modified logger for -// the specified subsystem in it which will include key and value as arguments +// SubsystemSetField returns a new context.Context that has a modified logger for +// the specified subsystem in it which will include key and value as fields // in all its log output. -func SubsystemWith(ctx context.Context, subsystem, key string, value interface{}) context.Context { - logger := logging.GetProviderSubsystemLogger(ctx, subsystem) - if logger == nil { - // create a new logger if one doesn't exist - logger = logging.GetProviderSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewProviderSubsystemLoggerWarning) - } - return logging.SetProviderSubsystemLogger(ctx, subsystem, logger.With(key, value)) +// +// In case the same key is used multiple times (i.e. key collision), +// the last one set is the one that gets persisted and then outputted with the logs. +func SubsystemSetField(ctx context.Context, subsystem, key string, value interface{}) context.Context { + lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) + + lOpts = logging.WithField(key, value)(lOpts) + + return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) } // SubsystemTrace logs `msg` at the trace level to the subsystem logger -// specified in `ctx`, with `args` as structured arguments in the log output. -// `args` is expected to be pairs of key and value. -func SubsystemTrace(ctx context.Context, subsystem, msg string, args ...interface{}) { +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemSetField()` function, and across +// multiple maps. +func SubsystemTrace(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetProviderSubsystemLogger(ctx, subsystem) if logger == nil { + if logging.GetProviderRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } // create a new logger if one doesn't exist logger = logging.GetProviderSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewProviderSubsystemLoggerWarning) } - logger.Trace(msg, args...) + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Trace(msg, additionalArgs...)
} // SubsystemDebug logs `msg` at the debug level to the subsystem logger -// specified in `ctx`, with `args` as structured arguments in the log output. -// `args` is expected to be pairs of key and value. -func SubsystemDebug(ctx context.Context, subsystem, msg string, args ...interface{}) { +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemSetField()` function, and across +// multiple maps. +func SubsystemDebug(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetProviderSubsystemLogger(ctx, subsystem) if logger == nil { + if logging.GetProviderRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } // create a new logger if one doesn't exist logger = logging.GetProviderSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewProviderSubsystemLoggerWarning) } - logger.Debug(msg, args...) + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Debug(msg, additionalArgs...) } // SubsystemInfo logs `msg` at the info level to the subsystem logger -// specified in `ctx`, with `args` as structured arguments in the log output. -// `args` is expected to be pairs of key and value. -func SubsystemInfo(ctx context.Context, subsystem, msg string, args ...interface{}) { +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemSetField()` function, and across +// multiple maps. +func SubsystemInfo(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetProviderSubsystemLogger(ctx, subsystem) if logger == nil { + if logging.GetProviderRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } // create a new logger if one doesn't exist logger = logging.GetProviderSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewProviderSubsystemLoggerWarning) } - logger.Info(msg, args...) + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Info(msg, additionalArgs...) } // SubsystemWarn logs `msg` at the warn level to the subsystem logger -// specified in `ctx`, with `args` as structured arguments in the log output. -// `args` is expected to be pairs of key and value. -func SubsystemWarn(ctx context.Context, subsystem, msg string, args ...interface{}) { +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemSetField()` function, and across +// multiple maps. 
+func SubsystemWarn(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetProviderSubsystemLogger(ctx, subsystem) if logger == nil { + if logging.GetProviderRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } // create a new logger if one doesn't exist logger = logging.GetProviderSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewProviderSubsystemLoggerWarning) } - logger.Warn(msg, args...) + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Warn(msg, additionalArgs...) } // SubsystemError logs `msg` at the error level to the subsystem logger -// specified in `ctx`, with `args` as structured arguments in the log output. -// `args` is expected to be pairs of key and value. -func SubsystemError(ctx context.Context, subsystem, msg string, args ...interface{}) { +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemSetField()` function, and across +// multiple maps. +func SubsystemError(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetProviderSubsystemLogger(ctx, subsystem) if logger == nil { + if logging.GetProviderRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } // create a new logger if one doesn't exist logger = logging.GetProviderSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewProviderSubsystemLoggerWarning) } - logger.Error(msg, args...) + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Error(msg, additionalArgs...) +} + +// SubsystemOmitLogWithFieldKeys returns a new context.Context that has a modified logger +// that will omit writing any log when any of the given keys is found +// within its fields. +// +// Each call to this function is additive: +// the keys to omit by are added to the existing configuration. +// +// Example: +// +// configuration = `['foo', 'baz']` +// +// log1 = `{ msg = "...", fields = { 'foo', '...', 'bar', '...' }` -> omitted +// log2 = `{ msg = "...", fields = { 'bar', '...' }` -> printed +// log3 = `{ msg = "...", fields = { 'baz', '...', 'boo', '...' }` -> omitted +// +func SubsystemOmitLogWithFieldKeys(ctx context.Context, subsystem string, keys ...string) context.Context { + lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) + + lOpts = logging.WithOmitLogWithFieldKeys(keys...)(lOpts) + + return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) +} + +// SubsystemOmitLogWithMessageRegexes returns a new context.Context that has a modified logger +// that will omit writing any log that has a message matching any of the +// given *regexp.Regexp. +// +// Each call to this function is additive: +// the regular expressions to omit by are added to the existing configuration.
+// +// Example: +// +// configuration = `[regexp.MustCompile("(foo|bar)")]` +// +// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted +// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed +// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted +// +func SubsystemOmitLogWithMessageRegexes(ctx context.Context, subsystem string, expressions ...*regexp.Regexp) context.Context { + lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) + + lOpts = logging.WithOmitLogWithMessageRegexes(expressions...)(lOpts) + + return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) +} + +// SubsystemOmitLogWithMessageStrings returns a new context.Context that has a modified logger +// that will omit writing any log that matches any of the given strings. +// +// Each call to this function is additive: +// the strings to omit by are added to the existing configuration. +// +// Example: +// +// configuration = `['foo', 'bar']` +// +// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted +// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed +// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted +// +func SubsystemOmitLogWithMessageStrings(ctx context.Context, subsystem string, matchingStrings ...string) context.Context { + lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) + + lOpts = logging.WithOmitLogWithMessageStrings(matchingStrings...)(lOpts) + + return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) +} + +// SubsystemMaskFieldValuesWithFieldKeys returns a new context.Context that has a modified logger +// that masks (replaces) with asterisks (`***`) any field value where the +// key matches one of the given keys. +// +// Each call to this function is additive: +// the keys to mask by are added to the existing configuration. +// +// Example: +// +// configuration = `['foo', 'baz']` +// +// log1 = `{ msg = "...", fields = { 'foo', '***', 'bar', '...' }` -> masked value +// log2 = `{ msg = "...", fields = { 'bar', '...' }` -> as-is value +// log3 = `{ msg = "...", fields = { 'baz', '***', 'boo', '...' }` -> masked value +// +func SubsystemMaskFieldValuesWithFieldKeys(ctx context.Context, subsystem string, keys ...string) context.Context { + lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) + + lOpts = logging.WithMaskFieldValuesWithFieldKeys(keys...)(lOpts) + + return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) +} + +// SubsystemMaskMessageRegexes returns a new context.Context that has a modified logger +// that masks (replaces) with asterisks (`***`) all message substrings matching one +// of the given regular expressions. +// +// Each call to this function is additive: +// the regular expressions to mask by are added to the existing configuration.
+// +// Example: +// +// configuration = `[regexp.MustCompile("(foo|bar)")]` +// +// log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion +// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is +// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion +// +func SubsystemMaskMessageRegexes(ctx context.Context, subsystem string, expressions ...*regexp.Regexp) context.Context { + lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) + + lOpts = logging.WithMaskMessageRegexes(expressions...)(lOpts) + + return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) +} + +// SubsystemMaskMessageStrings returns a new context.Context that has a modified logger +// that masks (replaces) with asterisks (`***`) all message substrings equal to one +// of the given strings. +// +// Each call to this function is additive: +// the strings to mask by are added to the existing configuration. +// +// Example: +// +// configuration = `['foo', 'bar']` +// +// log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion +// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is +// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion +// +func SubsystemMaskMessageStrings(ctx context.Context, subsystem string, matchingStrings ...string) context.Context { + lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) + + lOpts = logging.WithMaskMessageStrings(matchingStrings...)(lOpts) + + return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/options.go b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/options.go index 2115babd7..b1ba8e51e 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/options.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/options.go @@ -13,6 +13,14 @@ import ( // them. type Options []logging.Option +// WithAdditionalLocationOffset returns an option that allows implementations +// to fix location information when implementing helper functions. The default +// offset of 1 is automatically added to the provided value to account for the +// tfsdklog logging functions. +func WithAdditionalLocationOffset(additionalLocationOffset int) logging.Option { + return logging.WithAdditionalLocationOffset(additionalLocationOffset) +} + // WithLogName returns an option that will set the logger name explicitly to // `name`. This has no effect when used with NewSubsystem. func WithLogName(name string) logging.Option { @@ -45,6 +53,12 @@ func WithLevel(level hclog.Level) logging.Option { } } +// WithRootFields enables the copying of root logger fields to a new subsystem +// logger during creation. +func WithRootFields() logging.Option { + return logging.WithRootFields() +} + // WithoutLocation returns an option that disables including the location of // the log line in the log output, which is on by default. This has no effect // when used with NewSubsystem.
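The subsystem-level options mirror the provider-root ones. As a sketch of how WithRootFields and WithAdditionalLocationOffset are meant to be combined (the subsystem name, helper, and field values are invented; the root logger is again bootstrapped only to keep the example self-contained):

package main

import (
	"context"

	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	"github.com/hashicorp/terraform-plugin-log/tfsdklog"
)

// logAPICall is a hypothetical logging helper. Because NewSubsystem below
// adds one extra location offset, emitted lines point at logAPICall's
// caller instead of at this wrapper.
func logAPICall(ctx context.Context, endpoint string) {
	tflog.SubsystemDebug(ctx, "api", "calling endpoint", map[string]interface{}{
		"endpoint": endpoint,
	})
}

func main() {
	ctx := tfsdklog.NewRootProviderLogger(context.Background(), tfsdklog.WithLevel(hclog.Trace))
	ctx = tflog.SetField(ctx, "provider_version", "0.0.1")

	// WithRootFields copies provider_version onto the subsystem logger.
	ctx = tflog.NewSubsystem(ctx, "api",
		tflog.WithLevel(hclog.Debug),
		tflog.WithRootFields(),
		tflog.WithAdditionalLocationOffset(1),
	)

	logAPICall(ctx, "/users")
}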
diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sdk.go b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sdk.go index ec22d9fc6..eca972790 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sdk.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sdk.go @@ -2,8 +2,10 @@ package tfsdklog import ( "context" + "regexp" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/terraform-plugin-log/internal/hclogutils" "github.com/hashicorp/terraform-plugin-log/internal/logging" ) @@ -12,14 +14,23 @@ import ( func NewRootSDKLogger(ctx context.Context, options ...logging.Option) context.Context { opts := logging.ApplyLoggerOpts(options...) if opts.Name == "" { - opts.Name = "sdk" + opts.Name = logging.DefaultSDKRootLoggerName } - if sink := getSink(ctx); sink != nil { + if sink := logging.GetSink(ctx); sink != nil { logger := sink.Named(opts.Name) + sinkLoggerOptions := logging.GetSinkOptions(ctx) + sdkLoggerOptions := hclogutils.LoggerOptionsCopy(sinkLoggerOptions) + sdkLoggerOptions.Name = opts.Name + if opts.Level != hclog.NoLevel { logger.SetLevel(opts.Level) + sdkLoggerOptions.Level = opts.Level } - return logging.SetSDKRootLogger(ctx, logger) + + ctx = logging.SetSDKRootLogger(ctx, logger) + ctx = logging.SetSDKRootLoggerOptions(ctx, sdkLoggerOptions) + + return ctx } if opts.Level == hclog.NoLevel { opts.Level = hclog.Trace @@ -32,9 +43,13 @@ func NewRootSDKLogger(ctx context.Context, options ...logging.Option) context.Co IncludeLocation: opts.IncludeLocation, DisableTime: !opts.IncludeTime, Output: opts.Output, - AdditionalLocationOffset: 1, + AdditionalLocationOffset: opts.AdditionalLocationOffset, } - return logging.SetSDKRootLogger(ctx, hclog.New(loggerOptions)) + + ctx = logging.SetSDKRootLogger(ctx, hclog.New(loggerOptions)) + ctx = logging.SetSDKRootLoggerOptions(ctx, loggerOptions) + + return ctx } // NewRootProviderLogger returns a new context.Context that contains a provider @@ -42,14 +57,23 @@ func NewRootSDKLogger(ctx context.Context, options ...logging.Option) context.Co func NewRootProviderLogger(ctx context.Context, options ...logging.Option) context.Context { opts := logging.ApplyLoggerOpts(options...) 
if opts.Name == "" { - opts.Name = "provider" + opts.Name = logging.DefaultProviderRootLoggerName } - if sink := getSink(ctx); sink != nil { + if sink := logging.GetSink(ctx); sink != nil { logger := sink.Named(opts.Name) + sinkLoggerOptions := logging.GetSinkOptions(ctx) + providerLoggerOptions := hclogutils.LoggerOptionsCopy(sinkLoggerOptions) + providerLoggerOptions.Name = opts.Name + if opts.Level != hclog.NoLevel { logger.SetLevel(opts.Level) + providerLoggerOptions.Level = opts.Level } - return logging.SetProviderRootLogger(ctx, logger) + + ctx = logging.SetProviderRootLogger(ctx, logger) + ctx = logging.SetProviderRootLoggerOptions(ctx, providerLoggerOptions) + + return ctx } if opts.Level == hclog.NoLevel { opts.Level = hclog.Trace @@ -62,29 +86,33 @@ func NewRootProviderLogger(ctx context.Context, options ...logging.Option) conte IncludeLocation: opts.IncludeLocation, DisableTime: !opts.IncludeTime, Output: opts.Output, - AdditionalLocationOffset: 1, + AdditionalLocationOffset: opts.AdditionalLocationOffset, } - return logging.SetProviderRootLogger(ctx, hclog.New(loggerOptions)) + + ctx = logging.SetProviderRootLogger(ctx, hclog.New(loggerOptions)) + ctx = logging.SetProviderRootLoggerOptions(ctx, loggerOptions) + + return ctx } -// With returns a new context.Context that has a modified logger in it which -// will include key and value as arguments in all its log output. -func With(ctx context.Context, key string, value interface{}) context.Context { - logger := logging.GetSDKRootLogger(ctx) - if logger == nil { - // this essentially should never happen in production the root - // logger for code should be injected by the in - // question, so really this is only likely in unit tests, at - // most so just making this a no-op is fine - return ctx - } - return logging.SetSDKRootLogger(ctx, logger.With(key, value)) +// SetField returns a new context.Context that has a modified logger in it which +// will include key and value as fields in all its log output. +// +// In case of the same key is used multiple times (i.e. key collision), +// the last one set is the one that gets persisted and then outputted with the logs. +func SetField(ctx context.Context, key string, value interface{}) context.Context { + lOpts := logging.GetSDKRootTFLoggerOpts(ctx) + + lOpts = logging.WithField(key, value)(lOpts) + + return logging.SetSDKRootTFLoggerOpts(ctx, lOpts) } -// Trace logs `msg` at the trace level to the logger in `ctx`, with `args` as -// structured arguments in the log output. `args` is expected to be pairs of -// key and value. -func Trace(ctx context.Context, msg string, args ...interface{}) { +// Trace logs `msg` at the trace level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `SetField()` function, +// and across multiple maps. +func Trace(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetSDKRootLogger(ctx) if logger == nil { // this essentially should never happen in production the root @@ -93,13 +121,20 @@ func Trace(ctx context.Context, msg string, args ...interface{}) { // most so just making this a no-op is fine return } - logger.Trace(msg, args...) + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetSDKRootTFLoggerOpts(ctx), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Trace(msg, additionalArgs...) 
} -// Debug logs `msg` at the debug level to the logger in `ctx`, with `args` as -// structured arguments in the log output. `args` is expected to be pairs of -// key and value. -func Debug(ctx context.Context, msg string, args ...interface{}) { +// Debug logs `msg` at the debug level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `SetField()` function, +// and across multiple maps. +func Debug(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetSDKRootLogger(ctx) if logger == nil { // this essentially should never happen in production the root @@ -108,13 +143,20 @@ func Debug(ctx context.Context, msg string, args ...interface{}) { // most so just making this a no-op is fine return } - logger.Debug(msg, args...) + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetSDKRootTFLoggerOpts(ctx), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Debug(msg, additionalArgs...) } -// Info logs `msg` at the info level to the logger in `ctx`, with `args` as -// structured arguments in the log output. `args` is expected to be pairs of -// key and value. -func Info(ctx context.Context, msg string, args ...interface{}) { +// Info logs `msg` at the info level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `SetField()` function, +// and across multiple maps. +func Info(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetSDKRootLogger(ctx) if logger == nil { // this essentially should never happen in production the root @@ -123,13 +165,20 @@ func Info(ctx context.Context, msg string, args ...interface{}) { // most so just making this a no-op is fine return } - logger.Info(msg, args...) + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetSDKRootTFLoggerOpts(ctx), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Info(msg, additionalArgs...) } -// Warn logs `msg` at the warn level to the logger in `ctx`, with `args` as -// structured arguments in the log output. `args` is expected to be pairs of -// key and value. -func Warn(ctx context.Context, msg string, args ...interface{}) { +// Warn logs `msg` at the warn level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `SetField()` function, +// and across multiple maps. +func Warn(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetSDKRootLogger(ctx) if logger == nil { // this essentially should never happen in production the root @@ -138,13 +187,20 @@ func Warn(ctx context.Context, msg string, args ...interface{}) { // most so just making this a no-op is fine return } - logger.Warn(msg, args...) + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetSDKRootTFLoggerOpts(ctx), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Warn(msg, additionalArgs...) } -// Error logs `msg` at the error level to the logger in `ctx`, with `args` as -// structured arguments in the log output. `args` is expected to be pairs of -// key and value. 
-func Error(ctx context.Context, msg string, args ...interface{}) { +// Error logs `msg` at the error level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `SetField()` function, +// and across multiple maps. +func Error(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetSDKRootLogger(ctx) if logger == nil { // this essentially should never happen in production the root @@ -153,5 +209,148 @@ func Error(ctx context.Context, msg string, args ...interface{}) { // most so just making this a no-op is fine return } - logger.Error(msg, args...) + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetSDKRootTFLoggerOpts(ctx), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Error(msg, additionalArgs...) +} + +// OmitLogWithFieldKeys returns a new context.Context that has a modified logger +// that will omit to write any log when any of the given keys is found +// within its fields. +// +// Each call to this function is additive: +// the keys to omit by are added to the existing configuration. +// +// Example: +// +// configuration = `['foo', 'baz']` +// +// log1 = `{ msg = "...", fields = { 'foo', '...', 'bar', '...' }` -> omitted +// log2 = `{ msg = "...", fields = { 'bar', '...' }` -> printed +// log3 = `{ msg = "...", fields = { 'baz`', '...', 'boo', '...' }` -> omitted +// +func OmitLogWithFieldKeys(ctx context.Context, keys ...string) context.Context { + lOpts := logging.GetSDKRootTFLoggerOpts(ctx) + + lOpts = logging.WithOmitLogWithFieldKeys(keys...)(lOpts) + + return logging.SetSDKRootTFLoggerOpts(ctx, lOpts) +} + +// OmitLogWithMessageRegexes returns a new context.Context that has a modified logger +// that will omit to write any log that has a message matching any of the +// given *regexp.Regexp. +// +// Each call to this function is additive: +// the regexp to omit by are added to the existing configuration. +// +// Example: +// +// configuration = `[regexp.MustCompile("(foo|bar)")]` +// +// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted +// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed +// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted +// +func OmitLogWithMessageRegexes(ctx context.Context, expressions ...*regexp.Regexp) context.Context { + lOpts := logging.GetSDKRootTFLoggerOpts(ctx) + + lOpts = logging.WithOmitLogWithMessageRegexes(expressions...)(lOpts) + + return logging.SetSDKRootTFLoggerOpts(ctx, lOpts) +} + +// OmitLogWithMessageStrings returns a new context.Context that has a modified logger +// that will omit to write any log that matches any of the given string. +// +// Each call to this function is additive: +// the string to omit by are added to the existing configuration. 
+//
+// Example:
+//
+// configuration = `['foo', 'bar']`
+//
+// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted
+// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed
+// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted
+//
+func OmitLogWithMessageStrings(ctx context.Context, matchingStrings ...string) context.Context {
+ lOpts := logging.GetSDKRootTFLoggerOpts(ctx)
+
+ lOpts = logging.WithOmitLogWithMessageStrings(matchingStrings...)(lOpts)
+
+ return logging.SetSDKRootTFLoggerOpts(ctx, lOpts)
+}
+
+// MaskFieldValuesWithFieldKeys returns a new context.Context that has a modified logger
+// that masks (replaces) with asterisks (`***`) any field value where the
+// key matches one of the given keys.
+//
+// Each call to this function is additive:
+// the keys to mask by are added to the existing configuration.
+//
+// Example:
+//
+// configuration = `['foo', 'baz']`
+//
+// log1 = `{ msg = "...", fields = { 'foo', '***', 'bar', '...' }` -> masked value
+// log2 = `{ msg = "...", fields = { 'bar', '...' }` -> as-is value
+// log3 = `{ msg = "...", fields = { 'baz', '***', 'boo', '...' }` -> masked value
+//
+func MaskFieldValuesWithFieldKeys(ctx context.Context, keys ...string) context.Context {
+ lOpts := logging.GetSDKRootTFLoggerOpts(ctx)
+
+ lOpts = logging.WithMaskFieldValuesWithFieldKeys(keys...)(lOpts)
+
+ return logging.SetSDKRootTFLoggerOpts(ctx, lOpts)
+}
+
+// MaskMessageRegexes returns a new context.Context that has a modified logger
+// that masks (replaces) with asterisks (`***`) all message substrings matching one
+// of the given regular expressions.
+//
+// Each call to this function is additive:
+// the regular expressions to mask by are added to the existing configuration.
+//
+// Example:
+//
+// configuration = `[regexp.MustCompile("(foo|bar)")]`
+//
+// log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion
+// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is
+// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion
+//
+func MaskMessageRegexes(ctx context.Context, expressions ...*regexp.Regexp) context.Context {
+ lOpts := logging.GetSDKRootTFLoggerOpts(ctx)
+
+ lOpts = logging.WithMaskMessageRegexes(expressions...)(lOpts)
+
+ return logging.SetSDKRootTFLoggerOpts(ctx, lOpts)
+}
+
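These sdk.go filter helpers compose the same way, and omission and masking can be layered in one pass over the context. A minimal sketch; the field key and pattern are illustrative:

    package example

    import (
    	"context"
    	"regexp"

    	"github.com/hashicorp/terraform-plugin-log/tfsdklog"
    )

    // filteredContext drops any entry that carries a raw payload field and
    // masks token-like substrings in messages before they reach the sink.
    func filteredContext(ctx context.Context) context.Context {
    	ctx = tfsdklog.OmitLogWithFieldKeys(ctx, "api_payload")
    	ctx = tfsdklog.MaskMessageRegexes(ctx, regexp.MustCompile(`Bearer \S+`))
    	return ctx
    }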
+// MaskMessageStrings returns a new context.Context that has a modified logger
+// that masks (replaces) with asterisks (`***`) all message substrings equal to one
+// of the given strings.
+//
+// Each call to this function is additive:
+// the strings to mask by are added to the existing configuration.
+//
+// Example:
+//
+// configuration = `['foo', 'bar']`
+//
+// log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion
+// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is
+// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion
+//
+func MaskMessageStrings(ctx context.Context, matchingStrings ...string) context.Context {
+ lOpts := logging.GetSDKRootTFLoggerOpts(ctx)
+
+ lOpts = logging.WithMaskMessageStrings(matchingStrings...)(lOpts)
+
+ return logging.SetSDKRootTFLoggerOpts(ctx, lOpts)
}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sink.go b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sink.go
index 0b26a0890..6326901f1 100644
--- a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sink.go
+++ b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sink.go
@@ -6,6 +6,7 @@ import (
"io"
"os"
"strings"
+ "sync"
"syscall"

"github.com/hashicorp/go-hclog"
@@ -53,13 +54,8 @@ const (
// loggers.
var ValidLevels = []string{"TRACE", "DEBUG", "INFO", "WARN", "ERROR", "OFF"}

-func getSink(ctx context.Context) hclog.Logger {
- logger := ctx.Value(logging.SinkKey)
- if logger == nil {
- return nil
- }
- return logger.(hclog.Logger)
-}
+// Only show invalid log level message once across any number of level lookups.
+var invalidLogLevelMessage sync.Once

// RegisterTestSink sets up a logging sink, for use with test frameworks and
// other cases where plugin logs don't get routed through Terraform. This
@@ -71,10 +67,15 @@ func getSink(ctx context.Context) hclog.Logger {
// RegisterTestSink must be called prior to any loggers being setup or
// instantiated.
func RegisterTestSink(ctx context.Context, t testing.T) context.Context {
- return context.WithValue(ctx, logging.SinkKey, newSink(t))
+ logger, loggerOptions := newSink(t)
+
+ ctx = logging.SetSink(ctx, logger)
+ ctx = logging.SetSinkOptions(ctx, loggerOptions)
+
+ return ctx
}

-func newSink(t testing.T) hclog.Logger {
+func newSink(t testing.T) (hclog.Logger, *hclog.LoggerOptions) {
logOutput := io.Writer(os.Stderr)
var json bool
var logLevel hclog.Level
@@ -120,21 +121,29 @@ func newSink(t testing.T) hclog.Logger {
} else if isValidLogLevel(envLevel) {
logLevel = hclog.LevelFromString(envLevel)
} else {
- fmt.Fprintf(os.Stderr, "[WARN] Invalid log level: %q. Defaulting to level: OFF. Valid levels are: %+v",
- envLevel, ValidLevels)
+ invalidLogLevelMessage.Do(func() {
+ fmt.Fprintf(
+ os.Stderr,
+ "[WARN] Invalid log level: %q. Defaulting to level: OFF. 
Valid levels are: %+v\n", + envLevel, + ValidLevels, + ) + }) } - return hclog.New(&hclog.LoggerOptions{ + loggerOptions := &hclog.LoggerOptions{ Level: logLevel, Output: logOutput, IndependentLevels: true, JSONFormat: json, - }) + } + + return hclog.New(loggerOptions), loggerOptions } func isValidLogLevel(level string) bool { - for _, l := range ValidLevels { - if level == string(l) { + for _, validLevel := range ValidLevels { + if level == validLevel { return true } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/subsystem.go b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/subsystem.go index 96b76791f..bba1d6f25 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/subsystem.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/subsystem.go @@ -2,8 +2,10 @@ package tfsdklog import ( "context" + "regexp" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/terraform-plugin-log/internal/hclogutils" "github.com/hashicorp/terraform-plugin-log/internal/logging" ) @@ -17,9 +19,10 @@ import ( // at other times. // // The only Options supported for subsystems are the Options for setting the -// level of the logger. +// level and additional location offset of the logger. func NewSubsystem(ctx context.Context, subsystem string, options ...logging.Option) context.Context { logger := logging.GetSDKRootLogger(ctx) + if logger == nil { // this essentially should never happen in production // the root logger for provider code should be injected @@ -28,82 +31,322 @@ func NewSubsystem(ctx context.Context, subsystem string, options ...logging.Opti // so just making this a no-op is fine return ctx } - subLogger := logger.Named(subsystem) - opts := logging.ApplyLoggerOpts(options...) - if opts.Level != hclog.NoLevel { - subLogger.SetLevel(opts.Level) + + rootLoggerOptions := logging.GetSDKRootLoggerOptions(ctx) + subLoggerTFLoggerOpts := logging.ApplyLoggerOpts(options...) + + // If root logger options are not available, + // fallback to creating a logger named like the given subsystem. + // This will preserve the root logger options, + // but cannot make changes beyond setting the level + // due to limitations with the hclog.Logger interface. + var subLogger hclog.Logger + if rootLoggerOptions == nil { + subLogger = logger.Named(subsystem) + + if subLoggerTFLoggerOpts.AdditionalLocationOffset != 1 { + logger.Warn("Unable to create logging subsystem with AdditionalLocationOffset due to missing root logger options") + } + } else { + subLoggerOptions := hclogutils.LoggerOptionsCopy(rootLoggerOptions) + subLoggerOptions.Name = subLoggerOptions.Name + "." 
+ subsystem + + if subLoggerTFLoggerOpts.AdditionalLocationOffset != 1 { + subLoggerOptions.AdditionalLocationOffset = subLoggerTFLoggerOpts.AdditionalLocationOffset + } + + subLogger = hclog.New(subLoggerOptions) + } + + // Set the configured log level + if subLoggerTFLoggerOpts.Level != hclog.NoLevel { + subLogger.SetLevel(subLoggerTFLoggerOpts.Level) + } + + // Propagate root fields to the subsystem logger + if subLoggerTFLoggerOpts.IncludeRootFields { + loggerTFOpts := logging.GetSDKRootTFLoggerOpts(ctx) + subLoggerTFLoggerOpts = logging.WithFields(loggerTFOpts.Fields)(subLoggerTFLoggerOpts) } + + // Set the subsystem LoggerOpts in the context + ctx = logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, subLoggerTFLoggerOpts) + return logging.SetSDKSubsystemLogger(ctx, subsystem, subLogger) } -// SubsystemWith returns a new context.Context that has a modified logger for -// the specified subsystem in it which will include key and value as arguments +// SubsystemSetField returns a new context.Context that has a modified logger for +// the specified subsystem in it which will include key and value as fields // in all its log output. -func SubsystemWith(ctx context.Context, subsystem, key string, value interface{}) context.Context { - logger := logging.GetSDKSubsystemLogger(ctx, subsystem) - if logger == nil { - // create a new logger if one doesn't exist - logger = logging.GetSDKSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewSDKSubsystemLoggerWarning) - } - return logging.SetSDKSubsystemLogger(ctx, subsystem, logger.With(key, value)) +// +// In case of the same key is used multiple times (i.e. key collision), +// the last one set is the one that gets persisted and then outputted with the logs. +func SubsystemSetField(ctx context.Context, subsystem, key string, value interface{}) context.Context { + lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem) + + lOpts = logging.WithField(key, value)(lOpts) + + return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts) } // SubsystemTrace logs `msg` at the trace level to the subsystem logger -// specified in `ctx`, with `args` as structured arguments in the log output. -// `args` is expected to be pairs of key and value. -func SubsystemTrace(ctx context.Context, subsystem, msg string, args ...interface{}) { +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemSetField()` function, and across +// multiple maps. +func SubsystemTrace(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetSDKSubsystemLogger(ctx, subsystem) if logger == nil { + if logging.GetSDKRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } // create a new logger if one doesn't exist logger = logging.GetSDKSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewSDKSubsystemLoggerWarning) } - logger.Trace(msg, args...) + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Trace(msg, additionalArgs...) } // SubsystemDebug logs `msg` at the debug level to the subsystem logger -// specified in `ctx`, with `args` as structured arguments in the log output. 
-// `args` is expected to be pairs of key and value. -func SubsystemDebug(ctx context.Context, subsystem, msg string, args ...interface{}) { +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemSetField()` function, and across +// multiple maps. +func SubsystemDebug(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetSDKSubsystemLogger(ctx, subsystem) if logger == nil { + if logging.GetSDKRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } // create a new logger if one doesn't exist logger = logging.GetSDKSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewSDKSubsystemLoggerWarning) } - logger.Debug(msg, args...) + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Debug(msg, additionalArgs...) } // SubsystemInfo logs `msg` at the info level to the subsystem logger -// specified in `ctx`, with `args` as structured arguments in the log output. -// `args` is expected to be pairs of key and value. -func SubsystemInfo(ctx context.Context, subsystem, msg string, args ...interface{}) { +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemSetField()` function, and across +// multiple maps. +func SubsystemInfo(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetSDKSubsystemLogger(ctx, subsystem) if logger == nil { + if logging.GetSDKRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } // create a new logger if one doesn't exist logger = logging.GetSDKSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewSDKSubsystemLoggerWarning) } - logger.Info(msg, args...) + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Info(msg, additionalArgs...) } // SubsystemWarn logs `msg` at the warn level to the subsystem logger -// specified in `ctx`, with `args` as structured arguments in the log output. -// `args` is expected to be pairs of key and value. -func SubsystemWarn(ctx context.Context, subsystem, msg string, args ...interface{}) { +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemSetField()` function, and across +// multiple maps. 
+func SubsystemWarn(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetSDKSubsystemLogger(ctx, subsystem) if logger == nil { + if logging.GetSDKRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } // create a new logger if one doesn't exist logger = logging.GetSDKSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewSDKSubsystemLoggerWarning) } - logger.Warn(msg, args...) + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Warn(msg, additionalArgs...) } // SubsystemError logs `msg` at the error level to the subsystem logger -// specified in `ctx`, with `args` as structured arguments in the log output. -// `args` is expected to be pairs of key and value. -func SubsystemError(ctx context.Context, subsystem, msg string, args ...interface{}) { +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemSetField()` function, and across +// multiple maps. +func SubsystemError(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { logger := logging.GetSDKSubsystemLogger(ctx, subsystem) if logger == nil { + if logging.GetSDKRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } // create a new logger if one doesn't exist logger = logging.GetSDKSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewSDKSubsystemLoggerWarning) } - logger.Error(msg, args...) + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Error(msg, additionalArgs...) +} + +// SubsystemOmitLogWithFieldKeys returns a new context.Context that has a modified logger +// that will omit to write any log when any of the given keys is found +// within its fields. +// +// Each call to this function is additive: +// the keys to omit by are added to the existing configuration. +// +// Example: +// +// configuration = `['foo', 'baz']` +// +// log1 = `{ msg = "...", fields = { 'foo', '...', 'bar', '...' }` -> omitted +// log2 = `{ msg = "...", fields = { 'bar', '...' }` -> printed +// log3 = `{ msg = "...", fields = { 'baz`', '...', 'boo', '...' }` -> omitted +// +func SubsystemOmitLogWithFieldKeys(ctx context.Context, subsystem string, keys ...string) context.Context { + lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem) + + lOpts = logging.WithOmitLogWithFieldKeys(keys...)(lOpts) + + return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts) +} + +// SubsystemOmitLogWithMessageRegexes returns a new context.Context that has a modified logger +// that will omit to write any log that has a message matching any of the +// given *regexp.Regexp. +// +// Each call to this function is additive: +// the regexp to omit by are added to the existing configuration. 
+// +// Example: +// +// configuration = `[regexp.MustCompile("(foo|bar)")]` +// +// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted +// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed +// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted +// +func SubsystemOmitLogWithMessageRegexes(ctx context.Context, subsystem string, expressions ...*regexp.Regexp) context.Context { + lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem) + + lOpts = logging.WithOmitLogWithMessageRegexes(expressions...)(lOpts) + + return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts) +} + +// SubsystemOmitLogWithMessageStrings returns a new context.Context that has a modified logger +// that will omit to write any log that matches any of the given string. +// +// Each call to this function is additive: +// the string to omit by are added to the existing configuration. +// +// Example: +// +// configuration = `['foo', 'bar']` +// +// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted +// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed +// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted +// +func SubsystemOmitLogWithMessageStrings(ctx context.Context, subsystem string, matchingStrings ...string) context.Context { + lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem) + + lOpts = logging.WithOmitLogWithMessageStrings(matchingStrings...)(lOpts) + + return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts) +} + +// SubsystemMaskFieldValuesWithFieldKeys returns a new context.Context that has a modified logger +// that masks (replaces) with asterisks (`***`) any field value where the +// key matches one of the given keys. +// +// Each call to this function is additive: +// the keys to mask by are added to the existing configuration. +// +// Example: +// +// configuration = `['foo', 'baz']` +// +// log1 = `{ msg = "...", fields = { 'foo', '***', 'bar', '...' }` -> masked value +// log2 = `{ msg = "...", fields = { 'bar', '...' }` -> as-is value +// log3 = `{ msg = "...", fields = { 'baz`', '***', 'boo', '...' }` -> masked value +// +func SubsystemMaskFieldValuesWithFieldKeys(ctx context.Context, subsystem string, keys ...string) context.Context { + lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem) + + lOpts = logging.WithMaskFieldValuesWithFieldKeys(keys...)(lOpts) + + return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts) +} + +// SubsystemMaskMessageRegexes returns a new context.Context that has a modified logger +// that masks (replaces) with asterisks (`***`) all message substrings matching one +// of the given strings. +// +// Each call to this function is additive: +// the regexp to mask by are added to the existing configuration. 
+//
+// Example:
+//
+// configuration = `[regexp.MustCompile("(foo|bar)")]`
+//
+// log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion
+// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is
+// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion
+//
+func SubsystemMaskMessageRegexes(ctx context.Context, subsystem string, expressions ...*regexp.Regexp) context.Context {
+ lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem)
+
+ lOpts = logging.WithMaskMessageRegexes(expressions...)(lOpts)
+
+ return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts)
+}
+
+// SubsystemMaskMessageStrings returns a new context.Context that has a modified logger
+// that masks (replaces) with asterisks (`***`) all message substrings equal to one
+// of the given strings.
+//
+// Each call to this function is additive:
+// the strings to mask by are added to the existing configuration.
+//
+// Example:
+//
+// configuration = `['foo', 'bar']`
+//
+// log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion
+// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is
+// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion
+//
+func SubsystemMaskMessageStrings(ctx context.Context, subsystem string, matchingStrings ...string) context.Context {
+ lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem)
+
+ lOpts = logging.WithMaskMessageStrings(matchingStrings...)(lOpts)
+
+ return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts)
}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest/random.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest/random.go
index 12fdde9d5..263a1ff57 100644
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest/random.go
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest/random.go
@@ -120,8 +120,8 @@ func RandIpAddress(s string) (string, error) {
last.SetBytes([]byte(lastIp))
r := &big.Int{}
r.Sub(last, first)
- if len := r.BitLen(); len > 31 {
- return "", fmt.Errorf("CIDR range is too large: %d", len)
+ if bitLen := r.BitLen(); bitLen > 31 {
+ return "", fmt.Errorf("CIDR range is too large: %d", bitLen)
}

max := int(r.Int64())
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/logging.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/logging.go
index 01ae25dfb..235586aed 100644
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/logging.go
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/logging.go
@@ -1,7 +1,6 @@
package logging

import (
- "context"
"fmt"
"io"
"io/ioutil"
@@ -11,7 +10,6 @@ import (
"syscall"

"github.com/hashicorp/logutils"
- "github.com/hashicorp/terraform-plugin-log/tfsdklog"
testing "github.com/mitchellh/go-testing-interface"
)

@@ -125,7 +123,7 @@ func LogLevel() string {

// IsDebugOrHigher returns whether or not the current log level is debug or trace
func IsDebugOrHigher() bool {
- level := string(LogLevel())
+ level := LogLevel()
return level == "DEBUG" || level == "TRACE"
}

@@ -138,12 +136,3 @@ func isValidLogLevel(level string) bool {

return false
}
-
-// GetTestLogContext creates a context that is registered to the SDK log sink.
-// This function is for internal usage only and is not supported by the project's
-// compatibility promises.
-func GetTestLogContext(t testing.T) context.Context {
- ctx := context.Background()
- ctx = tfsdklog.RegisterTestSink(ctx, t)
- return ctx
-}
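The transport.go change below assigns json.Indent's error to the blank identifier; the call cannot fail there because json.Valid has already vetted the bytes. A standalone sketch of that check-then-indent pattern:

    package example

    import (
    	"bytes"
    	"encoding/json"
    )

    // indentIfJSON pretty-prints s when it is valid JSON and returns it
    // unchanged otherwise.
    func indentIfJSON(s string) string {
    	b := []byte(s)
    	if !json.Valid(b) {
    		return s
    	}
    	var out bytes.Buffer
    	// Error deliberately discarded: Indent succeeds on json.Valid input.
    	_ = json.Indent(&out, b, "", " ")
    	return out.String()
    }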
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/transport.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/transport.go
index bddabe647..6419605e7 100644
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/transport.go
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/transport.go
@@ -52,7 +52,7 @@ func prettyPrintJsonLines(b []byte) string {
for i, p := range parts {
if b := []byte(p); json.Valid(b) {
var out bytes.Buffer
- json.Indent(&out, b, "", " ")
+ _ = json.Indent(&out, b, "", " ") // already checked for validity
parts[i] = out.String()
}
}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/environment_variables.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/environment_variables.go
new file mode 100644
index 000000000..fed6aa25c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/environment_variables.go
@@ -0,0 +1,32 @@
+package resource
+
+// Environment variables for acceptance testing. Additional environment
+// variable constants can be found in the internal/plugintest package.
+const (
+ // Environment variable to enable acceptance tests using this package's
+ // ParallelTest and Test functions whose TestCase does not enable the
+ // IsUnitTest field. Defaults to disabled, in which case each test will call
+ // (*testing.T).Skip(). Can be set to any value to enable acceptance tests,
+ // however "1" is conventional.
+ EnvTfAcc = "TF_ACC"
+
+ // Environment variable with hostname for the provider under acceptance
+ // test. The hostname is the first portion of the full provider source
+ // address, such as "example.com" in example.com/myorg/myprovider. Defaults
+ // to "registry.terraform.io".
+ //
+ // Only required if any Terraform configuration set via the TestStep
+ // type Config field includes a provider source, such as the terraform
+ // configuration block required_providers attribute.
+ EnvTfAccProviderHost = "TF_ACC_PROVIDER_HOST"
+
+ // Environment variable with namespace for the provider under acceptance
+ // test. The namespace is the second portion of the full provider source
+ // address, such as "myorg" in registry.terraform.io/myorg/myprovider.
+ // Defaults to "-" for Terraform 0.12-0.13 compatibility and "hashicorp".
+ //
+ // Only required if any Terraform configuration set via the TestStep
+ // type Config field includes a provider source, such as the terraform
+ // configuration block required_providers attribute.
+ EnvTfAccProviderNamespace = "TF_ACC_PROVIDER_NAMESPACE"
+)
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go
index 2d649def0..9e52348d6 100644
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"io/ioutil"
- "log"
"os"
"strings"
"sync"
@@ -13,29 +12,120 @@ import (
"github.com/hashicorp/terraform-exec/tfexec"
"github.com/hashicorp/terraform-plugin-go/tfprotov5"
"github.com/hashicorp/terraform-plugin-go/tfprotov6"
- "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging"
"github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest"
"github.com/hashicorp/terraform-plugin-sdk/v2/plugin"
testing "github.com/mitchellh/go-testing-interface"
)

+// protov5ProviderFactory is a function which is called to start a protocol
+// version 5 provider server.
+type protov5ProviderFactory func() (tfprotov5.ProviderServer, error)
+
+// protov5ProviderFactories is a mapping of provider addresses to provider
+// factory for protocol version 5 provider servers.
+type protov5ProviderFactories map[string]func() (tfprotov5.ProviderServer, error)
+
+// merge combines provider factories.
+//
+// In case of an overlapping entry, the later entry will overwrite the previous
+// value.
+func (pf protov5ProviderFactories) merge(otherPfs ...protov5ProviderFactories) protov5ProviderFactories {
+ result := make(protov5ProviderFactories)
+
+ for name, providerFactory := range pf {
+ result[name] = providerFactory
+ }
+
+ for _, otherPf := range otherPfs {
+ for name, providerFactory := range otherPf {
+ result[name] = providerFactory
+ }
+ }
+
+ return result
+}
+
+// protov6ProviderFactory is a function which is called to start a protocol
+// version 6 provider server.
+type protov6ProviderFactory func() (tfprotov6.ProviderServer, error)
+
+// protov6ProviderFactories is a mapping of provider addresses to provider
+// factory for protocol version 6 provider servers.
+type protov6ProviderFactories map[string]func() (tfprotov6.ProviderServer, error)
+
+// merge combines provider factories.
+//
+// In case of an overlapping entry, the later entry will overwrite the previous
+// value.
+func (pf protov6ProviderFactories) merge(otherPfs ...protov6ProviderFactories) protov6ProviderFactories {
+ result := make(protov6ProviderFactories)
+
+ for name, providerFactory := range pf {
+ result[name] = providerFactory
+ }
+
+ for _, otherPf := range otherPfs {
+ for name, providerFactory := range otherPf {
+ result[name] = providerFactory
+ }
+ }
+
+ return result
+}
+
+// sdkProviderFactory is a function which is called to start a SDK provider
+// server.
+type sdkProviderFactory func() (*schema.Provider, error)
+
+// sdkProviderFactories is a mapping of provider addresses to provider
+// factory for SDK provider servers.
+type sdkProviderFactories map[string]func() (*schema.Provider, error)
+
+// merge combines provider factories.
+//
+// In case of an overlapping entry, the later entry will overwrite the previous
+// value.
+func (pf sdkProviderFactories) merge(otherPfs ...sdkProviderFactories) sdkProviderFactories { + result := make(sdkProviderFactories) + + for name, providerFactory := range pf { + result[name] = providerFactory + } + + for _, otherPf := range otherPfs { + for name, providerFactory := range otherPf { + result[name] = providerFactory + } + } + + return result +} + type providerFactories struct { - legacy map[string]func() (*schema.Provider, error) - protov5 map[string]func() (tfprotov5.ProviderServer, error) - protov6 map[string]func() (tfprotov6.ProviderServer, error) + legacy sdkProviderFactories + protov5 protov5ProviderFactories + protov6 protov6ProviderFactories } -func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, factories providerFactories) error { +func runProviderCommand(ctx context.Context, t testing.T, f func() error, wd *plugintest.WorkingDir, factories *providerFactories) error { // don't point to this as a test failure location // point to whatever called it t.Helper() + // This should not happen, but prevent panics just in case. + if factories == nil { + err := fmt.Errorf("Provider factories are missing to run Terraform command. Please report this bug in the testing framework.") + logging.HelperResourceError(ctx, err.Error()) + return err + } + // Run the providers in the same process as the test runner using the // reattach behavior in Terraform. This ensures we get test coverage // and enables the use of delve as a debugger. - ctx, cancel := context.WithCancel(logging.GetTestLogContext(t)) + ctx, cancel := context.WithCancel(ctx) defer cancel() // this is needed so Terraform doesn't default to expecting protocol 4; @@ -43,8 +133,13 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, // plugins. os.Setenv("PLUGIN_PROTOCOL_VERSIONS", "5") - // Terraform doesn't need to reach out to Checkpoint during testing. - wd.Setenv("CHECKPOINT_DISABLE", "1") + // Acceptance testing does not need to call checkpoint as the output + // is not accessible, nor desirable if explicitly using + // TF_ACC_TERRAFORM_PATH or TF_ACC_TERRAFORM_VERSION environment variables. + // + // Avoid calling (tfexec.Terraform).SetEnv() as it will stop copying + // os.Environ() and prevents TF_VAR_ environment variable usage. + os.Setenv("CHECKPOINT_DISABLE", "1") // Terraform 0.12.X and 0.13.X+ treat namespaceless providers // differently in terms of what namespace they default to. So we're @@ -53,12 +148,12 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, // the host or namespace using environment variables. var namespaces []string host := "registry.terraform.io" - if v := os.Getenv("TF_ACC_PROVIDER_NAMESPACE"); v != "" { + if v := os.Getenv(EnvTfAccProviderNamespace); v != "" { namespaces = append(namespaces, v) } else { namespaces = append(namespaces, "-", "hashicorp") } - if v := os.Getenv("TF_ACC_PROVIDER_HOST"); v != "" { + if v := os.Getenv(EnvTfAccProviderHost); v != "" { host = v } @@ -70,12 +165,17 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, // providerName may be returned as terraform-provider-foo, and // we need just foo. So let's fix that. 
providerName = strings.TrimPrefix(providerName, "terraform-provider-") + providerAddress := getProviderAddr(providerName) + + logging.HelperResourceDebug(ctx, "Creating sdkv2 provider instance", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) provider, err := factory() if err != nil { return fmt.Errorf("unable to create provider %q from factory: %w", providerName, err) } + logging.HelperResourceDebug(ctx, "Created sdkv2 provider instance", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) + // keep track of the running factory, so we can make sure it's // shut down. wg.Add(1) @@ -95,14 +195,18 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, }), NoLogOutputOverride: true, UseTFLogSink: t, - ProviderAddr: getProviderAddr(providerName), + ProviderAddr: providerAddress, } - // let's actually start the provider server + logging.HelperResourceDebug(ctx, "Starting sdkv2 provider instance server", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) + config, closeCh, err := plugin.DebugServe(ctx, opts) if err != nil { return fmt.Errorf("unable to serve provider %q: %w", providerName, err) } + + logging.HelperResourceDebug(ctx, "Started sdkv2 provider instance server", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) + tfexecConfig := tfexec.ReattachConfig{ Protocol: config.Protocol, ProtocolVersion: config.ProtocolVersion, @@ -137,6 +241,7 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, // providerName may be returned as terraform-provider-foo, and // we need just foo. So let's fix that. providerName = strings.TrimPrefix(providerName, "terraform-provider-") + providerAddress := getProviderAddr(providerName) // If the user has supplied the same provider in both // ProviderFactories and ProtoV5ProviderFactories, they made a @@ -150,11 +255,15 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, } } + logging.HelperResourceDebug(ctx, "Creating tfprotov5 provider instance", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) + provider, err := factory() if err != nil { return fmt.Errorf("unable to create provider %q from factory: %w", providerName, err) } + logging.HelperResourceDebug(ctx, "Created tfprotov5 provider instance", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) + // keep track of the running factory, so we can make sure it's // shut down. wg.Add(1) @@ -174,14 +283,18 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, }), NoLogOutputOverride: true, UseTFLogSink: t, - ProviderAddr: getProviderAddr(providerName), + ProviderAddr: providerAddress, } - // let's actually start the provider server + logging.HelperResourceDebug(ctx, "Starting tfprotov5 provider instance server", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) + config, closeCh, err := plugin.DebugServe(ctx, opts) if err != nil { return fmt.Errorf("unable to serve provider %q: %w", providerName, err) } + + logging.HelperResourceDebug(ctx, "Started tfprotov5 provider instance server", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) + tfexecConfig := tfexec.ReattachConfig{ Protocol: config.Protocol, ProtocolVersion: config.ProtocolVersion, @@ -217,6 +330,7 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, // providerName may be returned as terraform-provider-foo, and // we need just foo. So let's fix that. 
providerName = strings.TrimPrefix(providerName, "terraform-provider-") + providerAddress := getProviderAddr(providerName) // If the user has already registered this provider in // ProviderFactories or ProtoV5ProviderFactories, they made a @@ -230,11 +344,15 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, } } + logging.HelperResourceDebug(ctx, "Creating tfprotov6 provider instance", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) + provider, err := factory() if err != nil { return fmt.Errorf("unable to create provider %q from factory: %w", providerName, err) } + logging.HelperResourceDebug(ctx, "Created tfprotov6 provider instance", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) + // keep track of the running factory, so we can make sure it's // shut down. wg.Add(1) @@ -250,15 +368,18 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, }), NoLogOutputOverride: true, UseTFLogSink: t, - ProviderAddr: getProviderAddr(providerName), + ProviderAddr: providerAddress, } - // let's actually start the provider server + logging.HelperResourceDebug(ctx, "Starting tfprotov6 provider instance server", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) + config, closeCh, err := plugin.DebugServe(ctx, opts) if err != nil { return fmt.Errorf("unable to serve provider %q: %w", providerName, err) } + logging.HelperResourceDebug(ctx, "Started tfprotov6 provider instance server", map[string]interface{}{logging.KeyProviderAddress: providerAddress}) + tfexecConfig := tfexec.ReattachConfig{ Protocol: config.Protocol, ProtocolVersion: config.ProtocolVersion, @@ -290,26 +411,35 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, // set the working directory reattach info that will tell Terraform how to // connect to our various running servers. - wd.SetReattachInfo(reattachInfo) + wd.SetReattachInfo(ctx, reattachInfo) + + logging.HelperResourceTrace(ctx, "Calling wrapped Terraform CLI command") // ok, let's call whatever Terraform command the test was trying to // call, now that we know it'll attach back to those servers we just // started. err := f() if err != nil { - log.Printf("[WARN] Got error running Terraform: %s", err) + logging.HelperResourceWarn(ctx, "Error running Terraform CLI command", map[string]interface{}{logging.KeyError: err}) } + logging.HelperResourceTrace(ctx, "Called wrapped Terraform CLI command") + logging.HelperResourceDebug(ctx, "Stopping providers") + // cancel the servers so they'll return. Otherwise, this closeCh won't // get closed, and we'll hang here. cancel() + logging.HelperResourceTrace(ctx, "Waiting for providers to stop") + // wait for the servers to actually shut down; it may take a moment for // them to clean up, or whatever. // TODO: add a timeout here? // PC: do we need one? The test will time out automatically... wg.Wait() + logging.HelperResourceTrace(ctx, "Providers have successfully stopped") + // once we've run the Terraform command, let's remove the reattach // information from the WorkingDir's environment. 
The WorkingDir will // persist until the next call, but the server in the reattach info @@ -327,10 +457,10 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, func getProviderAddr(name string) string { host := "registry.terraform.io" namespace := "hashicorp" - if v := os.Getenv("TF_ACC_PROVIDER_NAMESPACE"); v != "" { + if v := os.Getenv(EnvTfAccProviderNamespace); v != "" { namespace = v } - if v := os.Getenv("TF_ACC_PROVIDER_HOST"); v != "" { + if v := os.Getenv(EnvTfAccProviderHost); v != "" { host = v } return strings.TrimSuffix(host, "/") + "/" + diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go index 7f8c22529..497bcc1c9 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go @@ -226,7 +226,7 @@ func (sf *shimmedFlatmap) AddMap(prefix string, m map[string]interface{}) error err := sf.AddEntry(k, value) if err != nil { - return err + return fmt.Errorf("unable to add map key %q entry: %w", k, err) } } @@ -235,7 +235,9 @@ func (sf *shimmedFlatmap) AddMap(prefix string, m map[string]interface{}) error mapLength = fmt.Sprintf("%s.%s", prefix, "%") } - sf.AddEntry(mapLength, strconv.Itoa(len(m))) + if err := sf.AddEntry(mapLength, strconv.Itoa(len(m))); err != nil { + return fmt.Errorf("unable to add map length %q entry: %w", mapLength, err) + } return nil } @@ -245,12 +247,14 @@ func (sf *shimmedFlatmap) AddSlice(name string, elements []interface{}) error { key := fmt.Sprintf("%s.%d", name, i) err := sf.AddEntry(key, elem) if err != nil { - return err + return fmt.Errorf("unable to add slice key %q entry: %w", key, err) } } sliceLength := fmt.Sprintf("%s.#", name) - sf.AddEntry(sliceLength, strconv.Itoa(len(elements))) + if err := sf.AddEntry(sliceLength, strconv.Itoa(len(elements))); err != nil { + return fmt.Errorf("unable to add slice length %q entry: %w", sliceLength, err) + } return nil } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_providers.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_providers.go new file mode 100644 index 000000000..c09e4657c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_providers.go @@ -0,0 +1,56 @@ +package resource + +import ( + "context" + "fmt" + "strings" +) + +// providerConfig takes the list of providers in a TestCase and returns a +// config with only empty provider blocks. This is useful for Import, where no +// config is provided, but the providers must be defined. +func (c TestCase) providerConfig(_ context.Context) string { + var providerBlocks, requiredProviderBlocks strings.Builder + + // [BF] The Providers field handling predates the logic being moved to this + // method. It's not entirely clear to me at this time why this field + // is being used and not the others, but leaving it here just in case + // it does have a special purpose that wasn't being unit tested prior. 
+ for name := range c.Providers { + providerBlocks.WriteString(fmt.Sprintf("provider %q {}\n", name)) + } + + for name, externalProvider := range c.ExternalProviders { + providerBlocks.WriteString(fmt.Sprintf("provider %q {}\n", name)) + + if externalProvider.Source == "" && externalProvider.VersionConstraint == "" { + continue + } + + requiredProviderBlocks.WriteString(fmt.Sprintf(" %s = {\n", name)) + + if externalProvider.Source != "" { + requiredProviderBlocks.WriteString(fmt.Sprintf(" source = %q\n", externalProvider.Source)) + } + + if externalProvider.VersionConstraint != "" { + requiredProviderBlocks.WriteString(fmt.Sprintf(" version = %q\n", externalProvider.VersionConstraint)) + } + + requiredProviderBlocks.WriteString(" }\n") + } + + if requiredProviderBlocks.Len() > 0 { + return fmt.Sprintf(` +terraform { + required_providers { +%[1]s + } +} + +%[2]s +`, strings.TrimSuffix(requiredProviderBlocks.String(), "\n"), providerBlocks.String()) + } + + return providerBlocks.String() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_validate.go new file mode 100644 index 000000000..39e5da46c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_validate.go @@ -0,0 +1,85 @@ +package resource + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" +) + +// hasProviders returns true if the TestCase has set any of the +// ExternalProviders, ProtoV5ProviderFactories, ProtoV6ProviderFactories, +// ProviderFactories, or Providers fields. +func (c TestCase) hasProviders(_ context.Context) bool { + if len(c.ExternalProviders) > 0 { + return true + } + + if len(c.ProtoV5ProviderFactories) > 0 { + return true + } + + if len(c.ProtoV6ProviderFactories) > 0 { + return true + } + + if len(c.ProviderFactories) > 0 { + return true + } + + if len(c.Providers) > 0 { + return true + } + + return false +} + +// validate ensures the TestCase is valid based on the following criteria: +// +// - No overlapping ExternalProviders and Providers entries +// - No overlapping ExternalProviders and ProviderFactories entries +// - TestStep validations performed by the (TestStep).validate() method. 
+// +func (c TestCase) validate(ctx context.Context) error { + logging.HelperResourceTrace(ctx, "Validating TestCase") + + if len(c.Steps) == 0 { + err := fmt.Errorf("TestCase missing Steps") + logging.HelperResourceError(ctx, "TestCase validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + for name := range c.ExternalProviders { + if _, ok := c.Providers[name]; ok { + err := fmt.Errorf("TestCase provider %q set in both ExternalProviders and Providers", name) + logging.HelperResourceError(ctx, "TestCase validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if _, ok := c.ProviderFactories[name]; ok { + err := fmt.Errorf("TestCase provider %q set in both ExternalProviders and ProviderFactories", name) + logging.HelperResourceError(ctx, "TestCase validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + } + + testCaseHasProviders := c.hasProviders(ctx) + + for stepIndex, step := range c.Steps { + stepNumber := stepIndex + 1 // Use 1-based index for humans + stepValidateReq := testStepValidateRequest{ + StepNumber: stepNumber, + TestCaseHasProviders: testCaseHasProviders, + } + + err := step.validate(ctx, stepValidateReq) + + if err != nil { + err := fmt.Errorf("TestStep %d/%d validation error: %w", stepNumber, len(c.Steps), err) + logging.HelperResourceError(ctx, "TestCase validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go index 6d82d8017..8cb4dbaa6 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go @@ -1,6 +1,7 @@ package resource import ( + "context" "errors" "flag" "fmt" @@ -12,13 +13,14 @@ import ( "time" "github.com/hashicorp/go-multierror" - testing "github.com/mitchellh/go-testing-interface" + "github.com/mitchellh/go-testing-interface" "github.com/hashicorp/terraform-plugin-go/tfprotov5" "github.com/hashicorp/terraform-plugin-go/tfprotov6" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) @@ -48,7 +50,7 @@ var flagSweepAllowFailures = flag.Bool("sweep-allow-failures", false, "Enable to var flagSweepRun = flag.String("sweep-run", "", "Comma seperated list of Sweeper Tests to run") var sweeperFuncs map[string]*Sweeper -// type SweeperFunc is a signature for a function that acts as a sweeper. It +// SweeperFunc is a signature for a function that acts as a sweeper. It // accepts a string for the region that the sweeper is to be ran in. This // function must be able to construct a valid client for that region. type SweeperFunc func(r string) error @@ -84,6 +86,27 @@ func AddTestSweepers(name string, s *Sweeper) { sweeperFuncs[name] = s } +// TestMain adds sweeper functionality to the "go test" command, otherwise +// tests are executed as normal. Most provider acceptance tests are written +// using the Test() function of this package, which imposes its own +// requirements and Terraform CLI behavior. 
Refer to that function's
+// documentation for additional details.
+//
+// Sweepers enable infrastructure cleanup functions to be included with
+// resource definitions, typically so developers can remove all resources of
+// that resource type from testing infrastructure in case of failures that
+// prevented the normal resource destruction behavior of acceptance tests.
+// Use the AddTestSweepers() function to configure available sweepers.
+//
+// Sweeper flags added to the "go test" command:
+//
+// -sweep: Comma-separated list of locations/regions to run available sweepers.
+// -sweep-allow-failures: Enable to allow other sweepers to run after failures.
+// -sweep-run: Comma-separated list of resource type sweepers to run. Defaults
+// to all sweepers.
+//
+// Refer to the Env prefixed constants for environment variables that further
+// control testing functionality.
 func TestMain(m interface {
 Run() int
 }) {
@@ -126,7 +149,7 @@ func runSweepers(regions []string, sweepers map[string]*Sweeper, allowFailures b
 return sweeperRunList, fmt.Errorf("sweeper (%s) for region (%s) failed: %s", sweeper.Name, region, err)
 }
 }
- elapsed := time.Now().Sub(start)
+ elapsed := time.Since(start)
 log.Printf("Completed Sweepers for region (%s) in %s", region, elapsed)
 log.Printf("Sweeper Tests for region (%s) ran successfully:\n", region)
@@ -214,6 +237,7 @@ func runSweeperWithRegion(region string, s *Sweeper, sweepers map[string]*Sweepe
 depSweeper, ok := sweepers[dep]
 if !ok {
+ log.Printf("[ERROR] Sweeper (%s) has dependency (%s), but that sweeper was not found", s.Name, dep)
 return fmt.Errorf("sweeper (%s) has dependency (%s), but that sweeper was not found", s.Name, dep)
 }
@@ -239,7 +263,7 @@ func runSweeperWithRegion(region string, s *Sweeper, sweepers map[string]*Sweepe
 start := time.Now()
 runE := s.F(region)
- elapsed := time.Now().Sub(start)
+ elapsed := time.Since(start)
 log.Printf("[DEBUG] Completed Sweeper (%s) in region (%s) in %s", s.Name, region, elapsed)
@@ -252,7 +276,8 @@ func runSweeperWithRegion(region string, s *Sweeper, sweepers map[string]*Sweepe
 return runE
 }
-const TestEnvVar = "TF_ACC"
+// Deprecated: Use EnvTfAcc instead.
+const TestEnvVar = EnvTfAcc
 // TestCheckFunc is the callback type used with acceptance tests to check
 // the state of a resource. The state passed in is the latest state known,
@@ -275,6 +300,9 @@ type ErrorCheckFunc func(error) error
 //
 // When the destroy plan is executed, the config from the last TestStep
 // is used to plan it.
+//
+// Refer to the Env prefixed constants for environment variables that further
+// control testing functionality.
 type TestCase struct {
 // IsUnitTest allows a test to run regardless of the TF_ACC
 // environment variable. This should be used with care - only for
@@ -291,6 +319,11 @@ type TestCase struct {
 // ProviderFactories can be specified for the providers that are valid.
 //
+ // This can also be specified at the TestStep level to enable per-step
+ // differences in providers, however all provider specifications must
+ // be done either at the TestCase level or TestStep level, otherwise the
+ // testing framework will raise an error and fail the test.
+ //
 // These are the providers that can be referenced within the test. Each key
 // is an individually addressable provider. Typically you will only pass a
 // single value here for the provider you are testing. Aliases are not
@@ -312,6 +345,11 @@ type TestCase struct {
 // ProtoV5ProviderFactories serves the same purpose as ProviderFactories,
 // but for protocol v5 providers defined using the terraform-plugin-go
 // ProviderServer interface.
+ //
+ // This can also be specified at the TestStep level to enable per-step
+ // differences in providers, however all provider specifications must
+ // be done either at the TestCase level or TestStep level, otherwise the
+ // testing framework will raise an error and fail the test.
 ProtoV5ProviderFactories map[string]func() (tfprotov5.ProviderServer, error)
 // ProtoV6ProviderFactories serves the same purpose as ProviderFactories,
@@ -319,6 +357,11 @@ type TestCase struct {
 // ProviderServer interface.
 // The version of Terraform used in acceptance testing must be greater
 // than or equal to v0.15.4 to use ProtoV6ProviderFactories.
+ //
+ // This can also be specified at the TestStep level to enable per-step
+ // differences in providers, however all provider specifications must
+ // be done either at the TestCase level or TestStep level, otherwise the
+ // testing framework will raise an error and fail the test.
 ProtoV6ProviderFactories map[string]func() (tfprotov6.ProviderServer, error)
 // Providers is the ResourceProvider that will be under test.
@@ -327,11 +370,18 @@ type TestCase struct {
 Providers map[string]*schema.Provider
 // ExternalProviders are providers the TestCase relies on that should
- // be downloaded from the registry during init. This is only really
- // necessary to set if you're using import, as providers in your config
- // will be automatically retrieved during init. Import doesn't use a
- // config, however, so we allow manually specifying them here to be
- // downloaded for import tests.
+ // be downloaded from the registry during init.
+ //
+ // This can also be specified at the TestStep level to enable per-step
+ // differences in providers, however all provider specifications must
+ // be done either at the TestCase level or TestStep level, otherwise the
+ // testing framework will raise an error and fail the test.
+ //
+ // This is generally unnecessary to set at the TestCase level, however
+ // it has existed in the testing framework prior to the introduction of
+ // TestStep level specification and was only necessary for performing
+ // import testing where the configuration contained a provider outside the
+ // one under test.
 ExternalProviders map[string]ExternalProvider
 // PreventPostDestroyRefresh can be set to true for cases where data sources
@@ -350,16 +400,18 @@ type TestCase struct {
 // same state. Each step can have its own check to verify correctness.
 Steps []TestStep
- // The settings below control the "ID-only refresh test." This is
- // an enabled-by-default test that tests that a refresh can be
- // refreshed with only an ID to result in the same attributes.
- // This validates completeness of Refresh.
- //
- // IDRefreshName is the name of the resource to check. This will
- // default to the first non-nil primary resource in the state.
+ // IDRefreshName is the name of the resource to check during ID-only
+ // refresh testing, which ensures that a resource can be refreshed solely
+ // by its identifier. This will default to the first non-nil primary
+ // resource in the state. This check runs after every TestStep.
 //
- // IDRefreshIgnore is a list of configuration keys that will be ignored.
- IDRefreshName string + // While not deprecated, most resource tests should instead prefer using + // TestStep.ImportState based testing as it works with multiple attribute + // identifiers and also verifies resource import functionality. + IDRefreshName string + + // IDRefreshIgnore is a list of configuration keys that will be ignored + // during ID-only refresh testing. IDRefreshIgnore []string } @@ -376,6 +428,9 @@ type ExternalProvider struct { // Multiple TestSteps can be sequenced in a Test to allow testing // potentially complex update logic. In general, simply create/destroy // tests will only need one step. +// +// Refer to the Env prefixed constants for environment variables that further +// control testing functionality. type TestStep struct { // ResourceName should be set to the name of the resource // that is being tested. Example: "aws_instance.foo". Various test @@ -414,6 +469,9 @@ type TestStep struct { // Config a string of the configuration to give to Terraform. If this // is set, then the TestCase will execute this step with the same logic // as a `terraform apply`. + // + // JSON Configuration Syntax can be used and is assumed whenever Config + // contains valid JSON. Config string // Check is called after the Config is applied. Use this step to @@ -452,8 +510,15 @@ type TestStep struct { // are tested alongside real resources PreventPostDestroyRefresh bool - // SkipFunc is called before applying config, but after PreConfig - // This is useful for defining test steps with platform-dependent checks + // SkipFunc enables skipping the TestStep, based on environment criteria. + // For example, this can prevent running certain steps that may be runtime + // platform or API configuration dependent. + // + // Return true with no error to skip the test step. The error return + // should be used to signify issues that prevented the function from + // completing as expected. + // + // SkipFunc is called after PreConfig but before applying the Config. SkipFunc func() (bool, error) //--------------------------------------------------------------- @@ -498,14 +563,84 @@ type TestStep struct { // fields that can't be refreshed and don't matter. ImportStateVerify bool ImportStateVerifyIgnore []string + + // ProviderFactories can be specified for the providers that are valid for + // this TestStep. When providers are specified at the TestStep level, all + // TestStep within a TestCase must declare providers. + // + // This can also be specified at the TestCase level for all TestStep, + // however all provider specifications must be done either at the TestCase + // level or TestStep level, otherwise the testing framework will raise an + // error and fail the test. + // + // These are the providers that can be referenced within the test. Each key + // is an individually addressable provider. Typically you will only pass a + // single value here for the provider you are testing. Aliases are not + // supported by the test framework, so to use multiple provider instances, + // you should add additional copies to this map with unique names. To set + // their configuration, you would reference them similar to the following: + // + // provider "my_factory_key" { + // # ... + // } + // + // resource "my_resource" "mr" { + // provider = my_factory_key + // + // # ... 
+ // }
+ ProviderFactories map[string]func() (*schema.Provider, error)
+
+ // ProtoV5ProviderFactories serves the same purpose as ProviderFactories,
+ // but for protocol v5 providers defined using the terraform-plugin-go
+ // ProviderServer interface. When providers are specified at the TestStep
+ // level, all TestStep within a TestCase must declare providers.
+ //
+ // This can also be specified at the TestCase level for all TestStep,
+ // however all provider specifications must be done either at the TestCase
+ // level or TestStep level, otherwise the testing framework will raise an
+ // error and fail the test.
+ ProtoV5ProviderFactories map[string]func() (tfprotov5.ProviderServer, error)
+
+ // ProtoV6ProviderFactories serves the same purpose as ProviderFactories,
+ // but for protocol v6 providers defined using the terraform-plugin-go
+ // ProviderServer interface.
+ // The version of Terraform used in acceptance testing must be greater
+ // than or equal to v0.15.4 to use ProtoV6ProviderFactories. When providers
+ // are specified at the TestStep level, all TestStep within a TestCase must
+ // declare providers.
+ //
+ // This can also be specified at the TestCase level for all TestStep,
+ // however all provider specifications must be done either at the TestCase
+ // level or TestStep level, otherwise the testing framework will raise an
+ // error and fail the test.
+ ProtoV6ProviderFactories map[string]func() (tfprotov6.ProviderServer, error)
+
+ // ExternalProviders are providers the TestStep relies on that should
+ // be downloaded from the registry during init. When providers are
+ // specified at the TestStep level, all TestStep within a TestCase must
+ // declare providers.
+ //
+ // This can also be specified at the TestCase level for all TestStep,
+ // however all provider specifications must be done either at the TestCase
+ // level or TestStep level, otherwise the testing framework will raise an
+ // error and fail the test.
+ //
+ // Other than specifying an earlier version of the provider under test,
+ // typically for state upgrader testing, this is generally only necessary
+ // for performing import testing where the prior TestStep configuration
+ // contained a provider outside the one under test.
+ ExternalProviders map[string]ExternalProvider
 }
 // ParallelTest performs an acceptance test on a resource, allowing concurrency
-// with other ParallelTest.
+// with other ParallelTest. The number of concurrent tests is controlled by the
+// "go test" command -parallel flag.
 //
 // Tests will fail if they do not properly handle conditions to allow multiple
 // tests to occur against the same resource or service (e.g. random naming).
-// All other requirements of the Test function also apply to this function.
+//
+// Test() function requirements and documentation also apply to this function.
 func ParallelTest(t testing.T, c TestCase) {
 t.Helper()
 t.Parallel()
@@ -522,98 +657,100 @@ func ParallelTest(t testing.T, c TestCase) {
 // the "-test.v" flag) is set. Because some acceptance tests take quite
 // long, we require the verbose flag so users are able to see progress
 // output.
+//
+// Use the ParallelTest() function to automatically set (*testing.T).Parallel()
+// to enable testing concurrency. Use the UnitTest() function to automatically
+// set the TestCase type IsUnitTest field.
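+//
+// A minimal usage sketch follows; the provider factory helper and the
+// configuration string are hypothetical stand-ins, not part of this
+// package:
+//
+//   func TestAccThing_basic(t *testing.T) {
+//     resource.Test(t, resource.TestCase{
+//       ProviderFactories: testAccProviderFactories, // assumed test helper
+//       Steps: []resource.TestStep{
+//         {
+//           Config: testAccThingConfig, // assumed configuration string
+//           Check:  resource.TestCheckResourceAttrSet("myprovider_thing.example", "id"),
+//         },
+//       },
+//     })
+//   }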
+// +// This function will automatically find or install Terraform CLI into a +// temporary directory, based on the following behavior: +// +// - If the TF_ACC_TERRAFORM_PATH environment variable is set, that +// Terraform CLI binary is used if found and executable. If not found or +// executable, an error will be returned unless the +// TF_ACC_TERRAFORM_VERSION environment variable is also set. +// - If the TF_ACC_TERRAFORM_VERSION environment variable is set, install +// and use that Terraform CLI version. +// - If both the TF_ACC_TERRAFORM_PATH and TF_ACC_TERRAFORM_VERSION +// environment variables are unset, perform a lookup for the Terraform +// CLI binary based on the operating system PATH. If not found, the +// latest available Terraform CLI binary is installed. +// +// Refer to the Env prefixed constants for additional details about these +// environment variables, and others, that control testing functionality. func Test(t testing.T, c TestCase) { t.Helper() + ctx := context.Background() + ctx = logging.InitTestContext(ctx, t) + + err := c.validate(ctx) + + if err != nil { + logging.HelperResourceError(ctx, + "Test validation error", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Test validation error: %s", err) + } + // We only run acceptance tests if an env var is set because they're // slow and generally require some outside configuration. You can opt out // of this with OverrideEnvVar on individual TestCases. - if os.Getenv(TestEnvVar) == "" && !c.IsUnitTest { + if os.Getenv(EnvTfAcc) == "" && !c.IsUnitTest { t.Skip(fmt.Sprintf( "Acceptance tests skipped unless env '%s' set", - TestEnvVar)) + EnvTfAcc)) return } - logging.SetOutput(t) - // Copy any explicitly passed providers to factories, this is for backwards compatibility. if len(c.Providers) > 0 { c.ProviderFactories = map[string]func() (*schema.Provider, error){} for name, p := range c.Providers { - if _, ok := c.ProviderFactories[name]; ok { - t.Fatalf("ProviderFactory for %q already exists, cannot overwrite with Provider", name) - } prov := p - c.ProviderFactories[name] = func() (*schema.Provider, error) { + c.ProviderFactories[name] = func() (*schema.Provider, error) { //nolint:unparam // required signature return prov, nil } } } + logging.HelperResourceDebug(ctx, "Starting TestCase") + // Run the PreCheck if we have it. // This is done after the auto-configure to allow providers // to override the default auto-configure parameters. if c.PreCheck != nil { + logging.HelperResourceDebug(ctx, "Calling TestCase PreCheck") + c.PreCheck() + + logging.HelperResourceDebug(ctx, "Called TestCase PreCheck") } sourceDir, err := os.Getwd() if err != nil { t.Fatalf("Error getting working dir: %s", err) } - helper := plugintest.AutoInitProviderHelper(sourceDir) + helper := plugintest.AutoInitProviderHelper(ctx, sourceDir) defer func(helper *plugintest.Helper) { err := helper.Close() if err != nil { - log.Printf("Error cleaning up temporary test files: %s", err) + logging.HelperResourceError(ctx, "Unable to clean up temporary test files", map[string]interface{}{logging.KeyError: err}) } }(helper) - runNewTest(t, c, helper) -} - -// testProviderConfig takes the list of Providers in a TestCase and returns a -// config with only empty provider blocks. This is useful for Import, where no -// config is provided, but the providers must be defined. 
-func testProviderConfig(c TestCase) (string, error) {
- var lines []string
- var requiredProviders []string
- for p := range c.Providers {
- lines = append(lines, fmt.Sprintf("provider %q {}\n", p))
- }
- for p, v := range c.ExternalProviders {
- if _, ok := c.Providers[p]; ok {
- return "", fmt.Errorf("Provider %q set in both Providers and ExternalProviders for TestCase. Must be set in only one.", p)
- }
- if _, ok := c.ProviderFactories[p]; ok {
- return "", fmt.Errorf("Provider %q set in both ProviderFactories and ExternalProviders for TestCase. Must be set in only one.", p)
- }
- lines = append(lines, fmt.Sprintf("provider %q {}\n", p))
- var providerBlock string
- if v.VersionConstraint != "" {
- providerBlock = fmt.Sprintf("%s\nversion = %q", providerBlock, v.VersionConstraint)
- }
- if v.Source != "" {
- providerBlock = fmt.Sprintf("%s\nsource = %q", providerBlock, v.Source)
- }
- if providerBlock != "" {
- providerBlock = fmt.Sprintf("%s = {%s\n}\n", p, providerBlock)
- }
- requiredProviders = append(requiredProviders, providerBlock)
- }
-
- if len(requiredProviders) > 0 {
- lines = append([]string{fmt.Sprintf("terraform {\nrequired_providers {\n%s}\n}\n\n", strings.Join(requiredProviders, ""))}, lines...)
- }
+ runNewTest(ctx, t, c, helper)
- return strings.Join(lines, ""), nil
+ logging.HelperResourceDebug(ctx, "Finished TestCase")
 }
 // UnitTest is a helper to force the acceptance testing harness to run in the
 // normal unit test suite. This should only be used for resource that don't
 // have any external dependencies.
+//
+// Test() function requirements and documentation also apply to this function.
 func UnitTest(t testing.T, c TestCase) {
 t.Helper()
@@ -622,10 +759,6 @@ func UnitTest(t testing.T, c TestCase) {
 }
 func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) {
- if c.ResourceName == "" {
- return nil, fmt.Errorf("ResourceName must be set in TestStep")
- }
-
 for _, m := range state.Modules {
 if len(m.Resources) > 0 {
 if v, ok := m.Resources[c.ResourceName]; ok {
@@ -643,6 +776,9 @@ func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState,
 //
 // As a user testing their provider, this lets you decompose your checks
 // into smaller pieces more easily.
+//
+// ComposeTestCheckFunc returns immediately on the first TestCheckFunc error.
+// To aggregate all errors, use ComposeAggregateTestCheckFunc instead.
 func ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc {
 return func(s *terraform.State) error {
 for i, f := range fs {
@@ -677,10 +813,48 @@ func ComposeAggregateTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc {
 }
 }
-// TestCheckResourceAttrSet is a TestCheckFunc which ensures a value
-// exists in state for the given name/key combination. It is useful when
-// testing that computed values were set, when it is not possible to
-// know ahead of time what the values will be.
+// TestCheckResourceAttrSet ensures any value exists in the state for the
+// given name and key combination. The opposite of this TestCheckFunc is
+// TestCheckNoResourceAttr. State value checking is only recommended for
+// testing Computed attributes and attribute defaults.
+//
+// Use this as a last resort when a more specific TestCheckFunc cannot be
+// implemented, such as:
+//
+// - TestCheckResourceAttr: Equality checking of non-TypeSet state value.
+// - TestCheckResourceAttrPair: Equality checking of non-TypeSet state
+// value, based on another state value.
+// - TestCheckTypeSet*: Equality checking of TypeSet state values.
+// - TestMatchResourceAttr: Regular expression checking of non-TypeSet
+// state value.
+// - TestMatchTypeSet*: Regular expression checking on TypeSet state values.
+//
+// For managed resources, the name parameter is a combination of the resource
+// type, a period (.), and the name label. The name for the below example
+// configuration would be "myprovider_thing.example".
+//
+// resource "myprovider_thing" "example" { ... }
+//
+// For data sources, the name parameter is a combination of the keyword "data",
+// a period (.), the data source type, a period (.), and the name label. The
+// name for the below example configuration would be
+// "data.myprovider_thing.example".
+//
+// data "myprovider_thing" "example" { ... }
+//
+// The key parameter is an attribute path in Terraform CLI 0.11 and earlier
+// "flatmap" syntax. Keys start with the attribute name of a top-level
+// attribute. Use the following special key syntax to inspect underlying
+// values of a list or map attribute:
+//
+// - .{NUMBER}: List value at index, e.g. .0 to inspect the first element
+// - .{KEY}: Map value at key, e.g. .example to inspect the example key
+// value
+//
+// While it is possible to check nested attributes under list and map
+// attributes using the special key syntax, checking a list, map, or set
+// attribute directly is not supported. Use TestCheckResourceAttr with
+// the special .# or .% key syntax for those situations instead.
 func TestCheckResourceAttrSet(name, key string) TestCheckFunc {
 return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error {
 is, err := primaryInstanceState(s, name)
@@ -707,15 +881,71 @@ func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCh
 }
 func testCheckResourceAttrSet(is *terraform.InstanceState, name string, key string) error {
- if val, ok := is.Attributes[key]; !ok || val == "" {
- return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key)
+ val, ok := is.Attributes[key]
+
+ if ok && val != "" {
+ return nil
 }
- return nil
+ if _, ok := is.Attributes[key+".#"]; ok {
+ return fmt.Errorf(
+ "%s: list or set attribute '%s' must be checked by element count key (%s) or element value keys (e.g. %s). Set element value checks should use TestCheckTypeSet functions instead.",
+ name,
+ key,
+ key+".#",
+ key+".0",
+ )
+ }
+
+ if _, ok := is.Attributes[key+".%"]; ok {
+ return fmt.Errorf(
+ "%s: map attribute '%s' must be checked by element count key (%s) or element value keys (e.g. %s).",
+ name,
+ key,
+ key+".%",
+ key+".examplekey",
+ )
+ }
+
+ return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key)
 }
-// TestCheckResourceAttr is a TestCheckFunc which validates
-// the value in state for the given name/key combination.
+// TestCheckResourceAttr ensures a specific value is stored in state for the
+// given name and key combination. State value checking is only recommended for
+// testing Computed attributes and attribute defaults.
+//
+// For managed resources, the name parameter is a combination of the resource
+// type, a period (.), and the name label. The name for the below example
+// configuration would be "myprovider_thing.example".
+//
+// resource "myprovider_thing" "example" { ... }
+//
+// For data sources, the name parameter is a combination of the keyword "data",
+// a period (.), the data source type, a period (.), and the name label. The
+// name for the below example configuration would be
+// "data.myprovider_thing.example".
+//
+// data "myprovider_thing" "example" { ... }
+//
+// The key parameter is an attribute path in Terraform CLI 0.11 and earlier
+// "flatmap" syntax. Keys start with the attribute name of a top-level
+// attribute. Use the following special key syntax to inspect list, map, and
+// set attributes:
+//
+// - .{NUMBER}: List value at index, e.g. .0 to inspect the first element.
+// Use the TestCheckTypeSet* and TestMatchTypeSet* functions instead
+// for sets.
+// - .{KEY}: Map value at key, e.g. .example to inspect the example key
+// value.
+// - .#: Number of elements in list or set.
+// - .%: Number of elements in map.
+//
+// The value parameter is the stringified data to check at the given key. Use
+// the following attribute type rules to set the value:
+//
+// - Boolean: "false" or "true".
+// - Float/Integer: Stringified number, such as "1.2" or "123".
+// - String: No conversion necessary.
 func TestCheckResourceAttr(name, key, value string) TestCheckFunc {
 return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error {
 is, err := primaryInstanceState(s, name)
@@ -742,23 +972,40 @@ func TestCheckModuleResourceAttr(mp []string, name string, key string, value str
 }
 func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value string) error {
- // Empty containers may be elided from the state.
- // If the intent here is to check for an empty container, allow the key to
- // also be non-existent.
- emptyCheck := false
- if value == "0" && (strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) {
- emptyCheck = true
- }
+ v, ok := is.Attributes[key]
- if v, ok := is.Attributes[key]; !ok || v != value {
- if emptyCheck && !ok {
+ if !ok {
+ // Empty containers may be elided from the state.
+ // If the intent here is to check for an empty container, allow the key to
+ // also be non-existent.
+ if value == "0" && (strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) {
 return nil
 }
- if !ok {
- return fmt.Errorf("%s: Attribute '%s' not found", name, key)
+ if _, ok := is.Attributes[key+".#"]; ok {
+ return fmt.Errorf(
+ "%s: list or set attribute '%s' must be checked by element count key (%s) or element value keys (e.g. %s). Set element value checks should use TestCheckTypeSet functions instead.",
+ name,
+ key,
+ key+".#",
+ key+".0",
+ )
+ }
+
+ if _, ok := is.Attributes[key+".%"]; ok {
+ return fmt.Errorf(
+ "%s: map attribute '%s' must be checked by element count key (%s) or element value keys (e.g. %s).",
+ name,
+ key,
+ key+".%",
+ key+".examplekey",
+ )
 }
+ return fmt.Errorf("%s: Attribute '%s' not found", name, key)
+ }
+
+ if v != value {
 return fmt.Errorf(
 "%s: Attribute '%s' expected %#v, got %#v",
 name,
@@ -766,11 +1013,103 @@ func testCheckResourceAttr(is *terraform.InstanceState, name string, key string,
 value,
 v)
 }
+
 return nil
 }
-// TestCheckNoResourceAttr is a TestCheckFunc which ensures that
-// NO value exists in state for the given name/key combination.
+// CheckResourceAttrWithFunc is the callback type used to apply custom checking
+// logic when using TestCheckResourceAttrWith and a value is found for the
+// given name and key.
+//
+// When this function returns an error, TestCheckResourceAttrWith will fail the check.
+type CheckResourceAttrWithFunc func(value string) error
+
+// TestCheckResourceAttrWith ensures the value stored in state for the
+// given name and key combination is checked against custom validation logic.
+// State value checking is only recommended for testing Computed attributes
+// and attribute defaults.
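+//
+// As an illustrative sketch (the resource name, attribute, and length rule
+// below are hypothetical):
+//
+//   resource.TestCheckResourceAttrWith("myprovider_thing.example", "token",
+//     func(value string) error {
+//       // Custom validation for a Computed attribute whose exact value
+//       // cannot be known ahead of time.
+//       if len(value) < 8 {
+//         return fmt.Errorf("expected token of at least 8 characters, got %q", value)
+//       }
+//       return nil
+//     },
+//   ),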
+//
+// For managed resources, the name parameter is a combination of the resource
+// type, a period (.), and the name label. The name for the below example
+// configuration would be "myprovider_thing.example".
+//
+// resource "myprovider_thing" "example" { ... }
+//
+// For data sources, the name parameter is a combination of the keyword "data",
+// a period (.), the data source type, a period (.), and the name label. The
+// name for the below example configuration would be
+// "data.myprovider_thing.example".
+//
+// data "myprovider_thing" "example" { ... }
+//
+// The key parameter is an attribute path in Terraform CLI 0.11 and earlier
+// "flatmap" syntax. Keys start with the attribute name of a top-level
+// attribute. Use the following special key syntax to inspect list, map, and
+// set attributes:
+//
+// - .{NUMBER}: List value at index, e.g. .0 to inspect the first element.
+// Use the TestCheckTypeSet* and TestMatchTypeSet* functions instead
+// for sets.
+// - .{KEY}: Map value at key, e.g. .example to inspect the example key
+// value.
+// - .#: Number of elements in list or set.
+// - .%: Number of elements in map.
+//
+// The checkValueFunc parameter is a CheckResourceAttrWithFunc. If the
+// attribute value is found in the state, it is passed to this function so
+// custom checking logic can be applied. The function must return an error for
+// the check to fail, or `nil` to succeed.
+func TestCheckResourceAttrWith(name, key string, checkValueFunc CheckResourceAttrWithFunc) TestCheckFunc {
+ return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error {
+ is, err := primaryInstanceState(s, name)
+ if err != nil {
+ return err
+ }
+
+ err = testCheckResourceAttrSet(is, name, key)
+ if err != nil {
+ return err
+ }
+
+ err = checkValueFunc(is.Attributes[key])
+ if err != nil {
+ return fmt.Errorf("%s: Attribute %q value: %w", name, key, err)
+ }
+
+ return nil
+ })
+}
+
+// TestCheckNoResourceAttr ensures no value exists in the state for the
+// given name and key combination. The opposite of this TestCheckFunc is
+// TestCheckResourceAttrSet. State value checking is only recommended for
+// testing Computed attributes and attribute defaults.
+//
+// For managed resources, the name parameter is a combination of the resource
+// type, a period (.), and the name label. The name for the below example
+// configuration would be "myprovider_thing.example".
+//
+// resource "myprovider_thing" "example" { ... }
+//
+// For data sources, the name parameter is a combination of the keyword "data",
+// a period (.), the data source type, a period (.), and the name label. The
+// name for the below example configuration would be
+// "data.myprovider_thing.example".
+//
+// data "myprovider_thing" "example" { ... }
+//
+// The key parameter is an attribute path in Terraform CLI 0.11 and earlier
+// "flatmap" syntax. Keys start with the attribute name of a top-level
+// attribute. Use the following special key syntax to inspect underlying
+// values of a list or map attribute:
+//
+// - .{NUMBER}: List value at index, e.g. .0 to inspect the first element.
+// - .{KEY}: Map value at key, e.g. .example to inspect the example key
+// value.
+//
+// While it is possible to check nested attributes under list and map
+// attributes using the special key syntax, checking a list, map, or set
+// attribute directly is not supported. Use TestCheckResourceAttr with
+// the special .# or .% key syntax for those situations instead.
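+//
+// For example, a sketch with hypothetical names, asserting that a removed
+// attribute is no longer written to state:
+//
+//   resource.TestCheckNoResourceAttr("myprovider_thing.example", "legacy_field"),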
func TestCheckNoResourceAttr(name, key string) TestCheckFunc {
 return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error {
 is, err := primaryInstanceState(s, name)
@@ -797,28 +1136,76 @@ func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestChe
 }
 func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key string) error {
+ v, ok := is.Attributes[key]
+
 // Empty containers may sometimes be included in the state.
 // If the intent here is to check for an empty container, allow the value to
 // also be "0".
- emptyCheck := false
- if strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%") {
- emptyCheck = true
- }
-
- val, exists := is.Attributes[key]
- if emptyCheck && val == "0" {
+ if v == "0" && (strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) {
 return nil
 }
- if exists {
+ if ok {
 return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key)
 }
+ if _, ok := is.Attributes[key+".#"]; ok {
+ return fmt.Errorf(
+ "%s: list or set attribute '%s' must be checked by element count key (%s) or element value keys (e.g. %s). Set element value checks should use TestCheckTypeSet functions instead.",
+ name,
+ key,
+ key+".#",
+ key+".0",
+ )
+ }
+
+ if _, ok := is.Attributes[key+".%"]; ok {
+ return fmt.Errorf(
+ "%s: map attribute '%s' must be checked by element count key (%s) or element value keys (e.g. %s).",
+ name,
+ key,
+ key+".%",
+ key+".examplekey",
+ )
+ }
+
 return nil
 }
-// TestMatchResourceAttr is a TestCheckFunc which checks that the value
-// in state for the given name/key combination matches the given regex.
+// TestMatchResourceAttr ensures a value matching a regular expression is
+// stored in state for the given name and key combination. State value checking
+// is only recommended for testing Computed attributes and attribute defaults.
+//
+// For managed resources, the name parameter is a combination of the resource
+// type, a period (.), and the name label. The name for the below example
+// configuration would be "myprovider_thing.example".
+//
+// resource "myprovider_thing" "example" { ... }
+//
+// For data sources, the name parameter is a combination of the keyword "data",
+// a period (.), the data source type, a period (.), and the name label. The
+// name for the below example configuration would be
+// "data.myprovider_thing.example".
+//
+// data "myprovider_thing" "example" { ... }
+//
+// The key parameter is an attribute path in Terraform CLI 0.11 and earlier
+// "flatmap" syntax. Keys start with the attribute name of a top-level
+// attribute. Use the following special key syntax to inspect list, map, and
+// set attributes:
+//
+// - .{NUMBER}: List value at index, e.g. .0 to inspect the first element.
+// Use the TestCheckTypeSet* and TestMatchTypeSet* functions instead
+// for sets.
+// - .{KEY}: Map value at key, e.g. .example to inspect the example key
+// value.
+// - .#: Number of elements in list or set.
+// - .%: Number of elements in map.
+//
+// The value parameter is a compiled regular expression. A typical pattern is
+// using the regexp.MustCompile() function, which will automatically ensure the
+// regular expression is supported by the Go regular expression handlers during
+// compilation.
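+//
+// For example, a sketch with a hypothetical resource name and computed URL
+// attribute:
+//
+//   resource.TestMatchResourceAttr(
+//     "myprovider_thing.example", "self_link",
+//     regexp.MustCompile(`^https://`),
+//   ),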
func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc {
 return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error {
 is, err := primaryInstanceState(s, name)
@@ -860,6 +1247,9 @@ func testMatchResourceAttr(is *terraform.InstanceState, name string, key string,
 // TestCheckResourceAttrPtr is like TestCheckResourceAttr except the
 // value is a pointer so that it can be updated while the test is running.
 // It will only be dereferenced at the point this step is run.
+//
+// Refer to the TestCheckResourceAttr documentation for more information about
+// setting the name, key, and value parameters.
 func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckFunc {
 return func(s *terraform.State) error {
 return TestCheckResourceAttr(name, key, *value)(s)
@@ -874,8 +1264,39 @@ func TestCheckModuleResourceAttrPtr(mp []string, name string, key string, value
 }
 }
-// TestCheckResourceAttrPair is a TestCheckFunc which validates that the values
-// in state for a pair of name/key combinations are equal.
+// TestCheckResourceAttrPair ensures value equality in state between the first
+// given name and key combination and the second name and key combination.
+// State value checking is only recommended for testing Computed attributes
+// and attribute defaults.
+//
+// For managed resources, the name parameter is a combination of the resource
+// type, a period (.), and the name label. The name for the below example
+// configuration would be "myprovider_thing.example".
+//
+// resource "myprovider_thing" "example" { ... }
+//
+// For data sources, the name parameter is a combination of the keyword "data",
+// a period (.), the data source type, a period (.), and the name label. The
+// name for the below example configuration would be
+// "data.myprovider_thing.example".
+//
+// data "myprovider_thing" "example" { ... }
+//
+// The first and second names may use any combination of managed resources
+// and/or data sources.
+//
+// The key parameter is an attribute path in Terraform CLI 0.11 and earlier
+// "flatmap" syntax. Keys start with the attribute name of a top-level
+// attribute. Use the following special key syntax to inspect list, map, and
+// set attributes:
+//
+// - .{NUMBER}: List value at index, e.g. .0 to inspect the first element.
+// Use the TestCheckTypeSet* and TestMatchTypeSet* functions instead
+// for sets.
+// - .{KEY}: Map value at key, e.g. .example to inspect the example key
+// value.
+// - .#: Number of elements in list or set.
+// - .%: Number of elements in map.
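+//
+// For example, a sketch with hypothetical names, asserting that a data source
+// reads back the same value its companion managed resource created:
+//
+//   resource.TestCheckResourceAttrPair(
+//     "myprovider_thing.example", "name",
+//     "data.myprovider_thing.example", "name",
+//   ),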
func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { return checkIfIndexesIntoTypeSetPair(keyFirst, keySecond, func(s *terraform.State) error { isFirst, err := primaryInstanceState(s, nameFirst) @@ -1004,7 +1425,7 @@ func TestMatchOutput(name string, r *regexp.Regexp) TestCheckFunc { // modulePrimaryInstanceState returns the instance state for the given resource // name in a ModuleState -func modulePrimaryInstanceState(s *terraform.State, ms *terraform.ModuleState, name string) (*terraform.InstanceState, error) { +func modulePrimaryInstanceState(ms *terraform.ModuleState, name string) (*terraform.InstanceState, error) { rs, ok := ms.Resources[name] if !ok { return nil, fmt.Errorf("Not found: %s in %s", name, ms.Path) @@ -1026,14 +1447,14 @@ func modulePathPrimaryInstanceState(s *terraform.State, mp addrs.ModuleInstance, return nil, fmt.Errorf("No module found at: %s", mp) } - return modulePrimaryInstanceState(s, ms, name) + return modulePrimaryInstanceState(ms, name) } // primaryInstanceState returns the primary instance state for the given // resource name in the root module. func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) { ms := s.RootModule() - return modulePrimaryInstanceState(s, ms, name) + return modulePrimaryInstanceState(ms, name) } // indexesIntoTypeSet is a heuristic to try and identify if a flatmap style diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go index 1e39294fd..e28426fde 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go @@ -1,14 +1,21 @@ package resource import ( + "context" "errors" "fmt" - "log" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func testStepTaint(state *terraform.State, step TestStep) error { +func testStepTaint(ctx context.Context, state *terraform.State, step TestStep) error { + if len(step.Taint) == 0 { + return nil + } + + logging.HelperResourceTrace(ctx, fmt.Sprintf("Using TestStep Taint: %v", step.Taint)) + for _, p := range step.Taint { m := state.RootModule() if m == nil { @@ -18,7 +25,7 @@ func testStepTaint(state *terraform.State, step TestStep) error { if !ok { return fmt.Errorf("resource %q not found in state", p) } - log.Printf("[WARN] Test: Explicitly tainting resource %q", p) + logging.HelperResourceWarn(ctx, fmt.Sprintf("Explicitly tainting resource %q", p)) rs.Taint() } return nil diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go index 1dc73906b..f1e607f83 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go @@ -1,8 +1,8 @@ package resource import ( + "context" "fmt" - "log" "reflect" "strings" @@ -10,63 +10,77 @@ import ( tfjson "github.com/hashicorp/terraform-json" testing "github.com/mitchellh/go-testing-interface" - "github.com/hashicorp/terraform-plugin-go/tfprotov5" - "github.com/hashicorp/terraform-plugin-go/tfprotov6" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func runPostTestDestroy(t testing.T, c TestCase, wd *plugintest.WorkingDir, factories map[string]func() (*schema.Provider, error), v5factories map[string]func() (tfprotov5.ProviderServer, error), v6factories map[string]func() (tfprotov6.ProviderServer, error), statePreDestroy *terraform.State) error { +func runPostTestDestroy(ctx context.Context, t testing.T, c TestCase, wd *plugintest.WorkingDir, providers *providerFactories, statePreDestroy *terraform.State) error { t.Helper() - err := runProviderCommand(t, func() error { - return wd.Destroy() - }, wd, providerFactories{ - legacy: factories, - protov5: v5factories, - protov6: v6factories}) + err := runProviderCommand(ctx, t, func() error { + return wd.Destroy(ctx) + }, wd, providers) if err != nil { return err } if c.CheckDestroy != nil { + logging.HelperResourceTrace(ctx, "Using TestCase CheckDestroy") + logging.HelperResourceDebug(ctx, "Calling TestCase CheckDestroy") + if err := c.CheckDestroy(statePreDestroy); err != nil { return err } + + logging.HelperResourceDebug(ctx, "Called TestCase CheckDestroy") } return nil } -func runNewTest(t testing.T, c TestCase, helper *plugintest.Helper) { +func runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest.Helper) { t.Helper() spewConf := spew.NewDefaultConfig() spewConf.SortKeys = true - wd := helper.RequireNewWorkingDir(t) + wd := helper.RequireNewWorkingDir(ctx, t) + + ctx = logging.TestTerraformPathContext(ctx, wd.GetHelper().TerraformExecPath()) + ctx = logging.TestWorkingDirectoryContext(ctx, wd.GetHelper().WorkingDirectory()) + + providers := &providerFactories{ + legacy: c.ProviderFactories, + protov5: c.ProtoV5ProviderFactories, + protov6: c.ProtoV6ProviderFactories, + } defer func() { var statePreDestroy *terraform.State var err error - err = runProviderCommand(t, func() error { - statePreDestroy, err = getState(t, wd) + err = runProviderCommand(ctx, t, func() error { + statePreDestroy, err = getState(ctx, t, wd) if err != nil { return err } return nil - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { + logging.HelperResourceError(ctx, + "Error retrieving state, there may be dangling resources", + map[string]interface{}{logging.KeyError: err}, + ) t.Fatalf("Error retrieving state, there may be dangling resources: %s", err.Error()) return } if !stateIsEmpty(statePreDestroy) { - err := runPostTestDestroy(t, c, wd, c.ProviderFactories, c.ProtoV5ProviderFactories, c.ProtoV6ProviderFactories, statePreDestroy) + err := runPostTestDestroy(ctx, t, c, wd, providers, statePreDestroy) if err != nil { + logging.HelperResourceError(ctx, + "Error running post-test destroy, there may be dangling resources", + map[string]interface{}{logging.KeyError: err}, + ) t.Fatalf("Error running post-test destroy, there may be dangling resources: %s", err.Error()) } } @@ -74,95 +88,233 @@ func runNewTest(t testing.T, c TestCase, helper *plugintest.Helper) { wd.Close() }() - providerCfg, err := testProviderConfig(c) - if err != nil { - t.Fatal(err) - } + if c.hasProviders(ctx) { + err := wd.SetConfig(ctx, c.providerConfig(ctx)) - err = wd.SetConfig(providerCfg) - if err != nil { - t.Fatalf("Error setting test config: %s", err) - } - err = runProviderCommand(t, func() error { - 
return wd.Init() - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) - if err != nil { - t.Fatalf("Error running init: %s", err.Error()) - return + if err != nil { + logging.HelperResourceError(ctx, + "TestCase error setting provider configuration", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestCase error setting provider configuration: %s", err) + } + + err = runProviderCommand(ctx, t, func() error { + return wd.Init(ctx) + }, wd, providers) + + if err != nil { + logging.HelperResourceError(ctx, + "TestCase error running init", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestCase error running init: %s", err.Error()) + } } + logging.HelperResourceDebug(ctx, "Starting TestSteps") + // use this to track last step succesfully applied // acts as default for import tests var appliedCfg string - for i, step := range c.Steps { + for stepIndex, step := range c.Steps { + stepNumber := stepIndex + 1 // 1-based indexing for humans + ctx = logging.TestStepNumberContext(ctx, stepNumber) + + logging.HelperResourceDebug(ctx, "Starting TestStep") + if step.PreConfig != nil { + logging.HelperResourceDebug(ctx, "Calling TestStep PreConfig") step.PreConfig() + logging.HelperResourceDebug(ctx, "Called TestStep PreConfig") } if step.SkipFunc != nil { + logging.HelperResourceDebug(ctx, "Calling TestStep SkipFunc") + skip, err := step.SkipFunc() if err != nil { - t.Fatal(err) + logging.HelperResourceError(ctx, + "Error calling TestStep SkipFunc", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Error calling TestStep SkipFunc: %s", err.Error()) } + + logging.HelperResourceDebug(ctx, "Called TestStep SkipFunc") + if skip { - log.Printf("[WARN] Skipping step %d/%d", i+1, len(c.Steps)) + t.Logf("Skipping step %d/%d due to SkipFunc", stepNumber, len(c.Steps)) + logging.HelperResourceWarn(ctx, "Skipping TestStep due to SkipFunc") continue } } + if step.Config != "" && !step.Destroy && len(step.Taint) > 0 { + var state *terraform.State + + err := runProviderCommand(ctx, t, func() error { + var err error + + state, err = getState(ctx, t, wd) + + if err != nil { + return err + } + + return nil + }, wd, providers) + + if err != nil { + logging.HelperResourceError(ctx, + "TestStep error reading prior state before tainting resources", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestStep %d/%d error reading prior state before tainting resources: %s", stepNumber, len(c.Steps), err) + } + + err = testStepTaint(ctx, state, step) + + if err != nil { + logging.HelperResourceError(ctx, + "TestStep error tainting resources", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestStep %d/%d error tainting resources: %s", stepNumber, len(c.Steps), err) + } + } + + if step.hasProviders(ctx) { + providers = &providerFactories{ + legacy: sdkProviderFactories(c.ProviderFactories).merge(step.ProviderFactories), + protov5: protov5ProviderFactories(c.ProtoV5ProviderFactories).merge(step.ProtoV5ProviderFactories), + protov6: protov6ProviderFactories(c.ProtoV6ProviderFactories).merge(step.ProtoV6ProviderFactories), + } + + providerCfg := step.providerConfig(ctx) + + err := wd.SetConfig(ctx, providerCfg) + + if err != nil { + logging.HelperResourceError(ctx, + "TestStep error setting provider configuration", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestStep %d/%d error setting test provider configuration: %s", stepNumber, len(c.Steps), err) 
+ }
+
+ err = runProviderCommand(
+ ctx,
+ t,
+ func() error {
+ return wd.Init(ctx)
+ },
+ wd,
+ providers,
+ )
+
+ if err != nil {
+ logging.HelperResourceError(ctx,
+ "TestStep error running init",
+ map[string]interface{}{logging.KeyError: err},
+ )
+ t.Fatalf("TestStep %d/%d error running init: %s", stepNumber, len(c.Steps), err.Error())
+ return
+ }
+ }
+
 if step.ImportState {
- err := testStepNewImportState(t, c, helper, wd, step, appliedCfg)
+ logging.HelperResourceTrace(ctx, "TestStep is ImportState mode")
+
+ err := testStepNewImportState(ctx, t, helper, wd, step, appliedCfg, providers)
 if step.ExpectError != nil {
+ logging.HelperResourceDebug(ctx, "Checking TestStep ExpectError")
 if err == nil {
- t.Fatalf("Step %d/%d error running import: expected an error but got none", i+1, len(c.Steps))
+ logging.HelperResourceError(ctx,
+ "Error running import: expected an error but got none",
+ )
+ t.Fatalf("Step %d/%d error running import: expected an error but got none", stepNumber, len(c.Steps))
 }
 if !step.ExpectError.MatchString(err.Error()) {
- t.Fatalf("Step %d/%d error running import, expected an error with pattern (%s), no match on: %s", i+1, len(c.Steps), step.ExpectError.String(), err)
+ logging.HelperResourceError(ctx,
+ fmt.Sprintf("Error running import: expected an error with pattern (%s)", step.ExpectError.String()),
+ map[string]interface{}{logging.KeyError: err},
+ )
+ t.Fatalf("Step %d/%d error running import, expected an error with pattern (%s), no match on: %s", stepNumber, len(c.Steps), step.ExpectError.String(), err)
 }
 } else {
 if err != nil && c.ErrorCheck != nil {
+ logging.HelperResourceDebug(ctx, "Calling TestCase ErrorCheck")
 err = c.ErrorCheck(err)
+ logging.HelperResourceDebug(ctx, "Called TestCase ErrorCheck")
 }
 if err != nil {
- t.Fatalf("Step %d/%d error running import: %s", i+1, len(c.Steps), err)
+ logging.HelperResourceError(ctx,
+ "Error running import",
+ map[string]interface{}{logging.KeyError: err},
+ )
+ t.Fatalf("Step %d/%d error running import: %s", stepNumber, len(c.Steps), err)
 }
 }
+
+ logging.HelperResourceDebug(ctx, "Finished TestStep")
+
 continue
 }
 if step.Config != "" {
- err := testStepNewConfig(t, c, wd, step)
+ logging.HelperResourceTrace(ctx, "TestStep is Config mode")
+
+ err := testStepNewConfig(ctx, t, c, wd, step, providers)
 if step.ExpectError != nil {
+ logging.HelperResourceDebug(ctx, "Checking TestStep ExpectError")
+
 if err == nil {
- t.Fatalf("Step %d/%d, expected an error but got none", i+1, len(c.Steps))
+ logging.HelperResourceError(ctx,
+ "Expected an error but got none",
+ )
+ t.Fatalf("Step %d/%d, expected an error but got none", stepNumber, len(c.Steps))
 }
 if !step.ExpectError.MatchString(err.Error()) {
- t.Fatalf("Step %d/%d, expected an error with pattern, no match on: %s", i+1, len(c.Steps), err)
+ logging.HelperResourceError(ctx,
+ fmt.Sprintf("Expected an error with pattern (%s)", step.ExpectError.String()),
+ map[string]interface{}{logging.KeyError: err},
+ )
+ t.Fatalf("Step %d/%d, expected an error with pattern (%s), no match on: %s", stepNumber, len(c.Steps), step.ExpectError.String(), err)
 }
 } else {
 if err != nil && c.ErrorCheck != nil {
+ logging.HelperResourceDebug(ctx, "Calling TestCase ErrorCheck")
+
 err = c.ErrorCheck(err)
+
+ logging.HelperResourceDebug(ctx, "Called TestCase ErrorCheck")
 }
 if err != nil {
- t.Fatalf("Step %d/%d error: %s", i+1, len(c.Steps), err)
+ logging.HelperResourceError(ctx,
+ "Unexpected error",
+ map[string]interface{}{logging.KeyError: err},
+ )
+ t.Fatalf("Step %d/%d error: %s", stepNumber, len(c.Steps), err)
 }
 }
+
appliedCfg = step.Config + + logging.HelperResourceDebug(ctx, "Finished TestStep") + continue } - t.Fatal("Unsupported test mode") + t.Fatalf("Step %d/%d, unsupported test mode", stepNumber, len(c.Steps)) } } -func getState(t testing.T, wd *plugintest.WorkingDir) (*terraform.State, error) { +func getState(ctx context.Context, t testing.T, wd *plugintest.WorkingDir) (*terraform.State, error) { t.Helper() - jsonState, err := wd.State() + jsonState, err := wd.State(ctx) if err != nil { return nil, err } @@ -188,7 +340,7 @@ func planIsEmpty(plan *tfjson.Plan) bool { return true } -func testIDRefresh(c TestCase, t testing.T, wd *plugintest.WorkingDir, step TestStep, r *terraform.ResourceState) error { +func testIDRefresh(ctx context.Context, t testing.T, c TestCase, wd *plugintest.WorkingDir, step TestStep, r *terraform.ResourceState, providers *providerFactories) error { t.Helper() spewConf := spew.NewDefaultConfig() @@ -202,36 +354,29 @@ func testIDRefresh(c TestCase, t testing.T, wd *plugintest.WorkingDir, step Test // Temporarily set the config to a minimal provider config for the refresh // test. After the refresh we can reset it. - cfg, err := testProviderConfig(c) - if err != nil { - return err - } - err = wd.SetConfig(cfg) + err := wd.SetConfig(ctx, c.providerConfig(ctx)) if err != nil { t.Fatalf("Error setting import test config: %s", err) } defer func() { - err = wd.SetConfig(step.Config) + err = wd.SetConfig(ctx, step.Config) if err != nil { t.Fatalf("Error resetting test config: %s", err) } }() // Refresh! - err = runProviderCommand(t, func() error { - err = wd.Refresh() + err = runProviderCommand(ctx, t, func() error { + err = wd.Refresh(ctx) if err != nil { t.Fatalf("Error running terraform refresh: %s", err) } - state, err = getState(t, wd) + state, err = getState(ctx, t, wd) if err != nil { return err } return nil - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return err } @@ -246,6 +391,11 @@ func testIDRefresh(c TestCase, t testing.T, wd *plugintest.WorkingDir, step Test } actual := actualR.Primary.Attributes expected := r.Primary.Attributes + + if len(c.IDRefreshIgnore) > 0 { + logging.HelperResourceTrace(ctx, fmt.Sprintf("Using TestCase IDRefreshIgnore: %v", c.IDRefreshIgnore)) + } + // Remove fields we're ignoring for _, v := range c.IDRefreshIgnore { for k := range actual { diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go index 9900cc396..09d18c364 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go @@ -1,56 +1,31 @@ package resource import ( + "context" "errors" "fmt" tfjson "github.com/hashicorp/terraform-json" testing "github.com/mitchellh/go-testing-interface" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func testStepNewConfig(t testing.T, c TestCase, wd *plugintest.WorkingDir, step TestStep) error { +func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugintest.WorkingDir, step TestStep, providers *providerFactories) error { t.Helper() - var idRefreshCheck *terraform.ResourceState - 
idRefresh := c.IDRefreshName != "" - - if !step.Destroy { - var state *terraform.State - var err error - err = runProviderCommand(t, func() error { - state, err = getState(t, wd) - if err != nil { - return err - } - return nil - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) - if err != nil { - return err - } - if err := testStepTaint(state, step); err != nil { - return fmt.Errorf("Error when tainting resources: %s", err) - } - } - - err := wd.SetConfig(step.Config) + err := wd.SetConfig(ctx, step.Config) if err != nil { return fmt.Errorf("Error setting config: %w", err) } // require a refresh before applying // failing to do this will result in data sources not being updated - err = runProviderCommand(t, func() error { - return wd.Refresh() - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + err = runProviderCommand(ctx, t, func() error { + return wd.Refresh(ctx) + }, wd, providers) if err != nil { return fmt.Errorf("Error running pre-apply refresh: %w", err) } @@ -59,16 +34,15 @@ func testStepNewConfig(t testing.T, c TestCase, wd *plugintest.WorkingDir, step // subsequent Apply, and use the follow-up Plan that checks for // permadiffs if !step.PlanOnly { + logging.HelperResourceDebug(ctx, "Running Terraform CLI plan and apply") + // Plan! - err := runProviderCommand(t, func() error { + err := runProviderCommand(ctx, t, func() error { if step.Destroy { - return wd.CreateDestroyPlan() + return wd.CreateDestroyPlan(ctx) } - return wd.CreatePlan() - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + return wd.CreatePlan(ctx) + }, wd, providers) if err != nil { return fmt.Errorf("Error running pre-apply plan: %w", err) } @@ -77,27 +51,21 @@ func testStepNewConfig(t testing.T, c TestCase, wd *plugintest.WorkingDir, step // that the destroy steps can verify their behavior in the // check function var stateBeforeApplication *terraform.State - err = runProviderCommand(t, func() error { - stateBeforeApplication, err = getState(t, wd) + err = runProviderCommand(ctx, t, func() error { + stateBeforeApplication, err = getState(ctx, t, wd) if err != nil { return err } return nil - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return fmt.Errorf("Error retrieving pre-apply state: %w", err) } // Apply the diff, creating real resources - err = runProviderCommand(t, func() error { - return wd.Apply() - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + err = runProviderCommand(ctx, t, func() error { + return wd.Apply(ctx) + }, wd, providers) if err != nil { if step.Destroy { return fmt.Errorf("Error running destroy: %w", err) @@ -107,22 +75,21 @@ func testStepNewConfig(t testing.T, c TestCase, wd *plugintest.WorkingDir, step // Get the new state var state *terraform.State - err = runProviderCommand(t, func() error { - state, err = getState(t, wd) + err = runProviderCommand(ctx, t, func() error { + state, err = getState(ctx, t, wd) if err != nil { return err } return nil - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil 
{ return fmt.Errorf("Error retrieving state after apply: %w", err) } // Run any configured checks if step.Check != nil { + logging.HelperResourceTrace(ctx, "Using TestStep Check") + state.IsBinaryDrivenTest = true if step.Destroy { if err := step.Check(stateBeforeApplication); err != nil { @@ -137,44 +104,36 @@ func testStepNewConfig(t testing.T, c TestCase, wd *plugintest.WorkingDir, step } // Test for perpetual diffs by performing a plan, a refresh, and another plan + logging.HelperResourceDebug(ctx, "Running Terraform CLI plan to check for perpetual differences") // do a plan - err = runProviderCommand(t, func() error { + err = runProviderCommand(ctx, t, func() error { if step.Destroy { - return wd.CreateDestroyPlan() + return wd.CreateDestroyPlan(ctx) } - return wd.CreatePlan() - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + return wd.CreatePlan(ctx) + }, wd, providers) if err != nil { return fmt.Errorf("Error running post-apply plan: %w", err) } var plan *tfjson.Plan - err = runProviderCommand(t, func() error { + err = runProviderCommand(ctx, t, func() error { var err error - plan, err = wd.SavedPlan() + plan, err = wd.SavedPlan(ctx) return err - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return fmt.Errorf("Error retrieving post-apply plan: %w", err) } if !planIsEmpty(plan) && !step.ExpectNonEmptyPlan { var stdout string - err = runProviderCommand(t, func() error { + err = runProviderCommand(ctx, t, func() error { var err error - stdout, err = wd.SavedPlanRawStdout() + stdout, err = wd.SavedPlanRawStdout(ctx) return err - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return fmt.Errorf("Error retrieving formatted plan output: %w", err) } @@ -183,39 +142,30 @@ func testStepNewConfig(t testing.T, c TestCase, wd *plugintest.WorkingDir, step // do a refresh if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) { - err := runProviderCommand(t, func() error { - return wd.Refresh() - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + err := runProviderCommand(ctx, t, func() error { + return wd.Refresh(ctx) + }, wd, providers) if err != nil { return fmt.Errorf("Error running post-apply refresh: %w", err) } } // do another plan - err = runProviderCommand(t, func() error { + err = runProviderCommand(ctx, t, func() error { if step.Destroy { - return wd.CreateDestroyPlan() + return wd.CreateDestroyPlan(ctx) } - return wd.CreatePlan() - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + return wd.CreatePlan(ctx) + }, wd, providers) if err != nil { return fmt.Errorf("Error running second post-apply plan: %w", err) } - err = runProviderCommand(t, func() error { + err = runProviderCommand(ctx, t, func() error { var err error - plan, err = wd.SavedPlan() + plan, err = wd.SavedPlan(ctx) return err - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return fmt.Errorf("Error retrieving second post-apply plan: %w", err) } @@ -223,14 +173,11 @@ func 
testStepNewConfig(t testing.T, c TestCase, wd *plugintest.WorkingDir, step // check if plan is empty if !planIsEmpty(plan) && !step.ExpectNonEmptyPlan { var stdout string - err = runProviderCommand(t, func() error { + err = runProviderCommand(ctx, t, func() error { var err error - stdout, err = wd.SavedPlanRawStdout() + stdout, err = wd.SavedPlanRawStdout(ctx) return err - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return fmt.Errorf("Error retrieving formatted second plan output: %w", err) } @@ -242,21 +189,29 @@ func testStepNewConfig(t testing.T, c TestCase, wd *plugintest.WorkingDir, step // ID-ONLY REFRESH // If we've never checked an id-only refresh and our state isn't // empty, find the first resource and test it. - var state *terraform.State - err = runProviderCommand(t, func() error { - state, err = getState(t, wd) + if c.IDRefreshName != "" { + logging.HelperResourceTrace(ctx, "Using TestCase IDRefreshName") + + var state *terraform.State + + err = runProviderCommand(ctx, t, func() error { + state, err = getState(ctx, t, wd) + if err != nil { + return err + } + return nil + }, wd, providers) + if err != nil { return err } - return nil - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) - if err != nil { - return err - } - if idRefresh && idRefreshCheck == nil && !state.Empty() { + + if state.Empty() { + return nil + } + + var idRefreshCheck *terraform.ResourceState + // Find the first non-nil resource in the state for _, m := range state.Modules { if len(m.Resources) > 0 { @@ -275,7 +230,7 @@ func testStepNewConfig(t testing.T, c TestCase, wd *plugintest.WorkingDir, step // this fails. If refresh isn't read-only, then this will have // caught a different bug. 
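A minimal sketch of the test-author view of the flow above; the provider factory and config names are hypothetical, while IDRefreshName and ExpectNonEmptyPlan are the real TestCase and TestStep fields this code consults:

package example_test

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

func TestAccThing_basic(t *testing.T) {
	resource.Test(t, resource.TestCase{
		// Opt in to the ID-only refresh exercised above.
		IDRefreshName:     "myprovider_thing.example", // hypothetical resource
		ProviderFactories: testProviderFactories,      // hypothetical factories
		Steps: []resource.TestStep{{
			Config: testAccThingConfig, // hypothetical HCL constant
			// Leave unset unless a step legitimately cannot converge;
			// otherwise the plan/refresh/plan sequence above fails the
			// test on any remaining diff.
			ExpectNonEmptyPlan: false,
		}},
	})
}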
if idRefreshCheck != nil { - if err := testIDRefresh(c, t, wd, step, idRefreshCheck); err != nil { + if err := testIDRefresh(ctx, t, c, wd, step, idRefreshCheck, providers); err != nil { return fmt.Errorf( "[ERROR] Test: ID-only test failed: %s", err) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go index a38c3c7dc..ec61b055f 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go @@ -1,17 +1,20 @@ package resource import ( + "context" + "fmt" "reflect" "strings" "github.com/davecgh/go-spew/spew" testing "github.com/mitchellh/go-testing-interface" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func testStepNewImportState(t testing.T, c TestCase, helper *plugintest.Helper, wd *plugintest.WorkingDir, step TestStep, cfg string) error { +func testStepNewImportState(ctx context.Context, t testing.T, helper *plugintest.Helper, wd *plugintest.WorkingDir, step TestStep, cfg string, providers *providerFactories) error { t.Helper() spewConf := spew.NewDefaultConfig() @@ -24,16 +27,13 @@ func testStepNewImportState(t testing.T, c TestCase, helper *plugintest.Helper, // get state from check sequence var state *terraform.State var err error - err = runProviderCommand(t, func() error { - state, err = getState(t, wd) + err = runProviderCommand(ctx, t, func() error { + state, err = getState(ctx, t, wd) if err != nil { return err } return nil - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { t.Fatalf("Error getting state: %s", err) } @@ -42,73 +42,89 @@ func testStepNewImportState(t testing.T, c TestCase, helper *plugintest.Helper, var importId string switch { case step.ImportStateIdFunc != nil: + logging.HelperResourceTrace(ctx, "Using TestStep ImportStateIdFunc for import identifier") + var err error + + logging.HelperResourceDebug(ctx, "Calling TestStep ImportStateIdFunc") + importId, err = step.ImportStateIdFunc(state) + if err != nil { t.Fatal(err) } + + logging.HelperResourceDebug(ctx, "Called TestStep ImportStateIdFunc") case step.ImportStateId != "": + logging.HelperResourceTrace(ctx, "Using TestStep ImportStateId for import identifier") + importId = step.ImportStateId default: + logging.HelperResourceTrace(ctx, "Using resource identifier for import identifier") + resource, err := testResource(step, state) if err != nil { t.Fatal(err) } importId = resource.Primary.ID } - importId = step.ImportStateIdPrefix + importId + + if step.ImportStateIdPrefix != "" { + logging.HelperResourceTrace(ctx, "Prepending TestStep ImportStateIdPrefix for import identifier") + + importId = step.ImportStateIdPrefix + importId + } + + logging.HelperResourceTrace(ctx, fmt.Sprintf("Using import identifier: %s", importId)) // Create working directory for import tests if step.Config == "" { + logging.HelperResourceTrace(ctx, "Using prior TestStep Config for import") + step.Config = cfg if step.Config == "" { t.Fatal("Cannot import state with no specified config") } } - importWd := helper.RequireNewWorkingDir(t) + importWd := 
helper.RequireNewWorkingDir(ctx, t) defer importWd.Close() - err = importWd.SetConfig(step.Config) + err = importWd.SetConfig(ctx, step.Config) if err != nil { t.Fatalf("Error setting test config: %s", err) } - err = runProviderCommand(t, func() error { - return importWd.Init() - }, importWd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + logging.HelperResourceDebug(ctx, "Running Terraform CLI init and import") + + err = runProviderCommand(ctx, t, func() error { + return importWd.Init(ctx) + }, importWd, providers) if err != nil { t.Fatalf("Error running init: %s", err) } - err = runProviderCommand(t, func() error { - return importWd.Import(step.ResourceName, importId) - }, importWd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + err = runProviderCommand(ctx, t, func() error { + return importWd.Import(ctx, step.ResourceName, importId) + }, importWd, providers) if err != nil { return err } var importState *terraform.State - err = runProviderCommand(t, func() error { - importState, err = getState(t, importWd) + err = runProviderCommand(ctx, t, func() error { + importState, err = getState(ctx, t, importWd) if err != nil { return err } return nil - }, importWd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, importWd, providers) if err != nil { t.Fatalf("Error getting state: %s", err) } // Go through the imported state and verify if step.ImportStateCheck != nil { + logging.HelperResourceTrace(ctx, "Using TestStep ImportStateCheck") + var states []*terraform.InstanceState for _, r := range importState.RootModule().Resources { if r.Primary != nil { @@ -117,20 +133,27 @@ func testStepNewImportState(t testing.T, c TestCase, helper *plugintest.Helper, states = append(states, is) } } + + logging.HelperResourceDebug(ctx, "Calling TestStep ImportStateCheck") + if err := step.ImportStateCheck(states); err != nil { t.Fatal(err) } + + logging.HelperResourceDebug(ctx, "Called TestStep ImportStateCheck") } // Verify that all the states match if step.ImportStateVerify { - new := importState.RootModule().Resources - old := state.RootModule().Resources + logging.HelperResourceTrace(ctx, "Using TestStep ImportStateVerify") + + newResources := importState.RootModule().Resources + oldResources := state.RootModule().Resources - for _, r := range new { + for _, r := range newResources { // Find the existing resource var oldR *terraform.ResourceState - for r2Key, r2 := range old { + for r2Key, r2 := range oldResources { // Ensure that we do not match against data sources as they // cannot be imported and are not what we want to verify. 
// Mode is not present in ResourceState so we use the @@ -144,7 +167,7 @@ func testStepNewImportState(t testing.T, c TestCase, helper *plugintest.Helper, break } } - if oldR == nil { + if oldR == nil || oldR.Primary == nil { t.Fatalf( "Failed state verification, resource with ID %s not found", r.Primary.ID) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_sets.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_sets.go index ada1d6e7e..9b1d57c66 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_sets.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_sets.go @@ -14,25 +14,46 @@ const ( sentinelIndex = "*" ) -// TestCheckTypeSetElemNestedAttrs is a TestCheckFunc that accepts a resource -// name, an attribute path, which should use the sentinel value '*' for indexing -// into a TypeSet. The function verifies that an element matches the whole value -// map. +// TestCheckTypeSetElemNestedAttrs ensures a subset map of values is stored in +// state for the given name and key combination of attributes nested under a +// list or set block. Use this TestCheckFunc in preference over non-set +// variants to simplify testing code and ensure compatibility with indices, +// which can easily change with schema changes. State value checking is only +// recommended for testing Computed attributes and attribute defaults. // -// You may check for unset keys, however this will also match keys set to empty -// string. Please provide a map with at least 1 non-empty value. +// For managed resources, the name parameter is a combination of the resource +// type, a period (.), and the name label. The name for the below example +// configuration would be "myprovider_thing.example". // -// map[string]string{ -// "key1": "value", -// "key2": "", -// } +// resource "myprovider_thing" "example" { ... } // -// Use this function over SDK provided TestCheckFunctions when validating a -// TypeSet where its elements are a nested object with their own attrs/values. +// For data sources, the name parameter is a combination of the keyword "data", +// a period (.), the data source type, a period (.), and the name label. The +// name for the below example configuration would be +// "data.myprovider_thing.example". +// +// data "myprovider_thing" "example" { ... } +// +// The key parameter is an attribute path in Terraform CLI 0.11 and earlier +// "flatmap" syntax. Keys start with the attribute name of a top-level +// attribute. Use the sentinel value '*' to replace the element indexing into +// a list or set. The sentinel value can be used for each list or set index, if +// there are multiple lists or sets in the attribute path. +// +// The values parameter is the map of attribute names to attribute values +// expected to be nested under the list or set. +// +// You may check for unset nested attributes, however this will also match keys +// set to an empty string. Use a map with at least 1 non-empty value. // -// Please note, if the provided value map is not granular enough, there exists -// the possibility you match an element you were not intending to, in the TypeSet. -// Provide a full mapping of attributes to be sure the unique element exists. +// map[string]string{ +// "key1": "value", +// "key2": "", +// } +// +// If the values map is not granular enough, it is possible to match an element +// you were not intending to in the set.
Provide the most complete mapping of +attributes possible to be sure the unique element exists. func TestCheckTypeSetElemNestedAttrs(name, attr string, values map[string]string) TestCheckFunc { return func(s *terraform.State) error { is, err := primaryInstanceState(s, name) @@ -64,17 +85,47 @@ } } -// TestMatchTypeSetElemNestedAttrs is a TestCheckFunc similar to TestCheckTypeSetElemNestedAttrs -// with the exception that it verifies that an element matches a *regexp.Regexp. +// TestMatchTypeSetElemNestedAttrs ensures a subset map of values, compared by +// regular expressions, is stored in state for the given name and key +// combination of attributes nested under a list or set block. Use this +// TestCheckFunc in preference over non-set variants to simplify testing code +// and ensure compatibility with indices, which can easily change with schema +// changes. State value checking is only recommended for testing Computed +// attributes and attribute defaults. +// +// For managed resources, the name parameter is a combination of the resource +// type, a period (.), and the name label. The name for the below example +// configuration would be "myprovider_thing.example". +// +// resource "myprovider_thing" "example" { ... } +// +// For data sources, the name parameter is a combination of the keyword "data", +// a period (.), the data source type, a period (.), and the name label. The +// name for the below example configuration would be +// "data.myprovider_thing.example". +// +// data "myprovider_thing" "example" { ... } +// +// The key parameter is an attribute path in Terraform CLI 0.11 and earlier +// "flatmap" syntax. Keys start with the attribute name of a top-level +// attribute. Use the sentinel value '*' to replace the element indexing into +// a list or set. The sentinel value can be used for each list or set index, if +// there are multiple lists or sets in the attribute path. // -// You may check for unset keys, however this will also match keys set to empty -// string. Please provide a map with at least 1 non-empty value e.g. +// The values parameter is the map of attribute names to regular expressions +// for matching attribute values expected to be nested under the list or set. // -// map[string]*regexp.Regexp{ -// "key1": regexp.MustCompile("value"), -// "key2": regexp.MustCompile(""), -// } +// You may check for unset nested attributes, however this will also match keys +// set to an empty string. Use a map with at least 1 non-empty value. // +// map[string]*regexp.Regexp{ +// "key1": regexp.MustCompile(`^value`), +// "key2": regexp.MustCompile(`^$`), +// } +// +// If the values map is not granular enough, it is possible to match an element +// you were not intending to in the set. Provide the most complete mapping of +// attributes possible to be sure the unique element exists. func TestMatchTypeSetElemNestedAttrs(name, attr string, values map[string]*regexp.Regexp) TestCheckFunc { return func(s *terraform.State) error { is, err := primaryInstanceState(s, name) @@ -113,6 +164,39 @@ func TestMatchTypeSetElemNestedAttrs(name, attr string, values map[string]*regex // // Use this function over SDK provided TestCheckFunctions when validating a // TypeSet where its elements are a simple value + +// TestCheckTypeSetElemAttr ensures a specific value is stored in state for the +// given name and key combination under a list or set.
Use this TestCheckFunc +// in preference over non-set variants to simplify testing code and ensure +// compatibility with indices, which can easily change with schema changes. +// State value checking is only recommended for testing Computed attributes and +// attribute defaults. +// +// For managed resources, the name parameter is a combination of the resource +// type, a period (.), and the name label. The name for the below example +// configuration would be "myprovider_thing.example". +// +// resource "myprovider_thing" "example" { ... } +// +// For data sources, the name parameter is a combination of the keyword "data", +// a period (.), the data source type, a period (.), and the name label. The +// name for the below example +// configuration would be +// "data.myprovider_thing.example". +// +// data "myprovider_thing" "example" { ... } +// +// The key parameter is an attribute path in Terraform CLI 0.11 and earlier +// "flatmap" syntax. Keys start with the attribute name of a top-level +// attribute. Use the sentinel value '*' to replace the element indexing into +// a list or set. The sentinel value can be used for each list or set index, if +// there are multiple lists or sets in the attribute path. +// +// The value parameter is the stringified data to check at the given key. Use +// the following attribute type rules to set the value: +// +// - Boolean: "false" or "true". +// - Float/Integer: Stringified number, such as "1.2" or "123". +// - String: No conversion necessary. func TestCheckTypeSetElemAttr(name, attr, value string) TestCheckFunc { return func(s *terraform.State) error { is, err := primaryInstanceState(s, name) @@ -129,12 +213,32 @@ func TestCheckTypeSetElemAttr(name, attr, value string) TestCheckFunc { } } -// TestCheckTypeSetElemAttrPair is a TestCheckFunc that verifies a pair of name/key -// combinations are equal where the first uses the sentinel value to index into a -// TypeSet. +// TestCheckTypeSetElemAttrPair ensures value equality in state between the +// first given name and key combination and the second name and key +// combination. State value checking is only recommended for testing Computed +// attributes and attribute defaults. +// +// For managed resources, the name parameter is a combination of the resource +// type, a period (.), and the name label. The name for the below example +// configuration would be "myprovider_thing.example". +// +// resource "myprovider_thing" "example" { ... } +// +// For data sources, the name parameter is a combination of the keyword "data", +// a period (.), the data source type, a period (.), and the name label. The +// name for the below example configuration would be +// "data.myprovider_thing.example". +// +// data "myprovider_thing" "example" { ... } +// +// The first and second names may use any combination of managed resources +// and/or data sources. -// E.g., TestCheckTypeSetElemAttrPair("aws_autoscaling_group.bar", "availability_zones.*", "data.aws_availability_zones.available", "names.0") -// E.g., TestCheckTypeSetElemAttrPair("aws_spot_fleet_request.bar", "launch_specification.*.instance_type", "data.data.aws_ec2_instance_type_offering.available", "instance_type") +// The key parameter is an attribute path in Terraform CLI 0.11 and earlier +// "flatmap" syntax. Keys start with the attribute name of a top-level +// attribute. Use the sentinel value '*' to replace the element indexing into +// a list or set.
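//
// A hedged usage sketch: the first call uses hypothetical names, the second
// mirrors an example from the doc lines removed above.
//
//	TestCheckTypeSetElemAttr("myprovider_thing.example", "ports.*", "443")
//	TestCheckTypeSetElemAttrPair(
//		"aws_autoscaling_group.bar", "availability_zones.*",
//		"data.aws_availability_zones.available", "names.0")
//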
The sentinel value can be used for each list or set index, if +// there are multiple lists or sets in the attribute path. func TestCheckTypeSetElemAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { return func(s *terraform.State) error { isFirst, err := primaryInstanceState(s, nameFirst) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_providers.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_providers.go new file mode 100644 index 000000000..35d4a9bf5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_providers.go @@ -0,0 +1,48 @@ +package resource + +import ( + "context" + "fmt" + "strings" +) + +// providerConfig takes the list of providers in a TestStep and returns a +// config with only empty provider blocks. This is useful for Import, where no +// config is provided, but the providers must be defined. +func (s TestStep) providerConfig(_ context.Context) string { + var providerBlocks, requiredProviderBlocks strings.Builder + + for name, externalProvider := range s.ExternalProviders { + providerBlocks.WriteString(fmt.Sprintf("provider %q {}\n", name)) + + if externalProvider.Source == "" && externalProvider.VersionConstraint == "" { + continue + } + + requiredProviderBlocks.WriteString(fmt.Sprintf(" %s = {\n", name)) + + if externalProvider.Source != "" { + requiredProviderBlocks.WriteString(fmt.Sprintf(" source = %q\n", externalProvider.Source)) + } + + if externalProvider.VersionConstraint != "" { + requiredProviderBlocks.WriteString(fmt.Sprintf(" version = %q\n", externalProvider.VersionConstraint)) + } + + requiredProviderBlocks.WriteString(" }\n") + } + + if requiredProviderBlocks.Len() > 0 { + return fmt.Sprintf(` +terraform { + required_providers { +%[1]s + } +} + +%[2]s +`, strings.TrimSuffix(requiredProviderBlocks.String(), "\n"), providerBlocks.String()) + } + + return providerBlocks.String() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_validate.go new file mode 100644 index 000000000..e9239328c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_validate.go @@ -0,0 +1,99 @@ +package resource + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" +) + +// testStepValidateRequest contains data for the (TestStep).validate() method. +type testStepValidateRequest struct { + // StepNumber is the index of the TestStep in the TestCase.Steps. + StepNumber int + + // TestCaseHasProviders is enabled if the TestCase has set any of + // ExternalProviders, ProtoV5ProviderFactories, ProtoV6ProviderFactories, + // or ProviderFactories. + TestCaseHasProviders bool +} + +// hasProviders returns true if the TestStep has set any of the +// ExternalProviders, ProtoV5ProviderFactories, ProtoV6ProviderFactories, or +// ProviderFactories fields. +func (s TestStep) hasProviders(_ context.Context) bool { + if len(s.ExternalProviders) > 0 { + return true + } + + if len(s.ProtoV5ProviderFactories) > 0 { + return true + } + + if len(s.ProtoV6ProviderFactories) > 0 { + return true + } + + if len(s.ProviderFactories) > 0 { + return true + } + + return false +} + +// validate ensures the TestStep is valid based on the following criteria: +// +// - Config or ImportState is set. 
+// - Providers are not specified (ExternalProviders, +// ProtoV5ProviderFactories, ProtoV6ProviderFactories, ProviderFactories) +// if specified at the TestCase level. +// - Providers are specified (ExternalProviders, ProtoV5ProviderFactories, +// ProtoV6ProviderFactories, ProviderFactories) if not specified at the +// TestCase level. +// - No overlapping ExternalProviders and ProviderFactories entries +// - ResourceName is not empty when ImportState is true, ImportStateIdFunc +// is not set, and ImportStateId is not set. +// +func (s TestStep) validate(ctx context.Context, req testStepValidateRequest) error { + ctx = logging.TestStepNumberContext(ctx, req.StepNumber) + + logging.HelperResourceTrace(ctx, "Validating TestStep") + + if s.Config == "" && !s.ImportState { + err := fmt.Errorf("TestStep missing Config or ImportState") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + for name := range s.ExternalProviders { + if _, ok := s.ProviderFactories[name]; ok { + err := fmt.Errorf("TestStep provider %q set in both ExternalProviders and ProviderFactories", name) + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + } + + hasProviders := s.hasProviders(ctx) + + if req.TestCaseHasProviders && hasProviders { + err := fmt.Errorf("Providers must only be specified either at the TestCase or TestStep level") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if !req.TestCaseHasProviders && !hasProviders { + err := fmt.Errorf("Providers must be specified at the TestCase level or in all TestStep") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if s.ImportState { + if s.ImportStateId == "" && s.ImportStateIdFunc == nil && s.ResourceName == "" { + err := fmt.Errorf("TestStep ImportState must be specified with ImportStateId, ImportStateIdFunc, or ResourceName") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader.go index c7721bcd3..855bc4028 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader.go @@ -171,7 +171,7 @@ func addrToSchema(addr []string, schemaMap map[string]*Schema) []*Schema { // "foo.#" for a list "foo" and that the indexes are "foo.0", "foo.1", etc. // after that point. 
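A compact sketch of the provider-placement rule that (TestStep).validate enforces above, with hypothetical factory and config names; providers may be declared on the TestCase or on every TestStep, but never both:

// Valid: providers declared once at the TestCase level.
resource.Test(t, resource.TestCase{
	ProviderFactories: testProviderFactories, // hypothetical
	Steps:             []resource.TestStep{{Config: testConfig}},
})

// Invalid: a step repeats ProviderFactories, which trips the
// "Providers must only be specified either at the TestCase or TestStep level"
// error returned above.
resource.Test(t, resource.TestCase{
	ProviderFactories: testProviderFactories,
	Steps: []resource.TestStep{{
		Config:            testConfig,
		ProviderFactories: testProviderFactories,
	}},
})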
func readListField( - r FieldReader, addr []string, schema *Schema) (FieldReadResult, error) { + r FieldReader, addr []string) (FieldReadResult, error) { addrPadded := make([]string, len(addr)+1) copy(addrPadded, addr) addrPadded[len(addrPadded)-1] = "#" diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_config.go index 3f1f5e8ab..cd4a993a3 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_config.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_config.go @@ -100,7 +100,7 @@ func (r *ConfigFieldReader) readField( case TypeBool, TypeFloat, TypeInt, TypeString: return r.readPrimitive(k, schema) case TypeList: - return readListField(&nestedConfigFieldReader{r}, address, schema) + return readListField(&nestedConfigFieldReader{r}, address) case TypeMap: return r.readMap(k, schema) case TypeSet: @@ -203,7 +203,7 @@ func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult, err := mapValuesToPrimitive(k, result, schema) if err != nil { - return FieldReadResult{}, nil + return FieldReadResult{}, nil //nolint:nilerr // Leave legacy flatmap handling } var value interface{} @@ -258,7 +258,7 @@ func (r *ConfigFieldReader) readSet( // Create the set that will be our result set := schema.ZeroValue().(*Set) - raw, err := readListField(&nestedConfigFieldReader{r}, address, schema) + raw, err := readListField(&nestedConfigFieldReader{r}, address) if err != nil { return FieldReadResult{}, err } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_diff.go index 642e7f32e..ca3b714b1 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_diff.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_diff.go @@ -67,7 +67,7 @@ func (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) { case TypeBool, TypeInt, TypeFloat, TypeString: res, err = r.readPrimitive(address, schema) case TypeList: - res, err = readListField(r, address, schema) + res, err = readListField(r, address) case TypeMap: res, err = r.readMap(address, schema) case TypeSet: @@ -128,7 +128,7 @@ func (r *DiffFieldReader) readMap( key := address[len(address)-1] err = mapValuesToPrimitive(key, result, schema) if err != nil { - return FieldReadResult{}, nil + return FieldReadResult{}, nil //nolint:nilerr // Leave legacy flatmap handling } var resultVal interface{} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_map.go index 092dd7f68..6fa191522 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_map.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_map.go @@ -24,7 +24,7 @@ func (r *MapFieldReader) ReadField(address []string) (FieldReadResult, error) { case TypeBool, TypeInt, TypeFloat, TypeString: return r.readPrimitive(address, schema) case TypeList: - return readListField(r, address, schema) + return readListField(r, address) case TypeMap: return r.readMap(k, schema) case TypeSet: @@ -63,7 +63,7 @@ func (r *MapFieldReader) readMap(k string, schema *Schema) (FieldReadResult, err err := 
mapValuesToPrimitive(k, result, schema) if err != nil { - return FieldReadResult{}, nil + return FieldReadResult{}, nil //nolint:nilerr // Leave legacy flatmap handling } var resultVal interface{} @@ -159,11 +159,8 @@ func (r *MapFieldReader) readSet( // "ports.1", but the "state" map might have those plus "ports.2". // We don't want "ports.2" countActual[idx] = struct{}{} - if len(countActual) >= countExpected { - return false - } - return true + return len(countActual) < countExpected }) if !completed && err != nil { return FieldReadResult{}, err diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer_map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer_map.go index 85d05be4c..39c708b27 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer_map.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer_map.go @@ -100,13 +100,13 @@ func (w *MapFieldWriter) set(addr []string, value interface{}) error { case TypeBool, TypeInt, TypeFloat, TypeString: return w.setPrimitive(addr, value, schema) case TypeList: - return w.setList(addr, value, schema) + return w.setList(addr, value) case TypeMap: - return w.setMap(addr, value, schema) + return w.setMap(addr, value) case TypeSet: return w.setSet(addr, value, schema) case typeObject: - return w.setObject(addr, value, schema) + return w.setObject(addr, value) default: panic(fmt.Sprintf("Unknown type: %#v", schema.Type)) } @@ -114,8 +114,7 @@ func (w *MapFieldWriter) set(addr []string, value interface{}) error { func (w *MapFieldWriter) setList( addr []string, - v interface{}, - schema *Schema) error { + v interface{}) error { k := strings.Join(addr, ".") setElement := func(idx string, value interface{}) error { addrCopy := make([]string, len(addr), len(addr)+1) @@ -148,7 +147,7 @@ func (w *MapFieldWriter) setList( if err != nil { for i := range vs { is := strconv.FormatInt(int64(i), 10) - setElement(is, nil) + _ = setElement(is, nil) // best effort; error returned below } return err @@ -160,8 +159,7 @@ func (w *MapFieldWriter) setList( func (w *MapFieldWriter) setMap( addr []string, - value interface{}, - schema *Schema) error { + value interface{}) error { k := strings.Join(addr, ".") v := reflect.ValueOf(value) vs := make(map[string]interface{}) @@ -176,7 +174,7 @@ func (w *MapFieldWriter) setMap( return fmt.Errorf("%s: must be a map", k) } if v.Type().Key().Kind() != reflect.String { - return fmt.Errorf("%s: keys must strings", k) + return fmt.Errorf("%s: keys must be strings", k) } for _, mk := range v.MapKeys() { mv := v.MapIndex(mk) @@ -207,8 +205,7 @@ func (w *MapFieldWriter) setMap( func (w *MapFieldWriter) setObject( addr []string, - value interface{}, - schema *Schema) error { + value interface{}) error { // Set the entire object. 
First decode into a proper structure var v map[string]interface{} if err := mapstructure.Decode(value, &v); err != nil { @@ -228,11 +225,13 @@ func (w *MapFieldWriter) setObject( } if err != nil { for k1 := range v { - w.set(append(addrCopy, k1), nil) + _ = w.set(append(addrCopy, k1), nil) // best effort; error returned below } + + return err } - return err + return nil } func (w *MapFieldWriter) setPrimitive( @@ -271,7 +270,7 @@ func (w *MapFieldWriter) setPrimitive( if err := mapstructure.Decode(v, &n); err != nil { return fmt.Errorf("%s: %s", k, err) } - set = strconv.FormatFloat(float64(n), 'G', -1, 64) + set = strconv.FormatFloat(n, 'G', -1, 64) default: return fmt.Errorf("Unknown type: %#v", schema.Type) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/grpc_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/grpc_provider.go index 4ed1253de..ec3ed07c1 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/grpc_provider.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/grpc_provider.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "fmt" - "log" "strconv" "sync" @@ -16,12 +15,15 @@ import ( "github.com/hashicorp/terraform-plugin-go/tftypes" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -const newExtraKey = "_new_extra_shim" +const ( + newExtraKey = "_new_extra_shim" +) func NewGRPCProviderServer(p *Provider) *GRPCProviderServer { return &GRPCProviderServer{ @@ -53,6 +55,7 @@ func mergeStop(ctx context.Context, cancel context.CancelFunc, stopCh chan struc // It creates a goroutine to wait for the server stop and propagates // cancellation to the derived grpc context. 
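An aside on the strconv.FormatFloat change above: n is already a float64, so the float64(n) conversion was redundant and behavior is unchanged. The 'G' verb with precision -1 emits the shortest representation that round-trips:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	fmt.Println(strconv.FormatFloat(1.0, 'G', -1, 64))  // 1
	fmt.Println(strconv.FormatFloat(0.25, 'G', -1, 64)) // 0.25
	fmt.Println(strconv.FormatFloat(1e21, 'G', -1, 64)) // 1E+21
}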
func (s *GRPCProviderServer) StopContext(ctx context.Context) context.Context { + ctx = logging.InitContext(ctx) s.stopMu.Lock() defer s.stopMu.Unlock() @@ -61,7 +64,10 @@ func (s *GRPCProviderServer) StopContext(ctx context.Context) context.Context { return stoppable } -func (s *GRPCProviderServer) GetProviderSchema(_ context.Context, req *tfprotov5.GetProviderSchemaRequest) (*tfprotov5.GetProviderSchemaResponse, error) { +func (s *GRPCProviderServer) GetProviderSchema(ctx context.Context, req *tfprotov5.GetProviderSchemaRequest) (*tfprotov5.GetProviderSchemaResponse, error) { + ctx = logging.InitContext(ctx) + + logging.HelperSchemaTrace(ctx, "Getting provider schema") resp := &tfprotov5.GetProviderSchemaResponse{ ResourceSchemas: make(map[string]*tfprotov5.Schema), @@ -69,24 +75,28 @@ func (s *GRPCProviderServer) GetProviderSchema(_ context.Context, req *tfprotov5 } resp.Provider = &tfprotov5.Schema{ - Block: convert.ConfigSchemaToProto(s.getProviderSchemaBlock()), + Block: convert.ConfigSchemaToProto(ctx, s.getProviderSchemaBlock()), } resp.ProviderMeta = &tfprotov5.Schema{ - Block: convert.ConfigSchemaToProto(s.getProviderMetaSchemaBlock()), + Block: convert.ConfigSchemaToProto(ctx, s.getProviderMetaSchemaBlock()), } for typ, res := range s.provider.ResourcesMap { + logging.HelperSchemaTrace(ctx, "Found resource type", map[string]interface{}{logging.KeyResourceType: typ}) + resp.ResourceSchemas[typ] = &tfprotov5.Schema{ Version: int64(res.SchemaVersion), - Block: convert.ConfigSchemaToProto(res.CoreConfigSchema()), + Block: convert.ConfigSchemaToProto(ctx, res.CoreConfigSchema()), } } for typ, dat := range s.provider.DataSourcesMap { + logging.HelperSchemaTrace(ctx, "Found data source type", map[string]interface{}{logging.KeyDataSourceType: typ}) + resp.DataSourceSchemas[typ] = &tfprotov5.Schema{ Version: int64(dat.SchemaVersion), - Block: convert.ConfigSchemaToProto(dat.CoreConfigSchema()), + Block: convert.ConfigSchemaToProto(ctx, dat.CoreConfigSchema()), } } @@ -111,14 +121,17 @@ func (s *GRPCProviderServer) getDatasourceSchemaBlock(name string) *configschema return dat.CoreConfigSchema() } -func (s *GRPCProviderServer) PrepareProviderConfig(_ context.Context, req *tfprotov5.PrepareProviderConfigRequest) (*tfprotov5.PrepareProviderConfigResponse, error) { +func (s *GRPCProviderServer) PrepareProviderConfig(ctx context.Context, req *tfprotov5.PrepareProviderConfigRequest) (*tfprotov5.PrepareProviderConfigResponse, error) { + ctx = logging.InitContext(ctx) resp := &tfprotov5.PrepareProviderConfigResponse{} + logging.HelperSchemaTrace(ctx, "Preparing provider configuration") + schemaBlock := s.getProviderSchemaBlock() configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -156,8 +169,7 @@ func (s *GRPCProviderServer) PrepareProviderConfig(_ context.Context, req *tfpro // find a default value if it exists def, err := attrSchema.DefaultValue() if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error getting default for %q: %s", getAttr.Name, err)) - return val, err + return val, fmt.Errorf("error getting default for %q: %w", getAttr.Name, err) } // no default @@ -171,41 +183,43 @@ func (s *GRPCProviderServer) PrepareProviderConfig(_ context.Context, req *tfpro // helper/schema used to allow setting "" to a bool if val.Type() == cty.Bool && 
tmpVal.RawEquals(cty.StringVal("")) { // return a warning about the conversion - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, "provider set empty string as default value for bool "+getAttr.Name) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, "provider set empty string as default value for bool "+getAttr.Name) tmpVal = cty.False } val, err = ctyconvert.Convert(tmpVal, val.Type()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error setting default for %q: %s", getAttr.Name, err)) + return val, fmt.Errorf("error setting default for %q: %w", getAttr.Name, err) } - return val, err + return val, nil }) if err != nil { - // any error here was already added to the diagnostics + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } configVal, err = schemaBlock.CoerceValue(configVal) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } // Ensure there are no nulls that will cause helper/schema to panic. - if err := validateConfigNulls(configVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + if err := validateConfigNulls(ctx, configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, s.provider.Validate(config)) + logging.HelperSchemaTrace(ctx, "Calling downstream") + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, s.provider.Validate(config)) + logging.HelperSchemaTrace(ctx, "Called downstream") preparedConfigMP, err := msgpack.Marshal(configVal, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -214,54 +228,61 @@ func (s *GRPCProviderServer) PrepareProviderConfig(_ context.Context, req *tfpro return resp, nil } -func (s *GRPCProviderServer) ValidateResourceTypeConfig(_ context.Context, req *tfprotov5.ValidateResourceTypeConfigRequest) (*tfprotov5.ValidateResourceTypeConfigResponse, error) { +func (s *GRPCProviderServer) ValidateResourceTypeConfig(ctx context.Context, req *tfprotov5.ValidateResourceTypeConfigRequest) (*tfprotov5.ValidateResourceTypeConfigResponse, error) { + ctx = logging.InitContext(ctx) resp := &tfprotov5.ValidateResourceTypeConfigResponse{} schemaBlock := s.getResourceSchemaBlock(req.TypeName) configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, s.provider.ValidateResource(req.TypeName, config)) + logging.HelperSchemaTrace(ctx, "Calling downstream") + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, s.provider.ValidateResource(req.TypeName, config)) + logging.HelperSchemaTrace(ctx, "Called downstream") return resp, nil } -func (s *GRPCProviderServer) ValidateDataSourceConfig(_ context.Context, req *tfprotov5.ValidateDataSourceConfigRequest) (*tfprotov5.ValidateDataSourceConfigResponse, 
error) { +func (s *GRPCProviderServer) ValidateDataSourceConfig(ctx context.Context, req *tfprotov5.ValidateDataSourceConfigRequest) (*tfprotov5.ValidateDataSourceConfigResponse, error) { + ctx = logging.InitContext(ctx) resp := &tfprotov5.ValidateDataSourceConfigResponse{} schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } // Ensure there are no nulls that will cause helper/schema to panic. - if err := validateConfigNulls(configVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + if err := validateConfigNulls(ctx, configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, s.provider.ValidateDataSource(req.TypeName, config)) + logging.HelperSchemaTrace(ctx, "Calling downstream") + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, s.provider.ValidateDataSource(req.TypeName, config)) + logging.HelperSchemaTrace(ctx, "Called downstream") return resp, nil } func (s *GRPCProviderServer) UpgradeResourceState(ctx context.Context, req *tfprotov5.UpgradeResourceStateRequest) (*tfprotov5.UpgradeResourceStateResponse, error) { + ctx = logging.InitContext(ctx) resp := &tfprotov5.UpgradeResourceStateResponse{} res, ok := s.provider.ResourcesMap[req.TypeName] if !ok { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) return resp, nil } schemaBlock := s.getResourceSchemaBlock(req.TypeName) @@ -275,9 +296,11 @@ func (s *GRPCProviderServer) UpgradeResourceState(ctx context.Context, req *tfpr // We first need to upgrade a flatmap state if it exists. // There should never be both a JSON and Flatmap state in the request. case len(req.RawState.Flatmap) > 0: + logging.HelperSchemaTrace(ctx, "Upgrading flatmap state") + jsonMap, version, err = s.upgradeFlatmapState(ctx, version, req.RawState.Flatmap, res) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } // if there's a JSON state, we need to decode it. 
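Both the flatmap and JSON branches above funnel versioned state through the resource's registered upgraders. A minimal sketch of the provider-side registration this path consumes, assuming a hypothetical resource whose v0 schema named the "name" attribute "title":

package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func resourceThing() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,
		Schema: map[string]*schema.Schema{
			"name": {Type: schema.TypeString, Required: true},
		},
		StateUpgraders: []schema.StateUpgrader{{
			Version: 0,
			Type:    resourceThingV0().CoreConfigSchema().ImpliedType(),
			Upgrade: func(_ context.Context, rawState map[string]interface{}, _ interface{}) (map[string]interface{}, error) {
				// Move the v0 "title" value to its v1 name.
				rawState["name"] = rawState["title"]
				delete(rawState, "title")
				return rawState, nil
			},
		}},
	}
}

func resourceThingV0() *schema.Resource {
	return &schema.Resource{
		Schema: map[string]*schema.Schema{
			"title": {Type: schema.TypeString, Required: true},
		},
	}
}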
@@ -288,29 +311,31 @@ func (s *GRPCProviderServer) UpgradeResourceState(ctx context.Context, req *tfpr err = json.Unmarshal(req.RawState.JSON, &jsonMap) } if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } default: - log.Println("[DEBUG] no state provided to upgrade") + logging.HelperSchemaDebug(ctx, "no state provided to upgrade") return resp, nil } // complete the upgrade of the JSON states + logging.HelperSchemaTrace(ctx, "Upgrading JSON state") + jsonMap, err = s.upgradeJSONState(ctx, version, jsonMap, res) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } // The provider isn't required to clean out removed fields - s.removeAttributes(jsonMap, schemaBlock.ImpliedType()) + s.removeAttributes(ctx, jsonMap, schemaBlock.ImpliedType()) // now we need to turn the state into the default json representation, so // that it can be re-decoded using the actual schema. val, err := JSONMapToStateValue(jsonMap, schemaBlock) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -319,7 +344,7 @@ func (s *GRPCProviderServer) UpgradeResourceState(ctx context.Context, req *tfpr // First we need to CoerceValue to ensure that all object types match. val, err = schemaBlock.CoerceValue(val) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } // Normalize the value and fill in any missing blocks. @@ -328,7 +353,7 @@ func (s *GRPCProviderServer) UpgradeResourceState(ctx context.Context, req *tfpr // encode the final state to the expected msgpack format newStateMP, err := msgpack.Marshal(val, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -341,7 +366,7 @@ func (s *GRPCProviderServer) UpgradeResourceState(ctx context.Context, req *tfpr // map[string]interface{}. // upgradeFlatmapState returns the json map along with the corresponding schema // version. -func (s *GRPCProviderServer) upgradeFlatmapState(ctx context.Context, version int, m map[string]string, res *Resource) (map[string]interface{}, int, error) { +func (s *GRPCProviderServer) upgradeFlatmapState(_ context.Context, version int, m map[string]string, res *Resource) (map[string]interface{}, int, error) { // this will be the version we've upgraded so, defaulting to the given // version in case no migration was called. upgradedVersion := version @@ -433,7 +458,7 @@ func (s *GRPCProviderServer) upgradeJSONState(ctx context.Context, version int, // Remove any attributes no longer present in the schema, so that the json can // be correctly decoded. 
-func (s *GRPCProviderServer) removeAttributes(v interface{}, ty cty.Type) { +func (s *GRPCProviderServer) removeAttributes(ctx context.Context, v interface{}, ty cty.Type) { // we're only concerned with finding maps that correspond to object // attributes switch v := v.(type) { @@ -442,7 +467,7 @@ func (s *GRPCProviderServer) removeAttributes(v interface{}, ty cty.Type) { if ty.IsListType() || ty.IsSetType() { eTy := ty.ElementType() for _, eV := range v { - s.removeAttributes(eV, eTy) + s.removeAttributes(ctx, eV, eTy) } } return @@ -451,20 +476,20 @@ func (s *GRPCProviderServer) removeAttributes(v interface{}, ty cty.Type) { if ty.IsMapType() { eTy := ty.ElementType() for _, eV := range v { - s.removeAttributes(eV, eTy) + s.removeAttributes(ctx, eV, eTy) } return } if ty == cty.DynamicPseudoType { - log.Printf("[DEBUG] ignoring dynamic block: %#v\n", v) + logging.HelperSchemaDebug(ctx, "ignoring dynamic block", map[string]interface{}{"block": v}) return } if !ty.IsObjectType() { // This shouldn't happen, and will fail to decode further on, so // there's no need to handle it here. - log.Printf("[WARN] unexpected type %#v for map in json state", ty) + logging.HelperSchemaWarn(ctx, "unexpected type for map in JSON state", map[string]interface{}{"type": ty}) return } @@ -472,17 +497,21 @@ func (s *GRPCProviderServer) removeAttributes(v interface{}, ty cty.Type) { for attr, attrV := range v { attrTy, ok := attrTypes[attr] if !ok { - log.Printf("[DEBUG] attribute %q no longer present in schema", attr) + logging.HelperSchemaDebug(ctx, "attribute no longer present in schema", map[string]interface{}{"attribute": attr}) delete(v, attr) continue } - s.removeAttributes(attrV, attrTy) + s.removeAttributes(ctx, attrV, attrTy) } } } -func (s *GRPCProviderServer) StopProvider(_ context.Context, _ *tfprotov5.StopProviderRequest) (*tfprotov5.StopProviderResponse, error) { +func (s *GRPCProviderServer) StopProvider(ctx context.Context, _ *tfprotov5.StopProviderRequest) (*tfprotov5.StopProviderResponse, error) { + ctx = logging.InitContext(ctx) + + logging.HelperSchemaTrace(ctx, "Stopping provider") + s.stopMu.Lock() defer s.stopMu.Unlock() @@ -491,25 +520,28 @@ func (s *GRPCProviderServer) StopProvider(_ context.Context, _ *tfprotov5.StopPr // reset the stop signal s.stopCh = make(chan struct{}) + logging.HelperSchemaTrace(ctx, "Stopped provider") + return &tfprotov5.StopProviderResponse{}, nil } func (s *GRPCProviderServer) ConfigureProvider(ctx context.Context, req *tfprotov5.ConfigureProviderRequest) (*tfprotov5.ConfigureProviderResponse, error) { + ctx = logging.InitContext(ctx) resp := &tfprotov5.ConfigureProviderResponse{} schemaBlock := s.getProviderSchemaBlock() configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } s.provider.TerraformVersion = req.TerraformVersion // Ensure there are no nulls that will cause helper/schema to panic. - if err := validateConfigNulls(configVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + if err := validateConfigNulls(ctx, configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -520,13 +552,18 @@ func (s *GRPCProviderServer) ConfigureProvider(ctx context.Context, req *tfproto // function.
Ideally a provider should migrate to the context aware API that receives // request scoped contexts, however this is a large undertaking for very large providers. ctxHack := context.WithValue(ctx, StopContextKey, s.StopContext(context.Background())) + + logging.HelperSchemaTrace(ctx, "Calling downstream") diags := s.provider.Configure(ctxHack, config) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, diags) + logging.HelperSchemaTrace(ctx, "Called downstream") + + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, diags) return resp, nil } func (s *GRPCProviderServer) ReadResource(ctx context.Context, req *tfprotov5.ReadResourceRequest) (*tfprotov5.ReadResourceResponse, error) { + ctx = logging.InitContext(ctx) resp := &tfprotov5.ReadResourceResponse{ // helper/schema did previously handle private data during refresh, but // core is now going to expect this to be maintained in order to @@ -536,20 +573,20 @@ func (s *GRPCProviderServer) ReadResource(ctx context.Context, req *tfprotov5.Re res, ok := s.provider.ResourcesMap[req.TypeName] if !ok { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) return resp, nil } schemaBlock := s.getResourceSchemaBlock(req.TypeName) stateVal, err := msgpack.Unmarshal(req.CurrentState.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } instanceState, err := res.ShimInstanceStateFromValue(stateVal) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } instanceState.RawState = stateVal @@ -557,7 +594,7 @@ func (s *GRPCProviderServer) ReadResource(ctx context.Context, req *tfprotov5.Re private := make(map[string]interface{}) if len(req.Private) > 0 { if err := json.Unmarshal(req.Private, &private); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } } @@ -567,14 +604,14 @@ func (s *GRPCProviderServer) ReadResource(ctx context.Context, req *tfprotov5.Re if pmSchemaBlock != nil && req.ProviderMeta != nil { providerSchemaVal, err := msgpack.Unmarshal(req.ProviderMeta.MsgPack, pmSchemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } instanceState.ProviderMeta = providerSchemaVal } newInstanceState, diags := res.RefreshWithoutUpgrade(ctx, instanceState, s.provider.Meta()) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, diags) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, diags) if diags.HasError() { return resp, nil } @@ -585,7 +622,7 @@ func (s *GRPCProviderServer) ReadResource(ctx context.Context, req *tfprotov5.Re // to see a null value (in the cty sense) in that case. 
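// Clarifying note: cty.NullVal(ty) is the typed null of the resource's
// object schema, and returning its msgpack encoding as NewState is how the
// protocol tells Terraform Core that the resource no longer exists, so Core
// removes the instance from state.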
newStateMP, err := msgpack.Marshal(cty.NullVal(schemaBlock.ImpliedType()), schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) } resp.NewState = &tfprotov5.DynamicValue{ MsgPack: newStateMP, @@ -598,7 +635,7 @@ func (s *GRPCProviderServer) ReadResource(ctx context.Context, req *tfprotov5.Re newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(newInstanceState.Attributes, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -607,7 +644,7 @@ func (s *GRPCProviderServer) ReadResource(ctx context.Context, req *tfprotov5.Re newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -619,6 +656,7 @@ func (s *GRPCProviderServer) ReadResource(ctx context.Context, req *tfprotov5.Re } func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprotov5.PlanResourceChangeRequest) (*tfprotov5.PlanResourceChangeResponse, error) { + ctx = logging.InitContext(ctx) resp := &tfprotov5.PlanResourceChangeResponse{} // This is a signal to Terraform Core that we're doing the best we can to @@ -627,18 +665,18 @@ func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot // forward to any new SDK implementations, since setting it prevents us // from catching certain classes of provider bug that can lead to // confusing downstream errors. - resp.UnsafeToUseLegacyTypeSystem = true + resp.UnsafeToUseLegacyTypeSystem = true //nolint:staticcheck res, ok := s.provider.ResourcesMap[req.TypeName] if !ok { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) return resp, nil } schemaBlock := s.getResourceSchemaBlock(req.TypeName) priorStateVal, err := msgpack.Unmarshal(req.PriorState.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -646,7 +684,7 @@ func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot proposedNewStateVal, err := msgpack.Unmarshal(req.ProposedNewState.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -659,13 +697,13 @@ func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } priorState, err := res.ShimInstanceStateFromValue(priorStateVal) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } priorState.RawState = priorStateVal @@ -674,7 +712,7 @@ func (s 
*GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot priorPrivate := make(map[string]interface{}) if len(req.PriorPrivate) > 0 { if err := json.Unmarshal(req.PriorPrivate, &priorPrivate); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } } @@ -685,15 +723,15 @@ func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot if pmSchemaBlock != nil && req.ProviderMeta != nil { providerSchemaVal, err := msgpack.Unmarshal(req.ProviderMeta.MsgPack, pmSchemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } priorState.ProviderMeta = providerSchemaVal } // Ensure there are no nulls that will cause helper/schema to panic. - if err := validateConfigNulls(proposedNewStateVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + if err := validateConfigNulls(ctx, proposedNewStateVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -702,7 +740,7 @@ func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot diff, err := res.SimpleDiff(ctx, priorState, cfg, s.provider.Meta()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -734,22 +772,27 @@ func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot // now we need to apply the diff to the prior state, so get the planned state plannedAttrs, err := diff.Apply(priorState.Attributes, schemaBlock) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + plannedStateVal, err := hcl2shim.HCL2ValueFromFlatmap(plannedAttrs, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } plannedStateVal, err = schemaBlock.CoerceValue(plannedStateVal) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } plannedStateVal = normalizeNullValues(plannedStateVal, proposedNewStateVal, false) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -777,7 +820,7 @@ func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot plannedMP, err := msgpack.Marshal(plannedStateVal, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } resp.PlannedState = &tfprotov5.DynamicValue{ @@ -787,12 +830,12 @@ func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot // encode any timeouts into the diff Meta t := &ResourceTimeout{} if err := t.ConfigDecode(res, cfg); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } if err := t.DiffEncode(diff); err != nil { - 
resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -815,7 +858,7 @@ func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot // the Meta field gets encoded into PlannedPrivate plannedPrivate, err := json.Marshal(privateMap) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } resp.PlannedPrivate = plannedPrivate @@ -842,7 +885,7 @@ func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -855,6 +898,7 @@ func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprot } func (s *GRPCProviderServer) ApplyResourceChange(ctx context.Context, req *tfprotov5.ApplyResourceChangeRequest) (*tfprotov5.ApplyResourceChangeResponse, error) { + ctx = logging.InitContext(ctx) resp := &tfprotov5.ApplyResourceChangeResponse{ // Start with the existing state as a fallback NewState: req.PriorState, @@ -862,39 +906,39 @@ func (s *GRPCProviderServer) ApplyResourceChange(ctx context.Context, req *tfpro res, ok := s.provider.ResourcesMap[req.TypeName] if !ok { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) return resp, nil } schemaBlock := s.getResourceSchemaBlock(req.TypeName) priorStateVal, err := msgpack.Unmarshal(req.PriorState.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } plannedStateVal, err := msgpack.Unmarshal(req.PlannedState.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } priorState, err := res.ShimInstanceStateFromValue(priorStateVal) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } private := make(map[string]interface{}) if len(req.PlannedPrivate) > 0 { if err := json.Unmarshal(req.PlannedPrivate, &private); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } } @@ -916,7 +960,7 @@ func (s *GRPCProviderServer) ApplyResourceChange(ctx context.Context, req *tfpro } else { diff, err = DiffFromValues(ctx, priorStateVal, plannedStateVal, configVal, stripResourceModifiers(res)) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return 
resp, nil } } @@ -968,14 +1012,14 @@ func (s *GRPCProviderServer) ApplyResourceChange(ctx context.Context, req *tfpro if pmSchemaBlock != nil && req.ProviderMeta != nil { providerSchemaVal, err := msgpack.Unmarshal(req.ProviderMeta.MsgPack, pmSchemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } priorState.ProviderMeta = providerSchemaVal } newInstanceState, diags := res.Apply(ctx, priorState, diff, s.provider.Meta()) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, diags) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, diags) newStateVal := cty.NullVal(schemaBlock.ImpliedType()) @@ -985,7 +1029,7 @@ func (s *GRPCProviderServer) ApplyResourceChange(ctx context.Context, req *tfpro if destroy || newInstanceState == nil || newInstanceState.Attributes == nil || newInstanceState.ID == "" { newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } resp.NewState = &tfprotov5.DynamicValue{ @@ -998,7 +1042,7 @@ func (s *GRPCProviderServer) ApplyResourceChange(ctx context.Context, req *tfpro // entire object, even if the new state was nil. newStateVal, err = StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -1008,7 +1052,7 @@ func (s *GRPCProviderServer) ApplyResourceChange(ctx context.Context, req *tfpro newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } resp.NewState = &tfprotov5.DynamicValue{ @@ -1017,7 +1061,7 @@ func (s *GRPCProviderServer) ApplyResourceChange(ctx context.Context, req *tfpro meta, err := json.Marshal(newInstanceState.Meta) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } resp.Private = meta @@ -1028,12 +1072,13 @@ func (s *GRPCProviderServer) ApplyResourceChange(ctx context.Context, req *tfpro // forward to any new SDK implementations, since setting it prevents us // from catching certain classes of provider bug that can lead to // confusing downstream errors. 
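// A condensed sketch of the msgpack round-trip these handlers perform on
// every request: wire payloads are decoded against the schema block's
// implied cty type, and the new state is encoded the same way. The object
// type here is an assumption for illustration.
package example

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/go-cty/cty/msgpack"
)

func roundTrip() error {
	ty := cty.Object(map[string]cty.Type{"id": cty.String})

	// Encode, as done for resp.NewState once Apply succeeds.
	raw, err := msgpack.Marshal(cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("abc123")}), ty)
	if err != nil {
		return err
	}

	// Decode, as done for req.PriorState, req.PlannedState, and req.Config.
	v, err := msgpack.Unmarshal(raw, ty)
	if err != nil {
		return err
	}
	fmt.Println(v.GetAttr("id").AsString()) // abc123
	return nil
}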
- resp.UnsafeToUseLegacyTypeSystem = true + resp.UnsafeToUseLegacyTypeSystem = true //nolint:staticcheck return resp, nil } func (s *GRPCProviderServer) ImportResourceState(ctx context.Context, req *tfprotov5.ImportResourceStateRequest) (*tfprotov5.ImportResourceStateResponse, error) { + ctx = logging.InitContext(ctx) resp := &tfprotov5.ImportResourceStateResponse{} info := &terraform.InstanceInfo{ @@ -1042,7 +1087,7 @@ func (s *GRPCProviderServer) ImportResourceState(ctx context.Context, req *tfpro newInstanceStates, err := s.provider.ImportState(ctx, info, req.ID) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -1058,7 +1103,7 @@ func (s *GRPCProviderServer) ImportResourceState(ctx context.Context, req *tfpro schemaBlock := s.getResourceSchemaBlock(resourceType) newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -1067,13 +1112,13 @@ func (s *GRPCProviderServer) ImportResourceState(ctx context.Context, req *tfpro newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } meta, err := json.Marshal(is.Meta) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -1092,19 +1137,20 @@ func (s *GRPCProviderServer) ImportResourceState(ctx context.Context, req *tfpro } func (s *GRPCProviderServer) ReadDataSource(ctx context.Context, req *tfprotov5.ReadDataSourceRequest) (*tfprotov5.ReadDataSourceResponse, error) { + ctx = logging.InitContext(ctx) resp := &tfprotov5.ReadDataSourceResponse{} schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } // Ensure there are no nulls that will cause helper/schema to panic. - if err := validateConfigNulls(configVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + if err := validateConfigNulls(ctx, configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -1114,12 +1160,12 @@ func (s *GRPCProviderServer) ReadDataSource(ctx context.Context, req *tfprotov5. // the old behavior res, ok := s.provider.DataSourcesMap[req.TypeName] if !ok { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown data source: %s", req.TypeName)) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, fmt.Errorf("unknown data source: %s", req.TypeName)) return resp, nil } diff, err := res.Diff(ctx, nil, config, s.provider.Meta()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -1131,14 +1177,14 @@ func (s *GRPCProviderServer) ReadDataSource(ctx context.Context, req *tfprotov5. 
// now we can get the new complete data source newInstanceState, diags := res.ReadDataApply(ctx, diff, s.provider.Meta()) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, diags) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, diags) if diags.HasError() { return resp, nil } newStateVal, err := StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } @@ -1146,7 +1192,7 @@ func (s *GRPCProviderServer) ReadDataSource(ctx context.Context, req *tfprotov5. newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) return resp, nil } resp.State = &tfprotov5.DynamicValue{ @@ -1439,7 +1485,7 @@ func normalizeNullValues(dst, src cty.Value, apply bool) cty.Value { // appears in a list-like attribute (list, set, tuple) will present a nil value // to helper/schema which can panic. Return an error to the user in this case, // indicating the attribute with the null value. -func validateConfigNulls(v cty.Value, path cty.Path) []*tfprotov5.Diagnostic { +func validateConfigNulls(ctx context.Context, v cty.Value, path cty.Path) []*tfprotov5.Diagnostic { var diags []*tfprotov5.Diagnostic if v.IsNull() || !v.IsKnown() { return diags @@ -1468,8 +1514,8 @@ func validateConfigNulls(v cty.Value, path cty.Path) []*tfprotov5.Diagnostic { continue } - d := validateConfigNulls(ev, append(path, cty.IndexStep{Key: kv})) - diags = convert.AppendProtoDiag(diags, d) + d := validateConfigNulls(ctx, ev, append(path, cty.IndexStep{Key: kv})) + diags = convert.AppendProtoDiag(ctx, diags, d) } case v.Type().IsMapType() || v.Type().IsObjectType(): @@ -1483,8 +1529,8 @@ func validateConfigNulls(v cty.Value, path cty.Path) []*tfprotov5.Diagnostic { case v.Type().IsObjectType(): step = cty.GetAttrStep{Name: kv.AsString()} } - d := validateConfigNulls(ev, append(path, step)) - diags = convert.AppendProtoDiag(diags, d) + d := validateConfigNulls(ctx, ev, append(path, step)) + diags = convert.AppendProtoDiag(ctx, diags, d) } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go index 0d1f8e23c..91a21b389 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/terraform-plugin-go/tfprotov5" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/meta" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) @@ -265,7 +266,7 @@ func (p *Provider) Configure(ctx context.Context, c *terraform.ResourceConfig) d } if p.configured { - log.Printf("[WARN] Previously configured provider being re-configured. This can cause issues in concurrent testing if the configurations are not equal.") + logging.HelperSchemaWarn(ctx, "Previously configured provider being re-configured. 
This can cause issues in concurrent testing if the configurations are not equal.") } sm := schemaMap(p.Schema) @@ -378,11 +379,15 @@ func (p *Provider) ImportState( results := []*ResourceData{data} if r.Importer.State != nil || r.Importer.StateContext != nil { var err error + logging.HelperSchemaTrace(ctx, "Calling downstream") + if r.Importer.StateContext != nil { results, err = r.Importer.StateContext(ctx, data, p.meta) } else { results, err = r.Importer.State(data, p.meta) } + logging.HelperSchemaTrace(ctx, "Called downstream") + if err != nil { return nil, err } @@ -391,6 +396,21 @@ func (p *Provider) ImportState( // Convert the results to InstanceState values and return it states := make([]*terraform.InstanceState, len(results)) for i, r := range results { + if r == nil { + return nil, fmt.Errorf("The provider returned a missing resource during ImportResourceState. " + + "This is generally a bug in the resource implementation for import. " + + "Resource import code should return an error for missing resources and skip returning a missing or empty ResourceData. " + + "Please report this to the provider developers.") + } + + if r.Id() == "" { + return nil, fmt.Errorf("The provider returned a resource missing an identifier during ImportResourceState. " + + "This is generally a bug in the resource implementation for import. " + + "Resource import code should not call d.SetId(\"\") or create an empty ResourceData. " + + "If the resource is missing, instead return an error. " + + "Please report this to the provider developers.") + } + states[i] = r.State() } @@ -398,10 +418,10 @@ func (p *Provider) ImportState( // isn't obvious so we circumvent that with a friendlier error. for _, s := range states { if s == nil { - return nil, fmt.Errorf( - "nil entry in ImportState results. This is always a bug with\n" + - "the resource that is being imported. Please report this as\n" + - "a bug to Terraform.") + return nil, fmt.Errorf("The provider returned a missing resource during ImportResourceState. " + + "This is generally a bug in the resource implementation for import. " + + "Resource import code should return an error for missing resources. " + + "Please report this to the provider developers.") } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go index b0a8631ca..5b40bec89 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go @@ -4,12 +4,12 @@ import ( "context" "errors" "fmt" - "log" "strconv" "github.com/hashicorp/go-cty/cty" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) @@ -31,35 +31,44 @@ var ReservedResourceFields = []string{ "provisioner", } -// Resource represents a thing in Terraform that has a set of configurable -// attributes and a lifecycle (create, read, update, delete). +// Resource is an abstraction for multiple Terraform concepts: // -// The Resource schema is an abstraction that allows provider writers to -// worry only about CRUD operations while off-loading validation, diff -// generation, etc. to this higher level library. 
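// A hedged sketch of import code that satisfies the checks added to
// ImportState above: a missing remote object yields an error rather than a
// nil entry or an empty ID. exampleAPIGet is a hypothetical client call,
// not an API from this diff.
package example

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// exampleAPIGet stands in for a real API lookup in this sketch.
func exampleAPIGet(ctx context.Context, id string) error { return nil }

func exampleImporter() *schema.ResourceImporter {
	return &schema.ResourceImporter{
		StateContext: func(ctx context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
			// Never return nil entries or call d.SetId(""): the new
			// ImportResourceState validation treats both as provider bugs.
			if err := exampleAPIGet(ctx, d.Id()); err != nil {
				return nil, fmt.Errorf("resource %q not found: %w", d.Id(), err)
			}
			return []*schema.ResourceData{d}, nil
		},
	}
}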
+// - Managed Resource: An infrastructure component with a schema, lifecycle +// operations such as create, read, update, and delete +// (CRUD), and optional implementation details such as +// import support, upgrade state support, and difference +// customization. +// - Data Resource: Also known as a data source. An infrastructure component +// with a schema and only the read lifecycle operation. +// - Block: When implemented within a Schema type Elem field, a configuration +// block that contains nested schema information such as attributes +// and blocks. // -// In spite of the name, this struct is not used only for terraform resources, -// but also for data sources. In the case of data sources, the Create, -// Update and Delete functions must not be provided. +// To fully implement managed resources, the Provider type ResourcesMap field +// should include a reference to an implementation of this type. To fully +// implement data resources, the Provider type DataSourcesMap field should +// include a reference to an implementation of this type. +// +// Each field further documents any constraints based on the Terraform concept +// being implemented. type Resource struct { - // Schema is the schema for the configuration of this resource. - // - // The keys of this map are the configuration keys, and the values - // describe the schema of the configuration value. + // Schema is the structure and type information for this component. This + // field is required for all Resource concepts. // - // The schema is used to represent both configurable data as well - // as data that might be computed in the process of creating this - // resource. + // The keys of this map are the names used in a practitioner configuration, + // such as the attribute or block name. The values describe the structure + // and type information of that attribute or block. Schema map[string]*Schema // SchemaVersion is the version number for this resource's Schema - // definition. The current SchemaVersion stored in the state for each - // resource. Provider authors can increment this version number - // when Schema semantics change. If the State's SchemaVersion is less than - // the current SchemaVersion, the InstanceState is yielded to the - // MigrateState callback, where the provider can make whatever changes it - // needs to update the state to be compatible to the latest version of the - // Schema. + // definition. This field is only valid when the Resource is a managed + // resource. + // + // The current SchemaVersion stored in the state for each resource. + // Provider authors can increment this version number when Schema semantics + // change in an incompatible manner. If the state's SchemaVersion is less + // than the current SchemaVersion, the MigrateState and StateUpgraders + // functionality is executed to upgrade the state information. // // When unset, SchemaVersion defaults to 0, so provider authors can start // their Versioning at any integer >= 1 @@ -67,6 +76,7 @@ type Resource struct { // MigrateState is responsible for updating an InstanceState with an old // version to the format expected by the current version of the Schema. + // This field is only valid when the Resource is a managed resource. // // It is called during Refresh if the State's stored SchemaVersion is less // than the current SchemaVersion of the Resource. @@ -86,7 +96,8 @@ type Resource struct { // StateUpgraders contains the functions responsible for upgrading an // existing state with an old schema version to a newer schema. 
It is // called specifically by Terraform when the stored schema version is less - // than the current SchemaVersion of the Resource. + // than the current SchemaVersion of the Resource. This field is only valid + // when the Resource is a managed resource. // // StateUpgraders map specific schema versions to a StateUpgrader // function. The registered versions are expected to be ordered, @@ -95,57 +106,261 @@ // MigrateState. StateUpgraders []StateUpgrader - // The functions below are the CRUD operations for this resource. + // Create is called when the provider must create a new instance of a + // managed resource. This field is only valid when the Resource is a + // managed resource. Only one of Create, CreateContext, or + // CreateWithoutTimeout should be implemented. // - // Deprecated: Please use the context aware equivalents instead. Only one of - // the operations or context aware equivalent can be set, not both. + // The *ResourceData parameter contains the plan and state data for this + // managed resource instance. The available data in the Get* methods is + // the proposed state, which is the merged data of the practitioner + // configuration and any CustomizeDiff field logic. + // + // The SetId method must be called with a non-empty value for the managed + // resource instance to be properly saved into the Terraform state and + // avoid an "inconsistent result after apply" error. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The error return parameter, if not nil, will be converted into an error + // diagnostic when passed back to Terraform. + // + // Deprecated: Use CreateContext or CreateWithoutTimeout instead. This + // implementation does not support request cancellation initiated by + // Terraform, such as a system or practitioner sending SIGINT (Ctrl-c). + // This implementation also does not support warning diagnostics. Create CreateFunc - // Deprecated: Please use the context aware equivalents instead. + + // Read is called when the provider must refresh the state of a managed + // resource instance or data resource instance. This field is only valid + // when the Resource is a managed resource or data resource. Only one of + // Read, ReadContext, or ReadWithoutTimeout should be implemented. + // + // The *ResourceData parameter contains the state data for this managed + // resource instance or data resource instance. + // + // Managed resources can signal to Terraform that the managed resource + // instance no longer exists and potentially should be recreated by calling + // the SetId method with an empty string ("") parameter and without + // returning an error. + // + // Data resources that are designed to return state for a singular + // infrastructure component should conventionally return an error if that + // infrastructure does not exist and omit any calls to the + // SetId method. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The error return parameter, if not nil, will be converted into an error + // diagnostic when passed back to Terraform.
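// A minimal sketch of the drift convention the Read documentation above
// describes: when the remote object is gone, clear the ID and return no
// error so Terraform plans recreation. errNotFound and exampleAPIRead are
// assumptions for illustration.
package example

import (
	"context"
	"errors"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

var errNotFound = errors.New("not found")

// exampleAPIRead stands in for a real API read in this sketch.
func exampleAPIRead(ctx context.Context, id string) (string, error) { return "", errNotFound }

func exampleRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	name, err := exampleAPIRead(ctx, d.Id())
	if errors.Is(err, errNotFound) {
		// Empty ID plus a nil error signals removal, per the docs above.
		d.SetId("")
		return nil
	}
	if err != nil {
		return diag.FromErr(err)
	}
	if err := d.Set("name", name); err != nil {
		return diag.FromErr(err)
	}
	return nil
}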
+ // + // Deprecated: Use ReadContext or ReadWithoutTimeout instead. This + // implementation does not support request cancellation initiated by + // Terraform, such as a system or practitioner sending SIGINT (Ctrl-c). + // This implementation also does not support warning diagnostics. Read ReadFunc - // Deprecated: Please use the context aware equivalents instead. + + // Update is called when the provider must update an instance of a + // managed resource. This field is only valid when the Resource is a + // managed resource. Only one of Update, UpdateContext, or + // UpdateWithoutTimeout should be implemented. + // + // This implementation is optional. If omitted, all Schema must enable + // the ForceNew field and any practitioner changes that would have + // caused an update will instead destroy and recreate the infrastructure + // component. + // + // The *ResourceData parameter contains the plan and state data for this + // managed resource instance. The available data in the Get* methods is + // the proposed state, which is the merged data of the prior state, + // practitioner configuration, and any CustomizeDiff field logic. The + // available data for the GetChange* and HasChange* methods is the prior + // state and proposed state. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The error return parameter, if not nil, will be converted into an error + // diagnostic when passed back to Terraform. + // + // Deprecated: Use UpdateContext or UpdateWithoutTimeout instead. This + // implementation does not support request cancellation initiated by + // Terraform, such as a system or practitioner sending SIGINT (Ctrl-c). + // This implementation also does not support warning diagnostics. Update UpdateFunc - // Deprecated: Please use the context aware equivalents instead. + + // Delete is called when the provider must destroy the instance of a + // managed resource. This field is only valid when the Resource is a + // managed resource. Only one of Delete, DeleteContext, or + // DeleteWithoutTimeout should be implemented. + // + // The *ResourceData parameter contains the state data for this managed + // resource instance. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The error return parameter, if not nil, will be converted into an error + // diagnostic when passed back to Terraform. + // + // Deprecated: Use DeleteContext or DeleteWithoutTimeout instead. This + // implementation does not support request cancellation initiated by + // Terraform, such as a system or practitioner sending SIGINT (Ctrl-c). + // This implementation also does not support warning diagnostics. Delete DeleteFunc // Exists is a function that is called to check if a resource still - // exists. If this returns false, then this will affect the diff + // exists. This field is only valid when the Resource is a managed + // resource. + // + // If this returns false, then this will affect the diff // accordingly. If this function isn't set, it will not be called.
You // can also signal existence in the Read method by calling d.SetId("") // if the Resource is no longer present and should be removed from state. // The *ResourceData passed to Exists should _not_ be modified. // - // Deprecated: ReadContext should be able to encapsulate the logic of Exists + // Deprecated: Remove in preference of ReadContext or ReadWithoutTimeout. Exists ExistsFunc - // The functions below are the CRUD operations for this resource. + // CreateContext is called when the provider must create a new instance of + // a managed resource. This field is only valid when the Resource is a + // managed resource. Only one of Create, CreateContext, or + // CreateWithoutTimeout should be implemented. + // + // The Context parameter stores SDK information, such as loggers and + // timeout deadlines. It also is wired to receive any cancellation from + // Terraform such as a system or practitioner sending SIGINT (Ctrl-c). // - // The only optional operation is Update. If Update is not - // implemented, then updates will not be supported for this resource. + // By default, CreateContext has a 20 minute timeout. Use the Timeouts + // field to control the default duration or implement CreateWithoutTimeout + // instead of CreateContext to remove the default timeout. // - // The ResourceData parameter in the functions below are used to - // query configuration and changes for the resource as well as to set - // the ID, computed data, etc. + // The *ResourceData parameter contains the plan and state data for this + // managed resource instance. The available data in the Get* methods is + // the proposed state, which is the merged data of the practitioner + // configuration and any CustomizeDiff field logic. // - // The interface{} parameter is the result of the ConfigureFunc in - // the provider for this resource. If the provider does not define - // a ConfigureFunc, this will be nil. This parameter should be used - // to store API clients, configuration structures, etc. + // The SetId method must be called with a non-empty value for the managed + // resource instance to be properly saved into the Terraform state and + // avoid an "inconsistent result after apply" error. // - // These functions are passed a context configured to timeout with whatever - // was set as the timeout for this operation. Useful for forwarding on to - // backend SDK's that accept context. The context will also cancel if - // Terraform sends a cancellation signal. + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. // - // These functions return diagnostics, allowing developers to build - // a list of warnings and errors to be presented to the Terraform user. - // The AttributePath of those diagnostics should be built within these - // functions, please consult go-cty documentation for building a cty.Path + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. CreateContext CreateContextFunc - ReadContext ReadContextFunc + + // ReadContext is called when the provider must refresh the state of a managed + // resource instance or data resource instance. This field is only valid + // when the Resource is a managed resource or data resource.
Only one of + // Read, ReadContext, or ReadWithoutTimeout should be implemented. + // + // The Context parameter stores SDK information, such as loggers and + // timeout deadlines. It also is wired to receive any cancellation from + // Terraform such as a system or practitioner sending SIGINT (Ctrl-c). + // + // By default, ReadContext has a 20 minute timeout. Use the Timeouts + // field to control the default duration or implement ReadWithoutTimeout + // instead of ReadContext to remove the default timeout. + // + // The *ResourceData parameter contains the state data for this managed + // resource instance or data resource instance. + // + // Managed resources can signal to Terraform that the managed resource + // instance no longer exists and potentially should be recreated by calling + // the SetId method with an empty string ("") parameter and without + // returning an error. + // + // Data resources that are designed to return state for a singular + // infrastructure component should conventionally return an error if that + // infrastructure does not exist and omit any calls to the + // SetId method. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. + ReadContext ReadContextFunc + + // UpdateContext is called when the provider must update an instance of a + // managed resource. This field is only valid when the Resource is a + // managed resource. Only one of Update, UpdateContext, or + // UpdateWithoutTimeout should be implemented. + // + // This implementation is optional. If omitted, all Schema must enable + // the ForceNew field and any practitioner changes that would have + // caused an update will instead destroy and recreate the infrastructure + // component. + // + // The Context parameter stores SDK information, such as loggers and + // timeout deadlines. It also is wired to receive any cancellation from + // Terraform such as a system or practitioner sending SIGINT (Ctrl-c). + // + // By default, UpdateContext has a 20 minute timeout. Use the Timeouts + // field to control the default duration or implement UpdateWithoutTimeout + // instead of UpdateContext to remove the default timeout. + // + // The *ResourceData parameter contains the plan and state data for this + // managed resource instance. The available data in the Get* methods is + // the proposed state, which is the merged data of the prior state, + // practitioner configuration, and any CustomizeDiff field logic. The + // available data for the GetChange* and HasChange* methods is the prior + // state and proposed state. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. UpdateContext UpdateContextFunc + + // DeleteContext is called when the provider must destroy the instance of a + // managed resource. This field is only valid when the Resource is a + // managed resource.
Only one of Delete, DeleteContext, or + // DeleteWithoutTimeout should be implemented. + // + // The Context parameter stores SDK information, such as loggers and + // timeout deadlines. It also is wired to receive any cancellation from + // Terraform such as a system or practitioner sending SIGINT (Ctrl-c). + // + // By default, DeleteContext has a 20 minute timeout. Use the Timeouts + // field to control the default duration or implement DeleteWithoutTimeout + // instead of DeleteContext to remove the default timeout. + // + // The *ResourceData parameter contains the state data for this managed + // resource instance. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. DeleteContext DeleteContextFunc - // CreateWithoutTimeout is equivalent to CreateContext with no context timeout. + // CreateWithoutTimeout is called when the provider must create a new + // instance of a managed resource. This field is only valid when the + // Resource is a managed resource. Only one of Create, CreateContext, or + // CreateWithoutTimeout should be implemented. // // Most resources should prefer CreateContext with properly implemented // operation timeout values, however there are cases where operation @@ -153,9 +368,34 @@ // logic, such as a mutex, to prevent remote system errors. Since these // operations would have an indeterminate timeout that scales with the // number of resources, this allows resources to control timeout behavior. + // + // The Context parameter stores SDK information, such as loggers. It also + // is wired to receive any cancellation from Terraform such as a system or + // practitioner sending SIGINT (Ctrl-c). + // + // The *ResourceData parameter contains the plan and state data for this + // managed resource instance. The available data in the Get* methods is + // the proposed state, which is the merged data of the practitioner + // configuration and any CustomizeDiff field logic. + // + // The SetId method must be called with a non-empty value for the managed + // resource instance to be properly saved into the Terraform state and + // avoid an "inconsistent result after apply" error. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. CreateWithoutTimeout CreateContextFunc - // ReadWithoutTimeout is equivalent to ReadContext with no context timeout. + // ReadWithoutTimeout is called when the provider must refresh the state of + // a managed resource instance or data resource instance. This field is + // only valid when the Resource is a managed resource or data resource. + // Only one of Read, ReadContext, or ReadWithoutTimeout should be + // implemented.
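// A short sketch of the CreateContext contract documented above: honor the
// request-scoped ctx, call SetId with a non-empty value on success, and
// return diagnostics. exampleAPICreate is a hypothetical client call.
package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// exampleAPICreate stands in for a real API create in this sketch.
func exampleAPICreate(ctx context.Context, name string) (string, error) { return "id-123", nil }

func exampleCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	id, err := exampleAPICreate(ctx, d.Get("name").(string))
	if err != nil {
		return diag.FromErr(err)
	}
	// A non-empty ID is what keeps Terraform from reporting an
	// "inconsistent result after apply" error.
	d.SetId(id)
	return nil
}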
// // Most resources should prefer ReadContext with properly implemented // operation timeout values, however there are cases where operation @@ -163,9 +403,37 @@ // logic, such as a mutex, to prevent remote system errors. Since these // operations would have an indeterminate timeout that scales with the // number of resources, this allows resources to control timeout behavior. + // + // The Context parameter stores SDK information, such as loggers. It also + // is wired to receive any cancellation from Terraform such as a system or + // practitioner sending SIGINT (Ctrl-c). + // + // The *ResourceData parameter contains the state data for this managed + // resource instance or data resource instance. + // + // Managed resources can signal to Terraform that the managed resource + // instance no longer exists and potentially should be recreated by calling + // the SetId method with an empty string ("") parameter and without + // returning an error. + // + // Data resources that are designed to return state for a singular + // infrastructure component should conventionally return an error if that + // infrastructure does not exist and omit any calls to the + // SetId method. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. ReadWithoutTimeout ReadContextFunc - // UpdateWithoutTimeout is equivalent to UpdateContext with no context timeout. + // UpdateWithoutTimeout is called when the provider must update an instance + // of a managed resource. This field is only valid when the Resource is a + // managed resource. Only one of Update, UpdateContext, or + // UpdateWithoutTimeout should be implemented. // // Most resources should prefer UpdateContext with properly implemented // operation timeout values, however there are cases where operation @@ -173,9 +441,36 @@ // logic, such as a mutex, to prevent remote system errors. Since these // operations would have an indeterminate timeout that scales with the // number of resources, this allows resources to control timeout behavior. + // + // This implementation is optional. If omitted, all Schema must enable + // the ForceNew field and any practitioner changes that would have + // caused an update will instead destroy and recreate the infrastructure + // component. + // + // The Context parameter stores SDK information, such as loggers. It also + // is wired to receive any cancellation from Terraform such as a system or + // practitioner sending SIGINT (Ctrl-c). + // + // The *ResourceData parameter contains the plan and state data for this + // managed resource instance. The available data in the Get* methods is + // the proposed state, which is the merged data of the prior state, + // practitioner configuration, and any CustomizeDiff field logic. The + // available data for the GetChange* and HasChange* methods is the prior + // state and proposed state. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil.
This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. UpdateWithoutTimeout UpdateContextFunc - // DeleteWithoutTimeout is equivalent to DeleteContext with no context timeout. + // DeleteWithoutTimeout is called when the provider must destroy the + // instance of a managed resource. This field is only valid when the + // Resource is a managed resource. Only one of Delete, DeleteContext, or + // DeleteWithoutTimeout should be implemented. // // Most resources should prefer DeleteContext with properly implemented // operation timeout values, however there are cases where operation @@ -183,15 +478,37 @@ type Resource struct { // logic, such as a mutex, to prevent remote system errors. Since these // operations would have an indeterminate timeout that scales with the // number of resources, this allows resources to control timeout behavior. + // + // The Context parameter stores SDK information, such as loggers. It also + // is wired to receive any cancellation from Terraform such as a system or + // practitioner sending SIGINT (Ctrl-c). + // + // The *ResourceData parameter contains the state data for this managed + // resource instance. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. DeleteWithoutTimeout DeleteContextFunc - // CustomizeDiff is a custom function for working with the diff that - // Terraform has created for this resource - it can be used to customize the - // diff that has been created, diff values not controlled by configuration, - // or even veto the diff altogether and abort the plan. It is passed a - // *ResourceDiff, a structure similar to ResourceData but lacking most write - // functions like Set, while introducing new functions that work with the - // diff such as SetNew, SetNewComputed, and ForceNew. + // CustomizeDiff is called after a difference (plan) has been generated + // for the Resource and allows for customizations, such as setting values + // not controlled by configuration, conditionally triggering resource + // recreation, or implementing additional validation logic to abort a plan. + // This field is only valid when the Resource is a managed resource. + // + // The Context parameter stores SDK information, such as loggers. It also + // is wired to receive any cancellation from Terraform such as a system or + // practitioner sending SIGINT (Ctrl-c). + // + // The *ResourceDiff parameter is similar to ResourceData but replaces the + // Set method with other difference handling methods, such as SetNew, + // SetNewComputed, and ForceNew. In general, only Schema with Computed + // enabled can have those methods executed against them. // // The phases Terraform runs this in, and the state available via functions // like Get and GetChange, are as follows: @@ -205,41 +522,60 @@ type Resource struct { // // This function needs to be resilient to support all scenarios. // - // For the most part, only computed fields can be customized by this - // function. 
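// A sketch of a CustomizeDiff implementation in the shape documented above,
// assuming a hypothetical "engine_version" attribute and a naive string
// comparison: downgrades force replacement instead of an in-place update.
package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func exampleCustomizeDiff(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error {
	oldValue, newValue := d.GetChange("engine_version")
	if newValue.(string) < oldValue.(string) {
		// ForceNew marks the attribute so the plan recreates the resource.
		return d.ForceNew("engine_version")
	}
	return nil
}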
+ // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. // - // This function is only allowed on regular resources (not data sources). + // The error return parameter, if not nil, will be converted into an error + // diagnostic when passed back to Terraform. CustomizeDiff CustomizeDiffFunc - // Importer is the ResourceImporter implementation for this resource. + // Importer is called when the provider must import an instance of a + // managed resource. This field is only valid when the Resource is a + // managed resource. + // // If this is nil, then this resource does not support importing. If // this is non-nil, then it supports importing and ResourceImporter // must be validated. The validity of ResourceImporter is verified // by InternalValidate on Resource. Importer *ResourceImporter - // If non-empty, this string is emitted as a warning during Validate. + // If non-empty, this string is emitted as the details of a warning + // diagnostic during validation (validate, plan, and apply operations). + // This field is only valid when the Resource is a managed resource or + // data resource. DeprecationMessage string - // Timeouts allow users to specify specific time durations in which an - // operation should time out, to allow them to extend an action to suit their - // usage. For example, a user may specify a large Creation timeout for their - // AWS RDS Instance due to it's size, or restoring from a snapshot. - // Resource implementors must enable Timeout support by adding the allowed - // actions (Create, Read, Update, Delete, Default) to the Resource struct, and - // accessing them in the matching methods. + // Timeouts configures the default time duration allowed before a create, + // read, update, or delete operation is considered timed out, which returns + // an error to practitioners. This field is only valid when the Resource is + // a managed resource or data resource. + // + // When implemented, practitioners can add a timeouts configuration block + // within their managed resource or data resource configuration to further + // customize the create, read, update, or delete operation timeouts. For + // example, a configuration may specify a longer create timeout for a + // database resource due to its data size. + // + // The ResourceData that is passed to create, read, update, and delete + // functionality can access the merged time duration of the Resource + // default timeouts configured in this field and the practitioner timeouts + // configuration via the Timeout method. Practitioner configuration + // always overrides any default values set here, whether shorter or longer. Timeouts *ResourceTimeout // Description is used as the description for docs, the language server and // other user facing usage. It can be plain-text or markdown depending on the - // global DescriptionKind setting. + // global DescriptionKind setting. This field is valid for any Resource. Description string // UseJSONNumber should be set when state upgraders will expect // json.Numbers instead of float64s for numbers. This is added as a // toggle for backwards compatibility for type assertions, but should // be used in all new resources to avoid bugs with sufficiently large - // user input. + // user input. This field is only valid when the Resource is a managed + // resource. 
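// A sketch of the Timeouts interplay described above: the resource declares
// a default create timeout, and the create function reads the merged
// (default or practitioner-supplied) duration via d.Timeout. waitForCreate
// is a hypothetical polling helper.
package example

import (
	"context"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// waitForCreate stands in for real polling logic in this sketch.
func waitForCreate(ctx context.Context, timeout time.Duration) error { return nil }

func exampleResource() *schema.Resource {
	return &schema.Resource{
		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(30 * time.Minute),
		},
		CreateContext: func(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
			// d.Timeout returns the practitioner-configured value when one
			// is set, otherwise the default declared above.
			if err := waitForCreate(ctx, d.Timeout(schema.TimeoutCreate)); err != nil {
				return diag.FromErr(err)
			}
			d.SetId("example")
			return nil
		},
		Schema: map[string]*schema.Schema{},
	}
}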
// // See github.com/hashicorp/terraform-plugin-sdk/issues/655 for more // details. @@ -300,6 +636,7 @@ type DeleteContextFunc func(context.Context, *ResourceData, interface{}) diag.Di type StateMigrateFunc func( int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error) +// Implementation of a single schema version state upgrade. type StateUpgrader struct { // Version is the version schema that this Upgrader will handle, converting // it to Version+1. @@ -318,7 +655,36 @@ type StateUpgrader struct { Upgrade StateUpgradeFunc } -// See StateUpgrader +// Function signature for a schema version state upgrade handler. +// +// The Context parameter stores SDK information, such as loggers. It also +// is wired to receive any cancellation from Terraform such as a system or +// practitioner sending SIGINT (Ctrl-c). +// +// The map[string]interface{} parameter contains the previous schema version +// state data for a managed resource instance. The keys are top level attribute +// or block names mapped to values that can be type asserted similar to +// fetching values using the ResourceData Get* methods: +// +// - TypeBool: bool +// - TypeFloat: float +// - TypeInt: int +// - TypeList: []interface{} +// - TypeMap: map[string]interface{} +// - TypeSet: *Set +// - TypeString: string +// +// In certain scenarios, the map may be nil, so checking for that condition +// upfront is recommended to prevent potential panics. +// +// The interface{} parameter is the result of the Provider type +// ConfigureFunc field execution. If the Provider does not define +// a ConfigureFunc, this will be nil. This parameter is conventionally +// used to store API clients and other provider instance specific data. +// +// The map[string]interface{} return parameter should contain the upgraded +// schema version state data for a managed resource instance. Values must +// align to the typing mentioned above. type StateUpgradeFunc func(ctx context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) // See Resource documentation. @@ -412,16 +778,16 @@ func (r *Resource) Apply( rt := ResourceTimeout{} if _, ok := d.Meta[TimeoutKey]; ok { if err := rt.DiffDecode(d); err != nil { - log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + logging.HelperSchemaError(ctx, "Error decoding ResourceTimeout", map[string]interface{}{logging.KeyError: err}) } } else if s != nil { if _, ok := s.Meta[TimeoutKey]; ok { if err := rt.StateDecode(s); err != nil { - log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + logging.HelperSchemaError(ctx, "Error decoding ResourceTimeout", map[string]interface{}{logging.KeyError: err}) } } } else { - log.Printf("[DEBUG] No meta timeoutkey found in Apply()") + logging.HelperSchemaDebug(ctx, "No meta timeoutkey found in Apply()") } data.timeouts = &rt @@ -436,7 +802,10 @@ func (r *Resource) Apply( if d.Destroy || d.RequiresNew() { if s.ID != "" { // Destroy the resource since it is created + logging.HelperSchemaTrace(ctx, "Calling downstream") diags = append(diags, r.delete(ctx, data, meta)...) + logging.HelperSchemaTrace(ctx, "Called downstream") + if diags.HasError() { return r.recordCurrentSchemaVersion(data.State()), diags } @@ -464,7 +833,9 @@ func (r *Resource) Apply( if data.Id() == "" { // We're creating, it is a new resource. data.MarkNewResource() + logging.HelperSchemaTrace(ctx, "Calling downstream") diags = append(diags, r.create(ctx, data, meta)...) 
+ logging.HelperSchemaTrace(ctx, "Called downstream") } else { if !r.updateFuncSet() { return s, append(diags, diag.Diagnostic{ @@ -472,7 +843,9 @@ func (r *Resource) Apply( Summary: "doesn't support update", }) } + logging.HelperSchemaTrace(ctx, "Calling downstream") diags = append(diags, r.update(ctx, data, meta)...) + logging.HelperSchemaTrace(ctx, "Called downstream") } return r.recordCurrentSchemaVersion(data.State()), diags @@ -499,10 +872,10 @@ func (r *Resource) Diff( if instanceDiff != nil { if err := t.DiffEncode(instanceDiff); err != nil { - log.Printf("[ERR] Error encoding timeout to instance diff: %s", err) + logging.HelperSchemaError(ctx, "Error encoding timeout to instance diff", map[string]interface{}{logging.KeyError: err}) } } else { - log.Printf("[DEBUG] Instance Diff is nil in Diff()") + logging.HelperSchemaDebug(ctx, "Instance Diff is nil in Diff()") } return instanceDiff, err @@ -566,7 +939,10 @@ func (r *Resource) ReadDataApply( return nil, diag.FromErr(err) } + logging.HelperSchemaTrace(ctx, "Calling downstream") diags := r.read(ctx, data, meta) + logging.HelperSchemaTrace(ctx, "Called downstream") + state := data.State() if state != nil && state.ID == "" { // Data sources can set an ID if they want, but they aren't @@ -595,7 +971,7 @@ func (r *Resource) RefreshWithoutUpgrade( rt := ResourceTimeout{} if _, ok := s.Meta[TimeoutKey]; ok { if err := rt.StateDecode(s); err != nil { - log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + logging.HelperSchemaError(ctx, "Error decoding ResourceTimeout", map[string]interface{}{logging.KeyError: err}) } } @@ -612,7 +988,10 @@ func (r *Resource) RefreshWithoutUpgrade( data.providerMeta = s.ProviderMeta } + logging.HelperSchemaTrace(ctx, "Calling downstream") exists, err := r.Exists(data, meta) + logging.HelperSchemaTrace(ctx, "Called downstream") + if err != nil { return s, diag.FromErr(err) } @@ -632,12 +1011,16 @@ func (r *Resource) RefreshWithoutUpgrade( data.providerMeta = s.ProviderMeta } + logging.HelperSchemaTrace(ctx, "Calling downstream") diags := r.read(ctx, data, meta) + logging.HelperSchemaTrace(ctx, "Called downstream") + state := data.State() if state != nil && state.ID == "" { state = nil } + schemaMap(r.Schema).handleDiffSuppressOnRefresh(ctx, s, state) return r.recordCurrentSchemaVersion(state), diags } @@ -737,8 +1120,8 @@ func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error } } - for k, f := range tsm { - if isReservedResourceFieldName(k, f) { + for k := range tsm { + if isReservedResourceFieldName(k) { return fmt.Errorf("%s is a reserved field name", k) } } @@ -844,13 +1227,13 @@ func validateResourceID(s *Schema) error { // ID should at least be computed. If unspecified it will be set to Computed and Optional, // but Optional is unnecessary if undesired. 
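// A sketch matching the StateUpgradeFunc signature documented a few hunks
// above, assuming a v0 state that stored "name" which v1 renames to
// "display_name"; the raw state map can be nil, so guard first. It would be
// registered as schema.StateUpgrader{Version: 0, Upgrade: exampleUpgradeV0}.
package example

import (
	"context"
	"fmt"
)

func exampleUpgradeV0(ctx context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) {
	if rawState == nil {
		return nil, fmt.Errorf("unexpected nil state during schema upgrade")
	}
	// Values keep the Get*-style typing listed in the documentation, so a
	// TypeString attribute arrives as a plain string.
	rawState["display_name"] = rawState["name"]
	delete(rawState, "name")
	return rawState, nil
}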
- if s.Computed != true { + if !s.Computed { return fmt.Errorf(`the "id" attribute must be marked Computed`) } return nil } -func isReservedResourceFieldName(name string, s *Schema) bool { +func isReservedResourceFieldName(name string) bool { for _, reservedName := range ReservedResourceFields { if name == reservedName { return true diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go index 46ac3f2a4..396b5bc6f 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go @@ -528,7 +528,7 @@ func (d *ResourceData) getChange( func (d *ResourceData) get(addr []string, source getSource) getResult { d.once.Do(d.init) - level := "set" + var level string flags := source & ^getSourceLevelMask exact := flags&getSourceExact != 0 source = source & getSourceLevelMask diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_diff.go index 1fdc48cca..27575b297 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_diff.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_diff.go @@ -270,16 +270,16 @@ func (d *ResourceDiff) GetChangedKeysPrefix(prefix string) []string { // diffChange helps to implement resourceDiffer and derives its change values // from ResourceDiff's own change data, in addition to existing diff, config, and state. func (d *ResourceDiff) diffChange(key string) (interface{}, interface{}, bool, bool, bool) { - old, new, customized := d.getChange(key) + oldValue, newValue, customized := d.getChange(key) - if !old.Exists { - old.Value = nil + if !oldValue.Exists { + oldValue.Value = nil } - if !new.Exists || d.removed(key) { - new.Value = nil + if !newValue.Exists || d.removed(key) { + newValue.Value = nil } - return old.Value, new.Value, !reflect.DeepEqual(old.Value, new.Value), new.Computed, customized + return oldValue.Value, newValue.Value, !reflect.DeepEqual(oldValue.Value, newValue.Value), newValue.Computed, customized } // SetNew is used to set a new diff value for the mentioned key. The value must @@ -308,12 +308,12 @@ func (d *ResourceDiff) SetNewComputed(key string) error { } // setDiff performs common diff setting behaviour. -func (d *ResourceDiff) setDiff(key string, new interface{}, computed bool) error { +func (d *ResourceDiff) setDiff(key string, newValue interface{}, computed bool) error { if err := d.clear(key); err != nil { return err } - if err := d.newWriter.WriteField(strings.Split(key, "."), new, computed); err != nil { + if err := d.newWriter.WriteField(strings.Split(key, "."), newValue, computed); err != nil { return fmt.Errorf("Cannot set new diff value for key %s: %s", key, err) } @@ -374,8 +374,8 @@ func (d *ResourceDiff) Get(key string) interface{} { // results from the exact levels for the new diff, then from state and diff as // per normal. 
func (d *ResourceDiff) GetChange(key string) (interface{}, interface{}) { - old, new, _ := d.getChange(key) - return old.Value, new.Value + oldValue, newValue, _ := d.getChange(key) + return oldValue.Value, newValue.Value } // GetOk functions the same way as ResourceData.GetOk, but it also checks the @@ -422,19 +422,29 @@ func (d *ResourceDiff) NewValueKnown(key string) bool { return !r.Computed } +// HasChanges returns whether or not any of the given keys has been changed. +func (d *ResourceDiff) HasChanges(keys ...string) bool { + for _, key := range keys { + if d.HasChange(key) { + return true + } + } + return false +} + // HasChange checks to see if there is a change between state and the diff, or // in the overridden diff. func (d *ResourceDiff) HasChange(key string) bool { - old, new := d.GetChange(key) + oldValue, newValue := d.GetChange(key) // If the type implements the Equal interface, then call that // instead of just doing a reflect.DeepEqual. An example where this is // needed is *Set - if eq, ok := old.(Equal); ok { - return !eq.Equal(new) + if eq, ok := oldValue.(Equal); ok { + return !eq.Equal(newValue) } - return !reflect.DeepEqual(old, new) + return !reflect.DeepEqual(oldValue, newValue) } // Id returns the ID of this resource. @@ -506,16 +516,16 @@ func (d *ResourceDiff) GetRawPlan() cty.Value { // results from the exact levels for the new diff, then from state and diff as // per normal. func (d *ResourceDiff) getChange(key string) (getResult, getResult, bool) { - old := d.get(strings.Split(key, "."), "state") - var new getResult + oldValue := d.get(strings.Split(key, "."), "state") + var newValue getResult for p := range d.updatedKeys { if childAddrOf(key, p) { - new = d.getExact(strings.Split(key, "."), "newDiff") - return old, new, true + newValue = d.getExact(strings.Split(key, "."), "newDiff") + return oldValue, newValue, true } } - new = d.get(strings.Split(key, "."), "newDiff") - return old, new, false + newValue = d.get(strings.Split(key, "."), "newDiff") + return oldValue, newValue, false } // removed checks to see if the key is present in the existing, pre-customized diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_timeout.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_timeout.go index cee0b6781..2594677f5 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_timeout.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_timeout.go @@ -137,7 +137,13 @@ func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig) *timeout = rt } - return nil + + // This early return, which makes this function handle a single + // timeout configuration block, should likely not be here but the + // SDK has never raised an error for multiple blocks nor made any + // precedence decisions for them in the past. + // It is left here for compatibility reasons. 
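The new HasChanges helper above is a variadic convenience over HasChange, mirroring ResourceData.HasChanges; a sketch of how a provider's CustomizeDiff might use it (the attribute names are invented, and the helper/schema package import is assumed):

// Force recomputation of a derived attribute whenever any of the inputs it
// depends on is changing in the plan.
func customizeDiffExampleThing(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error {
	if d.HasChanges("size_gb", "tier") {
		return d.SetNewComputed("endpoint")
	}
	return nil
}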
+ return nil //nolint:staticcheck } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go index 8d5c22e04..7cbd58589 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go @@ -23,17 +23,24 @@ import ( "strings" "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-log/tfsdklog" "github.com/mitchellh/copystructure" "github.com/mitchellh/mapstructure" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -// Schema is used to describe the structure of a value. +// Schema describes the structure and type information of a value, whether +// sourced from configuration, plan, or state data. Schema is used in Provider +// and Resource types (for managed resources and data resources) and is +// fundamental to the implementations of ResourceData and ResourceDiff. // -// Read the documentation of the struct elements for important details. +// The Type field must always be set. At least one of Required, Optional, +// Optional and Computed, or Computed must be enabled unless the Schema is +// directly an implementation of an Elem field of another Schema. type Schema struct { // Type is the type of the value and must be one of the ValueType values. // @@ -71,14 +78,37 @@ type Schema struct { // behavior, and SchemaConfigModeBlock is not permitted. ConfigMode SchemaConfigMode - // If one of these is set, then this item can come from the configuration. - // Both cannot be set. If Optional is set, the value is optional. If - // Required is set, the value is required. + // Required indicates whether the practitioner must enter a value in the + // configuration for this attribute. Required cannot be used with Computed, + // Default, DefaultFunc, DiffSuppressFunc, DiffSuppressOnRefresh, + // InputDefault, Optional, or StateFunc. At least one of Required, + // Optional, Optional and Computed, or Computed must be enabled. + Required bool + + // Optional indicates whether the practitioner can choose to not enter + // a value in the configuration for this attribute. Optional cannot be used + // with Required. // - // One of these must be set if the value is not computed. That is: - // value either comes from the config, is computed, or is both. + // If also using Default or DefaultFunc, Computed should also be enabled, + // otherwise Terraform can output warning logs or "inconsistent result + // after apply" errors. Optional bool - Required bool + + // Computed indicates whether the provider may return its own value for + // this attribute or not. Computed cannot be used with Required. If + // Required and Optional are both false, the attribute will be considered + // "read only" for the practitioner, with only the provider able to set + // its value. + Computed bool + + // ForceNew indicates whether a change in this value requires the + // replacement (destroy and create) of the managed resource instance, + // rather than an in-place update. This field is only valid when the + // encapsulating Resource is a managed resource. + // + // If conditional replacement logic is needed, use the Resource type + // CustomizeDiff field to call the ResourceDiff type ForceNew method.
+ ForceNew bool // If this is non-nil, the provided function will be used during diff // of this field. If this is nil, a default diff for the type of the @@ -87,31 +117,74 @@ type Schema struct { // This allows comparison based on something other than primitive, list // or map equality - for example SSH public keys may be considered // equivalent regardless of trailing whitespace. + // + // If CustomizeDiffFunc makes this field ForceNew=true, the + // following DiffSuppressFunc will come in with the value of old being + // empty, as if creating a new resource. + // + // By default, DiffSuppressFunc is considered only when deciding whether + // a configuration value is significantly different than the prior state + // value during planning. Set DiffSuppressOnRefresh to opt in to checking + // this also during the refresh step. DiffSuppressFunc SchemaDiffSuppressFunc - // If this is non-nil, then this will be a default value that is used - // when this item is not set in the configuration. + // DiffSuppressOnRefresh enables using the DiffSuppressFunc to ignore + // normalization-classified changes returned by the resource type's + // "Read" or "ReadContext" function, in addition to the default behavior of + // doing so during planning. + // + // This is a particularly good choice for attributes which take strings + // containing "microsyntaxes" where various different values are packed + // together in some serialization where there are many ways to express the + // same information. For example, attributes which accept JSON data can + // include different whitespace characters without changing meaning, and + // case-insensitive identifiers may refer to the same object using different + // characters. // - // DefaultFunc can be specified to compute a dynamic default. - // Only one of Default or DefaultFunc can be set. If DefaultFunc is - // used then its return value should be stable to avoid generating - // confusing/perpetual diffs. + // This is valid only for attributes of primitive types, because + // DiffSuppressFunc itself is only compatible with primitive types. // - // Changing either Default or the return value of DefaultFunc can be - // a breaking change, especially if the attribute in question has - // ForceNew set. If a default needs to change to align with changing - // assumptions in an upstream API then it may be necessary to also use - // the MigrateState function on the resource to change the state to match, - // or have the Read function adjust the state value to align with the - // new default. + // The key benefit of activating this flag is that the result of Read or + // ReadContext will be cleaned of normalization-only changes in the same + // way as the planning result would normally be, which therefore prevents + // churn for downstream expressions deriving from this attribute and + // prevents incorrect "Values changed outside of Terraform" messages + // when the remote API returns values which have the same meaning as the + // prior state but in a different serialization. // - // If Required is true above, then Default cannot be set. DefaultFunc - // can be set with Required. If the DefaultFunc returns nil, then there - // will be no default and the user will be asked to fill it in. + // This is an opt-in because it was a later addition to the DiffSuppressFunc + // functionality which would cause some significant changes in behavior + // for existing providers if activated everywhere all at once.
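Taken together, the reworked Required, Optional, Computed, and ForceNew documentation above covers the common attribute behavior combinations; a short sketch with invented attribute names (helper/schema import assumed):

map[string]*schema.Schema{
	"name": {
		Type:     schema.TypeString,
		Required: true,
		ForceNew: true, // any change replaces the resource instance
	},
	"description": {
		Type:     schema.TypeString,
		Optional: true,
	},
	"region": {
		Type:     schema.TypeString,
		Optional: true,
		Computed: true, // provider supplies a value when omitted
	},
	"arn": {
		Type:     schema.TypeString,
		Computed: true, // read only for the practitioner
	},
}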
+ DiffSuppressOnRefresh bool + + // Default indicates a value to set if this attribute is not set in the + // configuration. Default cannot be used with DefaultFunc or Required. + // Default is only supported if the Type is TypeBool, TypeFloat, TypeInt, + // or TypeString. Default cannot be used if the Schema is directly an + // implementation of an Elem field of another Schema, such as trying to + // set a default value for a TypeList or TypeSet. // - // If either of these is set, then the user won't be asked for input - // for this key if the default is not nil. - Default interface{} + // Changing Default can be a breaking change, especially if the + // attribute has ForceNew enabled. If a default needs to change to align + // with changing assumptions in an upstream API, then it may be necessary + // to also implement resource state upgrade functionality to change the + // state to match or update read operation logic to align with the new + // default. + Default interface{} + + // DefaultFunc can be specified to compute a dynamic default when this + // attribute is not set in the configuration. DefaultFunc cannot be used + // with Default. For legacy reasons, DefaultFunc can be used with Required + // attributes in a Provider schema, which will prompt practitioners for + // input if the result of this function is nil. + // + // The return value should be stable to avoid generating confusing + // plan differences. Changing the return value can be a breaking change, + // especially if ForceNew is enabled. If a default needs to change to align + // with changing assumptions in an upstream API, then it may be necessary + // to also implement resource state upgrade functionality to change the + // state to match or update read operation logic to align with the new + // default. DefaultFunc SchemaDefaultFunc // Description is used as the description for docs, the language server and @@ -124,85 +197,125 @@ type Schema struct { // asked for. If Input is asked, this will be the default value offered. InputDefault string - // The fields below relate to diffs. - // - // If Computed is true, then the result of this value is computed - // (unless specified by config) on creation. - // - // If ForceNew is true, then a change in this resource necessitates - // the creation of a new resource. - // // StateFunc is a function called to change the value of this before // storing it in the state (and likewise before comparing for diffs). // The use for this is for example with large strings, you may want // to simply store the hash of it. - Computed bool - ForceNew bool StateFunc SchemaStateFunc - // The following fields are only set for a TypeList, TypeSet, or TypeMap. + // Elem represents the element type for a TypeList, TypeSet, or TypeMap + // attribute or block. The only valid types are *Schema and *Resource. + // Only TypeList and TypeSet support *Resource. + // + // If the Elem is a *Schema, the surrounding Schema represents a single + // attribute with a single element type for underlying elements. In + // practitioner configurations, an equals sign (=) is required to set + // the value. Refer to the following documentation: + // + // https://www.terraform.io/docs/language/syntax/configuration.html + // + // The underlying *Schema is only required to implement Type. ValidateFunc + // or ValidateDiagFunc can be used to validate each element value. + // + // If the Elem is a *Resource, the surrounding Schema represents a + // configuration block.
Blocks can contain underlying attributes or blocks. + // In practitioner configurations, an equals sign (=) cannot be used to + // set the value. Blocks are instead repeated as necessary, or require + // the use of dynamic block expressions. Refer to the following + // documentation: // - // Elem represents the element type. For a TypeMap, it must be a *Schema - // with a Type that is one of the primitives: TypeString, TypeBool, - // TypeInt, or TypeFloat. Otherwise it may be either a *Schema or a - // *Resource. If it is *Schema, the element type is just a simple value. - // If it is *Resource, the element type is a complex structure, - // potentially managed via its own CRUD actions on the API. + // https://www.terraform.io/docs/language/syntax/configuration.html + // https://www.terraform.io/docs/language/expressions/dynamic-blocks.html + // + // The underlying *Resource must only implement the Schema field. Elem interface{} - // The following fields are only set for a TypeList or TypeSet. - // // MaxItems defines a maximum amount of items that can exist within a - // TypeSet or TypeList. Specific use cases would be if a TypeSet is being - // used to wrap a complex structure, however more than one instance would - // cause instability. - // + // TypeSet or TypeList. + MaxItems int + // MinItems defines a minimum amount of items that can exist within a - // TypeSet or TypeList. Specific use cases would be if a TypeSet is being - // used to wrap a complex structure, however less than one instance would - // cause instability. + // TypeSet or TypeList. // // If the field Optional is set to true then MinItems is ignored and thus // effectively zero. - MaxItems int MinItems int - // The following fields are only valid for a TypeSet type. - // - // Set defines a function to determine the unique ID of an item so that - // a proper set can be built. + // Set defines a custom hash algorithm for each TypeSet element. If not + // defined, the SDK implements a default hash algorithm based on the + // underlying structure and type information of the Elem field. Set SchemaSetFunc // ComputedWhen is a set of queries on the configuration. Whenever any // of these things is changed, it will require a recompute (this requires // that Computed is set to true). // - // NOTE: This currently does not work. + // Deprecated: This functionality is not implemented and this field + // declaration should be removed. ComputedWhen []string - // ConflictsWith is a set of schema keys that conflict with this schema. - // This will only check that they're set in the _config_. This will not - // raise an error for a malfunctioning resource that sets a conflicting - // key. - // - // ExactlyOneOf is a set of schema keys that, when set, only one of the - // keys in that list can be specified. It will error if none are - // specified as well. + // ConflictsWith is a set of attribute paths, including this attribute, + // whose configurations cannot be set simultaneously. This implements the + // validation logic declaratively within the schema and can trigger earlier + // in Terraform operations, rather than using create or update logic which + // only triggers during apply. // - // AtLeastOneOf is a set of schema keys that, when set, at least one of - // the keys in that list must be specified. - // - // RequiredWith is a set of schema keys that must be set simultaneously. + // Only absolute attribute paths, ones starting with top level attribute + // names, are supported.
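A sketch contrasting the two Elem forms described above, with invented names: a simple list attribute takes a *Schema element and is assigned with an equals sign, while a nested block takes a *Resource element:

"zones": {
	// Configured as: zones = ["a", "b"]
	Type:     schema.TypeList,
	Optional: true,
	Elem:     &schema.Schema{Type: schema.TypeString},
},
"rule": {
	// Configured as repeated blocks: rule { port = 443 }
	Type:     schema.TypeList,
	Optional: true,
	Elem: &schema.Resource{
		Schema: map[string]*schema.Schema{
			"port": {Type: schema.TypeInt, Required: true},
		},
	},
},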
Attribute paths cannot be accurately declared + // for TypeList (if MaxItems is greater than 1), TypeMap, or TypeSet + // attributes. To reference an attribute under a single configuration block + // (TypeList with Elem of *Resource and MaxItems of 1), the syntax is + // "parent_block_name.0.child_attribute_name". ConflictsWith []string - ExactlyOneOf []string - AtLeastOneOf []string - RequiredWith []string - // When Deprecated is set, this attribute is deprecated. + // ExactlyOneOf is a set of attribute paths, including this attribute, + // where only one attribute out of all specified can be configured. It will + // return a validation error if none are specified as well. This implements + // the validation logic declaratively within the schema and can trigger + // earlier in Terraform operations, rather than using create or update + // logic which only triggers during apply. + // + // Only absolute attribute paths, ones starting with top level attribute + // names, are supported. Attribute paths cannot be accurately declared + // for TypeList (if MaxItems is greater than 1), TypeMap, or TypeSet + // attributes. To reference an attribute under a single configuration block + // (TypeList with Elem of *Resource and MaxItems of 1), the syntax is + // "parent_block_name.0.child_attribute_name". + ExactlyOneOf []string + + // AtLeastOneOf is a set of attribute paths, including this attribute, + // in which at least one of the attributes must be configured. This + // implements the validation logic declaratively within the schema and can + // trigger earlier in Terraform operations, rather than using create or + // update logic which only triggers during apply. // - // A deprecated field still works, but will probably stop working in near - // future. This string is the message shown to the user with instructions on - // how to address the deprecation. + // Only absolute attribute paths, ones starting with top level attribute + // names, are supported. Attribute paths cannot be accurately declared + // for TypeList (if MaxItems is greater than 1), TypeMap, or TypeSet + // attributes. To reference an attribute under a single configuration block + // (TypeList with Elem of *Resource and MaxItems of 1), the syntax is + // "parent_block_name.0.child_attribute_name". + AtLeastOneOf []string + + // RequiredWith is a set of attribute paths, including this attribute, + // that must be set simultaneously. This implements the validation logic + // declaratively within the schema and can trigger earlier in Terraform + // operations, rather than using create or update logic which only triggers + // during apply. + // + // Only absolute attribute paths, ones starting with top level attribute + // names, are supported. Attribute paths cannot be accurately declared + // for TypeList (if MaxItems is greater than 1), TypeMap, or TypeSet + // attributes. To reference an attribute under a single configuration block + // (TypeList with Elem of *Resource and MaxItems of 1), the syntax is + // "parent_block_name.0.child_attribute_name". + RequiredWith []string + + // Deprecated indicates the message to include in a warning diagnostic to + // practitioners when this attribute is configured. Typically this is used + // to signal that this attribute will be removed in the future and provide + // next steps to the practitioner, such as using a different attribute, + // different resource, or if it should just be removed. 
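To illustrate the absolute attribute path syntax that the ConflictsWith, ExactlyOneOf, AtLeastOneOf, and RequiredWith documentation above repeats, a sketch with invented attribute names:

"hostname": {
	Type:          schema.TypeString,
	Optional:      true,
	ConflictsWith: []string{"ip_address"},
},
"ip_address": {
	Type:          schema.TypeString,
	Optional:      true,
	ConflictsWith: []string{"hostname"},
	// For an attribute under a single configuration block (TypeList with
	// Elem of *Resource and MaxItems of 1), the path form would be:
	//   ConflictsWith: []string{"network.0.dns_name"},
},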
Deprecated string // ValidateFunc allows individual fields to define arbitrary validation @@ -238,9 +351,28 @@ type Schema struct { ValidateDiagFunc SchemaValidateDiagFunc // Sensitive ensures that the attribute's value does not get displayed in - // logs or regular output. It should be used for passwords or other - // secret fields. Future versions of Terraform may encrypt these - // values. + // the Terraform user interface output. It should be used for password or + // other values which should be hidden. + // + // Terraform does not support conditional sensitivity, so if the value may + // only be sensitive in certain scenarios, a pragmatic choice will be + // necessary upfront of whether or not to always hide the value. Some + // providers may opt to split up resources based on sensitivity, to ensure + // that practitioners without sensitive values do not have values + // unnecessarily hidden. + // + // Terraform does not support passing sensitivity from configurations to + // providers. For example, if a sensitive value is configured via another + // attribute, this attribute is not marked Sensitive, and the value is used + // in this attribute value, the sensitivity is not transitive. The value + // will be displayed as normal. + // + // Sensitive values propagate when referenced in other parts of a + // configuration unless the nonsensitive() configuration function is used. + // Certain configuration usage may also expand the sensitivity. For + // example, including the sensitive value in a set may mark the whole set + // as sensitive. Any outputs containing a sensitive value must enable the + // output sensitive argument. Sensitive bool } @@ -260,7 +392,7 @@ const ( // suppress it from the plan if necessary. // // Return true if the diff should be suppressed, false to retain it. -type SchemaDiffSuppressFunc func(k, old, new string, d *ResourceData) bool +type SchemaDiffSuppressFunc func(k, oldValue, newValue string, d *ResourceData) bool // SchemaDefaultFunc is a function called to return a default value for // a field. @@ -487,11 +619,11 @@ func (m schemaMap) Data( // DeepCopy returns a copy of this schemaMap. The copy can be safely modified // without affecting the original. 
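Given the renamed SchemaDiffSuppressFunc parameters above, a minimal sketch of a suppression function (a common provider pattern, not something introduced by this change) that ignores case-only differences; the strings package import is assumed, oldValue comes from state and newValue from configuration:

func suppressCaseDiff(k, oldValue, newValue string, d *schema.ResourceData) bool {
	// Treat values differing only by letter case as equal.
	return strings.EqualFold(oldValue, newValue)
}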
func (m *schemaMap) DeepCopy() schemaMap { - copy, err := copystructure.Config{Lock: true}.Copy(m) + copiedMap, err := copystructure.Config{Lock: true}.Copy(m) if err != nil { panic(err) } - return *copy.(*schemaMap) + return *copiedMap.(*schemaMap) } // Diff returns the diff for a resource given the schema map, @@ -522,7 +654,7 @@ func (m schemaMap) Diff( } for k, schema := range m { - err := m.diff(k, schema, result, d, false) + err := m.diff(ctx, k, schema, result, d, false) if err != nil { return nil, err } @@ -540,11 +672,16 @@ func (m schemaMap) Diff( if !result.DestroyTainted && customizeDiff != nil { mc := m.DeepCopy() rd := newResourceDiff(mc, c, s, result) - if err := customizeDiff(ctx, rd, meta); err != nil { + + logging.HelperSchemaTrace(ctx, "Calling downstream") + err := customizeDiff(ctx, rd, meta) + logging.HelperSchemaTrace(ctx, "Called downstream") + + if err != nil { return nil, err } for _, k := range rd.UpdatedKeys() { - err := m.diff(k, mc[k], result, rd, false) + err := m.diff(ctx, k, mc[k], result, rd, false) if err != nil { return nil, err } @@ -571,7 +708,7 @@ func (m schemaMap) Diff( // Perform the diff again for k, schema := range m { - err := m.diff(k, schema, result2, d, false) + err := m.diff(ctx, k, schema, result2, d, false) if err != nil { return nil, err } @@ -585,7 +722,7 @@ func (m schemaMap) Diff( return nil, err } for _, k := range rd.UpdatedKeys() { - err := m.diff(k, mc[k], result2, rd, false) + err := m.diff(ctx, k, mc[k], result2, rd, false) if err != nil { return nil, err } @@ -757,6 +894,10 @@ func (m schemaMap) internalValidate(topSchemaMap schemaMap, attrsOnly bool) erro } } + if v.DiffSuppressOnRefresh && v.DiffSuppressFunc == nil { + return fmt.Errorf("%s: cannot set DiffSuppressOnRefresh without DiffSuppressFunc", k) + } + if v.Type == TypeList || v.Type == TypeSet { if v.Elem == nil { return fmt.Errorf("%s: Elem must be set for lists", k) @@ -941,10 +1082,12 @@ type resourceDiffer interface { GetChange(string) (interface{}, interface{}) GetOk(string) (interface{}, bool) HasChange(string) bool + HasChanges(...string) bool Id() string } func (m schemaMap) diff( + ctx context.Context, k string, schema *Schema, diff *terraform.InstanceDiff, @@ -959,11 +1102,11 @@ func (m schemaMap) diff( case TypeBool, TypeInt, TypeFloat, TypeString: err = m.diffString(k, schema, unsupressedDiff, d, all) case TypeList: - err = m.diffList(k, schema, unsupressedDiff, d, all) + err = m.diffList(ctx, k, schema, unsupressedDiff, d, all) case TypeMap: err = m.diffMap(k, schema, unsupressedDiff, d, all) case TypeSet: - err = m.diffSet(k, schema, unsupressedDiff, d, all) + err = m.diffSet(ctx, k, schema, unsupressedDiff, d, all) default: err = fmt.Errorf("%s: unknown type %#v", k, schema.Type) } @@ -980,6 +1123,7 @@ func (m schemaMap) diff( continue } + logging.HelperSchemaDebug(ctx, "Ignoring change due to DiffSuppressFunc", map[string]interface{}{logging.KeyAttributePath: attrK}) attrV = &terraform.ResourceAttrDiff{ Old: attrV.Old, New: attrV.Old, @@ -993,6 +1137,7 @@ func (m schemaMap) diff( } func (m schemaMap) diffList( + ctx context.Context, k string, schema *Schema, diff *terraform.InstanceDiff, @@ -1091,7 +1236,7 @@ func (m schemaMap) diffList( for i := 0; i < maxLen; i++ { for k2, schema := range t.Schema { subK := fmt.Sprintf("%s.%d.%s", k, i, k2) - err := m.diff(subK, schema, diff, d, all) + err := m.diff(ctx, subK, schema, diff, d, all) if err != nil { return err } @@ -1107,7 +1252,7 @@ func (m schemaMap) diffList( // just diff each. 
for i := 0; i < maxLen; i++ { subK := fmt.Sprintf("%s.%d", k, i) - err := m.diff(subK, &t2, diff, d, all) + err := m.diff(ctx, subK, &t2, diff, d, all) if err != nil { return err } @@ -1232,6 +1377,7 @@ func (m schemaMap) diffMap( } func (m schemaMap) diffSet( + ctx context.Context, k string, schema *Schema, diff *terraform.InstanceDiff, @@ -1337,7 +1483,7 @@ func (m schemaMap) diffSet( // This is a complex resource for k2, schema := range t.Schema { subK := fmt.Sprintf("%s.%s.%s", k, code, k2) - err := m.diff(subK, schema, diff, d, true) + err := m.diff(ctx, subK, schema, diff, d, true) if err != nil { return err } @@ -1351,7 +1497,7 @@ func (m schemaMap) diffSet( // This is just a primitive element, so go through each and // just diff each. subK := fmt.Sprintf("%s.%s", k, code) - err := m.diff(subK, &t2, diff, d, true) + err := m.diff(ctx, subK, &t2, diff, d, true) if err != nil { return err } @@ -1426,6 +1572,60 @@ func (m schemaMap) diffString( return nil } +// handleDiffSuppressOnRefresh visits each of the attributes set in "new" and, +// if the corresponding schema sets both DiffSuppressFunc and +// DiffSuppressOnRefresh, checks whether the new value is materially different +// than the old and if not it overwrites the new value with the old one, +// in-place. +func (m schemaMap) handleDiffSuppressOnRefresh(ctx context.Context, oldState, newState *terraform.InstanceState) { + if newState == nil || oldState == nil { + return // nothing to do, then + } + + // We'll populate this in the loop below only if we find at least one + // attribute which needs this analysis. + var d *ResourceData + + oldAttrs := oldState.Attributes + newAttrs := newState.Attributes + for k, newV := range newAttrs { + oldV, ok := oldAttrs[k] + if !ok { + continue // no old value to compare with + } + if newV == oldV { + continue // no change to test + } + + schemaList := addrToSchema(strings.Split(k, "."), m) + if len(schemaList) == 0 { + continue // no schema? weird, but not our responsibility to handle + } + schema := schemaList[len(schemaList)-1] + if !schema.DiffSuppressOnRefresh || schema.DiffSuppressFunc == nil { + continue // not relevant + } + + if d == nil { + // We populate "d" only on demand, to avoid the cost for most + // existing schemas where DiffSuppressOnRefresh won't be set. + var err error + d, err = m.Data(newState, nil) + if err != nil { + // Should not happen if we got far enough to be doing this + // analysis, but if it does then we'll bail out. + tfsdklog.Warn(ctx, fmt.Sprintf("schemaMap.handleDiffSuppressOnRefresh failed to construct ResourceData: %s", err)) + return + } + } + + if schema.DiffSuppressFunc(k, oldV, newV, d) { + tfsdklog.Debug(ctx, fmt.Sprintf("ignoring change of %q due to DiffSuppressFunc", k)) + newState.Attributes[k] = oldV // keep the old value, then + } + } +} + func (m schemaMap) validate( k string, schema *Schema, @@ -2048,8 +2248,19 @@ func (m schemaMap) validatePrimitive( // decode a float as an integer. 
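handleDiffSuppressOnRefresh above is the read-side half of the new opt-in: it re-applies DiffSuppressFunc to the refreshed state. On the provider side, opting in is a single flag next to an existing suppression function; internalValidate (earlier in this file) rejects DiffSuppressOnRefresh without DiffSuppressFunc. A sketch with an invented attribute, reusing the SDK's structure.SuppressJsonDiff helper that appears later in this diff:

"policy_json": {
	Type:                  schema.TypeString,
	Optional:              true,
	DiffSuppressFunc:      structure.SuppressJsonDiff,
	DiffSuppressOnRefresh: true, // also normalize values returned by Read
},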
// the config shims only use int for integral number values + // also accept a string, just as the TypeBool and TypeFloat cases do if v, ok := raw.(int); ok { decoded = v + } else if _, ok := raw.(string); ok { + var n int + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: path, + }) + } + decoded = n } else { return append(diags, diag.Diagnostic{ Severity: diag.Error, @@ -2058,7 +2269,7 @@ func (m schemaMap) validatePrimitive( }) } case TypeFloat: - // Verify that we can parse this as an int + // Verify that we can parse this as a float var n float64 if err := mapstructure.WeakDecode(raw, &n); err != nil { return append(diags, diag.Diagnostic{ diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go index 48755c22d..b937ab6e5 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go @@ -3,12 +3,12 @@ package schema import ( "bytes" "fmt" - "github.com/google/go-cmp/cmp" "reflect" "sort" "strconv" "sync" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode" ) @@ -238,6 +238,6 @@ func (s *Set) listCode() []string { for k := range s.m { keys = append(keys, k) } - sort.Sort(sort.StringSlice(keys)) + sort.Strings(keys) return keys } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure/suppress_json_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure/suppress_json_diff.go index 741ca0ac2..c99b73846 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure/suppress_json_diff.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure/suppress_json_diff.go @@ -6,13 +6,13 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -func SuppressJsonDiff(k, old, new string, d *schema.ResourceData) bool { - oldMap, err := ExpandJsonFromString(old) +func SuppressJsonDiff(k, oldValue, newValue string, d *schema.ResourceData) bool { + oldMap, err := ExpandJsonFromString(oldValue) if err != nil { return false } - newMap, err := ExpandJsonFromString(new) + newMap, err := ExpandJsonFromString(newValue) if err != nil { return false } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/map.go index 3e8068a8c..9e4510b1e 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/map.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/map.go @@ -17,12 +17,12 @@ func MapKeyLenBetween(min, max int) schema.SchemaValidateDiagFunc { var diags diag.Diagnostics for _, key := range sortedKeys(v.(map[string]interface{})) { - len := len(key) - if len < min || len > max { + keyLen := len(key) + if keyLen < min || keyLen > max { diags = append(diags, diag.Diagnostic{ Severity: diag.Error, Summary: "Bad map key length", - Detail: fmt.Sprintf("Map key lengths should be in the range (%d - %d): %s (length = %d)", min, max, key, len), + Detail: fmt.Sprintf("Map key lengths should be in the range (%d - %d): %s (length = %d)", min, max, key, keyLen), AttributePath: append(path, cty.IndexStep{Key: cty.StringVal(key)}), }) } @@ -53,12 +53,12 @@ func MapValueLenBetween(min, max int) 
schema.SchemaValidateDiagFunc { continue } - len := len(val.(string)) - if len < min || len > max { + valLen := len(val.(string)) + if valLen < min || valLen > max { diags = append(diags, diag.Diagnostic{ Severity: diag.Error, Summary: "Bad map value length", - Detail: fmt.Sprintf("Map value lengths should be in the range (%d - %d): %s => %v (length = %d)", min, max, key, val, len), + Detail: fmt.Sprintf("Map value lengths should be in the range (%d - %d): %s => %v (length = %d)", min, max, key, val, valLen), AttributePath: append(path, cty.IndexStep{Key: cty.StringVal(key)}), }) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/meta.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/meta.go index f1376c2d3..b0515c8d0 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/meta.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/meta.go @@ -66,8 +66,21 @@ func ToDiagFunc(validator schema.SchemaValidateFunc) schema.SchemaValidateDiagFu return func(i interface{}, p cty.Path) diag.Diagnostics { var diags diag.Diagnostics - attr := p[len(p)-1].(cty.GetAttrStep) - ws, es := validator(i, attr.Name) + // A practitioner-friendly key for any SchemaValidateFunc output. + // Generally this should be the last attribute name on the path. + // If not found for some unexpected reason, an empty string is fine + // as the diagnostic will have the full attribute path anyways. + var key string + + // Reverse search for last cty.GetAttrStep + for i := len(p) - 1; i >= 0; i-- { + if pathStep, ok := p[i].(cty.GetAttrStep); ok { + key = pathStep.Name + break + } + } + + ws, es := validator(i, key) for _, w := range ws { diags = append(diags, diag.Diagnostic{ diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/strings.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/strings.go index c0c17d011..e739a1a1b 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/strings.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/strings.go @@ -138,7 +138,7 @@ func StringInSlice(valid []string, ignoreCase bool) schema.SchemaValidateFunc { } for _, str := range valid { - if v == str || (ignoreCase && strings.ToLower(v) == strings.ToLower(str)) { + if v == str || (ignoreCase && strings.EqualFold(v, str)) { return warnings, errors } } @@ -160,7 +160,7 @@ func StringNotInSlice(invalid []string, ignoreCase bool) schema.SchemaValidateFu } for _, str := range invalid { - if v == str || (ignoreCase && strings.ToLower(v) == strings.ToLower(str)) { + if v == str || (ignoreCase && strings.EqualFold(v, str)) { errors = append(errors, fmt.Errorf("expected %s to not be any of %v, got %s", k, invalid, v)) return warnings, errors } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/testing.go index 596c5754a..d861f5a2a 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/testing.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/testing.go @@ -5,7 +5,6 @@ import ( testing "github.com/mitchellh/go-testing-interface" - "github.com/hashicorp/go-cty/cty" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -16,12 +15,6 @@ type testCase struct { expectedErr *regexp.Regexp } -type diagTestCase 
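The reworked ToDiagFunc above now walks the path backwards to find the last attribute name instead of assuming the final path step is one. Usage is unchanged: it adapts a legacy SchemaValidateFunc wherever a SchemaValidateDiagFunc is required, for example (invented attribute, using IsDayOfTheWeek from this same validation package):

"day": {
	Type:             schema.TypeString,
	Optional:         true,
	ValidateDiagFunc: validation.ToDiagFunc(validation.IsDayOfTheWeek(true)),
},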
struct { - val interface{} - f schema.SchemaValidateDiagFunc - expectedErr *regexp.Regexp -} - func runTestCases(t testing.T, cases []testCase) { t.Helper() @@ -52,29 +45,6 @@ func matchAnyError(errs []error, r *regexp.Regexp) bool { return false } -func runDiagTestCases(t testing.T, cases []diagTestCase) { - t.Helper() - - for i, tc := range cases { - p := cty.Path{ - cty.GetAttrStep{Name: "test_property"}, - } - diags := tc.f(tc.val, p) - - if !diags.HasError() && tc.expectedErr == nil { - continue - } - - if diags.HasError() && tc.expectedErr == nil { - t.Fatalf("expected test case %d to produce no errors, got %v", i, diags) - } - - if !matchAnyDiagSummary(diags, tc.expectedErr) { - t.Fatalf("expected test case %d to produce error matching \"%s\", got %v", i, tc.expectedErr, diags) - } - } -} - func matchAnyDiagSummary(ds diag.Diagnostics, r *regexp.Regexp) bool { for _, d := range ds { if r.MatchString(d.Summary) { diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/time.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/time.go index df3e2620c..fde3a0199 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/time.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/time.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -// IsDayOfTheWeek id a SchemaValidateFunc which tests if the provided value is of type string and a valid english day of the week +// IsDayOfTheWeek is a SchemaValidateFunc which tests if the provided value is of type string and a valid english day of the week func IsDayOfTheWeek(ignoreCase bool) schema.SchemaValidateFunc { return StringInSlice([]string{ "Monday", @@ -20,7 +20,7 @@ func IsDayOfTheWeek(ignoreCase bool) schema.SchemaValidateFunc { }, ignoreCase) } -// IsMonth id a SchemaValidateFunc which tests if the provided value is of type string and a valid english month +// IsMonth is a SchemaValidateFunc which tests if the provided value is of type string and a valid english month func IsMonth(ignoreCase bool) schema.SchemaValidateFunc { return StringInSlice([]string{ "January", diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module_instance.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module_instance.go index f31d833d2..113d3d675 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module_instance.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module_instance.go @@ -92,7 +92,6 @@ func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Tra "Invalid address operator", "Module address prefix must be followed by dot and then a name.", )) - break } if next != "module" { @@ -122,7 +121,6 @@ func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Tra "Invalid address operator", "Prefix \"module.\" must be followed by a module name.", )) - break } remain = remain[1:] step := ModuleInstanceStep{ diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/flatmap.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/flatmap.go index e620e76a9..b96e17c58 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/flatmap.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/flatmap.go @@ -85,15 +85,15 @@ func flatmapValueFromHCL2Map(m 
map[string]string, prefix string, val cty.Value) return } - len := 0 + valLen := 0 for it := val.ElementIterator(); it.Next(); { ak, av := it.Element() name := ak.AsString() flatmapValueFromHCL2Value(m, prefix+name, av) - len++ + valLen++ } if !val.Type().IsObjectType() { // objects don't have an explicit count included, since their attribute count is fixed - m[prefix+"%"] = strconv.Itoa(len) + m[prefix+"%"] = strconv.Itoa(valLen) } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/single_attr_body.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/single_attr_body.go deleted file mode 100644 index 68f48da8f..000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/single_attr_body.go +++ /dev/null @@ -1,85 +0,0 @@ -package hcl2shim - -import ( - "fmt" - - hcl2 "github.com/hashicorp/hcl/v2" -) - -// SingleAttrBody is a weird implementation of hcl2.Body that acts as if -// it has a single attribute whose value is the given expression. -// -// This is used to shim Resource.RawCount and Output.RawConfig to behave -// more like they do in the old HCL loader. -type SingleAttrBody struct { - Name string - Expr hcl2.Expression -} - -var _ hcl2.Body = SingleAttrBody{} - -func (b SingleAttrBody) Content(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Diagnostics) { - content, all, diags := b.content(schema) - if !all { - // This should never happen because this body implementation should only - // be used by code that is aware that it's using a single-attr body. - diags = append(diags, &hcl2.Diagnostic{ - Severity: hcl2.DiagError, - Summary: "Invalid attribute", - Detail: fmt.Sprintf("The correct attribute name is %q.", b.Name), - Subject: b.Expr.Range().Ptr(), - }) - } - return content, diags -} - -func (b SingleAttrBody) PartialContent(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Body, hcl2.Diagnostics) { - content, all, diags := b.content(schema) - var remain hcl2.Body - if all { - // If the request matched the one attribute we represent, then the - // remaining body is empty. 
- remain = hcl2.EmptyBody() - } else { - remain = b - } - return content, remain, diags -} - -func (b SingleAttrBody) content(schema *hcl2.BodySchema) (*hcl2.BodyContent, bool, hcl2.Diagnostics) { - ret := &hcl2.BodyContent{} - all := false - var diags hcl2.Diagnostics - - for _, attrS := range schema.Attributes { - if attrS.Name == b.Name { - attrs, _ := b.JustAttributes() - ret.Attributes = attrs - all = true - } else if attrS.Required { - diags = append(diags, &hcl2.Diagnostic{ - Severity: hcl2.DiagError, - Summary: "Missing attribute", - Detail: fmt.Sprintf("The attribute %q is required.", attrS.Name), - Subject: b.Expr.Range().Ptr(), - }) - } - } - - return ret, all, diags -} - -func (b SingleAttrBody) JustAttributes() (hcl2.Attributes, hcl2.Diagnostics) { - return hcl2.Attributes{ - b.Name: { - Expr: b.Expr, - Name: b.Name, - NameRange: b.Expr.Range(), - Range: b.Expr.Range(), - }, - }, nil -} - -func (b SingleAttrBody) MissingItemRange() hcl2.Range { - return b.Expr.Range() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/context.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/context.go new file mode 100644 index 000000000..6036b0d04 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/context.go @@ -0,0 +1,75 @@ +package logging + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-log/tfsdklog" + helperlogging "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" + testing "github.com/mitchellh/go-testing-interface" +) + +// InitContext creates SDK logger contexts when the provider is running in +// "production" (not under acceptance testing). The incoming context will +// already have the root SDK logger and root provider logger setup from +// terraform-plugin-go tf5server RPC handlers. +func InitContext(ctx context.Context) context.Context { + ctx = tfsdklog.NewSubsystem(ctx, SubsystemHelperSchema, + // All calls are through the HelperSchema* helper functions + tfsdklog.WithAdditionalLocationOffset(1), + tfsdklog.WithLevelFromEnv(EnvTfLogSdkHelperSchema), + // Propagate tf_req_id, tf_rpc, etc. fields + tfsdklog.WithRootFields(), + ) + + return ctx +} + +// InitTestContext registers the terraform-plugin-log/tfsdklog test sink, +// configures the standard library log package, and creates SDK logger +// contexts. The incoming context is expected to be devoid of logging setup. +// +// The standard library log package handling is important as provider code +// under test may be using that package or another logging library outside of +// terraform-plugin-log. +func InitTestContext(ctx context.Context, t testing.T) context.Context { + helperlogging.SetOutput(t) + + ctx = tfsdklog.RegisterTestSink(ctx, t) + ctx = tfsdklog.NewRootSDKLogger(ctx, tfsdklog.WithLevelFromEnv(EnvTfLogSdk)) + ctx = tfsdklog.NewSubsystem(ctx, SubsystemHelperResource, + // All calls are through the HelperResource* helper functions + tfsdklog.WithAdditionalLocationOffset(1), + tfsdklog.WithLevelFromEnv(EnvTfLogSdkHelperResource), + ) + ctx = TestNameContext(ctx, t.Name()) + + return ctx +} + +// TestNameContext adds the current test name to loggers. +func TestNameContext(ctx context.Context, testName string) context.Context { + ctx = tfsdklog.SubsystemSetField(ctx, SubsystemHelperResource, KeyTestName, testName) + + return ctx +} + +// TestStepNumberContext adds the current test step number to loggers. 
+func TestStepNumberContext(ctx context.Context, stepNumber int) context.Context { + ctx = tfsdklog.SubsystemSetField(ctx, SubsystemHelperResource, KeyTestStepNumber, stepNumber) + + return ctx +} + +// TestTerraformPathContext adds the current test Terraform CLI path to loggers. +func TestTerraformPathContext(ctx context.Context, terraformPath string) context.Context { + ctx = tfsdklog.SubsystemSetField(ctx, SubsystemHelperResource, KeyTestTerraformPath, terraformPath) + + return ctx +} + +// TestWorkingDirectoryContext adds the current test working directory to loggers. +func TestWorkingDirectoryContext(ctx context.Context, workingDirectory string) context.Context { + ctx = tfsdklog.SubsystemSetField(ctx, SubsystemHelperResource, KeyTestWorkingDirectory, workingDirectory) + + return ctx +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/environment_variables.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/environment_variables.go new file mode 100644 index 000000000..db1a27a81 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/environment_variables.go @@ -0,0 +1,24 @@ +package logging + +// Environment variables. +const ( + // EnvTfLogSdk is an environment variable that sets the logging level of + // the root SDK logger, while the provider is under test. In "production" + // usage, this environment variable is handled by terraform-plugin-go. + // + // Terraform CLI's logging must be explicitly turned on before this + // environment variable can be used to reduce the SDK logging levels. It + // cannot be used to show only SDK logging unless all other logging levels + // are turned off. + EnvTfLogSdk = "TF_LOG_SDK" + + // EnvTfLogSdkHelperResource is an environment variable that sets the logging + // level of SDK helper/resource loggers. Infers root SDK logging level, if + // unset. + EnvTfLogSdkHelperResource = "TF_LOG_SDK_HELPER_RESOURCE" + + // EnvTfLogSdkHelperSchema is an environment variable that sets the logging + // level of SDK helper/schema loggers. Infers root SDK logging level, if + // unset. + EnvTfLogSdkHelperSchema = "TF_LOG_SDK_HELPER_SCHEMA" +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/helper_resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/helper_resource.go new file mode 100644 index 000000000..a889601c7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/helper_resource.go @@ -0,0 +1,32 @@ +package logging + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-log/tfsdklog" +) + +const ( + // SubsystemHelperResource is the tfsdklog subsystem name for helper/resource. + SubsystemHelperResource = "helper_resource" +) + +// HelperResourceTrace emits a helper/resource subsystem log at TRACE level. +func HelperResourceTrace(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemTrace(ctx, SubsystemHelperResource, msg, additionalFields...) +} + +// HelperResourceDebug emits a helper/resource subsystem log at DEBUG level. +func HelperResourceDebug(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemDebug(ctx, SubsystemHelperResource, msg, additionalFields...) +} + +// HelperResourceWarn emits a helper/resource subsystem log at WARN level.
+func HelperResourceWarn(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemWarn(ctx, SubsystemHelperResource, msg, additionalFields...) +} + +// HelperResourceError emits a helper/resource subsystem log at ERROR level. +func HelperResourceError(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemError(ctx, SubsystemHelperResource, msg, additionalFields...) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/helper_schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/helper_schema.go new file mode 100644 index 000000000..b2fe71d15 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/helper_schema.go @@ -0,0 +1,32 @@ +package logging + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-log/tfsdklog" +) + +const ( + // SubsystemHelperSchema is the tfsdklog subsystem name for helper/schema. + SubsystemHelperSchema = "helper_schema" +) + +// HelperSchemaDebug emits a helper/schema subsystem log at DEBUG level. +func HelperSchemaDebug(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemDebug(ctx, SubsystemHelperSchema, msg, additionalFields...) +} + +// HelperSchemaError emits a helper/schema subsystem log at ERROR level. +func HelperSchemaError(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemError(ctx, SubsystemHelperSchema, msg, additionalFields...) +} + +// HelperSchemaTrace emits a helper/schema subsystem log at TRACE level. +func HelperSchemaTrace(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemTrace(ctx, SubsystemHelperSchema, msg, additionalFields...) +} + +// HelperSchemaWarn emits a helper/schema subsystem log at WARN level. +func HelperSchemaWarn(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemWarn(ctx, SubsystemHelperSchema, msg, additionalFields...) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/keys.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/keys.go new file mode 100644 index 000000000..03931b024 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/keys.go @@ -0,0 +1,54 @@ +package logging + +// Structured logging keys. +// +// Practitioners or tooling reading logs may be depending on these keys, so be +// conscious of that when changing them. +// +// Refer to the terraform-plugin-go logging keys as well, which should be +// equivalent to these when possible. +const ( + // Attribute path representation, which is typically in flatmap form such + // as parent.0.child in this project. + KeyAttributePath = "tf_attribute_path" + + // The type of data source being operated on, such as "archive_file" + KeyDataSourceType = "tf_data_source_type" + + // Underlying Go error string when logging an error. + KeyError = "error" + + // The full address of the provider, such as + // registry.terraform.io/hashicorp/random + KeyProviderAddress = "tf_provider_addr" + + // The type of resource being operated on, such as "random_pet" + KeyResourceType = "tf_resource_type" + + // The name of the test being executed. + KeyTestName = "test_name" + + // The TestStep number of the test being executed. Starts at 1. 
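These logging helpers live in an internal package, so only SDK code can call them; the call shape mirrors the resource.go and schema.go call sites earlier in this diff, with optional structured fields drawn from keys.go:

logging.HelperSchemaTrace(ctx, "Calling downstream")
logging.HelperSchemaError(ctx, "Error decoding ResourceTimeout", map[string]interface{}{logging.KeyError: err})

Practitioners control the verbosity of this subsystem with the TF_LOG_SDK_HELPER_SCHEMA environment variable defined in environment_variables.go above.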
+ KeyTestStepNumber = "test_step_number" + + // The Terraform CLI logging level (TF_LOG) used for an acceptance test. + KeyTestTerraformLogLevel = "test_terraform_log_level" + + // The Terraform CLI logging level (TF_LOG_CORE) used for an acceptance test. + KeyTestTerraformLogCoreLevel = "test_terraform_log_core_level" + + // The Terraform CLI logging level (TF_LOG_PROVIDER) used for an acceptance test. + KeyTestTerraformLogProviderLevel = "test_terraform_log_provider_level" + + // The path to the Terraform CLI logging file used for an acceptance test. + // + // This should match where the rest of the acceptance test logs are going + // already, but is provided for troubleshooting in case it does not. + KeyTestTerraformLogPath = "test_terraform_log_path" + + // The path to the Terraform CLI used for an acceptance test. + KeyTestTerraformPath = "test_terraform_path" + + // The working directory of the acceptance test. + KeyTestWorkingDirectory = "test_working_directory" +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/diagnostics.go index 252456747..e02c1e443 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/diagnostics.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/diagnostics.go @@ -1,34 +1,61 @@ package convert import ( - "fmt" + "context" "github.com/hashicorp/go-cty/cty" "github.com/hashicorp/terraform-plugin-go/tfprotov5" "github.com/hashicorp/terraform-plugin-go/tftypes" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" ) // AppendProtoDiag appends a new diagnostic from a warning string or an error. // This panics if d is not a string or error. -func AppendProtoDiag(diags []*tfprotov5.Diagnostic, d interface{}) []*tfprotov5.Diagnostic { +func AppendProtoDiag(ctx context.Context, diags []*tfprotov5.Diagnostic, d interface{}) []*tfprotov5.Diagnostic { switch d := d.(type) { case cty.PathError: ap := PathToAttributePath(d.Path) - diags = append(diags, &tfprotov5.Diagnostic{ + diagnostic := &tfprotov5.Diagnostic{ Severity: tfprotov5.DiagnosticSeverityError, Summary: d.Error(), Attribute: ap, - }) + } + + if diagnostic.Summary == "" { + logging.HelperSchemaWarn(ctx, "detected empty error string for diagnostic in AppendProtoDiag for cty.PathError type") + diagnostic.Summary = "Empty Error String" + diagnostic.Detail = "This is always a bug in the provider and should be reported to the provider developers." + } + + diags = append(diags, diagnostic) case diag.Diagnostics: diags = append(diags, DiagsToProto(d)...) case error: - diags = append(diags, &tfprotov5.Diagnostic{ + if d == nil { + logging.HelperSchemaDebug(ctx, "skipping diagnostic for nil error in AppendProtoDiag") + return diags + } + + diagnostic := &tfprotov5.Diagnostic{ Severity: tfprotov5.DiagnosticSeverityError, Summary: d.Error(), - }) + } + + if diagnostic.Summary == "" { + logging.HelperSchemaWarn(ctx, "detected empty error string for diagnostic in AppendProtoDiag for error type") + diagnostic.Summary = "Error Missing Message" + diagnostic.Detail = "This is always a bug in the provider and should be reported to the provider developers." 
+ } + + diags = append(diags, diagnostic) case string: + if d == "" { + logging.HelperSchemaDebug(ctx, "skipping diagnostic for empty string in AppendProtoDiag") + return diags + } + diags = append(diags, &tfprotov5.Diagnostic{ Severity: tfprotov5.DiagnosticSeverityWarning, Summary: d, @@ -68,19 +95,18 @@ func ProtoToDiags(ds []*tfprotov5.Diagnostic) diag.Diagnostics { func DiagsToProto(diags diag.Diagnostics) []*tfprotov5.Diagnostic { var ds []*tfprotov5.Diagnostic for _, d := range diags { - if err := d.Validate(); err != nil { - panic(fmt.Errorf("Invalid diagnostic: %s. This is always a bug in the provider implementation", err)) - } protoDiag := &tfprotov5.Diagnostic{ + Severity: tfprotov5.DiagnosticSeverityError, Summary: d.Summary, Detail: d.Detail, Attribute: PathToAttributePath(d.AttributePath), } - if d.Severity == diag.Error { - protoDiag.Severity = tfprotov5.DiagnosticSeverityError - } else if d.Severity == diag.Warning { + if d.Severity == diag.Warning { protoDiag.Severity = tfprotov5.DiagnosticSeverityWarning } + if d.Summary == "" { + protoDiag.Summary = "Empty Summary: This is always a bug in the provider and should be reported to the provider developers." + } ds = append(ds, protoDiag) } return ds @@ -93,13 +119,13 @@ func AttributePathToPath(ap *tftypes.AttributePath) cty.Path { return p } for _, step := range ap.Steps() { - switch step.(type) { + switch step := step.(type) { case tftypes.AttributeName: - p = p.GetAttr(string(step.(tftypes.AttributeName))) + p = p.GetAttr(string(step)) case tftypes.ElementKeyString: - p = p.Index(cty.StringVal(string(step.(tftypes.ElementKeyString)))) + p = p.Index(cty.StringVal(string(step))) case tftypes.ElementKeyInt: - p = p.Index(cty.NumberIntVal(int64(step.(tftypes.ElementKeyInt)))) + p = p.Index(cty.NumberIntVal(int64(step))) } } return p diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/schema.go index c5dbf5b8f..07d0b8978 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/schema.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/schema.go @@ -1,8 +1,8 @@ package convert import ( + "context" "fmt" - "log" "reflect" "sort" @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-go/tfprotov5" "github.com/hashicorp/terraform-plugin-go/tftypes" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" ) func tftypeFromCtyType(in cty.Type) (tftypes.Type, error) { @@ -128,10 +129,10 @@ func ctyTypeFromTFType(in tftypes.Type) (cty.Type, error) { // ConfigSchemaToProto takes a *configschema.Block and converts it to a // tfprotov5.SchemaBlock for a grpc response. 
-func ConfigSchemaToProto(b *configschema.Block) *tfprotov5.SchemaBlock { +func ConfigSchemaToProto(ctx context.Context, b *configschema.Block) *tfprotov5.SchemaBlock { block := &tfprotov5.SchemaBlock{ Description: b.Description, - DescriptionKind: protoStringKind(b.DescriptionKind), + DescriptionKind: protoStringKind(ctx, b.DescriptionKind), Deprecated: b.Deprecated, } @@ -141,7 +142,7 @@ func ConfigSchemaToProto(b *configschema.Block) *tfprotov5.SchemaBlock { attr := &tfprotov5.SchemaAttribute{ Name: name, Description: a.Description, - DescriptionKind: protoStringKind(a.DescriptionKind), + DescriptionKind: protoStringKind(ctx, a.DescriptionKind), Optional: a.Optional, Computed: a.Computed, Required: a.Required, @@ -160,16 +161,16 @@ func ConfigSchemaToProto(b *configschema.Block) *tfprotov5.SchemaBlock { for _, name := range sortedKeys(b.BlockTypes) { b := b.BlockTypes[name] - block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(name, b)) + block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(ctx, name, b)) } return block } -func protoStringKind(k configschema.StringKind) tfprotov5.StringKind { +func protoStringKind(ctx context.Context, k configschema.StringKind) tfprotov5.StringKind { switch k { default: - log.Printf("[TRACE] unexpected configschema.StringKind: %d", k) + logging.HelperSchemaTrace(ctx, fmt.Sprintf("Unexpected configschema.StringKind: %d", k)) return tfprotov5.StringKindPlain case configschema.StringPlain: return tfprotov5.StringKindPlain @@ -178,7 +179,7 @@ func protoStringKind(k configschema.StringKind) tfprotov5.StringKind { } } -func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *tfprotov5.SchemaNestedBlock { +func protoSchemaNestedBlock(ctx context.Context, name string, b *configschema.NestedBlock) *tfprotov5.SchemaNestedBlock { var nesting tfprotov5.SchemaNestedBlockNestingMode switch b.Nesting { case configschema.NestingSingle: @@ -196,7 +197,7 @@ func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *tfprotov5 } return &tfprotov5.SchemaNestedBlock{ TypeName: name, - Block: ConfigSchemaToProto(&b.Block), + Block: ConfigSchemaToProto(ctx, &b.Block), Nesting: nesting, MinItems: int64(b.MinItems), MaxItems: int64(b.MaxItems), @@ -205,20 +206,20 @@ func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *tfprotov5 // ProtoToConfigSchema takes the GetSchema_Block from a grpc response and converts it // to a terraform *configschema.Block. 
-func ProtoToConfigSchema(b *tfprotov5.SchemaBlock) *configschema.Block { +func ProtoToConfigSchema(ctx context.Context, b *tfprotov5.SchemaBlock) *configschema.Block { block := &configschema.Block{ Attributes: make(map[string]*configschema.Attribute), BlockTypes: make(map[string]*configschema.NestedBlock), Description: b.Description, - DescriptionKind: schemaStringKind(b.DescriptionKind), + DescriptionKind: schemaStringKind(ctx, b.DescriptionKind), Deprecated: b.Deprecated, } for _, a := range b.Attributes { attr := &configschema.Attribute{ Description: a.Description, - DescriptionKind: schemaStringKind(a.DescriptionKind), + DescriptionKind: schemaStringKind(ctx, a.DescriptionKind), Required: a.Required, Optional: a.Optional, Computed: a.Computed, @@ -236,16 +237,16 @@ func ProtoToConfigSchema(b *tfprotov5.SchemaBlock) *configschema.Block { } for _, b := range b.BlockTypes { - block.BlockTypes[b.TypeName] = schemaNestedBlock(b) + block.BlockTypes[b.TypeName] = schemaNestedBlock(ctx, b) } return block } -func schemaStringKind(k tfprotov5.StringKind) configschema.StringKind { +func schemaStringKind(ctx context.Context, k tfprotov5.StringKind) configschema.StringKind { switch k { default: - log.Printf("[TRACE] unexpected tfprotov5.StringKind: %d", k) + logging.HelperSchemaTrace(ctx, fmt.Sprintf("Unexpected tfprotov5.StringKind: %d", k)) return configschema.StringPlain case tfprotov5.StringKindPlain: return configschema.StringPlain @@ -254,7 +255,7 @@ func schemaStringKind(k tfprotov5.StringKind) configschema.StringKind { } } -func schemaNestedBlock(b *tfprotov5.SchemaNestedBlock) *configschema.NestedBlock { +func schemaNestedBlock(ctx context.Context, b *tfprotov5.SchemaNestedBlock) *configschema.NestedBlock { var nesting configschema.NestingMode switch b.Nesting { case tfprotov5.SchemaNestedBlockNestingModeSingle: @@ -278,7 +279,7 @@ func schemaNestedBlock(b *tfprotov5.SchemaNestedBlock) *configschema.NestedBlock MaxItems: int(b.MaxItems), } - nested := ProtoToConfigSchema(b.Block) + nested := ProtoToConfigSchema(ctx, b.Block) nb.Block = *nested return nb } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/config.go index 2c544e9e5..c238145aa 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/config.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/config.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/hc-install/product" "github.com/hashicorp/hc-install/releases" "github.com/hashicorp/hc-install/src" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" ) // Config is used to configure the test helper. In most normal test programs @@ -29,11 +30,11 @@ type Config struct { // DiscoverConfig uses environment variables and other means to automatically // discover a reasonable test helper configuration. 
-func DiscoverConfig(sourceDir string) (*Config, error) { - tfVersion := strings.TrimPrefix(os.Getenv("TF_ACC_TERRAFORM_VERSION"), "v") - tfPath := os.Getenv("TF_ACC_TERRAFORM_PATH") +func DiscoverConfig(ctx context.Context, sourceDir string) (*Config, error) { + tfVersion := strings.TrimPrefix(os.Getenv(EnvTfAccTerraformVersion), "v") + tfPath := os.Getenv(EnvTfAccTerraformPath) - tempDir := os.Getenv("TF_ACC_TEMP_DIR") + tempDir := os.Getenv(EnvTfAccTempDir) tfDir, err := ioutil.TempDir(tempDir, "plugintest-terraform") if err != nil { return nil, fmt.Errorf("failed to create temp dir: %w", err) @@ -42,6 +43,8 @@ func DiscoverConfig(sourceDir string) (*Config, error) { var sources []src.Source switch { case tfPath != "": + logging.HelperResourceTrace(ctx, fmt.Sprintf("Adding potential Terraform CLI source of exact path: %s", tfPath)) + sources = append(sources, &fs.AnyVersion{ ExactBinPath: tfPath, }) @@ -52,12 +55,17 @@ func DiscoverConfig(sourceDir string) (*Config, error) { return nil, fmt.Errorf("invalid Terraform version: %w", err) } + logging.HelperResourceTrace(ctx, fmt.Sprintf("Adding potential Terraform CLI source of releases.hashicorp.com exact version %q for installation in: %s", tfVersion, tfDir)) + sources = append(sources, &releases.ExactVersion{ InstallDir: tfDir, Product: product.Terraform, Version: tfVersion, }) default: + logging.HelperResourceTrace(ctx, "Adding potential Terraform CLI source of local filesystem PATH lookup") + logging.HelperResourceTrace(ctx, fmt.Sprintf("Adding potential Terraform CLI source of checkpoint.hashicorp.com latest version for installation in: %s", tfDir)) + sources = append(sources, &fs.AnyVersion{ Product: &product.Terraform, }) @@ -70,9 +78,13 @@ func DiscoverConfig(sourceDir string) (*Config, error) { installer := install.NewInstaller() tfExec, err := installer.Ensure(context.Background(), sources) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to find or install Terraform CLI from %+v: %w", sources, err) } + ctx = logging.TestTerraformPathContext(ctx, tfExec) + + logging.HelperResourceDebug(ctx, "Found Terraform CLI") + return &Config{ SourceDir: sourceDir, TerraformExec: tfExec, diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/environment_variables.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/environment_variables.go new file mode 100644 index 000000000..6fd001a07 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/environment_variables.go @@ -0,0 +1,108 @@ +package plugintest + +// Environment variables +const ( + // Environment variable with acceptance testing temporary directory for + // testing files and Terraform CLI installation, if installation is + // required. By default, the operating system temporary directory is used. + // + // Setting TF_ACC_TERRAFORM_PATH does not override this value for Terraform + // CLI installation, if installation is required. + EnvTfAccTempDir = "TF_ACC_TEMP_DIR" + + // Environment variable with level to filter Terraform logs during + // acceptance testing. This value sets TF_LOG in a safe manner when + // executing Terraform CLI commands, which would otherwise interfere + // with the testing framework using TF_LOG to set the Go standard library + // log package level. + // + // This value takes precedence over TF_LOG_CORE, due to precedence rules + // in the Terraform core code, so it is not possible to set this to a level + // and also TF_LOG_CORE=OFF. 
Use TF_LOG_CORE and TF_LOG_PROVIDER in that
+	// case instead.
+	//
+	// If not set, but TF_ACC_LOG_PATH or TF_LOG_PATH_MASK is set, it defaults
+	// to TRACE. If Terraform CLI is version 0.14 or earlier, it will have no
+	// separate effect from the TF_ACC_LOG_PATH or TF_LOG_PATH_MASK behavior,
+	// as those earlier versions of Terraform are unreliable with the logging
+	// level being outside TRACE.
+	EnvTfAccLog = "TF_ACC_LOG"
+
+	// Environment variable with path to save Terraform logs during acceptance
+	// testing. This value sets TF_LOG_PATH in a safe manner when executing
+	// Terraform CLI commands, which would otherwise be ignored since it could
+	// interfere with how the underlying execution is performed.
+	//
+	// If TF_LOG_PATH_MASK is set, it takes precedence over this value.
+	EnvTfAccLogPath = "TF_ACC_LOG_PATH"
+
+	// Environment variable with level to filter Terraform core logs during
+	// acceptance testing. This value sets TF_LOG_CORE separate from
+	// TF_LOG_PROVIDER when calling Terraform.
+	//
+	// This value has no effect when TF_ACC_LOG is set (which sets Terraform's
+	// TF_LOG), due to precedence rules in the Terraform core code. Use
+	// TF_LOG_CORE and TF_LOG_PROVIDER in that case instead.
+	//
+	// If not set, defaults to TF_ACC_LOG behaviors.
+	EnvTfLogCore = "TF_LOG_CORE"
+
+	// Environment variable with path containing the string %s, which is
+	// replaced with the test name, to save separate Terraform logs during
+	// acceptance testing. This value sets TF_LOG_PATH in a safe manner when
+	// executing Terraform CLI commands, which would otherwise be ignored since
+	// it could interfere with how the underlying execution is performed.
+	//
+	// Takes precedence over TF_ACC_LOG_PATH.
+	EnvTfLogPathMask = "TF_LOG_PATH_MASK"
+
+	// Environment variable with level to filter Terraform provider logs during
+	// acceptance testing. This value sets TF_LOG_PROVIDER separate from
+	// TF_LOG_CORE.
+	//
+	// During testing, this only affects external providers whose logging goes
+	// through Terraform. The logging for the provider under test is controlled
+	// by the testing framework as it is running the provider code. Provider
+	// code using the Go standard library log package is controlled by TF_LOG
+	// for historical compatibility.
+	//
+	// This value takes precedence over TF_ACC_LOG for external provider logs,
+	// due to rules in the Terraform core code.
+	//
+	// If not set, defaults to TF_ACC_LOG behaviors.
+	EnvTfLogProvider = "TF_LOG_PROVIDER"
+
+	// Environment variable with acceptance testing Terraform CLI version to
+	// download from releases.hashicorp.com, checksum verify, and install. The
+	// value can be any valid Terraform CLI version, such as 1.1.6, with or
+	// without a prepended v character.
+	//
+	// Setting this value takes precedence over using an available Terraform
+	// binary in the operating system PATH, or if not found, installing the
+	// latest version according to checkpoint.hashicorp.com.
+	//
+	// By default, the binary is installed in the operating system temporary
+	// directory, however that directory can be overridden with the
+	// TF_ACC_TEMP_DIR environment variable.
+	//
+	// If TF_ACC_TERRAFORM_PATH is also set, this installation method is
+	// only invoked when a binary does not exist at that path. No version
+	// checks are performed against an existing TF_ACC_TERRAFORM_PATH.
+	EnvTfAccTerraformVersion = "TF_ACC_TERRAFORM_VERSION"
+
+	// Acceptance testing path to Terraform CLI binary.
+	//
+	// Setting this value takes precedence over using an available Terraform
+	// binary in the operating system PATH, or if not found, installing the
+	// latest version according to checkpoint.hashicorp.com. This value does
+	// not override TF_ACC_TEMP_DIR for Terraform CLI installation, if
+	// installation is required.
+	//
+	// If TF_ACC_TERRAFORM_VERSION is not set, the binary must exist and be
+	// executable, or an error will be returned.
+	//
+	// If TF_ACC_TERRAFORM_VERSION is also set, that Terraform CLI version
+	// will be installed if a binary is not found at the given path. No version
+	// checks are performed against an existing binary.
+	EnvTfAccTerraformPath = "TF_ACC_TERRAFORM_PATH"
+)
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/guard.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/guard.go
index 56eaa0c16..ad796be08 100644
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/guard.go
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/guard.go
@@ -2,54 +2,8 @@ package plugintest
 
 import (
 	"fmt"
-	"os"
-	"testing"
 )
 
-// AcceptanceTest is a test guard that will produce a log and call SkipNow on
-// the given TestControl if the environment variable TF_ACC isn't set to
-// indicate that the caller wants to run acceptance tests.
-//
-// Call this immediately at the start of each acceptance test function to
-// signal that it may cost money and thus requires this opt-in enviromment
-// variable.
-//
-// For the purpose of this function, an "acceptance test" is any est that
-// reaches out to services that are not directly controlled by the test program
-// itself, particularly if those requests may lead to service charges. For any
-// system where it is possible and realistic to run a local instance of the
-// service for testing (e.g. in a daemon launched by the test program itself),
-// prefer to do this and _don't_ call AcceptanceTest, thus allowing tests to be
-// run more easily and without external cost by contributors.
-func AcceptanceTest(t TestControl) {
-	t.Helper()
-	if os.Getenv("TF_ACC") != "" {
-		t.Log("TF_ACC is not set")
-		t.SkipNow()
-	}
-}
-
-// LongTest is a test guard that will produce a log and call SkipNow on the
-// given TestControl if the test harness is currently running in "short mode".
-//
-// What is considered a "long test" will always be pretty subjective, but test
-// implementers should think of this in terms of what seems like it'd be
-// inconvenient to run repeatedly for quick feedback while testing a new feature
-// under development.
-//
-// When testing resource types that always take several minutes to complete
-// operations, consider having a single general test that covers the basic
-// functionality and then mark any other more specific tests as long tests so
-// that developers can quickly smoke-test a particular feature when needed
-// but can still run the full set of tests for a feature when needed.
-func LongTest(t TestControl) {
-	t.Helper()
-	if testing.Short() {
-		t.Log("skipping long test because of short mode")
-		t.SkipNow()
-	}
-}
-
 // TestControl is an interface requiring a subset of *testing.T which is used
 // by the test guards and helpers in this package.
Most callers can simply // pass their *testing.T value here, but the interface allows other @@ -65,6 +19,7 @@ type TestControl interface { Log(args ...interface{}) FailNow() SkipNow() + Name() string } // testingT wraps a TestControl to recover some of the convenience behaviors diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go index 4b130b499..0411eae0a 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go @@ -1,16 +1,17 @@ package plugintest import ( + "context" + "errors" "fmt" "io/ioutil" "os" + "strings" "github.com/hashicorp/terraform-exec/tfexec" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" ) -const subprocessCurrentSigil = "4acd63807899403ca4859f5bb948d2c6" -const subprocessPreviousSigil = "2279afb8cf71423996be1fd65d32f13b" - // AutoInitProviderHelper is the main entrypoint for testing provider plugins // using this package. It is intended to be called during TestMain to prepare // for provider testing. @@ -20,8 +21,8 @@ const subprocessPreviousSigil = "2279afb8cf71423996be1fd65d32f13b" // available for upgrade tests, and then will return an object containing the // results of that initialization which can then be stored in a global variable // for use in other tests. -func AutoInitProviderHelper(sourceDir string) *Helper { - helper, err := AutoInitHelper(sourceDir) +func AutoInitProviderHelper(ctx context.Context, sourceDir string) *Helper { + helper, err := AutoInitHelper(ctx, sourceDir) if err != nil { fmt.Fprintf(os.Stderr, "cannot run Terraform provider tests: %s\n", err) os.Exit(1) @@ -49,13 +50,13 @@ type Helper struct { // way to get the standard init behavior based on environment variables, and // callers should use this unless they have an unusual requirement that calls // for constructing a config in a different way. -func AutoInitHelper(sourceDir string) (*Helper, error) { - config, err := DiscoverConfig(sourceDir) +func AutoInitHelper(ctx context.Context, sourceDir string) (*Helper, error) { + config, err := DiscoverConfig(ctx, sourceDir) if err != nil { return nil, err } - return InitHelper(config) + return InitHelper(ctx, config) } // InitHelper prepares a testing helper with the given configuration. @@ -67,8 +68,8 @@ func AutoInitHelper(sourceDir string) (*Helper, error) { // If this function returns an error then it may have left some temporary files // behind in the system's temporary directory. There is currently no way to // automatically clean those up. -func InitHelper(config *Config) (*Helper, error) { - tempDir := os.Getenv("TF_ACC_TEMP_DIR") +func InitHelper(ctx context.Context, config *Config) (*Helper, error) { + tempDir := os.Getenv(EnvTfAccTempDir) baseDir, err := ioutil.TempDir(tempDir, "plugintest") if err != nil { return nil, fmt.Errorf("failed to create temporary directory for test helper: %s", err) @@ -103,24 +104,155 @@ func (h *Helper) Close() error { // If the working directory object is not itself closed by the time the test // program exits, the Close method on the helper itself will attempt to // delete it. 
-func (h *Helper) NewWorkingDir() (*WorkingDir, error) { +func (h *Helper) NewWorkingDir(ctx context.Context, t TestControl) (*WorkingDir, error) { dir, err := ioutil.TempDir(h.baseDir, "work") if err != nil { return nil, err } + ctx = logging.TestWorkingDirectoryContext(ctx, dir) + // symlink the provider source files into the config directory // e.g. testdata + logging.HelperResourceTrace(ctx, "Symlinking source directories to work directory") err = symlinkDirectoriesOnly(h.sourceDir, dir) if err != nil { return nil, err } tf, err := tfexec.NewTerraform(dir, h.terraformExec) + if err != nil { + return nil, fmt.Errorf("unable to create terraform-exec instance: %w", err) + } + + err = tf.SetDisablePluginTLS(true) + + if err != nil { + return nil, fmt.Errorf("unable to disable terraform-exec plugin TLS: %w", err) + } + + err = tf.SetSkipProviderVerify(true) // Only required for Terraform CLI 0.12.x + + var mismatch *tfexec.ErrVersionMismatch + if err != nil && !errors.As(err, &mismatch) { + return nil, fmt.Errorf("unable to disable terraform-exec provider verification: %w", err) + } + + tfAccLog := os.Getenv(EnvTfAccLog) + tfAccLogPath := os.Getenv(EnvTfAccLogPath) + tfLogCore := os.Getenv(EnvTfLogCore) + tfLogPathMask := os.Getenv(EnvTfLogPathMask) + tfLogProvider := os.Getenv(EnvTfLogProvider) + + if tfAccLog != "" && tfLogCore != "" { + err = fmt.Errorf( + "Invalid environment variable configuration. Cannot set both TF_ACC_LOG and TF_LOG_CORE. " + + "Use TF_LOG_CORE and TF_LOG_PROVIDER to separately control the Terraform CLI logging subsystems. " + + "To control the Go standard library log package for the provider under test, use TF_LOG.", + ) + logging.HelperResourceError(ctx, err.Error()) return nil, err } + if tfAccLog != "" { + logging.HelperResourceTrace( + ctx, + fmt.Sprintf("Setting terraform-exec log level via %s environment variable, if Terraform CLI is version 0.15 or later", EnvTfAccLog), + map[string]interface{}{logging.KeyTestTerraformLogLevel: tfAccLog}, + ) + + err := tf.SetLog(tfAccLog) + + if err != nil { + if !errors.As(err, new(*tfexec.ErrVersionMismatch)) { + logging.HelperResourceError( + ctx, + "Unable to set terraform-exec log level", + map[string]interface{}{logging.KeyError: err.Error()}, + ) + return nil, fmt.Errorf("unable to set terraform-exec log level (%s): %w", tfAccLog, err) + } + + logging.HelperResourceWarn( + ctx, + fmt.Sprintf("Unable to set terraform-exec log level via %s environment variable, as Terraform CLI is version 0.14 or earlier. 
It will default to TRACE.", EnvTfAccLog),
+				map[string]interface{}{logging.KeyTestTerraformLogLevel: "TRACE"},
+			)
+		}
+	}
+
+	if tfLogCore != "" {
+		logging.HelperResourceTrace(
+			ctx,
+			fmt.Sprintf("Setting terraform-exec core log level via %s environment variable, if Terraform CLI is version 0.15 or later", EnvTfLogCore),
+			map[string]interface{}{
+				logging.KeyTestTerraformLogCoreLevel: tfLogCore,
+			},
+		)
+
+		err := tf.SetLogCore(tfLogCore)
+
+		if err != nil {
+			logging.HelperResourceError(
+				ctx,
+				"Unable to set terraform-exec core log level",
+				map[string]interface{}{logging.KeyError: err.Error()},
+			)
+			return nil, fmt.Errorf("unable to set terraform-exec core log level (%s): %w", tfLogCore, err)
+		}
+	}
+
+	if tfLogProvider != "" {
+		logging.HelperResourceTrace(
+			ctx,
+			fmt.Sprintf("Setting terraform-exec provider log level via %s environment variable, if Terraform CLI is version 0.15 or later", EnvTfLogProvider),
+			map[string]interface{}{
+				logging.KeyTestTerraformLogProviderLevel: tfLogProvider,
+			},
+		)
+
+		err := tf.SetLogProvider(tfLogProvider)
+
+		if err != nil {
+			logging.HelperResourceError(
+				ctx,
+				"Unable to set terraform-exec provider log level",
+				map[string]interface{}{logging.KeyError: err.Error()},
+			)
+			return nil, fmt.Errorf("unable to set terraform-exec provider log level (%s): %w", tfLogProvider, err)
+		}
+	}
+
+	var logPath, logPathEnvVar string
+
+	if tfAccLogPath != "" {
+		logPath = tfAccLogPath
+		logPathEnvVar = EnvTfAccLogPath
+	}
+
+	// Similar to helper/logging.LogOutput() and
+	// terraform-plugin-log/tfsdklog.RegisterTestSink(), the TF_LOG_PATH_MASK
+	// environment variable should take precedence over TF_ACC_LOG_PATH.
+	if tfLogPathMask != "" {
+		// Escape special characters which may appear if we have subtests
+		testName := strings.Replace(t.Name(), "/", "__", -1)
+		logPath = fmt.Sprintf(tfLogPathMask, testName)
+		logPathEnvVar = EnvTfLogPathMask
+	}
+
+	if logPath != "" {
+		logging.HelperResourceTrace(
+			ctx,
+			fmt.Sprintf("Setting terraform-exec log path via %s environment variable", logPathEnvVar),
+			map[string]interface{}{logging.KeyTestTerraformLogPath: logPath},
+		)
+
+		if err := tf.SetLogPath(logPath); err != nil {
+			return nil, fmt.Errorf("unable to set terraform-exec log path (%s): %w", logPath, err)
+		}
+	}
+
 	return &WorkingDir{
 		h:  h,
 		tf: tf,
@@ -132,10 +264,10 @@ func (h *Helper) NewWorkingDir() (*WorkingDir, error) {
 // RequireNewWorkingDir is a variant of NewWorkingDir that takes a TestControl
 // object and will immediately fail the running test if the creation of the
 // working directory fails.
-func (h *Helper) RequireNewWorkingDir(t TestControl) *WorkingDir {
+func (h *Helper) RequireNewWorkingDir(ctx context.Context, t TestControl) *WorkingDir {
 	t.Helper()
 
-	wd, err := h.NewWorkingDir()
+	wd, err := h.NewWorkingDir(ctx, t)
 	if err != nil {
 		t := testingT{t}
 		t.Fatalf("failed to create new working directory: %s", err)
@@ -144,6 +276,11 @@ func (h *Helper) RequireNewWorkingDir(t TestControl) *WorkingDir {
 	return wd
 }
 
+// WorkingDirectory returns the working directory being used when running tests.
+func (h *Helper) WorkingDirectory() string {
+	return h.baseDir
+}
+
 // TerraformExecPath returns the location of the Terraform CLI executable that
 // should be used when running tests.
func (h *Helper) TerraformExecPath() string { diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/util.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/util.go index df1cb92fd..335e217ad 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/util.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/util.go @@ -1,20 +1,31 @@ package plugintest import ( + "fmt" "os" "path/filepath" ) -func symlinkFile(src string, dest string) (err error) { - err = os.Symlink(src, dest) - if err == nil { - srcInfo, err := os.Stat(src) - if err != nil { - err = os.Chmod(dest, srcInfo.Mode()) - } +func symlinkFile(src string, dest string) error { + err := os.Symlink(src, dest) + + if err != nil { + return fmt.Errorf("unable to symlink %q to %q: %w", src, dest, err) } - return + srcInfo, err := os.Stat(src) + + if err != nil { + return fmt.Errorf("unable to stat %q: %w", src, err) + } + + err = os.Chmod(dest, srcInfo.Mode()) + + if err != nil { + return fmt.Errorf("unable to set %q permissions: %w", dest, err) + } + + return nil } // symlinkDir is a simplistic function for recursively symlinking all files in a directory to a new path. diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go index 1faeb5c7a..1c15e6f5c 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go @@ -3,7 +3,7 @@ package plugintest import ( "bytes" "context" - "errors" + "encoding/json" "fmt" "io/ioutil" "os" @@ -11,11 +11,13 @@ import ( "github.com/hashicorp/terraform-exec/tfexec" tfjson "github.com/hashicorp/terraform-json" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" ) const ( - ConfigFileName = "terraform_plugin_test.tf" - PlanFileName = "tfplan" + ConfigFileName = "terraform_plugin_test.tf" + ConfigFileNameJSON = ConfigFileName + ".json" + PlanFileName = "tfplan" ) // WorkingDir represents a distinct working directory that can be used for @@ -28,6 +30,10 @@ type WorkingDir struct { // baseDir is the root of the working directory tree baseDir string + // configFilename is the full filename where the latest configuration + // was stored; empty until SetConfig is called. + configFilename string + // baseArgs is arguments that should be appended to all commands baseArgs []string @@ -40,8 +46,6 @@ type WorkingDir struct { // reattachInfo stores the gRPC socket info required for Terraform's // plugin reattach functionality reattachInfo tfexec.ReattachInfo - - env map[string]string } // Close deletes the directories and files created to represent the receiving @@ -51,20 +55,8 @@ func (wd *WorkingDir) Close() error { return os.RemoveAll(wd.baseDir) } -// Setenv sets an environment variable on the WorkingDir. -func (wd *WorkingDir) Setenv(envVar, val string) { - if wd.env == nil { - wd.env = map[string]string{} - } - wd.env[envVar] = val -} - -// Unsetenv removes an environment variable from the WorkingDir. 
-func (wd *WorkingDir) Unsetenv(envVar string) { - delete(wd.env, envVar) -} - -func (wd *WorkingDir) SetReattachInfo(reattachInfo tfexec.ReattachInfo) { +func (wd *WorkingDir) SetReattachInfo(ctx context.Context, reattachInfo tfexec.ReattachInfo) { + logging.HelperResourceTrace(ctx, "Setting Terraform CLI reattach configuration", map[string]interface{}{"tf_reattach_config": reattachInfo}) wd.reattachInfo = reattachInfo } @@ -82,29 +74,24 @@ func (wd *WorkingDir) GetHelper() *Helper { // This must be called at least once before any call to Init, Plan, Apply, or // Destroy to establish the configuration. Any previously-set configuration is // discarded and any saved plan is cleared. -func (wd *WorkingDir) SetConfig(cfg string) error { - configFilename := filepath.Join(wd.baseDir, ConfigFileName) - err := ioutil.WriteFile(configFilename, []byte(cfg), 0700) - if err != nil { - return err +func (wd *WorkingDir) SetConfig(ctx context.Context, cfg string) error { + outFilename := filepath.Join(wd.baseDir, ConfigFileName) + rmFilename := filepath.Join(wd.baseDir, ConfigFileNameJSON) + bCfg := []byte(cfg) + if json.Valid(bCfg) { + outFilename, rmFilename = rmFilename, outFilename } - - var mismatch *tfexec.ErrVersionMismatch - err = wd.tf.SetDisablePluginTLS(true) - if err != nil && !errors.As(err, &mismatch) { - return err + if err := os.Remove(rmFilename); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unable to remove %q: %w", rmFilename, err) } - err = wd.tf.SetSkipProviderVerify(true) - if err != nil && !errors.As(err, &mismatch) { + err := ioutil.WriteFile(outFilename, bCfg, 0700) + if err != nil { return err } - - if p := os.Getenv("TF_ACC_LOG_PATH"); p != "" { - wd.tf.SetLogPath(p) - } + wd.configFilename = outFilename // Changing configuration invalidates any saved plan. - err = wd.ClearPlan() + err = wd.ClearPlan(ctx) if err != nil { return err } @@ -115,35 +102,66 @@ func (wd *WorkingDir) SetConfig(cfg string) error { // // Any remote objects tracked by the state are not destroyed first, so this // will leave them dangling in the remote system. -func (wd *WorkingDir) ClearState() error { +func (wd *WorkingDir) ClearState(ctx context.Context) error { + logging.HelperResourceTrace(ctx, "Clearing Terraform state") + err := os.Remove(filepath.Join(wd.baseDir, "terraform.tfstate")) + if os.IsNotExist(err) { + logging.HelperResourceTrace(ctx, "No Terraform state to clear") return nil } - return err + + if err != nil { + return err + } + + logging.HelperResourceTrace(ctx, "Cleared Terraform state") + + return nil } // ClearPlan deletes any saved plan present in the working directory. -func (wd *WorkingDir) ClearPlan() error { +func (wd *WorkingDir) ClearPlan(ctx context.Context) error { + logging.HelperResourceTrace(ctx, "Clearing Terraform plan") + err := os.Remove(wd.planFilename()) + if os.IsNotExist(err) { + logging.HelperResourceTrace(ctx, "No Terraform plan to clear") return nil } - return err + + if err != nil { + return err + } + + logging.HelperResourceTrace(ctx, "Cleared Terraform plan") + + return nil } +var errWorkingDirSetConfigNotCalled = fmt.Errorf("must call SetConfig before Init") + // Init runs "terraform init" for the given working directory, forcing Terraform // to use the current version of the plugin under test. 
-func (wd *WorkingDir) Init() error { - if _, err := os.Stat(wd.configFilename()); err != nil { - return fmt.Errorf("must call SetConfig before Init") +func (wd *WorkingDir) Init(ctx context.Context) error { + if wd.configFilename == "" { + return errWorkingDirSetConfigNotCalled + } + if _, err := os.Stat(wd.configFilename); err != nil { + return errWorkingDirSetConfigNotCalled } - return wd.tf.Init(context.Background(), tfexec.Reattach(wd.reattachInfo)) -} + logging.HelperResourceTrace(ctx, "Calling Terraform CLI init command") + + // -upgrade=true is required for per-TestStep provider version changes + // e.g. TestTest_TestStep_ExternalProviders_DifferentVersions + err := wd.tf.Init(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Upgrade(true)) -func (wd *WorkingDir) configFilename() string { - return filepath.Join(wd.baseDir, ConfigFileName) + logging.HelperResourceTrace(ctx, "Called Terraform CLI init command") + + return err } func (wd *WorkingDir) planFilename() string { @@ -152,15 +170,25 @@ func (wd *WorkingDir) planFilename() string { // CreatePlan runs "terraform plan" to create a saved plan file, which if successful // will then be used for the next call to Apply. -func (wd *WorkingDir) CreatePlan() error { +func (wd *WorkingDir) CreatePlan(ctx context.Context) error { + logging.HelperResourceTrace(ctx, "Calling Terraform CLI plan command") + _, err := wd.tf.Plan(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false), tfexec.Out(PlanFileName)) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI plan command") + return err } // CreateDestroyPlan runs "terraform plan -destroy" to create a saved plan // file, which if successful will then be used for the next call to Apply. -func (wd *WorkingDir) CreateDestroyPlan() error { +func (wd *WorkingDir) CreateDestroyPlan(ctx context.Context) error { + logging.HelperResourceTrace(ctx, "Calling Terraform CLI plan -destroy command") + _, err := wd.tf.Plan(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false), tfexec.Out(PlanFileName), tfexec.Destroy(true)) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI plan -destroy command") + return err } @@ -168,13 +196,19 @@ func (wd *WorkingDir) CreateDestroyPlan() error { // successfully and the saved plan has not been cleared in the meantime then // this will apply the saved plan. Otherwise, it will implicitly create a new // plan and apply it. -func (wd *WorkingDir) Apply() error { +func (wd *WorkingDir) Apply(ctx context.Context) error { args := []tfexec.ApplyOption{tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false)} if wd.HasSavedPlan() { args = append(args, tfexec.DirOrPlan(PlanFileName)) } - return wd.tf.Apply(context.Background(), args...) + logging.HelperResourceTrace(ctx, "Calling Terraform CLI apply command") + + err := wd.tf.Apply(context.Background(), args...) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI apply command") + + return err } // Destroy runs "terraform destroy". It does not consider or modify any saved @@ -182,8 +216,14 @@ func (wd *WorkingDir) Apply() error { // // If destroy fails then remote objects might still exist, and continue to // exist after a particular test is concluded. 
-func (wd *WorkingDir) Destroy() error {
-	return wd.tf.Destroy(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false))
+func (wd *WorkingDir) Destroy(ctx context.Context) error {
+	logging.HelperResourceTrace(ctx, "Calling Terraform CLI destroy command")
+
+	err := wd.tf.Destroy(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false))
+
+	logging.HelperResourceTrace(ctx, "Called Terraform CLI destroy command")
+
+	return err
 }
 
 // HasSavedPlan returns true if there is a saved plan in the working directory. If
@@ -197,19 +237,25 @@ func (wd *WorkingDir) HasSavedPlan() bool {
 //
 // If no plan is saved or if the plan file cannot be read, SavedPlan returns
 // an error.
-func (wd *WorkingDir) SavedPlan() (*tfjson.Plan, error) {
+func (wd *WorkingDir) SavedPlan(ctx context.Context) (*tfjson.Plan, error) {
 	if !wd.HasSavedPlan() {
 		return nil, fmt.Errorf("there is no current saved plan")
 	}
 
-	return wd.tf.ShowPlanFile(context.Background(), wd.planFilename(), tfexec.Reattach(wd.reattachInfo))
+	logging.HelperResourceTrace(ctx, "Calling Terraform CLI show command")
+
+	plan, err := wd.tf.ShowPlanFile(context.Background(), wd.planFilename(), tfexec.Reattach(wd.reattachInfo))
+
+	logging.HelperResourceTrace(ctx, "Called Terraform CLI show command")
+
+	return plan, err
 }
 
 // SavedPlanRawStdout returns a human readable stdout capture of the current saved plan file, if any.
 //
 // If no plan is saved or if the plan file cannot be read, SavedPlanRawStdout returns
 // an error.
-func (wd *WorkingDir) SavedPlanRawStdout() (string, error) {
+func (wd *WorkingDir) SavedPlanRawStdout(ctx context.Context) (string, error) {
 	if !wd.HasSavedPlan() {
 		return "", fmt.Errorf("there is no current saved plan")
 	}
@@ -218,7 +264,13 @@ func (wd *WorkingDir) SavedPlanRawStdout() (string, error) {
 
 	wd.tf.SetStdout(&ret)
 	defer wd.tf.SetStdout(ioutil.Discard)
+
+	logging.HelperResourceTrace(ctx, "Calling Terraform CLI show command")
+
 	_, err := wd.tf.ShowPlanFileRaw(context.Background(), wd.planFilename(), tfexec.Reattach(wd.reattachInfo))
+
+	logging.HelperResourceTrace(ctx, "Called Terraform CLI show command")
+
 	if err != nil {
 		return "", err
 	}
@@ -230,23 +282,47 @@ func (wd *WorkingDir) SavedPlanRawStdout() (string, error) {
 //
 // If the state cannot be read, State returns an error.
-func (wd *WorkingDir) State() (*tfjson.State, error) { - return wd.tf.Show(context.Background(), tfexec.Reattach(wd.reattachInfo)) +func (wd *WorkingDir) State(ctx context.Context) (*tfjson.State, error) { + logging.HelperResourceTrace(ctx, "Calling Terraform CLI show command") + + state, err := wd.tf.Show(context.Background(), tfexec.Reattach(wd.reattachInfo)) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI show command") + + return state, err } // Import runs terraform import -func (wd *WorkingDir) Import(resource, id string) error { - return wd.tf.Import(context.Background(), resource, id, tfexec.Config(wd.baseDir), tfexec.Reattach(wd.reattachInfo)) +func (wd *WorkingDir) Import(ctx context.Context, resource, id string) error { + logging.HelperResourceTrace(ctx, "Calling Terraform CLI import command") + + err := wd.tf.Import(context.Background(), resource, id, tfexec.Config(wd.baseDir), tfexec.Reattach(wd.reattachInfo)) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI import command") + + return err } // Refresh runs terraform refresh -func (wd *WorkingDir) Refresh() error { - return wd.tf.Refresh(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.State(filepath.Join(wd.baseDir, "terraform.tfstate"))) +func (wd *WorkingDir) Refresh(ctx context.Context) error { + logging.HelperResourceTrace(ctx, "Calling Terraform CLI refresh command") + + err := wd.tf.Refresh(context.Background(), tfexec.Reattach(wd.reattachInfo)) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI refresh command") + + return err } // Schemas returns an object describing the provider schemas. // // If the schemas cannot be read, Schemas returns an error. -func (wd *WorkingDir) Schemas() (*tfjson.ProviderSchemas, error) { - return wd.tf.ProvidersSchema(context.Background()) +func (wd *WorkingDir) Schemas(ctx context.Context) (*tfjson.ProviderSchemas, error) { + logging.HelperResourceTrace(ctx, "Calling Terraform CLI providers schema command") + + providerSchemas, err := wd.tf.ProvidersSchema(context.Background()) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI providers schema command") + + return providerSchemas, err } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/debug.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/debug.go index 171451ca7..e1875e134 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/debug.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/debug.go @@ -2,13 +2,7 @@ package plugin import ( "context" - "encoding/json" "errors" - "fmt" - "os" - "os/signal" - "runtime" - "strings" "time" "github.com/hashicorp/go-plugin" @@ -37,6 +31,10 @@ func DebugServe(ctx context.Context, opts *ServeOpts) (ReattachConfig, <-chan st reattachCh := make(chan *plugin.ReattachConfig) closeCh := make(chan struct{}) + if opts == nil { + return ReattachConfig{}, closeCh, errors.New("ServeOpts must be passed in with at least GRPCProviderFunc, GRPCProviderV6Func, or ProviderFunc") + } + opts.TestConfig = &plugin.ServeTestConfig{ Context: ctx, ReattachConfigCh: reattachCh, @@ -71,48 +69,20 @@ func DebugServe(ctx context.Context, opts *ServeOpts) (ReattachConfig, <-chan st // Debug starts a debug server and controls its lifecycle, printing the // information needed for Terraform to connect to the provider to stdout. // os.Interrupt will be captured and used to stop the server. +// +// Deprecated: Use the Serve function with the ServeOpts Debug field instead. 
func Debug(ctx context.Context, providerAddr string, opts *ServeOpts) error { - ctx, cancel := context.WithCancel(ctx) - // Ctrl-C will stop the server - sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, os.Interrupt) - defer func() { - signal.Stop(sigCh) - cancel() - }() - config, closeCh, err := DebugServe(ctx, opts) - if err != nil { - return fmt.Errorf("Error launching debug server: %w", err) - } - go func() { - select { - case <-sigCh: - cancel() - case <-ctx.Done(): - } - }() - reattachBytes, err := json.Marshal(map[string]ReattachConfig{ - providerAddr: config, - }) - if err != nil { - return fmt.Errorf("Error building reattach string: %w", err) + if opts == nil { + return errors.New("ServeOpts must be passed in with at least GRPCProviderFunc, GRPCProviderV6Func, or ProviderFunc") } - reattachStr := string(reattachBytes) - - fmt.Printf("Provider started, to attach Terraform set the TF_REATTACH_PROVIDERS env var:\n\n") - switch runtime.GOOS { - case "windows": - fmt.Printf("\tCommand Prompt:\tset \"TF_REATTACH_PROVIDERS=%s\"\n", reattachStr) - fmt.Printf("\tPowerShell:\t$env:TF_REATTACH_PROVIDERS='%s'\n", strings.ReplaceAll(reattachStr, `'`, `''`)) - case "linux", "darwin": - fmt.Printf("\tTF_REATTACH_PROVIDERS='%s'\n", strings.ReplaceAll(reattachStr, `'`, `'"'"'`)) - default: - fmt.Println(reattachStr) + opts.Debug = true + + if opts.ProviderAddr == "" { + opts.ProviderAddr = providerAddr } - fmt.Println("") - // wait for the server to be done - <-closeCh + Serve(opts) + return nil } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go index ac126642d..88975da2c 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go @@ -1,12 +1,12 @@ package plugin import ( + "errors" "log" hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" testing "github.com/mitchellh/go-testing-interface" - "google.golang.org/grpc" "github.com/hashicorp/terraform-plugin-go/tfprotov5" "github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server" @@ -18,10 +18,16 @@ import ( const ( // The constants below are the names of the plugins that can be dispensed // from the plugin server. + // + // Deprecated: This is no longer used, but left for backwards compatibility + // since it is exported. It will be removed in the next major version. ProviderPluginName = "provider" ) // Handshake is the HandshakeConfig used to configure clients and servers. +// +// Deprecated: This is no longer used, but left for backwards compatibility +// since it is exported. It will be removed in the next major version. var Handshake = plugin.HandshakeConfig{ // The magic cookie values should NEVER be changed. MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", @@ -45,11 +51,20 @@ type ServeOpts struct { // Logger is the logger that go-plugin will use. Logger hclog.Logger + // Debug starts a debug server and controls its lifecycle, printing the + // information needed for Terraform to connect to the provider to stdout. + // os.Interrupt will be captured and used to stop the server. + // + // This option cannot be combined with TestConfig. + Debug bool + // TestConfig should only be set when the provider is being tested; it // will opt out of go-plugin's lifecycle management and other features, // and will use the supplied configuration options to control the // plugin's lifecycle and communicate connection information. 
See the // go-plugin GoDoc for more information. + // + // This option cannot be combined with Debug. TestConfig *plugin.ServeTestConfig // Set NoLogOutputOverride to not override the log output with an hclog @@ -69,6 +84,11 @@ type ServeOpts struct { // Serve serves a plugin. This function never returns and should be the final // function called in the main function of the plugin. func Serve(opts *ServeOpts) { + if opts.Debug && opts.TestConfig != nil { + log.Printf("[ERROR] Error starting provider: cannot set both Debug and TestConfig") + return + } + if !opts.NoLogOutputOverride { // In order to allow go-plugin to correctly pass log-levels through to // terraform, we need to use an hclog.Logger with JSON output. We can @@ -84,65 +104,125 @@ func Serve(opts *ServeOpts) { log.SetOutput(logger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true})) } - // since the plugins may not yet be aware of the new protocol, we - // automatically wrap the plugins in the grpc shims. - if opts.GRPCProviderFunc == nil && opts.ProviderFunc != nil { + if opts.ProviderAddr == "" { + opts.ProviderAddr = "provider" + } + + var err error + + switch { + case opts.ProviderFunc != nil && opts.GRPCProviderFunc == nil: opts.GRPCProviderFunc = func() tfprotov5.ProviderServer { return schema.NewGRPCProviderServer(opts.ProviderFunc()) } + err = tf5serverServe(opts) + case opts.GRPCProviderFunc != nil: + err = tf5serverServe(opts) + case opts.GRPCProviderV6Func != nil: + err = tf6serverServe(opts) + default: + err = errors.New("no provider server defined in ServeOpts") } - serveConfig := plugin.ServeConfig{ - HandshakeConfig: Handshake, - GRPCServer: func(opts []grpc.ServerOption) *grpc.Server { - return grpc.NewServer(opts...) - }, - Logger: opts.Logger, - Test: opts.TestConfig, + if err != nil { + log.Printf("[ERROR] Error starting provider: %s", err) } +} - // assume we have either a v5 or a v6 provider - if opts.GRPCProviderFunc != nil { - provider := opts.GRPCProviderFunc() - addr := opts.ProviderAddr - if addr == "" { - addr = "provider" - } - serveConfig.VersionedPlugins = map[int]plugin.PluginSet{ - 5: { - ProviderPluginName: &tf5server.GRPCProviderPlugin{ - GRPCProvider: func() tfprotov5.ProviderServer { - return provider - }, - Name: addr, - }, - }, - } - if opts.UseTFLogSink != nil { - serveConfig.VersionedPlugins[5][ProviderPluginName].(*tf5server.GRPCProviderPlugin).Opts = append(serveConfig.VersionedPlugins[5][ProviderPluginName].(*tf5server.GRPCProviderPlugin).Opts, tf5server.WithLoggingSink(opts.UseTFLogSink)) - } +func tf5serverServe(opts *ServeOpts) error { + var tf5serveOpts []tf5server.ServeOpt - } else if opts.GRPCProviderV6Func != nil { - provider := opts.GRPCProviderV6Func() - addr := opts.ProviderAddr - if addr == "" { - addr = "provider" - } - serveConfig.VersionedPlugins = map[int]plugin.PluginSet{ - 6: { - ProviderPluginName: &tf6server.GRPCProviderPlugin{ - GRPCProvider: func() tfprotov6.ProviderServer { - return provider - }, - Name: addr, - }, - }, - } - if opts.UseTFLogSink != nil { - serveConfig.VersionedPlugins[6][ProviderPluginName].(*tf6server.GRPCProviderPlugin).Opts = append(serveConfig.VersionedPlugins[6][ProviderPluginName].(*tf6server.GRPCProviderPlugin).Opts, tf6server.WithLoggingSink(opts.UseTFLogSink)) - } + if opts.Debug { + tf5serveOpts = append(tf5serveOpts, tf5server.WithManagedDebug()) + } + + if opts.Logger != nil { + tf5serveOpts = append(tf5serveOpts, tf5server.WithGoPluginLogger(opts.Logger)) + } + + if opts.TestConfig != nil { + // Convert send-only 
channels to bi-directional channels to appease
+		// the compiler. WithDebug is errantly defined to require
+		// bi-directional when send-only is actually needed, which may be
+		// fixed in the future so the opts.TestConfig channels can be passed
+		// through directly.
+		closeCh := make(chan struct{})
+		reattachConfigCh := make(chan *plugin.ReattachConfig)
+
+		go func() {
+			// Always forward close channel receive, since it's signaling that
+			// the channel is closed.
+			val := <-closeCh
+			opts.TestConfig.CloseCh <- val
+		}()
+
+		go func() {
+			val, ok := <-reattachConfigCh
+
+			if ok {
+				opts.TestConfig.ReattachConfigCh <- val
+			}
+		}()
+
+		tf5serveOpts = append(tf5serveOpts, tf5server.WithDebug(
+			opts.TestConfig.Context,
+			reattachConfigCh,
+			closeCh),
+		)
+	}
+
+	if opts.UseTFLogSink != nil {
+		tf5serveOpts = append(tf5serveOpts, tf5server.WithLoggingSink(opts.UseTFLogSink))
+	}
+
+	return tf5server.Serve(opts.ProviderAddr, opts.GRPCProviderFunc, tf5serveOpts...)
+}
+
+func tf6serverServe(opts *ServeOpts) error {
+	var tf6serveOpts []tf6server.ServeOpt
+
+	if opts.Debug {
+		tf6serveOpts = append(tf6serveOpts, tf6server.WithManagedDebug())
+	}
+
+	if opts.Logger != nil {
+		tf6serveOpts = append(tf6serveOpts, tf6server.WithGoPluginLogger(opts.Logger))
+	}
+
+	if opts.TestConfig != nil {
+		// Convert send-only channels to bi-directional channels to appease
+		// the compiler. WithDebug is errantly defined to require
+		// bi-directional when send-only is actually needed, which may be
+		// fixed in the future so the opts.TestConfig channels can be passed
+		// through directly.
+		closeCh := make(chan struct{})
+		reattachConfigCh := make(chan *plugin.ReattachConfig)
+
+		go func() {
+			// Always forward close channel receive, since it's signaling that
+			// the channel is closed.
+			val := <-closeCh
+			opts.TestConfig.CloseCh <- val
+		}()
+
+		go func() {
+			val, ok := <-reattachConfigCh
+
+			if ok {
+				opts.TestConfig.ReattachConfigCh <- val
+			}
+		}()
+
+		tf6serveOpts = append(tf6serveOpts, tf6server.WithDebug(
+			opts.TestConfig.Context,
+			reattachConfigCh,
+			closeCh),
+		)
+	}
+	if opts.UseTFLogSink != nil {
+		tf6serveOpts = append(tf6serveOpts, tf6server.WithLoggingSink(opts.UseTFLogSink))
 	}
 
-	plugin.Serve(&serveConfig)
+	return tf6server.Serve(opts.ProviderAddr, opts.GRPCProviderV6Func, tf6serveOpts...)
} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/diff.go index 0511a1fc5..8a2940f31 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/diff.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/diff.go @@ -20,7 +20,7 @@ import ( type diffChangeType byte const ( - diffInvalid diffChangeType = iota + diffInvalid diffChangeType = iota //nolint:deadcode,varcheck diffNone diffCreate diffUpdate diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource.go index 11b63de8a..703547745 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource.go @@ -154,13 +154,13 @@ func (c *ResourceConfig) DeepCopy() *ResourceConfig { } // Copy, this will copy all the exported attributes - copy, err := copystructure.Config{Lock: true}.Copy(c) + copiedConfig, err := copystructure.Config{Lock: true}.Copy(c) if err != nil { panic(err) } // Force the type - result := copy.(*ResourceConfig) + result := copiedConfig.(*ResourceConfig) return result } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go index 7f8655ee7..170e20cd0 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go @@ -366,11 +366,11 @@ func (s *State) Remove(addr ...string) error { switch v := r.Value.(type) { case *ModuleState: - s.removeModule(path, v) + s.removeModule(v) case *ResourceState: s.removeResource(path, v) case *InstanceState: - s.removeInstance(path, r.Parent.Value.(*ResourceState), v) + s.removeInstance(r.Parent.Value.(*ResourceState), v) default: return fmt.Errorf("unknown type to delete: %T", r.Value) } @@ -384,7 +384,7 @@ func (s *State) Remove(addr ...string) error { return nil } -func (s *State) removeModule(path []string, v *ModuleState) { +func (s *State) removeModule(v *ModuleState) { for i, m := range s.Modules { if m == v { s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil @@ -412,7 +412,7 @@ func (s *State) removeResource(path []string, v *ResourceState) { } } -func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) { +func (s *State) removeInstance(r *ResourceState, v *InstanceState) { // Go through the resource and find the instance that matches this // (if any) and remove it. 
@@ -421,20 +421,6 @@ func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState r.Primary = nil return } - - // Check lists - lists := [][]*InstanceState{r.Deposed} - for _, is := range lists { - for i, instance := range is { - if instance == v { - // Found it, remove it - is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil - - // Done - return - } - } - } } // RootModule returns the ModuleState for the root module @@ -562,12 +548,12 @@ func (s *State) DeepCopy() *State { return nil } - copy, err := copystructure.Config{Lock: true}.Copy(s) + copiedState, err := copystructure.Config{Lock: true}.Copy(s) if err != nil { panic(err) } - return copy.(*State) + return copiedState.(*State) } func (s *State) Init() { @@ -1023,7 +1009,7 @@ func (m *ModuleState) String() string { } if len(rs.Dependencies) > 0 { - buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) + buf.WriteString("\n Dependencies:\n") for _, dep := range rs.Dependencies { buf.WriteString(fmt.Sprintf(" %s\n", dep)) } @@ -1237,11 +1223,7 @@ func (s *ResourceState) Equal(other *ResourceState) bool { } // States must be equal - if !s.Primary.Equal(other.Primary) { - return false - } - - return true + return s.Primary.Equal(other.Primary) } // Taint marks a resource as tainted. @@ -1430,12 +1412,12 @@ func (s *InstanceState) Set(from *InstanceState) { } func (s *InstanceState) DeepCopy() *InstanceState { - copy, err := copystructure.Config{Lock: true}.Copy(s) + copiedState, err := copystructure.Config{Lock: true}.Copy(s) if err != nil { panic(err) } - return copy.(*InstanceState) + return copiedState.(*InstanceState) } func (s *InstanceState) Empty() bool { diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state_filter.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state_filter.go index 01d039272..d2229f8ce 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state_filter.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state_filter.go @@ -24,7 +24,15 @@ type stateFilter struct { // parseResourceAddress. func (f *stateFilter) filter(fs ...string) ([]*stateFilterResult, error) { // Parse all the addresses - as := make([]*resourceAddress, len(fs)) + var as []*resourceAddress + + if len(fs) == 0 { + // If we weren't given any filters, then we list all + as = []*resourceAddress{{Index: -1}} + } else { + as = make([]*resourceAddress, len(fs)) + } + for i, v := range fs { a, err := parseResourceAddress(v) if err != nil { @@ -34,11 +42,6 @@ func (f *stateFilter) filter(fs ...string) ([]*stateFilterResult, error) { as[i] = a } - // If we weren't given any filters, then we list all - if len(fs) == 0 { - as = append(as, &resourceAddress{Index: -1}) - } - // Filter each of the address. We keep track of this in a map to // strip duplicates. 
 	resultSet := make(map[string]*stateFilterResult)
diff --git a/vendor/github.com/hashicorp/terraform-registry-address/.go-version b/vendor/github.com/hashicorp/terraform-registry-address/.go-version
index 42cf0675c..adc97d8e2 100644
--- a/vendor/github.com/hashicorp/terraform-registry-address/.go-version
+++ b/vendor/github.com/hashicorp/terraform-registry-address/.go-version
@@ -1 +1 @@
-1.15.2
+1.18
diff --git a/vendor/github.com/hashicorp/terraform-registry-address/README.md b/vendor/github.com/hashicorp/terraform-registry-address/README.md
index 2ed8398da..27db81f7c 100644
--- a/vendor/github.com/hashicorp/terraform-registry-address/README.md
+++ b/vendor/github.com/hashicorp/terraform-registry-address/README.md
@@ -1,46 +1,149 @@
 # terraform-registry-address
 
-This package helps with representation, comparison and parsing of
-Terraform Registry addresses, such as
-`registry.terraform.io/grafana/grafana` or `hashicorp/aws`.
+This module enables parsing, comparison and canonical representation of
+[Terraform Registry](https://registry.terraform.io/) **provider** addresses
+(such as `registry.terraform.io/grafana/grafana` or `hashicorp/aws`)
+and **module** addresses (such as `hashicorp/subnets/cidr`).
 
-The most common source of these addresses outside of Terraform Core
-is JSON representation of state, plan, or schemas as obtained
-via [`hashicorp/terraform-exec`](https://github.com/hashicorp/terraform-exec).
+**Provider** addresses can be found in
 
-## Example
+ - [`terraform show -json <FILE>`](https://www.terraform.io/internals/json-format#configuration-representation) (`full_name`)
+ - [`terraform version -json`](https://www.terraform.io/cli/commands/version#example) (`provider_selections`)
+ - [`terraform providers schema -json`](https://www.terraform.io/cli/commands/providers/schema#providers-schema-representation) (keys of `provider_schemas`)
+ - within `required_providers` block in Terraform configuration (`*.tf`)
+ - Terraform [CLI configuration file](https://www.terraform.io/cli/config/config-file#provider-installation)
+ - Plugin [reattach configurations](https://www.terraform.io/plugin/debugging#running-terraform-with-a-provider-in-debug-mode)
+
+**Module** addresses can be found within the `source` argument
+of a `module` block in Terraform configuration (`*.tf`),
+and parts of the address (namespace and name) appear in the Registry API.
+
+## Compatibility
+
+The module assumes compatibility with Terraform v0.12 and later,
+which produce the JSON output mentioned above via the corresponding CLI flags.
+
+We recommend carefully reading the [ambiguous provider addresses](#Ambiguous-Provider-Addresses)
+section below, which may impact versions `0.12` and `0.13`.
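As a concrete illustration of the sources listed above, here is a minimal sketch of feeding one such JSON output into this module. The embedded JSON literal is a stand-in for real `terraform version -json` output; only `ParseProviderSource` and `ForDisplay` from this package are assumed:

```go
package main

import (
	"encoding/json"
	"fmt"

	tfaddr "github.com/hashicorp/terraform-registry-address"
)

func main() {
	// Stand-in for `terraform version -json` output, trimmed to the
	// provider_selections field mentioned above.
	raw := []byte(`{"provider_selections":{"registry.terraform.io/hashicorp/aws":"4.8.0"}}`)

	var out struct {
		ProviderSelections map[string]string `json:"provider_selections"`
	}
	if err := json.Unmarshal(raw, &out); err != nil {
		panic(err)
	}

	for src, version := range out.ProviderSelections {
		pAddr, err := tfaddr.ParseProviderSource(src)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s %s\n", pAddr.ForDisplay(), version) // hashicorp/aws 4.8.0
	}
}
```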
+
+## Related Libraries
+
+Other libraries which may help with consuming most of the above Terraform
+outputs in automation:
+
+ - [`hashicorp/terraform-exec`](https://github.com/hashicorp/terraform-exec)
+ - [`hashicorp/terraform-json`](https://github.com/hashicorp/terraform-json)
+
+## Usage
+
+### Provider
 
 ```go
-p, err := ParseRawProviderSourceString("hashicorp/aws")
+pAddr, err := ParseProviderSource("hashicorp/aws")
 if err != nil {
 	// deal with error
 }
 
-// p == Provider{
+// pAddr == Provider{
 //   Type:      "aws",
 //   Namespace: "hashicorp",
-//   Hostname:  svchost.Hostname("registry.terraform.io"),
+//   Hostname:  DefaultProviderRegistryHost,
 // }
 ```
 
-## Legacy address
+### Module
+
+```go
+mAddr, err := ParseModuleSource("hashicorp/consul/aws//modules/consul-cluster")
+if err != nil {
+	// deal with error
+}
+
+// mAddr == Module{
+//   Package: ModulePackage{
+//     Host:         DefaultProviderRegistryHost,
+//     Namespace:    "hashicorp",
+//     Name:         "consul",
+//     TargetSystem: "aws",
+//   },
+//   Subdir: "modules/consul-cluster",
+// },
+```
+
+## Other Module Address Formats
+
+Modules can also be sourced from [other sources](https://www.terraform.io/language/modules/sources)
+and these other sources (outside of Terraform Registry)
+have different address formats, such as `./local` or
+`github.com/hashicorp/example`.
+
+This library does _not_ recognize such other address formats
+and will return an error when parsing them.
+
+## Ambiguous Provider Addresses
+
+Qualified addresses with namespace (such as `hashicorp/aws`)
+are used exclusively in all recent versions (`0.14+`) of Terraform.
+If you only work with Terraform `v0.14.0+` configuration/output, you may
+safely ignore the rest of this section and related part of the API.
+
+There are a few types of ambiguous addresses you may come across:
+
+ - Terraform `v0.12` uses a "namespace-less" address, such as `aws`.
+ - Terraform `v0.13` may use `-` as a placeholder for the unknown namespace,
+   resulting in an address such as `-/aws`.
+ - Terraform `v0.14+` _configuration_ still allows ambiguous providers
+   through a `provider "<name>" {}` block _without_ corresponding
+   entry inside `required_providers`, but these providers are always
+   resolved as `hashicorp/<name>`, and all JSON outputs only use that
+   resolved address.
+
+Both ambiguous address formats are accepted by `ParseProviderSource()`:
 
-A legacy address is by itself (without more context) ambiguous.
-For example `aws` may represent either the official `hashicorp/aws`
-or just any custom-built provider called `aws`.
+```go
+pAddr, err := ParseProviderSource("aws")
+if err != nil {
+	// deal with error
+}
+
+// pAddr == Provider{
+//   Type:      "aws",
+//   Namespace: UnknownProviderNamespace,    // "?"
+//   Hostname:  DefaultProviderRegistryHost, // "registry.terraform.io"
+// }
+pAddr.HasKnownNamespace() // == false
+pAddr.IsLegacy() // == false
+```
+```go
+pAddr, err := ParseProviderSource("-/aws")
+if err != nil {
+	// deal with error
+}
 
-Such ambiguous address can be produced by Terraform `<=0.12`. You can
-just use `ImpliedProviderForUnqualifiedType` if you know for sure
-the address was produced by an affected version.
+// pAddr == Provider{
+//   Type:      "aws",
+//   Namespace: LegacyProviderNamespace,     // "-"
+//   Hostname:  DefaultProviderRegistryHost, // "registry.terraform.io"
+// }
+pAddr.HasKnownNamespace() // == true
+pAddr.IsLegacy() // == true
+```
 
-If you do not have that context you should parse the string via
-`ParseRawProviderSourceString` and then check `addr.IsLegacy()`.
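The two snippets above report the `v0.12`-style and `v0.13`-style ambiguous forms through different flags, so calling code generally needs to check both. A sketch of the dispatch this implies (the resolution steps are placeholders; see the Registry API section below):

```go
pAddr, err := ParseProviderSource(src)
if err != nil {
	// deal with error
}

switch {
case pAddr.IsLegacy():
	// v0.13-style "-/aws": ask the Registry API which namespace the
	// provider was migrated to, then re-parse the returned address.
case !pAddr.HasKnownNamespace():
	// v0.12-style "aws": namespace is the "?" placeholder and must be
	// resolved out of band before the address can be trusted.
default:
	// Fully qualified address; safe to use as-is.
}
```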
+However `NewProvider()` will panic if you pass an empty namespace
+or any placeholder indicating an unknown namespace.
+
+```go
+NewProvider(DefaultProviderRegistryHost, "", "aws")  // panic
+NewProvider(DefaultProviderRegistryHost, "-", "aws") // panic
+NewProvider(DefaultProviderRegistryHost, "?", "aws") // panic
+```
 
-### What to do with a legacy address?
+If you come across an ambiguous address, you should resolve
-Ask the Registry API whether and where the provider was moved to
+it to a fully qualified one and use that one instead.
 
-(`-` represents the legacy, basically unknown namespace)
+### Resolving an Ambiguous Address
+
+The Registry API provides the safest way of resolving an ambiguous address.
 
 ```sh
 # grafana (redirected to its own namespace)
@@ -54,28 +157,15 @@ $ curl -s https://registry.terraform.io/v1/providers/-/aws/versions | jq '(.id,
 null
 ```
 
-Then:
+When you cache results, ensure you have an invalidation
+mechanism in place, as the target (migrated) namespace may change.
 
- - Reparse the _new_ address (`moved_to`) of any _moved_ provider (e.g. `grafana/grafana`) via `ParseRawProviderSourceString`
- - Reparse the full address (`id`) of any other provider (e.g. `hashicorp/aws`)
-
-Depending on context (legacy) `terraform` may need to be parsed separately.
-Read more about this provider below.
-
-If for some reason you cannot ask the Registry API you may also use
-`ParseAndInferProviderSourceString` which assumes that any legacy address
-(including `terraform`) belongs to the `hashicorp` namespace.
-
-If you cache results (which you should), ensure you have invalidation
-mechanism in place because target (migrated) namespace may change.
-Hard-coding migrations anywhere in code is strongly discouraged.
-
-### `terraform` provider
+#### `terraform` provider
 
 Like any other legacy address `terraform` is also ambiguous. Such
 address may (most unlikely) represent a custom-built provider called
 `terraform`, or the now archived [`hashicorp/terraform` provider in the registry](https://registry.terraform.io/providers/hashicorp/terraform/latest),
-or (most likely) the `terraform` provider built into 0.12+, which is
+or (most likely) the `terraform` provider built into 0.11+, which is
 represented via a dedicated FQN of `terraform.io/builtin/terraform` in
 0.13+.
 
 You may be able to differentiate between these different providers if you
@@ -85,4 +175,7 @@ Alternatively you may just treat the address as the builtin provider,
 i.e. assume all of its logic including schema is contained within
 Terraform Core.
 
-In such case you should just use `NewBuiltInProvider("terraform")`.
+In such a case you should construct the address in the following way:
+
+```go
+pAddr := NewProvider(BuiltInProviderHost, BuiltInProviderNamespace, "terraform")
+```
diff --git a/vendor/github.com/hashicorp/terraform-registry-address/go.mod b/vendor/github.com/hashicorp/terraform-registry-address/go.mod
index a82603ea5..44328056f 100644
--- a/vendor/github.com/hashicorp/terraform-registry-address/go.mod
+++ b/vendor/github.com/hashicorp/terraform-registry-address/go.mod
@@ -3,7 +3,7 @@ module github.com/hashicorp/terraform-registry-address
 go 1.14
 
 require (
-	github.com/google/go-cmp v0.5.5
+	github.com/google/go-cmp v0.5.8
 	github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734
 	golang.org/x/net v0.0.0-20210119194325-5f4716e94777
 )
diff --git a/vendor/github.com/hashicorp/terraform-registry-address/go.sum b/vendor/github.com/hashicorp/terraform-registry-address/go.sum
index 85038dfa9..83124050c 100644
--- a/vendor/github.com/hashicorp/terraform-registry-address/go.sum
+++ b/vendor/github.com/hashicorp/terraform-registry-address/go.sum
@@ -1,20 +1,16 @@
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0=
 github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk=
 github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0=
 github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
 github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
@@ -36,8 +32,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/vendor/github.com/hashicorp/terraform-registry-address/module.go b/vendor/github.com/hashicorp/terraform-registry-address/module.go
new file mode 100644
index 000000000..6af0c5976
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-registry-address/module.go
@@ -0,0 +1,251 @@
+package tfaddr
+
+import (
+	"fmt"
+	"path"
+	"regexp"
+	"strings"
+
+	svchost "github.com/hashicorp/terraform-svchost"
+)
+
+// Module represents a module listed in a Terraform module
+// registry.
+type Module struct {
+	// Package is the registry package that the target module belongs to.
+	// The module installer must translate this into a ModuleSourceRemote
+	// using the registry API and then take that underlying address's
+	// Package in order to find the actual package location.
+	Package ModulePackage
+
+	// If Subdir is non-empty then it represents a sub-directory within the
+	// remote package that the registry address eventually resolves to.
+	// This will ultimately become the suffix of the Subdir of the
+	// ModuleSourceRemote that the registry address translates to.
+	//
+	// Subdir uses a normalized forward-slash-based path syntax within the
+	// virtual filesystem represented by the final package. It will never
+	// include `../` or `./` sequences.
+	Subdir string
+}
+
+// DefaultModuleRegistryHost is the hostname used for registry-based module
+// source addresses that do not have an explicit hostname.
+const DefaultModuleRegistryHost = svchost.Hostname("registry.terraform.io")
+
+var moduleRegistryNamePattern = regexp.MustCompile("^[0-9A-Za-z](?:[0-9A-Za-z-_]{0,62}[0-9A-Za-z])?$")
+var moduleRegistryTargetSystemPattern = regexp.MustCompile("^[0-9a-z]{1,64}$")
+
+// ParseModuleSource only accepts module registry addresses, and
+// will reject any other address type.
+func ParseModuleSource(raw string) (Module, error) {
+	var err error
+
+	var subDir string
+	raw, subDir = splitPackageSubdir(raw)
+	if strings.HasPrefix(subDir, "../") {
+		return Module{}, fmt.Errorf("subdirectory path %q leads outside of the module package", subDir)
+	}
+
+	parts := strings.Split(raw, "/")
+	// A valid registry address has either three or four parts, because the
+	// leading hostname part is optional.
+	if len(parts) != 3 && len(parts) != 4 {
+		return Module{}, fmt.Errorf("a module registry source address must have either three or four slash-separated components")
+	}
+
+	host := DefaultModuleRegistryHost
+	if len(parts) == 4 {
+		host, err = svchost.ForComparison(parts[0])
+		if err != nil {
+			// The svchost library doesn't produce very good error messages to
+			// return to an end-user, so we'll use some custom ones here.
+			switch {
+			case strings.Contains(parts[0], "--"):
+				// Looks like possibly punycode, which we don't allow here
+				// to ensure that source addresses are written readably.
+				return Module{}, fmt.Errorf("invalid module registry hostname %q; internationalized domain names must be given as direct unicode characters, not in punycode", parts[0])
+			default:
+				return Module{}, fmt.Errorf("invalid module registry hostname %q", parts[0])
+			}
+		}
+		if !strings.Contains(host.String(), ".") {
+			return Module{}, fmt.Errorf("invalid module registry hostname: must contain at least one dot")
+		}
+		// Discard the hostname prefix now that we've processed it
+		parts = parts[1:]
+	}
+
+	ret := Module{
+		Package: ModulePackage{
+			Host: host,
+		},
+
+		Subdir: subDir,
+	}
+
+	if host == svchost.Hostname("github.com") || host == svchost.Hostname("bitbucket.org") {
+		return ret, fmt.Errorf("can't use %q as a module registry host, because it's reserved for installing directly from version control repositories", host)
+	}
+
+	if ret.Package.Namespace, err = parseModuleRegistryName(parts[0]); err != nil {
+		if strings.Contains(parts[0], ".") {
+			// Seems like the user omitted one of the latter components in
+			// an address with an explicit hostname.
+			return ret, fmt.Errorf("source address must have three more components after the hostname: the namespace, the name, and the target system")
+		}
+		return ret, fmt.Errorf("invalid namespace %q: %s", parts[0], err)
+	}
+	if ret.Package.Name, err = parseModuleRegistryName(parts[1]); err != nil {
+		return ret, fmt.Errorf("invalid module name %q: %s", parts[1], err)
+	}
+	if ret.Package.TargetSystem, err = parseModuleRegistryTargetSystem(parts[2]); err != nil {
+		if strings.Contains(parts[2], "?") {
+			// The user was trying to include a query string, probably?
+			return ret, fmt.Errorf("module registry addresses may not include a query string portion")
+		}
+		return ret, fmt.Errorf("invalid target system %q: %s", parts[2], err)
+	}
+
+	return ret, nil
+}
+
+// MustParseModuleSource is a wrapper around ParseModuleSource that panics if
+// it returns an error.
+func MustParseModuleSource(raw string) Module {
+	mod, err := ParseModuleSource(raw)
+	if err != nil {
+		panic(err)
+	}
+	return mod
+}
+
+// parseModuleRegistryName validates and normalizes a string in either the
+// "namespace" or "name" position of a module registry source address.
+func parseModuleRegistryName(given string) (string, error) {
+	// Similar to the names in provider source addresses, we defined these
+	// to be compatible with what filesystems and typical remote systems
+	// like GitHub allow in names. Unfortunately we didn't end up defining
+	// these exactly equivalently: provider names can only use dashes as
+	// punctuation, whereas module names can use underscores. So here we're
+	// using some regular expressions from the original module source
+	// implementation, rather than using the IDNA rules as we do in
+	// ParseProviderPart.
+
+	if !moduleRegistryNamePattern.MatchString(given) {
+		return "", fmt.Errorf("must be between one and 64 characters, including ASCII letters, digits, dashes, and underscores, where dashes and underscores may not be the prefix or suffix")
+	}
+
+	// We also skip normalizing the name to lowercase, because we historically
+	// didn't do that and so existing module registries might be doing
+	// case-sensitive matching.
+	return given, nil
+}
+
+// parseModuleRegistryTargetSystem validates and normalizes a string in the
+// "target system" position of a module registry source address. This is
+// what we historically called "provider" but never actually enforced as
+// being a provider address, and now _cannot_ be a provider address because
+// provider addresses have three slash-separated components of their own.
+func parseModuleRegistryTargetSystem(given string) (string, error) {
+	// Similar to the names in provider source addresses, we defined these
+	// to be compatible with what filesystems and typical remote systems
+	// like GitHub allow in names. Unfortunately we didn't end up defining
+	// these exactly equivalently: provider names can't use dashes or
+	// underscores. So here we're using some regular expressions from the
+	// original module source implementation, rather than using the IDNA rules
+	// as we do in ParseProviderPart.
+
+	if !moduleRegistryTargetSystemPattern.MatchString(given) {
+		return "", fmt.Errorf("must be between one and 64 ASCII letters or digits")
+	}
+
+	// We also skip normalizing the name to lowercase, because we historically
+	// didn't do that and so existing module registries might be doing
+	// case-sensitive matching.
+	return given, nil
+}
+
+// String returns a full representation of the address, including any
+// additional components that are typically implied by omission in
+// user-written addresses.
+//
+// We typically use this longer representation in error messages, in case
+// the inclusion of normally-omitted components is helpful in debugging
+// unexpected behavior.
+func (s Module) String() string {
+	if s.Subdir != "" {
+		return s.Package.String() + "//" + s.Subdir
+	}
+	return s.Package.String()
+}
+
+// ForDisplay is similar to String but instead returns a representation of
+// the idiomatic way to write the address in configuration, omitting
+// components that are commonly just implied in addresses written by
+// users.
+//
+// We typically use this shorter representation in informational messages,
+// such as the note that we're about to start downloading a package.
+func (s Module) ForDisplay() string {
+	if s.Subdir != "" {
+		return s.Package.ForDisplay() + "//" + s.Subdir
+	}
+	return s.Package.ForDisplay()
+}
+
+// splitPackageSubdir detects whether the given address string has a
+// subdirectory portion, and if so returns a non-empty subDir string
+// along with the trimmed package address.
+//
+// If the given string doesn't have a subdirectory portion then it'll
+// just be returned verbatim in packageAddr, with an empty subDir value.
+func splitPackageSubdir(given string) (packageAddr, subDir string) {
+	packageAddr, subDir = sourceDirSubdir(given)
+	if subDir != "" {
+		subDir = path.Clean(subDir)
+	}
+	return packageAddr, subDir
+}
+
+// sourceDirSubdir takes a source URL and returns a tuple of the URL without
+// the subdir and the subdir.
+//
+// ex:
+//   dom.com/path/?q=p               => dom.com/path/?q=p, ""
+//   proto://dom.com/path//*?q=p     => proto://dom.com/path?q=p, "*"
+//   proto://dom.com/path//path2?q=p => proto://dom.com/path?q=p, "path2"
+func sourceDirSubdir(src string) (string, string) {
+	// URL might contain another URL in query parameters
+	stop := len(src)
+	if idx := strings.Index(src, "?"); idx > -1 {
+		stop = idx
+	}
+
+	// Calculate an offset to avoid accidentally marking the scheme
+	// as the dir.
+	var offset int
+	if idx := strings.Index(src[:stop], "://"); idx > -1 {
+		offset = idx + 3
+	}
+
+	// First see if we even have an explicit subdir
+	idx := strings.Index(src[offset:stop], "//")
+	if idx == -1 {
+		return src, ""
+	}
+
+	idx += offset
+	subdir := src[idx+2:]
+	src = src[:idx]
+
+	// Next, check if we have query parameters and push them onto the
+	// URL.
+	if idx = strings.Index(subdir, "?"); idx > -1 {
+		query := subdir[idx:]
+		subdir = subdir[:idx]
+		src += query
+	}
+
+	return src, subdir
+}
diff --git a/vendor/github.com/hashicorp/terraform-registry-address/module_package.go b/vendor/github.com/hashicorp/terraform-registry-address/module_package.go
new file mode 100644
index 000000000..d8ad2534a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-registry-address/module_package.go
@@ -0,0 +1,58 @@
+package tfaddr
+
+import (
+	"strings"
+
+	svchost "github.com/hashicorp/terraform-svchost"
+)
+
+// A ModulePackage is a module registry package address: an extra
+// indirection over the remote module package it refers to, where we
+// use a module registry to translate a more symbolic address (and
+// associated version constraint given out of band) into a physical source
+// location.
+//
+// A registry package address is distinct from the remote package address
+// it resolves to because they have disjoint use-cases: registry package
+// addresses are only used to query a registry in order to find a real
+// module package address. These being distinct is intended to help future
+// maintainers more easily follow the series of steps in the module
+// installer, with the help of the type checker.
+type ModulePackage struct {
+	Host         svchost.Hostname
+	Namespace    string
+	Name         string
+	TargetSystem string
+}
+
+func (s ModulePackage) String() string {
+	// Note: we're using the "display" form of the hostname here because
+	// for our service hostnames "for display" means something different:
+	// it means to render non-ASCII characters directly as Unicode
+	// characters, rather than using the "punycode" representation we
+	// use for internal processing, and so the "display" representation
+	// is actually what users would write in their configurations.
+	return s.Host.ForDisplay() + "/" + s.ForRegistryProtocol()
+}
+
+func (s ModulePackage) ForDisplay() string {
+	if s.Host == DefaultModuleRegistryHost {
+		return s.ForRegistryProtocol()
+	}
+	return s.Host.ForDisplay() + "/" + s.ForRegistryProtocol()
+}
+
+// ForRegistryProtocol returns a string representation of just the namespace,
+// name, and target system portions of the address, always omitting the
+// registry hostname and the subdirectory portion, if any.
+//
+// This is primarily intended for generating addresses to send to the
+// registry in question via the registry protocol, since the protocol
+// skips sending the registry its own hostname as part of identifiers.
+func (s ModulePackage) ForRegistryProtocol() string {
+	var buf strings.Builder
+	buf.WriteString(s.Namespace)
+	buf.WriteByte('/')
+	buf.WriteString(s.Name)
+	buf.WriteByte('/')
+	buf.WriteString(s.TargetSystem)
+	return buf.String()
+}
diff --git a/vendor/github.com/hashicorp/terraform-registry-address/provider.go b/vendor/github.com/hashicorp/terraform-registry-address/provider.go
index 4dd0a5b78..23e1e221f 100644
--- a/vendor/github.com/hashicorp/terraform-registry-address/provider.go
+++ b/vendor/github.com/hashicorp/terraform-registry-address/provider.go
@@ -16,9 +16,9 @@ type Provider struct {
 	Hostname  svchost.Hostname
 }
 
-// DefaultRegistryHost is the hostname used for provider addresses that do
+// DefaultProviderRegistryHost is the hostname used for provider addresses that do
 // not have an explicit hostname.
-const DefaultRegistryHost = svchost.Hostname("registry.terraform.io")
+const DefaultProviderRegistryHost = svchost.Hostname("registry.terraform.io")
 
 // BuiltInProviderHost is the pseudo-hostname used for the "built-in" provider
 // namespace. Built-in provider addresses must also have their namespace set
@@ -34,11 +34,18 @@ const BuiltInProviderHost = svchost.Hostname("terraform.io")
 // special, even if they haven't encountered the concept formally yet.
 const BuiltInProviderNamespace = "builtin"
 
+// UnknownProviderNamespace is the special string used to indicate
+// an unknown namespace, e.g. in "aws". This is equivalent to
+// LegacyProviderNamespace for a <0.12 style address. This namespace
+// would never be produced by Terraform itself explicitly; it is
+// only an internal placeholder.
+const UnknownProviderNamespace = "?"
+
 // LegacyProviderNamespace is the special string used in the Namespace field
 // of type Provider to mark a legacy provider address. This special namespace
 // value would normally be invalid, and can be used only when the hostname is
-// DefaultRegistryHost because that host owns the mapping from legacy name to
-// FQN.
+// DefaultProviderRegistryHost because that host owns the mapping from legacy name to
+// FQN. This may be produced by Terraform 0.13.
 const LegacyProviderNamespace = "-"
 
 // String returns an FQN string, intended for use in machine-readable output.
@@ -56,7 +63,7 @@ func (pt Provider) ForDisplay() string {
 		panic("called ForDisplay on zero-value addrs.Provider")
 	}
 
-	if pt.Hostname == DefaultRegistryHost {
+	if pt.Hostname == DefaultProviderRegistryHost {
 		return pt.Namespace + "/" + pt.Type
 	}
 	return pt.Hostname.ForDisplay() + "/" + pt.Namespace + "/" + pt.Type
@@ -75,10 +82,18 @@ func (pt Provider) ForDisplay() string {
 // ParseProviderPart first to check that the given value is valid.
 func NewProvider(hostname svchost.Hostname, namespace, typeName string) Provider {
 	if namespace == LegacyProviderNamespace {
-		// Legacy provider addresses must always be created via
-		// NewLegacyProvider so that we can use static analysis to find
-		// codepaths still working with those.
- panic("attempt to create legacy provider address using NewProvider; use NewLegacyProvider instead") + // Legacy provider addresses must always be created via struct + panic("attempt to create legacy provider address using NewProvider; use Provider{} instead") + } + if namespace == UnknownProviderNamespace { + // Provider addresses with unknown namespace must always + // be created via struct + panic("attempt to create provider address with unknown namespace using NewProvider; use Provider{} instead") + } + if namespace == "" { + // This case is already handled by MustParseProviderPart() below, + // but we catch it early to provide more helpful message. + panic("attempt to create provider address with empty namespace") } return Provider{ @@ -88,63 +103,6 @@ func NewProvider(hostname svchost.Hostname, namespace, typeName string) Provider } } -// ImpliedProviderForUnqualifiedType represents the rules for inferring what -// provider FQN a user intended when only a naked type name is available. -// -// For all except the type name "terraform" this returns a so-called "default" -// provider, which is under the registry.terraform.io/hashicorp/ namespace. -// -// As a special case, the string "terraform" maps to -// "terraform.io/builtin/terraform" because that is the more likely user -// intent than the now-unmaintained "registry.terraform.io/hashicorp/terraform" -// which remains only for compatibility with older Terraform versions. -func ImpliedProviderForUnqualifiedType(typeName string) Provider { - switch typeName { - case "terraform": - // Note for future maintainers: any additional strings we add here - // as implied to be builtin must never also be use as provider names - // in the registry.terraform.io/hashicorp/... namespace, because - // otherwise older versions of Terraform could implicitly select - // the registry name instead of the internal one. - return NewBuiltInProvider(typeName) - default: - return NewDefaultProvider(typeName) - } -} - -// NewDefaultProvider returns the default address of a HashiCorp-maintained, -// Registry-hosted provider. -func NewDefaultProvider(name string) Provider { - return Provider{ - Type: MustParseProviderPart(name), - Namespace: "hashicorp", - Hostname: DefaultRegistryHost, - } -} - -// NewBuiltInProvider returns the address of a "built-in" provider. See -// the docs for Provider.IsBuiltIn for more information. -func NewBuiltInProvider(name string) Provider { - return Provider{ - Type: MustParseProviderPart(name), - Namespace: BuiltInProviderNamespace, - Hostname: BuiltInProviderHost, - } -} - -// NewLegacyProvider returns a mock address for a provider. -// This will be removed when ProviderType is fully integrated. -func NewLegacyProvider(name string) Provider { - return Provider{ - // We intentionally don't normalize and validate the legacy names, - // because existing code expects legacy provider names to pass through - // verbatim, even if not compliant with our new naming rules. - Type: name, - Namespace: LegacyProviderNamespace, - Hostname: DefaultRegistryHost, - } -} - // LegacyString returns the provider type, which is frequently used // interchangeably with provider name. This function can and should be removed // when provider type is fully integrated. 
As a safeguard for future
@@ -167,6 +125,12 @@ func (pt Provider) IsZero() bool {
 	return pt == Provider{}
 }
 
+// HasKnownNamespace returns true if the provider namespace is known
+// (including the legacy namespace)
+func (pt Provider) HasKnownNamespace() bool {
+	return pt.Namespace != UnknownProviderNamespace
+}
+
 // IsBuiltIn returns true if the receiver is the address of a "built-in"
 // provider. That is, a provider under terraform.io/builtin/ which is
 // included as part of the Terraform binary itself rather than one to be
@@ -201,25 +165,16 @@ func (pt Provider) IsLegacy() bool {
 		panic("called IsLegacy() on zero-value addrs.Provider")
 	}
 
-	return pt.Hostname == DefaultRegistryHost && pt.Namespace == LegacyProviderNamespace
+	return pt.Hostname == DefaultProviderRegistryHost && pt.Namespace == LegacyProviderNamespace
 }
 
-// IsDefault returns true if the provider is a default hashicorp provider
-func (pt Provider) IsDefault() bool {
-	if pt.IsZero() {
-		panic("called IsDefault() on zero-value addrs.Provider")
-	}
-
-	return pt.Hostname == DefaultRegistryHost && pt.Namespace == "hashicorp"
-}
-
 // Equals returns true if the receiver and other provider have the same attributes.
 func (pt Provider) Equals(other Provider) bool {
 	return pt == other
 }
 
-// ParseRawProviderSourceString parses the source attribute and returns a provider.
+// ParseProviderSource parses the source attribute and returns a provider.
 // This is intended primarily to parse the FQN-like strings returned by
 // terraform-config-inspect.
 //
@@ -230,7 +185,7 @@ func (pt Provider) Equals(other Provider) bool {
 //
 // "name"-only format is parsed as -/name (i.e. legacy namespace)
 // requiring further identification of the namespace via Registry API
-func ParseRawProviderSourceString(str string) (Provider, error) {
+func ParseProviderSource(str string) (Provider, error) {
 	var ret Provider
 	parts, err := parseSourceStringParts(str)
 	if err != nil {
@@ -239,10 +194,14 @@
 	name := parts[len(parts)-1]
 	ret.Type = name
-	ret.Hostname = DefaultRegistryHost
+	ret.Hostname = DefaultProviderRegistryHost
 
 	if len(parts) == 1 {
-		return NewLegacyProvider(name), nil
+		return Provider{
+			Hostname:  DefaultProviderRegistryHost,
+			Namespace: UnknownProviderNamespace,
+			Type:      name,
+		}, nil
 	}
 
 	if len(parts) >= 2 {
@@ -278,13 +237,13 @@
 		ret.Hostname = hn
 	}
 
-	if ret.Namespace == LegacyProviderNamespace && ret.Hostname != DefaultRegistryHost {
+	if ret.Namespace == LegacyProviderNamespace && ret.Hostname != DefaultProviderRegistryHost {
 		// Legacy provider addresses must always be on the default registry
 		// host, because the default registry host decides what actual FQN
 		// each one maps to.
 		return Provider{}, &ParserError{
 			Summary: "Invalid provider namespace",
-			Detail:  "The legacy provider namespace \"-\" can be used only with hostname " + DefaultRegistryHost.ForDisplay() + ".",
+			Detail:  "The legacy provider namespace \"-\" can be used only with hostname " + DefaultProviderRegistryHost.ForDisplay() + ".",
 		}
 	}
 
@@ -332,28 +291,52 @@
 	return ret, nil
 }
 
-// ParseAndInferProviderSourceString parses the source attribute and returns a provider.
-// This is intended primarily to parse the FQN-like strings returned by
-// terraform-config-inspect.
-//
-// The following are valid source string formats:
-// 		name
-// 		namespace/name
-// 		hostname/namespace/name
-//
-// "name" format is assumed to be hashicorp/name
-func ParseAndInferProviderSourceString(str string) (Provider, error) {
-	var ret Provider
-	parts, err := parseSourceStringParts(str)
+// MustParseProviderSource is a wrapper around ParseProviderSource that panics if
+// it returns an error.
+func MustParseProviderSource(raw string) Provider {
+	p, err := ParseProviderSource(raw)
 	if err != nil {
-		return ret, err
+		panic(err)
 	}
+	return p
+}
 
-	if len(parts) == 1 {
-		return NewDefaultProvider(parts[0]), nil
+// ValidateProviderAddress returns an error if the given address is not an FQN,
+// that is, if it is missing any of the three components from
+// hostname/namespace/name.
+func ValidateProviderAddress(raw string) error {
+	parts, err := parseSourceStringParts(raw)
+	if err != nil {
+		return err
+	}
+
+	if len(parts) != 3 {
+		return &ParserError{
+			Summary: "Invalid provider address format",
+			Detail:  `Expected FQN in the format "hostname/namespace/name"`,
+		}
+	}
+
+	p, err := ParseProviderSource(raw)
+	if err != nil {
+		return err
+	}
+
+	if !p.HasKnownNamespace() {
+		return &ParserError{
+			Summary: "Unknown provider namespace",
+			Detail:  `Expected FQN in the format "hostname/namespace/name"`,
+		}
+	}
+
+	if p.IsLegacy() {
+		return &ParserError{
+			Summary: "Invalid legacy provider namespace",
+			Detail:  `Expected FQN in the format "hostname/namespace/name"`,
+		}
 	}
 
-	return ParseRawProviderSourceString(str)
+	return nil
 }
 
 func parseSourceStringParts(str string) ([]string, error) {
@@ -390,16 +373,6 @@ func parseSourceStringParts(str string) ([]string, error) {
 	return parts, nil
 }
 
-// MustParseRawProviderSourceString is a wrapper around ParseRawProviderSourceString that panics if
-// it returns an error.
-func MustParseRawProviderSourceString(str string) Provider {
-	result, err := ParseRawProviderSourceString(str)
-	if err != nil {
-		panic(err)
-	}
-	return result
-}
-
 // ParseProviderPart processes an addrs.Provider namespace or type string
 // provided by an end-user, producing a normalized version if possible or
 // an error if the string contains invalid characters.
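For reference, a short sketch of how `ValidateProviderAddress` behaves on a few representative inputs, assuming the semantics described in its doc comment (only fully qualified, non-legacy addresses pass):

```go
// Accepted: all three components present, known non-legacy namespace.
err := ValidateProviderAddress("registry.terraform.io/hashicorp/aws")
// err == nil

// Rejected: only two components, so not an FQN.
err = ValidateProviderAddress("hashicorp/aws")
// err != nil ("Invalid provider address format")

// Rejected: legacy "-" namespace placeholder.
err = ValidateProviderAddress("registry.terraform.io/-/aws")
// err != nil ("Invalid legacy provider namespace")
```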
@@ -468,15 +441,3 @@ func MustParseProviderPart(given string) string {
 	}
 	return result
 }
-
-// IsProviderPartNormalized compares a given string to the result of ParseProviderPart(string)
-func IsProviderPartNormalized(str string) (bool, error) {
-	normalized, err := ParseProviderPart(str)
-	if err != nil {
-		return false, err
-	}
-	if str == normalized {
-		return true, nil
-	}
-	return false, nil
-}
diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go
index be6ebca9c..f6a00199c 100644
--- a/vendor/github.com/hashicorp/yamux/addr.go
+++ b/vendor/github.com/hashicorp/yamux/addr.go
@@ -54,7 +54,7 @@ func (s *Stream) LocalAddr() net.Addr {
 	return s.session.LocalAddr()
 }
 
-// LocalAddr returns the remote address
+// RemoteAddr returns the remote address
 func (s *Stream) RemoteAddr() net.Addr {
 	return s.session.RemoteAddr()
 }
diff --git a/vendor/github.com/hashicorp/yamux/const.go b/vendor/github.com/hashicorp/yamux/const.go
index 4f5293828..2fdbf844a 100644
--- a/vendor/github.com/hashicorp/yamux/const.go
+++ b/vendor/github.com/hashicorp/yamux/const.go
@@ -5,6 +5,25 @@ import (
 	"fmt"
 )
 
+// NetError implements net.Error
+type NetError struct {
+	err       error
+	timeout   bool
+	temporary bool
+}
+
+func (e *NetError) Error() string {
+	return e.err.Error()
+}
+
+func (e *NetError) Timeout() bool {
+	return e.timeout
+}
+
+func (e *NetError) Temporary() bool {
+	return e.temporary
+}
+
 var (
 	// ErrInvalidVersion means we received a frame with an
 	// invalid version
@@ -30,7 +49,13 @@ var (
 	ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded")
 
 	// ErrTimeout is used when we reach an IO deadline
-	ErrTimeout = fmt.Errorf("i/o deadline reached")
+	ErrTimeout = &NetError{
+		err: fmt.Errorf("i/o deadline reached"),
+
+		// Error should meet the net.Error interface for timeouts for compatibility
+		// with standard library expectations, such as http servers.
+		timeout: true,
+	}
 
 	// ErrStreamClosed is returned when using a closed stream
 	ErrStreamClosed = fmt.Errorf("stream closed")
diff --git a/vendor/github.com/hashicorp/yamux/go.mod b/vendor/github.com/hashicorp/yamux/go.mod
index 672a0e581..dd8974d3f 100644
--- a/vendor/github.com/hashicorp/yamux/go.mod
+++ b/vendor/github.com/hashicorp/yamux/go.mod
@@ -1 +1,3 @@
 module github.com/hashicorp/yamux
+
+go 1.15
diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go
index 18a078c8a..0c3e67b02 100644
--- a/vendor/github.com/hashicorp/yamux/mux.go
+++ b/vendor/github.com/hashicorp/yamux/mux.go
@@ -31,6 +31,20 @@ type Config struct {
 	// window size that we allow for a stream.
 	MaxStreamWindowSize uint32
 
+	// StreamOpenTimeout is the maximum amount of time that a stream will
+	// be allowed to remain in a pending state while waiting for an ack from the peer.
+	// Once the timeout is reached the session will be gracefully closed.
+	// A zero value disables the StreamOpenTimeout, allowing unbounded
+	// blocking on OpenStream calls.
+	StreamOpenTimeout time.Duration
+
+	// StreamCloseTimeout is the maximum time that a stream will be allowed to
+	// be in a half-closed state when `Close` is called before forcibly
+	// closing the connection. Forcibly closed connections will empty the
+	// receive buffer, drop any future packets received for that stream,
+	// and send a RST to the remote side.
+	StreamCloseTimeout time.Duration
+
 	// LogOutput is used to control the log destination. Either Logger or
 	// LogOutput can be set, not both.
LogOutput io.Writer @@ -48,6 +62,8 @@ func DefaultConfig() *Config { KeepAliveInterval: 30 * time.Second, ConnectionWriteTimeout: 10 * time.Second, MaxStreamWindowSize: initialStreamWindow, + StreamCloseTimeout: 5 * time.Minute, + StreamOpenTimeout: 75 * time.Second, LogOutput: os.Stderr, } } diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go index a80ddec35..38fe3ed1f 100644 --- a/vendor/github.com/hashicorp/yamux/session.go +++ b/vendor/github.com/hashicorp/yamux/session.go @@ -2,6 +2,7 @@ package yamux import ( "bufio" + "bytes" "fmt" "io" "io/ioutil" @@ -63,24 +64,27 @@ type Session struct { // sendCh is used to mark a stream as ready to send, // or to send a header out directly. - sendCh chan sendReady + sendCh chan *sendReady // recvDoneCh is closed when recv() exits to avoid a race // between stream registration and stream shutdown recvDoneCh chan struct{} + sendDoneCh chan struct{} // shutdown is used to safely close a session - shutdown bool - shutdownErr error - shutdownCh chan struct{} - shutdownLock sync.Mutex + shutdown bool + shutdownErr error + shutdownCh chan struct{} + shutdownLock sync.Mutex + shutdownErrLock sync.Mutex } // sendReady is used to either mark a stream as ready // or to directly send a header type sendReady struct { Hdr []byte - Body io.Reader + mu sync.Mutex // Protects Body from unsafe reads. + Body []byte Err chan error } @@ -101,8 +105,9 @@ func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session { inflight: make(map[uint32]struct{}), synCh: make(chan struct{}, config.AcceptBacklog), acceptCh: make(chan *Stream, config.AcceptBacklog), - sendCh: make(chan sendReady, 64), + sendCh: make(chan *sendReady, 64), recvDoneCh: make(chan struct{}), + sendDoneCh: make(chan struct{}), shutdownCh: make(chan struct{}), } if client { @@ -184,6 +189,10 @@ GET_ID: s.inflight[id] = struct{}{} s.streamLock.Unlock() + if s.config.StreamOpenTimeout > 0 { + go s.setOpenTimeout(stream) + } + // Send the window update to create if err := stream.sendWindowUpdate(); err != nil { select { @@ -196,6 +205,27 @@ GET_ID: return stream, nil } +// setOpenTimeout implements a timeout for streams that are opened but not established. +// If the StreamOpenTimeout is exceeded we assume the peer is unable to ACK, +// and close the session. +// The number of running timers is bounded by the capacity of the synCh. +func (s *Session) setOpenTimeout(stream *Stream) { + timer := time.NewTimer(s.config.StreamOpenTimeout) + defer timer.Stop() + + select { + case <-stream.establishCh: + return + case <-s.shutdownCh: + return + case <-timer.C: + // Timeout reached while waiting for ACK. + // Close the session to force connection re-establishment. + s.logger.Printf("[ERR] yamux: aborted stream open (destination=%s): %v", s.RemoteAddr().String(), ErrTimeout.err) + s.Close() + } +} + // Accept is used to block until the next available stream // is ready to be accepted. func (s *Session) Accept() (net.Conn, error) { @@ -230,10 +260,15 @@ func (s *Session) Close() error { return nil } s.shutdown = true + + s.shutdownErrLock.Lock() if s.shutdownErr == nil { s.shutdownErr = ErrSessionShutdown } + s.shutdownErrLock.Unlock() + close(s.shutdownCh) + s.conn.Close() <-s.recvDoneCh @@ -242,17 +277,18 @@ func (s *Session) Close() error { for _, stream := range s.streams { stream.forceClose() } + <-s.sendDoneCh return nil } // exitErr is used to handle an error that is causing the // session to terminate. 
func (s *Session) exitErr(err error) { - s.shutdownLock.Lock() + s.shutdownErrLock.Lock() if s.shutdownErr == nil { s.shutdownErr = err } - s.shutdownLock.Unlock() + s.shutdownErrLock.Unlock() s.Close() } @@ -327,7 +363,7 @@ func (s *Session) keepalive() { } // waitForSendErr waits to send a header, checking for a potential shutdown -func (s *Session) waitForSend(hdr header, body io.Reader) error { +func (s *Session) waitForSend(hdr header, body []byte) error { errCh := make(chan error, 1) return s.waitForSendErr(hdr, body, errCh) } @@ -335,7 +371,7 @@ func (s *Session) waitForSend(hdr header, body io.Reader) error { // waitForSendErr waits to send a header with optional data, checking for a // potential shutdown. Since there's the expectation that sends can happen // in a timely manner, we enforce the connection write timeout here. -func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error { +func (s *Session) waitForSendErr(hdr header, body []byte, errCh chan error) error { t := timerPool.Get() timer := t.(*time.Timer) timer.Reset(s.config.ConnectionWriteTimeout) @@ -348,7 +384,7 @@ func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) e timerPool.Put(t) }() - ready := sendReady{Hdr: hdr, Body: body, Err: errCh} + ready := &sendReady{Hdr: hdr, Body: body, Err: errCh} select { case s.sendCh <- ready: case <-s.shutdownCh: @@ -357,12 +393,34 @@ func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) e return ErrConnectionWriteTimeout } + bodyCopy := func() { + if body == nil { + return // A nil body is ignored. + } + + // In the event of session shutdown or connection write timeout, + // we need to prevent `send` from reading the body buffer after + // returning from this function since the caller may re-use the + // underlying array. + ready.mu.Lock() + defer ready.mu.Unlock() + + if ready.Body == nil { + return // Body was already copied in `send`. + } + newBody := make([]byte, len(body)) + copy(newBody, body) + ready.Body = newBody + } + select { case err := <-errCh: return err case <-s.shutdownCh: + bodyCopy() return ErrSessionShutdown case <-timer.C: + bodyCopy() return ErrConnectionWriteTimeout } } @@ -384,7 +442,7 @@ func (s *Session) sendNoWait(hdr header) error { }() select { - case s.sendCh <- sendReady{Hdr: hdr}: + case s.sendCh <- &sendReady{Hdr: hdr}: return nil case <-s.shutdownCh: return ErrSessionShutdown @@ -395,39 +453,59 @@ func (s *Session) sendNoWait(hdr header) error { // send is a long running goroutine that sends data func (s *Session) send() { + if err := s.sendLoop(); err != nil { + s.exitErr(err) + } +} + +func (s *Session) sendLoop() error { + defer close(s.sendDoneCh) + var bodyBuf bytes.Buffer for { + bodyBuf.Reset() + select { case ready := <-s.sendCh: // Send a header if ready if ready.Hdr != nil { - sent := 0 - for sent < len(ready.Hdr) { - n, err := s.conn.Write(ready.Hdr[sent:]) - if err != nil { - s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) - asyncSendErr(ready.Err, err) - s.exitErr(err) - return - } - sent += n + _, err := s.conn.Write(ready.Hdr) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) + asyncSendErr(ready.Err, err) + return err } } - // Send data from a body if given + ready.mu.Lock() if ready.Body != nil { - _, err := io.Copy(s.conn, ready.Body) + // Copy the body into the buffer to avoid + // holding a mutex lock during the write. 
+ _, err := bodyBuf.Write(ready.Body) + if err != nil { + ready.Body = nil + ready.mu.Unlock() + s.logger.Printf("[ERR] yamux: Failed to copy body into buffer: %v", err) + asyncSendErr(ready.Err, err) + return err + } + ready.Body = nil + } + ready.mu.Unlock() + + if bodyBuf.Len() > 0 { + // Send data from a body if given + _, err := s.conn.Write(bodyBuf.Bytes()) if err != nil { s.logger.Printf("[ERR] yamux: Failed to write body: %v", err) asyncSendErr(ready.Err, err) - s.exitErr(err) - return + return err } } // No error, successful send asyncSendErr(ready.Err, nil) case <-s.shutdownCh: - return + return nil } } } @@ -614,8 +692,9 @@ func (s *Session) incomingStream(id uint32) error { // Backlog exceeded! RST the stream s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset") delete(s.streams, id) - stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0) - return s.sendNoWait(stream.sendHdr) + hdr := header(make([]byte, headerSize)) + hdr.encode(typeWindowUpdate, flagRST, id, 0) + return s.sendNoWait(hdr) } } diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go index aa2391973..d197d28e5 100644 --- a/vendor/github.com/hashicorp/yamux/stream.go +++ b/vendor/github.com/hashicorp/yamux/stream.go @@ -2,6 +2,7 @@ package yamux import ( "bytes" + "errors" "io" "sync" "sync/atomic" @@ -49,6 +50,13 @@ type Stream struct { readDeadline atomic.Value // time.Time writeDeadline atomic.Value // time.Time + + // establishCh is notified if the stream is established or being closed. + establishCh chan struct{} + + // closeTimer is set with stateLock held to honor the StreamCloseTimeout + // setting on Session. + closeTimer *time.Timer } // newStream is used to construct a new stream within @@ -66,6 +74,7 @@ func newStream(session *Session, id uint32, state streamState) *Stream { sendWindow: initialStreamWindow, recvNotifyCh: make(chan struct{}, 1), sendNotifyCh: make(chan struct{}, 1), + establishCh: make(chan struct{}, 1), } s.readDeadline.Store(time.Time{}) s.writeDeadline.Store(time.Time{}) @@ -161,7 +170,7 @@ func (s *Stream) Write(b []byte) (n int, err error) { func (s *Stream) write(b []byte) (n int, err error) { var flags uint16 var max uint32 - var body io.Reader + var body []byte START: s.stateLock.Lock() switch s.state { @@ -187,11 +196,15 @@ START: // Send up to our send window max = min(window, uint32(len(b))) - body = bytes.NewReader(b[:max]) + body = b[:max] // Send the header s.sendHdr.encode(typeData, flags, s.id, max) if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil { + if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { + // Message left in ready queue, header re-use is unsafe. + s.sendHdr = header(make([]byte, headerSize)) + } return 0, err } @@ -265,6 +278,10 @@ func (s *Stream) sendWindowUpdate() error { // Send the header s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta) if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { + // Message left in ready queue, header re-use is unsafe. 
+ s.controlHdr = header(make([]byte, headerSize)) + } return err } return nil @@ -279,6 +296,10 @@ func (s *Stream) sendClose() error { flags |= flagFIN s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0) if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { + // Message left in ready queue, header re-use is unsafe. + s.controlHdr = header(make([]byte, headerSize)) + } return err } return nil @@ -312,6 +333,27 @@ func (s *Stream) Close() error { s.stateLock.Unlock() return nil SEND_CLOSE: + // This shouldn't happen (the more realistic scenario to cancel the + // timer is via processFlags) but just in case this ever happens, we + // cancel the timer to prevent dangling timers. + if s.closeTimer != nil { + s.closeTimer.Stop() + s.closeTimer = nil + } + + // If we have a StreamCloseTimeout set we start the timeout timer. + // We do this only if we're not already closing the stream since that + // means this was a graceful close. + // + // This prevents memory leaks if one side (this side) closes and the + // remote side poorly behaves and never responds with a FIN to complete + // the close. After the specified timeout, we clean our resources up no + // matter what. + if !closeStream && s.session.config.StreamCloseTimeout > 0 { + s.closeTimer = time.AfterFunc( + s.session.config.StreamCloseTimeout, s.closeTimeout) + } + s.stateLock.Unlock() s.sendClose() s.notifyWaiting() @@ -321,6 +363,23 @@ SEND_CLOSE: return nil } +// closeTimeout is called after StreamCloseTimeout during a close to +// close this stream. +func (s *Stream) closeTimeout() { + // Close our side forcibly + s.forceClose() + + // Free the stream from the session map + s.session.closeStream(s.id) + + // Send a RST so the remote side closes too. + s.sendLock.Lock() + defer s.sendLock.Unlock() + hdr := header(make([]byte, headerSize)) + hdr.encode(typeWindowUpdate, flagRST, s.id, 0) + s.session.sendNoWait(hdr) +} + // forceClose is used for when the session is exiting func (s *Stream) forceClose() { s.stateLock.Lock() @@ -332,20 +391,27 @@ func (s *Stream) forceClose() { // processFlags is used to update the state of the stream // based on set flags, if any. 
Lock must be held func (s *Stream) processFlags(flags uint16) error { + s.stateLock.Lock() + defer s.stateLock.Unlock() + // Close the stream without holding the state lock closeStream := false defer func() { if closeStream { + if s.closeTimer != nil { + // Stop our close timeout timer since we gracefully closed + s.closeTimer.Stop() + } + s.session.closeStream(s.id) } }() - s.stateLock.Lock() - defer s.stateLock.Unlock() if flags&flagACK == flagACK { if s.state == streamSYNSent { s.state = streamEstablished } + asyncNotify(s.establishCh) s.session.establishStream(s.id) } if flags&flagFIN == flagFIN { @@ -378,6 +444,7 @@ func (s *Stream) processFlags(flags uint16) error { func (s *Stream) notifyWaiting() { asyncNotify(s.recvNotifyCh) asyncNotify(s.sendNotifyCh) + asyncNotify(s.establishCh) } // incrSendWindow updates the size of our send window @@ -412,6 +479,7 @@ func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error { if length > s.recvWindow { s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, s.recvWindow, length) + s.recvLock.Unlock() return ErrRecvWindowExceeded } @@ -446,15 +514,17 @@ func (s *Stream) SetDeadline(t time.Time) error { return nil } -// SetReadDeadline sets the deadline for future Read calls. +// SetReadDeadline sets the deadline for blocked and future Read calls. func (s *Stream) SetReadDeadline(t time.Time) error { s.readDeadline.Store(t) + asyncNotify(s.recvNotifyCh) return nil } -// SetWriteDeadline sets the deadline for future Write calls +// SetWriteDeadline sets the deadline for blocked and future Write calls func (s *Stream) SetWriteDeadline(t time.Time) error { s.writeDeadline.Store(t) + asyncNotify(s.sendNotifyCh) return nil } diff --git a/vendor/github.com/heimweh/go-pagerduty/pagerduty/event_orchestration_path.go b/vendor/github.com/heimweh/go-pagerduty/pagerduty/event_orchestration_path.go index ddc7e0091..ebfac32dd 100644 --- a/vendor/github.com/heimweh/go-pagerduty/pagerduty/event_orchestration_path.go +++ b/vendor/github.com/heimweh/go-pagerduty/pagerduty/event_orchestration_path.go @@ -88,9 +88,9 @@ type EventOrchestrationPathActionVariables struct { type EventOrchestrationPathActionExtractions struct { Target string `json:"target,omitempty"` - Regex string `json:"regex,omitempty"` + Regex string `json:"regex,omitempty"` Template string `json:"template,omitempty"` - Source string `json:"source,omitempty"` + Source string `json:"source,omitempty"` } type EventOrchestrationPathCatchAll struct { diff --git a/vendor/github.com/heimweh/go-pagerduty/pagerduty/references.go b/vendor/github.com/heimweh/go-pagerduty/pagerduty/references.go index 8716eb0f2..1cbe818a8 100644 --- a/vendor/github.com/heimweh/go-pagerduty/pagerduty/references.go +++ b/vendor/github.com/heimweh/go-pagerduty/pagerduty/references.go @@ -19,6 +19,9 @@ type UserReference resourceReference // EscalationPolicyReference represents a reference to an escalation policy. type EscalationPolicyReference resourceReference +// ResponsePlayReference represents a reference to a response play. +type ResponsePlayReference resourceReference + // ScheduleReference represents a reference to a schedule. 
type ScheduleReference resourceReference diff --git a/vendor/github.com/heimweh/go-pagerduty/pagerduty/service.go b/vendor/github.com/heimweh/go-pagerduty/pagerduty/service.go index b51f12036..a68706027 100644 --- a/vendor/github.com/heimweh/go-pagerduty/pagerduty/service.go +++ b/vendor/github.com/heimweh/go-pagerduty/pagerduty/service.go @@ -49,6 +49,12 @@ type AlertGroupingParameters struct { Config *AlertGroupingConfig `json:"config,omitempty"` } +// AutoPauseNotificationsParameters defines how alerts on this service are automatically suspended for a period of time before triggering, when identified as likely being transient. +type AutoPauseNotificationsParameters struct { + Enabled bool `json:"enabled"` + Timeout *int `json:"timeout"` +} + // IncidentUrgencyRule is the default urgency for new incidents. type IncidentUrgencyRule struct { DuringSupportHours *IncidentUrgencyType `json:"during_support_hours,omitempty"` @@ -123,29 +129,31 @@ type ValueExtractor struct { // Service represents a service. type Service struct { - AcknowledgementTimeout *int `json:"acknowledgement_timeout"` - Addons []*AddonReference `json:"addons,omitempty"` - AlertCreation string `json:"alert_creation,omitempty"` - AlertGrouping *string `json:"alert_grouping"` - AlertGroupingTimeout *int `json:"alert_grouping_timeout,omitempty"` - AlertGroupingParameters *AlertGroupingParameters `json:"alert_grouping_parameters,omitempty"` - AutoResolveTimeout *int `json:"auto_resolve_timeout"` - CreatedAt string `json:"created_at,omitempty"` - Description string `json:"description,omitempty"` - EscalationPolicy *EscalationPolicyReference `json:"escalation_policy,omitempty"` - HTMLURL string `json:"html_url,omitempty"` - ID string `json:"id,omitempty"` - IncidentUrgencyRule *IncidentUrgencyRule `json:"incident_urgency_rule,omitempty"` - Integrations []*IntegrationReference `json:"integrations,omitempty"` - LastIncidentTimestamp string `json:"last_incident_timestamp,omitempty"` - Name string `json:"name,omitempty"` - ScheduledActions []*ScheduledAction `json:"scheduled_actions,omitempty"` - Self string `json:"self,omitempty"` - Status string `json:"status,omitempty"` - Summary string `json:"summary,omitempty"` - SupportHours *SupportHours `json:"support_hours,omitempty"` - Teams []*TeamReference `json:"teams,omitempty"` - Type string `json:"type,omitempty"` + AcknowledgementTimeout *int `json:"acknowledgement_timeout"` + Addons []*AddonReference `json:"addons,omitempty"` + AlertCreation string `json:"alert_creation,omitempty"` + AlertGrouping *string `json:"alert_grouping"` + AlertGroupingTimeout *int `json:"alert_grouping_timeout,omitempty"` + AlertGroupingParameters *AlertGroupingParameters `json:"alert_grouping_parameters,omitempty"` + AutoPauseNotificationsParameters *AutoPauseNotificationsParameters `json:"auto_pause_notifications_parameters,omitempty"` + AutoResolveTimeout *int `json:"auto_resolve_timeout"` + CreatedAt string `json:"created_at,omitempty"` + Description string `json:"description,omitempty"` + EscalationPolicy *EscalationPolicyReference `json:"escalation_policy,omitempty"` + ResponsePlay *ResponsePlayReference `json:"response_play,omitempty"` + HTMLURL string `json:"html_url,omitempty"` + ID string `json:"id,omitempty"` + IncidentUrgencyRule *IncidentUrgencyRule `json:"incident_urgency_rule,omitempty"` + Integrations []*IntegrationReference `json:"integrations,omitempty"` + LastIncidentTimestamp string `json:"last_incident_timestamp,omitempty"` + Name string `json:"name,omitempty"` + ScheduledActions 
[]*ScheduledAction `json:"scheduled_actions,omitempty"` + Self string `json:"self,omitempty"` + Status string `json:"status,omitempty"` + Summary string `json:"summary,omitempty"` + SupportHours *SupportHours `json:"support_hours,omitempty"` + Teams []*TeamReference `json:"teams,omitempty"` + Type string `json:"type,omitempty"` } // ServicePayload represents a service. diff --git a/vendor/github.com/heimweh/go-pagerduty/pagerduty/user.go b/vendor/github.com/heimweh/go-pagerduty/pagerduty/user.go index 9e032e81b..bbbb3798f 100644 --- a/vendor/github.com/heimweh/go-pagerduty/pagerduty/user.go +++ b/vendor/github.com/heimweh/go-pagerduty/pagerduty/user.go @@ -333,7 +333,6 @@ func (s *UserService) CreateContactMethod(userID string, contactMethod *ContactM resp, err := s.client.newRequestDo("POST", u, nil, &ContactMethodPayload{ContactMethod: contactMethod}, &v) if err != nil { - if e, ok := err.(*Error); !ok || strings.Compare(fmt.Sprintf("%v", e.Errors), "[User Contact method must be unique]") != 0 { return nil, nil, err } @@ -344,7 +343,6 @@ func (s *UserService) CreateContactMethod(userID string, contactMethod *ContactM } v.ContactMethod = sContact resp = sResp - } if err = cachePutContactMethod(v.ContactMethod); err != nil { @@ -356,7 +354,6 @@ func (s *UserService) CreateContactMethod(userID string, contactMethod *ContactM return v.ContactMethod, resp, nil } - func (s *UserService) findExistingContactMethod(userID string, contactMethod *ContactMethod) (*ContactMethod, *Response, error) { lResp, _, lErr := s.ListContactMethods(userID) if lErr != nil { diff --git a/vendor/github.com/heimweh/go-pagerduty/pagerduty/webhook_subscription.go b/vendor/github.com/heimweh/go-pagerduty/pagerduty/webhook_subscription.go index 19f06ddbd..64a140ec9 100644 --- a/vendor/github.com/heimweh/go-pagerduty/pagerduty/webhook_subscription.go +++ b/vendor/github.com/heimweh/go-pagerduty/pagerduty/webhook_subscription.go @@ -22,6 +22,7 @@ type DeliveryMethod struct { Type string `json:"type,omitempty"` URL string `json:"url,omitempty"` CustomHeaders []*CustomHeaders `json:"custom_headers"` + Secret string `json:"secret,omitempty"` } type CustomHeaders struct { diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes new file mode 100644 index 000000000..402433593 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitattributes @@ -0,0 +1,2 @@ +* -text +*.bin -text -diff diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore new file mode 100644 index 000000000..d31b37815 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -0,0 +1,32 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +/s2/cmd/_s2sx/sfx-exe + +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml new file mode 100644 index 000000000..0af08e65e --- /dev/null +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -0,0 +1,141 @@ +# This is an example goreleaser.yaml file with some sane defaults. 
+# Make sure to check the documentation at http://goreleaser.com +before: + hooks: + - ./gen.sh + - go install mvdan.cc/garble@latest + +builds: + - + id: "s2c" + binary: s2c + main: ./s2/cmd/s2c/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + gobinary: garble + - + id: "s2d" + binary: s2d + main: ./s2/cmd/s2d/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + gobinary: garble + - + id: "s2sx" + binary: s2sx + main: ./s2/cmd/_s2sx/main.go + flags: + - -modfile=s2sx.mod + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + gobinary: garble + +archives: + - + id: s2-binaries + name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}" + replacements: + aix: AIX + darwin: OSX + linux: Linux + windows: Windows + 386: i386 + amd64: x86_64 + freebsd: FreeBSD + netbsd: NetBSD + format_overrides: + - goos: windows + format: zip + files: + - unpack/* + - s2/LICENSE + - s2/README.md +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/compress + maintainer: Klaus Post + description: S2 Compression Tool + license: BSD 3-Clause + formats: + - deb + - rpm + replacements: + darwin: Darwin + linux: Linux + freebsd: FreeBSD + amd64: x86_64 diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE index 1eb75ef68..87d557477 100644 --- a/vendor/github.com/klauspost/compress/LICENSE +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -26,3 +26,279 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
new file mode 100644
index 000000000..ad5c63a82
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -0,0 +1,560 @@
+# compress
+
+This package provides various compression algorithms.
+
+* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go.
+* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy.
+* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
+* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams.
+* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
+* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) provides client and server wrappers for handling gzipped requests efficiently.
+* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
+* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here.
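+
+As a quick, hedged sketch of the zstd and S2 block APIs listed above (a minimal example against the current public APIs; error handling trimmed for brevity):
+
+```
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/compress/s2"
+	"github.com/klauspost/compress/zstd"
+)
+
+func main() {
+	data := bytes.Repeat([]byte("some repetitive data "), 100)
+
+	// S2 block form: Encode appends to dst (nil allocates a new buffer).
+	packed := s2.Encode(nil, data)
+	unpacked, _ := s2.Decode(nil, packed)
+	fmt.Println("s2:", len(packed), bytes.Equal(unpacked, data))
+
+	// zstd block form: one Encoder/Decoder pair can be reused for many calls.
+	enc, _ := zstd.NewWriter(nil) // a nil writer is fine for EncodeAll-only use
+	dec, _ := zstd.NewReader(nil)
+	defer enc.Close()
+	defer dec.Close()
+	blob := enc.EncodeAll(data, nil)
+	back, _ := dec.DecodeAll(blob, nil)
+	fmt.Println("zstd:", len(blob), bytes.Equal(back, data))
+}
+```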
+ +[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) +[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) + +# changelog + +* July 13, 2022 (v1.15.8) + + * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 + * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 + * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 + * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 + * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 + * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 + * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 + +* June 29, 2022 (v1.15.7) + + * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 + * zip: Merge upstream https://github.com/klauspost/compress/pull/631 + * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 + * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 + * flate: Faster histograms https://github.com/klauspost/compress/pull/620 + * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 + +* June 3, 2022 (v1.15.6) + * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 + * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 + * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 + * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 + * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 + * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 + * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 + * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 + * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 + * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 + +* May 25, 2022 (v1.15.5) + * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 + * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 + * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 + * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 + * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 + * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 + * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593 + * huff0: Do not check max size when reading table. 
https://github.com/klauspost/compress/pull/586
+  * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
+
+
+* May 11, 2022 (v1.15.4)
+  * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577)
+  * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581)
+  * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583)
+  * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580)
+
+* May 5, 2022 (v1.15.3)
+  * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572)
+  * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575)
+
+* Apr 26, 2022 (v1.15.2)
+  * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537)
+  * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539)
+  * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555)
+  * Minimum version is Go 1.16, added CI test on 1.18.
+
+* Mar 11, 2022 (v1.15.1)
+  * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
+  * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
+  * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
+  * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
+  * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
+
+* Mar 3, 2022 (v1.15.0)
+  * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
+  * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
+  * huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507)
+  * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
+  * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
+  * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
+
+ See Details
+Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
+
+Stream decompression is now faster in asynchronous mode, since the goroutine allocation splits the workload much more effectively. Typical streams will fully use 2 cores for decompression. When a stream has finished decoding, no goroutines are left over, so decoders can now safely be pooled and still be garbage collected.
+
+While the release has been extensively tested, it is recommended to test when upgrading.
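+
+As an illustrative sketch of the synchronous mode described above (not part of the release notes), setting the existing concurrency options to 1 keeps stream use goroutine-free:
+
+```
+package zstdsync
+
+import (
+	"io"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// CopyCompressed compresses src into dst as a zstd stream without
+// spawning goroutines, per the concurrency notes above. The decoder
+// side behaves the same with zstd.WithDecoderConcurrency(1).
+func CopyCompressed(dst io.Writer, src io.Reader) error {
+	enc, err := zstd.NewWriter(dst, zstd.WithEncoderConcurrency(1))
+	if err != nil {
+		return err
+	}
+	if _, err := io.Copy(enc, src); err != nil {
+		enc.Close()
+		return err
+	}
+	return enc.Close()
+}
+```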
+
+* Feb 22, 2022 (v1.14.4)
+  * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
+  * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
+  * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501)
+  * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
+
+* Feb 17, 2022 (v1.14.3)
+  * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
+  * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483)
+  * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486)
+
+* Jan 25, 2022 (v1.14.2)
+  * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476)
+  * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469)
+  * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470)
+  * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472)
+  * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473)
+  * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475)
+
+* Jan 11, 2022 (v1.14.1)
+  * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462)
+  * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458)
+  * zstd: Performance improvement in [#420](https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
+  * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
+  * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
+
+ See changes to v1.13.x + +* Aug 30, 2021 (v1.13.5) + * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) + * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) + * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) + * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) + +* Aug 12, 2021 (v1.13.4) + * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). + * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) + +* Aug 3, 2021 (v1.13.3) + * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) + * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) + * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) + * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) + * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) + * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) + +* Jun 14, 2021 (v1.13.1) + * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) + * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) + * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) + * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) + +* Jun 3, 2021 (v1.13.0) + * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. + * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) + * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
+ + +
+ See changes to v1.12.x
+
+* May 25, 2021 (v1.12.3)
+  * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
+  * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
+  * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373)
+
+* Apr 27, 2021 (v1.12.2)
+  * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365)
+  * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363)
+  * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367)
+  * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/359)
+  * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362)
+  * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368)
+
+* Apr 14, 2021 (v1.12.1)
+  * snappy package removed. Upstream added as dependency.
+  * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353)
+  * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352)
+  * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348)
+  * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
+  * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
+  * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
+
+ +
+ See changes to v1.11.x
+
+* Mar 26, 2021 (v1.11.13)
+  * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
+  * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336)
+  * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338)
+  * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341)
+
+* Mar 5, 2021 (v1.11.12)
+  * s2: Add `s2sx` binary that creates [self-extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives).
+  * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328)
+
+* Mar 1, 2021 (v1.11.9)
+  * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324)
+  * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325)
+  * s2: Fix binaries.
+
+* Feb 25, 2021 (v1.11.8)
+  * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
+  * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
+  * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
+  * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
+  * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313)
+
+* Jan 14, 2021 (v1.11.7)
+  * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309)
+  * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310)
+  * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311)
+  * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308)
+  * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312)
+
+* Jan 7, 2021 (v1.11.6)
+  * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306)
+  * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305)
+
+* Dec 20, 2020 (v1.11.4)
+  * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304)
+  * Add header decoder [#299](https://github.com/klauspost/compress/pull/299)
+  * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297)
+  * Simplify/speed up small blocks with known max size.
[#300](https://github.com/klauspost/compress/pull/300) + * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) + +* Nov 15, 2020 (v1.11.3) + * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) + * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) + +* Oct 11, 2020 (v1.11.2) + * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) + +* Oct 1, 2020 (v1.11.1) + * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) + +* Sept 8, 2020 (v1.11.0) + * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) + * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) + * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) +
+ +
+ See changes to v1.10.x + +* July 8, 2020 (v1.10.11) + * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) + * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) + +* June 23, 2020 (v1.10.10) + * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) + +* June 16, 2020 (v1.10.9): + * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) + * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) + * Fuzzit tests removed. The service has been purchased and is no longer available. + +* June 5, 2020 (v1.10.8): + * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) + +* June 1, 2020 (v1.10.7): + * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) + * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) + * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) + +* May 21, 2020: (v1.10.6) + * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) + * zstd: Stricter decompression checks. + +* April 12, 2020: (v1.10.5) + * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) + +* Apr 8, 2020: (v1.10.4) + * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) +* Mar 11, 2020: (v1.10.3) + * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) + * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) + * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) + * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) + * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) + +* Feb 27, 2020: (v1.10.2) + * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) + * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) + +* Feb 18, 2020: (v1.10.1) + * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) + * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) + * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) + +* Feb 4, 2020: (v1.10.0) + * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) + * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) + * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) + * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) + +
+ +
+ See changes prior to v1.10.0
+
+* Jan 20, 2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206).
+* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204)
+* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed.
+* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases.
+* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192)
+* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder.
+* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199)
+* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features
+* Dec 29, 2019: zstd: Single segment flag conditions tweaked. [#197](https://github.com/klauspost/compress/pull/197)
+* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198)
+* Dec 10, 2019: s2: Fix repeat length output when just above the 16MB limit.
+* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191)
+* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188)
+* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187)
+* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines.
+* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate.
+* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184)
+* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate.
+* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180)
+* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB.
+* Nov 11, 2019: Reduce inflate memory use by 1KB.
+* Nov 10, 2019: Less allocations in deflate bit writer.
+* Nov 10, 2019: Fix inconsistent error returned by zstd decoder.
+* Oct 28, 2019 (v1.9.1) zstd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174)
+* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173)
+* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172)
+* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105)
+
+ +
+ See changes prior to v1.9.0 + +* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) +* Oct 3, 2019: Fix inconsistent results on broken zstd streams. +* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) +* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). +* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). +* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). +* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. +* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. +* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. +* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. +* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. +* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. +* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. +* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) +* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) +* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) +* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) +* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. +* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. +* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. +* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. +* June 17, 2019: zstd decompression bugfix. +* June 17, 2019: fix 32 bit builds. +* June 17, 2019: Easier use in modules (less dependencies). +* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. +* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. +* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. +* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! +* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. +* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. +* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). 
+* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below.
+* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0).
+* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change.
+* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change.
+* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function.
+* May 28, 2017: Reduce allocations when resetting decoder.
+* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7.
+* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
+* Oct 25, 2016: Level 2-4 have been rewritten and now offer significantly better performance than before.
+* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
+* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level.
+* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression.
+* Mar 24, 2016: Small speedup for level 1-3.
+* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
+* Feb 19, 2016: Handle small payloads faster in level 1-3.
+* Feb 19, 2016: Added faster level 2 + 3 compression modes.
+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
+* Feb 14, 2016: Snappy: Merge upstream changes.
+* Feb 14, 2016: Snappy: Fix aggressive skipping.
+* Feb 14, 2016: Snappy: Update benchmark.
+* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression.
+* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%.
+* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard-to-compress content.
+* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup.
+* Jan 16, 2016: Optimization on deflate level 1,2,3 compression.
+* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives.
+* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs.
+* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms.
+* Dec 8 2015: Fixed rare [one byte out-of-bounds read](https://github.com/klauspost/compress/issues/20). Please update!
+* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet).
+* Nov 20 2015: Small optimization to bit writer on 64 bit systems.
+* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15).
+* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file +* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. + +
+
+
+# deflate usage
+
+The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
+
+| old import         | new import                               | Documentation
+|--------------------|------------------------------------------|--------------------|
+| `compress/gzip`    | `github.com/klauspost/compress/gzip`     | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)
+| `compress/zlib`    | `github.com/klauspost/compress/zlib`     | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)
+| `archive/zip`      | `github.com/klauspost/compress/zip`      | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)
+| `compress/flate`   | `github.com/klauspost/compress/flate`    | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc)
+
+* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
+
+You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression of big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
+
+The packages contain the same functionality as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
+
+Currently there is only a minor speedup on decompression (mostly CRC32 calculation).
+
+Memory usage is typically 1MB for a Writer. stdlib is in the same range.
+If you expect to have a lot of concurrently allocated Writers, consider using
+the stateless compression described below.
+
+For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
+
+# Stateless compression
+
+This package offers stateless compression as a special option for gzip/deflate.
+It will do compression but without maintaining any state between Write calls.
+
+This means there will be no memory kept between Write calls, but compression and speed will be suboptimal.
+
+This is only relevant in cases where you expect to run many thousands of compressors concurrently,
+but with very little activity. This is *not* intended for regular web servers serving individual requests.
+
+Because of this, the size of actual Write calls will affect output size.
+
+In gzip, specify level `-3` / `gzip.StatelessCompression` to enable.
+
+For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter).
+
+A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:
+
+```
+	// replace 'ioutil.Discard' with your output.
+	gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
+	if err != nil {
+		return err
+	}
+	defer gzw.Close()
+
+	w := bufio.NewWriterSize(gzw, 4096)
+	defer w.Flush()
+
+	// Write to 'w'
+```
+
+This will only use up to 4KB in memory when the writer is idle.
+
+Compression is almost always worse than the fastest compression level
+and each write will allocate (a little) memory.
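+
+For direct deflate use, a minimal, hedged sketch along the same lines (using the `NewStatelessWriter` helper referenced above; output is raw deflate, not gzip) might look like:
+
+```
+package stateless
+
+import (
+	"bytes"
+
+	"github.com/klauspost/compress/flate"
+)
+
+// compressOnce deflates one payload. No state is kept between Write
+// calls, so nothing is retained while the writer is idle; bigger
+// writes therefore compress better.
+func compressOnce(data []byte) ([]byte, error) {
+	var buf bytes.Buffer
+	w := flate.NewStatelessWriter(&buf)
+	if _, err := w.Write(data); err != nil {
+		return nil, err
+	}
+	if err := w.Close(); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+```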
+
+# Performance Update 2018
+
+It has been a while since we last looked at the speed of this package compared to the standard library, so I thought I would redo my tests and give some overall recommendations based on the current state. All benchmarks were performed with Go 1.10 on my desktop Intel(R) Core(TM) i7-2600 CPU @ 3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
+
+The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
+
+The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib; negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
+
+`gzstd` (standard library gzip) and `gzkp` (this package's gzip) use only one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
+
+
+## Overall differences
+
+There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
+
+The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
+
+This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is a reasonable trade-off, "9" means "give me the best compression", and the values in between give something reasonable in between. The standard library has big differences between levels 1-4, while levels 5-9 show no significant gains - often spending a lot more time than can be justified by the achieved compression.
+
+There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
+
+## Web Content
+
+This test set aims to emulate typical use in a web server. The test set is 4GB of data in 53k files, and is a mixture of (mostly) HTML, JS, and CSS.
+
+Since levels 1 and 9 are close to being the same code, their results are quite close. But looking at the levels in between, the differences are quite big.
+
+Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
+
+## Object files
+
+This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
+
+The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
+
+The standard library seems suboptimal at levels 3 and 4 - offering both worse compression and lower speed than levels 6 and 7 of this package, respectively.
+
+## Highly Compressible File
+
+This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real-life terms we are dealing with something like a highly redundant stream of data.
+
+It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
+
+So if you know your content is extremely compressible, you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (a 2.75x slowdown), so it offers little "middle ground".
+
+## Medium-High Compressible
+
+This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump from Mar. 3, 2006. This is a very good test of typical text-based compression and more data-heavy streams.
+
+We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
+
+## Medium Compressible
+
+I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
+
+The most notable thing is how quickly the standard library drops to very low compression speeds around levels 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
+
+
+## Un-compressible Content
+
+This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
+
+
+## Huffman only compression
+
+This compression library adds a special compression level, named `HuffmanOnly`, which allows near-linear-time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
+
+This means that often-used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, while rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
+
+Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
+
+The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
+
+The linear-time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4x speedup.
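+
+As a minimal sketch, assuming the `github.com/klauspost/compress/gzip` import and its `HuffmanOnly` level constant (which mirrors the stdlib constant of the same name), enabling this mode through the gzip wrapper looks like:
+
+```
+	// replace 'ioutil.Discard' with your output.
+	gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.HuffmanOnly)
+	if err != nil {
+		return err
+	}
+	defer gzw.Close()
+
+	// Writes are entropy-coded only; no matching against previous data is done.
+```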
+
+For more information, see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
+
+This is implemented in Go 1.7 as "Huffman Only" mode, though it is not exposed for gzip.
+
+# Other packages
+
+Here are other packages of good quality, in pure Go (no cgo wrappers or autoconverted code):
+
+* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
+* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
+* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
+
+# license
+
+This code is licensed under the same conditions as the original Go code. See the LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go
new file mode 100644
index 000000000..ea5a692d5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/compressible.go
@@ -0,0 +1,85 @@
+package compress
+
+import "math"
+
+// Estimate returns a normalized compressibility estimate of block b.
+// Values close to zero are likely uncompressible.
+// Values above 0.1 are likely to be compressible.
+// Values above 0.5 are very compressible.
+// Very small lengths will return 0.
+func Estimate(b []byte) float64 {
+	if len(b) < 16 {
+		return 0
+	}
+
+	// Correctly predicted order 1
+	hits := 0
+	lastMatch := false
+	var o1 [256]byte
+	var hist [256]int
+	c1 := byte(0)
+	for _, c := range b {
+		if c == o1[c1] {
+			// We only count a hit if there was two correct predictions in a row.
+			if lastMatch {
+				hits++
+			}
+			lastMatch = true
+		} else {
+			lastMatch = false
+		}
+		o1[c1] = c
+		c1 = c
+		hist[c]++
+	}
+
+	// Use x^0.6 to give better spread
+	prediction := math.Pow(float64(hits)/float64(len(b)), 0.6)
+
+	// Calculate histogram distribution
+	variance := float64(0)
+	avg := float64(len(b)) / 256
+
+	for _, v := range hist {
+		Δ := float64(v) - avg
+		variance += Δ * Δ
+	}
+
+	stddev := math.Sqrt(float64(variance)) / float64(len(b))
+	exp := math.Sqrt(1 / float64(len(b)))
+
+	// Subtract expected stddev
+	stddev -= exp
+	if stddev < 0 {
+		stddev = 0
+	}
+	stddev *= 1 + exp
+
+	// Use x^0.4 to give better spread
+	entropy := math.Pow(stddev, 0.4)
+
+	// 50/50 weight between prediction and histogram distribution
+	return math.Pow((prediction+entropy)/2, 0.9)
+}
+
+// ShannonEntropyBits returns the number of bits minimum required to represent
+// an entropy encoding of the input bytes.
+// https://en.wiktionary.org/wiki/Shannon_entropy +func ShannonEntropyBits(b []byte) int { + if len(b) == 0 { + return 0 + } + var hist [256]int + for _, c := range b { + hist[c]++ + } + shannon := float64(0) + invTotal := 1.0 / float64(len(b)) + for _, v := range hist[:] { + if v > 0 { + n := float64(v) + shannon += math.Ceil(-math.Log2(n*invTotal) * n) + } + } + return int(math.Ceil(shannon)) +} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index b69237c9b..6f341914c 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -92,7 +92,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTra im := int32((nbBitsOut << 16) - first.deltaNbBits) lu := (im >> nbBitsOut) + first.deltaFindState c.state = c.stateTable[lu] - return } // encode the output symbol provided and write it to the bitstream. @@ -301,7 +300,7 @@ func (s *Scratch) writeCount() error { out[outP+1] = byte(bitStream >> 8) outP += (bitCount + 7) / 8 - if uint16(charnum) > s.symbolLen { + if charnum > s.symbolLen { return errors.New("internal error: charnum > s.symbolLen") } s.Out = out[:outP] @@ -331,7 +330,7 @@ type cTable struct { func (s *Scratch) allocCtable() { tableSize := 1 << s.actualTableLog // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < int(tableSize) { + if cap(s.ct.tableSymbol) < tableSize { s.ct.tableSymbol = make([]byte, tableSize) } s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] @@ -565,8 +564,8 @@ func (s *Scratch) normalizeCount2() error { distributed uint32 total = uint32(s.br.remain()) tableLog = s.actualTableLog - lowThreshold = uint32(total >> tableLog) - lowOne = uint32((total * 3) >> (tableLog + 1)) + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) ) for i, cnt := range s.count[:s.symbolLen] { if cnt == 0 { @@ -591,7 +590,7 @@ func (s *Scratch) normalizeCount2() error { if (total / toDistribute) > lowOne { // risk of rounding to zero - lowOne = uint32((total * 3) / (toDistribute * 2)) + lowOne = (total * 3) / (toDistribute * 2) for i, cnt := range s.count[:s.symbolLen] { if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { s.norm[i] = 1 diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go index 413ec3b3c..926f5f153 100644 --- a/vendor/github.com/klauspost/compress/fse/decompress.go +++ b/vendor/github.com/klauspost/compress/fse/decompress.go @@ -172,7 +172,7 @@ type decSymbol struct { // allocDtable will allocate decoding tables if they are not big enough. func (s *Scratch) allocDtable() { tableSize := 1 << s.actualTableLog - if cap(s.decTable) < int(tableSize) { + if cap(s.decTable) < tableSize { s.decTable = make([]decSymbol, tableSize) } s.decTable = s.decTable[:tableSize] @@ -340,7 +340,7 @@ type decoder struct { func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { d.dt = dt d.br = in - d.state = uint16(in.getBits(tableLog)) + d.state = in.getBits(tableLog) } // next returns the next symbol and sets the next state. diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh new file mode 100644 index 000000000..aff942205 --- /dev/null +++ b/vendor/github.com/klauspost/compress/gen.sh @@ -0,0 +1,4 @@ +#!/bin/sh + +cd s2/cmd/_s2sx/ || exit 1 +go generate . 
diff --git a/vendor/github.com/klauspost/compress/go.mod b/vendor/github.com/klauspost/compress/go.mod new file mode 100644 index 000000000..b605e2d52 --- /dev/null +++ b/vendor/github.com/klauspost/compress/go.mod @@ -0,0 +1,3 @@ +module github.com/klauspost/compress + +go 1.16 diff --git a/vendor/github.com/klauspost/compress/go.sum b/vendor/github.com/klauspost/compress/go.sum new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md index e12da4db2..8b6e5c663 100644 --- a/vendor/github.com/klauspost/compress/huff0/README.md +++ b/vendor/github.com/klauspost/compress/huff0/README.md @@ -14,7 +14,9 @@ but it can be used as a secondary step to compressors (like Snappy) that does no ## News - * Mar 2018: First implementation released. Consider this beta software for now. +This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package. + +This ensures that most functionality is well tested. # Usage diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go index a4979e886..504a7be9d 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -8,115 +8,10 @@ package huff0 import ( "encoding/binary" "errors" + "fmt" "io" ) -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBit32(uint32(v))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) peekBitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -func (b *bitReader) advance(n uint8) { - b.bitsRead += n -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. 
-func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - // bitReader reads a bitstream in reverse. // The last set bit indicates the start of the stream and is used // for aligning the input. @@ -213,10 +108,17 @@ func (b *bitReaderBytes) finished() bool { return b.off == 0 && b.bitsRead >= 64 } +func (b *bitReaderBytes) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReaderBytes) close() error { // Release reference. b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } @@ -313,15 +215,17 @@ func (b *bitReaderShifted) fill() { } } -// finished returns true if all bits have been read from the bit stream. -func (b *bitReaderShifted) finished() bool { - return b.off == 0 && b.bitsRead >= 64 +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) } // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReaderShifted) close() error { // Release reference. b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index 6bce4e87d..ec71f7a34 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -5,8 +5,6 @@ package huff0 -import "fmt" - // bitWriter will write bits. // First bit will be LSB of the first byte of output. type bitWriter struct { @@ -23,14 +21,6 @@ var bitMask16 = [32]uint16{ 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF} /* up to 16 bits */ -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. // It will not check if there is space for them, so the caller must ensure that it has flushed recently. func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { @@ -70,104 +60,6 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { b.nBits += encA.nBits + encB.nBits } -// addBits16ZeroNC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -// This is fastest if bits can be zero. 
-func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { - if bits == 0 { - return - } - value <<= (16 - bits) & 15 - value >>= (16 - bits) & 15 - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. -func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - return - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - b.bitContainer >>= 1 << 3 - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - b.bitContainer >>= 2 << 3 - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - b.bitContainer >>= 3 << 3 - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - b.bitContainer >>= 4 << 3 - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - b.bitContainer >>= 5 << 3 - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - b.bitContainer >>= 6 << 3 - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - b.bitContainer >>= 7 << 3 - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - b.bitContainer = 0 - b.nBits = 0 - return - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.nBits &= 7 -} - // flush32 will flush out, so there are at least 32 bits available for writing. func (b *bitWriter) flush32() { if b.nBits < 32 { @@ -201,10 +93,3 @@ func (b *bitWriter) close() error { b.flushAlign() return nil } - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go index 50bcdf6ea..4dcab8d23 100644 --- a/vendor/github.com/klauspost/compress/huff0/bytereader.go +++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go @@ -20,11 +20,6 @@ func (b *byteReader) init(in []byte) { b.off = 0 } -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - // Int32 returns a little endian int32 starting at current offset. func (b byteReader) Int32() int32 { v3 := int32(b.b[b.off+3]) @@ -43,11 +38,6 @@ func (b byteReader) Uint32() uint32 { return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 } -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - // remain will return the number of bytes remaining. 
func (b byteReader) remain() int { return len(b.b) - b.off diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index f9ed5f830..4d14542fa 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -2,6 +2,7 @@ package huff0 import ( "fmt" + "math" "runtime" "sync" ) @@ -161,6 +162,70 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error) return s.Out, false, nil } +// EstimateSizes will estimate the data sizes +func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { + s, err = s.prepare(in) + if err != nil { + return 0, 0, 0, err + } + + // Create histogram, if none was provided. + tableSz, dataSz, reuseSz = -1, -1, -1 + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return 0, 0, 0, ErrIncompressible + } + // One symbol, use RLE + return 0, 0, 0, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return 0, 0, 0, ErrIncompressible + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return 0, 0, 0, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + tableSz, err = s.cTable.estTableSize(s) + if err != nil { + return 0, 0, 0, err + } + if canReuse { + reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) + } + dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) + + // Restore + return tableSz, dataSz, reuseSz, nil +} + func (s *Scratch) compress1X(src []byte) ([]byte, error) { return s.compress1xDo(s.Out, src) } @@ -225,6 +290,10 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) { if err != nil { return nil, err } + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } // Write compressed length as little endian before block. if i < 3 { // Last length is not written. @@ -268,6 +337,10 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { return nil, errs[i] } o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } // Write compressed length as little endian before block. if i < 3 { // Last length is not written. @@ -331,6 +404,7 @@ func (s *Scratch) canUseTable(c cTable) bool { return true } +//lint:ignore U1000 used for debugging func (s *Scratch) validateTable(c cTable) bool { if len(c) < int(s.symbolLen) { return false @@ -403,7 +477,7 @@ func (s *Scratch) buildCTable() error { var startNode = int16(s.symbolLen) nonNullRank := s.symbolLen - 1 - nodeNb := int16(startNode) + nodeNb := startNode huffNode := s.nodes[1 : huffNodesLen+1] // This overlays the slice above, but allows "-1" index lookups. 
@@ -536,7 +610,6 @@ func (s *Scratch) huffSort() { } nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} } - return } func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { @@ -580,7 +653,7 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { // Get pos of last (smallest) symbol per rank { - currentNbBits := uint8(maxNbBits) + currentNbBits := maxNbBits for pos := int(n); pos >= 0; pos-- { if huffNode[pos].nbBits >= currentNbBits { continue diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 41703bba4..c0c48bd70 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -4,13 +4,13 @@ import ( "errors" "fmt" "io" + "sync" "github.com/klauspost/compress/fse" ) type dTable struct { single []dEntrySingle - double []dEntryDouble } // single-symbols decoding @@ -18,13 +18,6 @@ type dEntrySingle struct { entry uint16 } -// double-symbols decoding -type dEntryDouble struct { - seq uint16 - nBits uint8 - len uint8 -} - // Uses special code for all tables that are < 8 bits. const use8BitTables = true @@ -34,7 +27,7 @@ const use8BitTables = true // If no Scratch is provided a new one is allocated. // The returned Scratch can be used for encoding or decoding input using this table. func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { - s, err = s.prepare(in) + s, err = s.prepare(nil) if err != nil { return s, nil, err } @@ -216,6 +209,7 @@ func (s *Scratch) Decoder() *Decoder { return &Decoder{ dt: s.dt, actualTableLog: s.actualTableLog, + bufs: &s.decPool, } } @@ -223,103 +217,15 @@ func (s *Scratch) Decoder() *Decoder { type Decoder struct { dt dTable actualTableLog uint8 + bufs *sync.Pool } -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress1X8Bit(dst, src) - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - dt := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - - for br.off >= 8 { - br.fillFast() - v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - // Refill - br.fillFast() - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
- } - } - - if len(dst)+int(off) > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - if len(dst) >= maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) - } - return dst, br.close() + return &[4][256]byte{} } // decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. @@ -341,41 +247,258 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { dt := d.dt.single[:256] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 - shift := (8 - d.actualTableLog) & 7 - - //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) - for br.off >= 4 { - br.fillFast() - v := dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded + switch d.actualTableLog { + case 8: + const shift = 8 - 8 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 7: + const shift = 8 - 7 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 6: + const shift = 8 - 6 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) } - dst = append(dst, buf[:]...) } + case 5: + const shift = 8 - 5 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 4: + const shift = 8 - 4 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 3: + const shift = 8 - 3 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 2: + const shift = 8 - 2 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 1: + const shift = 8 - 1 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + default: + d.bufs.Put(bufs) + return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -383,6 +506,8 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { // br < 4, so uint8 is fine bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + shift := (8 - d.actualTableLog) & 7 + for bitsLeft > 0 { if br.bitsRead >= 64-8 { for br.off > 0 { @@ -393,6 +518,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { } if len(dst) >= maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } v := dt[br.peekByteFast()>>shift] @@ -401,6 +527,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { bitsLeft -= int8(nBits) dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } @@ -420,33 +547,35 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { dt := d.dt.single[:256] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 - const shift = 0 + const shift = 56 //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) for br.off >= 4 { br.fillFast() - v := dt[br.peekByteFast()>>shift] + v := dt[uint8(br.value>>shift)] br.advance(uint8(v.entry)) buf[off+0] = uint8(v.entry >> 8) - v = dt[br.peekByteFast()>>shift] + v = dt[uint8(br.value>>shift)] br.advance(uint8(v.entry)) buf[off+1] = uint8(v.entry >> 8) - v = dt[br.peekByteFast()>>shift] + v = dt[uint8(br.value>>shift)] br.advance(uint8(v.entry)) buf[off+2] = uint8(v.entry >> 8) - v = dt[br.peekByteFast()>>shift] + v = dt[uint8(br.value>>shift)] br.advance(uint8(v.entry)) buf[off+3] = uint8(v.entry >> 8) off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -455,6 +584,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -471,208 +601,20 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { } } if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } - v := dt[br.peekByteFast()>>shift] + v := dt[br.peekByteFast()] nBits := uint8(v.entry) br.advance(nBits) bitsLeft -= int8(nBits) dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. 
-func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - var decoded int - - // Decode 2 values from each decoder/loop. - const bufoff = 256 / 4 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v2 := single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v2 = single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v2 := single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v2 = single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) - } - - off += 2 - - if off == bufoff { - if bufoff > dstEvery { - return nil, errors.New("corruption detected: stream overrun 1") - } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 - out = out[bufoff:] - decoded += 256 - // There must at least be 3 buffers left. 
- if len(out) < dstEvery*3 { - return nil, errors.New("corruption detected: stream overrun 2") - } - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - for i := range br { - offset := dstEvery * i - br := &br[i] - bitsLeft := br.off*8 + uint(64-br.bitsRead) - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= len(out) { - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - // Decompress4X will decompress a 4X encoded stream. // The length of the supplied input must match the end of a block exactly. // The *capacity* of the dst slice must match the destination size of @@ -706,19 +648,18 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { out := dst dstEvery := (dstSize + 3) / 4 - shift := (8 - d.actualTableLog) & 7 + shift := (56 + (8 - d.actualTableLog)) & 63 const tlSize = 1 << 8 - const tlMask = tlSize - 1 single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + buf := d.buffer() var off uint8 var decoded int // Decode 4 values from each decoder/loop. - const bufoff = 256 / 4 + const bufoff = 256 for { if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { break @@ -728,96 +669,109 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { // Interleave 2 decodes. 
const stream = 0 const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } { const stream = 2 const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry 
+ v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } off += 4 - if off == bufoff { + if off == 0 { if bufoff > dstEvery { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 + copy(out, buf[0][:]) + copy(out[dstEvery:], buf[1][:]) + copy(out[dstEvery*2:], buf[2][:]) + copy(out[dstEvery*3:], buf[3][:]) out = out[bufoff:] - decoded += 256 + decoded += bufoff * 4 // There must at least be 3 buffers left. if len(out) < dstEvery*3 { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 2") } } @@ -825,23 +779,31 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { if off > 0 { ioff := int(off) if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 3") } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) decoded += int(off) * 4 out = out[off:] } // Decode remaining. + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } br := &br[i] - bitsLeft := int(br.off*8) + int(64-br.bitsRead) + bitsLeft := br.remaining() for bitsLeft > 0 { if br.finished() { + d.bufs.Put(buf) return nil, io.ErrUnexpectedEOF } if br.bitsRead >= 56 { @@ -861,24 +823,31 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { } } // end inline... - if offset >= len(out) { + if offset >= endsAt { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 4") } // Read value and increment offset. 
- v := single[br.peekByteFast()>>shift].entry + v := single[uint8(br.value>>shift)].entry nBits := uint8(v) br.advance(nBits) - bitsLeft -= int(nBits) + bitsLeft -= uint(nBits) out[offset] = uint8(v >> 8) offset++ } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } decoded += offset - dstEvery*i err = br.close() if err != nil { + d.bufs.Put(buf) return nil, err } } + d.bufs.Put(buf) if dstSize != decoded { return nil, errors.New("corruption detected: short output block") } @@ -914,18 +883,17 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { out := dst dstEvery := (dstSize + 3) / 4 - const shift = 0 + const shift = 56 const tlSize = 1 << 8 - const tlMask = tlSize - 1 single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + buf := d.buffer() var off uint8 var decoded int // Decode 4 values from each decoder/loop. - const bufoff = 256 / 4 + const bufoff = 256 for { if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { break @@ -935,96 +903,109 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { // Interleave 2 decodes. const stream = 0 const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } { 
const stream = 2 const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } off += 4 - if off == bufoff { + if off == 0 { if bufoff > dstEvery { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 + copy(out, buf[0][:]) + copy(out[dstEvery:], buf[1][:]) + copy(out[dstEvery*2:], buf[2][:]) + copy(out[dstEvery*3:], buf[3][:]) out = out[bufoff:] - decoded += 256 + decoded += bufoff * 4 // There must at least be 3 buffers left. 
if len(out) < dstEvery*3 { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 2") } } @@ -1034,21 +1015,27 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { if len(out) < dstEvery*3+ioff { return nil, errors.New("corruption detected: stream overrun 3") } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) decoded += int(off) * 4 out = out[off:] } // Decode remaining. + remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } br := &br[i] - bitsLeft := int(br.off*8) + int(64-br.bitsRead) + bitsLeft := br.remaining() for bitsLeft > 0 { if br.finished() { + d.bufs.Put(buf) return nil, io.ErrUnexpectedEOF } if br.bitsRead >= 56 { @@ -1068,24 +1055,32 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { } } // end inline... - if offset >= len(out) { + if offset >= endsAt { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 4") } // Read value and increment offset. - v := single[br.peekByteFast()>>shift].entry + v := single[br.peekByteFast()].entry nBits := uint8(v) br.advance(nBits) - bitsLeft -= int(nBits) + bitsLeft -= uint(nBits) out[offset] = uint8(v >> 8) offset++ } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i err = br.close() if err != nil { + d.bufs.Put(buf) return nil, err } } + d.bufs.Put(buf) if dstSize != decoded { return nil, errors.New("corruption detected: short output block") } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go new file mode 100644 index 000000000..9f3e9f79e --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -0,0 +1,222 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// This file contains the specialisation of Decoder.Decompress4X +// and Decoder.Decompress1X that use an asm implementation of thir main loops. +package huff0 + +import ( + "errors" + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog > 8. +//go:noescape +func decompress4x_main_loop_amd64(ctx *decompress4xContext) + +// decompress4x_8b_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog <= 8 which decodes 4 entries +// per loop. +//go:noescape +func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) + +// fallback8BitSize is the size where using Go version is faster. +const fallback8BitSize = 800 + +type decompress4xContext struct { + pbr *[4]bitReaderShifted + peekBits uint8 + out *byte + dstEvery int + tbl *dEntrySingle + decoded int + limit *byte +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. 
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + + use8BitTables := d.actualTableLog <= 8 + if cap(dst) < fallback8BitSize && use8BitTables { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + var decoded int + + if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { + ctx := decompress4xContext{ + pbr: &br, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + out: &out[0], + dstEvery: dstEvery, + tbl: &single[0], + limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. + } + if use8BitTables { + decompress4x_8b_main_loop_amd64(&ctx) + } else { + decompress4x_main_loop_amd64(&ctx) + } + + decoded = ctx.decoded + out = out[decoded/4:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress1X when tablelog > 8. +//go:noescape +func decompress1x_main_loop_amd64(ctx *decompress1xContext) + +// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation +// of Decompress1X when tablelog > 8. +//go:noescape +func decompress1x_main_loop_bmi2(ctx *decompress1xContext) + +type decompress1xContext struct { + pbr *bitReaderShifted + peekBits uint8 + out *byte + outCap int + tbl *dEntrySingle + decoded int +} + +// Error reported by asm implementations +const error_max_decoded_size_exeeded = -1 + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. 
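Both this asm-backed Decompress4X and the generic version later in the diff parse the same container before decoding: a 6-byte jump table holding three little-endian uint16 stream lengths, with the fourth stream occupying whatever remains. A standalone sketch of just that framing, with the layout taken from the loop above and `splitStreams` as a hypothetical helper:

package main

import (
	"errors"
	"fmt"
)

// splitStreams slices a huff0 4X block into its four compressed streams.
// The first 6 bytes are three little-endian uint16 lengths for streams 0-2;
// stream 3 is the remainder of the input.
func splitStreams(src []byte) (streams [4][]byte, err error) {
	if len(src) < 6+4 { // jump table plus at least one byte per stream
		return streams, errors.New("input too small")
	}
	start := 6
	for i := 0; i < 3; i++ {
		length := int(src[i*2]) | int(src[i*2+1])<<8
		if start+length >= len(src) {
			return streams, errors.New("truncated input (or invalid offset)")
		}
		streams[i] = src[start : start+length]
		start += length
	}
	streams[3] = src[start:]
	return streams, nil
}

func main() {
	// Three streams of 1, 2 and 3 bytes, then a 4-byte tail stream.
	src := []byte{
		1, 0, 2, 0, 3, 0, // jump table
		0xaa,                   // stream 0
		0xbb, 0xbb,             // stream 1
		0xcc, 0xcc, 0xcc,       // stream 2
		0xdd, 0xdd, 0xdd, 0xdd, // stream 3
	}
	s, err := splitStreams(src)
	if err != nil {
		panic(err)
	}
	for i, st := range s {
		fmt.Printf("stream %d: % x\n", i, st)
	}
}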
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:maxDecodedSize] + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + + if maxDecodedSize >= 4 { + ctx := decompress1xContext{ + pbr: &br, + out: &dst[0], + outCap: maxDecodedSize, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + tbl: &d.dt.single[0], + } + + if cpuinfo.HasBMI2() { + decompress1x_main_loop_bmi2(&ctx) + } else { + decompress1x_main_loop_amd64(&ctx) + } + if ctx.decoded == error_max_decoded_size_exeeded { + return nil, ErrMaxDecodedSizeExceeded + } + + dst = dst[:ctx.decoded] + } + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s new file mode 100644 index 000000000..dd1a5aecd --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -0,0 +1,847 @@ +// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. + +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// func decompress4x_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_main_loop_amd64(SB), $0-8 + XORQ DX, DX + + // Preload values + MOVQ ctx+0(FP), AX + MOVBQZX 8(AX), DI + MOVQ 16(AX), SI + MOVQ 48(AX), BX + MOVQ 24(AX), R9 + MOVQ 32(AX), R10 + MOVQ (AX), R11 + + // Main loop +main_loop: + MOVQ SI, R8 + CMPQ R8, BX + SETGE DL + + // br0.fillFast32() + MOVQ 32(R11), R12 + MOVBQZX 40(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill0 + MOVQ 24(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ (R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 24(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br0.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br0.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 32(R11) + MOVB R13, 40(R11) + ADDQ R9, R8 + + // br1.fillFast32() + MOVQ 80(R11), R12 + MOVBQZX 88(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill1 + MOVQ 72(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ 48(R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 72(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br1.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + 
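+
+// The fill sequences in this generated asm mirror a small Go helper. A
+// plausible rendering, assuming the bitReaderShifted layout implied by the
+// annotations (in []byte, off int, value uint64, bitsRead uint8), with the
+// stream consumed backwards four bytes at a time:
+//
+//	func (b *bitReaderShifted) fillFast32() {
+//		if b.bitsRead <= 32 { // enough bits buffered already
+//			return
+//		}
+//		b.bitsRead -= 32
+//		b.off -= 4
+//		v := b.in[b.off : b.off+4]
+//		low := uint32(v[0]) | uint32(v[1])<<8 | uint32(v[2])<<16 | uint32(v[3])<<24
+//		b.value |= uint64(low) << (b.bitsRead & 63)
+//	}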
+skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br1.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 80(R11) + MOVB R13, 88(R11) + ADDQ R9, R8 + + // br2.fillFast32() + MOVQ 128(R11), R12 + MOVBQZX 136(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill2 + MOVQ 120(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ 96(R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 120(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br2.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br2.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 128(R11) + MOVB R13, 136(R11) + ADDQ R9, R8 + + // br3.fillFast32() + MOVQ 176(R11), R12 + MOVBQZX 184(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill3 + MOVQ 168(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ 144(R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 168(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br3.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br3.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 176(R11) + MOVB R13, 184(R11) + ADDQ $0x02, SI + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), SI + SHLQ $0x02, SI + MOVQ SI, 40(AX) + RET + +// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 + XORQ DX, DX + + // Preload values + MOVQ ctx+0(FP), CX + MOVBQZX 8(CX), DI + MOVQ 16(CX), BX + MOVQ 48(CX), SI + MOVQ 24(CX), R9 + MOVQ 32(CX), R10 + MOVQ (CX), R11 + + // Main loop +main_loop: + MOVQ BX, R8 + CMPQ R8, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R11), R12 + MOVBQZX 40(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill0 + MOVQ 24(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ (R11), 
R15 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 24(R11) + ORQ R15, R12 + + // exhausted = exhausted || (br0.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 32(R11) + MOVB R13, 40(R11) + ADDQ R9, R8 + + // br1.fillFast32() + MOVQ 80(R11), R12 + MOVBQZX 88(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill1 + MOVQ 72(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ 48(R11), R15 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 72(R11) + ORQ R15, R12 + + // exhausted = exhausted || (br1.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 80(R11) + MOVB R13, 88(R11) + ADDQ R9, R8 + + // br2.fillFast32() + MOVQ 128(R11), R12 + MOVBQZX 136(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill2 + MOVQ 120(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ 96(R11), R15 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 120(R11) + ORQ R15, R12 + + // exhausted = exhausted || (br2.off < 4) + CMPQ R14, $0x04 
+ SETLT AL + ORB AL, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 128(R11) + MOVB R13, 136(R11) + ADDQ R9, R8 + + // br3.fillFast32() + MOVQ 176(R11), R12 + MOVBQZX 184(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill3 + MOVQ 168(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ 144(R11), R15 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 168(R11) + ORQ R15, R12 + + // exhausted = exhausted || (br3.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 176(R11) + MOVB R13, 184(R11) + ADDQ $0x04, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress1x_main_loop_amd64(ctx *decompress1xContext) +TEXT ·decompress1x_main_loop_amd64(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exeeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + 
LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exeeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_1_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_2_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exeeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET + +// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) +// Requires: BMI2 +TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exeeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exeeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_1_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_2_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exeeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 000000000..4f6f37cb2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,295 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains 
a generic implementation of Decoder.Decompress4X. +package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[0][:]) + copy(out[dstEvery:], buf[1][:]) + copy(out[dstEvery*2:], buf[2][:]) + copy(out[dstEvery*3:], buf[3][:]) + out = out[bufoff:] + decoded += bufoff * 4 + // There must at least be 3 buffers left. 
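The generic loop above pairs `peekBitsFast` with `tlMask`: take the top `actualTableLog` bits of the left-aligned buffer, mask to the table size, and let the entry's low byte report how many bits the code really consumed. A rough standalone model of the peek, with field names assumed from the surrounding code:

package main

import "fmt"

// peekBitsFast returns the top n bits of a left-aligned 64-bit buffer,
// assuming 0 < n <= 56 as the table log is in this package.
func peekBitsFast(value uint64, n uint8) uint16 {
	return uint16(value >> ((64 - n) & 63))
}

func main() {
	value := uint64(0b1011) << 60 // next bits to decode: 1,0,1,1
	const tableLog = 3
	const tlMask = 1<<tableLog - 1
	idx := peekBitsFast(value, tableLog) & tlMask
	fmt.Printf("table index: %03b\n", idx) // 101
}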
+ if len(out) < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) 
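The `off` counter in these buffered loops is a deliberate uint8: it wraps back to zero after exactly 256 increments, so `if off == 0` doubles as "the 256-byte scratch buffer is full" without any length comparison. A toy version of that batching pattern:

package main

import "fmt"

func main() {
	var buf [256]byte
	var dst []byte
	var off uint8 // wraps to 0 after 256 writes

	for i := 0; i < 600; i++ {
		buf[off] = byte(i)
		off++
		if off == 0 { // buffer full: flush all 256 bytes at once
			dst = append(dst, buf[:]...)
		}
	}
	dst = append(dst, buf[:off]...) // final partial flush
	fmt.Println(len(dst))           // 600
}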
+ + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go index 7ec2022b6..e8ad17ad0 100644 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -8,6 +8,7 @@ import ( "fmt" "math" "math/bits" + "sync" "github.com/klauspost/compress/fse" ) @@ -116,6 +117,7 @@ type Scratch struct { nodes []nodeElt tmpOut [4][]byte fse *fse.Scratch + decPool sync.Pool // *[4][256]byte buffers. huffWeight [maxSymbolValue + 1]byte } @@ -245,6 +247,68 @@ func (c cTable) write(s *Scratch) error { return nil } +func (c cTable) estTableSize(s *Scratch) (sz int, err error) { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + sz += 1 + len(b) + return sz, nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return 0, ErrIncompressible + } + // special case, pack weights 4 bits/weight. + sz += 1 + int(maxSymbolValue/2) + return sz, nil +} + // estimateSize returns the estimated size in bytes of the input represented in the // histogram supplied. func (c cTable) estimateSize(hist []uint32) int { diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go new file mode 100644 index 000000000..3954c5121 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go @@ -0,0 +1,34 @@ +// Package cpuinfo gives runtime info about the current CPU. +// +// This is a very limited module meant for use internally +// in this project. 
For more versatile solution check +// https://github.com/klauspost/cpuid. +package cpuinfo + +// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. +func HasBMI1() bool { + return hasBMI1 +} + +// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. +func HasBMI2() bool { + return hasBMI2 +} + +// DisableBMI2 will disable BMI2, for testing purposes. +// Call returned function to restore previous state. +func DisableBMI2() func() { + old := hasBMI2 + hasBMI2 = false + return func() { + hasBMI2 = old + } +} + +// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. +func HasBMI() bool { + return HasBMI1() && HasBMI2() +} + +var hasBMI1 bool +var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go new file mode 100644 index 000000000..e802579c4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -0,0 +1,11 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package cpuinfo + +// go:noescape +func x86extensions() (bmi1, bmi2 bool) + +func init() { + hasBMI1, hasBMI2 = x86extensions() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s new file mode 100644 index 000000000..4465fbe9e --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s @@ -0,0 +1,36 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +TEXT ·x86extensions(SB), NOSPLIT, $0 + // 1. determine max EAX value + XORQ AX, AX + CPUID + + CMPQ AX, $7 + JB unsupported + + // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" + MOVQ $7, AX + MOVQ $0, CX + CPUID + + BTQ $3, BX // bit 3 = BMI1 + SETCS AL + + BTQ $8, BX // bit 8 = BMI2 + SETCS AH + + MOVB AL, bmi1+0(FP) + MOVB AH, bmi2+1(FP) + RET + +unsupported: + XORQ AX, AX + MOVB AL, bmi1+0(FP) + MOVB AL, bmi2+1(FP) + RET diff --git a/vendor/github.com/klauspost/compress/snappy/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE similarity index 100% rename from vendor/github.com/klauspost/compress/snappy/LICENSE rename to vendor/github.com/klauspost/compress/internal/snapref/LICENSE diff --git a/vendor/github.com/klauspost/compress/snappy/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go similarity index 85% rename from vendor/github.com/klauspost/compress/snappy/decode.go rename to vendor/github.com/klauspost/compress/internal/snapref/decode.go index 72efb0353..40796a49d 100644 --- a/vendor/github.com/klauspost/compress/snappy/decode.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package snappy +package snapref import ( "encoding/binary" @@ -52,6 +52,8 @@ const ( // Otherwise, a newly allocated slice will be returned. // // The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. func Decode(dst, src []byte) ([]byte, error) { dLen, s, err := decodedLen(src) if err != nil { @@ -83,6 +85,8 @@ func NewReader(r io.Reader) *Reader { } // Reader is an io.Reader that can read Snappy-compressed bytes. 
+// +// Reader handles the Snappy stream format, not the Snappy block format. type Reader struct { r io.Reader err error @@ -114,32 +118,23 @@ func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { return true } -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - for { - if r.i < r.j { - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil - } +func (r *Reader) fill() error { + for r.i >= r.j { if !r.readFull(r.buf[:4], true) { - return 0, r.err + return r.err } chunkType := r.buf[0] if !r.readHeader { if chunkType != chunkTypeStreamIdentifier { r.err = ErrCorrupt - return 0, r.err + return r.err } r.readHeader = true } chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 if chunkLen > len(r.buf) { r.err = ErrUnsupported - return 0, r.err + return r.err } // The chunk types are specified at @@ -149,11 +144,11 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.2. Compressed data (chunk type 0x00). if chunkLen < checksumSize { r.err = ErrCorrupt - return 0, r.err + return r.err } buf := r.buf[:chunkLen] if !r.readFull(buf, false) { - return 0, r.err + return r.err } checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 buf = buf[checksumSize:] @@ -161,19 +156,19 @@ func (r *Reader) Read(p []byte) (int, error) { n, err := DecodedLen(buf) if err != nil { r.err = err - return 0, r.err + return r.err } if n > len(r.decoded) { r.err = ErrCorrupt - return 0, r.err + return r.err } if _, err := Decode(r.decoded, buf); err != nil { r.err = err - return 0, r.err + return r.err } if crc(r.decoded[:n]) != checksum { r.err = ErrCorrupt - return 0, r.err + return r.err } r.i, r.j = 0, n continue @@ -182,25 +177,25 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.3. Uncompressed data (chunk type 0x01). if chunkLen < checksumSize { r.err = ErrCorrupt - return 0, r.err + return r.err } buf := r.buf[:checksumSize] if !r.readFull(buf, false) { - return 0, r.err + return r.err } checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 // Read directly into r.decoded instead of via r.buf. n := chunkLen - checksumSize if n > len(r.decoded) { r.err = ErrCorrupt - return 0, r.err + return r.err } if !r.readFull(r.decoded[:n], false) { - return 0, r.err + return r.err } if crc(r.decoded[:n]) != checksum { r.err = ErrCorrupt - return 0, r.err + return r.err } r.i, r.j = 0, n continue @@ -209,15 +204,15 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.1. Stream identifier (chunk type 0xff). if chunkLen != len(magicBody) { r.err = ErrCorrupt - return 0, r.err + return r.err } if !r.readFull(r.buf[:len(magicBody)], false) { - return 0, r.err + return r.err } for i := 0; i < len(magicBody); i++ { if r.buf[i] != magicBody[i] { r.err = ErrCorrupt - return 0, r.err + return r.err } } continue @@ -226,12 +221,44 @@ func (r *Reader) Read(p []byte) (int, error) { if chunkType <= 0x7f { // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). r.err = ErrUnsupported - return 0, r.err + return r.err } // Section 4.4 Padding (chunk type 0xfe). // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). if !r.readFull(r.buf[:chunkLen], false) { - return 0, r.err + return r.err } } + + return nil +} + +// Read satisfies the io.Reader interface. 
+func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. +func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil } diff --git a/vendor/github.com/klauspost/compress/snappy/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go similarity index 97% rename from vendor/github.com/klauspost/compress/snappy/decode_other.go rename to vendor/github.com/klauspost/compress/internal/snapref/decode_other.go index 94a96c5d7..77395a6b8 100644 --- a/vendor/github.com/klauspost/compress/snappy/decode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go @@ -2,9 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64 appengine !gc noasm - -package snappy +package snapref // decode writes the decoding of src to dst. It assumes that the varint-encoded // length of the decompressed bytes has already been read, and that len(dst) @@ -87,7 +85,7 @@ func decode(dst, src []byte) int { } // Copy from an earlier sub-slice of dst to a later sub-slice. // If no overlap, use the built-in copy: - if offset > length { + if offset >= length { copy(dst[d:d+length], dst[d-offset:]) d += length continue diff --git a/vendor/github.com/klauspost/compress/snappy/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go similarity index 98% rename from vendor/github.com/klauspost/compress/snappy/encode.go rename to vendor/github.com/klauspost/compress/internal/snapref/encode.go index 8d393e904..13c6040a5 100644 --- a/vendor/github.com/klauspost/compress/snappy/encode.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package snappy +package snapref import ( "encoding/binary" @@ -15,6 +15,8 @@ import ( // Otherwise, a newly allocated slice will be returned. // // The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. func Encode(dst, src []byte) []byte { if n := MaxEncodedLen(len(src)); n < 0 { panic(ErrTooLarge) @@ -139,6 +141,8 @@ func NewBufferedWriter(w io.Writer) *Writer { } // Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. type Writer struct { w io.Writer err error diff --git a/vendor/github.com/klauspost/compress/snappy/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go similarity index 99% rename from vendor/github.com/klauspost/compress/snappy/encode_other.go rename to vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index dbcae905e..511bba65d 100644 --- a/vendor/github.com/klauspost/compress/snappy/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -2,9 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
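The `offset > length` to `offset >= length` change in decode_other.go above is subtle: Go's built-in copy has memmove semantics, which is only what an LZ decoder wants when the source range ends at or before the write position, i.e. offset >= length (the old condition needlessly sent the offset == length case to the slow path). A true overlap (offset < length) must instead copy forward byte by byte so the match replicates the bytes it is writing. A sketch of both cases, with `lzCopy` as a hypothetical helper:

package main

import "fmt"

// lzCopy appends a back-reference of the given length starting offset bytes
// back, with the replicating semantics LZ decoders need.
func lzCopy(dst []byte, offset, length int) []byte {
	d := len(dst)
	dst = append(dst, make([]byte, length)...)
	if offset >= length {
		// No overlap: the source ends at or before d, plain copy is correct.
		copy(dst[d:d+length], dst[d-offset:])
		return dst
	}
	// Overlapping match: copy forward one byte at a time so the
	// just-written bytes feed back in, replicating the pattern.
	for i := 0; i < length; i++ {
		dst[d+i] = dst[d-offset+i]
	}
	return dst
}

func main() {
	dst := []byte("ab")
	dst = lzCopy(dst, 2, 6)  // offset 2 < length 6: pattern replication
	fmt.Println(string(dst)) // abababab
}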
-// +build !amd64 appengine !gc noasm - -package snappy +package snapref func load32(b []byte, i int) uint32 { b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. diff --git a/vendor/github.com/klauspost/compress/snappy/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go similarity index 97% rename from vendor/github.com/klauspost/compress/snappy/snappy.go rename to vendor/github.com/klauspost/compress/internal/snapref/snappy.go index 74a36689e..34d01f4aa 100644 --- a/vendor/github.com/klauspost/compress/snappy/snappy.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package snappy implements the Snappy compression format. It aims for very +// Package snapref implements the Snappy compression format. It aims for very // high speeds and reasonable compression. // // There are actually two Snappy formats: block and stream. They are related, @@ -17,7 +17,7 @@ // // The canonical, C++ implementation is at https://github.com/google/snappy and // it only implements the block format. -package snappy +package snapref import ( "hash/crc32" diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod new file mode 100644 index 000000000..2263853fc --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -0,0 +1,4 @@ +module github.com/klauspost/compress + +go 1.16 + diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/klauspost/compress/snappy/.gitignore b/vendor/github.com/klauspost/compress/snappy/.gitignore deleted file mode 100644 index 042091d9b..000000000 --- a/vendor/github.com/klauspost/compress/snappy/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -cmd/snappytool/snappytool -testdata/bench - -# These explicitly listed benchmark data files are for an obsolete version of -# snappy_test.go. -testdata/alice29.txt -testdata/asyoulik.txt -testdata/fireworks.jpeg -testdata/geo.protodata -testdata/html -testdata/html_x_4 -testdata/kppkn.gtb -testdata/lcet10.txt -testdata/paper-100k.pdf -testdata/plrabn12.txt -testdata/urls.10K diff --git a/vendor/github.com/klauspost/compress/snappy/AUTHORS b/vendor/github.com/klauspost/compress/snappy/AUTHORS deleted file mode 100644 index bcfa19520..000000000 --- a/vendor/github.com/klauspost/compress/snappy/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of Snappy-Go authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. - -Damian Gryski -Google Inc. -Jan Mercl <0xjnml@gmail.com> -Rodolfo Carvalho -Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS deleted file mode 100644 index 931ae3160..000000000 --- a/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS +++ /dev/null @@ -1,37 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the Snappy-Go repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. 
For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). -# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# http://code.google.com/legal/individual-cla-v1.0.html -# http://code.google.com/legal/corporate-cla-v1.0.html -# -# The agreement for individuals can be filled out on the web. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name - -# Please keep the list sorted. - -Damian Gryski -Jan Mercl <0xjnml@gmail.com> -Kai Backman -Marc-Antoine Ruel -Nigel Tao -Rob Pike -Rodolfo Carvalho -Russ Cox -Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/README b/vendor/github.com/klauspost/compress/snappy/README deleted file mode 100644 index cea12879a..000000000 --- a/vendor/github.com/klauspost/compress/snappy/README +++ /dev/null @@ -1,107 +0,0 @@ -The Snappy compression format in the Go programming language. - -To download and install from source: -$ go get github.com/golang/snappy - -Unless otherwise noted, the Snappy-Go source files are distributed -under the BSD-style license found in the LICENSE file. - - - -Benchmarks. - -The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten -or so files, the same set used by the C++ Snappy code (github.com/google/snappy -and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ -3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: - -"go test -test.bench=." - -_UFlat0-8 2.19GB/s ± 0% html -_UFlat1-8 1.41GB/s ± 0% urls -_UFlat2-8 23.5GB/s ± 2% jpg -_UFlat3-8 1.91GB/s ± 0% jpg_200 -_UFlat4-8 14.0GB/s ± 1% pdf -_UFlat5-8 1.97GB/s ± 0% html4 -_UFlat6-8 814MB/s ± 0% txt1 -_UFlat7-8 785MB/s ± 0% txt2 -_UFlat8-8 857MB/s ± 0% txt3 -_UFlat9-8 719MB/s ± 1% txt4 -_UFlat10-8 2.84GB/s ± 0% pb -_UFlat11-8 1.05GB/s ± 0% gaviota - -_ZFlat0-8 1.04GB/s ± 0% html -_ZFlat1-8 534MB/s ± 0% urls -_ZFlat2-8 15.7GB/s ± 1% jpg -_ZFlat3-8 740MB/s ± 3% jpg_200 -_ZFlat4-8 9.20GB/s ± 1% pdf -_ZFlat5-8 991MB/s ± 0% html4 -_ZFlat6-8 379MB/s ± 0% txt1 -_ZFlat7-8 352MB/s ± 0% txt2 -_ZFlat8-8 396MB/s ± 1% txt3 -_ZFlat9-8 327MB/s ± 1% txt4 -_ZFlat10-8 1.33GB/s ± 1% pb -_ZFlat11-8 605MB/s ± 1% gaviota - - - -"go test -test.bench=. 
-tags=noasm" - -_UFlat0-8 621MB/s ± 2% html -_UFlat1-8 494MB/s ± 1% urls -_UFlat2-8 23.2GB/s ± 1% jpg -_UFlat3-8 1.12GB/s ± 1% jpg_200 -_UFlat4-8 4.35GB/s ± 1% pdf -_UFlat5-8 609MB/s ± 0% html4 -_UFlat6-8 296MB/s ± 0% txt1 -_UFlat7-8 288MB/s ± 0% txt2 -_UFlat8-8 309MB/s ± 1% txt3 -_UFlat9-8 280MB/s ± 1% txt4 -_UFlat10-8 753MB/s ± 0% pb -_UFlat11-8 400MB/s ± 0% gaviota - -_ZFlat0-8 409MB/s ± 1% html -_ZFlat1-8 250MB/s ± 1% urls -_ZFlat2-8 12.3GB/s ± 1% jpg -_ZFlat3-8 132MB/s ± 0% jpg_200 -_ZFlat4-8 2.92GB/s ± 0% pdf -_ZFlat5-8 405MB/s ± 1% html4 -_ZFlat6-8 179MB/s ± 1% txt1 -_ZFlat7-8 170MB/s ± 1% txt2 -_ZFlat8-8 189MB/s ± 1% txt3 -_ZFlat9-8 164MB/s ± 1% txt4 -_ZFlat10-8 479MB/s ± 1% pb -_ZFlat11-8 270MB/s ± 1% gaviota - - - -For comparison (Go's encoded output is byte-for-byte identical to C++'s), here -are the numbers from C++ Snappy's - -make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log - -BM_UFlat/0 2.4GB/s html -BM_UFlat/1 1.4GB/s urls -BM_UFlat/2 21.8GB/s jpg -BM_UFlat/3 1.5GB/s jpg_200 -BM_UFlat/4 13.3GB/s pdf -BM_UFlat/5 2.1GB/s html4 -BM_UFlat/6 1.0GB/s txt1 -BM_UFlat/7 959.4MB/s txt2 -BM_UFlat/8 1.0GB/s txt3 -BM_UFlat/9 864.5MB/s txt4 -BM_UFlat/10 2.9GB/s pb -BM_UFlat/11 1.2GB/s gaviota - -BM_ZFlat/0 944.3MB/s html (22.31 %) -BM_ZFlat/1 501.6MB/s urls (47.78 %) -BM_ZFlat/2 14.3GB/s jpg (99.95 %) -BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) -BM_ZFlat/4 8.3GB/s pdf (83.30 %) -BM_ZFlat/5 903.5MB/s html4 (22.52 %) -BM_ZFlat/6 336.0MB/s txt1 (57.88 %) -BM_ZFlat/7 312.3MB/s txt2 (61.91 %) -BM_ZFlat/8 353.1MB/s txt3 (54.99 %) -BM_ZFlat/9 289.9MB/s txt4 (66.26 %) -BM_ZFlat/10 1.2GB/s pb (19.68 %) -BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.go b/vendor/github.com/klauspost/compress/snappy/encode_amd64.go deleted file mode 100644 index 150d91bc8..000000000 --- a/vendor/github.com/klauspost/compress/snappy/encode_amd64.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -package snappy - -// emitLiteral has the same semantics as in encode_other.go. -// -//go:noescape -func emitLiteral(dst, lit []byte) int - -// emitCopy has the same semantics as in encode_other.go. -// -//go:noescape -func emitCopy(dst []byte, offset, length int) int - -// extendMatch has the same semantics as in encode_other.go. -// -//go:noescape -func extendMatch(src []byte, i, j int) int - -// encodeBlock has the same semantics as in encode_other.go. -// -//go:noescape -func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.s b/vendor/github.com/klauspost/compress/snappy/encode_amd64.s deleted file mode 100644 index adfd979fe..000000000 --- a/vendor/github.com/klauspost/compress/snappy/encode_amd64.s +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a -// Go toolchain regression. 
See https://github.com/golang/go/issues/15426 and -// https://github.com/golang/snappy/issues/29 -// -// As a workaround, the package was built with a known good assembler, and -// those instructions were disassembled by "objdump -d" to yield the -// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 -// style comments, in AT&T asm syntax. Note that rsp here is a physical -// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). -// The instructions were then encoded as "BYTE $0x.." sequences, which assemble -// fine on Go 1.6. - -// The asm code generally follows the pure Go code in encode_other.go, except -// where marked with a "!!!". - -// ---------------------------------------------------------------------------- - -// func emitLiteral(dst, lit []byte) int -// -// All local variables fit into registers. The register allocation: -// - AX len(lit) -// - BX n -// - DX return value -// - DI &dst[i] -// - R10 &lit[0] -// -// The 24 bytes of stack space is to call runtime·memmove. -// -// The unusual register allocation of local variables, such as R10 for the -// source pointer, matches the allocation used at the call site in encodeBlock, -// which makes it easier to manually inline this function. -TEXT ·emitLiteral(SB), NOSPLIT, $24-56 - MOVQ dst_base+0(FP), DI - MOVQ lit_base+24(FP), R10 - MOVQ lit_len+32(FP), AX - MOVQ AX, DX - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT oneByte - CMPL BX, $256 - JLT twoBytes - -threeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - ADDQ $3, DX - JMP memmove - -twoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - ADDQ $2, DX - JMP memmove - -oneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - ADDQ $1, DX - -memmove: - MOVQ DX, ret+48(FP) - - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - CALL runtime·memmove(SB) - RET - -// ---------------------------------------------------------------------------- - -// func emitCopy(dst []byte, offset, length int) int -// -// All local variables fit into registers. The register allocation: -// - AX length -// - SI &dst[0] -// - DI &dst[i] -// - R11 offset -// -// The unusual register allocation of local variables, such as R11 for the -// offset, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·emitCopy(SB), NOSPLIT, $0-48 - MOVQ dst_base+0(FP), DI - MOVQ DI, SI - MOVQ offset+24(FP), R11 - MOVQ length+32(FP), AX - -loop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT step1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP loop0 - -step1: - // if length > 64 { etc } - CMPL AX, $64 - JLE step2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -step2: - // if length >= 12 || offset >= 2048 { goto step3 } - CMPL AX, $12 - JGE step3 - CMPL R11, $2048 - JGE step3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -step3: - // Emit the remaining copy, encoded as 3 bytes. 
- SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func extendMatch(src []byte, i, j int) int -// -// All local variables fit into registers. The register allocation: -// - DX &src[0] -// - SI &src[j] -// - R13 &src[len(src) - 8] -// - R14 &src[len(src)] -// - R15 &src[i] -// -// The unusual register allocation of local variables, such as R15 for a source -// pointer, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·extendMatch(SB), NOSPLIT, $0-48 - MOVQ src_base+0(FP), DX - MOVQ src_len+8(FP), R14 - MOVQ i+24(FP), R15 - MOVQ j+32(FP), SI - ADDQ DX, R14 - ADDQ DX, R15 - ADDQ DX, SI - MOVQ R14, R13 - SUBQ $8, R13 - -cmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA cmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE bsf - ADDQ $8, R15 - ADDQ $8, SI - JMP cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -cmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE extendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE extendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP cmp1 - -extendMatchEnd: - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func encodeBlock(dst, src []byte) (d int) -// -// All local variables fit into registers, other than "var table". The register -// allocation: -// - AX . . -// - BX . . -// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). -// - DX 64 &src[0], tableSize -// - SI 72 &src[s] -// - DI 80 &dst[d] -// - R9 88 sLimit -// - R10 . &src[nextEmit] -// - R11 96 prevHash, currHash, nextHash, offset -// - R12 104 &src[base], skip -// - R13 . &src[nextS], &src[len(src) - 8] -// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x -// - R15 112 candidate -// -// The second column (56, 64, etc) is the stack offset to spill the registers -// when calling other functions. We could pack this slightly tighter, but it's -// simpler to have a dedicated spill map independent of the function called. -// -// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An -// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill -// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. 
-TEXT ·encodeBlock(SB), 0, $32888-56 - MOVQ dst_base+0(FP), DI - MOVQ src_base+24(FP), SI - MOVQ src_len+32(FP), R14 - - // shift, tableSize := uint32(32-8), 1<<8 - MOVQ $24, CX - MOVQ $256, DX - -calcShift: - // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - // shift-- - // } - CMPQ DX, $16384 - JGE varTable - CMPQ DX, R14 - JGE varTable - SUBQ $1, CX - SHLQ $1, DX - JMP calcShift - -varTable: - // var table [maxTableSize]uint16 - // - // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU - // writes 16 bytes, so we can do only tableSize/8 writes instead of the - // 2048 writes that would zero-initialize all of table's 32768 bytes. - SHRQ $3, DX - LEAQ table-32768(SP), BX - PXOR X0, X0 - -memclr: - MOVOU X0, 0(BX) - ADDQ $16, BX - SUBQ $1, DX - JNZ memclr - - // !!! DX = &src[0] - MOVQ SI, DX - - // sLimit := len(src) - inputMargin - MOVQ R14, R9 - SUBQ $15, R9 - - // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't - // change for the rest of the function. - MOVQ CX, 56(SP) - MOVQ DX, 64(SP) - MOVQ R9, 88(SP) - - // nextEmit := 0 - MOVQ DX, R10 - - // s := 1 - ADDQ $1, SI - - // nextHash := hash(load32(src, s), shift) - MOVL 0(SI), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - -outer: - // for { etc } - - // skip := 32 - MOVQ $32, R12 - - // nextS := s - MOVQ SI, R13 - - // candidate := 0 - MOVQ $0, R15 - -inner0: - // for { etc } - - // s := nextS - MOVQ R13, SI - - // bytesBetweenHashLookups := skip >> 5 - MOVQ R12, R14 - SHRQ $5, R14 - - // nextS = s + bytesBetweenHashLookups - ADDQ R14, R13 - - // skip += bytesBetweenHashLookups - ADDQ R14, R12 - - // if nextS > sLimit { goto emitRemainder } - MOVQ R13, AX - SUBQ DX, AX - CMPQ AX, R9 - JA emitRemainder - - // candidate = int(table[nextHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[nextHash] = uint16(s) - MOVQ SI, AX - SUBQ DX, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // nextHash = hash(load32(src, nextS), shift) - MOVL 0(R13), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // if load32(src, s) != load32(src, candidate) { continue } break - MOVL 0(SI), AX - MOVL (DX)(R15*1), BX - CMPL AX, BX - JNE inner0 - -fourByteMatch: - // As per the encode_other.go code: - // - // A 4-byte match has been found. We'll later see etc. - - // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment - // on inputMargin in encode.go. - MOVQ SI, AX - SUBQ R10, AX - CMPQ AX, $16 - JLE emitLiteralFastPath - - // ---------------------------------------- - // Begin inline of the emitLiteral call. - // - // d += emitLiteral(dst[d:], src[nextEmit:s]) - - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT inlineEmitLiteralOneByte - CMPL BX, $256 - JLT inlineEmitLiteralTwoBytes - -inlineEmitLiteralThreeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralTwoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralOneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - -inlineEmitLiteralMemmove: - // Spill local variables (registers) onto the stack; call; unspill. 
- // - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". - MOVQ SI, 72(SP) - MOVQ DI, 80(SP) - MOVQ R15, 112(SP) - CALL runtime·memmove(SB) - MOVQ 56(SP), CX - MOVQ 64(SP), DX - MOVQ 72(SP), SI - MOVQ 80(SP), DI - MOVQ 88(SP), R9 - MOVQ 112(SP), R15 - JMP inner1 - -inlineEmitLiteralEnd: - // End inline of the emitLiteral call. - // ---------------------------------------- - -emitLiteralFastPath: - // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". - MOVB AX, BX - SUBB $1, BX - SHLB $2, BX - MOVB BX, (DI) - ADDQ $1, DI - - // !!! Implement the copy from lit to dst as a 16-byte load and store. - // (Encode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only len(lit) bytes, but that's - // OK. Subsequent iterations will fix up the overrun. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(R10), X0 - MOVOU X0, 0(DI) - ADDQ AX, DI - -inner1: - // for { etc } - - // base := s - MOVQ SI, R12 - - // !!! offset := base - candidate - MOVQ R12, R11 - SUBQ R15, R11 - SUBQ DX, R11 - - // ---------------------------------------- - // Begin inline of the extendMatch call. - // - // s = extendMatch(src, candidate+4, s+4) - - // !!! R14 = &src[len(src)] - MOVQ src_len+32(FP), R14 - ADDQ DX, R14 - - // !!! R13 = &src[len(src) - 8] - MOVQ R14, R13 - SUBQ $8, R13 - - // !!! R15 = &src[candidate + 4] - ADDQ $4, R15 - ADDQ DX, R15 - - // !!! s += 4 - ADDQ $4, SI - -inlineExtendMatchCmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA inlineExtendMatchCmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE inlineExtendMatchBSF - ADDQ $8, R15 - ADDQ $8, SI - JMP inlineExtendMatchCmp8 - -inlineExtendMatchBSF: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - JMP inlineExtendMatchEnd - -inlineExtendMatchCmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE inlineExtendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE inlineExtendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP inlineExtendMatchCmp1 - -inlineExtendMatchEnd: - // End inline of the extendMatch call. - // ---------------------------------------- - - // ---------------------------------------- - // Begin inline of the emitCopy call. - // - // d += emitCopy(dst[d:], base-candidate, s-base) - - // !!! length := s - base - MOVQ SI, AX - SUBQ R12, AX - -inlineEmitCopyLoop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT inlineEmitCopyStep1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP inlineEmitCopyLoop0 - -inlineEmitCopyStep1: - // if length > 64 { etc } - CMPL AX, $64 - JLE inlineEmitCopyStep2 - - // Emit a length 60 copy, encoded as 3 bytes. 
- MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -inlineEmitCopyStep2: - // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } - CMPL AX, $12 - JGE inlineEmitCopyStep3 - CMPL R11, $2048 - JGE inlineEmitCopyStep3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - JMP inlineEmitCopyEnd - -inlineEmitCopyStep3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - -inlineEmitCopyEnd: - // End inline of the emitCopy call. - // ---------------------------------------- - - // nextEmit = s - MOVQ SI, R10 - - // if s >= sLimit { goto emitRemainder } - MOVQ SI, AX - SUBQ DX, AX - CMPQ AX, R9 - JAE emitRemainder - - // As per the encode_other.go code: - // - // We could immediately etc. - - // x := load64(src, s-1) - MOVQ -1(SI), R14 - - // prevHash := hash(uint32(x>>0), shift) - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // table[prevHash] = uint16(s-1) - MOVQ SI, AX - SUBQ DX, AX - SUBQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // currHash := hash(uint32(x>>8), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // candidate = int(table[currHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[currHash] = uint16(s) - ADDQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // if uint32(x>>8) == load32(src, candidate) { continue } - MOVL (DX)(R15*1), BX - CMPL R14, BX - JEQ inner1 - - // nextHash = hash(uint32(x>>16), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // s++ - ADDQ $1, SI - - // break out of the inner1 for loop, i.e. continue the outer loop. - JMP outer - -emitRemainder: - // if nextEmit < len(src) { etc } - MOVQ src_len+32(FP), AX - ADDQ DX, AX - CMPQ R10, AX - JEQ encodeBlockEnd - - // d += emitLiteral(dst[d:], src[nextEmit:]) - // - // Push args. - MOVQ DI, 0(SP) - MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ R10, 24(SP) - SUBQ R10, AX - MOVQ AX, 32(SP) - MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. - - // Spill local variables (registers) onto the stack; call; unspill. - MOVQ DI, 80(SP) - CALL ·emitLiteral(SB) - MOVQ 80(SP), DI - - // Finish the "d +=" part of "d += emitLiteral(etc)". - ADDQ 48(SP), DI - -encodeBlockEnd: - MOVQ dst_base+0(FP), AX - SUBQ AX, DI - MOVQ DI, d+48(FP) - RET diff --git a/vendor/github.com/klauspost/compress/snappy/runbench.cmd b/vendor/github.com/klauspost/compress/snappy/runbench.cmd deleted file mode 100644 index d24eb4b47..000000000 --- a/vendor/github.com/klauspost/compress/snappy/runbench.cmd +++ /dev/null @@ -1,2 +0,0 @@ -del old.txt -go test -bench=. >>old.txt && go test -bench=. >>old.txt && go test -bench=. 
>>old.txt && benchstat -delta-test=ttest old.txt new.txt diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index 07f7285f0..beb7fa872 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -16,30 +16,28 @@ Currently the package is heavily optimized for 64 bit processors and will be sig Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. -Godoc Documentation: https://godoc.org/github.com/klauspost/compress/zstd - +[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd) ## Compressor ### Status: STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively -used by several projects. This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), -kindly supplied by [fuzzit.dev](https://fuzzit.dev/). +used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates. There may still be specific combinations of data types/size/settings that could lead to edge cases, so as always, testing is recommended. For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. -The "Fastest" compression ratio is roughly equivalent to zstd level 1. -The "Default" compression ratio is roughly equivalent to zstd level 3 (default). +* The "Fastest" compression ratio is roughly equivalent to zstd level 1. +* The "Default" compression ratio is roughly equivalent to zstd level 3 (default). +* The "Better" compression ratio is roughly equivalent to zstd level 7. +* The "Best" compression ratio is roughly equivalent to zstd level 11. In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. The compression ratio compared to stdlib is around level 3, but usually 3x as fast. -Compared to cgo zstd, the speed is around level 3 (default), but compression slightly worse, between level 1&2. - ### Usage @@ -54,11 +52,11 @@ To create a writer with default options, do like this: ```Go // Compress input to output. func Compress(in io.Reader, out io.Writer) error { - w, err := NewWriter(output) + enc, err := zstd.NewWriter(out) if err != nil { return err } - _, err := io.Copy(w, input) + _, err = io.Copy(enc, in) if err != nil { enc.Close() return err @@ -80,6 +78,9 @@ of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is in the future. So if you want to limit concurrency for future updates, specify the concurrency you would like. +If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)` +which will compress input as each block is completed, blocking on writes until each has completed. + You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined compression settings can be specified. @@ -106,7 +107,8 @@ and seems to ignore concatenated streams, even though [it is part of the spec](h For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`. `EncodeAll` will encode all input in src and append it to dst. -This function can be called concurrently, but each call will only run on a single goroutine. +This function can be called concurrently. 
+Each call will only run on the same goroutine as the caller. Encoded blocks can be concatenated and the result will be the combined input stream. Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`. @@ -140,7 +142,7 @@ I have collected some speed examples to compare speed and compression against other compressors. * `file` is the input file. * `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library. -* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default". +* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default"; 3 is "better", 4 is "best". * `insize`/`outsize` is the input/output size. * `millis` is the number of milliseconds used for compression. * `mb/s` is megabytes (2^20 bytes) per second. @@ -151,84 +153,106 @@ http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip This package: file out level insize outsize millis mb/s -silesia.tar zskp 1 211947520 73101992 643 313.87 -silesia.tar zskp 2 211947520 67504318 969 208.38 -silesia.tar zskp 3 211947520 65177448 1899 106.44 +silesia.tar zskp 1 211947520 73821326 634 318.47 +silesia.tar zskp 2 211947520 67655404 1508 133.96 +silesia.tar zskp 3 211947520 64746933 3000 67.37 +silesia.tar zskp 4 211947520 60073508 16926 11.94 cgo zstd: silesia.tar zstd 1 211947520 73605392 543 371.56 silesia.tar zstd 3 211947520 66793289 864 233.68 silesia.tar zstd 6 211947520 62916450 1913 105.66 +silesia.tar zstd 9 211947520 60212393 5063 39.92 gzip, stdlib/this package: -silesia.tar gzstd 1 211947520 80007735 1654 122.21 -silesia.tar gzkp 1 211947520 80369488 1168 173.06 +silesia.tar gzstd 1 211947520 80007735 1498 134.87 +silesia.tar gzkp 1 211947520 80088272 1009 200.31 GOB stream of binary data. Highly compressible. https://files.klauspost.com/compress/gob-stream.7z file out level insize outsize millis mb/s -gob-stream zskp 1 1911399616 235022249 3088 590.30 -gob-stream zskp 2 1911399616 205669791 3786 481.34 -gob-stream zskp 3 1911399616 185792019 9324 195.48 +gob-stream zskp 1 1911399616 233948096 3230 564.34 +gob-stream zskp 2 1911399616 203997694 4997 364.73 +gob-stream zskp 3 1911399616 173526523 13435 135.68 +gob-stream zskp 4 1911399616 162195235 47559 38.33 + gob-stream zstd 1 1911399616 249810424 2637 691.26 gob-stream zstd 3 1911399616 208192146 3490 522.31 gob-stream zstd 6 1911399616 193632038 6687 272.56 -gob-stream gzstd 1 1911399616 357382641 10251 177.82 -gob-stream gzkp 1 1911399616 362156523 5695 320.08 +gob-stream zstd 9 1911399616 177620386 16175 112.70 + +gob-stream gzstd 1 1911399616 357382013 9046 201.49 +gob-stream gzkp 1 1911399616 359136669 4885 373.08 The test data for the Large Text Compression Benchmark is the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
http://mattmahoney.net/dc/textdata.html file out level insize outsize millis mb/s -enwik9 zskp 1 1000000000 343848582 3609 264.18 -enwik9 zskp 2 1000000000 317276632 5746 165.97 -enwik9 zskp 3 1000000000 294540704 11725 81.34 +enwik9 zskp 1 1000000000 343833605 3687 258.64 +enwik9 zskp 2 1000000000 317001237 7672 124.29 +enwik9 zskp 3 1000000000 291915823 15923 59.89 +enwik9 zskp 4 1000000000 261710291 77697 12.27 + enwik9 zstd 1 1000000000 358072021 3110 306.65 enwik9 zstd 3 1000000000 313734672 4784 199.35 enwik9 zstd 6 1000000000 295138875 10290 92.68 -enwik9 gzstd 1 1000000000 382578136 9604 99.30 -enwik9 gzkp 1 1000000000 383825945 6544 145.73 +enwik9 zstd 9 1000000000 278348700 28549 33.40 + +enwik9 gzstd 1 1000000000 382578136 8608 110.78 +enwik9 gzkp 1 1000000000 382781160 5628 169.45 Highly compressible JSON file. https://files.klauspost.com/compress/github-june-2days-2019.json.zst file out level insize outsize millis mb/s -github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40 -github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96 -github-june-2days-2019.json zskp 3 6273951764 537511906 29252 204.54 +github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 +github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 +github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 +github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 + github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 -github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79 -github-june-2days-2019.json gzkp 1 6273951764 1128755542 19236 311.03 +github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 + +github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 +github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 VM Image, Linux mint with a few installed applications: https://files.klauspost.com/compress/rawstudio-mint14.7z file out level insize outsize millis mb/s -rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84 -rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07 -rawstudio-mint14.tar zskp 3 8558382592 3224594213 71751 113.75 +rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 +rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 +rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 +rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 + rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 -rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40 -rawstudio-mint14.tar gzkp 1 8558382592 3970463184 41749 195.49 +rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 + +rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 +rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 CSV data: https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst file out level insize outsize millis mb/s -nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35 -nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44 -nyc-taxi-data-10M.csv zskp 3 3325605752 538490114 19880 159.53 +nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 +nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 
+nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 +nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98 + nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 -nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83 -nyc-taxi-data-10M.csv gzkp 1 3325605752 924718719 16388 193.53 +nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 + +nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11 +nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 ``` ## Decompressor @@ -263,8 +287,13 @@ func Decompress(in io.Reader, out io.Writer) error { } } ``` -It is important to use the "Close" function when you no longer need the Reader to stop running goroutines. -See "Allocation-less operation" below. +When running with default settings, it is important to use the "Close" function +when you no longer need the Reader, so that its running goroutines are stopped. +Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream. + +Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput. +However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)`, which will decompress data +only as it is requested. For decoding buffers, it could look something like this: @@ -273,7 +302,7 @@ import "github.com/klauspost/compress/zstd" // Create a reader that caches decompressors. // For this operation type we supply a nil Reader. -var decoder, _ = zstd.NewReader(nil) +var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0)) // Decompress a buffer. We don't supply a destination buffer, // so it will be allocated by the decoder. @@ -283,9 +312,12 @@ func Decompress(src []byte) ([]byte, error) { ``` Both of these cases should provide the functionality needed. -The decoder can be used for *concurrent* decompression of multiple buffers. +The decoder can be used for *concurrent* decompression of multiple buffers. +By default 4 decompressors will be created. + It will only allow a certain number of concurrent operations to run. -To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder. +To tweak that yourself, use the `WithDecoderConcurrency(n)` option when creating the decoder. +It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders. ### Dictionaries @@ -337,70 +369,71 @@ In this case no unneeded allocations should be made. The buffer decoder does everything on the same goroutine and does nothing concurrently. It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. -The stream decoder operates on +The stream decoder will create goroutines that: -* One goroutine reads input and splits the input to several block decoders. -* A number of decoders will decode blocks. -* A goroutine coordinates these blocks and sends history from one to the next. +1) Read input and split it into blocks. +2) Decompress literals. +3) Decompress sequences. +4) Reconstruct the output stream. So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. +The concurrency level will, for streams, determine how many blocks ahead the decompression will start. + Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency.
-In practice this means that concurrency is often limited to utilizing about 2 cores effectively. - - +In practice this means that concurrency is often limited to utilizing about 3 cores effectively. + ### Benchmarks -These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd). - The first two are streaming decodes and the last are smaller inputs. - + +Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used. + ``` -BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op -BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op - -BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op -BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op - -Concurrent performance: - -BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op - -BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op +BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op +BenchmarkDecoderEnwik9-32 1 1271809000 
ns/op 786.28 MB/s 72048 B/op 52 allocs/op + +Concurrent blocks, performance: + +BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op ``` -This reflects the performance around May 2020, but this may be out of date. +This reflects the performance around May 2022, but this may be out of date. + +## Zstd inside ZIP files + +It is possible to use zstandard to compress individual files inside zip archives. +While this isn't widely supported, it can be useful for internal files. + +To support the compression and decompression of these files, you must register a compressor and a decompressor. + +It is highly recommended to register the (de)compressors on individual zip Readers/Writers and NOT +to use the global registration functions. The main reason for this is that 2 registrations from +different packages will result in a panic. + +It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip +files concurrently, and using a single instance will allow reusing some resources. + +See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for +how to compress and decompress files inside zip archives. # Contributions Contributions are always welcome. For new features/fixes, remember to add tests and for performance enhancements include benchmarks. -For sending files for reproducing errors use a service like [goobox](https://goobox.io/#/upload) or similar to share your files. - For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare.
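The vendored README above describes a buffer-oriented API (`EncodeAll`/`DecodeAll`) alongside the streaming one. As a minimal sketch of how the two calls compose, here is a round trip that uses only identifiers the README itself documents (`zstd.NewWriter`, `Encoder.EncodeAll`, `zstd.NewReader`, `Decoder.DecodeAll`, `zstd.WithDecoderConcurrency`); the sample input is illustrative only.

```Go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

// Create the encoder and decoder once and reuse them; per the README both
// can be used concurrently on multiple independent buffers.
var (
	encoder, _ = zstd.NewWriter(nil)                                 // nil writer: EncodeAll-only use.
	decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0)) // 0 = GOMAXPROCS decoders.
)

func main() {
	src := []byte("hello, hello, hello zstd")

	// EncodeAll compresses src and appends the result to its second argument.
	compressed := encoder.EncodeAll(src, nil)

	// DecodeAll decompresses and appends to its second argument.
	out, err := decoder.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes -> %d compressed -> %q\n", len(src), len(compressed), out)
}
```

As the README notes, the outputs of separate `EncodeAll` calls may also be concatenated and decoded as a single stream.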
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go index 854458537..97299d499 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -7,6 +7,7 @@ package zstd import ( "encoding/binary" "errors" + "fmt" "io" "math/bits" ) @@ -50,16 +51,16 @@ func (b *bitReader) getBits(n uint8) int { if n == 0 /*|| b.bitsRead >= 64 */ { return 0 } - return b.getBitsFast(n) + return int(b.get32BitsFast(n)) } -// getBitsFast requires that at least one bit is requested every time. +// get32BitsFast requires that at least one bit is requested every time. // There are no checks if the buffer is filled. -func (b *bitReader) getBitsFast(n uint8) int { +func (b *bitReader) get32BitsFast(n uint8) uint32 { const regMask = 64 - 1 v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) b.bitsRead += n - return int(v) + return v } // fillFast() will make sure at least 32 bits are available. @@ -125,6 +126,9 @@ func (b *bitReader) remain() uint { func (b *bitReader) close() error { // Release reference. b.in = nil + if !b.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go index 303ae90f9..78b3c61be 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -5,8 +5,6 @@ package zstd -import "fmt" - // bitWriter will write bits. // First bit will be LSB of the first byte of output. type bitWriter struct { @@ -38,7 +36,7 @@ func (b *bitWriter) addBits16NC(value uint16, bits uint8) { b.nBits += bits } -// addBits32NC will add up to 32 bits. +// addBits32NC will add up to 31 bits. // It will not check if there is space for them, // so the caller must ensure that it has flushed recently. func (b *bitWriter) addBits32NC(value uint32, bits uint8) { @@ -46,6 +44,26 @@ func (b *bitWriter) addBits32NC(value uint32, bits uint8) { b.nBits += bits } +// addBits64NC will add up to 64 bits. +// There must be space for 32 bits. +func (b *bitWriter) addBits64NC(value uint64, bits uint8) { + if bits <= 31 { + b.addBits32Clean(uint32(value), bits) + return + } + b.addBits32Clean(uint32(value), 32) + b.flush32() + b.addBits32Clean(uint32(value>>32), bits-32) +} + +// addBits32Clean will add up to 32 bits. +// It will not check if there is space for them. +// The input must not contain more bits than specified. +func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. // It will not check if there is space for them, so the caller must ensure that it has flushed recently. func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { @@ -53,80 +71,6 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { b.nBits += bits } -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. 
-func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.bitContainer >>= v << 3 - b.nBits &= 7 -} - // flush32 will flush out, so there are at least 32 bits available for writing. func (b *bitWriter) flush32() { if b.nBits < 32 { diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 4733ea876..7eed729be 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -5,9 +5,14 @@ package zstd import ( + "bytes" + "encoding/binary" "errors" "fmt" "io" + "io/ioutil" + "os" + "path/filepath" "sync" "github.com/klauspost/compress/huff0" @@ -38,14 +43,14 @@ const ( // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) maxCompressedBlockSize = 128 << 10 + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + // Maximum possible block size (all Raw+Uncompressed). maxBlockSize = (1 << 21) - 1 - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header - maxCompressedLiteralSize = 1 << 18 - maxRLELiteralSize = 1 << 20 - maxMatchLen = 131074 - maxSequences = 0x7f00 + 0xffff + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff // We support slightly less than the reference decoder to be able to // use ints on 32 bit archs. @@ -76,20 +81,27 @@ type blockDec struct { // Window size of the block. WindowSize uint64 - history chan *history - input chan struct{} - result chan decodeOutput - sequenceBuf []seq - err error - decWG sync.WaitGroup + err error + + // Check against this crc + checkCRC []byte // Frame to use for singlethreaded decoding. // Should not be used by the decoder itself since parent may be another frame. localFrame *frameDec + sequence []seqVals + + async struct { + newHist *history + literals []byte + seqData []byte + seqSize int // Size of uncompressed sequences + fcs uint64 + } + // Block is RLE, this is the size. 
RLESize uint32 - tmp [4]byte Type blockType @@ -109,13 +121,8 @@ func (b *blockDec) String() string { func newBlockDec(lowMem bool) *blockDec { b := blockDec{ - lowMem: lowMem, - result: make(chan decodeOutput, 1), - input: make(chan struct{}, 1), - history: make(chan *history, 1), + lowMem: lowMem, } - b.decWG.Add(1) - go b.startDecoder() return &b } @@ -123,44 +130,60 @@ func newBlockDec(lowMem bool) *blockDec { // Input must be a start of a block and will be at the end of the block when returned. func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { b.WindowSize = windowSize - tmp := br.readSmall(3) - if tmp == nil { - if debug { - println("Reading block header:", io.ErrUnexpectedEOF) - } - return io.ErrUnexpectedEOF + tmp, err := br.readSmall(3) + if err != nil { + println("Reading block header:", err) + return err } bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) b.Last = bh&1 != 0 b.Type = blockType((bh >> 1) & 3) // find size. cSize := int(bh >> 3) - maxSize := maxBlockSize + maxSize := maxCompressedBlockSizeAlloc switch b.Type { case blockTypeReserved: return ErrReservedBlockType case blockTypeRLE: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } b.RLESize = uint32(cSize) if b.lowMem { maxSize = cSize } cSize = 1 case blockTypeCompressed: - if debug { + if debugDecoder { println("Data size on stream:", cSize) } b.RLESize = 0 - maxSize = maxCompressedBlockSize + maxSize = maxCompressedBlockSizeAlloc if windowSize < maxCompressedBlockSize && b.lowMem { - maxSize = int(windowSize) + maxSize = int(windowSize) + compressedBlockOverAlloc } if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { - if debug { + if debugDecoder { printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) } return ErrCompressedSizeTooBig } + // Empty compressed blocks must at least be 2 bytes + // for Literals_Block_Type and one for Sequences_Section_Header. + if cSize < 2 { + return ErrBlockTooSmall + } case blockTypeRaw: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + b.RLESize = 0 // We do not need a destination for raw blocks. maxSize = -1 @@ -170,19 +193,18 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { // Read block data. if cap(b.dataStorage) < cSize { - if b.lowMem { - b.dataStorage = make([]byte, 0, cSize) + if b.lowMem || cSize > maxCompressedBlockSize { + b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) } else { - b.dataStorage = make([]byte, 0, maxBlockSize) + b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) } } if cap(b.dst) <= maxSize { b.dst = make([]byte, 0, maxSize+1) } - var err error b.data, err = br.readBig(cSize, b.dataStorage) if err != nil { - if debug { + if debugDecoder { println("Reading block:", err, "(", cSize, ")", len(b.data)) printf("%T", br) } @@ -196,85 +218,14 @@ func (b *blockDec) sendErr(err error) { b.Last = true b.Type = blockTypeReserved b.err = err - b.input <- struct{}{} } // Close will release resources. // Closed blockDec cannot be reset. func (b *blockDec) Close() { - close(b.input) - close(b.history) - close(b.result) - b.decWG.Wait() } -// decodeAsync will prepare decoding the block when it receives input. -// This will separate output and history. 
-func (b *blockDec) startDecoder() { - defer b.decWG.Done() - for range b.input { - //println("blockDec: Got block input") - switch b.Type { - case blockTypeRLE: - if cap(b.dst) < int(b.RLESize) { - if b.lowMem { - b.dst = make([]byte, b.RLESize) - } else { - b.dst = make([]byte, maxBlockSize) - } - } - o := decodeOutput{ - d: b, - b: b.dst[:b.RLESize], - err: nil, - } - v := b.data[0] - for i := range o.b { - o.b[i] = v - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeRaw: - o := decodeOutput{ - d: b, - b: b.data, - err: nil, - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeCompressed: - b.dst = b.dst[:0] - err := b.decodeCompressed(nil) - o := decodeOutput{ - d: b, - b: b.dst, - err: err, - } - if debug { - println("Decompressed to", len(b.dst), "bytes, error:", err) - } - b.result <- o - case blockTypeReserved: - // Used for returning errors. - <-b.history - b.result <- decodeOutput{ - d: b, - b: nil, - err: b.err, - } - default: - panic("Invalid block type") - } - if debug { - println("blockDec: Finished block") - } - } -} - -// decodeAsync will prepare decoding the block when it receives the history. -// If history is provided, it will not fetch it from the channel. +// decodeBuf func (b *blockDec) decodeBuf(hist *history) error { switch b.Type { case blockTypeRLE: @@ -297,14 +248,23 @@ func (b *blockDec) decodeBuf(hist *history) error { return nil case blockTypeCompressed: saved := b.dst - b.dst = hist.b - hist.b = nil + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } err := b.decodeCompressed(hist) - if debug { + if debugDecoder { println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) } - hist.b = b.dst - b.dst = saved + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } return err case blockTypeReserved: // Used for returning errors. @@ -314,30 +274,18 @@ func (b *blockDec) decodeBuf(hist *history) error { } } -// decodeCompressed will start decompressing a block. -// If no history is supplied the decoder will decodeAsync as much as possible -// before fetching from blockDec.history -func (b *blockDec) decodeCompressed(hist *history) error { - in := b.data - delayedHistory := hist == nil - - if delayedHistory { - // We must always grab history. - defer func() { - if hist == nil { - <-b.history - } - }() - } +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header if len(in) < 2 { - return ErrBlockTooSmall + return in, ErrBlockTooSmall } + litType := literalsBlockType(in[0] & 3) var litRegenSize int var litCompSize int sizeFormat := (in[0] >> 2) & 3 var fourStreams bool + var literals []byte switch litType { case literalsBlockRaw, literalsBlockRLE: switch sizeFormat { @@ -353,7 +301,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. if len(in) < 3 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) in = in[3:] @@ -364,7 +312,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
if len(in) < 3 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) litRegenSize = int(n & 1023) @@ -375,7 +323,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { fourStreams = true if len(in) < 4 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) litRegenSize = int(n & 16383) @@ -385,7 +333,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { fourStreams = true if len(in) < 5 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) litRegenSize = int(n & 262143) @@ -393,16 +341,18 @@ func (b *blockDec) decodeCompressed(hist *history) error { in = in[5:] } } - if debug { + if debugDecoder { println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) } - var literals []byte - var huff *huff0.Scratch + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + switch litType { case literalsBlockRaw: if len(in) < litRegenSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } literals = in[:litRegenSize] in = in[litRegenSize:] @@ -410,19 +360,13 @@ func (b *blockDec) decodeCompressed(hist *history) error { case literalsBlockRLE: if len(in) < 1 { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } if cap(b.literalBuf) < litRegenSize { if b.lowMem { - b.literalBuf = make([]byte, litRegenSize) + b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) } else { - if litRegenSize > maxCompressedLiteralSize { - // Exceptional - b.literalBuf = make([]byte, litRegenSize) - } else { - b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) - - } + b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) } } literals = b.literalBuf[:litRegenSize] @@ -431,45 +375,79 @@ func (b *blockDec) decodeCompressed(hist *history) error { literals[i] = v } in = in[1:] - if debug { + if debugDecoder { printf("Found %d RLE compressed literals\n", litRegenSize) } case literalsBlockTreeless: if len(in) < litCompSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } // Store compressed literals, so we defer decoding until we get history. literals = in[:litCompSize] in = in[litCompSize:] - if debug { + if debugDecoder { printf("Found %d compressed literals\n", litCompSize) } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. 
+ if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + var err error + // Use our out buffer. + huff.MaxDecodedSize = litRegenSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + case literalsBlockCompressed: if len(in) < litCompSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } literals = in[:litCompSize] in = in[litCompSize:] - huff = huffDecoderPool.Get().(*huff0.Scratch) - var err error // Ensure we have space to store it. if cap(b.literalBuf) < litRegenSize { if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) } } - if huff == nil { - huff = &huff0.Scratch{} + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } } + var err error huff, literals, err = huff0.ReadTable(literals, huff) if err != nil { println("reading huffman table:", err) - return err + return in, err } + hist.huffTree = huff + huff.MaxDecodedSize = litRegenSize // Use our out buffer. if fourStreams { literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) @@ -478,27 +456,63 @@ func (b *blockDec) decodeCompressed(hist *history) error { } if err != nil { println("decoding compressed literals:", err) - return err + return in, err } // Make sure we don't leak our literals buffer if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) } - if debug { + // Re-cap to get extra size. + literals = b.literalBuf[:len(literals)] + if debugDecoder { printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) } } + hist.decoders.literals = literals + return in, nil +} +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
+ return nil + } + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) + if err != nil { + return err + } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} + +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } // Decode Sequences // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section if len(in) < 1 { return ErrBlockTooSmall } + var nSeqs int seqHeader := in[0] - nSeqs := 0 switch { - case seqHeader == 0: - in = in[1:] case seqHeader < 128: nSeqs = int(seqHeader) in = in[1:] @@ -515,19 +529,16 @@ func (b *blockDec) decodeCompressed(hist *history) error { nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) in = in[3:] } - // Allocate sequences - if cap(b.sequenceBuf) < nSeqs { - if b.lowMem { - b.sequenceBuf = make([]seq, nSeqs) - } else { - // Allocate max - b.sequenceBuf = make([]seq, nSeqs, maxSequences) + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... + if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) } - } else { - // Reuse buffer - b.sequenceBuf = b.sequenceBuf[:nSeqs] + return ErrUnexpectedBlockSize } - var seqs = &sequenceDecs{} + + var seqs = &hist.decoders + seqs.nSeqs = nSeqs if nSeqs > 0 { if len(in) < 1 { return ErrBlockTooSmall @@ -535,12 +546,12 @@ func (b *blockDec) decodeCompressed(hist *history) error { br := byteReader{b: in, off: 0} compMode := br.Uint8() br.advance(1) - if debug { + if debugDecoder { printf("Compression modes: 0b%b", compMode) } for i := uint(0); i < 3; i++ { mode := seqCompMode((compMode >> (6 - i*2)) & 3) - if debug { + if debugDecoder { println("Table", tableIndex(i), "is", mode) } var seq *sequenceDec @@ -556,6 +567,9 @@ func (b *blockDec) decodeCompressed(hist *history) error { } switch mode { case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } seq.fse = &fsePredef[i] case compModeRLE: if br.remain() < 1 { @@ -563,34 +577,36 @@ func (b *blockDec) decodeCompressed(hist *history) error { } v := br.Uint8() br.advance(1) - dec := fseDecoderPool.Get().(*fseDecoder) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } symb, err := decSymbolValue(v, symbolTableX[i]) if err != nil { printf("RLE Transform table (%v) error: %v", tableIndex(i), err) return err } - dec.setRLE(symb) - seq.fse = dec - if debug { + seq.fse.setRLE(symb) + if debugDecoder { printf("RLE set to %+v, code: %v", symb, v) } case compModeFSE: println("Reading table for", tableIndex(i)) - dec := fseDecoderPool.Get().(*fseDecoder) - err := dec.readNCount(&br, uint16(maxTableSymbol[i])) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) if err != nil { println("Read table error:", err) return err } - err = dec.transform(symbolTableX[i]) + err = seq.fse.transform(symbolTableX[i]) if err != nil { println("Transform table error:", err) return err } - if debug { - println("Read table ok", "symbolLen:", dec.symbolLen) + if debugDecoder { + println("Read table ok", "symbolLen:", seq.fse.symbolLen) } - seq.fse = dec case compModeRepeat: 
seq.repeat = true } @@ -600,140 +616,106 @@ func (b *blockDec) decodeCompressed(hist *history) error { } in = br.unread() } - - // Wait for history. - // All time spent after this is critical since it is strictly sequential. - if hist == nil { - hist = <-b.history - if hist.error { - return ErrDecoderClosed - } + if debugDecoder { + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") } - // Decode treeless literal block. - if litType == literalsBlockTreeless { - // TODO: We could send the history early WITHOUT the stream history. - // This would allow decoding treeless literials before the byte history is available. - // Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless. - // So not much obvious gain here. - - if hist.huffTree == nil { - return errors.New("literal block was treeless, but no history was defined") - } - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) - } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) - } - } - var err error - // Use our out buffer. - huff = hist.huffTree - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - // Make sure we don't leak our literals buffer - if err != nil { - println("decompressing literals:", err) - return err - } - if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - } else { - if hist.huffTree != nil && huff != nil { - if hist.dict == nil || hist.dict.litEnc != hist.huffTree { - huffDecoderPool.Put(hist.huffTree) - } - hist.huffTree = nil + if nSeqs == 0 { + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] } + return nil } - if huff != nil { - hist.huffTree = huff + br := seqs.br + if br == nil { + br = &bitReader{} } - if debug { - println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.") + if err := br.init(in); err != nil { + return err } - if nSeqs == 0 { - // Decompressed content is defined entirely as Literals Section content. - b.dst = append(b.dst, literals...) - if delayedHistory { - hist.append(literals) + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + // Extract blocks... 
+ if false && hist.dict == nil { + fatalErr := func(err error) { + if err != nil { + panic(err) + } } - return nil + fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) + var buf bytes.Buffer + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) + buf.Write(in) + ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) } - seqs, err := seqs.mergeHistory(&hist.decoders) - if err != nil { - return err - } - if debug { - println("History merged ok") + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } } - br := &bitReader{} - if err := br.init(in); err != nil { - return err + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets - // TODO: Investigate if sending history without decoders are faster. - // This would allow the sequences to be decoded async and only have to construct stream history. - // If only recent offsets were not transferred, this would be an obvious win. - // Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded. + err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} +func (b *blockDec) executeSequences(hist *history) error { hbytes := hist.b if len(hbytes) > hist.windowSize { hbytes = hbytes[len(hbytes)-hist.windowSize:] - // We do not need history any more. + // We do not need history anymore. if hist.dict != nil { hist.dict.content = nil } } - - if err := seqs.initialize(br, hist, literals, b.dst); err != nil { - println("initializing sequences:", err) - return err - } - - err = seqs.decode(nSeqs, br, hbytes) + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) if err != nil { return err } - if !br.finished() { - return fmt.Errorf("%d extra bits on block, should be 0", br.remain()) - } + return b.updateHistory(hist) +} - err = br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } +func (b *blockDec) updateHistory(hist *history) error { if len(b.data) > maxCompressedBlockSize { return fmt.Errorf("compressed block size too large (%d)", len(b.data)) } // Set output and release references. - b.dst = seqs.out - seqs.out, seqs.literals, seqs.hist = nil, nil, nil + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset - if !delayedHistory { - // If we don't have delayed history, no need to update. - hist.recentOffsets = seqs.prevOffset - return nil - } if b.Last { // if last block we don't care about history. println("Last block, no history returned") hist.b = hist.b[:0] return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. 
Added", len(b.dst), "to history, now length", len(hist.b)) + } } - hist.append(b.dst) - hist.recentOffsets = seqs.prevOffset - if debug { - println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.") - } + hist.decoders.out, hist.decoders.literals = nil, nil return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 083fbb502..12e8f6f0b 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -22,28 +22,44 @@ type blockEnc struct { dictLitEnc *huff0.Scratch wr bitWriter - extraLits int - last bool - + extraLits int output []byte recentOffsets [3]uint32 prevRecentOffsets [3]uint32 + + last bool + lowMem bool } // init should be used once the block has been created. // If called more than once, the effect is the same as calling reset. func (b *blockEnc) init() { - if cap(b.literals) < maxCompressedLiteralSize { - b.literals = make([]byte, 0, maxCompressedLiteralSize) - } - const defSeqs = 200 - b.literals = b.literals[:0] - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - if cap(b.output) < maxCompressedBlockSize { - b.output = make([]byte, 0, maxCompressedBlockSize) + if b.lowMem { + // 1K literals + if cap(b.literals) < 1<<10 { + b.literals = make([]byte, 0, 1<<10) + } + const defSeqs = 20 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + // 1K + if cap(b.output) < 1<<10 { + b.output = make([]byte, 0, 1<<10) + } + } else { + if cap(b.literals) < maxCompressedBlockSize { + b.literals = make([]byte, 0, maxCompressedBlockSize) + } + const defSeqs = 2000 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } } + if b.coders.mlEnc == nil { b.coders.mlEnc = &fseEncoder{} b.coders.mlPrev = &fseEncoder{} @@ -76,6 +92,7 @@ func (b *blockEnc) reset(prev *blockEnc) { if prev != nil { b.recentOffsets = prev.prevRecentOffsets } + b.dictLitEnc = nil } // reset will reset the block for a new encode, but in the same stream, @@ -139,7 +156,7 @@ func (h *literalsHeader) setSize(regenLen int) { switch { case inBits < 5: lh |= (uint64(regenLen) << 3) | (1 << 60) - if debug { + if debugEncoder { got := int(lh>>3) & 0xff if got != regenLen { panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) @@ -167,7 +184,7 @@ func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { lh |= 1 << 2 } lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) - if debug { + if debugEncoder { const mmask = (1 << 24) - 1 n := (lh >> 4) & mmask if int(n&1023) != inLen { @@ -295,7 +312,7 @@ func (b *blockEnc) encodeRaw(a []byte) { bh.setType(blockTypeRaw) b.output = bh.appendTo(b.output[:0]) b.output = append(b.output, a...) - if debug { + if debugEncoder { println("Adding RAW block, length", len(a), "last:", b.last) } } @@ -308,7 +325,7 @@ func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { bh.setType(blockTypeRaw) dst = bh.appendTo(dst) dst = append(dst, src...) 
- if debug { + if debugEncoder { println("Adding RAW block, length", len(src), "last:", b.last) } return dst @@ -322,7 +339,7 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { // Don't compress extremely small blocks if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { - if debug { + if debugEncoder { println("Adding RAW block, length", len(lits), "last:", b.last) } bh.setType(blockTypeRaw) @@ -354,7 +371,7 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { switch err { case huff0.ErrIncompressible: - if debug { + if debugEncoder { println("Adding RAW block, length", len(lits), "last:", b.last) } bh.setType(blockTypeRaw) @@ -362,16 +379,16 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { b.output = append(b.output, lits...) return nil case huff0.ErrUseRLE: - if debug { + if debugEncoder { println("Adding RLE block, length", len(lits)) } bh.setType(blockTypeRLE) b.output = bh.appendTo(b.output) b.output = append(b.output, lits[0]) return nil + case nil: default: return err - case nil: } // Compressed... // Now, allow reuse @@ -379,12 +396,12 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { bh.setType(blockTypeCompressed) var lh literalsHeader if reUsed { - if debug { + if debugEncoder { println("Reused tree, compressed to", len(out)) } lh.setType(literalsBlockTreeless) } else { - if debug { + if debugEncoder { println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) } lh.setType(literalsBlockCompressed) @@ -409,7 +426,7 @@ func fuzzFseEncoder(data []byte) int { return 0 } enc := fseEncoder{} - hist := enc.Histogram()[:256] + hist := enc.Histogram() maxSym := uint8(0) for i, v := range data { v = v & 63 @@ -500,7 +517,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { lh.setSize(len(b.literals)) b.output = lh.appendTo(b.output) b.output = append(b.output, b.literals...) - if debug { + if debugEncoder { println("Adding literals RAW, length", len(b.literals)) } case huff0.ErrUseRLE: @@ -508,27 +525,22 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { lh.setSize(len(b.literals)) b.output = lh.appendTo(b.output) b.output = append(b.output, b.literals[0]) - if debug { + if debugEncoder { println("Adding literals RLE") } - default: - if debug { - println("Adding literals ERROR:", err) - } - return err case nil: // Compressed litLen... if reUsed { - if debug { + if debugEncoder { println("reused tree") } lh.setType(literalsBlockTreeless) } else { - if debug { + if debugEncoder { println("new tree, size:", len(b.litEnc.OutTable)) } lh.setType(literalsBlockCompressed) - if debug { + if debugEncoder { _, _, err := huff0.ReadTable(out, nil) if err != nil { panic(err) @@ -536,16 +548,21 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { } } lh.setSizes(len(out), len(b.literals), single) - if debug { + if debugEncoder { printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) println("Adding literal header:", lh) } b.output = lh.appendTo(b.output) b.output = append(b.output, out...) 
b.litEnc.Reuse = huff0.ReusePolicyAllow - if debug { + if debugEncoder { println("Adding literals compressed") } + default: + if debugEncoder { + println("Adding literals ERROR:", err) + } + return err } // Sequence compression @@ -560,7 +577,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { n := len(b.sequences) - 0x7f00 b.output = append(b.output, 255, uint8(n), uint8(n>>8)) } - if debug { + if debugEncoder { println("Encoding", len(b.sequences), "sequences") } b.genCodes() @@ -594,17 +611,17 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { nSize = nSize + (nSize+2*8*16)>>4 switch { case predefSize <= prevSize && predefSize <= nSize || forcePreDef: - if debug { + if debugEncoder { println("Using predefined", predefSize>>3, "<=", nSize>>3) } return preDef, compModePredefined case prevSize <= nSize: - if debug { + if debugEncoder { println("Using previous", prevSize>>3, "<=", nSize>>3) } return prev, compModeRepeat default: - if debug { + if debugEncoder { println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) } @@ -617,7 +634,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if llEnc.useRLE { mode |= uint8(compModeRLE) << 6 llEnc.setRLE(b.sequences[0].llCode) - if debug { + if debugEncoder { println("llEnc.useRLE") } } else { @@ -628,7 +645,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if ofEnc.useRLE { mode |= uint8(compModeRLE) << 4 ofEnc.setRLE(b.sequences[0].ofCode) - if debug { + if debugEncoder { println("ofEnc.useRLE") } } else { @@ -640,7 +657,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if mlEnc.useRLE { mode |= uint8(compModeRLE) << 2 mlEnc.setRLE(b.sequences[0].mlCode) - if debug { + if debugEncoder { println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) } } else { @@ -649,7 +666,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { mode |= uint8(m) << 2 } b.output = append(b.output, mode) - if debug { + if debugEncoder { printf("Compression modes: 0b%b", mode) } b.output, err = llEnc.writeCount(b.output) @@ -705,52 +722,53 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) } seq-- - if llEnc.maxBits+mlEnc.maxBits+ofEnc.maxBits <= 32 { - // No need to flush (common) - for seq >= 0 { - s = b.sequences[seq] - wr.flush32() - llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] - // tabelog max is 8 for all. - of.encode(ofB) - ml.encode(mlB) - ll.encode(llB) - wr.flush32() - - // We checked that all can stay within 32 bits - wr.addBits32NC(s.litLen, llB.outBits) - wr.addBits32NC(s.matchLen, mlB.outBits) - wr.addBits32NC(s.offset, ofB.outBits) - - if debugSequences { - println("Encoded seq", seq, s) - } - - seq-- + // Store sequences in reverse... + for seq >= 0 { + s = b.sequences[seq] + + ofB := ofTT[s.ofCode] + wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits. 
+ //of.encode(ofB)
+ nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16
+ dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState)
+ wr.addBits16NC(of.state, uint8(nbBitsOut))
+ of.state = of.stateTable[dstState]
+
+ // Accumulate extra bits.
+ outBits := ofB.outBits & 31
+ extraBits := uint64(s.offset & bitMask32[outBits])
+ extraBitsN := outBits
+
+ mlB := mlTT[s.mlCode]
+ //ml.encode(mlB)
+ nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16
+ dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState)
+ wr.addBits16NC(ml.state, uint8(nbBitsOut))
+ ml.state = ml.stateTable[dstState]
+
+ outBits = mlB.outBits & 31
+ extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits])
+ extraBitsN += outBits
+
+ llB := llTT[s.llCode]
+ //ll.encode(llB)
+ nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16
+ dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState)
+ wr.addBits16NC(ll.state, uint8(nbBitsOut))
+ ll.state = ll.stateTable[dstState]
+
+ outBits = llB.outBits & 31
+ extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits])
+ extraBitsN += outBits
+
+ wr.flush32()
+ wr.addBits64NC(extraBits, extraBitsN)
+
+ if debugSequences {
+ println("Encoded seq", seq, s)
+ }
+
- }
- } else {
- for seq >= 0 {
- s = b.sequences[seq]
- wr.flush32()
- llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
- // tabelog max is below 8 for each.
- of.encode(ofB)
- ml.encode(mlB)
- ll.encode(llB)
- wr.flush32()
-
- // ml+ll = max 32 bits total
- wr.addBits32NC(s.litLen, llB.outBits)
- wr.addBits32NC(s.matchLen, mlB.outBits)
- wr.flush32()
- wr.addBits32NC(s.offset, ofB.outBits)
-
- if debugSequences {
- println("Encoded seq", seq, s)
- }
- seq--
- }
+ seq--
}
ml.flush(mlEnc.actualTableLog)
of.flush(ofEnc.actualTableLog)
@@ -769,7 +787,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
// Size is output minus block header.
bh.setSize(uint32(len(b.output)-bhOffset) - 3)
- if debug {
+ if debugEncoder {
println("Rewriting block header", bh)
}
_ = bh.appendTo(b.output[bhOffset:bhOffset])
@@ -784,14 +802,13 @@ func (b *blockEnc) genCodes() {
// nothing to do
return
}
-
if len(b.sequences) > math.MaxUint16 {
panic("can only encode up to 64K sequences")
}
// No bounds checks after here:
- llH := b.coders.llEnc.Histogram()[:256]
- ofH := b.coders.ofEnc.Histogram()[:256]
- mlH := b.coders.mlEnc.Histogram()[:256]
+ llH := b.coders.llEnc.Histogram()
+ ofH := b.coders.ofEnc.Histogram()
+ mlH := b.coders.mlEnc.Histogram()
for i := range llH {
llH[i] = 0
}
@@ -803,7 +820,8 @@ func (b *blockEnc) genCodes() {
}
var llMax, ofMax, mlMax uint8
- for i, seq := range b.sequences {
+ for i := range b.sequences {
+ seq := &b.sequences[i]
v := llCode(seq.litLen)
seq.llCode = v
llH[v]++
@@ -827,7 +845,6 @@ func (b *blockEnc) genCodes() {
panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
}
}
- b.sequences[i] = seq
}
maxCount := func(a []uint32) int {
var max uint32
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
index 658ef7838..2ad02070d 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -12,8 +12,8 @@ import (
type byteBuffer interface {
// Read up to 8 bytes.
- // Returns nil if no more input is available.
- readSmall(n int) []byte
+ // Returns io.ErrUnexpectedEOF if this cannot be satisfied.
+ readSmall(n int) ([]byte, error)

// Read >8 bytes.
// MAY use the destination slice.
@@ -23,23 +23,23 @@ type byteBuffer interface {
readByte() (byte, error)

// Skip n bytes.
- skipN(n int) error
+ skipN(n int64) error
}

// in-memory buffer
type byteBuf []byte

-func (b *byteBuf) readSmall(n int) []byte {
+func (b *byteBuf) readSmall(n int) ([]byte, error) {
if debugAsserts && n > 8 {
panic(fmt.Errorf("small read > 8 (%d). 
use readBig", n)) } bb := *b if len(bb) < n { - return nil + return nil, io.ErrUnexpectedEOF } r := bb[:n] *b = bb[n:] - return r + return r, nil } func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { @@ -52,10 +52,6 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { return r, nil } -func (b *byteBuf) remain() []byte { - return *b -} - func (b *byteBuf) readByte() (byte, error) { bb := *b if len(bb) < 1 { @@ -66,9 +62,12 @@ func (b *byteBuf) readByte() (byte, error) { return r, nil } -func (b *byteBuf) skipN(n int) error { +func (b *byteBuf) skipN(n int64) error { bb := *b - if len(bb) < n { + if n < 0 { + return fmt.Errorf("negative skip (%d) requested", n) + } + if int64(len(bb)) < n { return io.ErrUnexpectedEOF } *b = bb[n:] @@ -81,19 +80,22 @@ type readerWrapper struct { tmp [8]byte } -func (r *readerWrapper) readSmall(n int) []byte { +func (r *readerWrapper) readSmall(n int) ([]byte, error) { if debugAsserts && n > 8 { panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) } n2, err := io.ReadFull(r.r, r.tmp[:n]) // We only really care about the actual bytes read. - if n2 != n { - if debug { + if err != nil { + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } + if debugDecoder { println("readSmall: got", n2, "want", n, "err", err) } - return nil + return nil, err } - return r.tmp[:n] + return r.tmp[:n], nil } func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { @@ -110,6 +112,9 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { func (r *readerWrapper) readByte() (byte, error) { n2, err := r.r.Read(r.tmp[:1]) if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } return 0, err } if n2 != 1 { @@ -118,9 +123,9 @@ func (r *readerWrapper) readByte() (byte, error) { return r.tmp[0], nil } -func (r *readerWrapper) skipN(n int) error { - n2, err := io.CopyN(ioutil.Discard, r.r, int64(n)) - if n2 != int64(n) { +func (r *readerWrapper) skipN(n int64) error { + n2, err := io.CopyN(ioutil.Discard, r.r, n) + if n2 != n { err = io.ErrUnexpectedEOF } return err diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go index 2c4fca17f..0e59a242d 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytereader.go +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -13,12 +13,6 @@ type byteReader struct { off int } -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - // advance the stream b n bytes. func (b *byteReader) advance(n uint) { b.off += int(n) diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go new file mode 100644 index 000000000..5022e71c8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -0,0 +1,230 @@ +// Copyright 2020+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "io" +) + +// HeaderMaxSize is the maximum size of a Frame and Block Header. +// If less is sent to Header.Decode it *may* still contain enough information. +const HeaderMaxSize = 14 + 3 + +// Header contains information about the first frame and block within that. +type Header struct { + // SingleSegment specifies whether the data is to be decompressed into a + // single contiguous memory segment. 
+ // It implies that WindowSize is invalid and that FrameContentSize is valid. + SingleSegment bool + + // WindowSize is the window of data to keep while decoding. + // Will only be set if SingleSegment is false. + WindowSize uint64 + + // Dictionary ID. + // If 0, no dictionary. + DictionaryID uint32 + + // HasFCS specifies whether FrameContentSize has a valid value. + HasFCS bool + + // FrameContentSize is the expected uncompressed size of the entire frame. + FrameContentSize uint64 + + // Skippable will be true if the frame is meant to be skipped. + // This implies that FirstBlock.OK is false. + Skippable bool + + // SkippableID is the user-specific ID for the skippable frame. + // Valid values are between 0 to 15, inclusive. + SkippableID int + + // SkippableSize is the length of the user data to skip following + // the header. + SkippableSize uint32 + + // HeaderSize is the raw size of the frame header. + // + // For normal frames, it includes the size of the magic number and + // the size of the header (per section 3.1.1.1). + // It does not include the size for any data blocks (section 3.1.1.2) nor + // the size for the trailing content checksum. + // + // For skippable frames, this counts the size of the magic number + // along with the size of the size field of the payload. + // It does not include the size of the skippable payload itself. + // The total frame size is the HeaderSize plus the SkippableSize. + HeaderSize int + + // First block information. + FirstBlock struct { + // OK will be set if first block could be decoded. + OK bool + + // Is this the last block of a frame? + Last bool + + // Is the data compressed? + // If true CompressedSize will be populated. + // Unfortunately DecompressedSize cannot be determined + // without decoding the blocks. + Compressed bool + + // DecompressedSize is the expected decompressed size of the block. + // Will be 0 if it cannot be determined. + DecompressedSize int + + // CompressedSize of the data in the block. + // Does not include the block header. + // Will be equal to DecompressedSize if not Compressed. + CompressedSize int + } + + // If set there is a checksum present for the block content. + // The checksum field at the end is always 4 bytes long. + HasCheckSum bool +} + +// Decode the header from the beginning of the stream. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
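A minimal usage sketch (not part of the vendored diff) of the Header API added above, inspecting a frame without decompressing it; the input path is illustrative, while Header, HeaderMaxSize and Decode are exactly the identifiers introduced in this file:

	package main

	import (
		"fmt"
		"io"
		"log"
		"os"

		"github.com/klauspost/compress/zstd"
	)

	func main() {
		f, err := os.Open("example.zst") // illustrative input path
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()

		// Read up to HeaderMaxSize bytes; a short prefix is acceptable,
		// since Decode itself reports io.ErrUnexpectedEOF if it needs more.
		buf := make([]byte, zstd.HeaderMaxSize)
		n, err := io.ReadFull(f, buf)
		if err != nil && err != io.ErrUnexpectedEOF {
			log.Fatal(err)
		}

		var h zstd.Header
		if err := h.Decode(buf[:n]); err != nil {
			log.Fatal(err)
		}
		fmt.Println("skippable:", h.Skippable, "window:", h.WindowSize,
			"hasFCS:", h.HasFCS, "first block decoded:", h.FirstBlock.OK)
	}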
+func (h *Header) Decode(in []byte) error { + *h = Header{} + if len(in) < 4 { + return io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + b, in := in[:4], in[4:] + if !bytes.Equal(b, frameMagic) { + if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { + return ErrMagicMismatch + } + if len(in) < 4 { + return io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + h.Skippable = true + h.SkippableID = int(b[0] & 0xf) + h.SkippableSize = binary.LittleEndian.Uint32(in) + return nil + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + if len(in) < 1 { + return io.ErrUnexpectedEOF + } + fhd, in := in[0], in[1:] + h.HeaderSize++ + h.SingleSegment = fhd&(1<<5) != 0 + h.HasCheckSum = fhd&(1<<2) != 0 + if fhd&(1<<3) != 0 { + return errors.New("reserved bit set on frame header") + } + + if !h.SingleSegment { + if len(in) < 1 { + return io.ErrUnexpectedEOF + } + var wd byte + wd, in = in[0], in[1:] + h.HeaderSize++ + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + h.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + if len(in) < int(size) { + return io.ErrUnexpectedEOF + } + b, in = in[:size], in[size:] + h.HeaderSize += int(size) + switch size { + case 1: + h.DictionaryID = uint32(b[0]) + case 2: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if h.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + + if fcsSize > 0 { + h.HasFCS = true + if len(in) < fcsSize { + return io.ErrUnexpectedEOF + } + b, in = in[:fcsSize], in[fcsSize:] + h.HeaderSize += int(fcsSize) + switch fcsSize { + case 1: + h.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + } + + // Frame Header done, we will not fail from now on. + if len(in) < 3 { + return nil + } + tmp := in[:3] + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + h.FirstBlock.Last = bh&1 != 0 + blockType := blockType((bh >> 1) & 3) + // find size. 
+ cSize := int(bh >> 3) + switch blockType { + case blockTypeReserved: + return nil + case blockTypeRLE: + h.FirstBlock.Compressed = true + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = 1 + case blockTypeCompressed: + h.FirstBlock.Compressed = true + h.FirstBlock.CompressedSize = cSize + case blockTypeRaw: + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = cSize + default: + panic("Invalid block type") + } + + h.FirstBlock.OK = true + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index d78be6d42..d212f4737 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -6,9 +6,12 @@ package zstd import ( "bytes" - "errors" + "context" + "encoding/binary" "io" "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" ) // Decoder provides decoding of zstandard streams. @@ -23,12 +26,19 @@ type Decoder struct { // Unreferenced decoders, ready for use. decoders chan *blockDec - // Streams ready to be decoded. - stream chan decodeStream - // Current read position used for Reader functionality. current decoderState + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + } + + frame *frameDec + // Custom dictionaries. // Always uses copies. dicts map[uint32]dict @@ -47,7 +57,10 @@ type decoderState struct { output chan decodeOutput // cancel remaining output. - cancel chan struct{} + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest flushed bool } @@ -82,9 +95,13 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { return nil, err } } - d.current.output = make(chan decodeOutput, d.o.concurrent) + d.current.crc = xxhash.New() d.current.flushed = true + if r == nil { + d.current.err = ErrDecoderNilInput + } + // Transfer option dicts. d.dicts = make(map[uint32]dict, len(d.o.dicts)) for _, dc := range d.o.dicts { @@ -110,9 +127,6 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { // Returns the number of bytes written and any error that occurred. // When the stream is done, io.EOF will be returned. func (d *Decoder) Read(p []byte) (int, error) { - if d.stream == nil { - return 0, errors.New("no input has been initialized") - } var n int for { if len(d.current.b) > 0 { @@ -130,12 +144,12 @@ func (d *Decoder) Read(p []byte) (int, error) { break } if !d.nextBlock(n == 0) { - return n, nil + return n, d.current.err } } } if len(d.current.b) > 0 { - if debug { + if debugDecoder { println("returning", n, "still bytes left:", len(d.current.b)) } // Only return error at end of block @@ -144,7 +158,7 @@ func (d *Decoder) Read(p []byte) (int, error) { if d.current.err != nil { d.drainOutput() } - if debug { + if debugDecoder { println("returning", n, d.current.err, len(d.decoders)) } return n, d.current.err @@ -152,28 +166,33 @@ func (d *Decoder) Read(p []byte) (int, error) { // Reset will reset the decoder the supplied stream after the current has finished processing. // Note that this functionality cannot be used after Close has been called. +// Reset can be called with a nil reader to release references to the previous reader. +// After being called with a nil reader, no other operations than Reset or DecodeAll or Close +// should be used. 
func (d *Decoder) Reset(r io.Reader) error { if d.current.err == ErrDecoderClosed { return d.current.err } - if r == nil { - return errors.New("nil Reader sent as input") - } - - if d.stream == nil { - d.stream = make(chan decodeStream, 1) - d.streamWg.Add(1) - go d.startStreamDecoder(d.stream) - } d.drainOutput() - // If bytes buffer and < 1MB, do sync decoding anyway. - if bb, ok := r.(*bytes.Buffer); ok && bb.Len() < 1<<20 { - if debug { + d.syncStream.br.r = nil + if r == nil { + d.current.err = ErrDecoderNilInput + if len(d.current.b) > 0 { + d.current.b = d.current.b[:0] + } + d.current.flushed = true + return nil + } + + // If bytes buffer and < 5MB, do sync decoding anyway. + if bb, ok := r.(byter); ok && bb.Len() < 5<<20 { + bb2 := bb + if debugDecoder { println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) } - b := bb.Bytes() + b := bb2.Bytes() var dst []byte if cap(d.current.b) > 0 { dst = d.current.b @@ -186,36 +205,48 @@ func (d *Decoder) Reset(r io.Reader) error { d.current.b = dst d.current.err = err d.current.flushed = true - if debug { + if debugDecoder { println("sync decode to", len(dst), "bytes, err:", err) } return nil } - // Remove current block. + d.stashDecoder() d.current.decodeOutput = decodeOutput{} d.current.err = nil - d.current.cancel = make(chan struct{}) d.current.flushed = false d.current.d = nil - d.stream <- decodeStream{ - r: r, - output: d.current.output, - cancel: d.current.cancel, + // Ensure no-one else is still running... + d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) + } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + return nil } // drainOutput will drain the output until errEndOfStream is sent. func (d *Decoder) drainOutput() { if d.current.cancel != nil { - println("cancelling current") - close(d.current.cancel) + if debugDecoder { + println("cancelling current") + } + d.current.cancel() d.current.cancel = nil } if d.current.d != nil { - if debug { + if debugDecoder { printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) } d.decoders <- d.current.d @@ -226,39 +257,31 @@ func (d *Decoder) drainOutput() { println("current already flushed") return } - for { - select { - case v := <-d.current.output: - if v.d != nil { - if debug { - printf("re-adding decoder %p", v.d) - } - d.decoders <- v.d - } - if v.err == errEndOfStream { - println("current flushed") - d.current.flushed = true - return + for v := range d.current.output { + if v.d != nil { + if debugDecoder { + printf("re-adding decoder %p", v.d) } + d.decoders <- v.d } } + d.current.output = nil + d.current.flushed = true } // WriteTo writes data to w until there's no more data to write or when an error occurs. // The return value n is the number of bytes written. // Any error encountered during the write is also returned. 
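A sketch (not part of the vendored diff) of the reuse pattern the new Reset semantics enable: one Decoder, Reset per input, and a final Reset(nil) to release the last reader as documented above. File handling is illustrative:

	package main

	import (
		"io"
		"log"
		"os"

		"github.com/klauspost/compress/zstd"
	)

	// decompressFiles reuses one Decoder across many inputs:
	// Reset swaps the source, Reset(nil) releases the last one.
	func decompressFiles(paths []string, dst io.Writer) error {
		dec, err := zstd.NewReader(nil) // no input yet; Reset supplies it
		if err != nil {
			return err
		}
		defer dec.Close()

		for _, p := range paths {
			f, err := os.Open(p)
			if err != nil {
				return err
			}
			err = dec.Reset(f)
			if err == nil {
				_, err = dec.WriteTo(dst)
			}
			f.Close()
			if err != nil {
				return err
			}
		}
		return dec.Reset(nil) // drop the reference to the final reader
	}

	func main() {
		if err := decompressFiles(os.Args[1:], os.Stdout); err != nil {
			log.Fatal(err)
		}
	}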
func (d *Decoder) WriteTo(w io.Writer) (int64, error) { - if d.stream == nil { - return 0, errors.New("no input has been initialized") - } var n int64 for { if len(d.current.b) > 0 { n2, err2 := w.Write(d.current.b) n += int64(n2) - if err2 != nil && d.current.err == nil { + if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { d.current.err = err2 - break + } else if n2 != len(d.current.b) { + d.current.err = io.ErrShortWrite } } if d.current.err != nil { @@ -282,7 +305,7 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) { // DecodeAll can be used concurrently. // The Decoder concurrency limits will be respected. func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { - if d.current.err == ErrDecoderClosed { + if d.decoders == nil { return dst, ErrDecoderClosed } @@ -290,11 +313,14 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { block := <-d.decoders frame := block.localFrame defer func() { - if debug { + if debugDecoder { printf("re-adding decoder: %p", block) } frame.rawInput = nil frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + } d.decoders <- block }() frame.bBuf = input @@ -302,40 +328,53 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { for { frame.history.reset() err := frame.reset(&frame.bBuf) - if err == io.EOF { - if debug { - println("frame reset return EOF") + if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil } - return dst, nil + return dst, err } if frame.DictionaryID != nil { dict, ok := d.dicts[*frame.DictionaryID] if !ok { return nil, ErrUnknownDictionary } + if debugDecoder { + println("setting dict", frame.DictionaryID) + } frame.history.setDict(&dict) } - if err != nil { - return dst, err - } - if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { - return dst, ErrDecoderSizeExceeded + if frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) + } + return dst, ErrWindowSizeExceeded } - if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 { - // Never preallocate moe than 1 GB up front. - if uint64(cap(dst)) < frame.FrameContentSize { - dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)) + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { + return dst, ErrDecoderSizeExceeded + } + if cap(dst)-len(dst) < int(frame.FrameContentSize) { + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) copy(dst2, dst) dst = dst2 } } + if cap(dst) == 0 { - // Allocate window size * 2 by default if nothing is provided and we didn't get frame content size. - size := frame.WindowSize * 2 + // Allocate len(input) * 2 by default if nothing is provided + // and we didn't get frame content size. + size := len(input) * 2 // Cap to 1 MB. if size > 1<<20 { size = 1 << 20 } + if uint64(size) > d.o.maxDecodedSize { + size = int(d.o.maxDecodedSize) + } dst = make([]byte, 0, size) } @@ -344,7 +383,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { return dst, err } if len(frame.bBuf) == 0 { - if debug { + if debugDecoder { println("frame dbuf empty") } break @@ -359,33 +398,176 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { // If non-blocking mode is used the returned boolean will be false // if no data was available without blocking. 
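Since DecodeAll is documented above as usable concurrently within the Decoder's concurrency limit, a single shared Decoder can serve many goroutines. A sketch of that pattern; it assumes the package's NewWriter/EncodeAll counterparts (not shown in this diff) only to produce valid input for the round trip:

	package main

	import (
		"fmt"
		"log"

		"github.com/klauspost/compress/zstd"
	)

	// One shared Decoder; DecodeAll is stateless per call and
	// bounded by the decoder's concurrency setting.
	var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))

	func decompress(src []byte) ([]byte, error) {
		// Passing nil lets DecodeAll allocate; callers may pass a
		// reusable buffer instead to avoid allocations.
		return decoder.DecodeAll(src, nil)
	}

	func main() {
		enc, err := zstd.NewWriter(nil)
		if err != nil {
			log.Fatal(err)
		}
		compressed := enc.EncodeAll([]byte("hello zstd"), nil)
		enc.Close()

		out, err := decompress(compressed)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(string(out))
	}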
func (d *Decoder) nextBlock(blocking bool) (ok bool) { - if d.current.d != nil { - if debug { - printf("re-adding current decoder %p", d.current.d) - } - d.decoders <- d.current.d - d.current.d = nil - } if d.current.err != nil { // Keep error state. - return blocking + return false + } + d.current.b = d.current.b[:0] + + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok } + //ASYNC: + d.stashDecoder() if blocking { - d.current.decodeOutput = <-d.current.output + d.current.decodeOutput, ok = <-d.current.output } else { select { - case d.current.decodeOutput = <-d.current.output: + case d.current.decodeOutput, ok = <-d.current.output: default: return false } } - if debug { - println("got", len(d.current.b), "bytes, error:", d.current.err) + if !ok { + // This should not happen, so signal error state... + d.current.err = io.ErrUnexpectedEOF + return false } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } + if debugDecoder { + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) + } + + if !d.o.ignoreChecksum && len(next.b) > 0 { + n, err := d.current.crc.Write(next.b) + if err == nil { + if n != len(next.b) { + d.current.err = io.ErrShortWrite + } + } + } + if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 { + got := d.current.crc.Sum64() + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(got)) + if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) { + if debugDecoder { + println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)") + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + println("CRC ok", tmp[:]) + } + } + } + return true } +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err != nil { + return false + } + if d.frame.DictionaryID != nil { + dict, ok := d.dicts[*d.frame.DictionaryID] + if !ok { + d.current.err = ErrUnknownDictionary + return false + } else { + d.frame.history.setDict(&dict) + } + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if 
d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + if !d.o.ignoreChecksum { + d.frame.crc.Write(d.current.b) + } + if d.current.d.Last { + if !d.o.ignoreChecksum { + d.current.err = d.frame.checkCRC() + } else { + d.current.err = d.frame.consumeCRC() + } + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last + } + return true +} + +func (d *Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } +} + // Close will release all resources. // It is NOT possible to reuse the decoder after this. func (d *Decoder) Close() { @@ -393,10 +575,10 @@ func (d *Decoder) Close() { return } d.drainOutput() - if d.stream != nil { - close(d.stream) + if d.current.cancel != nil { + d.current.cancel() d.streamWg.Wait() - d.stream = nil + d.current.cancel = nil } if d.decoders != nil { close(d.decoders) @@ -447,100 +629,296 @@ type decodeOutput struct { err error } -type decodeStream struct { - r io.Reader - - // Blocks ready to be written to output. - output chan decodeOutput - - // cancel reading from the input - cancel chan struct{} +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil } -// errEndOfStream indicates that everything from the stream was read. -var errEndOfStream = errors.New("end-of-stream") - // Create Decoder: -// Spawn n block decoders. These accept tasks to decode a block. -// Create goroutine that handles stream processing, this will send history to decoders as they are available. -// Decoders update the history as they decode. -// When a block is returned: -// a) history is sent to the next decoder, -// b) content written to CRC. -// c) return data to WRITER. -// d) wait for next block to return data. -// Once WRITTEN, the decoders reused by the writer frame decoder for re-use. -func (d *Decoder) startStreamDecoder(inStream chan decodeStream) { +// ASYNC: +// Spawn 3 go routines. +// 0: Read frames and decode block literals. +// 1: Decode sequences. +// 2: Execute sequences, send to output. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { defer d.streamWg.Done() - frame := newFrameDec(d.o) - for stream := range inStream { - if debug { - println("got new stream") + br := readerWrapper{r: r} + + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Decode sequences... 
+ go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history, recent:", block.async.newHist.recentOffsets) + } + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) + } + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize + } + seqExecute <- block } - br := readerWrapper{r: stream.r} - decodeStream: - for { - frame.history.reset() - err := frame.reset(&br) - if debug && err != nil { - println("Frame decoder returned", err) + close(seqExecute) + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... + frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue } - if err == nil && frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - err = ErrUnknownDictionary + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history") + } + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 + } + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxBlockSize) + } + } + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && hasErr { + println("executeSequences returned:", do.err) + } + 
do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true } else { - frame.history.setDict(&dict) + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } } } - if err != nil { - stream.output <- decodeOutput{ - err: err, + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + }() + +decodeStream: + for { + var hist history + var hasErr bool + + decodeBlock := func(block *blockDec) { + if hasErr { + if block != nil { + seqDecode <- block } - break + return } - if debug { - println("starting frame decoder") + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + return } - // This goroutine will forward history between frames. - frame.frameDone.Add(1) - frame.initAsync() + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) + } + seqDecode <- block + } + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil && frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + err = ErrUnknownDictionary + } else { + frame.history.setDict(&dict) + } + } + if err == nil && d.frame.WindowSize > d.o.maxWindowSize { + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + decodeBlock(dec) + } + break decodeStream + } - go frame.startDecoder(stream.output) - decodeFrame: - // Go through all blocks of the frame. - for { - dec := <-d.decoders - select { - case <-stream.cancel: - if !frame.sendErr(dec, io.EOF) { - // To not let the decoder dangle, send it back. - stream.output <- decodeOutput{d: dec} - } - break decodeStream - default: + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. 
+ } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) + } + hist.reset() + if h.dict != nil { + hist.setDict(h.dict) + } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.checkCRC = nil + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + dec.err = err } - err := frame.next(dec) - switch err { - case io.EOF: - // End of current frame, no error - println("EOF on next block") - break decodeFrame - case nil: - continue - default: - println("block decoder returned", err) - break decodeStream + var tmp [4]byte + copy(tmp[:], crc) + dec.checkCRC = tmp[:] + if debugDecoder { + println("found crc to check:", dec.checkCRC) } } - // All blocks have started decoding, check if there are more frames. - println("waiting for done") - frame.frameDone.Wait() - println("done waiting...") + err = dec.err + last := dec.Last + decodeBlock(dec) + if err != nil { + break decodeStream + } + if last { + break + } } - frame.frameDone.Wait() - println("Sending EOS") - stream.output <- decodeOutput{err: errEndOfStream} } + close(seqDecode) + wg.Wait() + d.frame.history.b = frameHistCache } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index 284d38449..c70e6fa0f 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -6,7 +6,6 @@ package zstd import ( "errors" - "fmt" "runtime" ) @@ -18,16 +17,22 @@ type decoderOptions struct { lowMem bool concurrent int maxDecodedSize uint64 + maxWindowSize uint64 dicts []dict + ignoreChecksum bool } func (o *decoderOptions) setDefault() { *o = decoderOptions{ // use less ram: true for now, but may change. - lowMem: true, - concurrent: runtime.GOMAXPROCS(0), + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + maxWindowSize: MaxWindowSize, } - o.maxDecodedSize = 1 << 63 + if o.concurrent > 4 { + o.concurrent = 4 + } + o.maxDecodedSize = 64 << 30 } // WithDecoderLowmem will set whether to use a lower amount of memory, @@ -36,16 +41,25 @@ func WithDecoderLowmem(b bool) DOption { return func(o *decoderOptions) error { o.lowMem = b; return nil } } -// WithDecoderConcurrency will set the concurrency, -// meaning the maximum number of decoders to run concurrently. -// The value supplied must be at least 1. -// By default this will be set to GOMAXPROCS. +// WithDecoderConcurrency sets the number of created decoders. +// When decoding block with DecodeAll, this will limit the number +// of possible concurrently running decodes. +// When decoding streams, this will limit the number of +// inflight blocks. +// When decoding streams and setting maximum to 1, +// no async decoding will be done. +// When a value of 0 is provided GOMAXPROCS will be used. +// By default this will be set to 4 or GOMAXPROCS, whatever is lower. 
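Combining the rewritten options in this file, a decoder tuned for strictly synchronous, memory-bounded operation might be constructed as in this sketch (not part of the vendored diff; the limits are illustrative):

	package main

	import (
		"log"
		"os"

		"github.com/klauspost/compress/zstd"
	)

	func main() {
		dec, err := zstd.NewReader(os.Stdin,
			zstd.WithDecoderConcurrency(1),     // 1 = synchronous, no async goroutines
			zstd.WithDecoderLowmem(true),       // favor smaller allocations
			zstd.WithDecoderMaxMemory(128<<20), // illustrative 128 MiB decoded cap
		)
		if err != nil {
			log.Fatal(err)
		}
		defer dec.Close()

		if _, err := dec.WriteTo(os.Stdout); err != nil {
			log.Fatal(err)
		}
	}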
func WithDecoderConcurrency(n int) DOption { return func(o *decoderOptions) error { - if n <= 0 { - return fmt.Errorf("Concurrency must be at least 1") + if n < 0 { + return errors.New("concurrency must be at least 1") + } + if n == 0 { + o.concurrent = runtime.GOMAXPROCS(0) + } else { + o.concurrent = n } - o.concurrent = n return nil } } @@ -53,15 +67,14 @@ func WithDecoderConcurrency(n int) DOption { // WithDecoderMaxMemory allows to set a maximum decoded size for in-memory // non-streaming operations or maximum window size for streaming operations. // This can be used to control memory usage of potentially hostile content. -// For streaming operations, the maximum window size is capped at 1<<30 bytes. -// Maximum and default is 1 << 63 bytes. +// Maximum is 1 << 63 bytes. Default is 64GiB. func WithDecoderMaxMemory(n uint64) DOption { return func(o *decoderOptions) error { if n == 0 { return errors.New("WithDecoderMaxMemory must be at least 1") } if n > 1<<63 { - return fmt.Errorf("WithDecoderMaxmemory must be less than 1 << 63") + return errors.New("WithDecoderMaxmemory must be less than 1 << 63") } o.maxDecodedSize = n return nil @@ -82,3 +95,29 @@ func WithDecoderDicts(dicts ...[]byte) DOption { return nil } } + +// WithDecoderMaxWindow allows to set a maximum window size for decodes. +// This allows rejecting packets that will cause big memory usage. +// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. +// If WithDecoderMaxMemory is set to a lower value, that will be used. +// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. +func WithDecoderMaxWindow(size uint64) DOption { + return func(o *decoderOptions) error { + if size < MinWindowSize { + return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes") + } + if size > (1<<41)+7*(1<<38) { + return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB") + } + o.maxWindowSize = size + return nil + } +} + +// IgnoreChecksum allows to forcibly ignore checksum checking. +func IgnoreChecksum(b bool) DOption { + return func(o *decoderOptions) error { + o.ignoreChecksum = b + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index fa25a18d8..a36ae83ef 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -82,7 +82,7 @@ func loadDict(b []byte) (*dict, error) { println("Transform table error:", err) return err } - if debug { + if debugDecoder || debugEncoder { println("Read table ok", "symbolLen:", dec.symbolLen) } // Set decoders as predefined so they aren't reused. diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index b1b7c6e6a..15ae8ee80 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -7,6 +7,10 @@ import ( "github.com/klauspost/compress/zstd/internal/xxhash" ) +const ( + dictShardBits = 6 +) + type fastBase struct { // cur is the offset at the start of hist cur int32 @@ -17,6 +21,7 @@ type fastBase struct { tmp [8]byte blk *blockEnc lastDictID uint32 + lowMem bool } // CRC returns the underlying CRC writer. @@ -33,8 +38,8 @@ func (e *fastBase) AppendCRC(dst []byte) []byte { // WindowSize returns the window size of the encoder, // or a window size small enough to contain the input size, if > 0. 
-func (e *fastBase) WindowSize(size int) int32 { - if size > 0 && size < int(e.maxMatchOff) { +func (e *fastBase) WindowSize(size int64) int32 { + if size > 0 && size < int64(e.maxMatchOff) { b := int32(1) << uint(bits.Len(uint(size))) // Keep minimum window. if b < 1024 { @@ -57,15 +62,10 @@ func (e *fastBase) addBlock(src []byte) int32 { // check if we have space already if len(e.hist)+len(src) > cap(e.hist) { if cap(e.hist) == 0 { - l := e.maxMatchOff * 2 - // Make it at least 1MB. - if l < 1<<20 { - l = 1 << 20 - } - e.hist = make([]byte, 0, l) + e.ensureHist(len(src)) } else { - if cap(e.hist) < int(e.maxMatchOff*2) { - panic("unexpected buffer size") + if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { + panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) } // Move down offset := int32(len(e.hist)) - e.maxMatchOff @@ -79,6 +79,28 @@ func (e *fastBase) addBlock(src []byte) int32 { return s } +// ensureHist will ensure that history can keep at least this many bytes. +func (e *fastBase) ensureHist(n int) { + if cap(e.hist) >= n { + return + } + l := e.maxMatchOff + if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { + l += maxCompressedBlockSize + } else { + l += e.maxMatchOff + } + // Make it at least 1MB. + if l < 1<<20 && !e.lowMem { + l = 1 << 20 + } + // Make it at least the requested size. + if l < int32(n) { + l = int32(n) + } + e.hist = make([]byte, 0, l) +} + // useBlock will replace the block with the provided one, // but transfer recent offsets from the previous. func (e *fastBase) UseBlock(enc *blockEnc) { @@ -86,11 +108,6 @@ func (e *fastBase) UseBlock(enc *blockEnc) { e.blk = enc } -func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 { - // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) -} - func (e *fastBase) matchlen(s, t int32, src []byte) int32 { if debugAsserts { if s < 0 { @@ -109,15 +126,30 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 { panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) } } + a := src[s:] + b := src[t:] + b = b[:len(a)] + end := int32((len(a) >> 3) << 3) + for i := int32(0); i < end; i += 8 { + if diff := load6432(a, i) ^ load6432(b, i); diff != 0 { + return i + int32(bits.TrailingZeros64(diff)>>3) + } + } - // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) + a = a[end:] + b = b[end:] + for i := range a { + if a[i] != b[i] { + return int32(i) + end + } + } + return int32(len(a)) + end } // Reset the encoding table. func (e *fastBase) resetBase(d *dict, singleBlock bool) { if e.blk == nil { - e.blk = &blockEnc{} + e.blk = &blockEnc{lowMem: e.lowMem} e.blk.init() } else { e.blk.reset(nil) @@ -128,14 +160,15 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) { } else { e.crc.Reset() } - if (!singleBlock || d.DictContentSize() > 0) && cap(e.hist) < int(e.maxMatchOff*2)+d.DictContentSize() { - l := e.maxMatchOff*2 + int32(d.DictContentSize()) - // Make it at least 1MB. - if l < 1<<20 { - l = 1 << 20 + if d != nil { + low := e.lowMem + if singleBlock { + e.lowMem = true } - e.hist = make([]byte, 0, l) + e.ensureHist(d.DictContentSize() + maxCompressedBlockSize) + e.lowMem = low } + // We offset current position so everything will be out of reach. // If above reset line, history will be purged. 
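// Sketch of the word-at-a-time comparison used by matchlen above: XOR two
// 8-byte loads, and if they differ, the trailing zero count divided by 8 is
// the index of the first mismatching byte. Standalone illustrative version
// (assumes `import "encoding/binary"` and `import "math/bits"`):
func matchLenSketch(a, b []byte) int {
	n := 0
	for len(a)-n >= 8 && len(b)-n >= 8 {
		x := binary.LittleEndian.Uint64(a[n:]) ^ binary.LittleEndian.Uint64(b[n:])
		if x != 0 {
			return n + bits.TrailingZeros64(x)>>3 // first differing byte
		}
		n += 8
	}
	for n < len(a) && n < len(b) && a[n] == b[n] {
		n++ // byte-at-a-time tail
	}
	return n
}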
if e.cur < bufferReset { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go new file mode 100644 index 000000000..96028ecd8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -0,0 +1,558 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "fmt" + + "github.com/klauspost/compress" +) + +const ( + bestLongTableBits = 22 // Bits used in the long match table + bestLongTableSize = 1 << bestLongTableBits // Size of the table + bestLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + bestShortTableBits = 18 // Bits used in the short match table + bestShortTableSize = 1 << bestShortTableBits // Size of the table + bestShortLen = 4 // Bytes used for table hash + +) + +type match struct { + offset int32 + s int32 + length int32 + rep int32 + est int32 +} + +const highScore = 25000 + +// estBits will estimate output bits from predefined tables. +func (m *match) estBits(bitsPerByte int32) { + mlc := mlCode(uint32(m.length - zstdMinMatch)) + var ofc uint8 + if m.rep < 0 { + ofc = ofCode(uint32(m.s-m.offset) + 3) + } else { + ofc = ofCode(uint32(m.rep)) + } + // Cost, excluding + ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] + + // Add cost of match encoding... + m.est = int32(ofTT.outBits + mlTT.outBits) + m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) + // Subtract savings compared to literal encoding... + m.est -= (m.length * bitsPerByte) >> 10 + if m.est > 0 { + // Unlikely gain.. + m.length = 0 + m.est = highScore + } +} + +// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type bestFastEncoder struct { + fastBase + table [bestShortTableSize]prevEntry + longTable [bestLongTableSize]prevEntry + dictTable []prevEntry + dictLongTable []prevEntry +} + +// Encode improves compression... +func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 4 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = prevEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
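// The loop below rebases stored table offsets when e.cur approaches
// bufferReset: entries still inside the match window are shifted to the new
// coordinate base and everything else is cleared. Per entry the rule is
// (sketch only; parameter names mirror the surrounding code):
func rebase(off, cur, minOff, maxMatchOff int32) int32 {
	if off < minOff {
		return 0 // already out of reach; can never match again
	}
	return off - cur + maxMatchOff // shift down relative to the new base
}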
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + v2 := e.table[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.table[i] = prevEntry{ + offset: v, + prev: v2, + } + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Use this to estimate literal cost. + // Scaled by 10 bits. + bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) + // Huffman can never go < 1 bit/byte + if bitsPerByte < 1024 { + bitsPerByte = 1024 + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + const kSearchStrength = 10 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + offset3 := int32(blk.recentOffsets[2]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + _ = addLiterals + + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + bestOf := func(a, b match) match { + if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 { + return a + } + return b + } + const goodEnough = 100 + + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + matchAt := func(offset int32, s int32, first uint32, rep int32) match { + if s-offset >= e.maxMatchOff || load3232(src, offset) != first { + return match{s: s, est: highScore} + } + if debugAsserts { + if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { + panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } + m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} + m.estBits(bitsPerByte) + return m + } + + best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)) + + if canRepeat && best.length < goodEnough { + cv32 := uint32(cv >> 8) + spp := s + 1 + best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) + best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) + best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) + if best.length > 0 { + cv32 = uint32(cv >> 24) + spp += 2 + best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) + best = bestOf(best, 
matchAt(spp-offset2, spp, cv32, 2)) + best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) + } + } + // Load next and check... + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + + // Look far ahead, unless we have a really long match already... + if best.length < goodEnough { + // No match found, move forward on input, no need to check forward... + if best.length < 4 { + s += 1 + (s-nextEmit)>>(kSearchStrength-1) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + continue + } + + s++ + candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] + cv = load6432(src, s) + cv2 := load6432(src, s+1) + candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] + candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] + + // Short at s+1 + best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) + // Long at s+1, s+2 + best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)) + best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)) + if false { + // Short at s+3. + // Too often worse... + best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)) + } + // See if we can find a better match by checking where the current best ends. + // Use that offset to see if we can find a better full match. + if sAt := best.s + best.length; sAt < sLimit { + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) + candidateEnd := e.longTable[nextHashL] + if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 { + bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1)) + if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 { + bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1)) + } + best = bestEnd + } + } + } + + if debugAsserts { + if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { + panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) + } + } + + // We have a match, we can store the forward value + if best.rep > 0 { + s = best.s + var seq seq + seq.matchLen = uint32(best.length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := best.s + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + repIndex := best.offset + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = uint32(best.rep) + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + s = best.s + best.length + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, best.length) + + } + break encodeLoop + } + // Index skipped... 
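// The repeat handling here follows the zstd convention of three recent
// offsets: rep==1 reuses the most recent offset, while rep==2 and rep==3
// reuse older ones and rotate them to the front, exactly as the switch on
// best.rep below does. A standalone sketch with illustrative names:
func useRepeatOffset(rep int, o1, o2, o3 int32) (off, n1, n2, n3 int32) {
	switch rep {
	case 1:
		return o1, o1, o2, o3 // order unchanged
	case 2:
		return o2, o2, o1, o3 // swap offsets 1 and 2
	default: // rep == 3
		return o3, o3, o1, o2 // rotate offset 3 to the front
	}
}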
+ off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + off++ + index0++ + } + switch best.rep { + case 2: + offset1, offset2 = offset2, offset1 + case 3: + offset1, offset2, offset3 = offset3, offset1, offset2 + } + cv = load6432(src, s) + continue + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + s = best.s + t := best.offset + offset1, offset2, offset3 = s-t, offset1, offset2 + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := best.length + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + // every entry + for index0 < s-1 { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + index0++ + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + blk.recentOffsets[2] = uint32(offset3) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. 
+// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Reset will reset and set a dictionary if not nil +func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]prevEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = bestShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 + e.dictTable[nextHash] = prevEntry{ + prev: e.dictTable[nextHash].offset, + offset: i, + } + e.dictTable[nextHash1] = prevEntry{ + prev: e.dictTable[nextHash1].offset, + offset: i + 1, + } + e.dictTable[nextHash2] = prevEntry{ + prev: e.dictTable[nextHash2].offset, + offset: i + 2, + } + e.dictTable[nextHash3] = prevEntry{ + prev: e.dictTable[nextHash3].offset, + offset: i + 3, + } + } + e.lastDictID = d.id + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + } + // Reset table to initial state + copy(e.longTable[:], e.dictLongTable) + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 94a5343d0..c769f6941 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -9,6 +9,7 @@ import "fmt" const ( betterLongTableBits = 19 // Bits used in the long match table betterLongTableSize = 1 << betterLongTableBits // Size of the table + betterLongLen = 8 // Bytes used for table hash // Note: Increasing the short table bits or making the hash shorter // can actually lead to compression degradation since it will 'steal' more from the @@ -16,29 +17,559 @@ const ( // This greatly depends on the type of input. 
betterShortTableBits = 13 // Bits used in the short match table betterShortTableSize = 1 << betterShortTableBits // Size of the table + betterShortLen = 5 // Bytes used for table hash + + betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table + betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard + + betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table + betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard ) -type prevEntry struct { - offset int32 - prev int32 +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +type betterFastEncoderDict struct { + betterFastEncoder + dictTable []tableEntry + dictLongTable []prevEntry + shortTableShardDirty [betterShortTableShardCnt]bool + longTableShardDirty [betterLongTableShardCnt]bool + allDirty bool +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) 
+ s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + index0 := s + repOff2 + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... 
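// The long match table is updated throughout this file with the same
// two-deep chain pattern: a slot keeps the newest offset plus the offset it
// displaced, so lookups can try two candidates per hash. The indexing loop
// below applies it inline; as a one-line sketch (prevEntry is the type
// defined above, the helper name is an illustrative assumption):
func pushChain(tbl []prevEntry, h uint32, off int32) {
	tbl[h] = prevEntry{offset: off, prev: tbl[h].offset} // new head; old head becomes prev
}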
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. 
+ matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + cv := load3232(src, s) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. 
We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } } -// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. -// The long match table contains the previous entry with the same hash, -// effectively making it a "chain" of length 2. -// When we find a long match we choose between the two values and select the longest. -// When we find a short match, after checking the long, we check if we can find a long at n+1 -// and that it is longer (lazy matching). -type betterFastEncoder struct { - fastBase - table [betterShortTableSize]tableEntry - longTable [betterLongTableSize]prevEntry - dictTable []tableEntry - dictLongTable []prevEntry +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) } // Encode improves compression... -func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { +func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { const ( // Input margin is the number of bytes we read (8) // and the maximum we will read ahead (2) @@ -56,6 +587,7 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { e.longTable[i] = prevEntry{} } e.cur = e.maxMatchOff + e.allDirty = true break } // Shift down everything in the table that isn't already too far away. @@ -88,6 +620,7 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { prev: v2, } } + e.allDirty = true e.cur = e.maxMatchOff break } @@ -125,7 +658,7 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) 
s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -141,8 +674,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hash5(cv, betterShortTableBits) - nextHashL := hash8(cv, betterLongTableBits) + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -150,7 +683,9 @@ encodeLoop: repIndex := s - offset1 + repOff off := s + e.cur e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + e.markShortShardDirty(nextHashS) if canRepeat { if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { @@ -191,7 +726,7 @@ encodeLoop: nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, lenght) } @@ -201,10 +736,13 @@ encodeLoop: for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) index0 += 2 } cv = load6432(src, s) @@ -251,7 +789,7 @@ encodeLoop: s += lenght + repOff2 nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, lenght) } @@ -262,10 +800,13 @@ encodeLoop: for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) index0 += 2 } cv = load6432(src, s) @@ -340,12 +881,13 @@ encodeLoop: // See if we can find a long match at s+1 const checkAt = 1 cv := load6432(src, s+checkAt) - nextHashL = hash8(cv, betterLongTableBits) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) candidateL = e.longTable[nextHashL] coffsetL = candidateL.offset - e.cur // We can store it, since we have at least a 4 byte match. e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { // Found a long match, at least 8 bytes. matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 @@ -398,9 +940,41 @@ encodeLoop: } cv = load6432(src, s) } + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + cv := load3232(src, s) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. 
+ matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. offset2 = offset1 offset1 = s - t @@ -449,10 +1023,13 @@ encodeLoop: for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) index0 += 2 } @@ -470,15 +1047,17 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hash5(cv, betterShortTableBits) - nextHashL := hash8(cv, betterLongTableBits) + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) // We have at least 4 byte match. // No need to check backwards. We come straight from a match l := 4 + e.matchlen(s+4, o2+4, src) e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.markLongShardDirty(nextHashL) e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShortShardDirty(nextHashS) seq.matchLen = uint32(l) - zstdMinMatch seq.litLen = 0 @@ -507,20 +1086,21 @@ encodeLoop: } blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[1] = uint32(offset2) - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } } -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. 
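// hashLen(cv, tableBits, byteLen), used throughout these encoders, hashes the
// low byteLen bytes of a 64-bit load into a tableBits-sized index. A
// simplified multiplicative-hash sketch; the constant and exact shifts are
// illustrative assumptions, not the vendored implementation:
func hashSketch(u uint64, tableBits, byteLen uint) uint32 {
	const prime = 0x9e3779b185ebca87 // any odd 64-bit constant works here
	u <<= 64 - 8*byteLen             // keep only the low byteLen bytes
	return uint32((u * prime) >> (64 - tableBits))
}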
-func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - e.Encode(blk, src) +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("betterFastEncoder: Reset with dict") + } } // ResetDict will reset and set a dictionary if not nil -func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { +func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { e.resetBase(d, singleBlock) if d == nil { return @@ -535,10 +1115,10 @@ func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { const hashLog = betterShortTableBits cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hash5(cv, hashLog) // 0 -> 4 - nextHash1 := hash5(cv>>8, hashLog) // 1 -> 5 - nextHash2 := hash5(cv>>16, hashLog) // 2 -> 6 - nextHash3 := hash5(cv>>24, hashLog) // 3 -> 7 + nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 e.dictTable[nextHash] = tableEntry{ val: uint32(cv), offset: i, @@ -557,6 +1137,7 @@ func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { } } e.lastDictID = d.id + e.allDirty = true } // Init or copy dict table @@ -566,7 +1147,7 @@ func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { } if len(d.content) >= 8 { cv := load6432(d.content, 0) - h := hash8(cv, betterLongTableBits) + h := hashLen(cv, betterLongTableBits, betterLongLen) e.dictLongTable[h] = prevEntry{ offset: e.maxMatchOff, prev: e.dictLongTable[h].offset, @@ -576,7 +1157,7 @@ func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { off := 8 // First to read for i := e.maxMatchOff + 1; i < end; i++ { cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hash8(cv, betterLongTableBits) + h := hashLen(cv, betterLongTableBits, betterLongLen) e.dictLongTable[h] = prevEntry{ offset: i, prev: e.dictLongTable[h].offset, @@ -585,11 +1166,72 @@ func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { } } e.lastDictID = d.id + e.allDirty = true } + // Reset table to initial state - copy(e.longTable[:], e.dictLongTable) + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterShortTableShardCnt + const shardSize = betterShortTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.table[:], e.dictTable) + for i := range e.shortTableShardDirty { + e.shortTableShardDirty[i] = false + } + } else { + for i := range e.shortTableShardDirty { + if !e.shortTableShardDirty[i] { + continue + } + + copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + e.shortTableShardDirty[i] = false + } + } + } + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterLongTableShardCnt + const shardSize = betterLongTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.longTable[:], e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + } else { + for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) + e.longTableShardDirty[i] = false + } 
+ } + } e.cur = e.maxMatchOff - // Reset table to initial state - copy(e.table[:], e.dictTable) + e.allDirty = false +} + +func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/betterLongTableShardSize] = true +} + +func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { + e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index 19eebf66e..7ff0c64fa 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -10,16 +10,28 @@ const ( dFastLongTableBits = 17 // Bits used in the long match table dFastLongTableSize = 1 << dFastLongTableBits // Size of the table dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastLongLen = 8 // Bytes used for table hash + + dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table + dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard dFastShortTableBits = tableBits // Bits used in the short match table dFastShortTableSize = 1 << dFastShortTableBits // Size of the table dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastShortLen = 5 // Bytes used for table hash + ) type doubleFastEncoder struct { fastEncoder - longTable [dFastLongTableSize]tableEntry - dictLongTable []tableEntry + longTable [dFastLongTableSize]tableEntry +} + +type doubleFastEncoderDict struct { + fastEncoderDict + longTable [dFastLongTableSize]tableEntry + dictLongTable []tableEntry + longTableShardDirty [dLongTableShardCnt]bool } // Encode mimmics functionality in zstd_dfast.c @@ -100,7 +112,7 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) 
s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -115,8 +127,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -161,7 +173,7 @@ encodeLoop: s += lenght + repOff nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, lenght) } @@ -199,7 +211,7 @@ encodeLoop: // See if we can find a long match at s+1 const checkAt = 1 cv := load6432(src, s+checkAt) - nextHashL = hash8(cv, dFastLongTableBits) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) candidateL = e.longTable[nextHashL] coffsetL = s - (candidateL.offset - e.cur) + checkAt @@ -295,16 +307,16 @@ encodeLoop: cv1 := load6432(src, index1) te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hash8(cv0, dFastLongTableBits)] = te0 - e.longTable[hash8(cv1, dFastLongTableBits)] = te1 + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 cv0 >>= 8 cv1 >>= 8 te0.offset++ te1.offset++ te0.val = uint32(cv0) te1.val = uint32(cv1) - e.table[hash5(cv0, dFastShortTableBits)] = te0 - e.table[hash5(cv1, dFastShortTableBits)] = te1 + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 cv = load6432(src, s) @@ -321,8 +333,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) // We have at least 4 byte match. // No need to check backwards. We come straight from a match @@ -359,7 +371,7 @@ encodeLoop: } blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[1] = uint32(offset2) - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } } @@ -418,7 +430,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) 
s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -427,8 +439,8 @@ encodeLoop: var t int32 for { - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -474,7 +486,7 @@ encodeLoop: s += length + repOff nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, length) } @@ -512,7 +524,7 @@ encodeLoop: // See if we can find a long match at s+1 const checkAt = 1 cv := load6432(src, s+checkAt) - nextHashL = hash8(cv, dFastLongTableBits) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) candidateL = e.longTable[nextHashL] coffsetL = s - (candidateL.offset - e.cur) + checkAt @@ -605,16 +617,16 @@ encodeLoop: cv1 := load6432(src, index1) te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hash8(cv0, dFastLongTableBits)] = te0 - e.longTable[hash8(cv1, dFastLongTableBits)] = te1 + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 cv0 >>= 8 cv1 >>= 8 te0.offset++ te1.offset++ te0.val = uint32(cv0) te1.val = uint32(cv1) - e.table[hash5(cv0, dFastShortTableBits)] = te0 - e.table[hash5(cv1, dFastShortTableBits)] = te1 + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 cv = load6432(src, s) @@ -631,8 +643,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hash5(cv1>>8, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) + nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) // We have at least 4 byte match. // No need to check backwards. We come straight from a match @@ -668,7 +680,7 @@ encodeLoop: blk.literals = append(blk.literals, src[nextEmit:]...) blk.extraLits = len(src) - int(nextEmit) } - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } @@ -678,9 +690,379 @@ encodeLoop: } } +// Encode will encode the content, with a dictionary if initialized for it. +func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
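// The *Dict encoder variants, including the one below, track which table
// shards have been written since the last Reset, so that Reset can recopy
// only the dirty shards from the dictionary snapshot instead of the whole
// table. A standalone sketch of that bookkeeping (type, names and shard size
// are illustrative assumptions):
const sketchShardSize = 1 << 10

type sketchEntry struct{ offset int32 }

func markShardDirtySketch(dirty []bool, entry uint32) {
	dirty[entry/sketchShardSize] = true // one flag per fixed-size shard
}

func resetShardsSketch(table, snapshot []sketchEntry, dirty []bool) {
	for i := range dirty {
		if !dirty[i] {
			continue // shard untouched since the last reset: keep as-is
		}
		copy(table[i*sketchShardSize:(i+1)*sketchShardSize],
			snapshot[i*sketchShardSize:(i+1)*sketchShardSize])
		dirty[i] = false
	}
}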
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
+ t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + e.markLongShardDirty(nextHashL) + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) + longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) + e.longTable[longHash1] = te0 + e.longTable[longHash2] = te1 + e.markLongShardDirty(longHash1) + e.markLongShardDirty(longHash2) + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) + hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) + e.table[hashVal1] = te0 + e.markShardDirty(hashVal1) + e.table[hashVal2] = te1 + e.markShardDirty(hashVal2) + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // If we encoded more than 64K mark all dirty. 
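The dictionary-aware encoders track which fixed-size shards of the hash table have been written, so Reset can restore only those shards from the dictionary snapshot instead of re-copying the whole table; past 64K of input the per-write bookkeeping stops paying off, hence the blanket marking below. A sketch of the two operations, assuming a table split into equal shards:

	// markDirty records that the shard containing entryNum was written.
	func markDirty(dirty []bool, entryNum, shardSize uint32) {
		dirty[entryNum/shardSize] = true
	}

	// restoreDirty copies back only the written shards from the snapshot.
	func restoreDirty(table, snapshot []tableEntry, dirty []bool, shardSize int) {
		for i := range dirty {
			if !dirty[i] {
				continue
			}
			copy(table[i*shardSize:(i+1)*shardSize], snapshot[i*shardSize:(i+1)*shardSize])
			dirty[i] = false
		}
	}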
+	if len(src) > 64<<10 {
+		e.markAllShardsDirty()
+	}
+}
+
 // Reset will reset and set a dictionary if not nil
 func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) {
 	e.fastEncoder.Reset(d, singleBlock)
+	if d != nil {
+		panic("doubleFastEncoder: Reset with dict not supported")
+	}
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
+	allDirty := e.allDirty
+	e.fastEncoderDict.Reset(d, singleBlock)
 	if d == nil {
 		return
 	}
@@ -692,22 +1074,51 @@ func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) {
 	}
 	if len(d.content) >= 8 {
 		cv := load6432(d.content, 0)
-		e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{
+		e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{
 			val:    uint32(cv),
 			offset: e.maxMatchOff,
 		}
 		end := int32(len(d.content)) - 8 + e.maxMatchOff
 		for i := e.maxMatchOff + 1; i < end; i++ {
 			cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56)
-			e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{
+			e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{
 				val:    uint32(cv),
 				offset: i,
 			}
 		}
 	}
 	e.lastDictID = d.id
+	e.allDirty = true
 	}
 	// Reset table to initial state
 	e.cur = e.maxMatchOff
-	copy(e.longTable[:], e.dictLongTable)
+
+	dirtyShardCnt := 0
+	if !allDirty {
+		for i := range e.longTableShardDirty {
+			if e.longTableShardDirty[i] {
+				dirtyShardCnt++
+			}
+		}
+	}
+
+	if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
+		copy(e.longTable[:], e.dictLongTable)
+		for i := range e.longTableShardDirty {
+			e.longTableShardDirty[i] = false
+		}
+		return
+	}
+	for i := range e.longTableShardDirty {
+		if !e.longTableShardDirty[i] {
+			continue
+		}
+
+		copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+		e.longTableShardDirty[i] = false
+	}
+}
+
+func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) {
+	e.longTableShardDirty[entryNum/dLongTableShardSize] = true
 }
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
index 0b301df43..f51ab529a 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -6,15 +6,16 @@ package zstd
 
 import (
 	"fmt"
-	"math"
-	"math/bits"
 )
 
 const (
-	tableBits      = 15             // Bits used in the table
-	tableSize      = 1 << tableBits // Size of the table
-	tableMask      = tableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
-	maxMatchLength = 131074
+	tableBits        = 15                               // Bits used in the table
+	tableSize        = 1 << tableBits                   // Size of the table
+	tableShardCnt    = 1 << (tableBits - dictShardBits) // Number of shards in the table
+	tableShardSize   = tableSize / tableShardCnt        // Size of an individual shard
+	tableFastHashLen = 6
+	tableMask        = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+	maxMatchLength   = 131074
 )
 
 type tableEntry struct {
@@ -24,8 +25,14 @@ type tableEntry struct {
 
 type fastEncoder struct {
 	fastBase
-	table     [tableSize]tableEntry
-	dictTable []tableEntry
+	table [tableSize]tableEntry
+}
+
+type fastEncoderDict struct {
+	fastEncoder
+	dictTable       []tableEntry
+	tableShardDirty [tableShardCnt]bool
+	allDirty        bool
 }
 
 // Encode mimics functionality in zstd_fast.c
 func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
@@ -78,7 +85,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
 
 	// TEMPLATE
 	const hashLog = tableBits
 	// seems global, but would be nice to tweak.
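hashLen is the parameterized replacement for the fixed hash5/hash6/hash8 helpers swapped out across these hunks: it hashes the low mls bytes of a 64-bit load into hashLog bits with the usual multiplicative scheme. A sketch of the idea; the prime constants here are illustrative, the package defines its own:

	func hashBytes(u uint64, hashLog, mls uint8) uint32 {
		const prime6 = 227718039650203
		const prime8 = 0xcf1bbcdcb7a56463
		switch mls {
		case 6:
			// Keep the low 6 bytes, spread them, take the top hashLog bits.
			return uint32(((u << (64 - 48)) * prime6) >> (64 - hashLog))
		default:
			// The 8-byte variant needs no masking shift.
			return uint32((u * prime8) >> (64 - hashLog))
		}
	}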
- const kSearchStrength = 8 + const kSearchStrength = 6 // nextEmit is where in src the next emitLiteral should start from. nextEmit := s @@ -95,7 +102,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -114,8 +121,8 @@ encodeLoop: panic("offset0 was 0") } - nextHash := hash6(cv, hashLog) - nextHash2 := hash6(cv>>8, hashLog) + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) candidate := e.table[nextHash] candidate2 := e.table[nextHash2] repIndex := s - offset1 + 2 @@ -127,20 +134,7 @@ encodeLoop: // Consider history as well. var seq seq var length int32 - // length = 4 + e.matchlen(s+6, repIndex+4, src) - { - a := src[s+6:] - b := src[repIndex+4:] - endI := len(a) & (math.MaxInt32 - 7) - length = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } - + length = 4 + e.matchlen(s+6, repIndex+4, src) seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. @@ -170,7 +164,7 @@ encodeLoop: s += length + 2 nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, length) } @@ -227,20 +221,7 @@ encodeLoop: } // Extend the 4-byte match as long as possible. - //l := e.matchlen(s+4, t+4, src) + 4 - var l int32 - { - a := src[s+4:] - b := src[t+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards tMin := s - e.maxMatchOff @@ -277,23 +258,10 @@ encodeLoop: if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { // We have at least 4 byte match. // No need to check backwards. We come straight from a match - //l := 4 + e.matchlen(s+4, o2+4, src) - var l int32 - { - a := src[s+4:] - b := src[o2+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + l := 4 + e.matchlen(s+4, o2+4, src) // Store this, since we have it. - nextHash := hash6(cv, hashLog) + nextHash := hashLen(cv, hashLog, tableFastHashLen) e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} seq.matchLen = uint32(l) - zstdMinMatch seq.litLen = 0 @@ -322,7 +290,7 @@ encodeLoop: } blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[1] = uint32(offset2) - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } } @@ -335,7 +303,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { inputMargin = 8 minNonLiteralBlockSize = 1 + 1 + inputMargin ) - if debug { + if debugEncoder { if len(src) > maxBlockSize { panic("src too big") } @@ -366,7 +334,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { // TEMPLATE const hashLog = tableBits // seems global, but would be nice to tweak. - const kSearchStrength = 8 + const kSearchStrength = 6 // nextEmit is where in src the next emitLiteral should start from. 
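Every removed inline loop in these hunks collapses back into e.matchlen, which compares eight bytes at a time and uses the trailing-zero count of the XOR difference to locate the first mismatching byte. A self-contained sketch of that technique:

	import (
		"encoding/binary"
		"math/bits"
	)

	// matchLen returns how many leading bytes of a and b are equal.
	func matchLen(a, b []byte) (n int) {
		for len(a) >= 8 && len(b) >= 8 {
			diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
			if diff != 0 {
				// Each differing byte sets at least one bit; the lowest
				// set bit therefore locates the first mismatch.
				return n + bits.TrailingZeros64(diff)>>3
			}
			a, b, n = a[8:], b[8:], n+8
		}
		for i := range a {
			if i >= len(b) || a[i] != b[i] {
				break
			}
			n++
		}
		return n
	}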
nextEmit := s @@ -383,7 +351,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -397,8 +365,8 @@ encodeLoop: // By not using them for the first 3 matches for { - nextHash := hash6(cv, hashLog) - nextHash2 := hash6(cv>>8, hashLog) + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) candidate := e.table[nextHash] candidate2 := e.table[nextHash2] repIndex := s - offset1 + 2 @@ -409,21 +377,7 @@ encodeLoop: if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { // Consider history as well. var seq seq - // length := 4 + e.matchlen(s+6, repIndex+4, src) - // length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:])) - var length int32 - { - a := src[s+6:] - b := src[repIndex+4:] - endI := len(a) & (math.MaxInt32 - 7) - length = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + length := 4 + e.matchlen(s+6, repIndex+4, src) seq.matchLen = uint32(length - zstdMinMatch) @@ -454,7 +408,7 @@ encodeLoop: s += length + 2 nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, length) } @@ -513,21 +467,7 @@ encodeLoop: panic(fmt.Sprintf("t (%d) < 0 ", t)) } // Extend the 4-byte match as long as possible. - //l := e.matchlenNoHist(s+4, t+4, src) + 4 - // l := int32(matchLen(src[s+4:], src[t+4:])) + 4 - var l int32 - { - a := src[s+4:] - b := src[t+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards tMin := s - e.maxMatchOff @@ -564,24 +504,10 @@ encodeLoop: if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { // We have at least 4 byte match. // No need to check backwards. We come straight from a match - //l := 4 + e.matchlenNoHist(s+4, o2+4, src) - // l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) - var l int32 - { - a := src[s+4:] - b := src[o2+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + l := 4 + e.matchlen(s+4, o2+4, src) // Store this, since we have it. - nextHash := hash6(cv, hashLog) + nextHash := hashLen(cv, hashLog, tableFastHashLen) e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} seq.matchLen = uint32(l) - zstdMinMatch seq.litLen = 0 @@ -608,7 +534,7 @@ encodeLoop: blk.literals = append(blk.literals, src[nextEmit:]...) blk.extraLits = len(src) - int(nextEmit) } - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } // We do not store history, so we must offset e.cur to avoid false matches for next user. @@ -617,8 +543,284 @@ encodeLoop: } } +// Encode will encode the content, with a dictionary if initialized for it. 
+func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) {
+	const (
+		inputMargin            = 8
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+	)
+	if e.allDirty || len(src) > 32<<10 {
+		e.fastEncoder.Encode(blk, src)
+		e.allDirty = true
+		return
+	}
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			e.cur = e.maxMatchOff
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.table[i].offset = v
+		}
+		e.cur = e.maxMatchOff
+		break
+	}
+
+	s := e.addBlock(src)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Override src
+	src = e.hist
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 2.
+	const stepSize = 2
+
+	// TEMPLATE
+	const hashLog = tableBits
+	// seems global, but would be nice to tweak.
+	const kSearchStrength = 7
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debugEncoder {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		// t will contain the match offset when we find one.
+		// When exiting the search loop, we have already checked 4 bytes.
+		var t int32
+
+		// We will not use repeat offsets across blocks.
+		// By not using them for the first 3 matches
+		canRepeat := len(blk.sequences) > 2
+
+		for {
+			if debugAsserts && canRepeat && offset1 == 0 {
+				panic("offset0 was 0")
+			}
+
+			nextHash := hashLen(cv, hashLog, tableFastHashLen)
+			nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
+			candidate := e.table[nextHash]
+			candidate2 := e.table[nextHash2]
+			repIndex := s - offset1 + 2
+
+			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.markShardDirty(nextHash)
+			e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+			e.markShardDirty(nextHash2)
+
+			if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
+				// Consider history as well.
+				var seq seq
+				var length int32
+				length = 4 + e.matchlen(s+6, repIndex+4, src)
+
+				seq.matchLen = uint32(length - zstdMinMatch)
+
+				// We might be able to match backwards.
+				// Extend as long as we can.
+				start := s + 2
+				// We end the search early, so we don't risk 0 literals
+				// and have to do special offset treatment.
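The "special offset treatment" mentioned above is the format's repeat-offset rule: when a sequence carries zero literals, the repeat codes shift by one and the third code means "previous offset minus one" (per RFC 8878); the guard on the next line, startLimit = nextEmit + 1, guarantees at least one literal, so the plain mapping always applies. A simplified sketch of that rule:

	// resolveRepcode maps a repeat code (1-3) to a concrete offset.
	func resolveRepcode(rep [3]uint32, code, litLen int) uint32 {
		if litLen == 0 {
			code++ // with no literals, repeat codes shift by one
		}
		switch code {
		case 1:
			return rep[0]
		case 2:
			return rep[1]
		case 3:
			return rep[2]
		default: // the shifted third code
			return rep[0] - 1
		}
	}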
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	blk.recentOffsets[0] = uint32(offset1)
+	blk.recentOffsets[1] = uint32(offset2)
+	if debugEncoder {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+}
+
 // Reset will reset and set a dictionary if not nil
 func (e *fastEncoder) Reset(d *dict, singleBlock bool) {
+	e.resetBase(d, singleBlock)
+	if d != nil {
+		panic("fastEncoder: Reset with dict")
+	}
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
 	e.resetBase(d, singleBlock)
 	if d == nil {
 		return
@@ -635,9 +837,9 @@ func (e *fastEncoder) Reset(d *dict, singleBlock bool) {
 			const hashLog = tableBits
 
 			cv := load6432(d.content, i-e.maxMatchOff)
-			nextHash := hash6(cv, hashLog)      // 0 -> 5
-			nextHash1 := hash6(cv>>8, hashLog)  // 1 -> 6
-			nextHash2 := hash6(cv>>16, hashLog) // 2 -> 7
+			nextHash := hashLen(cv, hashLog, tableFastHashLen)      // 0 -> 5
+			nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen)  // 1 -> 6
+			nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7
 			e.dictTable[nextHash] = tableEntry{
 				val:    uint32(cv),
 				offset: i,
@@ -653,9 +855,44 @@ func (e *fastEncoder) Reset(d *dict, singleBlock bool) {
 		}
 	}
 	e.lastDictID = d.id
+	e.allDirty = true
 	}
 	e.cur = e.maxMatchOff
-	// Reset table to initial state
-	copy(e.table[:], e.dictTable)
+	dirtyShardCnt := 0
+	if !e.allDirty {
+		for i := range e.tableShardDirty {
+			if e.tableShardDirty[i] {
+				dirtyShardCnt++
+			}
+		}
+	}
+
+	const shardCnt = tableShardCnt
+	const shardSize = tableShardSize
+	if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+		copy(e.table[:], e.dictTable)
+		for i := range e.tableShardDirty {
+			e.tableShardDirty[i] = false
+		}
+		e.allDirty = false
+		return
+	}
+	for i := range e.tableShardDirty {
+		if !e.tableShardDirty[i] {
+			continue
+		}
+
+		copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+		e.tableShardDirty[i] = false
+	}
+	e.allDirty = false
+}
+
+func (e *fastEncoderDict) markAllShardsDirty() {
+	e.allDirty = true
+}
+
+func (e *fastEncoderDict) markShardDirty(entryNum uint32) {
+	e.tableShardDirty[entryNum/tableShardSize] = true
 }
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index f5759211d..7aaaedb23 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -33,7 +33,7 @@ type encoder interface {
 	Block() *blockEnc
 	CRC() *xxhash.Digest
 	AppendCRC([]byte) []byte
-	WindowSize(size int) int32
+	WindowSize(size int64) int32
 	UseBlock(*blockEnc)
 	Reset(d *dict, singleBlock bool)
 }
@@ -48,6 +48,8 @@ type encoderState struct {
 	err              error
 	writeErr         error
 	nWritten         int64
+	nInput           int64
+	frameContentSize int64
 	headerWritten    bool
 	eofWritten       bool
 	fullFrameWritten bool
@@ -96,23 +98,25 @@ func (e *Encoder) Reset(w io.Writer) {
 	if cap(s.filling) == 0 {
 		s.filling = make([]byte, 0, e.o.blockSize)
 	}
-	if cap(s.current) == 0 {
-		s.current = make([]byte, 0, e.o.blockSize)
-	}
-	if cap(s.previous) == 0 {
-		s.previous = make([]byte, 0, e.o.blockSize)
+	if e.o.concurrent > 1 {
+		if cap(s.current) == 0 {
+			s.current = make([]byte, 0, e.o.blockSize)
+		}
+		if cap(s.previous) == 0 {
+			s.previous = make([]byte, 0, e.o.blockSize)
+		}
+		s.current = s.current[:0]
+		s.previous = s.previous[:0]
+		if s.writing == nil {
+			s.writing = &blockEnc{lowMem: e.o.lowMem}
+			s.writing.init()
+		}
+		s.writing.initNewEncode()
 	}
 	if s.encoder == nil {
 		s.encoder =
e.o.encoder() } - if s.writing == nil { - s.writing = &blockEnc{} - s.writing.init() - } - s.writing.initNewEncode() s.filling = s.filling[:0] - s.current = s.current[:0] - s.previous = s.previous[:0] s.encoder.Reset(e.o.dict, false) s.headerWritten = false s.eofWritten = false @@ -120,7 +124,21 @@ func (e *Encoder) Reset(w io.Writer) { s.w = w s.err = nil s.nWritten = 0 + s.nInput = 0 s.writeErr = nil + s.frameContentSize = 0 +} + +// ResetContentSize will reset and set a content size for the next stream. +// If the bytes written does not match the size given an error will be returned +// when calling Close(). +// This is removed when Reset is called. +// Sizes <= 0 results in no content size set. +func (e *Encoder) ResetContentSize(w io.Writer, size int64) { + e.Reset(w) + if size >= 0 { + e.state.frameContentSize = size + } } // Write data to the encoder. @@ -176,6 +194,12 @@ func (e *Encoder) nextBlock(final bool) error { } if !s.headerWritten { // If we have a single block encode, do a sync compression. + if final && len(s.filling) == 0 && !e.o.fullZero { + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } if final && len(s.filling) > 0 { s.current = e.EncodeAll(s.filling, s.current[:0]) var n2 int @@ -184,6 +208,7 @@ func (e *Encoder) nextBlock(final bool) error { return s.err } s.nWritten += int64(n2) + s.nInput += int64(len(s.filling)) s.current = s.current[:0] s.filling = s.filling[:0] s.headerWritten = true @@ -194,8 +219,8 @@ func (e *Encoder) nextBlock(final bool) error { var tmp [maxHeaderSize]byte fh := frameHeader{ - ContentSize: 0, - WindowSize: uint32(s.encoder.WindowSize(0)), + ContentSize: uint64(s.frameContentSize), + WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), SingleSegment: false, Checksum: e.o.crc, DictID: e.o.dict.ID(), @@ -235,11 +260,52 @@ func (e *Encoder) nextBlock(final bool) error { return s.err } + // SYNC: + if e.o.concurrent == 1 { + src := s.filling + s.nInput += int64(len(s.filling)) + if debugEncoder { + println("Adding sync block,", len(src), "bytes, final:", final) + } + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + blk.encodeRaw(src) + // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. + case nil: + default: + s.err = err + return err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + // Move blocks forward. s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.nInput += int64(len(s.current)) s.wg.Add(1) go func(src []byte) { - if debug { + if debugEncoder { println("Adding block,", len(src), "bytes, final:", final) } defer func() { @@ -284,7 +350,7 @@ func (e *Encoder) nextBlock(final bool) error { } switch err { case errIncompressible: - if debug { + if debugEncoder { println("Storing incompressible block as raw") } blk.encodeRaw(src) @@ -307,7 +373,7 @@ func (e *Encoder) nextBlock(final bool) error { // // The Copy function uses ReaderFrom if available. 
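A hedged usage sketch for the ResetContentSize method added earlier in this hunk (the writer and payload here are placeholders): the declared size is stored in the frame header, and Close reports an error when the stream length disagrees.

	func encodeWithKnownSize(w io.Writer, data []byte) error {
		enc, err := zstd.NewWriter(nil) // nil writer: we Reset below
		if err != nil {
			return err
		}
		enc.ResetContentSize(w, int64(len(data)))
		if _, err := enc.Write(data); err != nil {
			return err
		}
		// Close fails if the byte count differs from the declared size.
		return enc.Close()
	}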
func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { - if debug { + if debugEncoder { println("Using ReadFrom") } @@ -330,20 +396,20 @@ func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { switch err { case io.EOF: e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] - if debug { + if debugEncoder { println("ReadFrom: got EOF final block:", len(e.state.filling)) } return n, nil + case nil: default: - if debug { + if debugEncoder { println("ReadFrom: got error:", err) } e.state.err = err return n, err - case nil: } if len(src) > 0 { - if debug { + if debugEncoder { println("ReadFrom: got space left in source:", len(src)) } continue @@ -388,6 +454,11 @@ func (e *Encoder) Close() error { if err != nil { return err } + if s.frameContentSize > 0 { + if s.nInput != s.frameContentSize { + return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) + } + } if e.state.fullFrameWritten { return s.err } @@ -457,21 +528,21 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { // If a non-single block is needed the encoder will reset again. e.encoders <- enc }() - // Use single segments when above minimum window and below 1MB. - single := len(src) < 1<<20 && len(src) > MinWindowSize + // Use single segments when above minimum window and below window size. + single := len(src) <= e.o.windowSize && len(src) > MinWindowSize if e.o.single != nil { single = *e.o.single } fh := frameHeader{ ContentSize: uint64(len(src)), - WindowSize: uint32(enc.WindowSize(len(src))), + WindowSize: uint32(enc.WindowSize(int64(len(src)))), SingleSegment: single, Checksum: e.o.crc, DictID: e.o.dict.ID(), } // If less than 1MB, allocate a buffer up front. - if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 { + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { dst = make([]byte, 0, len(src)) } dst, err := fh.appendTo(dst) @@ -480,7 +551,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } // If we can do everything in one block, prefer that. - if len(src) <= maxCompressedBlockSize { + if len(src) <= e.o.blockSize { enc.Reset(e.o.dict, true) // Slightly faster with no history and everything in one block. if e.o.crc { @@ -506,7 +577,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { switch err { case errIncompressible: - if debug { + if debugEncoder { println("Storing incompressible block as raw") } dst = blk.encodeRawTo(dst, src) @@ -542,7 +613,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { switch err { case errIncompressible: - if debug { + if debugEncoder { println("Storing incompressible block as raw") } dst = blk.encodeRawTo(dst, todo) diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 1209915bc..a7c5e1aac 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -24,31 +24,45 @@ type encoderOptions struct { allLitEntropy bool customWindow bool customALEntropy bool + customBlockSize bool + lowMem bool dict *dict } func (o *encoderOptions) setDefault() { *o = encoderOptions{ - // use less ram: true for now, but may change. concurrent: runtime.GOMAXPROCS(0), crc: true, single: nil, - blockSize: 1 << 16, + blockSize: maxCompressedBlockSize, windowSize: 8 << 20, level: SpeedDefault, allLitEntropy: true, + lowMem: false, } } // encoder returns an encoder with the selected options. 
func (o encoderOptions) encoder() encoder {
	switch o.level {
+	case SpeedFastest:
+		if o.dict != nil {
+			return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+		}
+		return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+
 	case SpeedDefault:
-		return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}}}
+		if o.dict != nil {
+			return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}}
+		}
+		return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
 	case SpeedBetterCompression:
-		return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}}
-	case SpeedFastest:
-		return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}}
+		if o.dict != nil {
+			return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+		}
+		return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+	case SpeedBestCompression:
+		return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
 	}
 	panic("unknown compression level")
 }
@@ -60,8 +74,9 @@ func WithEncoderCRC(b bool) EOption {
 }
 
 // WithEncoderConcurrency will set the concurrency,
-// meaning the maximum number of decoders to run concurrently.
+// meaning the maximum number of encoders to run concurrently.
 // The value supplied must be at least 1.
+// For streams, setting a value of 1 will disable async compression.
 // By default this will be set to GOMAXPROCS.
 func WithEncoderConcurrency(n int) EOption {
 	return func(o *encoderOptions) error {
@@ -93,6 +108,7 @@ func WithWindowSize(n int) EOption {
 			o.customWindow = true
 			if o.blockSize > o.windowSize {
 				o.blockSize = o.windowSize
+				o.customBlockSize = true
 			}
 			return nil
 		}
@@ -143,20 +159,20 @@ const (
 	// By using this, notice that CPU usage may go up in the future.
 	SpeedBetterCompression
 
+	// SpeedBestCompression will choose the best available compression option.
+	// This will offer the best compression no matter the CPU cost.
+	SpeedBestCompression
+
 	// speedLast should be kept as the last actual compression option.
 	// This is not for external usage, but is used to keep track of the valid options.
 	speedLast
-
-	// SpeedBestCompression will choose the best available compression option.
-	// For now this is not implemented.
-	SpeedBestCompression = SpeedBetterCompression
 )
 
 // EncoderLevelFromString will convert a string representation of an encoding level back
 // to a compression level. The compare is not case sensitive.
 // If the string wasn't recognized, (false, SpeedDefault) will be returned.
 func EncoderLevelFromString(s string) (bool, EncoderLevel) {
-	for l := EncoderLevel(speedNotSet + 1); l < speedLast; l++ {
+	for l := speedNotSet + 1; l < speedLast; l++ {
 		if strings.EqualFold(s, l.String()) {
 			return true, l
 		}
@@ -173,10 +189,11 @@ func EncoderLevelFromZstd(level int) EncoderLevel {
 		return SpeedFastest
 	case level >= 3 && level < 6:
 		return SpeedDefault
-	case level > 5:
+	case level >= 6 && level < 10:
 		return SpeedBetterCompression
+	default:
+		return SpeedBestCompression
 	}
-	return SpeedDefault
 }
 
 // String provides a string representation of the compression level.
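With SpeedBestCompression promoted to a real level, the helpers changed above round-trip it as well. A small usage sketch of the public API:

	func writerForLevel(w io.Writer, name string) (*zstd.Encoder, error) {
		level := zstd.SpeedDefault
		if ok, l := zstd.EncoderLevelFromString(name); ok {
			level = l // e.g. "fastest", "default", "better", "best"
		}
		return zstd.NewWriter(w, zstd.WithEncoderLevel(level))
	}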
@@ -188,6 +205,8 @@ func (e EncoderLevel) String() string {
 		return "default"
 	case SpeedBetterCompression:
 		return "better"
+	case SpeedBestCompression:
+		return "best"
 	default:
 		return "invalid"
 	}
@@ -205,10 +224,15 @@ func WithEncoderLevel(l EncoderLevel) EOption {
 			switch o.level {
 			case SpeedFastest:
 				o.windowSize = 4 << 20
+				if !o.customBlockSize {
+					o.blockSize = 1 << 16
+				}
 			case SpeedDefault:
 				o.windowSize = 8 << 20
 			case SpeedBetterCompression:
 				o.windowSize = 16 << 20
+			case SpeedBestCompression:
+				o.windowSize = 32 << 20
 			}
 		}
 		if !o.customALEntropy {
@@ -259,7 +283,7 @@ func WithNoEntropyCompression(b bool) EOption {
 // a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range.
 // For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB.
 // This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations.
-// If this is not specified, block encodes will automatically choose this based on the input size.
+// If this is not specified, block encodes will automatically choose this based on the input size and the window size.
 // This setting has no effect on streamed encodes.
 func WithSingleSegment(b bool) EOption {
 	return func(o *encoderOptions) error {
@@ -268,6 +292,17 @@ func WithSingleSegment(b bool) EOption {
 	}
 }
 
+// WithLowerEncoderMem will, in some cases, trade less memory usage for
+// slower encoding speed.
+// This will not change the window size which is the primary function for reducing
+// memory usage. See WithWindowSize.
+func WithLowerEncoderMem(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.lowMem = b
+		return nil
+	}
+}
+
 // WithEncoderDict allows to register a dictionary that will be used for the encode.
 // The encoder *may* choose to use no dictionary instead for certain payloads.
 func WithEncoderDict(dict []byte) EOption {
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index fc4a566d3..9568a4ba3 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -8,27 +8,17 @@ import (
 	"bytes"
 	"encoding/hex"
 	"errors"
-	"hash"
 	"io"
-	"sync"
 
 	"github.com/klauspost/compress/zstd/internal/xxhash"
 )
 
 type frameDec struct {
-	o      decoderOptions
-	crc    hash.Hash64
-	offset int64
+	o   decoderOptions
+	crc *xxhash.Digest
 
 	WindowSize uint64
 
-	// maxWindowSize is the maximum windows size to support.
-	// should never be bigger than max-int.
-	maxWindowSize uint64
-
-	// In order queue of blocks being decoded.
-	decoding chan *blockDec
-
 	// Frame history passed between blocks
 	history history
 
@@ -38,20 +28,18 @@ type frameDec struct {
 	bBuf byteBuf
 
 	FrameContentSize uint64
-	frameDone        sync.WaitGroup
 
 	DictionaryID  *uint32
 	HasCheckSum   bool
 	SingleSegment bool
-
-	// asyncRunning indicates whether the async routine processes input on 'decoding'.
-	asyncRunningMu sync.Mutex
-	asyncRunning   bool
 }
 
 const (
-	// The minimum Window_Size is 1 KB.
+	// MinWindowSize is the minimum Window Size, which is 1 KB.
 	MinWindowSize = 1 << 10
+
+	// MaxWindowSize is the maximum encoder window size
+	// and the default decoder maximum window size.
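The renamed constants double as documentation of the decoder's default memory ceiling; newFrameDec below additionally clamps the window cap to the decoded-size budget, so bounding memory at the call site bounds both. A sketch using the package's decoder option:

	func newBoundedReader(r io.Reader) (*zstd.Decoder, error) {
		// Frames demanding more memory than this fail with an error
		// instead of allocating.
		return zstd.NewReader(r, zstd.WithDecoderMaxMemory(16<<20))
	}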
MaxWindowSize = 1 << 29 ) @@ -61,12 +49,11 @@ var ( ) func newFrameDec(o decoderOptions) *frameDec { - d := frameDec{ - o: o, - maxWindowSize: MaxWindowSize, + if o.maxWindowSize > o.maxDecodedSize { + o.maxWindowSize = o.maxDecodedSize } - if d.maxWindowSize > o.maxDecodedSize { - d.maxWindowSize = o.maxDecodedSize + d := frameDec{ + o: o, } return &d } @@ -78,50 +65,74 @@ func newFrameDec(o decoderOptions) *frameDec { func (d *frameDec) reset(br byteBuffer) error { d.HasCheckSum = false d.WindowSize = 0 - var b []byte + var signature [4]byte for { - b = br.readSmall(4) - if b == nil { + var err error + // Check if we can read more... + b, err := br.readSmall(1) + switch err { + case io.EOF, io.ErrUnexpectedEOF: return io.EOF + default: + return err + case nil: + signature[0] = b[0] + } + // Read the rest, don't allow io.ErrUnexpectedEOF + b, err = br.readSmall(3) + switch err { + case io.EOF: + return io.EOF + default: + return err + case nil: + copy(signature[1:], b) } - if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { - if debug { - println("Not skippable", hex.EncodeToString(b), hex.EncodeToString(skippableFrameMagic)) + + if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 { + if debugDecoder { + println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic)) } // Break if not skippable frame. break } // Read size to skip - b = br.readSmall(4) - if b == nil { - println("Reading Frame Size EOF") - return io.ErrUnexpectedEOF + b, err = br.readSmall(4) + if err != nil { + if debugDecoder { + println("Reading Frame Size", err) + } + return err } n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) println("Skipping frame with", n, "bytes.") - err := br.skipN(int(n)) + err = br.skipN(int64(n)) if err != nil { - if debug { + if debugDecoder { println("Reading discarded frame", err) } return err } } - if !bytes.Equal(b, frameMagic) { - println("Got magic numbers: ", b, "want:", frameMagic) + if !bytes.Equal(signature[:], frameMagic) { + if debugDecoder { + println("Got magic numbers: ", signature, "want:", frameMagic) + } return ErrMagicMismatch } // Read Frame_Header_Descriptor fhd, err := br.readByte() if err != nil { - println("Reading Frame_Header_Descriptor", err) + if debugDecoder { + println("Reading Frame_Header_Descriptor", err) + } return err } d.SingleSegment = fhd&(1<<5) != 0 if fhd&(1<<3) != 0 { - return errors.New("Reserved bit set on frame header") + return errors.New("reserved bit set on frame header") } // Read Window_Descriptor @@ -130,7 +141,9 @@ func (d *frameDec) reset(br byteBuffer) error { if !d.SingleSegment { wd, err := br.readByte() if err != nil { - println("Reading Window_Descriptor", err) + if debugDecoder { + println("Reading Window_Descriptor", err) + } return err } printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) @@ -147,12 +160,11 @@ func (d *frameDec) reset(br byteBuffer) error { if size == 3 { size = 4 } - b = br.readSmall(int(size)) - if b == nil { - if debug { - println("Reading Dictionary_ID", io.ErrUnexpectedEOF) - } - return io.ErrUnexpectedEOF + + b, err := br.readSmall(int(size)) + if err != nil { + println("Reading Dictionary_ID", err) + return err } var id uint32 switch size { @@ -163,7 +175,7 @@ func (d *frameDec) reset(br byteBuffer) error { case 4: id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) } - if debug { + if debugDecoder { println("Dict size", size, "ID:", id) } if id 
> 0 { @@ -185,12 +197,12 @@ func (d *frameDec) reset(br byteBuffer) error { default: fcsSize = 1 << v } - d.FrameContentSize = 0 + d.FrameContentSize = fcsUnknown if fcsSize > 0 { - b := br.readSmall(fcsSize) - if b == nil { - println("Reading Frame content", io.ErrUnexpectedEOF) - return io.ErrUnexpectedEOF + b, err := br.readSmall(fcsSize) + if err != nil { + println("Reading Frame content", err) + return err } switch fcsSize { case 1: @@ -205,10 +217,11 @@ func (d *frameDec) reset(br byteBuffer) error { d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) } - if debug { - println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize) + if debugDecoder { + println("Read FCS:", d.FrameContentSize) } } + // Move this to shared. d.HasCheckSum = fhd&(1<<2) != 0 if d.HasCheckSum { @@ -218,29 +231,47 @@ func (d *frameDec) reset(br byteBuffer) error { d.crc.Reset() } + if d.WindowSize > d.o.maxWindowSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrWindowSizeExceeded + } + if d.WindowSize == 0 && d.SingleSegment { // We may not need window in this case. d.WindowSize = d.FrameContentSize if d.WindowSize < MinWindowSize { d.WindowSize = MinWindowSize } + if d.WindowSize > d.o.maxDecodedSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrDecoderSizeExceeded + } } - if d.WindowSize > d.maxWindowSize { - printf("window size %d > max %d\n", d.WindowSize, d.maxWindowSize) - return ErrWindowSizeExceeded - } // The minimum Window_Size is 1 KB. if d.WindowSize < MinWindowSize { - println("got window size: ", d.WindowSize) + if debugDecoder { + println("got window size: ", d.WindowSize) + } return ErrWindowSizeTooSmall } d.history.windowSize = int(d.WindowSize) - if d.o.lowMem && d.history.windowSize < maxBlockSize { - d.history.maxSize = d.history.windowSize * 2 + if !d.o.lowMem || d.history.windowSize < maxBlockSize { + // Alloc 2x window size if not low-mem, or very small window size. + d.history.allocFrameBuffer = d.history.windowSize * 2 } else { - d.history.maxSize = d.history.windowSize + maxBlockSize + // Alloc with one additional block + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize + } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) } + // history contains input - maybe we do something d.rawInput = br return nil @@ -248,56 +279,37 @@ func (d *frameDec) reset(br byteBuffer) error { // next will start decoding the next block from stream. func (d *frameDec) next(block *blockDec) error { - if debug { - printf("decoding new block %p:%p", block, block.data) + if debugDecoder { + println("decoding new block") } err := block.reset(d.rawInput, d.WindowSize) if err != nil { println("block error:", err) // Signal the frame decoder we have a problem. 
- d.sendErr(block, err) + block.sendErr(err) return err } - block.input <- struct{}{} - if debug { - println("next block:", block) - } - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return nil - } - if block.Last { - // We indicate the frame is done by sending io.EOF - d.decoding <- block - return io.EOF - } - d.decoding <- block return nil } -// sendEOF will queue an error block on the frame. -// This will cause the frame decoder to return when it encounters the block. -// Returns true if the decoder was added. -func (d *frameDec) sendErr(block *blockDec, err error) bool { - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return false - } - - println("sending error", err.Error()) - block.sendErr(err) - d.decoding <- block - return true -} - // checkCRC will check the checksum if the frame has one. // Will return ErrCRCMismatch if crc check failed, otherwise nil. func (d *frameDec) checkCRC() error { if !d.HasCheckSum { return nil } + + // We can overwrite upper tmp now + want, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + + if d.o.ignoreChecksum { + return nil + } + var tmp [4]byte got := d.crc.Sum64() // Flip to match file order. @@ -306,142 +318,29 @@ func (d *frameDec) checkCRC() error { tmp[2] = byte(got >> 16) tmp[3] = byte(got >> 24) - // We can overwrite upper tmp now - want := d.rawInput.readSmall(4) - if want == nil { - println("CRC missing?") - return io.ErrUnexpectedEOF - } - if !bytes.Equal(tmp[:], want) { - if debug { + if debugDecoder { println("CRC Check Failed:", tmp[:], "!=", want) } return ErrCRCMismatch } - if debug { + if debugDecoder { println("CRC ok", tmp[:]) } return nil } -func (d *frameDec) initAsync() { - if !d.o.lowMem && !d.SingleSegment { - // set max extra size history to 10MB. - d.history.maxSize = d.history.windowSize + maxBlockSize*5 - } - // re-alloc if more than one extra block size. - if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.history.b) < d.history.maxSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.decoding) < d.o.concurrent { - d.decoding = make(chan *blockDec, d.o.concurrent) - } - if debug { - h := d.history - printf("history init. len: %d, cap: %d", len(h.b), cap(h.b)) - } - d.asyncRunningMu.Lock() - d.asyncRunning = true - d.asyncRunningMu.Unlock() -} - -// startDecoder will start decoding blocks and write them to the writer. -// The decoder will stop as soon as an error occurs or at end of frame. -// When the frame has finished decoding the *bufio.Reader -// containing the remaining input will be sent on frameDec.frameDone. -func (d *frameDec) startDecoder(output chan decodeOutput) { - written := int64(0) - - defer func() { - d.asyncRunningMu.Lock() - d.asyncRunning = false - d.asyncRunningMu.Unlock() - - // Drain the currently decoding. - d.history.error = true - flushdone: - for { - select { - case b := <-d.decoding: - b.history <- &d.history - output <- <-b.result - default: - break flushdone - } - } - println("frame decoder done, signalling done") - d.frameDone.Done() - }() - // Get decoder for first block. 
- block := <-d.decoding - block.history <- &d.history - for { - var next *blockDec - // Get result - r := <-block.result - if r.err != nil { - println("Result contained error", r.err) - output <- r - return - } - if debug { - println("got result, from ", d.offset, "to", d.offset+int64(len(r.b))) - d.offset += int64(len(r.b)) - } - if !block.Last { - // Send history to next block - select { - case next = <-d.decoding: - if debug { - println("Sending ", len(d.history.b), "bytes as history") - } - next.history <- &d.history - default: - // Wait until we have sent the block, so - // other decoders can potentially get the decoder. - next = nil - } - } - - // Add checksum, async to decoding. - if d.HasCheckSum { - n, err := d.crc.Write(r.b) - if err != nil { - r.err = err - if n != len(r.b) { - r.err = io.ErrShortWrite - } - output <- r - return - } - } - written += int64(len(r.b)) - if d.SingleSegment && uint64(written) > d.FrameContentSize { - println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize) - r.err = ErrFrameSizeExceeded - output <- r - return - } - if block.Last { - r.err = d.checkCRC() - output <- r - return - } - output <- r - if next == nil { - // There was no decoder available, we wait for one now that we have sent to the writer. - if debug { - println("Sending ", len(d.history.b), " bytes as history") - } - next = <-d.decoding - next.history <- &d.history +// consumeCRC reads the checksum data if the frame has one. +func (d *frameDec) consumeCRC() error { + if d.HasCheckSum { + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err } - block = next } + + return nil } // runDecoder will create a sync decoder that will decode a block of data. @@ -450,41 +349,67 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { // We use the history for output to avoid copying it. d.history.b = dst + d.history.ignoreBuffer = len(dst) // Store input length, so we only check new data. 
crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.FrameContentSize != fcsUnknown { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } + if uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } var err error for { err = dec.reset(d.rawInput, d.WindowSize) if err != nil { break } - if debug { + if debugDecoder { println("next block:", dec) } err = dec.decodeBuf(&d.history) - if err != nil || dec.Last { + if err != nil { break } if uint64(len(d.history.b)) > d.o.maxDecodedSize { err = ErrDecoderSizeExceeded break } - if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize { - println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize) + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) err = ErrFrameSizeExceeded break } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } } dst = d.history.b if err == nil { - if d.HasCheckSum { - var n int - n, err = d.crc.Write(dst[crcStart:]) - if err == nil { - if n != len(dst)-crcStart { - err = io.ErrShortWrite - } else { - err = d.checkCRC() + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { + if d.o.ignoreChecksum { + err = d.consumeCRC() + } else { + var n int + n, err = d.crc.Write(dst[crcStart:]) + if err == nil { + if n != len(dst)-crcStart { + err = io.ErrShortWrite + } else { + err = d.checkCRC() + } } } } diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go index e6d3d49b3..2f8860a72 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -5,8 +5,10 @@ package zstd import ( + "encoding/binary" "errors" "fmt" + "io" ) const ( @@ -178,10 +180,32 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<> 3) - // println(s.norm[:s.symbolLen], s.symbolLen) return s.buildDtable() } +func (s *fseDecoder) mustReadFrom(r io.Reader) { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + // dt [maxTablesize]decSymbol // Decompression table. + // symbolLen uint16 // Length of active part of the symbol table. + // actualTableLog uint8 // Selected tablelog. + // maxBits uint8 // Maximum number of additional bits + // // used for table creation to avoid allocations. 
+ // stateTable [256]uint16 + // norm [maxSymbolValue + 1]int16 + // preDefined bool + fatalErr(binary.Read(r, binary.LittleEndian, &s.dt)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.norm)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined)) +} + // decSymbol contains information about a state entry, // Including the state offset base, the output symbol and // the number of bits to read for the low part of the destination state. @@ -204,18 +228,10 @@ func (d decSymbol) newState() uint16 { return uint16(d >> 16) } -func (d decSymbol) baseline() uint32 { - return uint32(d >> 32) -} - func (d decSymbol) baselineInt() int { return int(d >> 32) } -func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) { - *d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) -} - func (d *decSymbol) setNBits(nBits uint8) { const mask = 0xffffffffffffff00 *d = (*d & mask) | decSymbol(nBits) @@ -231,11 +247,6 @@ func (d *decSymbol) setNewState(state uint16) { *d = (*d & mask) | decSymbol(state)<<16 } -func (d *decSymbol) setBaseline(baseline uint32) { - const mask = 0xffffffff - *d = (*d & mask) | decSymbol(baseline)<<32 -} - func (d *decSymbol) setExt(addBits uint8, baseline uint32) { const mask = 0xffff00ff *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) @@ -257,68 +268,6 @@ func (s *fseDecoder) setRLE(symbol decSymbol) { s.dt[0] = symbol } -// buildDtable will build the decoding table. -func (s *fseDecoder) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - symbolNext := s.stateTable[:256] - - // Init, lay down lowprob symbols - { - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.dt[highThreshold].setAddBits(uint8(i)) - highThreshold-- - symbolNext[i] = 1 - } else { - symbolNext[i] = uint16(v) - } - } - } - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.dt[position].setAddBits(uint8(ss)) - position = (position + step) & tableMask - for position > highThreshold { - // lowprob area - position = (position + step) & tableMask - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.dt[:tableSize] { - symbol := v.addBits() - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.dt[u&maxTableMask].setNBits(nBits) - newState := (nextState << nBits) - tableSize - if newState > tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. 
- return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.dt[u&maxTableMask].setNewState(newState) - } - } - return nil -} - // transform will transform the decoder table into a table usable for // decoding without having to apply the transformation while decoding. // The state will contain the base value and the number of bits to read. @@ -352,34 +301,7 @@ func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { s.state = dt[br.getBits(tableLog)] } -// next returns the current symbol and sets the next state. -// At least tablelog bits must be available in the bit reader. -func (s *fseState) next(br *bitReader) { - lowBits := uint16(br.getBits(s.state.nbBits())) - s.state = s.dt[s.state.newState()+lowBits] -} - -// finished returns true if all bits have been read from the bitstream -// and the next state would require reading bits from the input. -func (s *fseState) finished(br *bitReader) bool { - return br.finished() && s.state.nbBits() > 0 -} - -// final returns the current state symbol without decoding the next. -func (s *fseState) final() (int, uint8) { - return s.state.baselineInt(), s.state.addBits() -} - // final returns the current state symbol without decoding the next. func (s decSymbol) final() (int, uint8) { return s.baselineInt(), s.addBits() } - -// nextFast returns the next symbol and sets the next state. -// This can only be used if no symbols are 0 bits. -// At least tablelog bits must be available in the bit reader. -func (s *fseState) nextFast(br *bitReader) (uint32, uint8) { - lowBits := uint16(br.getBitsFast(s.state.nbBits())) - s.state = s.dt[s.state.newState()+lowBits] - return s.state.baseline(), s.state.addBits() -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go new file mode 100644 index 000000000..c881d28d8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -0,0 +1,64 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" +) + +type buildDtableAsmContext struct { + // inputs + stateTable *uint16 + norm *int16 + dt *uint64 + + // outputs --- set by the procedure in the case of error; + // for interpretation please see the error handling part below + errParam1 uint64 + errParam2 uint64 +} + +// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. +// Function returns non-zero exit code on error. +// go:noescape +func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int + +// please keep in sync with _generate/gen_fse.go +const ( + errorCorruptedNormalizedCounter = 1 + errorNewStateTooBig = 2 + errorNewStateNoBits = 3 +) + +// buildDtable will build the decoding table. 
+func (s *fseDecoder) buildDtable() error { + ctx := buildDtableAsmContext{ + stateTable: &s.stateTable[0], + norm: &s.norm[0], + dt: (*uint64)(&s.dt[0]), + } + code := buildDtable_asm(s, &ctx) + + if code != 0 { + switch code { + case errorCorruptedNormalizedCounter: + position := ctx.errParam1 + return fmt.Errorf("corrupted input (position=%d, expected 0)", position) + + case errorNewStateTooBig: + newState := decSymbol(ctx.errParam1) + size := ctx.errParam2 + return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) + + case errorNewStateNoBits: + newState := decSymbol(ctx.errParam1) + oldState := decSymbol(ctx.errParam2) + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) + + default: + return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s new file mode 100644 index 000000000..da32b4420 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s @@ -0,0 +1,127 @@ +// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm +// +build !appengine,!noasm,gc,!noasm + +// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int +TEXT ·buildDtable_asm(SB), $0-24 + MOVQ ctx+8(FP), CX + MOVQ s+0(FP), DI + + // Load values + MOVBQZX 4098(DI), DX + XORQ AX, AX + BTSQ DX, AX + MOVQ (CX), BX + MOVQ 16(CX), SI + LEAQ -1(AX), R8 + MOVQ 8(CX), CX + MOVWQZX 4096(DI), DI + + // End load values + // Init, lay down lowprob symbols + XORQ R9, R9 + JMP init_main_loop_condition + +init_main_loop: + MOVWQSX (CX)(R9*2), R10 + CMPW R10, $-1 + JNE do_not_update_high_threshold + MOVB R9, 1(SI)(R8*8) + DECQ R8 + MOVQ $0x0000000000000001, R10 + +do_not_update_high_threshold: + MOVW R10, (BX)(R9*2) + INCQ R9 + +init_main_loop_condition: + CMPQ R9, DI + JL init_main_loop + + // Spread symbols + // Calculate table step + MOVQ AX, R9 + SHRQ $0x01, R9 + MOVQ AX, R10 + SHRQ $0x03, R10 + LEAQ 3(R9)(R10*1), R9 + + // Fill add bits values + LEAQ -1(AX), R10 + XORQ R11, R11 + XORQ R12, R12 + JMP spread_main_loop_condition + +spread_main_loop: + XORQ R13, R13 + MOVWQSX (CX)(R12*2), R14 + JMP spread_inner_loop_condition + +spread_inner_loop: + MOVB R12, 1(SI)(R11*8) + +adjust_position: + ADDQ R9, R11 + ANDQ R10, R11 + CMPQ R11, R8 + JG adjust_position + INCQ R13 + +spread_inner_loop_condition: + CMPQ R13, R14 + JL spread_inner_loop + INCQ R12 + +spread_main_loop_condition: + CMPQ R12, DI + JL spread_main_loop + TESTQ R11, R11 + JZ spread_check_ok + MOVQ ctx+8(FP), AX + MOVQ R11, 24(AX) + MOVQ $+1, ret+16(FP) + RET + +spread_check_ok: + // Build Decoding table + XORQ DI, DI + +build_table_main_table: + MOVBQZX 1(SI)(DI*8), CX + MOVWQZX (BX)(CX*2), R8 + LEAQ 1(R8), R9 + MOVW R9, (BX)(CX*2) + MOVQ R8, R9 + BSRQ R9, R9 + MOVQ DX, CX + SUBQ R9, CX + SHLQ CL, R8 + SUBQ AX, R8 + MOVB CL, (SI)(DI*8) + MOVW R8, 2(SI)(DI*8) + CMPQ R8, AX + JLE build_table_check1_ok + MOVQ ctx+8(FP), CX + MOVQ R8, 24(CX) + MOVQ AX, 32(CX) + MOVQ $+2, ret+16(FP) + RET + +build_table_check1_ok: + TESTB CL, CL + JNZ build_table_check2_ok + CMPW R8, DI + JNE build_table_check2_ok + MOVQ ctx+8(FP), AX + MOVQ R8, 24(AX) + MOVQ DI, 32(AX) + MOVQ $+3, ret+16(FP) + RET + +build_table_check2_ok: + INCQ DI + CMPQ DI, AX + JL build_table_main_table + MOVQ $+0, ret+16(FP) + RET diff --git 
a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go new file mode 100644 index 000000000..332e51fe4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -0,0 +1,72 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "errors" + "fmt" +) + +// buildDtable will build the decoding table. +func (s *fseDecoder) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + symbolNext := s.stateTable[:256] + + // Init, lay down lowprob symbols + { + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.dt[highThreshold].setAddBits(uint8(i)) + highThreshold-- + symbolNext[i] = 1 + } else { + symbolNext[i] = uint16(v) + } + } + } + + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.dt[position].setAddBits(uint8(ss)) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.dt[:tableSize] { + symbol := v.addBits() + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.dt[u&maxTableMask].setNBits(nBits) + newState := (nextState << nBits) - tableSize + if newState > tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.dt[u&maxTableMask].setNewState(newState) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go index aa9eba88b..ab26326a8 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -62,9 +62,8 @@ func (s symbolTransform) String() string { // To indicate that you have populated the histogram call HistogramFinished // with the value of the highest populated symbol, as well as the number of entries // in the most populated entry. These are accepted at face value. -// The returned slice will always be length 256. -func (s *fseEncoder) Histogram() []uint32 { - return s.count[:] +func (s *fseEncoder) Histogram() *[256]uint32 { + return &s.count } // HistogramFinished can be called to indicate that the histogram has been populated. @@ -77,27 +76,12 @@ func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { s.clearCount = maxCount != 0 } -// prepare will prepare and allocate scratch tables used for both compression and decompression. -func (s *fseEncoder) prepare() (*fseEncoder, error) { - if s == nil { - s = &fseEncoder{} - } - s.useRLE = false - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - return s, nil -} - // allocCtable will allocate tables needed for compression. 
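//
// Editor's aside (illustrative, not part of the vendored change):
// fse_decoder_amd64.go and fse_decoder_generic.go above carry complementary
// build constraints, so exactly one buildDtable implementation compiles for
// any target. The pattern, reduced to a toy pair of files (names
// hypothetical):
//
//	//go:build amd64 && !appengine && !noasm && gc
//	package toy // sum_amd64.go; body supplied by sum_amd64.s
//	func sum(a, b int) int
//
//	//go:build !amd64 || appengine || !gc || noasm
//	package toy // sum_generic.go
//	func sum(a, b int) int { return a + b }
//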
 // If existing tables are big enough, they are simply re-used.
 func (s *fseEncoder) allocCtable() {
 	tableSize := 1 << s.actualTableLog
 	// get tableSymbol that is big enough.
-	if cap(s.ct.tableSymbol) < int(tableSize) {
+	if cap(s.ct.tableSymbol) < tableSize {
 		s.ct.tableSymbol = make([]byte, tableSize)
 	}
 	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
@@ -202,13 +186,13 @@ func (s *fseEncoder) buildCTable() error {
 		case 0:
 		case -1, 1:
 			symbolTT[i].deltaNbBits = tl
-			symbolTT[i].deltaFindState = int16(total - 1)
+			symbolTT[i].deltaFindState = total - 1
 			total++
 		default:
 			maxBitsOut := uint32(tableLog) - highBit(uint32(v-1))
 			minStatePlus := uint32(v) << maxBitsOut
 			symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
-			symbolTT[i].deltaFindState = int16(total - v)
+			symbolTT[i].deltaFindState = total - v
 			total += v
 		}
 	}
@@ -229,7 +213,7 @@ func (s *fseEncoder) setRLE(val byte) {
 		deltaFindState: 0,
 		deltaNbBits:    0,
 	}
-	if debug {
+	if debugEncoder {
 		println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val])
 	}
 	s.rleVal = val
@@ -353,8 +337,8 @@ func (s *fseEncoder) normalizeCount2(length int) error {
 		distributed  uint32
 		total        = uint32(length)
 		tableLog     = s.actualTableLog
-		lowThreshold = uint32(total >> tableLog)
-		lowOne       = uint32((total * 3) >> (tableLog + 1))
+		lowThreshold = total >> tableLog
+		lowOne       = (total * 3) >> (tableLog + 1)
 	)
 	for i, cnt := range s.count[:s.symbolLen] {
 		if cnt == 0 {
@@ -379,7 +363,7 @@ func (s *fseEncoder) normalizeCount2(length int) error {
 
 		if (total / toDistribute) > lowOne {
 			// risk of rounding to zero
-			lowOne = uint32((total * 3) / (toDistribute * 2))
+			lowOne = (total * 3) / (toDistribute * 2)
 			for i, cnt := range s.count[:s.symbolLen] {
 				if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
 					s.norm[i] = 1
@@ -708,15 +692,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) {
 	im := int32((nbBitsOut << 16) - first.deltaNbBits)
 	lu := (im >> nbBitsOut) + int32(first.deltaFindState)
 	c.state = c.stateTable[lu]
-	return
-}
-
-// encode the output symbol provided and write it to the bitstream.
-func (c *cState) encode(symbolTT symbolTransform) {
-	nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
-	dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState)
-	c.bw.addBits16NC(c.state, uint8(nbBitsOut))
-	c.state = c.stateTable[dstState]
 }
 
 // flush will write the tablelog to the output and flush the remaining full bytes.
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
index 6c17dc17f..474cb77d2 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
@@ -59,7 +59,7 @@ func fillBase(dst []baseOffset, base uint32, bits ...uint8) {
 	}
 	for i, bit := range bits {
 		if base > math.MaxInt32 {
-			panic(fmt.Sprintf("invalid decoding table, base overflows int32"))
+			panic("invalid decoding table, base overflows int32")
 		}
 		dst[i] = baseOffset{
diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go
index 4a752067f..5d73c21eb 100644
--- a/vendor/github.com/klauspost/compress/zstd/hash.go
+++ b/vendor/github.com/klauspost/compress/zstd/hash.go
@@ -13,65 +13,23 @@ const (
 	prime8bytes = 0xcf1bbcdcb7a56463
 )
 
-// hashLen returns a hash of the lowest l bytes of u for a size size of h bytes.
-// l must be >=4 and <=8. Any other value will return hash for 4 bytes.
-// h should always be <32.
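//
// Editor's aside (illustrative, not part of the vendored change): the
// rewritten hashLen just below folds hash4 through hash8 into one switch so
// the shifts become constants after inlining. The underlying technique for
// the 6-byte case (the multiplier stands in for the file's own prime6bytes):
//
//	func hashLen6(u uint64, tableBits uint8) uint32 {
//		const prime6 = 227718039650203 // a large odd multiplier
//		// Shift the low 6 bytes to the top, multiply to mix, keep the top
//		// tableBits bits as the table index in [0, 1<<tableBits).
//		return uint32(((u << 16) * prime6) >> (64 - tableBits))
//	}
//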
-// Preferably h and l should be a constant. -// FIXME: This does NOT get resolved, if 'mls' is constant, -// so this cannot be used. -func hashLen(u uint64, hashLog, mls uint8) uint32 { +// hashLen returns a hash of the lowest mls bytes of with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be a constant for inlining. +func hashLen(u uint64, length, mls uint8) uint32 { switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) case 5: - return hash5(u, hashLog) + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) case 6: - return hash6(u, hashLog) + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) case 7: - return hash7(u, hashLog) + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) case 8: - return hash8(u, hashLog) + return uint32((u * prime8bytes) >> (64 - length)) default: - return hash4x64(u, hashLog) + return (uint32(u) * prime4bytes) >> (32 - length) } } - -// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash3(u uint32, h uint8) uint32 { - return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31) -} - -// hash4 returns the hash of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash4(u uint32, h uint8) uint32 { - return (u * prime4bytes) >> ((32 - h) & 31) -} - -// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash4x64(u uint64, h uint8) uint32 { - return (uint32(u) * prime4bytes) >> ((32 - h) & 31) -} - -// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash5(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63)) -} - -// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash6(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) -} - -// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash7(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) -} - -// hash8 returns the hash of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash8(u uint64, h uint8) uint32 { - return uint32((u * prime8bytes) >> ((64 - h) & 63)) -} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go index f783e32d2..28b40153c 100644 --- a/vendor/github.com/klauspost/compress/zstd/history.go +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -10,20 +10,31 @@ import ( // history contains the information transferred between blocks. type history struct { - b []byte - huffTree *huff0.Scratch - recentOffsets [3]int + // Literal decompression + huffTree *huff0.Scratch + + // Sequence decompression decoders sequenceDecs - windowSize int - maxSize int - error bool - dict *dict + recentOffsets [3]int + + // History buffer... 
+ b []byte + + // ignoreBuffer is meant to ignore a number of bytes + // when checking for matches in history + ignoreBuffer int + + windowSize int + allocFrameBuffer int // needed? + error bool + dict *dict } // reset will reset the history to initial state of a frame. // The history must already have been initialized to the desired size. func (h *history) reset() { h.b = h.b[:0] + h.ignoreBuffer = 0 h.error = false h.recentOffsets = [3]int{1, 4, 8} if f := h.decoders.litLengths.fse; f != nil && !f.preDefined { @@ -35,7 +46,7 @@ func (h *history) reset() { if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined { fseDecoderPool.Put(f) } - h.decoders = sequenceDecs{} + h.decoders = sequenceDecs{br: h.decoders.br} if h.huffTree != nil { if h.dict == nil || h.dict.litEnc != h.huffTree { huffDecoderPool.Put(h.huffTree) @@ -54,6 +65,7 @@ func (h *history) setDict(dict *dict) { h.decoders.litLengths = dict.llDec h.decoders.offsets = dict.ofDec h.decoders.matchLengths = dict.mlDec + h.decoders.dict = dict.content h.recentOffsets = dict.offsets h.huffTree = dict.litEnc } @@ -83,6 +95,24 @@ func (h *history) append(b []byte) { copy(h.b[h.windowSize-len(b):], b) } +// ensureBlock will ensure there is space for at least one block... +func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + // append bytes to history without ever discarding anything. func (h *history) appendKeep(b []byte) { h.b = append(h.b, b...) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go index 426b9cac7..2c112a0ab 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -195,7 +195,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error { b, d.v4 = consumeUint64(b) b, d.total = consumeUint64(b) copy(d.mem[:], b) - b = b[len(d.mem):] d.n = int(d.total % uint64(len(d.mem))) return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s index 2c9c5357a..cea178561 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -1,12 +1,13 @@ // +build !appengine // +build gc // +build !purego +// +build !noasm #include "textflag.h" // Register allocation: // AX h -// CX pointer to advance through b +// SI pointer to advance through b // DX n // BX loop end // R8 v1, k1 @@ -16,39 +17,39 @@ // R12 tmp // R13 prime1v // R14 prime2v -// R15 prime4v +// DI prime4v -// round reads from and advances the buffer pointer in CX. +// round reads from and advances the buffer pointer in SI. // It assumes that R13 has prime1v and R14 has prime2v. #define round(r) \ - MOVQ (CX), R12 \ - ADDQ $8, CX \ + MOVQ (SI), R12 \ + ADDQ $8, SI \ IMULQ R14, R12 \ ADDQ R12, r \ ROLQ $31, r \ IMULQ R13, r // mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. 
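//
// Editor's aside (illustrative, not part of the vendored change): back in
// history.go, the new ensureBlock keeps allocations flat by sliding the
// buffer instead of growing it: when spare capacity runs low it discards all
// but the last windowSize bytes, which is the furthest any later match may
// reach. The compaction step in isolation:
//
//	func compactWindow(b []byte, windowSize int) []byte {
//		if len(b) <= windowSize {
//			return b
//		}
//		discard := len(b) - windowSize
//		copy(b, b[discard:]) // move the live window to the front
//		return b[:windowSize]
//	}
//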
+// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. #define mergeRound(acc, val) \ IMULQ R14, val \ ROLQ $31, val \ IMULQ R13, val \ XORQ val, acc \ IMULQ R13, acc \ - ADDQ R15, acc + ADDQ DI, acc // func Sum64(b []byte) uint64 TEXT ·Sum64(SB), NOSPLIT, $0-32 // Load fixed primes. MOVQ ·prime1v(SB), R13 MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), R15 + MOVQ ·prime4v(SB), DI // Load slice. - MOVQ b_base+0(FP), CX + MOVQ b_base+0(FP), SI MOVQ b_len+8(FP), DX - LEAQ (CX)(DX*1), BX + LEAQ (SI)(DX*1), BX // The first loop limit will be len(b)-32. SUBQ $32, BX @@ -65,14 +66,14 @@ TEXT ·Sum64(SB), NOSPLIT, $0-32 XORQ R11, R11 SUBQ R13, R11 - // Loop until CX > BX. + // Loop until SI > BX. blockLoop: round(R8) round(R9) round(R10) round(R11) - CMPQ CX, BX + CMPQ SI, BX JLE blockLoop MOVQ R8, AX @@ -100,16 +101,16 @@ noBlocks: afterBlocks: ADDQ DX, AX - // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. ADDQ $24, BX - CMPQ CX, BX + CMPQ SI, BX JG fourByte wordLoop: // Calculate k1. - MOVQ (CX), R8 - ADDQ $8, CX + MOVQ (SI), R8 + ADDQ $8, SI IMULQ R14, R8 ROLQ $31, R8 IMULQ R13, R8 @@ -117,18 +118,18 @@ wordLoop: XORQ R8, AX ROLQ $27, AX IMULQ R13, AX - ADDQ R15, AX + ADDQ DI, AX - CMPQ CX, BX + CMPQ SI, BX JLE wordLoop fourByte: ADDQ $4, BX - CMPQ CX, BX + CMPQ SI, BX JG singles - MOVL (CX), R8 - ADDQ $4, CX + MOVL (SI), R8 + ADDQ $4, SI IMULQ R13, R8 XORQ R8, AX @@ -138,19 +139,19 @@ fourByte: singles: ADDQ $4, BX - CMPQ CX, BX + CMPQ SI, BX JGE finalize singlesLoop: - MOVBQZX (CX), R12 - ADDQ $1, CX + MOVBQZX (SI), R12 + ADDQ $1, SI IMULQ ·prime5v(SB), R12 XORQ R12, AX ROLQ $11, AX IMULQ R13, AX - CMPQ CX, BX + CMPQ SI, BX JL singlesLoop finalize: @@ -179,13 +180,13 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40 MOVQ ·prime2v(SB), R14 // Load slice. - MOVQ arg1_base+8(FP), CX - MOVQ arg1_len+16(FP), DX - LEAQ (CX)(DX*1), BX + MOVQ b_base+8(FP), SI + MOVQ b_len+16(FP), DX + LEAQ (SI)(DX*1), BX SUBQ $32, BX // Load vN from d. - MOVQ arg+0(FP), AX + MOVQ d+0(FP), AX MOVQ 0(AX), R8 // v1 MOVQ 8(AX), R9 // v2 MOVQ 16(AX), R10 // v3 @@ -199,7 +200,7 @@ blockLoop: round(R10) round(R11) - CMPQ CX, BX + CMPQ SI, BX JLE blockLoop // Copy vN back to d. @@ -208,8 +209,8 @@ blockLoop: MOVQ R10, 16(AX) MOVQ R11, 24(AX) - // The number of bytes written is CX minus the old base pointer. - SUBQ arg1_base+8(FP), CX - MOVQ CX, ret+32(FP) + // The number of bytes written is SI minus the old base pointer. + SUBQ b_base+8(FP), SI + MOVQ SI, ret+32(FP) RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s new file mode 100644 index 000000000..4d64a17d6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -0,0 +1,186 @@ +// +build gc,!purego,!noasm + +#include "textflag.h" + +// Register allocation. +#define digest R1 +#define h R2 // Return value. +#define p R3 // Input pointer. +#define len R4 +#define nblocks R5 // len / 32. +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc \ + +// x = round(0, x). 
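//
// Editor's aside (illustrative, not part of the vendored change): round and
// round0 above are the standard xxhash64 accumulator step; the MADD/ROR/MUL
// triple corresponds to this Go form, with the primes taken from the data
// section below:
//
//	import "math/bits"
//
//	const (
//		prime1 uint64 = 11400714785074694791
//		prime2 uint64 = 14029467366897019727
//	)
//
//	func round(acc, input uint64) uint64 {
//		acc += input * prime2
//		acc = bits.RotateLeft64(acc, 31) // ROR $64-31 == rotate left by 31
//		return acc * prime1
//	}
//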
+#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x \ + +#define mergeRound(x) \ + round0(x) \ + EOR x, h \ + MADD h, prime4, prime1, h \ + +// Update v[1-4] with 32-byte blocks. Assumes len >= 32. +#define blocksLoop() \ + LSR $5, len, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 32(p), (x1, x2) \ + round(v1, x1) \ + LDP -16(p), (x3, x4) \ + round(v2, x2) \ + SUB $1, nblocks \ + round(v3, x3) \ + round(v4, x4) \ + CBNZ nblocks, loop \ + +// The primes are repeated here to ensure that they're stored +// in a contiguous array, so we can load them with LDP. +DATA primes<> +0(SB)/8, $11400714785074694791 +DATA primes<> +8(SB)/8, $14029467366897019727 +DATA primes<>+16(SB)/8, $1609587929392839161 +DATA primes<>+24(SB)/8, $9650029242287828579 +DATA primes<>+32(SB)/8, $2870177450012600261 +GLOBL primes<>(SB), NOPTR+RODATA, $40 + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32 + LDP b_base+0(FP), (p, len) + + LDP primes<> +0(SB), (prime1, prime2) + LDP primes<>+16(SB), (prime3, prime4) + MOVD primes<>+32(SB), prime5 + + CMP $32, len + CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 } + BLO afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blocksLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(v1) + mergeRound(v2) + mergeRound(v3) + mergeRound(v4) + +afterLoop: + ADD len, h + + TBZ $4, len, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, len, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, len, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, len, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h + MUL prime1, h + +try1: + TBZ $0, len, end + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h + MUL prime1, h + +end: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +// +// Assumes len(b) >= 32. +TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40 + LDP primes<>(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, len) + + blocksLoop() + + // Store updated state. 
+ STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, len + MOVD len, ret+32(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go similarity index 54% rename from vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go rename to vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go index 35318d7c4..1a1fac9c2 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go @@ -1,6 +1,9 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm +// +build amd64 arm64 // +build !appengine // +build gc // +build !purego +// +build !noasm package xxhash @@ -10,4 +13,4 @@ package xxhash func Sum64(b []byte) uint64 //go:noescape -func writeBlocks(*Digest, []byte) int +func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go index 4a5a82160..209cb4a99 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -1,4 +1,5 @@ -// +build !amd64 appengine !gc purego +//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm +// +build !amd64,!arm64 appengine !gc purego noasm package xxhash diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index b5c8ef133..df0447203 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -20,6 +20,10 @@ type seq struct { llCode, mlCode, ofCode uint8 } +type seqVals struct { + ll, ml, mo int +} + func (s seq) String() string { if s.offset <= 3 { if s.offset == 0 { @@ -61,16 +65,19 @@ type sequenceDecs struct { offsets sequenceDec matchLengths sequenceDec prevOffset [3]int - hist []byte dict []byte literals []byte out []byte + nSeqs int + br *bitReader + seqSize int windowSize int maxBits uint8 + maxSyncLen uint64 } // initialize all 3 decoders from the stream input. -func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error { +func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) error { if err := s.litLengths.init(br); err != nil { return errors.New("litLengths:" + err.Error()) } @@ -80,8 +87,7 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] if err := s.matchLengths.init(br); err != nil { return errors.New("matchLengths:" + err.Error()) } - s.literals = literals - s.hist = hist.b + s.br = br s.prevOffset = hist.recentOffsets s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits s.windowSize = hist.windowSize @@ -93,12 +99,127 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] return nil } +// execute will execute the decoded sequence with the provided history. +// The sequence must be evaluated before being sent. +func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error { + if len(s.dict) == 0 { + return s.executeSimple(seqs, hist) + } + + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) 
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Copy from dictionary... + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // we may be in dictionary. + dictO := len(s.dict) - (seq.mo - (t + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) + } + end := dictO + seq.ml + if end > len(s.dict) { + n := len(s.dict) - dictO + copy(out[t:], s.dict[dictO:]) + t += n + seq.ml -= n + } else { + copy(out[t:], s.dict[dictO:end]) + t += end - dictO + continue + } + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into current block. + // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + // We must be in current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + continue + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} + // decode sequences from the stream with the provided history. -func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { +func (s *sequenceDecs) decodeSync(hist []byte) error { + supported, err := s.decodeSyncSimple(hist) + if supported { + return err + } + + br := s.br + seqs := s.nSeqs startSize := len(s.out) // Grab full sizes tables, to avoid bounds checks. 
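//
// Editor's aside (illustrative, not part of the vendored change): the
// byte-at-a-time loop in execute above handles LZ77 matches that overlap
// their own output (offset smaller than length), which is how short patterns
// repeat. A minimal standalone form of that copy:
//
//	// appendMatch appends ml bytes starting mo bytes back from the end of dst.
//	func appendMatch(dst []byte, mo, ml int) []byte {
//		start := len(dst) - mo
//		for i := 0; i < ml; i++ {
//			dst = append(dst, dst[start+i]) // may read bytes appended this loop
//		}
//		return dst
//	}
//
//	// appendMatch([]byte("ab"), 2, 6) == []byte("abababab")
//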
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + out := s.out + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } for i := seqs - 1; i >= 0; i-- { if br.overread() { @@ -151,7 +272,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { if temp == 0 { // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") + println("WARNING: temp was 0") temp = 1 } @@ -176,44 +297,49 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { if ll > len(s.literals) { return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) } - size := ll + ml + len(s.out) + size := ll + ml + len(out) if size-startSize > maxBlockSize { - return fmt.Errorf("output (%d) bigger than max block size", size) + return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize) } - if size > cap(s.out) { - // Not enough size, will be extremely rarely triggered, + if size > cap(out) { + // Not enough size, which can happen under high volume block streaming conditions // but could be if destination slice is too small for sync operations. - // We add maxBlockSize to the capacity. - s.out = append(s.out, make([]byte, maxBlockSize)...) - s.out = s.out[:len(s.out)-maxBlockSize] + // over-allocating here can create a large amount of GC pressure so we try to keep + // it as contained as possible + used := len(out) - startSize + addBytes := 256 + ll + ml + used>>2 + // Clamp to max block size. + if used+addBytes > maxBlockSize { + addBytes = maxBlockSize - used + } + out = append(out, make([]byte, addBytes)...) + out = out[:len(out)-addBytes] } if ml > maxMatchLen { return fmt.Errorf("match len (%d) bigger than max allowed length", ml) } // Add literals - s.out = append(s.out, s.literals[:ll]...) + out = append(out, s.literals[:ll]...) s.literals = s.literals[ll:] - out := s.out if mo == 0 && ml > 0 { return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) } - if mo > len(s.out)+len(hist) || mo > s.windowSize { + if mo > len(out)+len(hist) || mo > s.windowSize { if len(s.dict) == 0 { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) } // we may be in dictionary. - dictO := len(s.dict) - (mo - (len(s.out) + len(hist))) + dictO := len(s.dict) - (mo - (len(out) + len(hist))) if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) } end := dictO + ml if end > len(s.dict) { out = append(out, s.dict[dictO:]...) - mo -= len(s.dict) - dictO ml -= len(s.dict) - dictO } else { out = append(out, s.dict[dictO:end]...) @@ -224,26 +350,25 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { // Copy from history. // TODO: Blocks without history could be made to ignore this completely. - if v := mo - len(s.out); v > 0 { + if v := mo - len(out); v > 0 { // v is the start position in history from end. 
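//
// Editor's aside (illustrative, not part of the vendored change): the
// reworked allocation above grows the output by the immediate need plus a
// quarter of what this block already produced, clamped to the block limit,
// rather than a flat maxBlockSize chunk; this keeps idle capacity and GC
// pressure low. As a standalone helper (name hypothetical; assumes the
// caller already checked used+need <= maxBlockSize):
//
//	func growFor(out []byte, startSize, need, maxBlockSize int) []byte {
//		if len(out)+need <= cap(out) {
//			return out
//		}
//		used := len(out) - startSize
//		addBytes := 256 + need + used>>2
//		if used+addBytes > maxBlockSize {
//			addBytes = maxBlockSize - used
//		}
//		out = append(out, make([]byte, addBytes)...) // extend capacity only
//		return out[:len(out)-addBytes]               // restore the old length
//	}
//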
- start := len(s.hist) - v + start := len(hist) - v if ml > v { // Some goes into current block. // Copy remainder of history - out = append(out, s.hist[start:]...) - mo -= v + out = append(out, hist[start:]...) ml -= v } else { - out = append(out, s.hist[start:start+ml]...) + out = append(out, hist[start:start+ml]...) ml = 0 } } // We must be in current buffer now if ml > 0 { - start := len(s.out) - mo - if ml <= len(s.out)-start { + start := len(out) - mo + if ml <= len(out)-start { // No overlap - out = append(out, s.out[start:start+ml]...) + out = append(out, out[start:start+ml]...) } else { // Overlapping copy // Extend destination slice and copy one byte at the time. @@ -257,7 +382,6 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } } } - s.out = out if i == 0 { // This is the last sequence, so we shouldn't update state. break @@ -271,7 +395,8 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { mlState = mlTable[mlState.newState()&maxTableMask] ofState = ofTable[ofState.newState()&maxTableMask] } else { - bits := br.getBitsFast(nBits) + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) llState = llTable[(llState.newState()+lowBits)&maxTableMask] @@ -284,19 +409,14 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } } - // Add final literals - s.out = append(s.out, s.literals...) - return nil -} + // Check if space for literals + if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize) + } -// update states, at least 27 bits must be available. -func (s *sequenceDecs) update(br *bitReader) { - // Max 8 bits - s.litLengths.state.next(br) - // Max 9 bits - s.matchLengths.state.next(br) - // Max 8 bits - s.offsets.state.next(br) + // Add final literals + s.out = append(out, s.literals...) + return br.close() } var bitMask [16]uint16 @@ -307,87 +427,6 @@ func init() { } } -// update states, at least 27 bits must be available. -func (s *sequenceDecs) updateAlt(br *bitReader) { - // Update all 3 states at once. Approx 20% faster. - a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - - nBits := a.nbBits() + b.nbBits() + c.nbBits() - if nBits == 0 { - s.litLengths.state.state = s.litLengths.state.dt[a.newState()] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()] - s.offsets.state.state = s.offsets.state.dt[c.newState()] - return - } - bits := br.getBitsFast(nBits) - lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31)) - s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits] - - lowBits = uint16(bits >> (c.nbBits() & 31)) - lowBits &= bitMask[b.nbBits()&15] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits] - - lowBits = uint16(bits) & bitMask[c.nbBits()&15] - s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits] -} - -// nextFast will return new states when there are at least 4 unused bytes left on the stream when done. -func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { - // Final will not read from stream. - ll, llB := llState.final() - ml, mlB := mlState.final() - mo, moB := ofState.final() - - // extra bits are stored in reverse order. 
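//
// Editor's aside (illustrative, not part of the vendored change): both the
// live loop and the removed updateAlt read the low bits for all three FSE
// states with one combined get32BitsFast call, then slice the result apart.
// The split on its own, with fields packed LSB-first as offset, match
// length, literal length:
//
//	// bits must hold exactly llB+mlB+ofB valid bits.
//	func splitBits(bits uint32, llB, mlB, ofB uint8) (ll, ml, of uint16) {
//		of = uint16(bits) & (1<<ofB - 1)
//		ml = uint16(bits>>ofB) & (1<<mlB - 1)
//		ll = uint16(bits >> (ofB + mlB))
//		return
//	}
//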
- br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - return - } - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - return - } - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - return -} - func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { // Final will not read from stream. ll, llB := llState.final() @@ -450,36 +489,3 @@ func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { s.prevOffset[0] = temp return temp } - -// mergeHistory will merge history. -func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) { - for i := uint(0); i < 3; i++ { - var sNew, sHist *sequenceDec - switch i { - default: - // same as "case 0": - sNew = &s.litLengths - sHist = &hist.litLengths - case 1: - sNew = &s.offsets - sHist = &hist.offsets - case 2: - sNew = &s.matchLengths - sHist = &hist.matchLengths - } - if sNew.repeat { - if sHist.fse == nil { - return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i) - } - continue - } - if sNew.fse == nil { - return nil, fmt.Errorf("sequence stream %d, no fse found", i) - } - if sHist.fse != nil && !sHist.fse.preDefined { - fseDecoderPool.Put(sHist.fse) - } - sHist.fse = sNew.fse - } - return hist, nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 000000000..7598c1018 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,368 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +type decodeSyncAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + litRemain int + out []byte + outPosition int + literals []byte + litPosition int + history []byte + windowSize int + ll int // set on error (not for all errors, please refer to _generate/gen.go) + ml int // set on error (not for all errors, please refer to _generate/gen.go) + mo int // set on error (not for all errors, please refer to _generate/gen.go) +} + +// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +//go:noescape +func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. 
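//
// Editor's aside (illustrative, not part of the vendored change): the offset
// handling inlined in the removed nextFast (and still live in adjustOffset)
// implements zstd's repeat-offset rule: small offset values select one of
// the three most recent offsets, shifted by one when the sequence carries no
// literals, with value 3 meaning "most recent offset minus one". In
// isolation:
//
//	// prev holds the last three match offsets, most recent first.
//	func adjustRepeat(prev *[3]int, offset, litLen int) int {
//		if litLen == 0 {
//			offset++ // no literals: repeat codes shift by one
//		}
//		if offset == 0 {
//			return prev[0] // reuse the most recent offset, order unchanged
//		}
//		var temp int
//		if offset == 3 {
//			temp = prev[0] - 1
//		} else {
//			temp = prev[offset]
//		}
//		if temp == 0 {
//			temp = 1 // corrupted input; force a usable offset
//		}
//		if offset != 1 {
//			prev[2] = prev[1]
//		}
//		prev[1] = prev[0]
//		prev[0] = temp
//		return temp
//	}
//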
+//go:noescape +func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. +//go:noescape +func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. +//go:noescape +func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// decode sequences from the stream with the provided history but without a dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + if len(s.dict) > 0 { + return false, nil + } + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { + return false, nil + } + + // FIXME: Using unsafe memory copies leads to rare, random crashes + // with fuzz testing. It is therefore disabled for now. + const useSafe = true + /* + useSafe := false + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { + useSafe = true + } + if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { + useSafe = true + } + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + useSafe = true + } + */ + + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeSyncAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + iteration: s.nSeqs - 1, + litRemain: len(s.literals), + out: s.out, + outPosition: len(s.out), + literals: s.literals, + windowSize: s.windowSize, + history: hist, + } + + s.seqSize = 0 + startSize := len(s.out) + + var errCode int + if cpuinfo.HasBMI2() { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) + } + } else { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) + } + } + switch errCode { + case noError: + break + + case errorMatchLenOfsMismatch: + return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) + + case errorMatchLenTooBig: + return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) + + case errorMatchOffTooBig: + return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", + ctx.mo, ctx.outPosition+len(hist)-startSize) + + case errorNotEnoughLiterals: + return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", + ctx.ll, ctx.litRemain+ctx.ll) + + case errorNotEnoughSpace: + size := ctx.outPosition + ctx.ll + ctx.ml + if debugDecoder { + println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) + } + return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize) + + default: + return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) + } + err 
:= br.close()
+	if err != nil {
+		printf("Closing sequences: %v, %+v\n", err, *br)
+		return true, err
+	}
+
+	s.literals = s.literals[ctx.litPosition:]
+	t := ctx.outPosition
+	s.out = s.out[:t]
+
+	// Add final literals
+	s.out = append(s.out, s.literals...)
+	if debugDecoder {
+		t += len(s.literals)
+		if t != len(s.out) {
+			panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
+		}
+	}
+
+	return true, nil
+}
+
+// --------------------------------------------------------------------------------
+
+type decodeAsmContext struct {
+	llTable   []decSymbol
+	mlTable   []decSymbol
+	ofTable   []decSymbol
+	llState   uint64
+	mlState   uint64
+	ofState   uint64
+	iteration int
+	seqs      []seqVals
+	litRemain int
+}
+
+const noError = 0
+
+// error reported when mo == 0 && ml > 0
+const errorMatchLenOfsMismatch = 1
+
+// error reported when ml > maxMatchLen
+const errorMatchLenTooBig = 2
+
+// error reported when mo > available history or mo > s.windowSize
+const errorMatchOffTooBig = 3
+
+// error reported when the sum of literal lengths exceeds the literal buffer size
+const errorNotEnoughLiterals = 4
+
+// error reported when capacity of `out` is too small
+const errorNotEnoughSpace = 5
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//go:noescape
+func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//go:noescape
+func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//go:noescape
+func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//go:noescape
+func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// decode sequences from the stream without the provided history.
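//
// Editor's note (illustrative, not part of the vendored change): decode
// below chooses between the *_56 and plain assembly loops. The sum of the
// three tables' state bits plus the worst-case extra value bits bounds what
// one sequence can consume, and a byte-granular refill of a 64-bit shifter
// always leaves at least 56 usable bits (at most 7 remain consumed), so
// within that budget one refill per sequence suffices:
//
//	budget := int(s.maxBits) + // extra value bits for ll/ml/of
//		int(s.litLengths.fse.actualTableLog) +
//		int(s.matchLengths.fse.actualTableLog) +
//		int(s.offsets.fse.actualTableLog)
//	use56 := budget <= 56
//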
+func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + seqs: seqs, + iteration: len(seqs) - 1, + litRemain: len(s.literals), + } + + s.seqSize = 0 + lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 + var errCode int + if cpuinfo.HasBMI2() { + if lte56bits { + errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_bmi2(s, br, &ctx) + } + } else { + if lte56bits { + errCode = sequenceDecs_decode_56_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_amd64(s, br, &ctx) + } + } + if errCode != 0 { + i := len(seqs) - ctx.iteration - 1 + switch errCode { + case errorMatchLenOfsMismatch: + ml := ctx.seqs[i].ml + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + + case errorMatchLenTooBig: + ml := ctx.seqs[i].ml + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + + case errorNotEnoughLiterals: + ll := ctx.seqs[i].ll + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) + } + + return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) + } + + if ctx.litRemain < 0 { + return fmt.Errorf("literal count is too big: total available %d, total requested %d", + len(s.literals), len(s.literals)-ctx.litRemain) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// -------------------------------------------------------------------------------- + +type executeAsmContext struct { + seqs []seqVals + seqIndex int + out []byte + history []byte + literals []byte + outPosition int + litPosition int + windowSize int +} + +// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm. +// +// Returns false if a match offset is too big. +// +// Please refer to seqdec_generic.go for the reference implementation. +//go:noescape +func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool + +// Same as above, but with safe memcopies +//go:noescape +func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool + +// executeSimple handles cases when dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { + addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc + s.out = append(s.out, make([]byte, addBytes)...) 
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + var ok bool + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + ok = sequenceDecs_executeSimple_safe_amd64(&ctx) + } else { + ok = sequenceDecs_executeSimple_amd64(&ctx) + } + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 000000000..27e76774c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,4100 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm +// +build !appengine,!noasm,gc,!noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
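//
// Editor's aside (illustrative, not part of the vendored change): the fill
// blocks below are the bit reader's refill. zstd bitstreams are consumed
// back to front, so the fast path steps the pointer backwards by the whole
// bytes already used and reloads the 64-bit shift register in one load; the
// byte-by-byte fallback does the same one byte at a time. Rough Go form
// (names hypothetical):
//
//	whole := bitsRead >> 3 // whole bytes consumed since the last refill
//	p -= whole             // the read pointer walks backwards
//	window = load64(p)     // one 8-byte load replaces up to eight byte shifts
//	remaining -= whole
//	bitsRead &= 7          // only sub-byte consumption is left
//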
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + 
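//
// Editor's aside (illustrative, not part of the vendored change): each
// "Update ... State" block above is one FSE transition over the packed
// decSymbol layout from fse_decoder.go. In Go terms (bounds mask elided;
// readBits is a hypothetical helper returning the next nBits as uint16):
//
//	nBits := uint8(llState)           // bits 0-7: fresh bits to read
//	newState := uint16(llState >> 16) // bits 16-31: base of the next state
//	low := readBits(nBits)            // pulled from the shared bit buffer
//	llState = llTable[newState+low]   // one 8-byte table load
//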
MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset 
and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + 
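+	// (A small offset value selects one of the three repeat offsets,
+	// per the RFC 8878 repeat-offset rules: value 0 with a non-zero
+	// literal length reuses the previous offset unchanged; a zero
+	// literal length shifts the selection by one, and the last slot
+	// becomes the previous offset minus one.)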
+sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_amd64_adjust_three + JMP sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
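+	// (The BMI2 variants replace the CL-based shift/mask sequences with
+	// BEXTR/BZHI/SHRX bit-field instructions, shortening the per-field
+	// update chains.)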
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + MOVQ $0x00001010, R14 + BEXTRQ R14, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_bmi2_adjust_zero + JEQ 
sequenceDecs_decode_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
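+	// (The _56 variants refill the bit buffer only once per sequence:
+	// offset, match length, and literal length together must fit in the
+	// 56 bits that a single refill guarantees.)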
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + MOVQ $0x00001010, R14 + BEXTRQ R14, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + 
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + 
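+	// (8-16 byte copies use two possibly overlapping 8-byte loads and
+	// stores: one at the start, one ending at the last byte.)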
+copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ BX, R12 + ADDQ R13, BX + +copy_2: + MOVUPS (R11), X0 + MOVUPS X0, (R12) + ADDQ $0x10, R11 + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + MOVQ R11, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (SI), X0 + MOVUPS X0, (BX) + ADDQ $0x10, SI + ADDQ $0x10, BX + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(SI)(R14*1), SI + LEAQ 16(BX)(R14*1), BX + MOVUPS -16(SI), X0 + MOVUPS X0, -16(BX) + JMP copy_1_end + +copy_1_small: + CMPQ R11, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ R11, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (SI), R14 + 
MOVB -1(SI)(R11*1), R15 + MOVB R14, (BX) + MOVB R15, -1(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_3: + MOVW (SI), R14 + MOVB 2(SI), R15 + MOVW R14, (BX) + MOVB R15, 2(BX) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_4through7: + MOVL (SI), R14 + MOVL -4(SI)(R11*1), R15 + MOVL R14, (BX) + MOVL R15, -4(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (SI), R14 + MOVQ -8(SI)(R11*1), R15 + MOVQ R14, (BX) + MOVQ R15, -8(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + +copy_1_end: + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (R11), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R11 + ADDQ $0x10, BX + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(R11)(R12*1), R11 + LEAQ 16(BX)(R12*1), BX + MOVUPS -16(R11), X0 + MOVUPS X0, -16(BX) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB 
copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (R11), R12 + MOVB -1(R11)(R13*1), R14 + MOVB R12, (BX) + MOVB R14, -1(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_3: + MOVW (R11), R12 + MOVB 2(R11), R14 + MOVW R12, (BX) + MOVB R14, 2(BX) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_4through7: + MOVL (R11), R12 + MOVL -4(R11)(R13*1), R14 + MOVL R12, (BX) + MOVL R14, -4(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (R11), R12 + MOVQ -8(R11)(R13*1), R14 + MOVQ R12, (BX) + MOVQ R14, -8(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
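+	// (decodeSync fuses decoding and execution: each sequence is copied
+	// into s.out as soon as it is decoded, with a per-sequence output
+	// capacity check.)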
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL 
$0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, 
-8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R10, CX + ADDQ R13, R10 + +copy_2: + MOVUPS (AX), X0 + MOVUPS X0, (CX) + ADDQ $0x10, AX + ADDQ $0x10, CX + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), 
R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_end + +sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_2_end + +sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + MOVQ $0x00001010, R13 + BEXTRQ R13, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 
144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) 
+ JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R9, R12 + ADDQ R13, R9 + +copy_2: + MOVUPS (CX), X0 + MOVUPS X0, (R12) + ADDQ $0x10, CX + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_safe_amd64_main_loop: + MOVQ (SP), R13 + + 
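+	// (The _safe variant mirrors sequenceDecs_decodeSync_amd64 but uses
+	// exact-length copy paths throughout, so it never reads or writes
+	// past the true ends of the literal and output buffers.)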
// Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_end + +sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end + +sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ 
ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + MOVQ AX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R11), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R11 + ADDQ $0x10, R10 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R11)(R14*1), R11 + LEAQ 16(R10)(R14*1), R10 + MOVUPS -16(R11), X0 + MOVUPS X0, -16(R10) + JMP copy_1_end + +copy_1_small: + CMPQ AX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ AX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R11), R14 + MOVB -1(R11)(AX*1), R15 + MOVB R14, (R10) + MOVB R15, -1(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_3: + MOVW (R11), R14 + MOVB 2(R11), R15 + MOVW R14, (R10) + MOVB R15, 2(R10) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R11), R14 + MOVL -4(R11)(AX*1), R15 + MOVL R14, (R10) + MOVL R15, -4(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R11), R14 + MOVQ -8(R11)(AX*1), R15 + MOVQ R14, (R10) + MOVQ R15, -8(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + 
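+ // The copy_1_move_* blocks above copy literal runs of 1..16 bytes
+ // without a byte loop: each size class does two loads and two stores
+ // that may overlap in the middle, one anchored at the start of the run
+ // and one at its end. An 11-byte copy, for instance, is an 8-byte
+ // load/store at offset 0 plus an 8-byte load/store at offset len-8,
+ // overlapping by 5 bytes.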
+copy_1_end: + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_2_small + +copy_2_loop: + MOVUPS (AX), X0 + MOVUPS X0, (R10) + ADDQ $0x10, AX + ADDQ $0x10, R10 + SUBQ $0x10, CX + JAE copy_2_loop + LEAQ 16(AX)(CX*1), AX + LEAQ 16(R10)(CX*1), R10 + MOVUPS -16(AX), X0 + MOVUPS X0, -16(R10) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (AX), CL + MOVB -1(AX)(R13*1), R14 + MOVB CL, (R10) + MOVB R14, -1(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_3: + MOVW (AX), CX + MOVB 2(AX), R14 + MOVW CX, (R10) + MOVB R14, 2(R10) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (AX), CX + MOVL -4(AX)(R13*1), R14 + MOVL CX, (R10) + MOVL R14, -4(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (AX), CX + MOVQ
-8(AX)(R13*1), R14 + MOVQ CX, (R10) + MOVQ R14, -8(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_safe_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_safe_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length.
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + MOVQ $0x00001010, R13 + BEXTRQ R13, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + MOVQ CX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R10), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R10 + ADDQ $0x10, R9 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R10)(R14*1), R10 + LEAQ 16(R9)(R14*1), R9 + MOVUPS -16(R10), X0 + MOVUPS X0, -16(R9) + JMP copy_1_end + +copy_1_small: + CMPQ CX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ CX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R10), R14 + MOVB -1(R10)(CX*1), R15 + MOVB R14, (R9) + MOVB R15, -1(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_3: + MOVW (R10), R14 + MOVB 2(R10), R15 + MOVW R14, (R9) + MOVB R15, 2(R9) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R10), R14 + MOVL -4(R10)(CX*1), R15 + MOVL R14, (R9) + MOVL R15, -4(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R10), R14 + MOVQ -8(R10)(CX*1), R15 + MOVQ R14, (R9) + MOVQ R15, -8(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + +copy_1_end: + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) +
MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (CX), X0 + MOVUPS X0, (R9) + ADDQ $0x10, CX + ADDQ $0x10, R9 + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(CX)(R12*1), CX + LEAQ 16(R9)(R12*1), R9 + MOVUPS -16(CX), X0 + MOVUPS X0, -16(R9) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (CX), R12 + MOVB -1(CX)(R13*1), R14 + MOVB R12, (R9) + MOVB R14, -1(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_3: + MOVW (CX), R12 + MOVB 2(CX), R14 + MOVW R12, (R9) + MOVB R14, 2(R9) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (CX), R12 + MOVL -4(CX)(R13*1), R14 + MOVL R12, (R9) + MOVL R14, -4(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (CX), R12 + MOVQ -8(CX)(R13*1), R14 + MOVQ R12, (R9) + MOVQ R14, -8(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, 
ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 000000000..c3452bc3a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
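+				// Worked example of the rotation handled below: with
+				// prevOffset = {5, 7, 9} and ll > 0, a decoded mo of 2
+				// selects prevOffset[2] == 9 and shifts the history to
+				// {9, 5, 7}; with ll == 0, a decoded mo of 0 is first
+				// bumped to 1 and so selects prevOffset[1]
+				// (Repeated_Offset2), as the spec comment above states.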
+ mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. 
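+			// For example, with t == 10, len(hist) == 100 and seq.mo == 15,
+			// v == 5: the first five bytes of the match come from hist[95:],
+			// and any remaining seq.ml - 5 bytes from the output written so far.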
+ start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. + // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + + // We must be in the current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + } else { + // Overlapping copy + // Extend destination slice and copy one byte at a time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go index 36bcc3cc0..8014174a7 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqenc.go +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -35,7 +35,6 @@ func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { // Ensure we cannot reuse by accident prevEnc := *prev prevEnc.symbolLen = 0 - return } compareSwap(ll, &s.llEnc, &s.llPrev) compareSwap(ml, &s.mlEnc, &s.mlPrev) diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go index 841fd95ac..9e1baad73 100644 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -11,7 +11,7 @@ import ( "io" "github.com/klauspost/compress/huff0" - "github.com/klauspost/compress/snappy" + snappy "github.com/klauspost/compress/internal/snapref" ) const ( @@ -185,7 +185,6 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { r.block.reset(nil) r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) if err != nil { - println("snappy.Decode:", err) return written, err } err = r.block.encodeLits(r.block.literals, false) @@ -204,7 +203,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { written += int64(n) continue case chunkTypeUncompressedData: - if debug { + if debugEncoder { println("Uncompressed, chunklen", chunkLen) } // Section 4.3. Uncompressed data (chunk type 0x01). @@ -247,7 +246,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { continue case chunkTypeStreamIdentifier: - if debug { + if debugEncoder { println("stream id", chunkLen, len(snappyMagicBody)) } // Section 4.1. Stream identifier (chunk type 0xff). @@ -417,7 +416,7 @@ var crcTable = crc32.MakeTable(crc32.Castagnoli) // https://github.com/google/snappy/blob/master/framing_format.txt func snappyCRC(b []byte) uint32 { c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 + return c>>15 | c<<17 + 0xa282ead8 } // snappyDecodedLen returns the length of the decoded block and the number of bytes diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go new file mode 100644 index 000000000..29c15c8c4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -0,0 +1,141 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file.
+ +package zstd + +import ( + "errors" + "io" + "sync" +) + +// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. +// See https://www.winzip.com/win/en/comp_info.html +const ZipMethodWinZip = 93 + +// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. +// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. +// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT +const ZipMethodPKWare = 20 + +// zipReaderPool is the default reader pool. +var zipReaderPool = sync.Pool{New: func() interface{} { + z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) + if err != nil { + panic(err) + } + return z +}} + +// newZipReader creates a pooled zip decompressor. +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) + if err != nil { + panic(err) + } + dec = d + } + return &pooledZipReader{dec: dec, pool: pool} + } +} + +type pooledZipReader struct { + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder +} + +func (r *pooledZipReader) Read(p []byte) (n int, err error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.dec == nil { + return 0, errors.New("read after close or EOF") + } + dec, err := r.dec.Read(p) + if err == io.EOF { + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return dec, err +} + +func (r *pooledZipReader) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + var err error + if r.dec != nil { + err = r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return err +} + +type pooledZipWriter struct { + mu sync.Mutex // guards Close and Read + enc *Encoder + pool *sync.Pool +} + +func (w *pooledZipWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + if w.enc == nil { + return 0, errors.New("Write after Close") + } + return w.enc.Write(p) +} + +func (w *pooledZipWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + var err error + if w.enc != nil { + err = w.enc.Close() + w.pool.Put(w.enc) + w.enc = nil + } + return err +} + +// ZipCompressor returns a compressor that can be registered with zip libraries. +// The provided encoder options will be used on all encodes. +func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { + var pool sync.Pool + return func(w io.Writer) (io.WriteCloser, error) { + enc, ok := pool.Get().(*Encoder) + if ok { + enc.Reset(w) + } else { + var err error + enc, err = NewWriter(w, opts...) + if err != nil { + return nil, err + } + } + return &pooledZipWriter{enc: enc, pool: &pool}, nil + } +} + +// ZipDecompressor returns a decompressor that can be registered with zip libraries. +// See ZipCompressor for example. +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. +// The window size can be overridden if required. +func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) 
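+	// Typical wiring with archive/zip (a sketch; error handling elided):
+	//
+	//	zr, _ := zip.NewReader(f, size)
+	//	zr.RegisterDecompressor(ZipMethodWinZip, ZipDecompressor())
+	//	zw := zip.NewWriter(out)
+	//	zw.RegisterCompressor(ZipMethodWinZip, ZipCompressor())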
+} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 0807719c8..3eb3f1c82 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -4,6 +4,8 @@ package zstd import ( + "bytes" + "encoding/binary" "errors" "log" "math" @@ -13,6 +15,12 @@ import ( // enable debug printing const debug = false +// enable encoding debug printing +const debugEncoder = debug + +// enable decoding debug printing +const debugDecoder = debug + // Enable extra assertions. const debugAsserts = debug || false @@ -31,6 +39,9 @@ const zstdMinMatch = 3 // Reset the buffer offset when reaching this. const bufferReset = math.MaxInt32 - MaxWindowSize +// fcsUnknown is used for unknown frame content size. +const fcsUnknown = math.MaxUint64 + var ( // ErrReservedBlockType is returned when a reserved block type is found. // Typically this indicates wrong or corrupted input. @@ -44,6 +55,10 @@ var ( // Typically returned on invalid input. ErrBlockTooSmall = errors.New("block too small") + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. // Typically this indicates wrong or corrupted input. ErrMagicMismatch = errors.New("invalid input: magic number mismatch") @@ -67,37 +82,34 @@ var ( // This is only returned if SingleSegment is specified on the frame. ErrFrameSizeExceeded = errors.New("frame size exceeded") + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + // ErrCRCMismatch is returned if CRC mismatches. ErrCRCMismatch = errors.New("CRC check failed") // ErrDecoderClosed will be returned if the Decoder was used after // Close has been called. ErrDecoderClosed = errors.New("decoder used after Close") + + // ErrDecoderNilInput is returned when a nil Reader was provided + // and an operation other than Reset/DecodeAll/Close was attempted. + ErrDecoderNilInput = errors.New("nil input provided as reader") ) func println(a ...interface{}) { - if debug { + if debug || debugDecoder || debugEncoder { log.Println(a...) } } func printf(format string, a ...interface{}) { - if debug { + if debug || debugDecoder || debugEncoder { log.Printf(format, a...) } } -// matchLenFast does matching, but will not match the last up to 7 bytes. -func matchLenFast(a, b []byte) int { - endI := len(a) & (math.MaxInt32 - 7) - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - return i + bits.TrailingZeros64(diff)>>3 - } - } - return endI -} - // matchLen returns the maximum length. // a must be the shortest of the two. // The function also returns whether all bytes matched. @@ -121,24 +133,20 @@ func matchLen(a, b []byte) int { } func load3232(b []byte, i int32) uint32 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:4] - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + return binary.LittleEndian.Uint32(b[i:]) } func load6432(b []byte, i int32) uint64 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. 
- b = b[i:] - b = b[:8] - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + return binary.LittleEndian.Uint64(b[i:]) } func load64(b []byte, i int) uint64 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:8] - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + return binary.LittleEndian.Uint64(b[i:]) +} + +type byter interface { + Bytes() []byte + Len() int } + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml deleted file mode 100644 index 98db8f060..000000000 --- a/vendor/github.com/mattn/go-colorable/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - tip - -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - $HOME/gopath/bin/goveralls -repotoken xnXqRGwgW3SXIguzxf90ZSK1GPYZPaGrw diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md index 56729a92c..ca0483711 100644 --- a/vendor/github.com/mattn/go-colorable/README.md +++ b/vendor/github.com/mattn/go-colorable/README.md @@ -1,8 +1,8 @@ # go-colorable -[![Godoc Reference](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) -[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable) -[![Coverage Status](https://coveralls.io/repos/github/mattn/go-colorable/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-colorable?branch=master) +[![Build Status](https://github.com/mattn/go-colorable/workflows/test/badge.svg)](https://github.com/mattn/go-colorable/actions?query=workflow%3Atest) +[![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable) +[![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) [![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) Colorable writer for windows. diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go index 0b0aef837..416d1bbbf 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_appengine.go +++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -1,3 +1,4 @@ +//go:build appengine // +build appengine package colorable @@ -27,3 +28,11 @@ func NewColorableStdout() io.Writer { func NewColorableStderr() io.Writer { return os.Stderr } + +// EnableColorsStdout enable colors if possible. 
+func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go index 3fb771dcc..766d94603 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_others.go +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -1,5 +1,5 @@ -// +build !windows -// +build !appengine +//go:build !windows && !appengine +// +build !windows,!appengine package colorable @@ -28,3 +28,11 @@ func NewColorableStdout() io.Writer { func NewColorableStderr() io.Writer { return os.Stderr } + +// EnableColorsStdout enable colors if possible. +func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go index 1bd628f25..1846ad5ab 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_windows.go +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -1,5 +1,5 @@ -// +build windows -// +build !appengine +//go:build windows && !appengine +// +build windows,!appengine package colorable @@ -10,6 +10,7 @@ import ( "os" "strconv" "strings" + "sync" "syscall" "unsafe" @@ -27,6 +28,9 @@ const ( backgroundRed = 0x40 backgroundIntensity = 0x80 backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) + commonLvbUnderscore = 0x8000 + + cENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 ) const ( @@ -78,6 +82,8 @@ var ( procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") ) @@ -89,6 +95,7 @@ type Writer struct { oldattr word oldpos coord rest bytes.Buffer + mutex sync.Mutex } // NewColorable returns new instance of Writer which handles escape sequence from File. 
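// A minimal usage sketch, assuming only the public API added in this diff
// (EnableColorsStdout, NewColorableStdout):
//
//	restore := colorable.EnableColorsStdout(nil)
//	defer restore()
//	out := colorable.NewColorableStdout()
//	fmt.Fprintf(out, "\x1b[32m%s\x1b[0m\n", "green, even on Windows")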
@@ -98,6 +105,10 @@ func NewColorable(file *os.File) io.Writer { } if isatty.IsTerminal(file.Fd()) { + var mode uint32 + if r, _, _ := procGetConsoleMode.Call(file.Fd(), uintptr(unsafe.Pointer(&mode))); r != 0 && mode&cENABLE_VIRTUAL_TERMINAL_PROCESSING != 0 { + return file + } var csbi consoleScreenBufferInfo handle := syscall.Handle(file.Fd()) procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) @@ -424,6 +435,8 @@ func atoiWithDefault(s string, def int) (int, error) { // Write writes data on console func (w *Writer) Write(data []byte) (n int, err error) { + w.mutex.Lock() + defer w.mutex.Unlock() var csbi consoleScreenBufferInfo procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) @@ -439,18 +452,22 @@ func (w *Writer) Write(data []byte) (n int, err error) { } else { er = bytes.NewReader(data) } - var bw [1]byte + var plaintext bytes.Buffer loop: for { c1, err := er.ReadByte() if err != nil { + plaintext.WriteTo(w.out) break loop } if c1 != 0x1b { - bw[0] = c1 - w.out.Write(bw[:]) + plaintext.WriteByte(c1) continue } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } c2, err := er.ReadByte() if err != nil { break loop @@ -675,14 +692,19 @@ loop: switch { case n == 0 || n == 100: attr = w.oldattr - case 1 <= n && n <= 5: - attr |= foregroundIntensity - case n == 7: - attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) - case n == 22 || n == 25: + case n == 4: + attr |= commonLvbUnderscore + case (1 <= n && n <= 3) || n == 5: attr |= foregroundIntensity - case n == 27: - attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case n == 7 || n == 27: + attr = + (attr &^ (foregroundMask | backgroundMask)) | + ((attr & foregroundMask) << 4) | + ((attr & backgroundMask) >> 4) + case n == 22: + attr &^= foregroundIntensity + case n == 24: + attr &^= commonLvbUnderscore case 30 <= n && n <= 37: attr &= backgroundMask if (n-30)&1 != 0 { @@ -701,7 +723,7 @@ loop: n256setup() } attr &= backgroundMask - attr |= n256foreAttr[n256] + attr |= n256foreAttr[n256%len(n256foreAttr)] i += 2 } } else if len(token) == 5 && token[i+1] == "2" { @@ -743,7 +765,7 @@ loop: n256setup() } attr &= foregroundMask - attr |= n256backAttr[n256] + attr |= n256backAttr[n256%len(n256backAttr)] i += 2 } } else if len(token) == 5 && token[i+1] == "2" { @@ -1003,3 +1025,23 @@ func n256setup() { n256backAttr[i] = c.backgroundAttr() } } + +// EnableColorsStdout enable colors if possible. 
+func EnableColorsStdout(enabled *bool) func() { + var mode uint32 + h := os.Stdout.Fd() + if r, _, _ := procGetConsoleMode.Call(h, uintptr(unsafe.Pointer(&mode))); r != 0 { + if r, _, _ = procSetConsoleMode.Call(h, uintptr(mode|cENABLE_VIRTUAL_TERMINAL_PROCESSING)); r != 0 { + if enabled != nil { + *enabled = true + } + return func() { + procSetConsoleMode.Call(h, uintptr(mode)) + } + } + } + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/go.mod b/vendor/github.com/mattn/go-colorable/go.mod index ef3ca9d4c..27351c027 100644 --- a/vendor/github.com/mattn/go-colorable/go.mod +++ b/vendor/github.com/mattn/go-colorable/go.mod @@ -1,3 +1,8 @@ module github.com/mattn/go-colorable -require github.com/mattn/go-isatty v0.0.8 +require ( + github.com/mattn/go-isatty v0.0.14 + golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6 // indirect +) + +go 1.13 diff --git a/vendor/github.com/mattn/go-colorable/go.sum b/vendor/github.com/mattn/go-colorable/go.sum index 2c12960ec..40c33b333 100644 --- a/vendor/github.com/mattn/go-colorable/go.sum +++ b/vendor/github.com/mattn/go-colorable/go.sum @@ -1,4 +1,5 @@ -github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6 h1:foEbQz/B0Oz6YIqu/69kfXPYeFQAuuMYFkjaqXzl5Wo= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/vendor/github.com/mattn/go-colorable/go.test.sh b/vendor/github.com/mattn/go-colorable/go.test.sh new file mode 100644 index 000000000..012162b07 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... 
| grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go index 95f2c6be2..05d6f74bf 100644 --- a/vendor/github.com/mattn/go-colorable/noncolorable.go +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -18,18 +18,22 @@ func NewNonColorable(w io.Writer) io.Writer { // Write writes data on console func (w *NonColorable) Write(data []byte) (n int, err error) { er := bytes.NewReader(data) - var bw [1]byte + var plaintext bytes.Buffer loop: for { c1, err := er.ReadByte() if err != nil { + plaintext.WriteTo(w.out) break loop } if c1 != 0x1b { - bw[0] = c1 - w.out.Write(bw[:]) + plaintext.WriteByte(c1) continue } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } c2, err := er.ReadByte() if err != nil { break loop @@ -38,7 +42,6 @@ loop: continue } - var buf bytes.Buffer for { c, err := er.ReadByte() if err != nil { @@ -47,7 +50,6 @@ loop: if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { break } - buf.Write([]byte(string(c))) } } diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml deleted file mode 100644 index 5597e026d..000000000 --- a/vendor/github.com/mattn/go-isatty/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go -go: - - tip - -os: - - linux - - osx - -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5 diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md index 1e69004bb..38418353e 100644 --- a/vendor/github.com/mattn/go-isatty/README.md +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -1,7 +1,7 @@ # go-isatty [![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) -[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty) +[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty) [![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) [![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod index a8ddf404f..c9a20b7f3 100644 --- a/vendor/github.com/mattn/go-isatty/go.mod +++ b/vendor/github.com/mattn/go-isatty/go.mod @@ -1,5 +1,5 @@ module github.com/mattn/go-isatty -require golang.org/x/sys v0.0.0-20191008105621-543471e840be +go 1.12 -go 1.14 +require golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum index c141fc53a..912e29cbc 100644 --- a/vendor/github.com/mattn/go-isatty/go.sum +++ b/vendor/github.com/mattn/go-isatty/go.sum @@ -1,4 +1,2 @@ -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be 
h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/mattn/go-isatty/go.test.sh b/vendor/github.com/mattn/go-isatty/go.test.sh new file mode 100644 index 000000000..012162b07 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-isatty/isatty_android.go b/vendor/github.com/mattn/go-isatty/isatty_android.go deleted file mode 100644 index d3567cb5b..000000000 --- a/vendor/github.com/mattn/go-isatty/isatty_android.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build android - -package isatty - -import ( - "syscall" - "unsafe" -) - -const ioctlReadTermios = syscall.TCGETS - -// IsTerminal return true if the file descriptor is terminal. -func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} - -// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. -func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go index 07e93039d..39bbcf00f 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -1,20 +1,15 @@ +//go:build (darwin || freebsd || openbsd || netbsd || dragonfly) && !appengine // +build darwin freebsd openbsd netbsd dragonfly // +build !appengine package isatty -import ( - "syscall" - "unsafe" -) - -const ioctlReadTermios = syscall.TIOCGETA +import "golang.org/x/sys/unix" // IsTerminal return true if the file descriptor is terminal. func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 + _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA) + return err == nil } // IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go index ff714a376..31503226f 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_others.go +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -1,4 +1,5 @@ -// +build appengine js nacl +//go:build appengine || js || nacl || wasm +// +build appengine js nacl wasm package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go index bc0a70920..bae7f9bb3 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_plan9.go +++ b/vendor/github.com/mattn/go-isatty/isatty_plan9.go @@ -1,3 +1,4 @@ +//go:build plan9 // +build plan9 package isatty @@ -8,7 +9,7 @@ import ( // IsTerminal returns true if the given file descriptor is a terminal. 
func IsTerminal(fd uintptr) bool { - path, err := syscall.Fd2path(fd) + path, err := syscall.Fd2path(int(fd)) if err != nil { return false } diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go index bdd5c79a0..0c3acf2dc 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_solaris.go +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -1,5 +1,5 @@ -// +build solaris -// +build !appengine +//go:build solaris && !appengine +// +build solaris,!appengine package isatty @@ -8,10 +8,9 @@ import ( ) // IsTerminal returns true if the given file descriptor is a terminal. -// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +// see: https://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libc/port/gen/isatty.c func IsTerminal(fd uintptr) bool { - var termio unix.Termio - err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + _, err := unix.IoctlGetTermio(int(fd), unix.TCGETA) return err == nil } diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go index 453b025d0..67787657f 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -1,6 +1,6 @@ -// +build linux aix +//go:build (linux || aix || zos) && !appengine +// +build linux aix zos // +build !appengine -// +build !android package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go index 1fa869154..8e3c99171 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_windows.go +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -1,5 +1,5 @@ -// +build windows -// +build !appengine +//go:build windows && !appengine +// +build windows,!appengine package isatty @@ -76,7 +76,7 @@ func isCygwinPipeName(name string) bool { } // getFileNameByHandle use the undocomented ntdll NtQueryObject to get file full name from file handler -// since GetFileInformationByHandleEx is not avilable under windows Vista and still some old fashion +// since GetFileInformationByHandleEx is not available under windows Vista and still some old fashion // guys are using Windows XP, this is a workaround for those guys, it will also work on system from // Windows vista to 10 // see https://stackoverflow.com/a/18792477 for details diff --git a/vendor/github.com/mitchellh/go-wordwrap/go.mod b/vendor/github.com/mitchellh/go-wordwrap/go.mod index 2ae411b20..431d615d4 100644 --- a/vendor/github.com/mitchellh/go-wordwrap/go.mod +++ b/vendor/github.com/mitchellh/go-wordwrap/go.mod @@ -1 +1,3 @@ module github.com/mitchellh/go-wordwrap + +go 1.14 diff --git a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go index ac67205bc..f7bedda38 100644 --- a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go +++ b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go @@ -5,6 +5,8 @@ import ( "unicode" ) +const nbsp = 0xA0 + // WrapString wraps the given string within lim width in characters. // // Wrapping is currently naive and only happens at white-space. 
A future @@ -18,50 +20,58 @@ func WrapString(s string, lim uint) string { var current uint var wordBuf, spaceBuf bytes.Buffer + var wordBufLen, spaceBufLen uint for _, char := range s { if char == '\n' { if wordBuf.Len() == 0 { - if current+uint(spaceBuf.Len()) > lim { + if current+spaceBufLen > lim { current = 0 } else { - current += uint(spaceBuf.Len()) + current += spaceBufLen spaceBuf.WriteTo(buf) } spaceBuf.Reset() + spaceBufLen = 0 } else { - current += uint(spaceBuf.Len() + wordBuf.Len()) + current += spaceBufLen + wordBufLen spaceBuf.WriteTo(buf) spaceBuf.Reset() + spaceBufLen = 0 wordBuf.WriteTo(buf) wordBuf.Reset() + wordBufLen = 0 } buf.WriteRune(char) current = 0 - } else if unicode.IsSpace(char) { + } else if unicode.IsSpace(char) && char != nbsp { if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { - current += uint(spaceBuf.Len() + wordBuf.Len()) + current += spaceBufLen + wordBufLen spaceBuf.WriteTo(buf) spaceBuf.Reset() + spaceBufLen = 0 wordBuf.WriteTo(buf) wordBuf.Reset() + wordBufLen = 0 } spaceBuf.WriteRune(char) + spaceBufLen++ } else { - wordBuf.WriteRune(char) + wordBufLen++ - if current+uint(spaceBuf.Len()+wordBuf.Len()) > lim && uint(wordBuf.Len()) < lim { + if current+wordBufLen+spaceBufLen > lim && wordBufLen < lim { buf.WriteRune('\n') current = 0 spaceBuf.Reset() + spaceBufLen = 0 } } } if wordBuf.Len() == 0 { - if current+uint(spaceBuf.Len()) <= lim { + if current+spaceBufLen <= lim { spaceBuf.WriteTo(buf) } } else { diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml deleted file mode 100644 index 1689c7d73..000000000 --- a/vendor/github.com/mitchellh/mapstructure/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go - -go: - - "1.11.x" - - tip - -script: - - go test diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md index 3b3cb723f..c75823490 100644 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -1,3 +1,78 @@ +## 1.5.0 + +* New option `IgnoreUntaggedFields` to ignore decoding to any fields + without `mapstructure` (or the configured tag name) set [GH-277] +* New option `ErrorUnset` which makes it an error if any fields + in a target struct are not set by the decoding process. [GH-225] +* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240] +* Decoding to slice from array no longer crashes [GH-265] +* Decode nested struct pointers to map [GH-271] +* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280] +* Fix issue where fields with `,omitempty` would sometimes decode + into a map with an empty string key [GH-281] + +## 1.4.3 + +* Fix cases where `json.Number` didn't decode properly [GH-261] + +## 1.4.2 + +* Custom name matchers to support any sort of casing, formatting, etc. for + field names. [GH-250] +* Fix possible panic in ComposeDecodeHookFunc [GH-251] + +## 1.4.1 + +* Fix regression where `*time.Time` value would be set to empty and not be sent + to decode hooks properly [GH-232] + +## 1.4.0 + +* A new decode hook type `DecodeHookFuncValue` has been added that has + access to the full values. 
[GH-183] +* Squash is now supported with embedded fields that are struct pointers [GH-205] +* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] + +## 1.3.3 + +* Decoding maps from maps creates a settable value for decode hooks [GH-203] + +## 1.3.2 + +* Decode into interface type with a struct value is supported [GH-187] + +## 1.3.1 + +* Squash should only squash embedded structs. [GH-194] + +## 1.3.0 + +* Added `",omitempty"` support. This will ignore zero values in the source + structure when encoding. [GH-145] + +## 1.2.3 + +* Fix duplicate entries in Keys list with pointer values. [GH-185] + +## 1.2.2 + +* Do not add unsettable (unexported) values to the unused metadata key + or "remain" value. [GH-150] + +## 1.2.1 + +* Go modules checksum mismatch fix + +## 1.2.0 + +* Added support to capture unused values in a field using the `",remain"` value + in the mapstructure tag. There is an example to showcase usage. +* Added `DecoderConfig` option to always squash embedded structs +* `json.Number` can decode into `uint` types +* Empty slices are preserved and not replaced with nil slices +* Fix panic that can occur in when decoding a map into a nil slice of structs +* Improved package documentation for godoc + ## 1.1.2 * Fix error when decode hook decodes interface implementation into interface diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go index 1f0abc65a..3a754ca72 100644 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -1,6 +1,7 @@ package mapstructure import ( + "encoding" "errors" "fmt" "net" @@ -16,10 +17,11 @@ func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { // Create variables here so we can reference them with the reflect pkg var f1 DecodeHookFuncType var f2 DecodeHookFuncKind + var f3 DecodeHookFuncValue // Fill in the variables into this interface and the rest is done // automatically using the reflect package. - potential := []interface{}{f1, f2} + potential := []interface{}{f1, f2, f3} v := reflect.ValueOf(h) vt := v.Type() @@ -38,13 +40,15 @@ func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { // that took reflect.Kind instead of reflect.Type. func DecodeHookExec( raw DecodeHookFunc, - from reflect.Type, to reflect.Type, - data interface{}) (interface{}, error) { + from reflect.Value, to reflect.Value) (interface{}, error) { + switch f := typedDecodeHook(raw).(type) { case DecodeHookFuncType: - return f(from, to, data) + return f(from.Type(), to.Type(), from.Interface()) case DecodeHookFuncKind: - return f(from.Kind(), to.Kind(), data) + return f(from.Kind(), to.Kind(), from.Interface()) + case DecodeHookFuncValue: + return f(from, to) default: return nil, errors.New("invalid decode hook signature") } @@ -56,25 +60,42 @@ func DecodeHookExec( // The composed funcs are called in order, with the result of the // previous transformation. 
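// (Editor's note: aside, not part of the vendored patch.) DecodeHookExec now
// receives reflect.Values instead of a reflect.Type pair plus the raw data,
// which is what lets the new DecodeHookFuncValue hooks see the actual source
// and target values. A hedged sketch of composing hooks from calling code;
// `target` and `input` are hypothetical placeholders:
//
//    cfg := &mapstructure.DecoderConfig{
//        Result: &target,
//        DecodeHook: mapstructure.ComposeDecodeHookFunc(
//            mapstructure.StringToTimeDurationHookFunc(),
//            mapstructure.TextUnmarshallerHookFunc(), // added in this version
//        ),
//    }
//    dec, err := mapstructure.NewDecoder(cfg)
//    if err != nil {
//        panic(err)
//    }
//    if err := dec.Decode(input); err != nil {
//        panic(err)
//    }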
func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { var err error + data := f.Interface() + + newFrom := f for _, f1 := range fs { - data, err = DecodeHookExec(f1, f, t, data) + data, err = DecodeHookExec(f1, newFrom, t) if err != nil { return nil, err } + newFrom = reflect.ValueOf(data) + } - // Modify the from kind to be correct with the new data - f = nil - if val := reflect.ValueOf(data); val.IsValid() { - f = val.Type() + return data, nil + } +} + +// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. +// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. +func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { + return func(a, b reflect.Value) (interface{}, error) { + var allErrs string + var out interface{} + var err error + + for _, f := range ff { + out, err = DecodeHookExec(f, a, b) + if err != nil { + allErrs += err.Error() + "\n" + continue } + + return out, nil } - return data, nil + return nil, errors.New(allErrs) } } @@ -215,3 +236,44 @@ func WeaklyTypedHook( return data, nil } + +func RecursiveStructToMapHookFunc() DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + if f.Kind() != reflect.Struct { + return f.Interface(), nil + } + + var i interface{} = struct{}{} + if t.Type() != reflect.TypeOf(&i).Elem() { + return f.Interface(), nil + } + + m := make(map[string]interface{}) + t.Set(reflect.ValueOf(m)) + + return f.Interface(), nil + } +} + +// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies +// strings to the UnmarshalText function, when the target type +// implements the encoding.TextUnmarshaler interface +func TextUnmarshallerHookFunc() DecodeHookFuncType { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + result := reflect.New(t).Interface() + unmarshaller, ok := result.(encoding.TextUnmarshaler) + if !ok { + return data, nil + } + if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil { + return nil, err + } + return result, nil + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/go.mod b/vendor/github.com/mitchellh/mapstructure/go.mod index d2a712562..a03ae9730 100644 --- a/vendor/github.com/mitchellh/mapstructure/go.mod +++ b/vendor/github.com/mitchellh/mapstructure/go.mod @@ -1 +1,3 @@ module github.com/mitchellh/mapstructure + +go 1.14 diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index 256ee63fb..1efb22ac3 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -1,10 +1,161 @@ -// Package mapstructure exposes functionality to convert an arbitrary -// map[string]interface{} into a native Go structure. +// Package mapstructure exposes functionality to convert one arbitrary +// Go type into another, typically to convert a map[string]interface{} +// into a native Go structure. // // The Go structure can be arbitrarily complex, containing slices, // other structs, etc. and the decoder will properly decode nested // maps and so on into the proper structures in the native Go struct. 
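// (Editor's note: aside, not part of the vendored patch.) For orientation
// while reading the expanded package docs below, the one-call entry point
// the docs refer to looks like this (`input` is a hypothetical source map):
//
//    type User struct {
//        Username string
//    }
//
//    var u User
//    input := map[string]interface{}{"username": "alice"}
//    if err := mapstructure.Decode(input, &u); err != nil {
//        panic(err)
//    }
//    // u.Username == "alice"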
// See the examples to see what the decoder is capable of. +// +// The simplest function to start with is Decode. +// +// Field Tags +// +// When decoding to a struct, mapstructure will use the field name by +// default to perform the mapping. For example, if a struct has a field +// "Username" then mapstructure will look for a key in the source value +// of "username" (case insensitive). +// +// type User struct { +// Username string +// } +// +// You can change the behavior of mapstructure by using struct tags. +// The default struct tag that mapstructure looks for is "mapstructure" +// but you can customize it using DecoderConfig. +// +// Renaming Fields +// +// To rename the key that mapstructure looks for, use the "mapstructure" +// tag and set a value directly. For example, to change the "username" example +// above to "user": +// +// type User struct { +// Username string `mapstructure:"user"` +// } +// +// Embedded Structs and Squashing +// +// Embedded structs are treated as if they're another field with that name. +// By default, the two structs below are equivalent when decoding with +// mapstructure: +// +// type Person struct { +// Name string +// } +// +// type Friend struct { +// Person +// } +// +// type Friend struct { +// Person Person +// } +// +// This would require an input that looks like below: +// +// map[string]interface{}{ +// "person": map[string]interface{}{"name": "alice"}, +// } +// +// If your "person" value is NOT nested, then you can append ",squash" to +// your tag value and mapstructure will treat it as if the embedded struct +// were part of the struct directly. Example: +// +// type Friend struct { +// Person `mapstructure:",squash"` +// } +// +// Now the following input would be accepted: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// When decoding from a struct to a map, the squash tag squashes the struct +// fields into a single map. Using the example structs from above: +// +// Friend{Person: Person{Name: "alice"}} +// +// Will be decoded into a map: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// DecoderConfig has a field that changes the behavior of mapstructure +// to always squash embedded structs. +// +// Remainder Values +// +// If there are any unmapped keys in the source value, mapstructure by +// default will silently ignore them. You can error by setting ErrorUnused +// in DecoderConfig. If you're using Metadata you can also maintain a slice +// of the unused keys. +// +// You can also use the ",remain" suffix on your tag to collect all unused +// values in a map. The field with this tag MUST be a map type and should +// probably be a "map[string]interface{}" or "map[interface{}]interface{}". +// See example below: +// +// type Friend struct { +// Name string +// Other map[string]interface{} `mapstructure:",remain"` +// } +// +// Given the input below, Other would be populated with the other +// values that weren't used (everything but "name"): +// +// map[string]interface{}{ +// "name": "bob", +// "address": "123 Maple St.", +// } +// +// Omit Empty Values +// +// When decoding from a struct to any other value, you may use the +// ",omitempty" suffix on your tag to omit that value if it equates to +// the zero value. The zero value of all types is specified in the Go +// specification. +// +// For example, the zero type of a numeric type is zero ("0"). If the struct +// field value is zero and a numeric type, the field is empty, and it won't +// be encoded into the destination type. 
+// +// type Source struct { +// Age int `mapstructure:",omitempty"` +// } +// +// Unexported fields +// +// Since unexported (private) struct fields cannot be set outside the package +// where they are defined, the decoder will simply skip them. +// +// For this output type definition: +// +// type Exported struct { +// private string // this unexported field will be skipped +// Public string +// } +// +// Using this map as input: +// +// map[string]interface{}{ +// "private": "I will be ignored", +// "Public": "I made it through!", +// } +// +// The following struct will be decoded: +// +// type Exported struct { +// private: "" // field is left with an empty string (zero value) +// Public: "I made it through!" +// } +// +// Other Configuration +// +// mapstructure is highly configurable. See the DecoderConfig struct +// for other features and options that are supported. package mapstructure import ( @@ -21,10 +172,11 @@ import ( // data transformations. See "DecodeHook" in the DecoderConfig // struct. // -// The type should be DecodeHookFuncType or DecodeHookFuncKind. -// Either is accepted. Types are a superset of Kinds (Types can return -// Kinds) and are generally a richer thing to use, but Kinds are simpler -// if you only need those. +// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or +// DecodeHookFuncValue. +// Values are a superset of Types (Values can return types), and Types are a +// superset of Kinds (Types can return Kinds) and are generally a richer thing +// to use, but Kinds are simpler if you only need those. // // The reason DecodeHookFunc is multi-typed is for backwards compatibility: // we started with Kinds and then realized Types were the better solution, @@ -40,15 +192,22 @@ type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface // source and target types. type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) +// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target +// values. +type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) + // DecoderConfig is the configuration that is used to create a new decoder // and allows customization of various aspects of decoding. type DecoderConfig struct { // DecodeHook, if set, will be called before any decoding and any // type conversion (if WeaklyTypedInput is on). This lets you modify - // the values before they're set down onto the resulting struct. + // the values before they're set down onto the resulting struct. The + // DecodeHook is called for every map and value in the input. This means + // that if a struct has embedded fields with squash tags the decode hook + // is called only once with all of the input data, not once for each + // embedded struct. // - // If an error is returned, the entire decode will fail with that - // error. + // If an error is returned, the entire decode will fail with that error. DecodeHook DecodeHookFunc // If ErrorUnused is true, then it is an error for there to exist @@ -56,6 +215,12 @@ type DecoderConfig struct { // (extra keys). ErrorUnused bool + // If ErrorUnset is true, then it is an error for there to exist + // fields in the result that were not set in the decoding process + // (extra fields). This only applies to decoding to a struct. This + // will affect all nested structs as well. + ErrorUnset bool + // ZeroFields, if set to true, will zero fields before writing them. 
// For example, a map will be emptied before decoded values are put in // it. If this is false, a map will be merged. @@ -80,6 +245,14 @@ type DecoderConfig struct { // WeaklyTypedInput bool + // Squash will squash embedded structs. A squash tag may also be + // added to an individual struct field using a tag. For example: + // + // type Parent struct { + // Child `mapstructure:",squash"` + // } + Squash bool + // Metadata is the struct that will contain extra metadata about // the decoding. If this is nil, then no metadata will be tracked. Metadata *Metadata @@ -91,6 +264,15 @@ type DecoderConfig struct { // The tag name that mapstructure reads for field names. This // defaults to "mapstructure" TagName string + + // IgnoreUntaggedFields ignores all struct fields without explicit + // TagName, comparable to `mapstructure:"-"` as default behaviour. + IgnoreUntaggedFields bool + + // MatchName is the function used to match the map key to the struct + // field name or tag. Defaults to `strings.EqualFold`. This can be used + // to implement case-sensitive tag values, support snake casing, etc. + MatchName func(mapKey, fieldName string) bool } // A Decoder takes a raw interface value and turns it into structured @@ -112,6 +294,11 @@ type Metadata struct { // Unused is a slice of keys that were found in the raw value but // weren't decoded since there was no matching field in the result interface Unused []string + + // Unset is a slice of field names that were found in the result interface + // but weren't set in the decoding process since there was no matching value + // in the input + Unset []string } // Decode takes an input structure and uses reflection to translate it to @@ -203,12 +390,20 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) { if config.Metadata.Unused == nil { config.Metadata.Unused = make([]string, 0) } + + if config.Metadata.Unset == nil { + config.Metadata.Unset = make([]string, 0) + } } if config.TagName == "" { config.TagName = "mapstructure" } + if config.MatchName == nil { + config.MatchName = strings.EqualFold + } + result := &Decoder{ config: config, } @@ -261,9 +456,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e if d.config.DecodeHook != nil { // We have a DecodeHook, so let's pre-process the input. var err error - input, err = DecodeHookExec( - d.config.DecodeHook, - inputVal.Type(), outVal.Type(), input) + input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) if err != nil { return fmt.Errorf("error decoding '%s': %s", name, err) } @@ -271,6 +464,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e var err error outputKind := getKind(outVal) + addMetaKey := true switch outputKind { case reflect.Bool: err = d.decodeBool(name, input, outVal) @@ -289,7 +483,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e case reflect.Map: err = d.decodeMap(name, input, outVal) case reflect.Ptr: - err = d.decodePtr(name, input, outVal) + addMetaKey, err = d.decodePtr(name, input, outVal) case reflect.Slice: err = d.decodeSlice(name, input, outVal) case reflect.Array: @@ -303,7 +497,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e // If we reached here, then we successfully decoded SOMETHING, so // mark the key as used if we're tracking metainput. 
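// (Editor's note: aside, not part of the vendored patch.) The DecoderConfig
// fields introduced above (ErrorUnset, IgnoreUntaggedFields, MatchName) pair
// with the new Metadata.Unset slice. A hedged sketch; `out` and `input` are
// hypothetical placeholders:
//
//    var md mapstructure.Metadata
//    dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
//        Result:      &out,
//        ErrorUnused: true, // fail on input keys with no matching field
//        ErrorUnset:  true, // fail on struct fields the input never set
//        Metadata:    &md,  // md.Unused / md.Unset are filled by Decode
//    })
//    if err != nil {
//        panic(err)
//    }
//    if err := dec.Decode(input); err != nil {
//        panic(err)
//    }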
- if d.config.Metadata != nil && name != "" { + if addMetaKey && d.config.Metadata != nil && name != "" { d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) } @@ -314,7 +508,34 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e // value to "data" of that type. func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { if val.IsValid() && val.Elem().IsValid() { - return d.decode(name, data, val.Elem()) + elem := val.Elem() + + // If we can't address this element, then its not writable. Instead, + // we make a copy of the value (which is a pointer and therefore + // writable), decode into that, and replace the whole value. + copied := false + if !elem.CanAddr() { + copied = true + + // Make *T + copy := reflect.New(elem.Type()) + + // *T = elem + copy.Elem().Set(elem) + + // Set elem so we decode into it + elem = copy + } + + // Decode. If we have an error then return. We also return right + // away if we're not a copy because that means we decoded directly. + if err := d.decode(name, data, elem); err != nil || !copied { + return err + } + + // If we're a copy, we need to set te final result + val.Set(elem.Elem()) + return nil } dataVal := reflect.ValueOf(data) @@ -386,8 +607,8 @@ func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) if !converted { return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } return nil @@ -412,7 +633,12 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er val.SetInt(0) } case dataKind == reflect.String && d.config.WeaklyTypedInput: - i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseInt(str, 0, val.Type().Bits()) if err == nil { val.SetInt(i) } else { @@ -428,8 +654,8 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er val.SetInt(i) default: return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } return nil @@ -438,6 +664,7 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { dataVal := reflect.Indirect(reflect.ValueOf(data)) dataKind := getKind(dataVal) + dataType := dataVal.Type() switch { case dataKind == reflect.Int: @@ -463,16 +690,29 @@ func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) e val.SetUint(0) } case dataKind == reflect.String && d.config.WeaklyTypedInput: - i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseUint(str, 0, val.Type().Bits()) if err == nil { val.SetUint(i) } else { return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := strconv.ParseUint(string(jn), 0, 64) + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetUint(i) default: return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, 
val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } return nil @@ -502,8 +742,8 @@ func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) e } default: return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } return nil @@ -528,7 +768,12 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) val.SetFloat(0) } case dataKind == reflect.String && d.config.WeaklyTypedInput: - f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) + str := dataVal.String() + if str == "" { + str = "0" + } + + f, err := strconv.ParseFloat(str, val.Type().Bits()) if err == nil { val.SetFloat(f) } else { @@ -544,8 +789,8 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) val.SetFloat(i) default: return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } return nil @@ -596,7 +841,7 @@ func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val ref for i := 0; i < dataVal.Len(); i++ { err := d.decode( - fmt.Sprintf("%s[%d]", name, i), + name+"["+strconv.Itoa(i)+"]", dataVal.Index(i).Interface(), val) if err != nil { return err @@ -629,7 +874,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle } for _, k := range dataVal.MapKeys() { - fieldName := fmt.Sprintf("%s[%s]", name, k) + fieldName := name + "[" + k.String() + "]" // First decode the key into the proper type currentKey := reflect.Indirect(reflect.New(valKeyType)) @@ -678,27 +923,48 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re } tagValue := f.Tag.Get(d.config.TagName) - tagParts := strings.Split(tagValue, ",") + keyName := f.Name + + if tagValue == "" && d.config.IgnoreUntaggedFields { + continue + } + + // If Squash is set in the config, we squash the field down. + squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous + + v = dereferencePtrToStructIfNeeded(v, d.config.TagName) // Determine the name of the key in the map - keyName := f.Name - if tagParts[0] != "" { - if tagParts[0] == "-" { + if index := strings.Index(tagValue, ","); index != -1 { + if tagValue[:index] == "-" { + continue + } + // If "omitempty" is specified in the tag, it ignores empty values. + if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { continue } - keyName = tagParts[0] - } - // If "squash" is specified in the tag, we squash the field down. - squash := false - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break + // If "squash" is specified in the tag, we squash the field down. + squash = squash || strings.Index(tagValue[index+1:], "squash") != -1 + if squash { + // When squashing, the embedded type can be a pointer to a struct. 
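// (Editor's note: aside, not part of the vendored patch.) The rewritten tag
// parsing in this hunk implements two changelog items: ",omitempty" (checked
// via isEmptyValue above) skips zero-valued fields when encoding a struct
// into a map, and ",squash" now also accepts an embedded struct pointer
// [GH-205]:
//
//    type Person struct{ Name string }
//
//    type Friend struct {
//        *Person `mapstructure:",squash"`        // pointer embeds squash too
//        Age     int `mapstructure:",omitempty"` // dropped when zero
//    }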
+ if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { + v = v.Elem() + } + + // The final type must be a struct + if v.Kind() != reflect.Struct { + return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) + } } - } - if squash && v.Kind() != reflect.Struct { - return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) + if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { + keyName = keyNameTagValue + } + } else if len(tagValue) > 0 { + if tagValue == "-" { + continue + } + keyName = tagValue } switch v.Kind() { @@ -713,11 +979,22 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re mType := reflect.MapOf(vKeyType, vElemType) vMap := reflect.MakeMap(mType) - err := d.decode(keyName, x.Interface(), vMap) + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(vMap.Type()) + reflect.Indirect(addrVal).Set(vMap) + + err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) if err != nil { return err } + // the underlying map may have been completely overwritten so pull + // it indirectly out of the enclosing value. + vMap = reflect.Indirect(addrVal) + if squash { for _, k := range vMap.MapKeys() { valMap.SetMapIndex(k, vMap.MapIndex(k)) @@ -738,7 +1015,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re return nil } -func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) { // If the input data is nil, then we want to just set the output // pointer to be nil as well. isNil := data == nil @@ -759,7 +1036,7 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er val.Set(nilValue) } - return nil + return true, nil } // Create an element of the concrete (non pointer) type and decode @@ -773,16 +1050,16 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er } if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { - return err + return false, err } val.Set(realVal) } else { if err := d.decode(name, data, reflect.Indirect(val)); err != nil { - return err + return false, err } } - return nil + return false, nil } func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { @@ -791,8 +1068,8 @@ func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) e dataVal := reflect.Indirect(reflect.ValueOf(data)) if val.Type() != dataVal.Type() { return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } val.Set(dataVal) return nil @@ -805,8 +1082,8 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) valElemType := valType.Elem() sliceType := reflect.SliceOf(valElemType) - valSlice := val - if valSlice.IsNil() || d.config.ZeroFields { + // If we have a non array/slice type then we first attempt to convert. 
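// (Editor's note: aside, not part of the vendored patch.) The restructured
// checks below also change empty-slice semantics: a nil source slice stays
// nil, while an empty but non-nil slice is now preserved rather than being
// skipped. With WeaklyTypedInput, a lone scalar is still promoted to a
// one-element slice; `out` is a hypothetical target:
//
//    var out struct{ Tags []string }
//    dec, _ := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
//        WeaklyTypedInput: true,
//        Result:           &out,
//    })
//    _ = dec.Decode(map[string]interface{}{"tags": "solo"})
//    // out.Tags == []string{"solo"}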
+ if dataValKind != reflect.Array && dataValKind != reflect.Slice { if d.config.WeaklyTypedInput { switch { // Slice and array we use the normal logic @@ -833,18 +1110,17 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) } } - // Check input type - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - - } + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + } - // If the input value is empty, then don't allocate since non-nil != nil - if dataVal.Len() == 0 { - return nil - } + // If the input value is nil, then don't allocate since empty != nil + if dataValKind != reflect.Array && dataVal.IsNil() { + return nil + } + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { // Make a new slice to hold our result, same size as the original data. valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) } @@ -859,7 +1135,7 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) } currentField := valSlice.Index(i) - fieldName := fmt.Sprintf("%s[%d]", name, i) + fieldName := name + "[" + strconv.Itoa(i) + "]" if err := d.decode(fieldName, currentData, currentField); err != nil { errors = appendErrors(errors, err) } @@ -926,7 +1202,7 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) currentData := dataVal.Index(i).Interface() currentField := valArray.Index(i) - fieldName := fmt.Sprintf("%s[%d]", name, i) + fieldName := name + "[" + strconv.Itoa(i) + "]" if err := d.decode(fieldName, currentData, currentField); err != nil { errors = appendErrors(errors, err) } @@ -962,13 +1238,23 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) // Not the most efficient way to do this but we can optimize later if // we want to. To convert from struct to struct we go to map first // as an intermediary. - m := make(map[string]interface{}) - mval := reflect.Indirect(reflect.ValueOf(&m)) - if err := d.decodeMapFromStruct(name, dataVal, mval, mval); err != nil { + + // Make a new map to hold our result + mapType := reflect.TypeOf((map[string]interface{})(nil)) + mval := reflect.MakeMap(mapType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(mval.Type()) + + reflect.Indirect(addrVal).Set(mval) + if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { return err } - result := d.decodeStructFromMap(name, mval, val) + result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) return result default: @@ -991,6 +1277,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e dataValKeysUnused[dataValKey.Interface()] = struct{}{} } + targetValKeysUnused := make(map[interface{}]struct{}) errors := make([]string, 0) // This slice will keep track of all the structs we'll be decoding. @@ -1005,6 +1292,11 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e field reflect.StructField val reflect.Value } + + // remainField is set to a valid field set with the "remain" tag if + // we are keeping track of remaining values. 
+ var remainField *field + fields := []field{} for len(structs) > 0 { structVal := structs[0] @@ -1014,30 +1306,47 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e for i := 0; i < structType.NumField(); i++ { fieldType := structType.Field(i) - fieldKind := fieldType.Type.Kind() + fieldVal := structVal.Field(i) + if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { + // Handle embedded struct pointers as embedded structs. + fieldVal = fieldVal.Elem() + } // If "squash" is specified in the tag, we squash the field down. - squash := false + squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous + remain := false + + // We always parse the tags cause we're looking for other tags too tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") for _, tag := range tagParts[1:] { if tag == "squash" { squash = true break } + + if tag == "remain" { + remain = true + break + } } if squash { - if fieldKind != reflect.Struct { + if fieldVal.Kind() != reflect.Struct { errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) } else { - structs = append(structs, structVal.FieldByName(fieldType.Name)) + structs = append(structs, fieldVal) } continue } - // Normal struct field, store it away - fields = append(fields, field{fieldType, structVal.Field(i)}) + // Build our field + if remain { + remainField = &field{fieldType, fieldVal} + } else { + // Normal struct field, store it away + fields = append(fields, field{fieldType, fieldVal}) + } } } @@ -1064,7 +1373,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e continue } - if strings.EqualFold(mK, fieldName) { + if d.config.MatchName(mK, fieldName) { rawMapKey = dataValKey rawMapVal = dataVal.MapIndex(dataValKey) break @@ -1073,14 +1382,12 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e if !rawMapVal.IsValid() { // There was no matching key in the map for the value in - // the struct. Just ignore. + // the struct. Remember it for potential errors and metadata. + targetValKeysUnused[fieldName] = struct{}{} continue } } - // Delete the key we're using from the unused map so we stop tracking - delete(dataValKeysUnused, rawMapKey.Interface()) - if !fieldValue.IsValid() { // This should never happen panic("field is not valid") @@ -1092,10 +1399,13 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e continue } + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + // If the name is empty string, then we're at the root, and we // don't dot-join the fields. if name != "" { - fieldName = fmt.Sprintf("%s.%s", name, fieldName) + fieldName = name + "." + fieldName } if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { @@ -1103,6 +1413,25 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e } } + // If we have a "remain"-tagged field and we have unused keys then + // we put the unused keys directly into the remain field. 
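// (Editor's note: aside, not part of the vendored patch.) The remainField
// handling below backs the ",remain" tag documented in the package comment
// earlier in this patch: every input key that no struct field claimed is
// decoded into the tagged map. Sketch, using the doc's Friend example:
//
//    type Friend struct {
//        Name  string
//        Other map[string]interface{} `mapstructure:",remain"`
//    }
//
//    // Decoding {"name": "bob", "address": "123 Maple St."} leaves
//    // Other == map[string]interface{}{"address": "123 Maple St."}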
+ if remainField != nil && len(dataValKeysUnused) > 0 { + // Build a map of only the unused values + remain := map[interface{}]interface{}{} + for key := range dataValKeysUnused { + remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface() + } + + // Decode it as-if we were just decoding this map onto our map. + if err := d.decodeMap(name, remain, remainField.val); err != nil { + errors = appendErrors(errors, err) + } + + // Set the map to nil so we have none so that the next check will + // not error (ErrorUnused) + dataValKeysUnused = nil + } + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { keys := make([]string, 0, len(dataValKeysUnused)) for rawKey := range dataValKeysUnused { @@ -1114,6 +1443,17 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e errors = appendErrors(errors, err) } + if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { + keys := make([]string, 0, len(targetValKeysUnused)) + for rawKey := range targetValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + if len(errors) > 0 { return &Error{errors} } @@ -1123,16 +1463,42 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e for rawKey := range dataValKeysUnused { key := rawKey.(string) if name != "" { - key = fmt.Sprintf("%s.%s", name, key) + key = name + "." + key } d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) } + for rawKey := range targetValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) + } } return nil } +func isEmptyValue(v reflect.Value) bool { + switch getKind(v) { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + func getKind(val reflect.Value) reflect.Kind { kind := val.Kind() @@ -1147,3 +1513,28 @@ func getKind(val reflect.Value) reflect.Kind { return kind } } + +func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields + return true + } + if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside + return true + } + } + return false +} + +func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return v + } + deref := v.Elem() + derefT := deref.Type() + if isStructTypeConvertibleToMap(derefT, true, tagName) { + return deref + } + return v +} diff --git a/vendor/github.com/montanaflynn/stats/.gitignore b/vendor/github.com/montanaflynn/stats/.gitignore new file mode 100644 index 000000000..e0a38e1c1 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/.gitignore @@ -0,0 +1,5 @@ +coverage.out +release-notes.txt +.directory +.chglog +.vscode \ No newline at end of file diff --git 
a/vendor/github.com/montanaflynn/stats/.travis.yml b/vendor/github.com/montanaflynn/stats/.travis.yml new file mode 100644 index 000000000..28118fbfe --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/.travis.yml @@ -0,0 +1,29 @@ +language: go +go: + - "1.7" + - "1.8" + - "1.9" + - "1.10" + - "1.11" + - "1.12" + - "1.13" + - "1.14" + - "1.15" + - "1.16" + - stable + - master +arch: + - amd64 + - arm64 +before_install: + - go get github.com/mattn/goveralls +script: + - go test -v -covermode=count -coverprofile=coverage.out +after_success: + - $GOPATH/bin/goveralls -coverprofile=coverage.out -service=travis-ci +notifications: + email: + recipients: + - montana@montanaflynn.me + on_success: change + on_failure: always diff --git a/vendor/github.com/montanaflynn/stats/CHANGELOG.md b/vendor/github.com/montanaflynn/stats/CHANGELOG.md new file mode 100644 index 000000000..ad842a540 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/CHANGELOG.md @@ -0,0 +1,598 @@ + + +## [v0.6.6](https://github.com/montanaflynn/stats/compare/v0.6.5...v0.6.6) (2021-04-26) + +### Add + +* Add support for string and io.Reader in LoadRawData (pr [#68](https://github.com/montanaflynn/stats/issues/68)) +* Add latest versions of Go to test against + +### Use + +* Use math.Sqrt in StandardDeviation (PR [#64](https://github.com/montanaflynn/stats/issues/64)) + + + +## [v0.6.5](https://github.com/montanaflynn/stats/compare/v0.6.4...v0.6.5) (2021-02-21) + +### Add + +* Add Float64Data.Quartiles documentation + +### Update + +* Update changelog with v0.6.5 changes + + + +## [v0.6.4](https://github.com/montanaflynn/stats/compare/v0.6.3...v0.6.4) (2021-02-21) + +### Add + +* Add Quartiles method to Float64Data type (issue [#60](https://github.com/montanaflynn/stats/issues/60)) + +### Fix + +* Fix make release changelog command and add changelog history +* Fix failing tests due to precision errors on arm64 ([#58](https://github.com/montanaflynn/stats/issues/58)) + +### Update + +* Update changelog with v0.6.4 changes +* Update README.md links to CHANGELOG.md and DOCUMENTATION.md +* Update README.md and Makefile with new release commands +* Update changelog with v0.6.4 changes +* Update examples directory to include a README.md used for synopsis +* Update go.mod to include go version where modules are enabled by default +* Update changelog with v0.6.3 changes + + + +## [v0.6.3](https://github.com/montanaflynn/stats/compare/v0.6.2...v0.6.3) (2020-02-18) + +### Add + +* Add creating and committing changelog to Makefile release directive +* Add release-notes.txt and .chglog directory to .gitignore + +### Update + +* Update exported tests to use import for better example documentation +* Update documentation using godoc2md +* Update changelog with v0.6.2 release + + + +## [v0.6.2](https://github.com/montanaflynn/stats/compare/v0.6.1...v0.6.2) (2020-02-18) + +### Fix + +* Fix linting errcheck warnings in go benchmarks + +### Update + +* Update Makefile release directive to use correct release name + + + +## [v0.6.1](https://github.com/montanaflynn/stats/compare/v0.6.0...v0.6.1) (2020-02-18) + +### Add + +* Add StableSample function signature to readme + +### Fix + +* Fix linting warnings for normal distribution functions formatting and tests + +### Update + +* Update documentation links and rename DOC.md to DOCUMENTATION.md +* Update README with link to pkg.go.dev reference and release section +* Update Makefile with new changelog, docs, and release directives +* Update DOC.md links to GitHub source code +* Update doc.go 
comment and add DOC.md package reference file +* Update changelog using git-chglog + + + +## [v0.6.0](https://github.com/montanaflynn/stats/compare/v0.5.0...v0.6.0) (2020-02-17) + +### Add + +* Add Normal Distribution Functions ([#56](https://github.com/montanaflynn/stats/issues/56)) +* Add previous versions of Go to travis CI config +* Add check for distinct values in Mode function ([#51](https://github.com/montanaflynn/stats/issues/51)) +* Add StableSample function ([#48](https://github.com/montanaflynn/stats/issues/48)) +* Add doc.go file to show description and usage on godoc.org +* Add comments to new error and legacy error variables +* Add ExampleRound function to tests +* Add go.mod file for module support +* Add Sigmoid, SoftMax and Entropy methods and tests +* Add Entropy documentation, example and benchmarks +* Add Entropy function ([#44](https://github.com/montanaflynn/stats/issues/44)) + +### Fix + +* Fix percentile when only one element ([#47](https://github.com/montanaflynn/stats/issues/47)) +* Fix AutoCorrelation name in comments and remove unneeded Sprintf + +### Improve + +* Improve documentation section with command comments + +### Remove + +* Remove very old versions of Go in travis CI config +* Remove boolean comparison to get rid of gometalinter warning + +### Update + +* Update license dates +* Update Distance functions signatures to use Float64Data +* Update Sigmoid examples +* Update error names with backward compatibility + +### Use + +* Use relative link to examples/main.go +* Use a single var block for exported errors + + + +## [v0.5.0](https://github.com/montanaflynn/stats/compare/v0.4.0...v0.5.0) (2019-01-16) + +### Add + +* Add Sigmoid and Softmax functions + +### Fix + +* Fix syntax highlighting and add CumulativeSum func + + + +## [v0.4.0](https://github.com/montanaflynn/stats/compare/0.3.0...v0.4.0) (2019-01-14) + +### Add + +* Add goreport badge and documentation section to README.md +* Add Examples to test files +* Add AutoCorrelation and nist tests +* Add String method to statsErr type +* Add Y coordinate error for ExponentialRegression +* Add syntax highlighting ([#43](https://github.com/montanaflynn/stats/issues/43)) +* Add CumulativeSum ([#40](https://github.com/montanaflynn/stats/issues/40)) +* Add more tests and rename distance files +* Add coverage and benchmarks to azure pipeline +* Add go tests to azure pipeline + +### Change + +* Change travis tip alias to master +* Change codecov to coveralls for code coverage + +### Fix + +* Fix a few lint warnings +* Fix example error + +### Improve + +* Improve test coverage of distance functions + +### Only + +* Only run travis on stable and tip versions +* Only check code coverage on tip + +### Remove + +* Remove azure CI pipeline +* Remove unnecessary type conversions + +### Return + +* Return EmptyInputErr instead of EmptyInput + +### Set + +* Set up CI with Azure Pipelines + + + +## [0.3.0](https://github.com/montanaflynn/stats/compare/0.2.0...0.3.0) (2017-12-02) + +### Add + +* Add Chebyshev, Manhattan, Euclidean and Minkowski distance functions ([#35](https://github.com/montanaflynn/stats/issues/35)) +* Add function for computing chebyshev distance. 
([#34](https://github.com/montanaflynn/stats/issues/34)) +* Add support for time.Duration +* Add LoadRawData to docs and examples +* Add unit test for edge case that wasn't covered +* Add unit tests for edge cases that weren't covered +* Add pearson alias delegating to correlation +* Add CovariancePopulation to Float64Data +* Add pearson product-moment correlation coefficient +* Add population covariance +* Add random slice benchmarks +* Add all applicable functions as methods to Float64Data type +* Add MIT license badge +* Add link to examples/methods.go +* Add Protips for usage and documentation sections +* Add tests for rounding up +* Add webdoc target and remove linting from test target +* Add example usage and consolidate contributing information + +### Added + +* Added MedianAbsoluteDeviation + +### Annotation + +* Annotation spelling error + +### Auto + +* auto commit +* auto commit + +### Calculate + +* Calculate correlation with sdev and covp + +### Clean + +* Clean up README.md and add info for offline docs + +### Consolidated + +* Consolidated all error values. + +### Fix + +* Fix Percentile logic +* Fix InterQuartileRange method test +* Fix zero percent bug and add test +* Fix usage example output typos + +### Improve + +* Improve bounds checking in Percentile +* Improve error log messaging + +### Imput + +* Imput -> Input + +### Include + +* Include alternative way to set Float64Data in example + +### Make + +* Make various changes to README.md + +### Merge + +* Merge branch 'master' of github.com:montanaflynn/stats +* Merge master + +### Mode + +* Mode calculation fix and tests + +### Realized + +* Realized the obvious efficiency gains of ignoring the unique numbers at the beginning of the slice. Benchmark joy ensued. + +### Refactor + +* Refactor testing of Round() +* Refactor setting Coordinate y field using Exp in place of Pow +* Refactor Makefile and add docs target + +### Remove + +* Remove deep links to types and functions + +### Rename + +* Rename file from types to data + +### Retrieve + +* Retrieve InterQuartileRange for the Float64Data. 
+ +### Split + +* Split up stats.go into separate files + +### Support + +* Support more types on LoadRawData() ([#36](https://github.com/montanaflynn/stats/issues/36)) + +### Switch + +* Switch default and check targets + +### Update + +* Update Readme +* Update example methods and some text +* Update README and include Float64Data type method examples + +### Pull Requests + +* Merge pull request [#32](https://github.com/montanaflynn/stats/issues/32) from a-robinson/percentile +* Merge pull request [#30](https://github.com/montanaflynn/stats/issues/30) from montanaflynn/fix-test +* Merge pull request [#29](https://github.com/montanaflynn/stats/issues/29) from edupsousa/master +* Merge pull request [#27](https://github.com/montanaflynn/stats/issues/27) from andrey-yantsen/fix-percentile-out-of-bounds +* Merge pull request [#25](https://github.com/montanaflynn/stats/issues/25) from kazhuravlev/patch-1 +* Merge pull request [#22](https://github.com/montanaflynn/stats/issues/22) from JanBerktold/time-duration +* Merge pull request [#24](https://github.com/montanaflynn/stats/issues/24) from alouche/master +* Merge pull request [#21](https://github.com/montanaflynn/stats/issues/21) from brydavis/master +* Merge pull request [#19](https://github.com/montanaflynn/stats/issues/19) from ginodeis/mode-bug +* Merge pull request [#17](https://github.com/montanaflynn/stats/issues/17) from Kunde21/master +* Merge pull request [#3](https://github.com/montanaflynn/stats/issues/3) from montanaflynn/master +* Merge pull request [#2](https://github.com/montanaflynn/stats/issues/2) from montanaflynn/master +* Merge pull request [#13](https://github.com/montanaflynn/stats/issues/13) from toashd/pearson +* Merge pull request [#12](https://github.com/montanaflynn/stats/issues/12) from alixaxel/MAD +* Merge pull request [#1](https://github.com/montanaflynn/stats/issues/1) from montanaflynn/master +* Merge pull request [#11](https://github.com/montanaflynn/stats/issues/11) from Kunde21/modeMemReduce +* Merge pull request [#10](https://github.com/montanaflynn/stats/issues/10) from Kunde21/ModeRewrite + + + +## [0.2.0](https://github.com/montanaflynn/stats/compare/0.1.0...0.2.0) (2015-10-14) + +### Add + +* Add Makefile with gometalinter, testing, benchmarking and coverage report targets +* Add comments describing functions and structs +* Add Correlation func +* Add Covariance func +* Add tests for new function shortcuts +* Add StandardDeviation function as a shortcut to StandardDeviationPopulation +* Add Float64Data and Series types + +### Change + +* Change Sample to return a standard []float64 type + +### Fix + +* Fix broken link to Makefile +* Fix broken link and simplify code coverage reporting command +* Fix go vet warning about printf type placeholder +* Fix failing codecov test coverage reporting +* Fix link to CHANGELOG.md + +### Fixed + +* Fixed typographical error, changed accomdate to accommodate in README. 
+ +### Include + +* Include Variance and StandardDeviation shortcuts + +### Pass + +* Pass gometalinter + +### Refactor + +* Refactor Variance function to be the same as population variance + +### Release + +* Release version 0.2.0 + +### Remove + +* Remove unneeded do packages and update cover URL +* Remove sudo from pip install + +### Reorder + +* Reorder functions and sections + +### Revert + +* Revert to legacy containers to preserve go1.1 testing + +### Switch + +* Switch from legacy to container-based CI infrastructure + +### Update + +* Update contributing instructions and mention Makefile + +### Pull Requests + +* Merge pull request [#5](https://github.com/montanaflynn/stats/issues/5) from orthographic-pedant/spell_check/accommodate + + + +## [0.1.0](https://github.com/montanaflynn/stats/compare/0.0.9...0.1.0) (2015-08-19) + +### Add + +* Add CONTRIBUTING.md + +### Rename + +* Rename functions while preserving backwards compatibility + + + +## 0.0.9 (2015-08-18) + +### Add + +* Add HarmonicMean func +* Add GeometricMean func +* Add .gitignore to avoid commiting test coverage report +* Add Outliers stuct and QuantileOutliers func +* Add Interquartile Range, Midhinge and Trimean examples +* Add Trimean +* Add Midhinge +* Add Inter Quartile Range +* Add a unit test to check for an empty slice error +* Add Quantiles struct and Quantile func +* Add more tests and fix a typo +* Add Golang 1.5 to build tests +* Add a standard MIT license file +* Add basic benchmarking +* Add regression models +* Add codecov token +* Add codecov +* Add check for slices with a single item +* Add coverage tests +* Add back previous Go versions to Travis CI +* Add Travis CI +* Add GoDoc badge +* Add Percentile and Float64ToInt functions +* Add another rounding test for whole numbers +* Add build status badge +* Add code coverage badge +* Add test for NaN, achieving 100% code coverage +* Add round function +* Add standard deviation function +* Add sum function + +### Add + +* add tests for sample +* add sample + +### Added + +* Added sample and population variance and deviation functions +* Added README + +### Adjust + +* Adjust API ordering + +### Avoid + +* Avoid unintended consequence of using sort + +### Better + +* Better performing min/max +* Better description + +### Change + +* Change package path to potentially fix a bug in earlier versions of Go + +### Clean + +* Clean up README and add some more information +* Clean up test error + +### Consistent + +* Consistent empty slice error messages +* Consistent var naming +* Consistent func declaration + +### Convert + +* Convert ints to floats + +### Duplicate + +* Duplicate packages for all versions + +### Export + +* Export Coordinate struct fields + +### First + +* First commit + +### Fix + +* Fix copy pasta mistake testing the wrong function +* Fix error message +* Fix usage output and edit API doc section +* Fix testing edgecase where map was in wrong order +* Fix usage example +* Fix usage examples + +### Include + +* Include the Nearest Rank method of calculating percentiles + +### More + +* More commenting + +### Move + +* Move GoDoc link to top + +### Redirect + +* Redirect kills newer versions of Go + +### Refactor + +* Refactor code and error checking + +### Remove + +* Remove unnecassary typecasting in sum func +* Remove cover since it doesn't work for later versions of go +* Remove golint and gocoveralls + +### Rename + +* Rename StandardDev to StdDev +* Rename StandardDev to StdDev + +### Return + +* Return errors for all functions + +### Run 
+ +* Run go fmt to clean up formatting + +### Simplify + +* Simplify min/max function + +### Start + +* Start with minimal tests + +### Switch + +* Switch wercker to travis and update todos + +### Table + +* table testing style + +### Update + +* Update README and move the example main.go into it's own file +* Update TODO list +* Update README +* Update usage examples and todos + +### Use + +* Use codecov the recommended way +* Use correct string formatting types + +### Pull Requests + +* Merge pull request [#4](https://github.com/montanaflynn/stats/issues/4) from saromanov/sample + diff --git a/vendor/github.com/montanaflynn/stats/DOCUMENTATION.md b/vendor/github.com/montanaflynn/stats/DOCUMENTATION.md new file mode 100644 index 000000000..b56788946 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/DOCUMENTATION.md @@ -0,0 +1,1237 @@ + + +# stats +`import "github.com/montanaflynn/stats"` + +* [Overview](#pkg-overview) +* [Index](#pkg-index) +* [Examples](#pkg-examples) +* [Subdirectories](#pkg-subdirectories) + +## Overview +Package stats is a well tested and comprehensive +statistics library package with no dependencies. + +Example Usage: + + + // start with some source data to use + data := []float64{1.0, 2.1, 3.2, 4.823, 4.1, 5.8} + + // you could also use different types like this + // data := stats.LoadRawData([]int{1, 2, 3, 4, 5}) + // data := stats.LoadRawData([]interface{}{1.1, "2", 3}) + // etc... + + median, _ := stats.Median(data) + fmt.Println(median) // 3.65 + + roundedMedian, _ := stats.Round(median, 0) + fmt.Println(roundedMedian) // 4 + +MIT License Copyright (c) 2014-2020 Montana Flynn (https://montanaflynn.com) + + + + +## Index +* [Variables](#pkg-variables) +* [func AutoCorrelation(data Float64Data, lags int) (float64, error)](#AutoCorrelation) +* [func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)](#ChebyshevDistance) +* [func Correlation(data1, data2 Float64Data) (float64, error)](#Correlation) +* [func Covariance(data1, data2 Float64Data) (float64, error)](#Covariance) +* [func CovariancePopulation(data1, data2 Float64Data) (float64, error)](#CovariancePopulation) +* [func CumulativeSum(input Float64Data) ([]float64, error)](#CumulativeSum) +* [func Entropy(input Float64Data) (float64, error)](#Entropy) +* [func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)](#EuclideanDistance) +* [func GeometricMean(input Float64Data) (float64, error)](#GeometricMean) +* [func HarmonicMean(input Float64Data) (float64, error)](#HarmonicMean) +* [func InterQuartileRange(input Float64Data) (float64, error)](#InterQuartileRange) +* [func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)](#ManhattanDistance) +* [func Max(input Float64Data) (max float64, err error)](#Max) +* [func Mean(input Float64Data) (float64, error)](#Mean) +* [func Median(input Float64Data) (median float64, err error)](#Median) +* [func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error)](#MedianAbsoluteDeviation) +* [func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error)](#MedianAbsoluteDeviationPopulation) +* [func Midhinge(input Float64Data) (float64, error)](#Midhinge) +* [func Min(input Float64Data) (min float64, err error)](#Min) +* [func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error)](#MinkowskiDistance) +* [func Mode(input Float64Data) (mode []float64, err error)](#Mode) +* [func Ncr(n, r int) 
int](#Ncr) +* [func NormBoxMullerRvs(loc float64, scale float64, size int) []float64](#NormBoxMullerRvs) +* [func NormCdf(x float64, loc float64, scale float64) float64](#NormCdf) +* [func NormEntropy(loc float64, scale float64) float64](#NormEntropy) +* [func NormFit(data []float64) [2]float64](#NormFit) +* [func NormInterval(alpha float64, loc float64, scale float64) [2]float64](#NormInterval) +* [func NormIsf(p float64, loc float64, scale float64) (x float64)](#NormIsf) +* [func NormLogCdf(x float64, loc float64, scale float64) float64](#NormLogCdf) +* [func NormLogPdf(x float64, loc float64, scale float64) float64](#NormLogPdf) +* [func NormLogSf(x float64, loc float64, scale float64) float64](#NormLogSf) +* [func NormMean(loc float64, scale float64) float64](#NormMean) +* [func NormMedian(loc float64, scale float64) float64](#NormMedian) +* [func NormMoment(n int, loc float64, scale float64) float64](#NormMoment) +* [func NormPdf(x float64, loc float64, scale float64) float64](#NormPdf) +* [func NormPpf(p float64, loc float64, scale float64) (x float64)](#NormPpf) +* [func NormPpfRvs(loc float64, scale float64, size int) []float64](#NormPpfRvs) +* [func NormSf(x float64, loc float64, scale float64) float64](#NormSf) +* [func NormStats(loc float64, scale float64, moments string) []float64](#NormStats) +* [func NormStd(loc float64, scale float64) float64](#NormStd) +* [func NormVar(loc float64, scale float64) float64](#NormVar) +* [func Pearson(data1, data2 Float64Data) (float64, error)](#Pearson) +* [func Percentile(input Float64Data, percent float64) (percentile float64, err error)](#Percentile) +* [func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error)](#PercentileNearestRank) +* [func PopulationVariance(input Float64Data) (pvar float64, err error)](#PopulationVariance) +* [func Round(input float64, places int) (rounded float64, err error)](#Round) +* [func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error)](#Sample) +* [func SampleVariance(input Float64Data) (svar float64, err error)](#SampleVariance) +* [func Sigmoid(input Float64Data) ([]float64, error)](#Sigmoid) +* [func SoftMax(input Float64Data) ([]float64, error)](#SoftMax) +* [func StableSample(input Float64Data, takenum int) ([]float64, error)](#StableSample) +* [func StandardDeviation(input Float64Data) (sdev float64, err error)](#StandardDeviation) +* [func StandardDeviationPopulation(input Float64Data) (sdev float64, err error)](#StandardDeviationPopulation) +* [func StandardDeviationSample(input Float64Data) (sdev float64, err error)](#StandardDeviationSample) +* [func StdDevP(input Float64Data) (sdev float64, err error)](#StdDevP) +* [func StdDevS(input Float64Data) (sdev float64, err error)](#StdDevS) +* [func Sum(input Float64Data) (sum float64, err error)](#Sum) +* [func Trimean(input Float64Data) (float64, error)](#Trimean) +* [func VarP(input Float64Data) (sdev float64, err error)](#VarP) +* [func VarS(input Float64Data) (sdev float64, err error)](#VarS) +* [func Variance(input Float64Data) (sdev float64, err error)](#Variance) +* [type Coordinate](#Coordinate) + * [func ExpReg(s []Coordinate) (regressions []Coordinate, err error)](#ExpReg) + * [func LinReg(s []Coordinate) (regressions []Coordinate, err error)](#LinReg) + * [func LogReg(s []Coordinate) (regressions []Coordinate, err error)](#LogReg) +* [type Float64Data](#Float64Data) + * [func LoadRawData(raw interface{}) (f Float64Data)](#LoadRawData) + * [func (f Float64Data) AutoCorrelation(lags 
int) (float64, error)](#Float64Data.AutoCorrelation) + * [func (f Float64Data) Correlation(d Float64Data) (float64, error)](#Float64Data.Correlation) + * [func (f Float64Data) Covariance(d Float64Data) (float64, error)](#Float64Data.Covariance) + * [func (f Float64Data) CovariancePopulation(d Float64Data) (float64, error)](#Float64Data.CovariancePopulation) + * [func (f Float64Data) CumulativeSum() ([]float64, error)](#Float64Data.CumulativeSum) + * [func (f Float64Data) Entropy() (float64, error)](#Float64Data.Entropy) + * [func (f Float64Data) GeometricMean() (float64, error)](#Float64Data.GeometricMean) + * [func (f Float64Data) Get(i int) float64](#Float64Data.Get) + * [func (f Float64Data) HarmonicMean() (float64, error)](#Float64Data.HarmonicMean) + * [func (f Float64Data) InterQuartileRange() (float64, error)](#Float64Data.InterQuartileRange) + * [func (f Float64Data) Len() int](#Float64Data.Len) + * [func (f Float64Data) Less(i, j int) bool](#Float64Data.Less) + * [func (f Float64Data) Max() (float64, error)](#Float64Data.Max) + * [func (f Float64Data) Mean() (float64, error)](#Float64Data.Mean) + * [func (f Float64Data) Median() (float64, error)](#Float64Data.Median) + * [func (f Float64Data) MedianAbsoluteDeviation() (float64, error)](#Float64Data.MedianAbsoluteDeviation) + * [func (f Float64Data) MedianAbsoluteDeviationPopulation() (float64, error)](#Float64Data.MedianAbsoluteDeviationPopulation) + * [func (f Float64Data) Midhinge(d Float64Data) (float64, error)](#Float64Data.Midhinge) + * [func (f Float64Data) Min() (float64, error)](#Float64Data.Min) + * [func (f Float64Data) Mode() ([]float64, error)](#Float64Data.Mode) + * [func (f Float64Data) Pearson(d Float64Data) (float64, error)](#Float64Data.Pearson) + * [func (f Float64Data) Percentile(p float64) (float64, error)](#Float64Data.Percentile) + * [func (f Float64Data) PercentileNearestRank(p float64) (float64, error)](#Float64Data.PercentileNearestRank) + * [func (f Float64Data) PopulationVariance() (float64, error)](#Float64Data.PopulationVariance) + * [func (f Float64Data) Quartile(d Float64Data) (Quartiles, error)](#Float64Data.Quartile) + * [func (f Float64Data) QuartileOutliers() (Outliers, error)](#Float64Data.QuartileOutliers) + * [func (f Float64Data) Quartiles() (Quartiles, error)](#Float64Data.Quartiles) + * [func (f Float64Data) Sample(n int, r bool) ([]float64, error)](#Float64Data.Sample) + * [func (f Float64Data) SampleVariance() (float64, error)](#Float64Data.SampleVariance) + * [func (f Float64Data) Sigmoid() ([]float64, error)](#Float64Data.Sigmoid) + * [func (f Float64Data) SoftMax() ([]float64, error)](#Float64Data.SoftMax) + * [func (f Float64Data) StandardDeviation() (float64, error)](#Float64Data.StandardDeviation) + * [func (f Float64Data) StandardDeviationPopulation() (float64, error)](#Float64Data.StandardDeviationPopulation) + * [func (f Float64Data) StandardDeviationSample() (float64, error)](#Float64Data.StandardDeviationSample) + * [func (f Float64Data) Sum() (float64, error)](#Float64Data.Sum) + * [func (f Float64Data) Swap(i, j int)](#Float64Data.Swap) + * [func (f Float64Data) Trimean(d Float64Data) (float64, error)](#Float64Data.Trimean) + * [func (f Float64Data) Variance() (float64, error)](#Float64Data.Variance) +* [type Outliers](#Outliers) + * [func QuartileOutliers(input Float64Data) (Outliers, error)](#QuartileOutliers) +* [type Quartiles](#Quartiles) + * [func Quartile(input Float64Data) (Quartiles, error)](#Quartile) +* [type Series](#Series) + * [func ExponentialRegression(s 
Series) (regressions Series, err error)](#ExponentialRegression) + * [func LinearRegression(s Series) (regressions Series, err error)](#LinearRegression) + * [func LogarithmicRegression(s Series) (regressions Series, err error)](#LogarithmicRegression) + +#### Examples +* [AutoCorrelation](#example_AutoCorrelation) +* [ChebyshevDistance](#example_ChebyshevDistance) +* [Correlation](#example_Correlation) +* [CumulativeSum](#example_CumulativeSum) +* [Entropy](#example_Entropy) +* [LinearRegression](#example_LinearRegression) +* [LoadRawData](#example_LoadRawData) +* [Max](#example_Max) +* [Median](#example_Median) +* [Min](#example_Min) +* [Round](#example_Round) +* [Sigmoid](#example_Sigmoid) +* [SoftMax](#example_SoftMax) +* [Sum](#example_Sum) + +#### Package files +[correlation.go](/src/github.com/montanaflynn/stats/correlation.go) [cumulative_sum.go](/src/github.com/montanaflynn/stats/cumulative_sum.go) [data.go](/src/github.com/montanaflynn/stats/data.go) [deviation.go](/src/github.com/montanaflynn/stats/deviation.go) [distances.go](/src/github.com/montanaflynn/stats/distances.go) [doc.go](/src/github.com/montanaflynn/stats/doc.go) [entropy.go](/src/github.com/montanaflynn/stats/entropy.go) [errors.go](/src/github.com/montanaflynn/stats/errors.go) [legacy.go](/src/github.com/montanaflynn/stats/legacy.go) [load.go](/src/github.com/montanaflynn/stats/load.go) [max.go](/src/github.com/montanaflynn/stats/max.go) [mean.go](/src/github.com/montanaflynn/stats/mean.go) [median.go](/src/github.com/montanaflynn/stats/median.go) [min.go](/src/github.com/montanaflynn/stats/min.go) [mode.go](/src/github.com/montanaflynn/stats/mode.go) [norm.go](/src/github.com/montanaflynn/stats/norm.go) [outlier.go](/src/github.com/montanaflynn/stats/outlier.go) [percentile.go](/src/github.com/montanaflynn/stats/percentile.go) [quartile.go](/src/github.com/montanaflynn/stats/quartile.go) [ranksum.go](/src/github.com/montanaflynn/stats/ranksum.go) [regression.go](/src/github.com/montanaflynn/stats/regression.go) [round.go](/src/github.com/montanaflynn/stats/round.go) [sample.go](/src/github.com/montanaflynn/stats/sample.go) [sigmoid.go](/src/github.com/montanaflynn/stats/sigmoid.go) [softmax.go](/src/github.com/montanaflynn/stats/softmax.go) [sum.go](/src/github.com/montanaflynn/stats/sum.go) [util.go](/src/github.com/montanaflynn/stats/util.go) [variance.go](/src/github.com/montanaflynn/stats/variance.go) + + + +## Variables +``` go +var ( + // ErrEmptyInput Input must not be empty + ErrEmptyInput = statsError{"Input must not be empty."} + // ErrNaN Not a number + ErrNaN = statsError{"Not a number."} + // ErrNegative Must not contain negative values + ErrNegative = statsError{"Must not contain negative values."} + // ErrZero Must not contain zero values + ErrZero = statsError{"Must not contain zero values."} + // ErrBounds Input is outside of range + ErrBounds = statsError{"Input is outside of range."} + // ErrSize Must be the same length + ErrSize = statsError{"Must be the same length."} + // ErrInfValue Value is infinite + ErrInfValue = statsError{"Value is infinite."} + // ErrYCoord Y Value must be greater than zero + ErrYCoord = statsError{"Y Value must be greater than zero."} +) +``` +These are the package-wide error values. +All error identification should use these values. 
+https://github.com/golang/go/wiki/Errors#naming + +``` go +var ( + EmptyInputErr = ErrEmptyInput + NaNErr = ErrNaN + NegativeErr = ErrNegative + ZeroErr = ErrZero + BoundsErr = ErrBounds + SizeErr = ErrSize + InfValue = ErrInfValue + YCoordErr = ErrYCoord + EmptyInput = ErrEmptyInput +) +``` +Legacy error names that didn't start with Err + + + +## func [AutoCorrelation](/correlation.go?s=853:918#L38) +``` go +func AutoCorrelation(data Float64Data, lags int) (float64, error) +``` +AutoCorrelation is the correlation of a signal with a delayed copy of itself as a function of delay + + + +## func [ChebyshevDistance](/distances.go?s=368:456#L20) +``` go +func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) +``` +ChebyshevDistance computes the Chebyshev distance between two data sets + + + +## func [Correlation](/correlation.go?s=112:171#L8) +``` go +func Correlation(data1, data2 Float64Data) (float64, error) +``` +Correlation describes the degree of relationship between two sets of data + + + +## func [Covariance](/variance.go?s=1284:1342#L53) +``` go +func Covariance(data1, data2 Float64Data) (float64, error) +``` +Covariance is a measure of how much two sets of data change + + + +## func [CovariancePopulation](/variance.go?s=1864:1932#L81) +``` go +func CovariancePopulation(data1, data2 Float64Data) (float64, error) +``` +CovariancePopulation computes covariance for entire population between two variables. + + + +## func [CumulativeSum](/cumulative_sum.go?s=81:137#L4) +``` go +func CumulativeSum(input Float64Data) ([]float64, error) +``` +CumulativeSum calculates the cumulative sum of the input slice + + + +## func [Entropy](/entropy.go?s=77:125#L6) +``` go +func Entropy(input Float64Data) (float64, error) +``` +Entropy provides calculation of the entropy + + + +## func [EuclideanDistance](/distances.go?s=836:924#L36) +``` go +func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) +``` +EuclideanDistance computes the Euclidean distance between two data sets + + + +## func [GeometricMean](/mean.go?s=319:373#L18) +``` go +func GeometricMean(input Float64Data) (float64, error) +``` +GeometricMean gets the geometric mean for a slice of numbers + + + +## func [HarmonicMean](/mean.go?s=717:770#L40) +``` go +func HarmonicMean(input Float64Data) (float64, error) +``` +HarmonicMean gets the harmonic mean for a slice of numbers + + + +## func [InterQuartileRange](/quartile.go?s=821:880#L45) +``` go +func InterQuartileRange(input Float64Data) (float64, error) +``` +InterQuartileRange finds the range between Q1 and Q3 + + + +## func [ManhattanDistance](/distances.go?s=1277:1365#L50) +``` go +func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) +``` +ManhattanDistance computes the Manhattan distance between two data sets + + + +## func [Max](/max.go?s=78:130#L8) +``` go +func Max(input Float64Data) (max float64, err error) +``` +Max finds the highest number in a slice + + + +## func [Mean](/mean.go?s=77:122#L6) +``` go +func Mean(input Float64Data) (float64, error) +``` +Mean gets the average of a slice of numbers + + + +## func [Median](/median.go?s=85:143#L6) +``` go +func Median(input Float64Data) (median float64, err error) +``` +Median gets the median number in a slice of numbers + + + +## func [MedianAbsoluteDeviation](/deviation.go?s=125:197#L6) +``` go +func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error) +``` +MedianAbsoluteDeviation finds the median of the absolute 
deviations from the dataset median + + + +## func [MedianAbsoluteDeviationPopulation](/deviation.go?s=360:442#L11) +``` go +func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error) +``` +MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median + + + +## func [Midhinge](/quartile.go?s=1075:1124#L55) +``` go +func Midhinge(input Float64Data) (float64, error) +``` +Midhinge finds the average of the first and third quartiles + + + +## func [Min](/min.go?s=78:130#L6) +``` go +func Min(input Float64Data) (min float64, err error) +``` +Min finds the lowest number in a set of data + + + +## func [MinkowskiDistance](/distances.go?s=2152:2256#L75) +``` go +func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error) +``` +MinkowskiDistance computes the Minkowski distance between two data sets + +Arguments: + + + dataPointX: First set of data points + dataPointY: Second set of data points. Length of both data + sets must be equal. + lambda: aka p or city blocks; with lambda = 1 the + returned distance is the Manhattan distance, with + lambda = 2 it is the Euclidean distance, and as + lambda approaches infinity it converges to the + Chebyshev distance. + +Return: + + + Distance or error + + + +## func [Mode](/mode.go?s=85:141#L4) +``` go +func Mode(input Float64Data) (mode []float64, err error) +``` +Mode gets the mode [most frequent value(s)] of a slice of float64s + + + +## func [Ncr](/norm.go?s=7384:7406#L239) +``` go +func Ncr(n, r int) int +``` +Ncr is an N choose R algorithm. +Aaron Cannon's algorithm. + + + +## func [NormBoxMullerRvs](/norm.go?s=667:736#L23) +``` go +func NormBoxMullerRvs(loc float64, scale float64, size int) []float64 +``` +NormBoxMullerRvs generates random variates using the Box–Muller transform. +For more information please visit: http://mathworld.wolfram.com/Box-MullerTransformation.html + + + +## func [NormCdf](/norm.go?s=1826:1885#L52) +``` go +func NormCdf(x float64, loc float64, scale float64) float64 +``` +NormCdf is the cumulative distribution function. + + + +## func [NormEntropy](/norm.go?s=5773:5825#L180) +``` go +func NormEntropy(loc float64, scale float64) float64 +``` +NormEntropy is the differential entropy of the RV. + + + +## func [NormFit](/norm.go?s=6058:6097#L187) +``` go +func NormFit(data []float64) [2]float64 +``` +NormFit returns the maximum likelihood estimators for the Normal Distribution. +Takes array of float64 values. +Returns array of Mean followed by Standard Deviation. + + + +## func [NormInterval](/norm.go?s=6976:7047#L221) +``` go +func NormInterval(alpha float64, loc float64, scale float64) [2]float64 +``` +NormInterval finds endpoints of the range that contains alpha percent of the distribution. + + + +## func [NormIsf](/norm.go?s=4330:4393#L137) +``` go +func NormIsf(p float64, loc float64, scale float64) (x float64) +``` +NormIsf is the inverse survival function (inverse of sf). + + + +## func [NormLogCdf](/norm.go?s=2016:2078#L57) +``` go +func NormLogCdf(x float64, loc float64, scale float64) float64 +``` +NormLogCdf is the log of the cumulative distribution function. + + + +## func [NormLogPdf](/norm.go?s=1590:1652#L47) +``` go +func NormLogPdf(x float64, loc float64, scale float64) float64 +``` +NormLogPdf is the log of the probability density function. + + + +## func [NormLogSf](/norm.go?s=2423:2484#L67) +``` go +func NormLogSf(x float64, loc float64, scale float64) float64 +``` +NormLogSf is the log of the survival function. 
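+ +For a quick sense of how the cdf and ppf helpers relate, here is a minimal sketch (the inputs are arbitrary illustrative values; the signatures are the ones listed in the index above): + +``` go +package main + +import ( + "fmt" + + "github.com/montanaflynn/stats" +) + +func main() { + // Standard normal distribution: loc (mean) 0, scale (standard deviation) 1. + p := stats.NormCdf(1.96, 0, 1) + fmt.Println(p) // ~0.975 + + // NormPpf inverts NormCdf, so this prints a value near 1.96. + fmt.Println(stats.NormPpf(p, 0, 1)) +} +```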
+ + + +## func [NormMean](/norm.go?s=6560:6609#L206) +``` go +func NormMean(loc float64, scale float64) float64 +``` +NormMean is the mean/expected value of the distribution. + + + +## func [NormMedian](/norm.go?s=6431:6482#L201) +``` go +func NormMedian(loc float64, scale float64) float64 +``` +NormMedian is the median of the distribution. + + + +## func [NormMoment](/norm.go?s=4694:4752#L146) +``` go +func NormMoment(n int, loc float64, scale float64) float64 +``` +NormMoment approximates the non-central (raw) moment of order n. +For more information please visit: https://math.stackexchange.com/questions/1945448/methods-for-finding-raw-moments-of-the-normal-distribution + + + +## func [NormPdf](/norm.go?s=1357:1416#L42) +``` go +func NormPdf(x float64, loc float64, scale float64) float64 +``` +NormPdf is the probability density function. + + + +## func [NormPpf](/norm.go?s=2854:2917#L75) +``` go +func NormPpf(p float64, loc float64, scale float64) (x float64) +``` +NormPpf is the point percentile function. +This is based on Peter John Acklam's inverse normal CDF. +algorithm: http://home.online.no/~pjacklam/notes/invnorm/ (no longer visible). +For more information please visit: https://stackedboxes.org/2017/05/01/acklams-normal-quantile-function/ + + + +## func [NormPpfRvs](/norm.go?s=247:310#L12) +``` go +func NormPpfRvs(loc float64, scale float64, size int) []float64 +``` +NormPpfRvs generates random variates using the Point Percentile Function. +For more information please visit: https://demonstrations.wolfram.com/TheMethodOfInverseTransforms/ + + + +## func [NormSf](/norm.go?s=2250:2308#L62) +``` go +func NormSf(x float64, loc float64, scale float64) float64 +``` +NormSf is the survival function (also defined as 1 - cdf, but sf is sometimes more accurate). + + + +## func [NormStats](/norm.go?s=5277:5345#L162) +``` go +func NormStats(loc float64, scale float64, moments string) []float64 +``` +NormStats returns the mean, variance, skew, and/or kurtosis. +Mean(‘m’), variance(‘v’), skew(‘s’), and/or kurtosis(‘k’). +Takes string containing any of 'mvsk'. +Returns array of m v s k in that order. + + + +## func [NormStd](/norm.go?s=6814:6862#L216) +``` go +func NormStd(loc float64, scale float64) float64 +``` +NormStd is the standard deviation of the distribution. + + + +## func [NormVar](/norm.go?s=6675:6723#L211) +``` go +func NormVar(loc float64, scale float64) float64 +``` +NormVar is the variance of the distribution. 
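+ +The variate generators and NormFit complement each other: one draws from a distribution, the other estimates its parameters back. A minimal sketch (the estimates only approximate the true parameters): + +``` go +package main + +import ( + "fmt" + + "github.com/montanaflynn/stats" +) + +func main() { + // Draw 1000 variates from a normal distribution with mean 10 and standard deviation 2. + rvs := stats.NormBoxMullerRvs(10, 2, 1000) + + // NormFit returns [mean, standard deviation] estimates, which should land near 10 and 2. + fit := stats.NormFit(rvs) + fmt.Println(fit[0], fit[1]) +} +```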
+ + + +## func [Pearson](/correlation.go?s=655:710#L33) +``` go +func Pearson(data1, data2 Float64Data) (float64, error) +``` +Pearson calculates the Pearson product-moment correlation coefficient between two variables + + + +## func [Percentile](/percentile.go?s=98:181#L8) +``` go +func Percentile(input Float64Data, percent float64) (percentile float64, err error) +``` +Percentile finds the relative standing in a slice of floats + + + +## func [PercentileNearestRank](/percentile.go?s=1079:1173#L54) +``` go +func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error) +``` +PercentileNearestRank finds the relative standing in a slice of floats using the Nearest Rank method + + + +## func [PopulationVariance](/variance.go?s=828:896#L31) +``` go +func PopulationVariance(input Float64Data) (pvar float64, err error) +``` +PopulationVariance finds the amount of variance within a population + + + +## func [Round](/round.go?s=88:154#L6) +``` go +func Round(input float64, places int) (rounded float64, err error) +``` +Round a float to a specific decimal place or precision + + + +## func [Sample](/sample.go?s=112:192#L9) +``` go +func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error) +``` +Sample returns sample from input with replacement or without + + + +## func [SampleVariance](/variance.go?s=1058:1122#L42) +``` go +func SampleVariance(input Float64Data) (svar float64, err error) +``` +SampleVariance finds the amount of variance within a sample + + + +## func [Sigmoid](/sigmoid.go?s=228:278#L9) +``` go +func Sigmoid(input Float64Data) ([]float64, error) +``` +Sigmoid returns the input values in the range of 0 to 1 +along the sigmoid or s-shaped curve, commonly used in +machine learning while training neural networks as an +activation function. + + + +## func [SoftMax](/softmax.go?s=206:256#L8) +``` go +func SoftMax(input Float64Data) ([]float64, error) +``` +SoftMax returns the input values in the range of 0 to 1 +with sum of all the probabilities being equal to one. It +is commonly used in machine learning neural networks. + + + +## func [StableSample](/sample.go?s=974:1042#L50) +``` go +func StableSample(input Float64Data, takenum int) ([]float64, error) +``` +StableSample, like a stable sort, returns samples from the input while keeping the order of the original data. 
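+ +The difference between Sample and StableSample is easiest to see side by side; a minimal sketch (output varies between runs because sampling is random): + +``` go +package main + +import ( + "fmt" + + "github.com/montanaflynn/stats" +) + +func main() { + data := stats.Float64Data{1, 2, 3, 4, 5} + + // Draw three values without replacement; their order is randomized. + s, _ := stats.Sample(data, 3, false) + fmt.Println(s) + + // StableSample also draws three values, but they keep their original relative order. + ss, _ := stats.StableSample(data, 3) + fmt.Println(ss) +} +```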
+ + + +## func [StandardDeviation](/deviation.go?s=695:762#L27) +``` go +func StandardDeviation(input Float64Data) (sdev float64, err error) +``` +StandardDeviation the amount of variation in the dataset + + + +## func [StandardDeviationPopulation](/deviation.go?s=892:969#L32) +``` go +func StandardDeviationPopulation(input Float64Data) (sdev float64, err error) +``` +StandardDeviationPopulation finds the amount of variation from the population + + + +## func [StandardDeviationSample](/deviation.go?s=1254:1327#L46) +``` go +func StandardDeviationSample(input Float64Data) (sdev float64, err error) +``` +StandardDeviationSample finds the amount of variation from a sample + + + +## func [StdDevP](/legacy.go?s=339:396#L14) +``` go +func StdDevP(input Float64Data) (sdev float64, err error) +``` +StdDevP is a shortcut to StandardDeviationPopulation + + + +## func [StdDevS](/legacy.go?s=497:554#L19) +``` go +func StdDevS(input Float64Data) (sdev float64, err error) +``` +StdDevS is a shortcut to StandardDeviationSample + + + +## func [Sum](/sum.go?s=78:130#L6) +``` go +func Sum(input Float64Data) (sum float64, err error) +``` +Sum adds all the numbers of a slice together + + + +## func [Trimean](/quartile.go?s=1320:1368#L65) +``` go +func Trimean(input Float64Data) (float64, error) +``` +Trimean finds the average of the median and the midhinge + + + +## func [VarP](/legacy.go?s=59:113#L4) +``` go +func VarP(input Float64Data) (sdev float64, err error) +``` +VarP is a shortcut to PopulationVariance + + + +## func [VarS](/legacy.go?s=193:247#L9) +``` go +func VarS(input Float64Data) (sdev float64, err error) +``` +VarS is a shortcut to SampleVariance + + + +## func [Variance](/variance.go?s=659:717#L26) +``` go +func Variance(input Float64Data) (sdev float64, err error) +``` +Variance the amount of variation in the dataset + + + + +## type [Coordinate](/regression.go?s=143:183#L9) +``` go +type Coordinate struct { + X, Y float64 +} + +``` +Coordinate holds the data in a series + + + + + + + +### func [ExpReg](/legacy.go?s=791:856#L29) +``` go +func ExpReg(s []Coordinate) (regressions []Coordinate, err error) +``` +ExpReg is a shortcut to ExponentialRegression + + +### func [LinReg](/legacy.go?s=643:708#L24) +``` go +func LinReg(s []Coordinate) (regressions []Coordinate, err error) +``` +LinReg is a shortcut to LinearRegression + + +### func [LogReg](/legacy.go?s=944:1009#L34) +``` go +func LogReg(s []Coordinate) (regressions []Coordinate, err error) +``` +LogReg is a shortcut to LogarithmicRegression + + + + + +## type [Float64Data](/data.go?s=80:106#L4) +``` go +type Float64Data []float64 +``` +Float64Data is a named type for []float64 with helper methods + + + + + + + +### func [LoadRawData](/load.go?s=119:168#L9) +``` go +func LoadRawData(raw interface{}) (f Float64Data) +``` +LoadRawData parses and converts a slice of mixed data types to floats + + + + + +### func (Float64Data) [AutoCorrelation](/data.go?s=3257:3320#L91) +``` go +func (f Float64Data) AutoCorrelation(lags int) (float64, error) +``` +AutoCorrelation is the correlation of a signal with a delayed copy of itself as a function of delay + + + + +### func (Float64Data) [Correlation](/data.go?s=3058:3122#L86) +``` go +func (f Float64Data) Correlation(d Float64Data) (float64, error) +``` +Correlation describes the degree of relationship between two sets of data + + + + +### func (Float64Data) [Covariance](/data.go?s=4801:4864#L141) +``` go +func (f Float64Data) Covariance(d Float64Data) (float64, error) +``` +Covariance is a measure of 
how much two sets of data change + + + + +### func (Float64Data) [CovariancePopulation](/data.go?s=4983:5056#L146) +``` go +func (f Float64Data) CovariancePopulation(d Float64Data) (float64, error) +``` +CovariancePopulation computes covariance for entire population between two variables + + + + +### func (Float64Data) [CumulativeSum](/data.go?s=883:938#L28) +``` go +func (f Float64Data) CumulativeSum() ([]float64, error) +``` +CumulativeSum returns the cumulative sum of the data + + + + +### func (Float64Data) [Entropy](/data.go?s=5480:5527#L162) +``` go +func (f Float64Data) Entropy() (float64, error) +``` +Entropy provides calculation of the entropy + + + + +### func (Float64Data) [GeometricMean](/data.go?s=1332:1385#L40) +``` go +func (f Float64Data) GeometricMean() (float64, error) +``` +GeometricMean returns the geometric mean of the data + + + + +### func (Float64Data) [Get](/data.go?s=129:168#L7) +``` go +func (f Float64Data) Get(i int) float64 +``` +Get item in slice + + + + +### func (Float64Data) [HarmonicMean](/data.go?s=1460:1512#L43) +``` go +func (f Float64Data) HarmonicMean() (float64, error) +``` +HarmonicMean returns the harmonic mean of the data + + + + +### func (Float64Data) [InterQuartileRange](/data.go?s=3755:3813#L106) +``` go +func (f Float64Data) InterQuartileRange() (float64, error) +``` +InterQuartileRange finds the range between Q1 and Q3 + + + + +### func (Float64Data) [Len](/data.go?s=217:247#L10) +``` go +func (f Float64Data) Len() int +``` +Len returns length of slice + + + + +### func (Float64Data) [Less](/data.go?s=318:358#L13) +``` go +func (f Float64Data) Less(i, j int) bool +``` +Less returns if one number is less than another + + + + +### func (Float64Data) [Max](/data.go?s=645:688#L22) +``` go +func (f Float64Data) Max() (float64, error) +``` +Max returns the maximum number in the data + + + + +### func (Float64Data) [Mean](/data.go?s=1005:1049#L31) +``` go +func (f Float64Data) Mean() (float64, error) +``` +Mean returns the mean of the data + + + + +### func (Float64Data) [Median](/data.go?s=1111:1157#L34) +``` go +func (f Float64Data) Median() (float64, error) +``` +Median returns the median of the data + + + + +### func (Float64Data) [MedianAbsoluteDeviation](/data.go?s=1630:1693#L46) +``` go +func (f Float64Data) MedianAbsoluteDeviation() (float64, error) +``` +MedianAbsoluteDeviation the median of the absolute deviations from the dataset median + + + + +### func (Float64Data) [MedianAbsoluteDeviationPopulation](/data.go?s=1842:1915#L51) +``` go +func (f Float64Data) MedianAbsoluteDeviationPopulation() (float64, error) +``` +MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median + + + + +### func (Float64Data) [Midhinge](/data.go?s=3912:3973#L111) +``` go +func (f Float64Data) Midhinge(d Float64Data) (float64, error) +``` +Midhinge finds the average of the first and third quartiles + + + + +### func (Float64Data) [Min](/data.go?s=536:579#L19) +``` go +func (f Float64Data) Min() (float64, error) +``` +Min returns the minimum number in the data + + + + +### func (Float64Data) [Mode](/data.go?s=1217:1263#L37) +``` go +func (f Float64Data) Mode() ([]float64, error) +``` +Mode returns the mode of the data + + + + +### func (Float64Data) [Pearson](/data.go?s=3455:3515#L96) +``` go +func (f Float64Data) Pearson(d Float64Data) (float64, error) +``` +Pearson calculates the Pearson product-moment correlation coefficient between two variables. 
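+ +These methods mirror the package-level functions, so data loaded once can be queried fluently; a minimal sketch (the values are arbitrary): + +``` go +package main + +import ( + "fmt" + + "github.com/montanaflynn/stats" +) + +func main() { + // LoadRawData coerces mixed input types into a Float64Data. + d := stats.LoadRawData([]interface{}{1, "2", 3.5}) + + m, _ := d.Mean() + fmt.Println(m) + + // Correlation with a second series of the same length. + r, _ := d.Correlation(stats.Float64Data{2, 3, 4.5}) + fmt.Println(r) +} +```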
+ + + + +### func (Float64Data) [Percentile](/data.go?s=2696:2755#L76) +``` go +func (f Float64Data) Percentile(p float64) (float64, error) +``` +Percentile finds the relative standing in a slice of floats + + + + +### func (Float64Data) [PercentileNearestRank](/data.go?s=2869:2939#L81) +``` go +func (f Float64Data) PercentileNearestRank(p float64) (float64, error) +``` +PercentileNearestRank finds the relative standing using the Nearest Rank method + + + + +### func (Float64Data) [PopulationVariance](/data.go?s=4495:4553#L131) +``` go +func (f Float64Data) PopulationVariance() (float64, error) +``` +PopulationVariance finds the amount of variance within a population + + + + +### func (Float64Data) [Quartile](/data.go?s=3610:3673#L101) +``` go +func (f Float64Data) Quartile(d Float64Data) (Quartiles, error) +``` +Quartile returns the three quartile points from a slice of data + + + + +### func (Float64Data) [QuartileOutliers](/data.go?s=2542:2599#L71) +``` go +func (f Float64Data) QuartileOutliers() (Outliers, error) +``` +QuartileOutliers finds the mild and extreme outliers + + + + +### func (Float64Data) [Quartiles](/data.go?s=5628:5679#L167) +``` go +func (f Float64Data) Quartiles() (Quartiles, error) +``` +Quartiles returns the three quartile points from instance of Float64Data + + + + +### func (Float64Data) [Sample](/data.go?s=4208:4269#L121) +``` go +func (f Float64Data) Sample(n int, r bool) ([]float64, error) +``` +Sample returns sample from input with replacement or without + + + + +### func (Float64Data) [SampleVariance](/data.go?s=4652:4706#L136) +``` go +func (f Float64Data) SampleVariance() (float64, error) +``` +SampleVariance finds the amount of variance within a sample + + + + +### func (Float64Data) [Sigmoid](/data.go?s=5169:5218#L151) +``` go +func (f Float64Data) Sigmoid() ([]float64, error) +``` +Sigmoid returns the input values along the sigmoid or s-shaped curve + + + + +### func (Float64Data) [SoftMax](/data.go?s=5359:5408#L157) +``` go +func (f Float64Data) SoftMax() ([]float64, error) +``` +SoftMax returns the input values in the range of 0 to 1 +with sum of all the probabilities being equal to one. 
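+ +As an illustration of the Sigmoid and SoftMax transforms just described, a minimal sketch (the scores are arbitrary): + +``` go +package main + +import ( + "fmt" + + "github.com/montanaflynn/stats" +) + +func main() { + scores := stats.Float64Data{1.0, 2.0, 3.0} + + // SoftMax rescales the scores so they sum to one. + probs, _ := scores.SoftMax() + fmt.Println(probs) + + // Sigmoid squashes each score independently onto the s-shaped curve. + sig, _ := scores.Sigmoid() + fmt.Println(sig) +} +```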
+ + + + +### func (Float64Data) [StandardDeviation](/data.go?s=2026:2083#L56) +``` go +func (f Float64Data) StandardDeviation() (float64, error) +``` +StandardDeviation the amount of variation in the dataset + + + + +### func (Float64Data) [StandardDeviationPopulation](/data.go?s=2199:2266#L61) +``` go +func (f Float64Data) StandardDeviationPopulation() (float64, error) +``` +StandardDeviationPopulation finds the amount of variation from the population + + + + +### func (Float64Data) [StandardDeviationSample](/data.go?s=2382:2445#L66) +``` go +func (f Float64Data) StandardDeviationSample() (float64, error) +``` +StandardDeviationSample finds the amount of variation from a sample + + + + +### func (Float64Data) [Sum](/data.go?s=764:807#L25) +``` go +func (f Float64Data) Sum() (float64, error) +``` +Sum returns the total of all the numbers in the data + + + + +### func (Float64Data) [Swap](/data.go?s=425:460#L16) +``` go +func (f Float64Data) Swap(i, j int) +``` +Swap switches out two numbers in slice + + + + +### func (Float64Data) [Trimean](/data.go?s=4059:4119#L116) +``` go +func (f Float64Data) Trimean(d Float64Data) (float64, error) +``` +Trimean finds the average of the median and the midhinge + + + + +### func (Float64Data) [Variance](/data.go?s=4350:4398#L126) +``` go +func (f Float64Data) Variance() (float64, error) +``` +Variance the amount of variation in the dataset + + + + +## type [Outliers](/outlier.go?s=73:139#L4) +``` go +type Outliers struct { + Mild Float64Data + Extreme Float64Data +} + +``` +Outliers holds mild and extreme outliers found in data + + + + + + + +### func [QuartileOutliers](/outlier.go?s=197:255#L10) +``` go +func QuartileOutliers(input Float64Data) (Outliers, error) +``` +QuartileOutliers finds the mild and extreme outliers + + + + + +## type [Quartiles](/quartile.go?s=75:136#L6) +``` go +type Quartiles struct { + Q1 float64 + Q2 float64 + Q3 float64 +} + +``` +Quartiles holds the three quartile points + + + + + + + +### func [Quartile](/quartile.go?s=205:256#L13) +``` go +func Quartile(input Float64Data) (Quartiles, error) +``` +Quartile returns the three quartile points from a slice of data + + + + + +## type [Series](/regression.go?s=76:100#L6) +``` go +type Series []Coordinate +``` +Series is a container for a series of data + + + + + + + +### func [ExponentialRegression](/regression.go?s=1089:1157#L50) +``` go +func ExponentialRegression(s Series) (regressions Series, err error) +``` +ExponentialRegression returns an exponential regression on data series + + +### func [LinearRegression](/regression.go?s=262:325#L14) +``` go +func LinearRegression(s Series) (regressions Series, err error) +``` +LinearRegression finds the least squares linear regression on data series + + +### func [LogarithmicRegression](/regression.go?s=1903:1971#L85) +``` go +func LogarithmicRegression(s Series) (regressions Series, err error) +``` +LogarithmicRegression returns a logarithmic regression on data series + + + + + + + + + +- - - +Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) diff --git a/vendor/github.com/go-stack/stack/LICENSE.md b/vendor/github.com/montanaflynn/stats/LICENSE similarity index 94% rename from vendor/github.com/go-stack/stack/LICENSE.md rename to vendor/github.com/montanaflynn/stats/LICENSE index 2abf98ea8..159096129 100644 --- a/vendor/github.com/go-stack/stack/LICENSE.md +++ b/vendor/github.com/montanaflynn/stats/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2014 Chris Hines +Copyright (c) 2014-2020 Montana 
Flynn (https://montanaflynn.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/montanaflynn/stats/Makefile b/vendor/github.com/montanaflynn/stats/Makefile new file mode 100644 index 000000000..969df1280 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/Makefile @@ -0,0 +1,34 @@ +.PHONY: all + +default: test lint + +format: + go fmt . + +test: + go test -race + +check: format test + +benchmark: + go test -bench=. -benchmem + +coverage: + go test -coverprofile=coverage.out + go tool cover -html="coverage.out" + +lint: format + golangci-lint run . + +docs: + godoc2md github.com/montanaflynn/stats | sed -e s#src/target/##g > DOCUMENTATION.md + +release: + git-chglog --output CHANGELOG.md --next-tag ${TAG} + git add CHANGELOG.md + git commit -m "Update changelog with ${TAG} changes" + git tag ${TAG} + git-chglog $(TAG) | tail -n +4 | gsed '1s/^/$(TAG)\n/gm' > release-notes.txt + git push origin master ${TAG} + hub release create --copy -F release-notes.txt ${TAG} + diff --git a/vendor/github.com/montanaflynn/stats/README.md b/vendor/github.com/montanaflynn/stats/README.md new file mode 100644 index 000000000..4495c8ddf --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/README.md @@ -0,0 +1,228 @@ +# Stats - Golang Statistics Package + +[![][travis-svg]][travis-url] [![][coveralls-svg]][coveralls-url] [![][goreport-svg]][goreport-url] [![][godoc-svg]][godoc-url] [![][pkggodev-svg]][pkggodev-url] [![][license-svg]][license-url] + +A well tested and comprehensive Golang statistics library / package / module with no dependencies. + +If you have any suggestions, problems or bug reports please [create an issue](https://github.com/montanaflynn/stats/issues) and I'll do my best to accommodate you. In addition simply starring the repo would show your support for the project and be very much appreciated! + +## Installation + +``` +go get github.com/montanaflynn/stats +``` + +## Example Usage + +All the functions can be seen in [examples/main.go](examples/main.go) but here's a little taste: + +```go +// start with some source data to use +data := []float64{1.0, 2.1, 3.2, 4.823, 4.1, 5.8} + +// you could also use different types like this +// data := stats.LoadRawData([]int{1, 2, 3, 4, 5}) +// data := stats.LoadRawData([]interface{}{1.1, "2", 3}) +// etc... + +median, _ := stats.Median(data) +fmt.Println(median) // 3.65 + +roundedMedian, _ := stats.Round(median, 0) +fmt.Println(roundedMedian) // 4 +``` + +## Documentation + +The entire API documentation is available on [GoDoc.org](http://godoc.org/github.com/montanaflynn/stats) or [pkg.go.dev](https://pkg.go.dev/github.com/montanaflynn/stats). + +You can also view docs offline with the following commands: + +``` +# Command line +godoc . # show all exported apis +godoc . Median # show a single function +godoc -ex . Round # show function with example +godoc . 
Float64Data # show the type and methods + +# Local website +godoc -http=:4444 # start the godoc server on port 4444 +open http://localhost:4444/pkg/github.com/montanaflynn/stats/ +``` + +The exported API is as follows: + +```go +var ( + ErrEmptyInput = statsError{"Input must not be empty."} + ErrNaN = statsError{"Not a number."} + ErrNegative = statsError{"Must not contain negative values."} + ErrZero = statsError{"Must not contain zero values."} + ErrBounds = statsError{"Input is outside of range."} + ErrSize = statsError{"Must be the same length."} + ErrInfValue = statsError{"Value is infinite."} + ErrYCoord = statsError{"Y Value must be greater than zero."} +) + +func Round(input float64, places int) (rounded float64, err error) {} + +type Float64Data []float64 + +func LoadRawData(raw interface{}) (f Float64Data) {} + +func AutoCorrelation(data Float64Data, lags int) (float64, error) {} +func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {} +func Correlation(data1, data2 Float64Data) (float64, error) {} +func Covariance(data1, data2 Float64Data) (float64, error) {} +func CovariancePopulation(data1, data2 Float64Data) (float64, error) {} +func CumulativeSum(input Float64Data) ([]float64, error) {} +func Entropy(input Float64Data) (float64, error) {} +func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {} +func GeometricMean(input Float64Data) (float64, error) {} +func HarmonicMean(input Float64Data) (float64, error) {} +func InterQuartileRange(input Float64Data) (float64, error) {} +func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {} +func Max(input Float64Data) (max float64, err error) {} +func Mean(input Float64Data) (float64, error) {} +func Median(input Float64Data) (median float64, err error) {} +func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error) {} +func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error) {} +func Midhinge(input Float64Data) (float64, error) {} +func Min(input Float64Data) (min float64, err error) {} +func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error) {} +func Mode(input Float64Data) (mode []float64, err error) {} +func NormBoxMullerRvs(loc float64, scale float64, size int) []float64 {} +func NormCdf(x float64, loc float64, scale float64) float64 {} +func NormEntropy(loc float64, scale float64) float64 {} +func NormFit(data []float64) [2]float64{} +func NormInterval(alpha float64, loc float64, scale float64 ) [2]float64 {} +func NormIsf(p float64, loc float64, scale float64) (x float64) {} +func NormLogCdf(x float64, loc float64, scale float64) float64 {} +func NormLogPdf(x float64, loc float64, scale float64) float64 {} +func NormLogSf(x float64, loc float64, scale float64) float64 {} +func NormMean(loc float64, scale float64) float64 {} +func NormMedian(loc float64, scale float64) float64 {} +func NormMoment(n int, loc float64, scale float64) float64 {} +func NormPdf(x float64, loc float64, scale float64) float64 {} +func NormPpf(p float64, loc float64, scale float64) (x float64) {} +func NormPpfRvs(loc float64, scale float64, size int) []float64 {} +func NormSf(x float64, loc float64, scale float64) float64 {} +func NormStats(loc float64, scale float64, moments string) []float64 {} +func NormStd(loc float64, scale float64) float64 {} +func NormVar(loc float64, scale float64) float64 {} +func Pearson(data1, data2 Float64Data) (float64, error) {} 
+func Percentile(input Float64Data, percent float64) (percentile float64, err error) {} +func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error) {} +func PopulationVariance(input Float64Data) (pvar float64, err error) {} +func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error) {} +func SampleVariance(input Float64Data) (svar float64, err error) {} +func Sigmoid(input Float64Data) ([]float64, error) {} +func SoftMax(input Float64Data) ([]float64, error) {} +func StableSample(input Float64Data, takenum int) ([]float64, error) {} +func StandardDeviation(input Float64Data) (sdev float64, err error) {} +func StandardDeviationPopulation(input Float64Data) (sdev float64, err error) {} +func StandardDeviationSample(input Float64Data) (sdev float64, err error) {} +func StdDevP(input Float64Data) (sdev float64, err error) {} +func StdDevS(input Float64Data) (sdev float64, err error) {} +func Sum(input Float64Data) (sum float64, err error) {} +func Trimean(input Float64Data) (float64, error) {} +func VarP(input Float64Data) (sdev float64, err error) {} +func VarS(input Float64Data) (sdev float64, err error) {} +func Variance(input Float64Data) (sdev float64, err error) {} + +type Coordinate struct { + X, Y float64 +} + +type Series []Coordinate + +func ExponentialRegression(s Series) (regressions Series, err error) {} +func LinearRegression(s Series) (regressions Series, err error) {} +func LogarithmicRegression(s Series) (regressions Series, err error) {} + +type Outliers struct { + Mild Float64Data + Extreme Float64Data +} + +type Quartiles struct { + Q1 float64 + Q2 float64 + Q3 float64 +} + +func Quartile(input Float64Data) (Quartiles, error) {} +func QuartileOutliers(input Float64Data) (Outliers, error) {} +``` + +## Contributing + +Pull requests are always welcome no matter how big or small. I've included a [Makefile](https://github.com/montanaflynn/stats/blob/master/Makefile) that has a lot of helper targets for common actions such as linting, testing, code coverage reporting and more. + +1. Fork the repo and clone your fork +2. Create new branch (`git checkout -b some-thing`) +3. Make the desired changes +4. Ensure tests pass (`go test -cover` or `make test`) +5. Run lint and fix problems (`go vet .` or `make lint`) +6. Commit changes (`git commit -am 'Did something'`) +7. Push branch (`git push origin some-thing`) +8. Submit pull request + +To make things as seamless as possible please also consider the following steps: + +- Update `examples/main.go` with a simple example of the new feature +- Update `README.md` documentation section with any new exported API +- Keep 100% code coverage (you can check with `make coverage`) +- Squash commits into single units of work with `git rebase -i new-feature` + +## Releasing + +To release a new version we should update the [CHANGELOG.md](/CHANGELOG.md) and [DOCUMENTATION.md](/DOCUMENTATION.md). 
+ +First install the tools used to generate the markdown files: + +``` +go get github.com/davecheney/godoc2md +go get github.com/golangci/golangci-lint/cmd/golangci-lint +``` + +Then you can run these `make` directives: + +``` +# Generate DOCUMENTATION.md +make docs +``` + +Then we can create a [CHANGELOG.md](/CHANGELOG.md), a new git tag and a GitHub release: + +``` +make release TAG=v0.x.x +``` + +## MIT License + +Copyright (c) 2014-2021 Montana Flynn (https://montanaflynn.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +[travis-url]: https://travis-ci.org/montanaflynn/stats +[travis-svg]: https://img.shields.io/travis/montanaflynn/stats.svg + +[coveralls-url]: https://coveralls.io/r/montanaflynn/stats?branch=master +[coveralls-svg]: https://img.shields.io/coveralls/montanaflynn/stats.svg + +[goreport-url]: https://goreportcard.com/report/github.com/montanaflynn/stats +[goreport-svg]: https://goreportcard.com/badge/github.com/montanaflynn/stats + +[godoc-url]: https://godoc.org/github.com/montanaflynn/stats +[godoc-svg]: https://godoc.org/github.com/montanaflynn/stats?status.svg + +[pkggodev-url]: https://pkg.go.dev/github.com/montanaflynn/stats +[pkggodev-svg]: https://gistcdn.githack.com/montanaflynn/b02f1d78d8c0de8435895d7e7cd0d473/raw/17f2a5a69f1323ecd42c00e0683655da96d9ecc8/badge.svg + +[license-url]: https://github.com/montanaflynn/stats/blob/master/LICENSE +[license-svg]: https://img.shields.io/badge/license-MIT-blue.svg diff --git a/vendor/github.com/montanaflynn/stats/correlation.go b/vendor/github.com/montanaflynn/stats/correlation.go new file mode 100644 index 000000000..4acab94dc --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/correlation.go @@ -0,0 +1,60 @@ +package stats + +import ( + "math" +) + +// Correlation describes the degree of relationship between two sets of data +func Correlation(data1, data2 Float64Data) (float64, error) { + + l1 := data1.Len() + l2 := data2.Len() + + if l1 == 0 || l2 == 0 { + return math.NaN(), EmptyInputErr + } + + if l1 != l2 { + return math.NaN(), SizeErr + } + + sdev1, _ := StandardDeviationPopulation(data1) + sdev2, _ := StandardDeviationPopulation(data2) + + if sdev1 == 0 || sdev2 == 0 { + return 0, nil + } + + covp, _ := CovariancePopulation(data1, data2) + return covp / (sdev1 * sdev2), nil +} + +// Pearson calculates the Pearson product-moment correlation coefficient between two variables +func Pearson(data1, data2 Float64Data) (float64, error) { + return Correlation(data1, data2) +} + +// AutoCorrelation is the correlation of a signal with a delayed 
copy of itself as a function of delay +func AutoCorrelation(data Float64Data, lags int) (float64, error) { + if len(data) < 1 { + return 0, EmptyInputErr + } + + mean, _ := Mean(data) + + var result, q float64 + + for i := 0; i < lags; i++ { + v := (data[0] - mean) * (data[0] - mean) + for i := 1; i < len(data); i++ { + delta0 := data[i-1] - mean + delta1 := data[i] - mean + q += (delta0*delta1 - q) / float64(i+1) + v += (delta1*delta1 - v) / float64(i+1) + } + + result = q / v + } + + return result, nil +} diff --git a/vendor/github.com/montanaflynn/stats/cumulative_sum.go b/vendor/github.com/montanaflynn/stats/cumulative_sum.go new file mode 100644 index 000000000..e5305daf3 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/cumulative_sum.go @@ -0,0 +1,21 @@ +package stats + +// CumulativeSum calculates the cumulative sum of the input slice +func CumulativeSum(input Float64Data) ([]float64, error) { + + if input.Len() == 0 { + return Float64Data{}, EmptyInput + } + + cumSum := make([]float64, input.Len()) + + for i, val := range input { + if i == 0 { + cumSum[i] = val + } else { + cumSum[i] = cumSum[i-1] + val + } + } + + return cumSum, nil +} diff --git a/vendor/github.com/montanaflynn/stats/data.go b/vendor/github.com/montanaflynn/stats/data.go new file mode 100644 index 000000000..b86f0d84d --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/data.go @@ -0,0 +1,169 @@ +package stats + +// Float64Data is a named type for []float64 with helper methods +type Float64Data []float64 + +// Get item in slice +func (f Float64Data) Get(i int) float64 { return f[i] } + +// Len returns length of slice +func (f Float64Data) Len() int { return len(f) } + +// Less returns if one number is less than another +func (f Float64Data) Less(i, j int) bool { return f[i] < f[j] } + +// Swap switches out two numbers in slice +func (f Float64Data) Swap(i, j int) { f[i], f[j] = f[j], f[i] } + +// Min returns the minimum number in the data +func (f Float64Data) Min() (float64, error) { return Min(f) } + +// Max returns the maximum number in the data +func (f Float64Data) Max() (float64, error) { return Max(f) } + +// Sum returns the total of all the numbers in the data +func (f Float64Data) Sum() (float64, error) { return Sum(f) } + +// CumulativeSum returns the cumulative sum of the data +func (f Float64Data) CumulativeSum() ([]float64, error) { return CumulativeSum(f) } + +// Mean returns the mean of the data +func (f Float64Data) Mean() (float64, error) { return Mean(f) } + +// Median returns the median of the data +func (f Float64Data) Median() (float64, error) { return Median(f) } + +// Mode returns the mode of the data +func (f Float64Data) Mode() ([]float64, error) { return Mode(f) } + +// GeometricMean returns the geometric mean of the data +func (f Float64Data) GeometricMean() (float64, error) { return GeometricMean(f) } + +// HarmonicMean returns the harmonic mean of the data +func (f Float64Data) HarmonicMean() (float64, error) { return HarmonicMean(f) } + +// MedianAbsoluteDeviation the median of the absolute deviations from the dataset median +func (f Float64Data) MedianAbsoluteDeviation() (float64, error) { + return MedianAbsoluteDeviation(f) +} + +// MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median +func (f Float64Data) MedianAbsoluteDeviationPopulation() (float64, error) { + return MedianAbsoluteDeviationPopulation(f) +} + +// StandardDeviation the amount of variation in the dataset +func (f Float64Data) StandardDeviation() (float64, error) { + 
return StandardDeviation(f) +} + +// StandardDeviationPopulation finds the amount of variation from the population +func (f Float64Data) StandardDeviationPopulation() (float64, error) { + return StandardDeviationPopulation(f) +} + +// StandardDeviationSample finds the amount of variation from a sample +func (f Float64Data) StandardDeviationSample() (float64, error) { + return StandardDeviationSample(f) +} + +// QuartileOutliers finds the mild and extreme outliers +func (f Float64Data) QuartileOutliers() (Outliers, error) { + return QuartileOutliers(f) +} + +// Percentile finds the relative standing in a slice of floats +func (f Float64Data) Percentile(p float64) (float64, error) { + return Percentile(f, p) +} + +// PercentileNearestRank finds the relative standing using the Nearest Rank method +func (f Float64Data) PercentileNearestRank(p float64) (float64, error) { + return PercentileNearestRank(f, p) +} + +// Correlation describes the degree of relationship between two sets of data +func (f Float64Data) Correlation(d Float64Data) (float64, error) { + return Correlation(f, d) +} + +// AutoCorrelation is the correlation of a signal with a delayed copy of itself as a function of delay +func (f Float64Data) AutoCorrelation(lags int) (float64, error) { + return AutoCorrelation(f, lags) +} + +// Pearson calculates the Pearson product-moment correlation coefficient between two variables. +func (f Float64Data) Pearson(d Float64Data) (float64, error) { + return Pearson(f, d) +} + +// Quartile returns the three quartile points from a slice of data +func (f Float64Data) Quartile(d Float64Data) (Quartiles, error) { + return Quartile(d) +} + +// InterQuartileRange finds the range between Q1 and Q3 +func (f Float64Data) InterQuartileRange() (float64, error) { + return InterQuartileRange(f) +} + +// Midhinge finds the average of the first and third quartiles +func (f Float64Data) Midhinge(d Float64Data) (float64, error) { + return Midhinge(d) +} + +// Trimean finds the average of the median and the midhinge +func (f Float64Data) Trimean(d Float64Data) (float64, error) { + return Trimean(d) +} + +// Sample returns sample from input with replacement or without +func (f Float64Data) Sample(n int, r bool) ([]float64, error) { + return Sample(f, n, r) +} + +// Variance the amount of variation in the dataset +func (f Float64Data) Variance() (float64, error) { + return Variance(f) +} + +// PopulationVariance finds the amount of variance within a population +func (f Float64Data) PopulationVariance() (float64, error) { + return PopulationVariance(f) +} + +// SampleVariance finds the amount of variance within a sample +func (f Float64Data) SampleVariance() (float64, error) { + return SampleVariance(f) +} + +// Covariance is a measure of how much two sets of data change +func (f Float64Data) Covariance(d Float64Data) (float64, error) { + return Covariance(f, d) +} + +// CovariancePopulation computes covariance for entire population between two variables +func (f Float64Data) CovariancePopulation(d Float64Data) (float64, error) { + return CovariancePopulation(f, d) +} + +// Sigmoid returns the input values along the sigmoid or s-shaped curve +func (f Float64Data) Sigmoid() ([]float64, error) { + return Sigmoid(f) +} + +// SoftMax returns the input values in the range of 0 to 1 +// with sum of all the probabilities being equal to one. 
+func (f Float64Data) SoftMax() ([]float64, error) { + return SoftMax(f) +} + +// Entropy provides calculation of the entropy +func (f Float64Data) Entropy() (float64, error) { + return Entropy(f) +} + +// Quartiles returns the three quartile points from instance of Float64Data +func (f Float64Data) Quartiles() (Quartiles, error) { + return Quartile(f) +} diff --git a/vendor/github.com/montanaflynn/stats/deviation.go b/vendor/github.com/montanaflynn/stats/deviation.go new file mode 100644 index 000000000..e69a19f60 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/deviation.go @@ -0,0 +1,57 @@ +package stats + +import "math" + +// MedianAbsoluteDeviation finds the median of the absolute deviations from the dataset median +func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error) { + return MedianAbsoluteDeviationPopulation(input) +} + +// MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median +func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error) { + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + i := copyslice(input) + m, _ := Median(i) + + for key, value := range i { + i[key] = math.Abs(value - m) + } + + return Median(i) +} + +// StandardDeviation the amount of variation in the dataset +func StandardDeviation(input Float64Data) (sdev float64, err error) { + return StandardDeviationPopulation(input) +} + +// StandardDeviationPopulation finds the amount of variation from the population +func StandardDeviationPopulation(input Float64Data) (sdev float64, err error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + // Get the population variance + vp, _ := PopulationVariance(input) + + // Return the population standard deviation + return math.Sqrt(vp), nil +} + +// StandardDeviationSample finds the amount of variation from a sample +func StandardDeviationSample(input Float64Data) (sdev float64, err error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + // Get the sample variance + vs, _ := SampleVariance(input) + + // Return the sample standard deviation + return math.Sqrt(vs), nil +} diff --git a/vendor/github.com/montanaflynn/stats/distances.go b/vendor/github.com/montanaflynn/stats/distances.go new file mode 100644 index 000000000..c2b7d8f8e --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/distances.go @@ -0,0 +1,88 @@ +package stats + +import ( + "math" +) + +// Validate data for distance calculation +func validateData(dataPointX, dataPointY Float64Data) error { + if len(dataPointX) == 0 || len(dataPointY) == 0 { + return EmptyInputErr + } + + if len(dataPointX) != len(dataPointY) { + return SizeErr + } + return nil +} + +// ChebyshevDistance computes the Chebyshev distance between two data sets +func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) { + err = validateData(dataPointX, dataPointY) + if err != nil { + return math.NaN(), err + } + var tempDistance float64 + for i := 0; i < len(dataPointY); i++ { + tempDistance = math.Abs(dataPointX[i] - dataPointY[i]) + if distance < tempDistance { + distance = tempDistance + } + } + return distance, nil +} + +// EuclideanDistance computes the Euclidean distance between two data sets +func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) { + + err = validateData(dataPointX, dataPointY) + if err != nil { + return math.NaN(), err + } + distance = 0 + for i := 0; i < len(dataPointX); i++ { + distance = 
distance + ((dataPointX[i] - dataPointY[i]) * (dataPointX[i] - dataPointY[i])) + } + return math.Sqrt(distance), nil +} + +// ManhattanDistance computes the Manhattan distance between two data sets +func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) { + err = validateData(dataPointX, dataPointY) + if err != nil { + return math.NaN(), err + } + distance = 0 + for i := 0; i < len(dataPointX); i++ { + distance = distance + math.Abs(dataPointX[i]-dataPointY[i]) + } + return distance, nil +} + +// MinkowskiDistance computes the Minkowski distance between two data sets +// +// Arguments: +// dataPointX: First set of data points +// dataPointY: Second set of data points. Length of both data +// sets must be equal. +// lambda: the order of the distance (often called p). With +// lambda = 1 the returned distance is the Manhattan +// distance, with lambda = 2 it is the Euclidean +// distance, and as lambda approaches infinity it +// converges to the Chebyshev distance. +// Return: +// Distance or error +func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error) { + err = validateData(dataPointX, dataPointY) + if err != nil { + return math.NaN(), err + } + for i := 0; i < len(dataPointY); i++ { + distance = distance + math.Pow(math.Abs(dataPointX[i]-dataPointY[i]), lambda) + } + distance = math.Pow(distance, 1/lambda) + if math.IsInf(distance, 1) { + return math.NaN(), InfValue + } + return distance, nil +} diff --git a/vendor/github.com/montanaflynn/stats/doc.go b/vendor/github.com/montanaflynn/stats/doc.go new file mode 100644 index 000000000..facb8d57b --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/doc.go @@ -0,0 +1,23 @@ +/* +Package stats is a well tested and comprehensive +statistics library package with no dependencies. + +Example Usage: + + // start with some source data to use + data := []float64{1.0, 2.1, 3.2, 4.823, 4.1, 5.8} + + // you could also use different types like this + // data := stats.LoadRawData([]int{1, 2, 3, 4, 5}) + // data := stats.LoadRawData([]interface{}{1.1, "2", 3}) + // etc...
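+	// (note: values that cannot be parsed as floats are silently dropped)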
+ + median, _ := stats.Median(data) + fmt.Println(median) // 3.65 + + roundedMedian, _ := stats.Round(median, 0) + fmt.Println(roundedMedian) // 4 + +MIT License Copyright (c) 2014-2020 Montana Flynn (https://montanaflynn.com) +*/ +package stats diff --git a/vendor/github.com/montanaflynn/stats/entropy.go b/vendor/github.com/montanaflynn/stats/entropy.go new file mode 100644 index 000000000..95263b0fc --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/entropy.go @@ -0,0 +1,31 @@ +package stats + +import "math" + +// Entropy provides calculation of the entropy +func Entropy(input Float64Data) (float64, error) { + input, err := normalize(input) + if err != nil { + return math.NaN(), err + } + var result float64 + for i := 0; i < input.Len(); i++ { + v := input.Get(i) + if v == 0 { + continue + } + result += (v * math.Log(v)) + } + return -result, nil +} + +func normalize(input Float64Data) (Float64Data, error) { + sum, err := input.Sum() + if err != nil { + return Float64Data{}, err + } + for i := 0; i < input.Len(); i++ { + input[i] = input[i] / sum + } + return input, nil +} diff --git a/vendor/github.com/montanaflynn/stats/errors.go b/vendor/github.com/montanaflynn/stats/errors.go new file mode 100644 index 000000000..95f82ff7b --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/errors.go @@ -0,0 +1,35 @@ +package stats + +type statsError struct { + err string +} + +func (s statsError) Error() string { + return s.err +} + +func (s statsError) String() string { + return s.err +} + +// These are the package-wide error values. +// All error identification should use these values. +// https://github.com/golang/go/wiki/Errors#naming +var ( + // ErrEmptyInput Input must not be empty + ErrEmptyInput = statsError{"Input must not be empty."} + // ErrNaN Not a number + ErrNaN = statsError{"Not a number."} + // ErrNegative Must not contain negative values + ErrNegative = statsError{"Must not contain negative values."} + // ErrZero Must not contain zero values + ErrZero = statsError{"Must not contain zero values."} + // ErrBounds Input is outside of range + ErrBounds = statsError{"Input is outside of range."} + // ErrSize Must be the same length + ErrSize = statsError{"Must be the same length."} + // ErrInfValue Value is infinite + ErrInfValue = statsError{"Value is infinite."} + // ErrYCoord Y Value must be greater than zero + ErrYCoord = statsError{"Y Value must be greater than zero."} +) diff --git a/vendor/github.com/montanaflynn/stats/go.mod b/vendor/github.com/montanaflynn/stats/go.mod new file mode 100644 index 000000000..7e3ca1aee --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/go.mod @@ -0,0 +1,3 @@ +module github.com/montanaflynn/stats + +go 1.13 diff --git a/vendor/github.com/montanaflynn/stats/legacy.go b/vendor/github.com/montanaflynn/stats/legacy.go new file mode 100644 index 000000000..0f3d1e8bb --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/legacy.go @@ -0,0 +1,49 @@ +package stats + +// VarP is a shortcut to PopulationVariance +func VarP(input Float64Data) (sdev float64, err error) { + return PopulationVariance(input) +} + +// VarS is a shortcut to SampleVariance +func VarS(input Float64Data) (sdev float64, err error) { + return SampleVariance(input) +} + +// StdDevP is a shortcut to StandardDeviationPopulation +func StdDevP(input Float64Data) (sdev float64, err error) { + return StandardDeviationPopulation(input) +} + +// StdDevS is a shortcut to StandardDeviationSample +func StdDevS(input Float64Data) (sdev float64, err error) { + return 
StandardDeviationSample(input) +} + +// LinReg is a shortcut to LinearRegression +func LinReg(s []Coordinate) (regressions []Coordinate, err error) { + return LinearRegression(s) +} + +// ExpReg is a shortcut to ExponentialRegression +func ExpReg(s []Coordinate) (regressions []Coordinate, err error) { + return ExponentialRegression(s) +} + +// LogReg is a shortcut to LogarithmicRegression +func LogReg(s []Coordinate) (regressions []Coordinate, err error) { + return LogarithmicRegression(s) +} + +// Legacy error names that didn't start with Err +var ( + EmptyInputErr = ErrEmptyInput + NaNErr = ErrNaN + NegativeErr = ErrNegative + ZeroErr = ErrZero + BoundsErr = ErrBounds + SizeErr = ErrSize + InfValue = ErrInfValue + YCoordErr = ErrYCoord + EmptyInput = ErrEmptyInput +) diff --git a/vendor/github.com/montanaflynn/stats/load.go b/vendor/github.com/montanaflynn/stats/load.go new file mode 100644 index 000000000..0eb0e2729 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/load.go @@ -0,0 +1,199 @@ +package stats + +import ( + "bufio" + "io" + "strconv" + "strings" + "time" +) + +// LoadRawData parses and converts a slice of mixed data types to floats +func LoadRawData(raw interface{}) (f Float64Data) { + var r []interface{} + var s Float64Data + + switch t := raw.(type) { + case []interface{}: + r = t + case []uint: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []uint8: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []uint16: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []uint32: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []uint64: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []bool: + for _, v := range t { + if v { + s = append(s, 1.0) + } else { + s = append(s, 0.0) + } + } + return s + case []float64: + return Float64Data(t) + case []int: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []int8: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []int16: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []int32: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []int64: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []string: + for _, v := range t { + r = append(r, v) + } + case []time.Duration: + for _, v := range t { + r = append(r, v) + } + case map[int]int: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]int8: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]int16: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]int32: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]int64: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]string: + for i := 0; i < len(t); i++ { + r = append(r, t[i]) + } + case map[int]uint: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]uint8: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]uint16: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]uint32: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]uint64: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]bool: + 
for i := 0; i < len(t); i++ { + if t[i] { + s = append(s, 1.0) + } else { + s = append(s, 0.0) + } + } + return s + case map[int]float64: + for i := 0; i < len(t); i++ { + s = append(s, t[i]) + } + return s + case map[int]time.Duration: + for i := 0; i < len(t); i++ { + r = append(r, t[i]) + } + case string: + for _, v := range strings.Fields(t) { + r = append(r, v) + } + case io.Reader: + scanner := bufio.NewScanner(t) + for scanner.Scan() { + l := scanner.Text() + for _, v := range strings.Fields(l) { + r = append(r, v) + } + } + } + + for _, v := range r { + switch t := v.(type) { + case int: + a := float64(t) + f = append(f, a) + case uint: + f = append(f, float64(t)) + case float64: + f = append(f, t) + case string: + fl, err := strconv.ParseFloat(t, 64) + if err == nil { + f = append(f, fl) + } + case bool: + if t { + f = append(f, 1.0) + } else { + f = append(f, 0.0) + } + case time.Duration: + f = append(f, float64(t)) + } + } + return f +} diff --git a/vendor/github.com/montanaflynn/stats/max.go b/vendor/github.com/montanaflynn/stats/max.go new file mode 100644 index 000000000..bb8c83c32 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/max.go @@ -0,0 +1,26 @@ +package stats + +import ( + "math" +) + +// Max finds the highest number in a slice +func Max(input Float64Data) (max float64, err error) { + + // Return an error if there are no numbers + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + // Get the first value as the starting point + max = input.Get(0) + + // Loop and replace higher values + for i := 1; i < input.Len(); i++ { + if input.Get(i) > max { + max = input.Get(i) + } + } + + return max, nil +} diff --git a/vendor/github.com/montanaflynn/stats/mean.go b/vendor/github.com/montanaflynn/stats/mean.go new file mode 100644 index 000000000..a78d299ae --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/mean.go @@ -0,0 +1,60 @@ +package stats + +import "math" + +// Mean gets the average of a slice of numbers +func Mean(input Float64Data) (float64, error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + sum, _ := input.Sum() + + return sum / float64(input.Len()), nil +} + +// GeometricMean gets the geometric mean for a slice of numbers +func GeometricMean(input Float64Data) (float64, error) { + + l := input.Len() + if l == 0 { + return math.NaN(), EmptyInputErr + } + + // Get the product of all the numbers + var p float64 + for _, n := range input { + if p == 0 { + p = n + } else { + p *= n + } + } + + // Calculate the geometric mean + return math.Pow(p, 1/float64(l)), nil +} + +// HarmonicMean gets the harmonic mean for a slice of numbers +func HarmonicMean(input Float64Data) (float64, error) { + + l := input.Len() + if l == 0 { + return math.NaN(), EmptyInputErr + } + + // Get the sum of all the numbers reciprocals and return an + // error for values that cannot be included in harmonic mean + var p float64 + for _, n := range input { + if n < 0 { + return math.NaN(), NegativeErr + } else if n == 0 { + return math.NaN(), ZeroErr + } + p += (1 / n) + } + + return float64(l) / p, nil +} diff --git a/vendor/github.com/montanaflynn/stats/median.go b/vendor/github.com/montanaflynn/stats/median.go new file mode 100644 index 000000000..a678c3653 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/median.go @@ -0,0 +1,25 @@ +package stats + +import "math" + +// Median gets the median number in a slice of numbers +func Median(input Float64Data) (median float64, err error) { + + // Start by sorting a copy of the slice + c := 
sortedCopy(input) + + // No math is needed if there are no numbers + // For even numbers we add the two middle numbers + // and divide by two using the mean function above + // For odd numbers we just use the middle number + l := len(c) + if l == 0 { + return math.NaN(), EmptyInputErr + } else if l%2 == 0 { + median, _ = Mean(c[l/2-1 : l/2+1]) + } else { + median = c[l/2] + } + + return median, nil +} diff --git a/vendor/github.com/montanaflynn/stats/min.go b/vendor/github.com/montanaflynn/stats/min.go new file mode 100644 index 000000000..bf7e70acf --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/min.go @@ -0,0 +1,26 @@ +package stats + +import "math" + +// Min finds the lowest number in a set of data +func Min(input Float64Data) (min float64, err error) { + + // Get the count of numbers in the slice + l := input.Len() + + // Return an error if there are no numbers + if l == 0 { + return math.NaN(), EmptyInputErr + } + + // Get the first value as the starting point + min = input.Get(0) + + // Iterate until done checking for a lower value + for i := 1; i < l; i++ { + if input.Get(i) < min { + min = input.Get(i) + } + } + return min, nil +} diff --git a/vendor/github.com/montanaflynn/stats/mode.go b/vendor/github.com/montanaflynn/stats/mode.go new file mode 100644 index 000000000..a7cf9f7a4 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/mode.go @@ -0,0 +1,47 @@ +package stats + +// Mode gets the mode [most frequent value(s)] of a slice of float64s +func Mode(input Float64Data) (mode []float64, err error) { + // Return the input if there's only one number + l := input.Len() + if l == 1 { + return input, nil + } else if l == 0 { + return nil, EmptyInputErr + } + + c := sortedCopyDif(input) + // Traverse sorted array, + // tracking the longest repeating sequence + mode = make([]float64, 5) + cnt, maxCnt := 1, 1 + for i := 1; i < l; i++ { + switch { + case c[i] == c[i-1]: + cnt++ + case cnt == maxCnt && maxCnt != 1: + mode = append(mode, c[i-1]) + cnt = 1 + case cnt > maxCnt: + mode = append(mode[:0], c[i-1]) + maxCnt, cnt = cnt, 1 + default: + cnt = 1 + } + } + switch { + case cnt == maxCnt: + mode = append(mode, c[l-1]) + case cnt > maxCnt: + mode = append(mode[:0], c[l-1]) + maxCnt = cnt + } + + // Since length must be greater than 1, + // check for slices of distinct values + if maxCnt == 1 || len(mode)*maxCnt == l && maxCnt != l { + return Float64Data{}, nil + } + + return mode, nil +} diff --git a/vendor/github.com/montanaflynn/stats/norm.go b/vendor/github.com/montanaflynn/stats/norm.go new file mode 100644 index 000000000..4eb8eb8b9 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/norm.go @@ -0,0 +1,254 @@ +package stats + +import ( + "math" + "math/rand" + "strings" + "time" +) + +// NormPpfRvs generates random variates using the Point Percentile Function. +// For more information please visit: https://demonstrations.wolfram.com/TheMethodOfInverseTransforms/ +func NormPpfRvs(loc float64, scale float64, size int) []float64 { + rand.Seed(time.Now().UnixNano()) + var toReturn []float64 + for i := 0; i < size; i++ { + toReturn = append(toReturn, NormPpf(rand.Float64(), loc, scale)) + } + return toReturn +} + +// NormBoxMullerRvs generates random variates using the Box–Muller transform. 
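+// Each iteration draws two uniform variates and yields up to two normal variates, so only about size/2 iterations are needed.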
+// For more information please visit: http://mathworld.wolfram.com/Box-MullerTransformation.html +func NormBoxMullerRvs(loc float64, scale float64, size int) []float64 { + rand.Seed(time.Now().UnixNano()) + var toReturn []float64 + for i := 0; i < int(float64(size/2)+float64(size%2)); i++ { + // u1 and u2 are uniformly distributed random numbers between 0 and 1. + u1 := rand.Float64() + u2 := rand.Float64() + // x1 and x2 are normally distributed random numbers. + x1 := loc + (scale * (math.Sqrt(-2*math.Log(u1)) * math.Cos(2*math.Pi*u2))) + toReturn = append(toReturn, x1) + if (i+1)*2 <= size { + x2 := loc + (scale * (math.Sqrt(-2*math.Log(u1)) * math.Sin(2*math.Pi*u2))) + toReturn = append(toReturn, x2) + } + } + return toReturn +} + +// NormPdf is the probability density function. +func NormPdf(x float64, loc float64, scale float64) float64 { + return (math.Pow(math.E, -(math.Pow(x-loc, 2))/(2*math.Pow(scale, 2)))) / (scale * math.Sqrt(2*math.Pi)) +} + +// NormLogPdf is the log of the probability density function. +func NormLogPdf(x float64, loc float64, scale float64) float64 { + return math.Log((math.Pow(math.E, -(math.Pow(x-loc, 2))/(2*math.Pow(scale, 2)))) / (scale * math.Sqrt(2*math.Pi))) +} + +// NormCdf is the cumulative distribution function. +func NormCdf(x float64, loc float64, scale float64) float64 { + return 0.5 * (1 + math.Erf((x-loc)/(scale*math.Sqrt(2)))) +} + +// NormLogCdf is the log of the cumulative distribution function. +func NormLogCdf(x float64, loc float64, scale float64) float64 { + return math.Log(0.5 * (1 + math.Erf((x-loc)/(scale*math.Sqrt(2))))) +} + +// NormSf is the survival function (also defined as 1 - cdf, but sf is sometimes more accurate). +func NormSf(x float64, loc float64, scale float64) float64 { + return 1 - 0.5*(1+math.Erf((x-loc)/(scale*math.Sqrt(2)))) +} + +// NormLogSf is the log of the survival function. +func NormLogSf(x float64, loc float64, scale float64) float64 { + return math.Log(1 - 0.5*(1+math.Erf((x-loc)/(scale*math.Sqrt(2))))) +} + +// NormPpf is the point percentile function. +// This is based on Peter John Acklam's inverse normal CDF. +// algorithm: http://home.online.no/~pjacklam/notes/invnorm/ (no longer visible). 
+// For more information please visit: https://stackedboxes.org/2017/05/01/acklams-normal-quantile-function/ +func NormPpf(p float64, loc float64, scale float64) (x float64) { + const ( + a1 = -3.969683028665376e+01 + a2 = 2.209460984245205e+02 + a3 = -2.759285104469687e+02 + a4 = 1.383577518672690e+02 + a5 = -3.066479806614716e+01 + a6 = 2.506628277459239e+00 + + b1 = -5.447609879822406e+01 + b2 = 1.615858368580409e+02 + b3 = -1.556989798598866e+02 + b4 = 6.680131188771972e+01 + b5 = -1.328068155288572e+01 + + c1 = -7.784894002430293e-03 + c2 = -3.223964580411365e-01 + c3 = -2.400758277161838e+00 + c4 = -2.549732539343734e+00 + c5 = 4.374664141464968e+00 + c6 = 2.938163982698783e+00 + + d1 = 7.784695709041462e-03 + d2 = 3.224671290700398e-01 + d3 = 2.445134137142996e+00 + d4 = 3.754408661907416e+00 + + plow = 0.02425 + phigh = 1 - plow + ) + + if p < 0 || p > 1 { + return math.NaN() + } else if p == 0 { + return -math.Inf(0) + } else if p == 1 { + return math.Inf(0) + } + + if p < plow { + q := math.Sqrt(-2 * math.Log(p)) + x = (((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q + c6) / + ((((d1*q+d2)*q+d3)*q+d4)*q + 1) + } else if phigh < p { + q := math.Sqrt(-2 * math.Log(1-p)) + x = -(((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q + c6) / + ((((d1*q+d2)*q+d3)*q+d4)*q + 1) + } else { + q := p - 0.5 + r := q * q + x = (((((a1*r+a2)*r+a3)*r+a4)*r+a5)*r + a6) * q / + (((((b1*r+b2)*r+b3)*r+b4)*r+b5)*r + 1) + } + + e := 0.5*math.Erfc(-x/math.Sqrt2) - p + u := e * math.Sqrt(2*math.Pi) * math.Exp(x*x/2) + x = x - u/(1+x*u/2) + + return x*scale + loc +} + +// NormIsf is the inverse survival function (inverse of sf). +func NormIsf(p float64, loc float64, scale float64) (x float64) { + if -NormPpf(p, loc, scale) == 0 { + return 0 + } + return -NormPpf(p, loc, scale) +} + +// NormMoment approximates the non-central (raw) moment of order n. +// For more information please visit: https://math.stackexchange.com/questions/1945448/methods-for-finding-raw-moments-of-the-normal-distribution +func NormMoment(n int, loc float64, scale float64) float64 { + toReturn := 0.0 + for i := 0; i < n+1; i++ { + if (n-i)%2 == 0 { + toReturn += float64(Ncr(n, i)) * (math.Pow(loc, float64(i))) * (math.Pow(scale, float64(n-i))) * + (float64(factorial(n-i)) / ((math.Pow(2.0, float64((n-i)/2))) * + float64(factorial((n-i)/2)))) + } + } + return toReturn +} + +// NormStats returns the mean, variance, skew, and/or kurtosis. +// Mean(‘m’), variance(‘v’), skew(‘s’), and/or kurtosis(‘k’). +// Takes string containing any of 'mvsk'. +// Returns array of m v s k in that order. +func NormStats(loc float64, scale float64, moments string) []float64 { + var toReturn []float64 + if strings.ContainsAny(moments, "m") { + toReturn = append(toReturn, loc) + } + if strings.ContainsAny(moments, "v") { + toReturn = append(toReturn, math.Pow(scale, 2)) + } + if strings.ContainsAny(moments, "s") { + toReturn = append(toReturn, 0.0) + } + if strings.ContainsAny(moments, "k") { + toReturn = append(toReturn, 0.0) + } + return toReturn +} + +// NormEntropy is the differential entropy of the RV. +func NormEntropy(loc float64, scale float64) float64 { + return math.Log(scale * math.Sqrt(2*math.Pi*math.E)) +} + +// NormFit returns the maximum likelihood estimators for the Normal Distribution. +// Takes array of float64 values. +// Returns array of Mean followed by Standard Deviation. 
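+// (The standard deviation is the maximum-likelihood, population form: the sum of squared deviations is divided by n, not n-1.)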
+func NormFit(data []float64) [2]float64 { + sum := 0.00 + for i := 0; i < len(data); i++ { + sum += data[i] + } + mean := sum / float64(len(data)) + stdNumerator := 0.00 + for i := 0; i < len(data); i++ { + stdNumerator += math.Pow(data[i]-mean, 2) + } + return [2]float64{mean, math.Sqrt((stdNumerator) / (float64(len(data))))} +} + +// NormMedian is the median of the distribution. +func NormMedian(loc float64, scale float64) float64 { + return loc +} + +// NormMean is the mean/expected value of the distribution. +func NormMean(loc float64, scale float64) float64 { + return loc +} + +// NormVar is the variance of the distribution. +func NormVar(loc float64, scale float64) float64 { + return math.Pow(scale, 2) +} + +// NormStd is the standard deviation of the distribution. +func NormStd(loc float64, scale float64) float64 { + return scale +} + +// NormInterval finds the endpoints of the range that contains the central alpha fraction of the distribution. +func NormInterval(alpha float64, loc float64, scale float64) [2]float64 { + q1 := (1.0 - alpha) / 2 + q2 := (1.0 + alpha) / 2 + a := NormPpf(q1, loc, scale) + b := NormPpf(q2, loc, scale) + return [2]float64{a, b} +} + +// factorial is the naive factorial algorithm. +func factorial(x int) int { + if x == 0 { + return 1 + } + return x * factorial(x-1) +} + +// Ncr is an N choose R algorithm. +// Aaron Cannon's algorithm. +func Ncr(n, r int) int { + if n <= 1 || r == 0 || n == r { + return 1 + } + if newR := n - r; newR < r { + r = newR + } + if r == 1 { + return n + } + ret := int(n - r + 1) + for i, j := ret+1, int(2); j <= r; i, j = i+1, j+1 { + ret = ret * i / j + } + return ret +} diff --git a/vendor/github.com/montanaflynn/stats/outlier.go b/vendor/github.com/montanaflynn/stats/outlier.go new file mode 100644 index 000000000..7c9795bd3 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/outlier.go @@ -0,0 +1,44 @@ +package stats + +// Outliers holds mild and extreme outliers found in data +type Outliers struct { + Mild Float64Data + Extreme Float64Data +} + +// QuartileOutliers finds the mild and extreme outliers +func QuartileOutliers(input Float64Data) (Outliers, error) { + if input.Len() == 0 { + return Outliers{}, EmptyInputErr + } + + // Start by sorting a copy of the slice + copy := sortedCopy(input) + + // Calculate the quartiles and interquartile range + qs, _ := Quartile(copy) + iqr, _ := InterQuartileRange(copy) + + // Calculate the lower and upper inner and outer fences + lif := qs.Q1 - (1.5 * iqr) + uif := qs.Q3 + (1.5 * iqr) + lof := qs.Q1 - (3 * iqr) + uof := qs.Q3 + (3 * iqr) + + // Find the data points that are outside of the + // inner and outer fences and add them to mild + // and extreme outlier slices + var mild Float64Data + var extreme Float64Data + for _, v := range copy { + + if v < lof || v > uof { + extreme = append(extreme, v) + } else if v < lif || v > uif { + mild = append(mild, v) + } + } + + // Wrap them into our struct + return Outliers{mild, extreme}, nil +} diff --git a/vendor/github.com/montanaflynn/stats/percentile.go b/vendor/github.com/montanaflynn/stats/percentile.go new file mode 100644 index 000000000..f5641783e --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/percentile.go @@ -0,0 +1,86 @@ +package stats + +import ( + "math" +) + +// Percentile finds the relative standing in a slice of floats +func Percentile(input Float64Data, percent float64) (percentile float64, err error) { + length := input.Len() + if length == 0 { + return math.NaN(), EmptyInputErr + } + + if length == 1 { + return input[0],
nil + } + + if percent <= 0 || percent > 100 { + return math.NaN(), BoundsErr + } + + // Start by sorting a copy of the slice + c := sortedCopy(input) + + // Multiply percent by length of input + index := (percent / 100) * float64(len(c)) + + // Check if the index is a whole number + if index == float64(int64(index)) { + + // Convert float to int + i := int(index) + + // Find the value at the index + percentile = c[i-1] + + } else if index > 1 { + + // Convert float to int via truncation + i := int(index) + + // Find the average of the index and following values + percentile, _ = Mean(Float64Data{c[i-1], c[i]}) + + } else { + return math.NaN(), BoundsErr + } + + return percentile, nil + +} + +// PercentileNearestRank finds the relative standing in a slice of floats using the Nearest Rank method +func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error) { + + // Find the length of items in the slice + il := input.Len() + + // Return an error for empty slices + if il == 0 { + return math.NaN(), EmptyInputErr + } + + // Return error for less than 0 or greater than 100 percentages + if percent < 0 || percent > 100 { + return math.NaN(), BoundsErr + } + + // Start by sorting a copy of the slice + c := sortedCopy(input) + + // Return the last item + if percent == 100.0 { + return c[il-1], nil + } + + // Find ordinal ranking + or := int(math.Ceil(float64(il) * percent / 100)) + + // Return the item that is in the place of the ordinal rank + if or == 0 { + return c[0], nil + } + return c[or-1], nil + +} diff --git a/vendor/github.com/montanaflynn/stats/quartile.go b/vendor/github.com/montanaflynn/stats/quartile.go new file mode 100644 index 000000000..40bbf6e57 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/quartile.go @@ -0,0 +1,74 @@ +package stats + +import "math" + +// Quartiles holds the three quartile points +type Quartiles struct { + Q1 float64 + Q2 float64 + Q3 float64 +} + +// Quartile returns the three quartile points from a slice of data +func Quartile(input Float64Data) (Quartiles, error) { + + il := input.Len() + if il == 0 { + return Quartiles{}, EmptyInputErr + } + + // Start by sorting a copy of the slice + copy := sortedCopy(input) + + // Find the cutoff places depending on whether + // the input slice length is even or odd + var c1 int + var c2 int + if il%2 == 0 { + c1 = il / 2 + c2 = il / 2 + } else { + c1 = (il - 1) / 2 + c2 = c1 + 1 + } + + // Find the Medians with the cutoff points + Q1, _ := Median(copy[:c1]) + Q2, _ := Median(copy) + Q3, _ := Median(copy[c2:]) + + return Quartiles{Q1, Q2, Q3}, nil + +} + +// InterQuartileRange finds the range between Q1 and Q3 +func InterQuartileRange(input Float64Data) (float64, error) { + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + qs, _ := Quartile(input) + iqr := qs.Q3 - qs.Q1 + return iqr, nil +} + +// Midhinge finds the average of the first and third quartiles +func Midhinge(input Float64Data) (float64, error) { + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + qs, _ := Quartile(input) + mh := (qs.Q1 + qs.Q3) / 2 + return mh, nil +} + +// Trimean finds the average of the median and the midhinge +func Trimean(input Float64Data) (float64, error) { + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + c := sortedCopy(input) + q, _ := Quartile(c) + + return (q.Q1 + (q.Q2 * 2) + q.Q3) / 4, nil +} diff --git a/vendor/github.com/montanaflynn/stats/ranksum.go b/vendor/github.com/montanaflynn/stats/ranksum.go new file mode 100644 index 000000000..fc424ef4e
--- /dev/null +++ b/vendor/github.com/montanaflynn/stats/ranksum.go @@ -0,0 +1,183 @@ +package stats + +// import "math" +// +// // WilcoxonRankSum tests the null hypothesis that two sets +// // of data are drawn from the same distribution. It does +// // not handle ties between measurements in x and y. +// // +// // Parameters: +// // data1 Float64Data: First set of data points. +// // data2 Float64Data: Second set of data points. +// // Length of both data samples must be equal. +// // +// // Return: +// // statistic float64: The test statistic under the +// // large-sample approximation that the +// // rank sum statistic is normally distributed. +// // pvalue float64: The two-sided p-value of the test +// // err error: Any error from the input data parameters +// // +// // https://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test +// func WilcoxonRankSum(data1, data2 Float64Data) (float64, float64, error) { +// +// l1 := data1.Len() +// l2 := data2.Len() +// +// if l1 == 0 || l2 == 0 { +// return math.NaN(), math.NaN(), EmptyInputErr +// } +// +// if l1 != l2 { +// return math.NaN(), math.NaN(), SizeErr +// } +// +// alldata := Float64Data{} +// alldata = append(alldata, data1...) +// alldata = append(alldata, data2...) +// +// // ranked := +// +// return 0.0, 0.0, nil +// } +// +// // x, y = map(np.asarray, (x, y)) +// // n1 = len(x) +// // n2 = len(y) +// // alldata = np.concatenate((x, y)) +// // ranked = rankdata(alldata) +// // x = ranked[:n1] +// // s = np.sum(x, axis=0) +// // expected = n1 * (n1+n2+1) / 2.0 +// // z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0) +// // prob = 2 * distributions.norm.sf(abs(z)) +// // +// // return RanksumsResult(z, prob) +// +// // def rankdata(a, method='average'): +// // """ +// // Assign ranks to data, dealing with ties appropriately. +// // Ranks begin at 1. The `method` argument controls how ranks are assigned +// // to equal values. See [1]_ for further discussion of ranking methods. +// // Parameters +// // ---------- +// // a : array_like +// // The array of values to be ranked. The array is first flattened. +// // method : str, optional +// // The method used to assign ranks to tied elements. +// // The options are 'average', 'min', 'max', 'dense' and 'ordinal'. +// // 'average': +// // The average of the ranks that would have been assigned to +// // all the tied values is assigned to each value. +// // 'min': +// // The minimum of the ranks that would have been assigned to all +// // the tied values is assigned to each value. (This is also +// // referred to as "competition" ranking.) +// // 'max': +// // The maximum of the ranks that would have been assigned to all +// // the tied values is assigned to each value. +// // 'dense': +// // Like 'min', but the rank of the next highest element is assigned +// // the rank immediately after those assigned to the tied elements. +// // 'ordinal': +// // All values are given a distinct rank, corresponding to the order +// // that the values occur in `a`. +// // The default is 'average'. +// // Returns +// // ------- +// // ranks : ndarray +// // An array of length equal to the size of `a`, containing rank +// // scores. +// // References +// // ---------- +// // .. [1] "Ranking", https://en.wikipedia.org/wiki/Ranking +// // Examples +// // -------- +// // >>> from scipy.stats import rankdata +// // >>> rankdata([0, 2, 3, 2]) +// // array([ 1. , 2.5, 4. 
, 2.5]) +// // """ +// // +// // arr = np.ravel(np.asarray(a)) +// // algo = 'quicksort' +// // sorter = np.argsort(arr, kind=algo) +// // +// // inv = np.empty(sorter.size, dtype=np.intp) +// // inv[sorter] = np.arange(sorter.size, dtype=np.intp) +// // +// // +// // arr = arr[sorter] +// // obs = np.r_[True, arr[1:] != arr[:-1]] +// // dense = obs.cumsum()[inv] +// // +// // +// // # cumulative counts of each unique value +// // count = np.r_[np.nonzero(obs)[0], len(obs)] +// // +// // # average method +// // return .5 * (count[dense] + count[dense - 1] + 1) +// +// type rankable interface { +// Len() int +// RankEqual(int, int) bool +// } +// +// func StandardRank(d rankable) []float64 { +// r := make([]float64, d.Len()) +// var k int +// for i := range r { +// if i == 0 || !d.RankEqual(i, i-1) { +// k = i + 1 +// } +// r[i] = float64(k) +// } +// return r +// } +// +// func ModifiedRank(d rankable) []float64 { +// r := make([]float64, d.Len()) +// for i := range r { +// k := i + 1 +// for j := i + 1; j < len(r) && d.RankEqual(i, j); j++ { +// k = j + 1 +// } +// r[i] = float64(k) +// } +// return r +// } +// +// func DenseRank(d rankable) []float64 { +// r := make([]float64, d.Len()) +// var k int +// for i := range r { +// if i == 0 || !d.RankEqual(i, i-1) { +// k++ +// } +// r[i] = float64(k) +// } +// return r +// } +// +// func OrdinalRank(d rankable) []float64 { +// r := make([]float64, d.Len()) +// for i := range r { +// r[i] = float64(i + 1) +// } +// return r +// } +// +// func FractionalRank(d rankable) []float64 { +// r := make([]float64, d.Len()) +// for i := 0; i < len(r); { +// var j int +// f := float64(i + 1) +// for j = i + 1; j < len(r) && d.RankEqual(i, j); j++ { +// f += float64(j + 1) +// } +// f /= float64(j - i) +// for ; i < j; i++ { +// r[i] = f +// } +// } +// return r +// } diff --git a/vendor/github.com/montanaflynn/stats/regression.go b/vendor/github.com/montanaflynn/stats/regression.go new file mode 100644 index 000000000..401d95120 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/regression.go @@ -0,0 +1,113 @@ +package stats + +import "math" + +// Series is a container for a series of data +type Series []Coordinate + +// Coordinate holds the data in a series +type Coordinate struct { + X, Y float64 +} + +// LinearRegression finds the least squares linear regression on data series +func LinearRegression(s Series) (regressions Series, err error) { + + if len(s) == 0 { + return nil, EmptyInputErr + } + + // Placeholder for the math to be done + var sum [5]float64 + + // Loop over data keeping index in place + i := 0 + for ; i < len(s); i++ { + sum[0] += s[i].X + sum[1] += s[i].Y + sum[2] += s[i].X * s[i].X + sum[3] += s[i].X * s[i].Y + sum[4] += s[i].Y * s[i].Y + } + + // Find gradient and intercept + f := float64(i) + gradient := (f*sum[3] - sum[0]*sum[1]) / (f*sum[2] - sum[0]*sum[0]) + intercept := (sum[1] / f) - (gradient * sum[0] / f) + + // Create the new regression series + for j := 0; j < len(s); j++ { + regressions = append(regressions, Coordinate{ + X: s[j].X, + Y: s[j].X*gradient + intercept, + }) + } + + return regressions, nil +} + +// ExponentialRegression returns an exponential regression on data series +func ExponentialRegression(s Series) (regressions Series, err error) { + + if len(s) == 0 { + return nil, EmptyInputErr + } + + var sum [6]float64 + + for i := 0; i < len(s); i++ { + if s[i].Y < 0 { + return nil, YCoordErr + } + sum[0] += s[i].X + sum[1] += s[i].Y + sum[2] += s[i].X * s[i].X * s[i].Y + sum[3] += s[i].Y * 
math.Log(s[i].Y) + sum[4] += s[i].X * s[i].Y * math.Log(s[i].Y) + sum[5] += s[i].X * s[i].Y + } + + denominator := (sum[1]*sum[2] - sum[5]*sum[5]) + a := math.Pow(math.E, (sum[2]*sum[3]-sum[5]*sum[4])/denominator) + b := (sum[1]*sum[4] - sum[5]*sum[3]) / denominator + + for j := 0; j < len(s); j++ { + regressions = append(regressions, Coordinate{ + X: s[j].X, + Y: a * math.Exp(b*s[j].X), + }) + } + + return regressions, nil +} + +// LogarithmicRegression returns a logarithmic regression on data series +func LogarithmicRegression(s Series) (regressions Series, err error) { + + if len(s) == 0 { + return nil, EmptyInputErr + } + + var sum [4]float64 + + i := 0 + for ; i < len(s); i++ { + sum[0] += math.Log(s[i].X) + sum[1] += s[i].Y * math.Log(s[i].X) + sum[2] += s[i].Y + sum[3] += math.Pow(math.Log(s[i].X), 2) + } + + f := float64(i) + a := (f*sum[1] - sum[2]*sum[0]) / (f*sum[3] - sum[0]*sum[0]) + b := (sum[2] - a*sum[0]) / f + + for j := 0; j < len(s); j++ { + regressions = append(regressions, Coordinate{ + X: s[j].X, + Y: b + a*math.Log(s[j].X), + }) + } + + return regressions, nil +} diff --git a/vendor/github.com/montanaflynn/stats/round.go b/vendor/github.com/montanaflynn/stats/round.go new file mode 100644 index 000000000..b66779c9f --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/round.go @@ -0,0 +1,38 @@ +package stats + +import "math" + +// Round a float to a specific decimal place or precision +func Round(input float64, places int) (rounded float64, err error) { + + // If the float is not a number + if math.IsNaN(input) { + return math.NaN(), NaNErr + } + + // Find out the actual sign and correct the input for later + sign := 1.0 + if input < 0 { + sign = -1 + input *= -1 + } + + // Use the places arg to get the amount of precision wanted + precision := math.Pow(10, float64(places)) + + // Find the decimal place we are looking to round + digit := input * precision + + // Get the actual decimal number as a fraction to be compared + _, decimal := math.Modf(digit) + + // If the decimal is less than .5 we round down otherwise up + if decimal >= 0.5 { + rounded = math.Ceil(digit) + } else { + rounded = math.Floor(digit) + } + + // Finally we do the math to actually create a rounded number + return rounded / precision * sign, nil +} diff --git a/vendor/github.com/montanaflynn/stats/sample.go b/vendor/github.com/montanaflynn/stats/sample.go new file mode 100644 index 000000000..40166af6a --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/sample.go @@ -0,0 +1,76 @@ +package stats + +import ( + "math/rand" + "sort" +) + +// Sample returns a sample of the input, with or without replacement +func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error) { + + if input.Len() == 0 { + return nil, EmptyInputErr + } + + length := input.Len() + if replacement { + + result := Float64Data{} + rand.Seed(unixnano()) + + // On each step, randomly pick one of the input values + for i := 0; i < takenum; i++ { + idx := rand.Intn(length) + result = append(result, input[idx]) + } + + return result, nil + + } else if !replacement && takenum <= length { + + rand.Seed(unixnano()) + + // Get a permutation of the input indices + perm := rand.Perm(length) + result := Float64Data{} + + // Get elements of input by permuted index + for _, idx := range perm[0:takenum] { + result = append(result, input[idx]) + } + + return result, nil + + } + + return nil, BoundsErr +} + +// StableSample, like a stable sort, returns samples from input while keeping the order of the original data.
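+// For example, taking 2 of {10, 20, 30, 40} may return {20, 40} but never {40, 20}, because the chosen indices are sorted before the values are collected.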
+func StableSample(input Float64Data, takenum int) ([]float64, error) { + if input.Len() == 0 { + return nil, EmptyInputErr + } + + length := input.Len() + + if takenum <= length { + + rand.Seed(unixnano()) + + perm := rand.Perm(length) + perm = perm[0:takenum] + // Sort perm before applying + sort.Ints(perm) + result := Float64Data{} + + for _, idx := range perm { + result = append(result, input[idx]) + } + + return result, nil + + } + + return nil, BoundsErr +} diff --git a/vendor/github.com/montanaflynn/stats/sigmoid.go b/vendor/github.com/montanaflynn/stats/sigmoid.go new file mode 100644 index 000000000..5f2559d81 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/sigmoid.go @@ -0,0 +1,18 @@ +package stats + +import "math" + +// Sigmoid returns the input values in the range of 0 to 1 +// along the sigmoid or s-shaped curve, commonly used in +// machine learning while training neural networks as an +// activation function. +func Sigmoid(input Float64Data) ([]float64, error) { + if input.Len() == 0 { + return Float64Data{}, EmptyInput + } + s := make([]float64, len(input)) + for i, v := range input { + s[i] = 1 / (1 + math.Exp(-v)) + } + return s, nil +} diff --git a/vendor/github.com/montanaflynn/stats/softmax.go b/vendor/github.com/montanaflynn/stats/softmax.go new file mode 100644 index 000000000..85072642b --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/softmax.go @@ -0,0 +1,25 @@ +package stats + +import "math" + +// SoftMax returns the input values in the range of 0 to 1 +// with sum of all the probabilities being equal to one. It +// is commonly used in machine learning neural networks. +func SoftMax(input Float64Data) ([]float64, error) { + if input.Len() == 0 { + return Float64Data{}, EmptyInput + } + + s := 0.0 + c, _ := Max(input) + for _, e := range input { + s += math.Exp(e - c) + } + + sm := make([]float64, len(input)) + for i, v := range input { + sm[i] = math.Exp(v-c) / s + } + + return sm, nil +} diff --git a/vendor/github.com/montanaflynn/stats/sum.go b/vendor/github.com/montanaflynn/stats/sum.go new file mode 100644 index 000000000..15b611d17 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/sum.go @@ -0,0 +1,18 @@ +package stats + +import "math" + +// Sum adds all the numbers of a slice together +func Sum(input Float64Data) (sum float64, err error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + // Add em up + for _, n := range input { + sum += n + } + + return sum, nil +} diff --git a/vendor/github.com/montanaflynn/stats/util.go b/vendor/github.com/montanaflynn/stats/util.go new file mode 100644 index 000000000..881997604 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/util.go @@ -0,0 +1,43 @@ +package stats + +import ( + "sort" + "time" +) + +// float64ToInt rounds a float64 to an int +func float64ToInt(input float64) (output int) { + r, _ := Round(input, 0) + return int(r) +} + +// unixnano returns nanoseconds from UTC epoch +func unixnano() int64 { + return time.Now().UTC().UnixNano() +} + +// copyslice copies a slice of float64s +func copyslice(input Float64Data) Float64Data { + s := make(Float64Data, input.Len()) + copy(s, input) + return s +} + +// sortedCopy returns a sorted copy of float64s +func sortedCopy(input Float64Data) (copy Float64Data) { + copy = copyslice(input) + sort.Float64s(copy) + return +} + +// sortedCopyDif returns the input itself when it is already sorted, +// otherwise a sorted copy. +// Only use this if the returned slice won't be manipulated!
+func sortedCopyDif(input Float64Data) (copy Float64Data) { + if sort.Float64sAreSorted(input) { + return input + } + copy = copyslice(input) + sort.Float64s(copy) + return +} diff --git a/vendor/github.com/montanaflynn/stats/variance.go b/vendor/github.com/montanaflynn/stats/variance.go new file mode 100644 index 000000000..a6445690d --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/variance.go @@ -0,0 +1,105 @@ +package stats + +import "math" + +// _variance finds the variance for both population and sample data +func _variance(input Float64Data, sample int) (variance float64, err error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + // Sum the square of the mean subtracted from each number + m, _ := Mean(input) + + for _, n := range input { + variance += (n - m) * (n - m) + } + + // When getting the mean of the squared differences + // "sample" tells us whether this is a sample or a + // population, and whether to subtract one from the length + return variance / float64((input.Len() - (1 * sample))), nil +} + +// Variance finds the amount of variation in the dataset +func Variance(input Float64Data) (sdev float64, err error) { + return PopulationVariance(input) +} + +// PopulationVariance finds the amount of variance within a population +func PopulationVariance(input Float64Data) (pvar float64, err error) { + + v, err := _variance(input, 0) + if err != nil { + return math.NaN(), err + } + + return v, nil +} + +// SampleVariance finds the amount of variance within a sample +func SampleVariance(input Float64Data) (svar float64, err error) { + + v, err := _variance(input, 1) + if err != nil { + return math.NaN(), err + } + + return v, nil +} + +// Covariance is a measure of how much two sets of data change together +func Covariance(data1, data2 Float64Data) (float64, error) { + + l1 := data1.Len() + l2 := data2.Len() + + if l1 == 0 || l2 == 0 { + return math.NaN(), EmptyInputErr + } + + if l1 != l2 { + return math.NaN(), SizeErr + } + + m1, _ := Mean(data1) + m2, _ := Mean(data2) + + // Calculate sum of squares + var ss float64 + for i := 0; i < l1; i++ { + delta1 := (data1.Get(i) - m1) + delta2 := (data2.Get(i) - m2) + ss += (delta1*delta2 - ss) / float64(i+1) + } + + return ss * float64(l1) / float64(l1-1), nil +} + +// CovariancePopulation computes covariance for entire population between two variables. +func CovariancePopulation(data1, data2 Float64Data) (float64, error) { + + l1 := data1.Len() + l2 := data2.Len() + + if l1 == 0 || l2 == 0 { + return math.NaN(), EmptyInputErr + } + + if l1 != l2 { + return math.NaN(), SizeErr + } + + m1, _ := Mean(data1) + m2, _ := Mean(data2) + + var s float64 + for i := 0; i < l1; i++ { + delta1 := (data1.Get(i) - m1) + delta2 := (data2.Get(i) - m2) + s += delta1 * delta2 + } + + return s / float64(l1), nil +} diff --git a/vendor/github.com/oklog/run/.travis.yml b/vendor/github.com/oklog/run/.travis.yml deleted file mode 100644 index 362bdd41c..000000000 --- a/vendor/github.com/oklog/run/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -sudo: false -go: - - 1.x - - tip -install: - - go get -v github.com/golang/lint/golint - - go build ./... -script: - - go vet ./... - - $HOME/gopath/bin/golint . - - go test -v -race ./...
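As a quick orientation for reviewers, a minimal sketch of how the stats package vendored above is used; the data values are invented, and only functions added in this change are called:

``` go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	// Invented sample data.
	x := stats.Float64Data{1, 2, 3, 4, 5}
	y := stats.Float64Data{2, 4, 6, 8, 10}

	pv, _ := x.Variance()       // population variance (divides by n)
	sv, _ := x.SampleVariance() // sample variance (divides by n-1)
	cv, _ := stats.Covariance(x, y)

	fmt.Println(pv, sv, cv)
}
```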
diff --git a/vendor/github.com/oklog/run/README.md b/vendor/github.com/oklog/run/README.md index a7228cd9a..eba7d11cf 100644 --- a/vendor/github.com/oklog/run/README.md +++ b/vendor/github.com/oklog/run/README.md @@ -1,7 +1,7 @@ # run [![GoDoc](https://godoc.org/github.com/oklog/run?status.svg)](https://godoc.org/github.com/oklog/run) -[![Build Status](https://travis-ci.org/oklog/run.svg?branch=master)](https://travis-ci.org/oklog/run) +[![Build Status](https://img.shields.io/endpoint.svg?url=https%3A%2F%2Factions-badge.atrox.dev%2Foklog%2Frun%2Fbadge&style=flat-square&label=build)](https://github.com/oklog/run/actions?query=workflow%3ATest) [![Go Report Card](https://goreportcard.com/badge/github.com/oklog/run)](https://goreportcard.com/report/github.com/oklog/run) [![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/run/master/LICENSE) @@ -10,9 +10,11 @@ run.Group is a universal mechanism to manage goroutine lifecycles. Create a zero-value run.Group, and then add actors to it. Actors are defined as a pair of functions: an **execute** function, which should run synchronously; and an **interrupt** function, which, when invoked, should cause the execute -function to return. Finally, invoke Run, which blocks until the first actor -returns. This general-purpose API allows callers to model pretty much any -runnable task, and achieve well-defined lifecycle semantics for the group. +function to return. Finally, invoke Run, which concurrently runs all of the +actors, waits until the first actor exits, invokes the interrupt functions, and +finally returns control to the caller only once all actors have returned. This +general-purpose API allows callers to model pretty much any runnable task, and +achieve well-defined lifecycle semantics for the group. run.Group was written to manage component lifecycles in func main for [OK Log](https://github.com/oklog/oklog). diff --git a/vendor/github.com/oklog/run/actors.go b/vendor/github.com/oklog/run/actors.go new file mode 100644 index 000000000..ef93495d3 --- /dev/null +++ b/vendor/github.com/oklog/run/actors.go @@ -0,0 +1,38 @@ +package run + +import ( + "context" + "fmt" + "os" + "os/signal" +) + +// SignalHandler returns an actor, i.e. an execute and interrupt func, that +// terminates with SignalError when the process receives one of the provided +// signals, or the parent context is canceled. +func SignalHandler(ctx context.Context, signals ...os.Signal) (execute func() error, interrupt func(error)) { + ctx, cancel := context.WithCancel(ctx) + return func() error { + c := make(chan os.Signal, 1) + signal.Notify(c, signals...) + select { + case sig := <-c: + return SignalError{Signal: sig} + case <-ctx.Done(): + return ctx.Err() + } + }, func(error) { + cancel() + } +} + +// SignalError is returned by the signal handler's execute function +// when it terminates due to a received signal. +type SignalError struct { + Signal os.Signal +} + +// Error implements the error interface. 
+func (e SignalError) Error() string { + return fmt.Sprintf("received signal %s", e.Signal) +} diff --git a/vendor/github.com/oklog/run/go.mod b/vendor/github.com/oklog/run/go.mod new file mode 100644 index 000000000..b1297232e --- /dev/null +++ b/vendor/github.com/oklog/run/go.mod @@ -0,0 +1,3 @@ +module github.com/oklog/run + +go 1.13 diff --git a/vendor/github.com/vmihailenco/msgpack/v4/.golangci.yml b/vendor/github.com/vmihailenco/msgpack/v4/.golangci.yml new file mode 100644 index 000000000..98d6cb779 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/.golangci.yml @@ -0,0 +1,12 @@ +run: + concurrency: 8 + deadline: 5m + tests: false +linters: + enable-all: true + disable: + - gochecknoglobals + - gocognit + - godox + - wsl + - funlen diff --git a/vendor/github.com/vmihailenco/msgpack/v4/.travis.yml b/vendor/github.com/vmihailenco/msgpack/v4/.travis.yml new file mode 100644 index 000000000..b35bf5484 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/.travis.yml @@ -0,0 +1,21 @@ +sudo: false +language: go + +go: + - 1.11.x + - 1.12.x + - 1.13.x + - 1.14.x + - tip + +matrix: + allow_failures: + - go: tip + +env: + - GO111MODULE=on + +go_import_path: github.com/vmihailenco/msgpack + +before_install: + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.21.0 diff --git a/vendor/github.com/vmihailenco/msgpack/v4/CHANGELOG.md b/vendor/github.com/vmihailenco/msgpack/v4/CHANGELOG.md new file mode 100644 index 000000000..fac97090e --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/CHANGELOG.md @@ -0,0 +1,24 @@ +## v4 + +- Encode, Decode, Marshal, and Unmarshal are changed to accept a single argument. EncodeMulti and DecodeMulti are added as replacements. +- Added EncodeInt8/16/32/64 and EncodeUint8/16/32/64. +- Encoder changed to preserve the type of numbers instead of choosing the most compact encoding. The old behavior can be achieved with Encoder.UseCompactEncoding. + +## v3.3 + +- `msgpack:",inline"` tag is restored to force inlining structs. + +## v3.2 + +- Decoding extension types returns pointer to the value instead of the value. Fixes #153 + +## v3 + +- gopkg.in is not supported any more. Update import path to github.com/vmihailenco/msgpack. +- Msgpack maps are decoded into map[string]interface{} by default. +- EncodeSliceLen is removed in favor of EncodeArrayLen. DecodeSliceLen is removed in favor of DecodeArrayLen. +- Embedded structs are automatically inlined where possible. +- Time is encoded using extension as described in https://github.com/msgpack/msgpack/pull/209. Old format is supported as well. +- EncodeInt8/16/32/64 is replaced with EncodeInt. EncodeUint8/16/32/64 is replaced with EncodeUint. There should be no performance differences. +- DecodeInterface can now return int8/16/32 and uint8/16/32. +- PeekCode returns codes.Code instead of byte. diff --git a/vendor/github.com/vmihailenco/msgpack/v4/LICENSE b/vendor/github.com/vmihailenco/msgpack/v4/LICENSE new file mode 100644 index 000000000..b749d0707 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013 The github.com/vmihailenco/msgpack Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/vmihailenco/msgpack/v4/Makefile b/vendor/github.com/vmihailenco/msgpack/v4/Makefile new file mode 100644 index 000000000..57914e333 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/Makefile @@ -0,0 +1,6 @@ +all: + go test ./... + go test ./... -short -race + go test ./... -run=NONE -bench=. -benchmem + env GOOS=linux GOARCH=386 go test ./... + golangci-lint run diff --git a/vendor/github.com/vmihailenco/msgpack/v4/README.md b/vendor/github.com/vmihailenco/msgpack/v4/README.md new file mode 100644 index 000000000..a5b1004e0 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/README.md @@ -0,0 +1,72 @@ +# MessagePack encoding for Golang + +[![Build Status](https://travis-ci.org/vmihailenco/msgpack.svg?branch=v2)](https://travis-ci.org/vmihailenco/msgpack) +[![GoDoc](https://godoc.org/github.com/vmihailenco/msgpack?status.svg)](https://godoc.org/github.com/vmihailenco/msgpack) + +Supports: +- Primitives, arrays, maps, structs, time.Time and interface{}. +- Appengine *datastore.Key and datastore.Cursor. +- [CustomEncoder](https://godoc.org/github.com/vmihailenco/msgpack#example-CustomEncoder)/CustomDecoder interfaces for custom encoding. +- [Extensions](https://godoc.org/github.com/vmihailenco/msgpack#example-RegisterExt) to encode type information. +- Renaming fields via `msgpack:"my_field_name"` and alias via `msgpack:"alias:another_name"`. +- Omitting individual empty fields via `msgpack:",omitempty"` tag or all [empty fields in a struct](https://godoc.org/github.com/vmihailenco/msgpack#example-Marshal--OmitEmpty). +- [Map keys sorting](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.SortMapKeys). +- Encoding/decoding all [structs as arrays](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.UseArrayForStructs) or [individual structs](https://godoc.org/github.com/vmihailenco/msgpack#example-Marshal--AsArray). +- [Encoder.UseJSONTag](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.UseJSONTag) with [Decoder.UseJSONTag](https://godoc.org/github.com/vmihailenco/msgpack#Decoder.UseJSONTag) can turn msgpack into drop-in replacement for JSON. +- Simple but very fast and efficient [queries](https://godoc.org/github.com/vmihailenco/msgpack#example-Decoder-Query). + +API docs: https://godoc.org/github.com/vmihailenco/msgpack. +Examples: https://godoc.org/github.com/vmihailenco/msgpack#pkg-examples. 
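+
+A quick illustration of the struct tags listed above (a sketch; the struct and
+field names are invented):
+
+``` go
+type Item struct {
+	ID    int     `msgpack:"id"`         // rename the encoded field
+	Note  string  `msgpack:",omitempty"` // omit the field when empty
+	Price float64 `msgpack:"-"`          // never encode this field
+}
+```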
+ +## Installation + +This project uses [Go Modules](https://github.com/golang/go/wiki/Modules) and semantic import versioning since v4: + +``` shell +go mod init github.com/my/repo +go get github.com/vmihailenco/msgpack/v4 +``` + +## Quickstart + +``` go +import "github.com/vmihailenco/msgpack/v4" + +func ExampleMarshal() { + type Item struct { + Foo string + } + + b, err := msgpack.Marshal(&Item{Foo: "bar"}) + if err != nil { + panic(err) + } + + var item Item + err = msgpack.Unmarshal(b, &item) + if err != nil { + panic(err) + } + fmt.Println(item.Foo) + // Output: bar +} +``` + +## Benchmark + +``` +BenchmarkStructVmihailencoMsgpack-4 200000 12814 ns/op 2128 B/op 26 allocs/op +BenchmarkStructUgorjiGoMsgpack-4 100000 17678 ns/op 3616 B/op 70 allocs/op +BenchmarkStructUgorjiGoCodec-4 100000 19053 ns/op 7346 B/op 23 allocs/op +BenchmarkStructJSON-4 20000 69438 ns/op 7864 B/op 26 allocs/op +BenchmarkStructGOB-4 10000 104331 ns/op 14664 B/op 278 allocs/op +``` + +## Howto + +Please go through [examples](https://godoc.org/github.com/vmihailenco/msgpack#pkg-examples) to get an idea how to use this package. + +## See also + +- [Golang PostgreSQL ORM](https://github.com/go-pg/pg) +- [Golang message task queue](https://github.com/vmihailenco/taskq) diff --git a/vendor/github.com/vmihailenco/msgpack/v4/appengine.go b/vendor/github.com/vmihailenco/msgpack/v4/appengine.go new file mode 100644 index 000000000..e8e91e53f --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/appengine.go @@ -0,0 +1,64 @@ +// +build appengine + +package msgpack + +import ( + "reflect" + + ds "google.golang.org/appengine/datastore" +) + +func init() { + Register((*ds.Key)(nil), encodeDatastoreKeyValue, decodeDatastoreKeyValue) + Register((*ds.Cursor)(nil), encodeDatastoreCursorValue, decodeDatastoreCursorValue) +} + +func EncodeDatastoreKey(e *Encoder, key *ds.Key) error { + if key == nil { + return e.EncodeNil() + } + return e.EncodeString(key.Encode()) +} + +func encodeDatastoreKeyValue(e *Encoder, v reflect.Value) error { + key := v.Interface().(*ds.Key) + return EncodeDatastoreKey(e, key) +} + +func DecodeDatastoreKey(d *Decoder) (*ds.Key, error) { + v, err := d.DecodeString() + if err != nil { + return nil, err + } + if v == "" { + return nil, nil + } + return ds.DecodeKey(v) +} + +func decodeDatastoreKeyValue(d *Decoder, v reflect.Value) error { + key, err := DecodeDatastoreKey(d) + if err != nil { + return err + } + v.Set(reflect.ValueOf(key)) + return nil +} + +func encodeDatastoreCursorValue(e *Encoder, v reflect.Value) error { + cursor := v.Interface().(ds.Cursor) + return e.Encode(cursor.String()) +} + +func decodeDatastoreCursorValue(d *Decoder, v reflect.Value) error { + s, err := d.DecodeString() + if err != nil { + return err + } + cursor, err := ds.DecodeCursor(s) + if err != nil { + return err + } + v.Set(reflect.ValueOf(cursor)) + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/codes/codes.go b/vendor/github.com/vmihailenco/msgpack/v4/codes/codes.go new file mode 100644 index 000000000..28e0a5a88 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/codes/codes.go @@ -0,0 +1,90 @@ +package codes + +type Code byte + +var ( + PosFixedNumHigh Code = 0x7f + NegFixedNumLow Code = 0xe0 + + Nil Code = 0xc0 + + False Code = 0xc2 + True Code = 0xc3 + + Float Code = 0xca + Double Code = 0xcb + + Uint8 Code = 0xcc + Uint16 Code = 0xcd + Uint32 Code = 0xce + Uint64 Code = 0xcf + + Int8 Code = 0xd0 + Int16 Code = 0xd1 + Int32 Code = 0xd2 + Int64 Code = 0xd3 + + FixedStrLow Code = 
0xa0 + FixedStrHigh Code = 0xbf + FixedStrMask Code = 0x1f + Str8 Code = 0xd9 + Str16 Code = 0xda + Str32 Code = 0xdb + + Bin8 Code = 0xc4 + Bin16 Code = 0xc5 + Bin32 Code = 0xc6 + + FixedArrayLow Code = 0x90 + FixedArrayHigh Code = 0x9f + FixedArrayMask Code = 0xf + Array16 Code = 0xdc + Array32 Code = 0xdd + + FixedMapLow Code = 0x80 + FixedMapHigh Code = 0x8f + FixedMapMask Code = 0xf + Map16 Code = 0xde + Map32 Code = 0xdf + + FixExt1 Code = 0xd4 + FixExt2 Code = 0xd5 + FixExt4 Code = 0xd6 + FixExt8 Code = 0xd7 + FixExt16 Code = 0xd8 + Ext8 Code = 0xc7 + Ext16 Code = 0xc8 + Ext32 Code = 0xc9 +) + +func IsFixedNum(c Code) bool { + return c <= PosFixedNumHigh || c >= NegFixedNumLow +} + +func IsFixedMap(c Code) bool { + return c >= FixedMapLow && c <= FixedMapHigh +} + +func IsFixedArray(c Code) bool { + return c >= FixedArrayLow && c <= FixedArrayHigh +} + +func IsFixedString(c Code) bool { + return c >= FixedStrLow && c <= FixedStrHigh +} + +func IsString(c Code) bool { + return IsFixedString(c) || c == Str8 || c == Str16 || c == Str32 +} + +func IsBin(c Code) bool { + return c == Bin8 || c == Bin16 || c == Bin32 +} + +func IsFixedExt(c Code) bool { + return c >= FixExt1 && c <= FixExt16 +} + +func IsExt(c Code) bool { + return IsFixedExt(c) || c == Ext8 || c == Ext16 || c == Ext32 +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode.go b/vendor/github.com/vmihailenco/msgpack/v4/decode.go new file mode 100644 index 000000000..1711675e9 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/decode.go @@ -0,0 +1,617 @@ +package msgpack + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "reflect" + "sync" + "time" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +const ( + looseIfaceFlag uint32 = 1 << iota + decodeUsingJSONFlag + disallowUnknownFieldsFlag +) + +const ( + bytesAllocLimit = 1e6 // 1mb + sliceAllocLimit = 1e4 + maxMapSize = 1e6 +) + +type bufReader interface { + io.Reader + io.ByteScanner +} + +//------------------------------------------------------------------------------ + +var decPool = sync.Pool{ + New: func() interface{} { + return NewDecoder(nil) + }, +} + +// Unmarshal decodes the MessagePack-encoded data and stores the result +// in the value pointed to by v. +func Unmarshal(data []byte, v interface{}) error { + dec := decPool.Get().(*Decoder) + + if r, ok := dec.r.(*bytes.Reader); ok { + r.Reset(data) + } else { + dec.Reset(bytes.NewReader(data)) + } + err := dec.Decode(v) + + decPool.Put(dec) + + return err +} + +// A Decoder reads and decodes MessagePack values from an input stream. +type Decoder struct { + r io.Reader + s io.ByteScanner + buf []byte + + extLen int + rec []byte // accumulates read data if not nil + + intern []string + flags uint32 + decodeMapFunc func(*Decoder) (interface{}, error) +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read data from r +// beyond the MessagePack values requested. Buffering can be disabled +// by passing a reader that implements io.ByteScanner interface. +func NewDecoder(r io.Reader) *Decoder { + d := new(Decoder) + d.Reset(r) + return d +} + +// Reset discards any buffered data, resets all state, and switches the buffered +// reader to read from r. 
+func (d *Decoder) Reset(r io.Reader) {
+	if br, ok := r.(bufReader); ok {
+		d.r = br
+		d.s = br
+	} else if br, ok := d.r.(*bufio.Reader); ok {
+		br.Reset(r)
+	} else {
+		br := bufio.NewReader(r)
+		d.r = br
+		d.s = br
+	}
+
+	if d.intern != nil {
+		d.intern = d.intern[:0]
+	}
+
+	//TODO:
+	//d.useLoose = false
+	//d.useJSONTag = false
+	//d.disallowUnknownFields = false
+	//d.decodeMapFunc = nil
+}
+
+func (d *Decoder) SetDecodeMapFunc(fn func(*Decoder) (interface{}, error)) {
+	d.decodeMapFunc = fn
+}
+
+// UseDecodeInterfaceLoose causes the decoder to use DecodeInterfaceLoose
+// to decode a msgpack value into a Go interface{}.
+func (d *Decoder) UseDecodeInterfaceLoose(on bool) *Decoder {
+	if on {
+		d.flags |= looseIfaceFlag
+	} else {
+		d.flags &= ^looseIfaceFlag
+	}
+	return d
+}
+
+// UseJSONTag causes the Decoder to use the json struct tag as a fallback
+// option when there is no msgpack tag.
+func (d *Decoder) UseJSONTag(on bool) *Decoder {
+	if on {
+		d.flags |= decodeUsingJSONFlag
+	} else {
+		d.flags &= ^decodeUsingJSONFlag
+	}
+	return d
+}
+
+// DisallowUnknownFields causes the Decoder to return an error when the destination
+// is a struct and the input contains object keys which do not match any
+// non-ignored, exported fields in the destination.
+func (d *Decoder) DisallowUnknownFields() {
+	d.flags |= disallowUnknownFieldsFlag
+}
+
+// Buffered returns a reader of the data remaining in the Decoder's buffer.
+// The reader is valid until the next call to Decode.
+func (d *Decoder) Buffered() io.Reader {
+	return d.r
+}
+
+//nolint:gocyclo
+func (d *Decoder) Decode(v interface{}) error {
+	var err error
+	switch v := v.(type) {
+	case *string:
+		if v != nil {
+			*v, err = d.DecodeString()
+			return err
+		}
+	case *[]byte:
+		if v != nil {
+			return d.decodeBytesPtr(v)
+		}
+	case *int:
+		if v != nil {
+			*v, err = d.DecodeInt()
+			return err
+		}
+	case *int8:
+		if v != nil {
+			*v, err = d.DecodeInt8()
+			return err
+		}
+	case *int16:
+		if v != nil {
+			*v, err = d.DecodeInt16()
+			return err
+		}
+	case *int32:
+		if v != nil {
+			*v, err = d.DecodeInt32()
+			return err
+		}
+	case *int64:
+		if v != nil {
+			*v, err = d.DecodeInt64()
+			return err
+		}
+	case *uint:
+		if v != nil {
+			*v, err = d.DecodeUint()
+			return err
+		}
+	case *uint8:
+		if v != nil {
+			*v, err = d.DecodeUint8()
+			return err
+		}
+	case *uint16:
+		if v != nil {
+			*v, err = d.DecodeUint16()
+			return err
+		}
+	case *uint32:
+		if v != nil {
+			*v, err = d.DecodeUint32()
+			return err
+		}
+	case *uint64:
+		if v != nil {
+			*v, err = d.DecodeUint64()
+			return err
+		}
+	case *bool:
+		if v != nil {
+			*v, err = d.DecodeBool()
+			return err
+		}
+	case *float32:
+		if v != nil {
+			*v, err = d.DecodeFloat32()
+			return err
+		}
+	case *float64:
+		if v != nil {
+			*v, err = d.DecodeFloat64()
+			return err
+		}
+	case *[]string:
+		return d.decodeStringSlicePtr(v)
+	case *map[string]string:
+		return d.decodeMapStringStringPtr(v)
+	case *map[string]interface{}:
+		return d.decodeMapStringInterfacePtr(v)
+	case *time.Duration:
+		if v != nil {
+			vv, err := d.DecodeInt64()
+			*v = time.Duration(vv)
+			return err
+		}
+	case *time.Time:
+		if v != nil {
+			*v, err = d.DecodeTime()
+			return err
+		}
+	}
+
+	vv := reflect.ValueOf(v)
+	if !vv.IsValid() {
+		return errors.New("msgpack: Decode(nil)")
+	}
+	if vv.Kind() != reflect.Ptr {
+		return fmt.Errorf("msgpack: Decode(nonsettable %T)", v)
+	}
+	vv = vv.Elem()
+	if !vv.IsValid() {
+		return fmt.Errorf("msgpack: Decode(nonsettable %T)", v)
+	}
+	return d.DecodeValue(vv)
+}
+
+func (d *Decoder) DecodeMulti(v ...interface{}) error {
+	for _, vv := range v {
+		if err := d.Decode(vv); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (d *Decoder) decodeInterfaceCond() (interface{}, error) {
+	if d.flags&looseIfaceFlag != 0 {
+		return d.DecodeInterfaceLoose()
+	}
+	return d.DecodeInterface()
+}
+
+func (d *Decoder) DecodeValue(v reflect.Value) error {
+	decode := getDecoder(v.Type())
+	return decode(d, v)
+}
+
+func (d *Decoder) DecodeNil() error {
+	c, err := d.readCode()
+	if err != nil {
+		return err
+	}
+	if c != codes.Nil {
+		return fmt.Errorf("msgpack: invalid code=%x decoding nil", c)
+	}
+	return nil
+}
+
+func (d *Decoder) decodeNilValue(v reflect.Value) error {
+	err := d.DecodeNil()
+	if v.IsNil() {
+		return err
+	}
+	if v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+	v.Set(reflect.Zero(v.Type()))
+	return err
+}
+
+func (d *Decoder) DecodeBool() (bool, error) {
+	c, err := d.readCode()
+	if err != nil {
+		return false, err
+	}
+	return d.bool(c)
+}
+
+func (d *Decoder) bool(c codes.Code) (bool, error) {
+	if c == codes.False {
+		return false, nil
+	}
+	if c == codes.True {
+		return true, nil
+	}
+	return false, fmt.Errorf("msgpack: invalid code=%x decoding bool", c)
+}
+
+func (d *Decoder) DecodeDuration() (time.Duration, error) {
+	n, err := d.DecodeInt64()
+	if err != nil {
+		return 0, err
+	}
+	return time.Duration(n), nil
+}
+
+// DecodeInterface decodes a value into an interface{}. It returns the following types:
+// - nil,
+// - bool,
+// - int8, int16, int32, int64,
+// - uint8, uint16, uint32, uint64,
+// - float32 and float64,
+// - string,
+// - []byte,
+// - slices of any of the above,
+// - maps of any of the above.
+//
+// DecodeInterface should be used only when you don't know the type of value
+// you are decoding. For example, if you are decoding a number it is better to use
+// DecodeInt64 for negative numbers and DecodeUint64 for positive numbers.
+func (d *Decoder) DecodeInterface() (interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + + if codes.IsFixedNum(c) { + return int8(c), nil + } + if codes.IsFixedMap(c) { + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + } + if codes.IsFixedArray(c) { + return d.decodeSlice(c) + } + if codes.IsFixedString(c) { + return d.string(c) + } + + switch c { + case codes.Nil: + return nil, nil + case codes.False, codes.True: + return d.bool(c) + case codes.Float: + return d.float32(c) + case codes.Double: + return d.float64(c) + case codes.Uint8: + return d.uint8() + case codes.Uint16: + return d.uint16() + case codes.Uint32: + return d.uint32() + case codes.Uint64: + return d.uint64() + case codes.Int8: + return d.int8() + case codes.Int16: + return d.int16() + case codes.Int32: + return d.int32() + case codes.Int64: + return d.int64() + case codes.Bin8, codes.Bin16, codes.Bin32: + return d.bytes(c, nil) + case codes.Str8, codes.Str16, codes.Str32: + return d.string(c) + case codes.Array16, codes.Array32: + return d.decodeSlice(c) + case codes.Map16, codes.Map32: + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, + codes.Ext8, codes.Ext16, codes.Ext32: + return d.extInterface(c) + } + + return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c) +} + +// DecodeInterfaceLoose is like DecodeInterface except that: +// - int8, int16, and int32 are converted to int64, +// - uint8, uint16, and uint32 are converted to uint64, +// - float32 is converted to float64. +func (d *Decoder) DecodeInterfaceLoose() (interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + + if codes.IsFixedNum(c) { + return int64(int8(c)), nil + } + if codes.IsFixedMap(c) { + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + } + if codes.IsFixedArray(c) { + return d.decodeSlice(c) + } + if codes.IsFixedString(c) { + return d.string(c) + } + + switch c { + case codes.Nil: + return nil, nil + case codes.False, codes.True: + return d.bool(c) + case codes.Float, codes.Double: + return d.float64(c) + case codes.Uint8, codes.Uint16, codes.Uint32, codes.Uint64: + return d.uint(c) + case codes.Int8, codes.Int16, codes.Int32, codes.Int64: + return d.int(c) + case codes.Bin8, codes.Bin16, codes.Bin32: + return d.bytes(c, nil) + case codes.Str8, codes.Str16, codes.Str32: + return d.string(c) + case codes.Array16, codes.Array32: + return d.decodeSlice(c) + case codes.Map16, codes.Map32: + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, + codes.Ext8, codes.Ext16, codes.Ext32: + return d.extInterface(c) + } + + return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c) +} + +// Skip skips next value. 
+func (d *Decoder) Skip() error { + c, err := d.readCode() + if err != nil { + return err + } + + if codes.IsFixedNum(c) { + return nil + } + if codes.IsFixedMap(c) { + return d.skipMap(c) + } + if codes.IsFixedArray(c) { + return d.skipSlice(c) + } + if codes.IsFixedString(c) { + return d.skipBytes(c) + } + + switch c { + case codes.Nil, codes.False, codes.True: + return nil + case codes.Uint8, codes.Int8: + return d.skipN(1) + case codes.Uint16, codes.Int16: + return d.skipN(2) + case codes.Uint32, codes.Int32, codes.Float: + return d.skipN(4) + case codes.Uint64, codes.Int64, codes.Double: + return d.skipN(8) + case codes.Bin8, codes.Bin16, codes.Bin32: + return d.skipBytes(c) + case codes.Str8, codes.Str16, codes.Str32: + return d.skipBytes(c) + case codes.Array16, codes.Array32: + return d.skipSlice(c) + case codes.Map16, codes.Map32: + return d.skipMap(c) + case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, + codes.Ext8, codes.Ext16, codes.Ext32: + return d.skipExt(c) + } + + return fmt.Errorf("msgpack: unknown code %x", c) +} + +// PeekCode returns the next MessagePack code without advancing the reader. +// Subpackage msgpack/codes contains list of available codes. +func (d *Decoder) PeekCode() (codes.Code, error) { + c, err := d.s.ReadByte() + if err != nil { + return 0, err + } + return codes.Code(c), d.s.UnreadByte() +} + +func (d *Decoder) hasNilCode() bool { + code, err := d.PeekCode() + return err == nil && code == codes.Nil +} + +func (d *Decoder) readCode() (codes.Code, error) { + d.extLen = 0 + c, err := d.s.ReadByte() + if err != nil { + return 0, err + } + if d.rec != nil { + d.rec = append(d.rec, c) + } + return codes.Code(c), nil +} + +func (d *Decoder) readFull(b []byte) error { + _, err := io.ReadFull(d.r, b) + if err != nil { + return err + } + if d.rec != nil { + //TODO: read directly into d.rec? + d.rec = append(d.rec, b...) + } + return nil +} + +func (d *Decoder) readN(n int) ([]byte, error) { + var err error + d.buf, err = readN(d.r, d.buf, n) + if err != nil { + return nil, err + } + if d.rec != nil { + //TODO: read directly into d.rec? + d.rec = append(d.rec, d.buf...) + } + return d.buf, nil +} + +func readN(r io.Reader, b []byte, n int) ([]byte, error) { + if b == nil { + if n == 0 { + return make([]byte, 0), nil + } + switch { + case n < 64: + b = make([]byte, 0, 64) + case n <= bytesAllocLimit: + b = make([]byte, 0, n) + default: + b = make([]byte, 0, bytesAllocLimit) + } + } + + if n <= cap(b) { + b = b[:n] + _, err := io.ReadFull(r, b) + return b, err + } + b = b[:cap(b)] + + var pos int + for { + alloc := min(n-len(b), bytesAllocLimit) + b = append(b, make([]byte, alloc)...) 
+ + _, err := io.ReadFull(r, b[pos:]) + if err != nil { + return b, err + } + + if len(b) == n { + break + } + pos = len(b) + } + + return b, nil +} + +func min(a, b int) int { //nolint:unparam + if a <= b { + return a + } + return b +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_map.go b/vendor/github.com/vmihailenco/msgpack/v4/decode_map.go new file mode 100644 index 000000000..16c40fe77 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/decode_map.go @@ -0,0 +1,350 @@ +package msgpack + +import ( + "errors" + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +var ( + mapStringStringPtrType = reflect.TypeOf((*map[string]string)(nil)) + mapStringStringType = mapStringStringPtrType.Elem() +) + +var ( + mapStringInterfacePtrType = reflect.TypeOf((*map[string]interface{})(nil)) + mapStringInterfaceType = mapStringInterfacePtrType.Elem() +) + +func decodeMapValue(d *Decoder, v reflect.Value) error { + size, err := d.DecodeMapLen() + if err != nil { + return err + } + + typ := v.Type() + if size == -1 { + v.Set(reflect.Zero(typ)) + return nil + } + + if v.IsNil() { + v.Set(reflect.MakeMap(typ)) + } + if size == 0 { + return nil + } + + return decodeMapValueSize(d, v, size) +} + +func decodeMapValueSize(d *Decoder, v reflect.Value, size int) error { + typ := v.Type() + keyType := typ.Key() + valueType := typ.Elem() + + for i := 0; i < size; i++ { + mk := reflect.New(keyType).Elem() + if err := d.DecodeValue(mk); err != nil { + return err + } + + mv := reflect.New(valueType).Elem() + if err := d.DecodeValue(mv); err != nil { + return err + } + + v.SetMapIndex(mk, mv) + } + + return nil +} + +// DecodeMapLen decodes map length. Length is -1 when map is nil. +func (d *Decoder) DecodeMapLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + + if codes.IsExt(c) { + if err = d.skipExtHeader(c); err != nil { + return 0, err + } + + c, err = d.readCode() + if err != nil { + return 0, err + } + } + return d.mapLen(c) +} + +func (d *Decoder) mapLen(c codes.Code) (int, error) { + size, err := d._mapLen(c) + err = expandInvalidCodeMapLenError(c, err) + return size, err +} + +func (d *Decoder) _mapLen(c codes.Code) (int, error) { + if c == codes.Nil { + return -1, nil + } + if c >= codes.FixedMapLow && c <= codes.FixedMapHigh { + return int(c & codes.FixedMapMask), nil + } + if c == codes.Map16 { + size, err := d.uint16() + return int(size), err + } + if c == codes.Map32 { + size, err := d.uint32() + return int(size), err + } + return 0, errInvalidCode +} + +var errInvalidCode = errors.New("invalid code") + +func expandInvalidCodeMapLenError(c codes.Code, err error) error { + if err == errInvalidCode { + return fmt.Errorf("msgpack: invalid code=%x decoding map length", c) + } + return err +} + +func decodeMapStringStringValue(d *Decoder, v reflect.Value) error { + mptr := v.Addr().Convert(mapStringStringPtrType).Interface().(*map[string]string) + return d.decodeMapStringStringPtr(mptr) +} + +func (d *Decoder) decodeMapStringStringPtr(ptr *map[string]string) error { + size, err := d.DecodeMapLen() + if err != nil { + return err + } + if size == -1 { + *ptr = nil + return nil + } + + m := *ptr + if m == nil { + *ptr = make(map[string]string, min(size, maxMapSize)) + m = *ptr + } + + for i := 0; i < size; i++ { + mk, err := d.DecodeString() + if err != nil { + return err + } + mv, err := d.DecodeString() + if err != nil { + return err + } + m[mk] = mv + } + + return nil +} + +func decodeMapStringInterfaceValue(d *Decoder, v 
reflect.Value) error { + ptr := v.Addr().Convert(mapStringInterfacePtrType).Interface().(*map[string]interface{}) + return d.decodeMapStringInterfacePtr(ptr) +} + +func (d *Decoder) decodeMapStringInterfacePtr(ptr *map[string]interface{}) error { + n, err := d.DecodeMapLen() + if err != nil { + return err + } + if n == -1 { + *ptr = nil + return nil + } + + m := *ptr + if m == nil { + *ptr = make(map[string]interface{}, min(n, maxMapSize)) + m = *ptr + } + + for i := 0; i < n; i++ { + mk, err := d.DecodeString() + if err != nil { + return err + } + mv, err := d.decodeInterfaceCond() + if err != nil { + return err + } + m[mk] = mv + } + + return nil +} + +var errUnsupportedMapKey = errors.New("msgpack: unsupported map key") + +func (d *Decoder) DecodeMap() (interface{}, error) { + if d.decodeMapFunc != nil { + return d.decodeMapFunc(d) + } + + size, err := d.DecodeMapLen() + if err != nil { + return nil, err + } + if size == -1 { + return nil, nil + } + if size == 0 { + return make(map[string]interface{}), nil + } + + code, err := d.PeekCode() + if err != nil { + return nil, err + } + + if codes.IsString(code) || codes.IsBin(code) { + return d.decodeMapStringInterfaceSize(size) + } + + key, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + value, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + keyType := reflect.TypeOf(key) + valueType := reflect.TypeOf(value) + + if !keyType.Comparable() { + return nil, errUnsupportedMapKey + } + + mapType := reflect.MapOf(keyType, valueType) + mapValue := reflect.MakeMap(mapType) + + mapValue.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(value)) + size-- + + err = decodeMapValueSize(d, mapValue, size) + if err != nil { + return nil, err + } + + return mapValue.Interface(), nil +} + +func (d *Decoder) decodeMapStringInterfaceSize(size int) (map[string]interface{}, error) { + m := make(map[string]interface{}, min(size, maxMapSize)) + for i := 0; i < size; i++ { + mk, err := d.DecodeString() + if err != nil { + return nil, err + } + mv, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + m[mk] = mv + } + return m, nil +} + +func (d *Decoder) skipMap(c codes.Code) error { + n, err := d.mapLen(c) + if err != nil { + return err + } + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + if err := d.Skip(); err != nil { + return err + } + } + return nil +} + +func decodeStructValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + var isArray bool + + n, err := d._mapLen(c) + if err != nil { + var err2 error + n, err2 = d.arrayLen(c) + if err2 != nil { + return expandInvalidCodeMapLenError(c, err) + } + isArray = true + } + if n == -1 { + if err = mustSet(v); err != nil { + return err + } + v.Set(reflect.Zero(v.Type())) + return nil + } + + var fields *fields + if d.flags&decodeUsingJSONFlag != 0 { + fields = jsonStructs.Fields(v.Type()) + } else { + fields = structs.Fields(v.Type()) + } + + if isArray { + for i, f := range fields.List { + if i >= n { + break + } + if err := f.DecodeValue(d, v); err != nil { + return err + } + } + + // Skip extra values. 
+ for i := len(fields.List); i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + + return nil + } + + for i := 0; i < n; i++ { + name, err := d.DecodeString() + if err != nil { + return err + } + + if f := fields.Map[name]; f != nil { + if err := f.DecodeValue(d, v); err != nil { + return err + } + } else if d.flags&disallowUnknownFieldsFlag != 0 { + return fmt.Errorf("msgpack: unknown field %q", name) + } else if err := d.Skip(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_number.go b/vendor/github.com/vmihailenco/msgpack/v4/decode_number.go new file mode 100644 index 000000000..f6b9151f0 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/decode_number.go @@ -0,0 +1,307 @@ +package msgpack + +import ( + "fmt" + "math" + "reflect" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +func (d *Decoder) skipN(n int) error { + _, err := d.readN(n) + return err +} + +func (d *Decoder) uint8() (uint8, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return uint8(c), nil +} + +func (d *Decoder) int8() (int8, error) { + n, err := d.uint8() + return int8(n), err +} + +func (d *Decoder) uint16() (uint16, error) { + b, err := d.readN(2) + if err != nil { + return 0, err + } + return (uint16(b[0]) << 8) | uint16(b[1]), nil +} + +func (d *Decoder) int16() (int16, error) { + n, err := d.uint16() + return int16(n), err +} + +func (d *Decoder) uint32() (uint32, error) { + b, err := d.readN(4) + if err != nil { + return 0, err + } + n := (uint32(b[0]) << 24) | + (uint32(b[1]) << 16) | + (uint32(b[2]) << 8) | + uint32(b[3]) + return n, nil +} + +func (d *Decoder) int32() (int32, error) { + n, err := d.uint32() + return int32(n), err +} + +func (d *Decoder) uint64() (uint64, error) { + b, err := d.readN(8) + if err != nil { + return 0, err + } + n := (uint64(b[0]) << 56) | + (uint64(b[1]) << 48) | + (uint64(b[2]) << 40) | + (uint64(b[3]) << 32) | + (uint64(b[4]) << 24) | + (uint64(b[5]) << 16) | + (uint64(b[6]) << 8) | + uint64(b[7]) + return n, nil +} + +func (d *Decoder) int64() (int64, error) { + n, err := d.uint64() + return int64(n), err +} + +// DecodeUint64 decodes msgpack int8/16/32/64 and uint8/16/32/64 +// into Go uint64. +func (d *Decoder) DecodeUint64() (uint64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.uint(c) +} + +func (d *Decoder) uint(c codes.Code) (uint64, error) { + if c == codes.Nil { + return 0, nil + } + if codes.IsFixedNum(c) { + return uint64(int8(c)), nil + } + switch c { + case codes.Uint8: + n, err := d.uint8() + return uint64(n), err + case codes.Int8: + n, err := d.int8() + return uint64(n), err + case codes.Uint16: + n, err := d.uint16() + return uint64(n), err + case codes.Int16: + n, err := d.int16() + return uint64(n), err + case codes.Uint32: + n, err := d.uint32() + return uint64(n), err + case codes.Int32: + n, err := d.int32() + return uint64(n), err + case codes.Uint64, codes.Int64: + return d.uint64() + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding uint64", c) +} + +// DecodeInt64 decodes msgpack int8/16/32/64 and uint8/16/32/64 +// into Go int64. 
+func (d *Decoder) DecodeInt64() (int64, error) {
+	c, err := d.readCode()
+	if err != nil {
+		return 0, err
+	}
+	return d.int(c)
+}
+
+func (d *Decoder) int(c codes.Code) (int64, error) {
+	if c == codes.Nil {
+		return 0, nil
+	}
+	if codes.IsFixedNum(c) {
+		return int64(int8(c)), nil
+	}
+	switch c {
+	case codes.Uint8:
+		n, err := d.uint8()
+		return int64(n), err
+	case codes.Int8:
+		n, err := d.uint8()
+		return int64(int8(n)), err
+	case codes.Uint16:
+		n, err := d.uint16()
+		return int64(n), err
+	case codes.Int16:
+		n, err := d.uint16()
+		return int64(int16(n)), err
+	case codes.Uint32:
+		n, err := d.uint32()
+		return int64(n), err
+	case codes.Int32:
+		n, err := d.uint32()
+		return int64(int32(n)), err
+	case codes.Uint64, codes.Int64:
+		n, err := d.uint64()
+		return int64(n), err
+	}
+	return 0, fmt.Errorf("msgpack: invalid code=%x decoding int64", c)
+}
+
+func (d *Decoder) DecodeFloat32() (float32, error) {
+	c, err := d.readCode()
+	if err != nil {
+		return 0, err
+	}
+	return d.float32(c)
+}
+
+func (d *Decoder) float32(c codes.Code) (float32, error) {
+	if c == codes.Float {
+		n, err := d.uint32()
+		if err != nil {
+			return 0, err
+		}
+		return math.Float32frombits(n), nil
+	}
+
+	n, err := d.int(c)
+	if err != nil {
+		return 0, fmt.Errorf("msgpack: invalid code=%x decoding float32", c)
+	}
+	return float32(n), nil
+}
+
+// DecodeFloat64 decodes msgpack float32/64 into Go float64.
+func (d *Decoder) DecodeFloat64() (float64, error) {
+	c, err := d.readCode()
+	if err != nil {
+		return 0, err
+	}
+	return d.float64(c)
+}
+
+func (d *Decoder) float64(c codes.Code) (float64, error) {
+	switch c {
+	case codes.Float:
+		n, err := d.float32(c)
+		if err != nil {
+			return 0, err
+		}
+		return float64(n), nil
+	case codes.Double:
+		n, err := d.uint64()
+		if err != nil {
+			return 0, err
+		}
+		return math.Float64frombits(n), nil
+	}
+
+	n, err := d.int(c)
+	if err != nil {
+		return 0, fmt.Errorf("msgpack: invalid code=%x decoding float64", c)
+	}
+	return float64(n), nil
+}
+
+func (d *Decoder) DecodeUint() (uint, error) {
+	n, err := d.DecodeUint64()
+	return uint(n), err
+}
+
+func (d *Decoder) DecodeUint8() (uint8, error) {
+	n, err := d.DecodeUint64()
+	return uint8(n), err
+}
+
+func (d *Decoder) DecodeUint16() (uint16, error) {
+	n, err := d.DecodeUint64()
+	return uint16(n), err
+}
+
+func (d *Decoder) DecodeUint32() (uint32, error) {
+	n, err := d.DecodeUint64()
+	return uint32(n), err
+}
+
+func (d *Decoder) DecodeInt() (int, error) {
+	n, err := d.DecodeInt64()
+	return int(n), err
+}
+
+func (d *Decoder) DecodeInt8() (int8, error) {
+	n, err := d.DecodeInt64()
+	return int8(n), err
+}
+
+func (d *Decoder) DecodeInt16() (int16, error) {
+	n, err := d.DecodeInt64()
+	return int16(n), err
+}
+
+func (d *Decoder) DecodeInt32() (int32, error) {
+	n, err := d.DecodeInt64()
+	return int32(n), err
+}
+
+func decodeFloat32Value(d *Decoder, v reflect.Value) error {
+	f, err := d.DecodeFloat32()
+	if err != nil {
+		return err
+	}
+	if err = mustSet(v); err != nil {
+		return err
+	}
+	v.SetFloat(float64(f))
+	return nil
+}
+
+func decodeFloat64Value(d *Decoder, v reflect.Value) error {
+	f, err := d.DecodeFloat64()
+	if err != nil {
+		return err
+	}
+	if err = mustSet(v); err != nil {
+		return err
+	}
+	v.SetFloat(f)
+	return nil
+}
+
+func decodeInt64Value(d *Decoder, v reflect.Value) error {
+	n, err := d.DecodeInt64()
+	if err != nil {
+		return err
+	}
+	if err = mustSet(v); err != nil {
+		return err
+	}
+	v.SetInt(n)
+	return nil
+}
+
+func decodeUint64Value(d *Decoder, v
reflect.Value) error { + n, err := d.DecodeUint64() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetUint(n) + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_query.go b/vendor/github.com/vmihailenco/msgpack/v4/decode_query.go new file mode 100644 index 000000000..80cd80e78 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/decode_query.go @@ -0,0 +1,158 @@ +package msgpack + +import ( + "fmt" + "strconv" + "strings" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +type queryResult struct { + query string + key string + hasAsterisk bool + + values []interface{} +} + +func (q *queryResult) nextKey() { + ind := strings.IndexByte(q.query, '.') + if ind == -1 { + q.key = q.query + q.query = "" + return + } + q.key = q.query[:ind] + q.query = q.query[ind+1:] +} + +// Query extracts data specified by the query from the msgpack stream skipping +// any other data. Query consists of map keys and array indexes separated with dot, +// e.g. key1.0.key2. +func (d *Decoder) Query(query string) ([]interface{}, error) { + res := queryResult{ + query: query, + } + if err := d.query(&res); err != nil { + return nil, err + } + return res.values, nil +} + +func (d *Decoder) query(q *queryResult) error { + q.nextKey() + if q.key == "" { + v, err := d.decodeInterfaceCond() + if err != nil { + return err + } + q.values = append(q.values, v) + return nil + } + + code, err := d.PeekCode() + if err != nil { + return err + } + + switch { + case code == codes.Map16 || code == codes.Map32 || codes.IsFixedMap(code): + err = d.queryMapKey(q) + case code == codes.Array16 || code == codes.Array32 || codes.IsFixedArray(code): + err = d.queryArrayIndex(q) + default: + err = fmt.Errorf("msgpack: unsupported code=%x decoding key=%q", code, q.key) + } + return err +} + +func (d *Decoder) queryMapKey(q *queryResult) error { + n, err := d.DecodeMapLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + for i := 0; i < n; i++ { + k, err := d.bytesNoCopy() + if err != nil { + return err + } + + if string(k) == q.key { + if err := d.query(q); err != nil { + return err + } + if q.hasAsterisk { + return d.skipNext((n - i - 1) * 2) + } + return nil + } + + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) queryArrayIndex(q *queryResult) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + if q.key == "*" { + q.hasAsterisk = true + + query := q.query + for i := 0; i < n; i++ { + q.query = query + if err := d.query(q); err != nil { + return err + } + } + + q.hasAsterisk = false + return nil + } + + ind, err := strconv.Atoi(q.key) + if err != nil { + return err + } + + for i := 0; i < n; i++ { + if i == ind { + if err := d.query(q); err != nil { + return err + } + if q.hasAsterisk { + return d.skipNext(n - i - 1) + } + return nil + } + + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) skipNext(n int) error { + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_slice.go b/vendor/github.com/vmihailenco/msgpack/v4/decode_slice.go new file mode 100644 index 000000000..adf17ae5c --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/decode_slice.go @@ -0,0 +1,191 @@ +package msgpack + +import ( + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +var 
sliceStringPtrType = reflect.TypeOf((*[]string)(nil)) + +// DecodeArrayLen decodes array length. Length is -1 when array is nil. +func (d *Decoder) DecodeArrayLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.arrayLen(c) +} + +func (d *Decoder) arrayLen(c codes.Code) (int, error) { + if c == codes.Nil { + return -1, nil + } else if c >= codes.FixedArrayLow && c <= codes.FixedArrayHigh { + return int(c & codes.FixedArrayMask), nil + } + switch c { + case codes.Array16: + n, err := d.uint16() + return int(n), err + case codes.Array32: + n, err := d.uint32() + return int(n), err + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding array length", c) +} + +func decodeStringSliceValue(d *Decoder, v reflect.Value) error { + ptr := v.Addr().Convert(sliceStringPtrType).Interface().(*[]string) + return d.decodeStringSlicePtr(ptr) +} + +func (d *Decoder) decodeStringSlicePtr(ptr *[]string) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + ss := makeStrings(*ptr, n) + for i := 0; i < n; i++ { + s, err := d.DecodeString() + if err != nil { + return err + } + ss = append(ss, s) + } + *ptr = ss + + return nil +} + +func makeStrings(s []string, n int) []string { + if n > sliceAllocLimit { + n = sliceAllocLimit + } + + if s == nil { + return make([]string, 0, n) + } + + if cap(s) >= n { + return s[:0] + } + + s = s[:cap(s)] + s = append(s, make([]string, n-len(s))...) + return s[:0] +} + +func decodeSliceValue(d *Decoder, v reflect.Value) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + + if n == -1 { + v.Set(reflect.Zero(v.Type())) + return nil + } + if n == 0 && v.IsNil() { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + return nil + } + + if v.Cap() >= n { + v.Set(v.Slice(0, n)) + } else if v.Len() < v.Cap() { + v.Set(v.Slice(0, v.Cap())) + } + + for i := 0; i < n; i++ { + if i >= v.Len() { + v.Set(growSliceValue(v, n)) + } + elem := v.Index(i) + if err := d.DecodeValue(elem); err != nil { + return err + } + } + + return nil +} + +func growSliceValue(v reflect.Value, n int) reflect.Value { + diff := n - v.Len() + if diff > sliceAllocLimit { + diff = sliceAllocLimit + } + v = reflect.AppendSlice(v, reflect.MakeSlice(v.Type(), diff, diff)) + return v +} + +func decodeArrayValue(d *Decoder, v reflect.Value) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + + if n == -1 { + return nil + } + if n > v.Len() { + return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n) + } + + for i := 0; i < n; i++ { + sv := v.Index(i) + if err := d.DecodeValue(sv); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) DecodeSlice() ([]interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + return d.decodeSlice(c) +} + +func (d *Decoder) decodeSlice(c codes.Code) ([]interface{}, error) { + n, err := d.arrayLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + + s := make([]interface{}, 0, min(n, sliceAllocLimit)) + for i := 0; i < n; i++ { + v, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + s = append(s, v) + } + + return s, nil +} + +func (d *Decoder) skipSlice(c codes.Code) error { + n, err := d.arrayLen(c) + if err != nil { + return err + } + + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_string.go 
b/vendor/github.com/vmihailenco/msgpack/v4/decode_string.go new file mode 100644 index 000000000..c5adf3865 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/decode_string.go @@ -0,0 +1,186 @@ +package msgpack + +import ( + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +func (d *Decoder) bytesLen(c codes.Code) (int, error) { + if c == codes.Nil { + return -1, nil + } + + if codes.IsFixedString(c) { + return int(c & codes.FixedStrMask), nil + } + + switch c { + case codes.Str8, codes.Bin8: + n, err := d.uint8() + return int(n), err + case codes.Str16, codes.Bin16: + n, err := d.uint16() + return int(n), err + case codes.Str32, codes.Bin32: + n, err := d.uint32() + return int(n), err + } + + return 0, fmt.Errorf("msgpack: invalid code=%x decoding bytes length", c) +} + +func (d *Decoder) DecodeString() (string, error) { + c, err := d.readCode() + if err != nil { + return "", err + } + return d.string(c) +} + +func (d *Decoder) string(c codes.Code) (string, error) { + n, err := d.bytesLen(c) + if err != nil { + return "", err + } + return d.stringWithLen(n) +} + +func (d *Decoder) stringWithLen(n int) (string, error) { + if n <= 0 { + return "", nil + } + b, err := d.readN(n) + return string(b), err +} + +func decodeStringValue(d *Decoder, v reflect.Value) error { + if err := mustSet(v); err != nil { + return err + } + + s, err := d.DecodeString() + if err != nil { + return err + } + + v.SetString(s) + return nil +} + +func (d *Decoder) DecodeBytesLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.bytesLen(c) +} + +func (d *Decoder) DecodeBytes() ([]byte, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + return d.bytes(c, nil) +} + +func (d *Decoder) bytes(c codes.Code, b []byte) ([]byte, error) { + n, err := d.bytesLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + return readN(d.r, b, n) +} + +func (d *Decoder) bytesNoCopy() ([]byte, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + n, err := d.bytesLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + return d.readN(n) +} + +func (d *Decoder) decodeBytesPtr(ptr *[]byte) error { + c, err := d.readCode() + if err != nil { + return err + } + return d.bytesPtr(c, ptr) +} + +func (d *Decoder) bytesPtr(c codes.Code, ptr *[]byte) error { + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + *ptr = nil + return nil + } + + *ptr, err = readN(d.r, *ptr, n) + return err +} + +func (d *Decoder) skipBytes(c codes.Code) error { + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n <= 0 { + return nil + } + return d.skipN(n) +} + +func decodeBytesValue(d *Decoder, v reflect.Value) error { + if err := mustSet(v); err != nil { + return err + } + + c, err := d.readCode() + if err != nil { + return err + } + + b, err := d.bytes(c, v.Bytes()) + if err != nil { + return err + } + + v.SetBytes(b) + + return nil +} + +func decodeByteArrayValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + return nil + } + if n > v.Len() { + return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n) + } + + b := v.Slice(0, n).Bytes() + return d.readFull(b) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_value.go b/vendor/github.com/vmihailenco/msgpack/v4/decode_value.go new file 
mode 100644 index 000000000..810d3be8f --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/decode_value.go @@ -0,0 +1,276 @@ +package msgpack + +import ( + "encoding" + "errors" + "fmt" + "reflect" +) + +var interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() +var stringType = reflect.TypeOf((*string)(nil)).Elem() + +var valueDecoders []decoderFunc + +//nolint:gochecknoinits +func init() { + valueDecoders = []decoderFunc{ + reflect.Bool: decodeBoolValue, + reflect.Int: decodeInt64Value, + reflect.Int8: decodeInt64Value, + reflect.Int16: decodeInt64Value, + reflect.Int32: decodeInt64Value, + reflect.Int64: decodeInt64Value, + reflect.Uint: decodeUint64Value, + reflect.Uint8: decodeUint64Value, + reflect.Uint16: decodeUint64Value, + reflect.Uint32: decodeUint64Value, + reflect.Uint64: decodeUint64Value, + reflect.Float32: decodeFloat32Value, + reflect.Float64: decodeFloat64Value, + reflect.Complex64: decodeUnsupportedValue, + reflect.Complex128: decodeUnsupportedValue, + reflect.Array: decodeArrayValue, + reflect.Chan: decodeUnsupportedValue, + reflect.Func: decodeUnsupportedValue, + reflect.Interface: decodeInterfaceValue, + reflect.Map: decodeMapValue, + reflect.Ptr: decodeUnsupportedValue, + reflect.Slice: decodeSliceValue, + reflect.String: decodeStringValue, + reflect.Struct: decodeStructValue, + reflect.UnsafePointer: decodeUnsupportedValue, + } +} + +func mustSet(v reflect.Value) error { + if !v.CanSet() { + return fmt.Errorf("msgpack: Decode(nonsettable %s)", v.Type()) + } + return nil +} + +func getDecoder(typ reflect.Type) decoderFunc { + if v, ok := typeDecMap.Load(typ); ok { + return v.(decoderFunc) + } + fn := _getDecoder(typ) + typeDecMap.Store(typ, fn) + return fn +} + +func _getDecoder(typ reflect.Type) decoderFunc { + kind := typ.Kind() + + if typ.Implements(customDecoderType) { + return decodeCustomValue + } + if typ.Implements(unmarshalerType) { + return unmarshalValue + } + if typ.Implements(binaryUnmarshalerType) { + return unmarshalBinaryValue + } + + // Addressable struct field value. 
+ if kind != reflect.Ptr { + ptr := reflect.PtrTo(typ) + if ptr.Implements(customDecoderType) { + return decodeCustomValueAddr + } + if ptr.Implements(unmarshalerType) { + return unmarshalValueAddr + } + if ptr.Implements(binaryUnmarshalerType) { + return unmarshalBinaryValueAddr + } + } + + switch kind { + case reflect.Ptr: + return ptrDecoderFunc(typ) + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + return decodeBytesValue + } + if elem == stringType { + return decodeStringSliceValue + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return decodeByteArrayValue + } + case reflect.Map: + if typ.Key() == stringType { + switch typ.Elem() { + case stringType: + return decodeMapStringStringValue + case interfaceType: + return decodeMapStringInterfaceValue + } + } + } + + return valueDecoders[kind] +} + +func ptrDecoderFunc(typ reflect.Type) decoderFunc { + decoder := getDecoder(typ.Elem()) + return func(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + if err := mustSet(v); err != nil { + return err + } + if !v.IsNil() { + v.Set(reflect.Zero(v.Type())) + } + return d.DecodeNil() + } + if v.IsNil() { + if err := mustSet(v); err != nil { + return err + } + v.Set(reflect.New(v.Type().Elem())) + } + return decoder(d, v.Elem()) + } +} + +func decodeCustomValueAddr(d *Decoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + } + return decodeCustomValue(d, v.Addr()) +} + +func decodeCustomValue(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + return d.decodeNilValue(v) + } + + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + + decoder := v.Interface().(CustomDecoder) + return decoder.DecodeMsgpack(d) +} + +func unmarshalValueAddr(d *Decoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + } + return unmarshalValue(d, v.Addr()) +} + +func unmarshalValue(d *Decoder, v reflect.Value) error { + if d.extLen == 0 || d.extLen == 1 { + if d.hasNilCode() { + return d.decodeNilValue(v) + } + } + + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + + var b []byte + + if d.extLen != 0 { + var err error + b, err = d.readN(d.extLen) + if err != nil { + return err + } + } else { + d.rec = make([]byte, 0, 64) + if err := d.Skip(); err != nil { + return err + } + b = d.rec + d.rec = nil + } + + unmarshaler := v.Interface().(Unmarshaler) + return unmarshaler.UnmarshalMsgpack(b) +} + +func decodeBoolValue(d *Decoder, v reflect.Value) error { + flag, err := d.DecodeBool() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetBool(flag) + return nil +} + +func decodeInterfaceValue(d *Decoder, v reflect.Value) error { + if v.IsNil() { + return d.interfaceValue(v) + } + + elem := v.Elem() + if !elem.CanAddr() { + if d.hasNilCode() { + v.Set(reflect.Zero(v.Type())) + return d.DecodeNil() + } + } + + return d.DecodeValue(elem) +} + +func (d *Decoder) interfaceValue(v reflect.Value) error { + vv, err := d.decodeInterfaceCond() + if err != nil { + return err + } + + if vv != nil { + if v.Type() == errorType { + if vv, ok := vv.(string); ok { + v.Set(reflect.ValueOf(errors.New(vv))) + return nil + } + } + + v.Set(reflect.ValueOf(vv)) + } + + return nil +} + +func decodeUnsupportedValue(d *Decoder, v reflect.Value) error { + return fmt.Errorf("msgpack: Decode(unsupported %s)", v.Type()) +} + 
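+// Example (illustrative; Point is a made-up type): a value can take over its
+// own decoding by implementing CustomDecoder, which is what decodeCustomValue
+// above dispatches to:
+//
+//	type Point struct{ X, Y int }
+//
+//	func (p *Point) DecodeMsgpack(d *Decoder) error {
+//		return d.DecodeMulti(&p.X, &p.Y)
+//	}
+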
+//------------------------------------------------------------------------------ + +func unmarshalBinaryValueAddr(d *Decoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + } + return unmarshalBinaryValue(d, v.Addr()) +} + +func unmarshalBinaryValue(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + return d.decodeNilValue(v) + } + + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + + data, err := d.DecodeBytes() + if err != nil { + return err + } + + unmarshaler := v.Interface().(encoding.BinaryUnmarshaler) + return unmarshaler.UnmarshalBinary(data) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/encode.go b/vendor/github.com/vmihailenco/msgpack/v4/encode.go new file mode 100644 index 000000000..37f098701 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/encode.go @@ -0,0 +1,241 @@ +package msgpack + +import ( + "bytes" + "io" + "reflect" + "sync" + "time" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +const ( + sortMapKeysFlag uint32 = 1 << iota + structAsArrayFlag + encodeUsingJSONFlag + useCompactIntsFlag + useCompactFloatsFlag +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +type byteWriter struct { + io.Writer + + buf [1]byte +} + +func newByteWriter(w io.Writer) *byteWriter { + bw := new(byteWriter) + bw.Reset(w) + return bw +} + +func (bw *byteWriter) Reset(w io.Writer) { + bw.Writer = w +} + +func (bw *byteWriter) WriteByte(c byte) error { + bw.buf[0] = c + _, err := bw.Write(bw.buf[:]) + return err +} + +//------------------------------------------------------------------------------ + +var encPool = sync.Pool{ + New: func() interface{} { + return NewEncoder(nil) + }, +} + +// Marshal returns the MessagePack encoding of v. +func Marshal(v interface{}) ([]byte, error) { + enc := encPool.Get().(*Encoder) + + var buf bytes.Buffer + enc.Reset(&buf) + + err := enc.Encode(v) + b := buf.Bytes() + + encPool.Put(enc) + + if err != nil { + return nil, err + } + return b, err +} + +type Encoder struct { + w writer + + buf []byte + timeBuf []byte + bootstrap [9 + 12]byte + + intern map[string]int + + flags uint32 +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + e := new(Encoder) + e.buf = e.bootstrap[:9] + e.timeBuf = e.bootstrap[9 : 9+12] + e.Reset(w) + return e +} + +func (e *Encoder) Reset(w io.Writer) { + if bw, ok := w.(writer); ok { + e.w = bw + } else if bw, ok := e.w.(*byteWriter); ok { + bw.Reset(w) + } else { + e.w = newByteWriter(w) + } + + for k := range e.intern { + delete(e.intern, k) + } + + //TODO: + //e.sortMapKeys = false + //e.structAsArray = false + //e.useJSONTag = false + //e.useCompact = false +} + +// SortMapKeys causes the Encoder to encode map keys in increasing order. +// Supported map types are: +// - map[string]string +// - map[string]interface{} +func (e *Encoder) SortMapKeys(on bool) *Encoder { + if on { + e.flags |= sortMapKeysFlag + } else { + e.flags &= ^sortMapKeysFlag + } + return e +} + +// StructAsArray causes the Encoder to encode Go structs as msgpack arrays. +func (e *Encoder) StructAsArray(on bool) *Encoder { + if on { + e.flags |= structAsArrayFlag + } else { + e.flags &= ^structAsArrayFlag + } + return e +} + +// UseJSONTag causes the Encoder to use json struct tag as fallback option +// if there is no msgpack tag. 
+func (e *Encoder) UseJSONTag(on bool) *Encoder {
+	if on {
+		e.flags |= encodeUsingJSONFlag
+	} else {
+		e.flags &= ^encodeUsingJSONFlag
+	}
+	return e
+}
+
+// UseCompactEncoding causes the Encoder to choose the most compact encoding.
+// For example, it allows encoding a small Go int64 as a msgpack int8, saving 7 bytes.
+func (e *Encoder) UseCompactEncoding(on bool) *Encoder {
+	if on {
+		e.flags |= useCompactIntsFlag
+	} else {
+		e.flags &= ^useCompactIntsFlag
+	}
+	return e
+}
+
+// UseCompactFloats causes the Encoder to choose a compact integer encoding
+// for floats that can be represented as integers.
+func (e *Encoder) UseCompactFloats(on bool) {
+	if on {
+		e.flags |= useCompactFloatsFlag
+	} else {
+		e.flags &= ^useCompactFloatsFlag
+	}
+}
+
+func (e *Encoder) Encode(v interface{}) error {
+	switch v := v.(type) {
+	case nil:
+		return e.EncodeNil()
+	case string:
+		return e.EncodeString(v)
+	case []byte:
+		return e.EncodeBytes(v)
+	case int:
+		return e.encodeInt64Cond(int64(v))
+	case int64:
+		return e.encodeInt64Cond(v)
+	case uint:
+		return e.encodeUint64Cond(uint64(v))
+	case uint64:
+		return e.encodeUint64Cond(v)
+	case bool:
+		return e.EncodeBool(v)
+	case float32:
+		return e.EncodeFloat32(v)
+	case float64:
+		return e.EncodeFloat64(v)
+	case time.Duration:
+		return e.encodeInt64Cond(int64(v))
+	case time.Time:
+		return e.EncodeTime(v)
+	}
+	return e.EncodeValue(reflect.ValueOf(v))
+}
+
+func (e *Encoder) EncodeMulti(v ...interface{}) error {
+	for _, vv := range v {
+		if err := e.Encode(vv); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (e *Encoder) EncodeValue(v reflect.Value) error {
+	fn := getEncoder(v.Type())
+	return fn(e, v)
+}
+
+func (e *Encoder) EncodeNil() error {
+	return e.writeCode(codes.Nil)
+}
+
+func (e *Encoder) EncodeBool(value bool) error {
+	if value {
+		return e.writeCode(codes.True)
+	}
+	return e.writeCode(codes.False)
+}
+
+func (e *Encoder) EncodeDuration(d time.Duration) error {
+	return e.EncodeInt(int64(d))
+}
+
+func (e *Encoder) writeCode(c codes.Code) error {
+	return e.w.WriteByte(byte(c))
+}
+
+func (e *Encoder) write(b []byte) error {
+	_, err := e.w.Write(b)
+	return err
+}
+
+func (e *Encoder) writeString(s string) error {
+	_, err := e.w.Write(stringToBytes(s))
+	return err
+}
diff --git a/vendor/github.com/vmihailenco/msgpack/v4/encode_map.go b/vendor/github.com/vmihailenco/msgpack/v4/encode_map.go
new file mode 100644
index 000000000..d9b954d86
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/v4/encode_map.go
@@ -0,0 +1,172 @@
+package msgpack
+
+import (
+	"reflect"
+	"sort"
+
+	"github.com/vmihailenco/msgpack/v4/codes"
+)
+
+func encodeMapValue(e *Encoder, v reflect.Value) error {
+	if v.IsNil() {
+		return e.EncodeNil()
+	}
+
+	if err := e.EncodeMapLen(v.Len()); err != nil {
+		return err
+	}
+
+	for _, key := range v.MapKeys() {
+		if err := e.EncodeValue(key); err != nil {
+			return err
+		}
+		if err := e.EncodeValue(v.MapIndex(key)); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func encodeMapStringStringValue(e *Encoder, v reflect.Value) error {
+	if v.IsNil() {
+		return e.EncodeNil()
+	}
+
+	if err := e.EncodeMapLen(v.Len()); err != nil {
+		return err
+	}
+
+	m := v.Convert(mapStringStringType).Interface().(map[string]string)
+	if e.flags&sortMapKeysFlag != 0 {
+		return e.encodeSortedMapStringString(m)
+	}
+
+	for mk, mv := range m {
+		if err := e.EncodeString(mk); err != nil {
+			return err
+		}
+		if err := e.EncodeString(mv); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func encodeMapStringInterfaceValue(e *Encoder, v reflect.Value) error {
+	if v.IsNil() {
+		return e.EncodeNil()
+	}
+
+	if err := e.EncodeMapLen(v.Len()); err != nil {
+		return err
+	}
+
+	m := v.Convert(mapStringInterfaceType).Interface().(map[string]interface{})
+	if e.flags&sortMapKeysFlag != 0 {
+		return e.encodeSortedMapStringInterface(m)
+	}
+
+	for mk, mv := range m {
+		if err := e.EncodeString(mk); err != nil {
+			return err
+		}
+		if err := e.Encode(mv); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (e *Encoder) encodeSortedMapStringString(m map[string]string) error {
+	keys := make([]string, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		err := e.EncodeString(k)
+		if err != nil {
+			return err
+		}
+		if err = e.EncodeString(m[k]); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (e *Encoder) encodeSortedMapStringInterface(m map[string]interface{}) error {
+	keys := make([]string, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		err := e.EncodeString(k)
+		if err != nil {
+			return err
+		}
+		if err = e.Encode(m[k]); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (e *Encoder) EncodeMapLen(l int) error {
+	if l < 16 {
+		return e.writeCode(codes.FixedMapLow | codes.Code(l))
+	}
+	if l < 65536 {
+		return e.write2(codes.Map16, uint16(l))
+	}
+	return e.write4(codes.Map32, uint32(l))
+}
+
+func encodeStructValue(e *Encoder, strct reflect.Value) error {
+	var structFields *fields
+	if e.flags&encodeUsingJSONFlag != 0 {
+		structFields = jsonStructs.Fields(strct.Type())
+	} else {
+		structFields = structs.Fields(strct.Type())
+	}
+
+	if e.flags&structAsArrayFlag != 0 || structFields.AsArray {
+		return encodeStructValueAsArray(e, strct, structFields.List)
+	}
+	fields := structFields.OmitEmpty(strct)
+
+	if err := e.EncodeMapLen(len(fields)); err != nil {
+		return err
+	}
+
+	for _, f := range fields {
+		if err := e.EncodeString(f.name); err != nil {
+			return err
+		}
+		if err := f.EncodeValue(e, strct); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func encodeStructValueAsArray(e *Encoder, strct reflect.Value, fields []*field) error {
+	if err := e.EncodeArrayLen(len(fields)); err != nil {
+		return err
+	}
+	for _, f := range fields {
+		if err := f.EncodeValue(e, strct); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/vmihailenco/msgpack/v4/encode_number.go b/vendor/github.com/vmihailenco/msgpack/v4/encode_number.go
new file mode 100644
index 000000000..bf3c2f851
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/v4/encode_number.go
@@ -0,0 +1,244 @@
+package msgpack
+
+import (
+	"math"
+	"reflect"
+
+	"github.com/vmihailenco/msgpack/v4/codes"
+)
+
+// EncodeUint8 encodes a uint8 in 2 bytes, preserving the type of the number.
+func (e *Encoder) EncodeUint8(n uint8) error {
+	return e.write1(codes.Uint8, n)
+}
+
+func (e *Encoder) encodeUint8Cond(n uint8) error {
+	if e.flags&useCompactIntsFlag != 0 {
+		return e.EncodeUint(uint64(n))
+	}
+	return e.EncodeUint8(n)
+}
+
+// EncodeUint16 encodes a uint16 in 3 bytes, preserving the type of the number.
+func (e *Encoder) EncodeUint16(n uint16) error {
+	return e.write2(codes.Uint16, n)
+}
+
+func (e *Encoder) encodeUint16Cond(n uint16) error {
+	if e.flags&useCompactIntsFlag != 0 {
+		return e.EncodeUint(uint64(n))
+	}
+	return e.EncodeUint16(n)
+}
+
+// EncodeUint32 encodes a uint32 in 5 bytes, preserving the type of the number.
+func (e *Encoder) EncodeUint32(n uint32) error {
+	return e.write4(codes.Uint32, n)
+}
+
+func (e *Encoder) encodeUint32Cond(n uint32) error {
+	if e.flags&useCompactIntsFlag != 0 {
+		return e.EncodeUint(uint64(n))
+	}
+	return e.EncodeUint32(n)
+}
+
+// EncodeUint64 encodes a uint64 in 9 bytes, preserving the type of the number.
+func (e *Encoder) EncodeUint64(n uint64) error {
+	return e.write8(codes.Uint64, n)
+}
+
+func (e *Encoder) encodeUint64Cond(n uint64) error {
+	if e.flags&useCompactIntsFlag != 0 {
+		return e.EncodeUint(n)
+	}
+	return e.EncodeUint64(n)
+}
+
+// EncodeInt8 encodes an int8 in 2 bytes, preserving the type of the number.
+func (e *Encoder) EncodeInt8(n int8) error {
+	return e.write1(codes.Int8, uint8(n))
+}
+
+func (e *Encoder) encodeInt8Cond(n int8) error {
+	if e.flags&useCompactIntsFlag != 0 {
+		return e.EncodeInt(int64(n))
+	}
+	return e.EncodeInt8(n)
+}
+
+// EncodeInt16 encodes an int16 in 3 bytes, preserving the type of the number.
+func (e *Encoder) EncodeInt16(n int16) error {
+	return e.write2(codes.Int16, uint16(n))
+}
+
+func (e *Encoder) encodeInt16Cond(n int16) error {
+	if e.flags&useCompactIntsFlag != 0 {
+		return e.EncodeInt(int64(n))
+	}
+	return e.EncodeInt16(n)
+}
+
+// EncodeInt32 encodes an int32 in 5 bytes, preserving the type of the number.
+func (e *Encoder) EncodeInt32(n int32) error {
+	return e.write4(codes.Int32, uint32(n))
+}
+
+func (e *Encoder) encodeInt32Cond(n int32) error {
+	if e.flags&useCompactIntsFlag != 0 {
+		return e.EncodeInt(int64(n))
+	}
+	return e.EncodeInt32(n)
+}
+
+// EncodeInt64 encodes an int64 in 9 bytes, preserving the type of the number.
+func (e *Encoder) EncodeInt64(n int64) error {
+	return e.write8(codes.Int64, uint64(n))
+}
+
+func (e *Encoder) encodeInt64Cond(n int64) error {
+	if e.flags&useCompactIntsFlag != 0 {
+		return e.EncodeInt(n)
+	}
+	return e.EncodeInt64(n)
+}
+
+// EncodeUint encodes a uint64 in 1, 2, 3, 5, or 9 bytes.
+// The type of the number is lost during encoding.
+func (e *Encoder) EncodeUint(n uint64) error {
+	if n <= math.MaxInt8 {
+		return e.w.WriteByte(byte(n))
+	}
+	if n <= math.MaxUint8 {
+		return e.EncodeUint8(uint8(n))
+	}
+	if n <= math.MaxUint16 {
+		return e.EncodeUint16(uint16(n))
+	}
+	if n <= math.MaxUint32 {
+		return e.EncodeUint32(uint32(n))
+	}
+	return e.EncodeUint64(n)
+}
+
+// EncodeInt encodes an int64 in 1, 2, 3, 5, or 9 bytes.
+// The type of the number is lost during encoding.
+func (e *Encoder) EncodeInt(n int64) error {
+	if n >= 0 {
+		return e.EncodeUint(uint64(n))
+	}
+	if n >= int64(int8(codes.NegFixedNumLow)) {
+		return e.w.WriteByte(byte(n))
+	}
+	if n >= math.MinInt8 {
+		return e.EncodeInt8(int8(n))
+	}
+	if n >= math.MinInt16 {
+		return e.EncodeInt16(int16(n))
+	}
+	if n >= math.MinInt32 {
+		return e.EncodeInt32(int32(n))
+	}
+	return e.EncodeInt64(n)
+}
+
+func (e *Encoder) EncodeFloat32(n float32) error {
+	if e.flags&useCompactFloatsFlag != 0 {
+		if float32(int64(n)) == n {
+			return e.EncodeInt(int64(n))
+		}
+	}
+	return e.write4(codes.Float, math.Float32bits(n))
+}
+
+func (e *Encoder) EncodeFloat64(n float64) error {
+	if e.flags&useCompactFloatsFlag != 0 {
+		// Both NaN and Inf convert to int64(-0x8000000000000000).
+		// If n is NaN then it never compares true with any other value.
+		// If n is Inf then it doesn't convert from int64 back to +/-Inf.
+		// In both cases the comparison works.
+ if float64(int64(n)) == n { + return e.EncodeInt(int64(n)) + } + } + return e.write8(codes.Double, math.Float64bits(n)) +} + +func (e *Encoder) write1(code codes.Code, n uint8) error { + e.buf = e.buf[:2] + e.buf[0] = byte(code) + e.buf[1] = n + return e.write(e.buf) +} + +func (e *Encoder) write2(code codes.Code, n uint16) error { + e.buf = e.buf[:3] + e.buf[0] = byte(code) + e.buf[1] = byte(n >> 8) + e.buf[2] = byte(n) + return e.write(e.buf) +} + +func (e *Encoder) write4(code codes.Code, n uint32) error { + e.buf = e.buf[:5] + e.buf[0] = byte(code) + e.buf[1] = byte(n >> 24) + e.buf[2] = byte(n >> 16) + e.buf[3] = byte(n >> 8) + e.buf[4] = byte(n) + return e.write(e.buf) +} + +func (e *Encoder) write8(code codes.Code, n uint64) error { + e.buf = e.buf[:9] + e.buf[0] = byte(code) + e.buf[1] = byte(n >> 56) + e.buf[2] = byte(n >> 48) + e.buf[3] = byte(n >> 40) + e.buf[4] = byte(n >> 32) + e.buf[5] = byte(n >> 24) + e.buf[6] = byte(n >> 16) + e.buf[7] = byte(n >> 8) + e.buf[8] = byte(n) + return e.write(e.buf) +} + +func encodeUint8CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint8Cond(uint8(v.Uint())) +} + +func encodeUint16CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint16Cond(uint16(v.Uint())) +} + +func encodeUint32CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint32Cond(uint32(v.Uint())) +} + +func encodeUint64CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint64Cond(v.Uint()) +} + +func encodeInt8CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt8Cond(int8(v.Int())) +} + +func encodeInt16CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt16Cond(int16(v.Int())) +} + +func encodeInt32CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt32Cond(int32(v.Int())) +} + +func encodeInt64CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt64Cond(v.Int()) +} + +func encodeFloat32Value(e *Encoder, v reflect.Value) error { + return e.EncodeFloat32(float32(v.Float())) +} + +func encodeFloat64Value(e *Encoder, v reflect.Value) error { + return e.EncodeFloat64(v.Float()) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/encode_slice.go b/vendor/github.com/vmihailenco/msgpack/v4/encode_slice.go new file mode 100644 index 000000000..69a9618e0 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/encode_slice.go @@ -0,0 +1,131 @@ +package msgpack + +import ( + "reflect" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +var sliceStringType = reflect.TypeOf(([]string)(nil)) + +func encodeStringValue(e *Encoder, v reflect.Value) error { + return e.EncodeString(v.String()) +} + +func encodeByteSliceValue(e *Encoder, v reflect.Value) error { + return e.EncodeBytes(v.Bytes()) +} + +func encodeByteArrayValue(e *Encoder, v reflect.Value) error { + if err := e.EncodeBytesLen(v.Len()); err != nil { + return err + } + + if v.CanAddr() { + b := v.Slice(0, v.Len()).Bytes() + return e.write(b) + } + + e.buf = grow(e.buf, v.Len()) + reflect.Copy(reflect.ValueOf(e.buf), v) + return e.write(e.buf) +} + +func grow(b []byte, n int) []byte { + if cap(b) >= n { + return b[:n] + } + b = b[:cap(b)] + b = append(b, make([]byte, n-len(b))...) 
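+	// The appended bytes are zero-valued placeholders for the caller to overwrite.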
+ return b +} + +func (e *Encoder) EncodeBytesLen(l int) error { + if l < 256 { + return e.write1(codes.Bin8, uint8(l)) + } + if l < 65536 { + return e.write2(codes.Bin16, uint16(l)) + } + return e.write4(codes.Bin32, uint32(l)) +} + +func (e *Encoder) encodeStrLen(l int) error { + if l < 32 { + return e.writeCode(codes.FixedStrLow | codes.Code(l)) + } + if l < 256 { + return e.write1(codes.Str8, uint8(l)) + } + if l < 65536 { + return e.write2(codes.Str16, uint16(l)) + } + return e.write4(codes.Str32, uint32(l)) +} + +func (e *Encoder) EncodeString(v string) error { + if err := e.encodeStrLen(len(v)); err != nil { + return err + } + return e.writeString(v) +} + +func (e *Encoder) EncodeBytes(v []byte) error { + if v == nil { + return e.EncodeNil() + } + if err := e.EncodeBytesLen(len(v)); err != nil { + return err + } + return e.write(v) +} + +func (e *Encoder) EncodeArrayLen(l int) error { + if l < 16 { + return e.writeCode(codes.FixedArrayLow | codes.Code(l)) + } + if l < 65536 { + return e.write2(codes.Array16, uint16(l)) + } + return e.write4(codes.Array32, uint32(l)) +} + +func encodeStringSliceValue(e *Encoder, v reflect.Value) error { + ss := v.Convert(sliceStringType).Interface().([]string) + return e.encodeStringSlice(ss) +} + +func (e *Encoder) encodeStringSlice(s []string) error { + if s == nil { + return e.EncodeNil() + } + if err := e.EncodeArrayLen(len(s)); err != nil { + return err + } + for _, v := range s { + if err := e.EncodeString(v); err != nil { + return err + } + } + return nil +} + +func encodeSliceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return encodeArrayValue(e, v) +} + +func encodeArrayValue(e *Encoder, v reflect.Value) error { + l := v.Len() + if err := e.EncodeArrayLen(l); err != nil { + return err + } + for i := 0; i < l; i++ { + if err := e.EncodeValue(v.Index(i)); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/encode_value.go b/vendor/github.com/vmihailenco/msgpack/v4/encode_value.go new file mode 100644 index 000000000..335fcdb7e --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/encode_value.go @@ -0,0 +1,216 @@ +package msgpack + +import ( + "encoding" + "fmt" + "reflect" +) + +var valueEncoders []encoderFunc + +//nolint:gochecknoinits +func init() { + valueEncoders = []encoderFunc{ + reflect.Bool: encodeBoolValue, + reflect.Int: encodeInt64CondValue, + reflect.Int8: encodeInt8CondValue, + reflect.Int16: encodeInt16CondValue, + reflect.Int32: encodeInt32CondValue, + reflect.Int64: encodeInt64CondValue, + reflect.Uint: encodeUint64CondValue, + reflect.Uint8: encodeUint8CondValue, + reflect.Uint16: encodeUint16CondValue, + reflect.Uint32: encodeUint32CondValue, + reflect.Uint64: encodeUint64CondValue, + reflect.Float32: encodeFloat32Value, + reflect.Float64: encodeFloat64Value, + reflect.Complex64: encodeUnsupportedValue, + reflect.Complex128: encodeUnsupportedValue, + reflect.Array: encodeArrayValue, + reflect.Chan: encodeUnsupportedValue, + reflect.Func: encodeUnsupportedValue, + reflect.Interface: encodeInterfaceValue, + reflect.Map: encodeMapValue, + reflect.Ptr: encodeUnsupportedValue, + reflect.Slice: encodeSliceValue, + reflect.String: encodeStringValue, + reflect.Struct: encodeStructValue, + reflect.UnsafePointer: encodeUnsupportedValue, + } +} + +func getEncoder(typ reflect.Type) encoderFunc { + if v, ok := typeEncMap.Load(typ); ok { + return v.(encoderFunc) + } + fn := _getEncoder(typ) + typeEncMap.Store(typ, fn) + return fn +} + 
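The sync.Map cache above means the interface probes in _getEncoder below run once per type. A minimal sketch of the CustomEncoder fast path they enable, as seen from user code, assuming only the package's exported Marshal API (the Point type is hypothetical):

```go
package main

import "github.com/vmihailenco/msgpack/v4"

// Point implements CustomEncoder, so _getEncoder returns encodeCustomValue
// for it and the reflection-based struct encoder is skipped.
type Point struct{ X, Y int64 }

func (p *Point) EncodeMsgpack(enc *msgpack.Encoder) error {
	if err := enc.EncodeInt(p.X); err != nil {
		return err
	}
	return enc.EncodeInt(p.Y)
}

func main() {
	// A real type would also implement CustomDecoder (see msgpack.go later
	// in this diff) so the value can be read back.
	b, err := msgpack.Marshal(&Point{X: 1, Y: 2})
	if err != nil {
		panic(err)
	}
	_ = b
}
```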
+func _getEncoder(typ reflect.Type) encoderFunc { + kind := typ.Kind() + + if kind == reflect.Ptr { + if _, ok := typeEncMap.Load(typ.Elem()); ok { + return ptrEncoderFunc(typ) + } + } + + if typ.Implements(customEncoderType) { + return encodeCustomValue + } + if typ.Implements(marshalerType) { + return marshalValue + } + if typ.Implements(binaryMarshalerType) { + return marshalBinaryValue + } + + // Addressable struct field value. + if kind != reflect.Ptr { + ptr := reflect.PtrTo(typ) + if ptr.Implements(customEncoderType) { + return encodeCustomValuePtr + } + if ptr.Implements(marshalerType) { + return marshalValuePtr + } + if ptr.Implements(binaryMarshalerType) { + return marshalBinaryValuePtr + } + } + + if typ == errorType { + return encodeErrorValue + } + + switch kind { + case reflect.Ptr: + return ptrEncoderFunc(typ) + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + return encodeByteSliceValue + } + if elem == stringType { + return encodeStringSliceValue + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return encodeByteArrayValue + } + case reflect.Map: + if typ.Key() == stringType { + switch typ.Elem() { + case stringType: + return encodeMapStringStringValue + case interfaceType: + return encodeMapStringInterfaceValue + } + } + } + + return valueEncoders[kind] +} + +func ptrEncoderFunc(typ reflect.Type) encoderFunc { + encoder := getEncoder(typ.Elem()) + return func(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return encoder(e, v.Elem()) + } +} + +func encodeCustomValuePtr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + encoder := v.Addr().Interface().(CustomEncoder) + return encoder.EncodeMsgpack(e) +} + +func encodeCustomValue(e *Encoder, v reflect.Value) error { + if nilable(v) && v.IsNil() { + return e.EncodeNil() + } + + encoder := v.Interface().(CustomEncoder) + return encoder.EncodeMsgpack(e) +} + +func marshalValuePtr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + return marshalValue(e, v.Addr()) +} + +func marshalValue(e *Encoder, v reflect.Value) error { + if nilable(v) && v.IsNil() { + return e.EncodeNil() + } + + marshaler := v.Interface().(Marshaler) + b, err := marshaler.MarshalMsgpack() + if err != nil { + return err + } + _, err = e.w.Write(b) + return err +} + +func encodeBoolValue(e *Encoder, v reflect.Value) error { + return e.EncodeBool(v.Bool()) +} + +func encodeInterfaceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return e.EncodeValue(v.Elem()) +} + +func encodeErrorValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return e.EncodeString(v.Interface().(error).Error()) +} + +func encodeUnsupportedValue(e *Encoder, v reflect.Value) error { + return fmt.Errorf("msgpack: Encode(unsupported %s)", v.Type()) +} + +func nilable(v reflect.Value) bool { + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return true + } + return false +} + +//------------------------------------------------------------------------------ + +func marshalBinaryValuePtr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + return marshalBinaryValue(e, v.Addr()) +} + +func marshalBinaryValue(e 
*Encoder, v reflect.Value) error { + if nilable(v) && v.IsNil() { + return e.EncodeNil() + } + + marshaler := v.Interface().(encoding.BinaryMarshaler) + data, err := marshaler.MarshalBinary() + if err != nil { + return err + } + + return e.EncodeBytes(data) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/ext.go b/vendor/github.com/vmihailenco/msgpack/v4/ext.go new file mode 100644 index 000000000..17e709bc8 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/ext.go @@ -0,0 +1,244 @@ +package msgpack + +import ( + "bytes" + "fmt" + "reflect" + "sync" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +type extInfo struct { + Type reflect.Type + Decoder decoderFunc +} + +var extTypes = make(map[int8]*extInfo) + +var bufferPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +// RegisterExt records a type, identified by a value for that type, +// under the provided id. That id will identify the concrete type of a value +// sent or received as an interface variable. Only types that will be +// transferred as implementations of interface values need to be registered. +// Expecting to be used only during initialization, it panics if the mapping +// between types and ids is not a bijection. +func RegisterExt(id int8, value interface{}) { + typ := reflect.TypeOf(value) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + ptr := reflect.PtrTo(typ) + + if _, ok := extTypes[id]; ok { + panic(fmt.Errorf("msgpack: ext with id=%d is already registered", id)) + } + + registerExt(id, ptr, getEncoder(ptr), getDecoder(ptr)) + registerExt(id, typ, getEncoder(typ), getDecoder(typ)) +} + +func registerExt(id int8, typ reflect.Type, enc encoderFunc, dec decoderFunc) { + if enc != nil { + typeEncMap.Store(typ, makeExtEncoder(id, enc)) + } + if dec != nil { + extTypes[id] = &extInfo{ + Type: typ, + Decoder: dec, + } + typeDecMap.Store(typ, makeExtDecoder(id, dec)) + } +} + +func (e *Encoder) EncodeExtHeader(typeID int8, length int) error { + if err := e.encodeExtLen(length); err != nil { + return err + } + if err := e.w.WriteByte(byte(typeID)); err != nil { + return err + } + return nil +} + +func makeExtEncoder(typeID int8, enc encoderFunc) encoderFunc { + return func(e *Encoder, v reflect.Value) error { + buf := bufferPool.Get().(*bytes.Buffer) + defer bufferPool.Put(buf) + buf.Reset() + + oldw := e.w + e.w = buf + err := enc(e, v) + e.w = oldw + + if err != nil { + return err + } + + err = e.EncodeExtHeader(typeID, buf.Len()) + if err != nil { + return err + } + return e.write(buf.Bytes()) + } +} + +func makeExtDecoder(typeID int8, dec decoderFunc) decoderFunc { + return func(d *Decoder, v reflect.Value) error { + c, err := d.PeekCode() + if err != nil { + return err + } + + if !codes.IsExt(c) { + return dec(d, v) + } + + id, extLen, err := d.DecodeExtHeader() + if err != nil { + return err + } + + if id != typeID { + return fmt.Errorf("msgpack: got ext type=%d, wanted %d", id, typeID) + } + + d.extLen = extLen + return dec(d, v) + } +} + +func (e *Encoder) encodeExtLen(l int) error { + switch l { + case 1: + return e.writeCode(codes.FixExt1) + case 2: + return e.writeCode(codes.FixExt2) + case 4: + return e.writeCode(codes.FixExt4) + case 8: + return e.writeCode(codes.FixExt8) + case 16: + return e.writeCode(codes.FixExt16) + } + if l < 256 { + return e.write1(codes.Ext8, uint8(l)) + } + if l < 65536 { + return e.write2(codes.Ext16, uint16(l)) + } + return e.write4(codes.Ext32, uint32(l)) +} + +func (d *Decoder) parseExtLen(c codes.Code) (int, error) { + 
switch c { + case codes.FixExt1: + return 1, nil + case codes.FixExt2: + return 2, nil + case codes.FixExt4: + return 4, nil + case codes.FixExt8: + return 8, nil + case codes.FixExt16: + return 16, nil + case codes.Ext8: + n, err := d.uint8() + return int(n), err + case codes.Ext16: + n, err := d.uint16() + return int(n), err + case codes.Ext32: + n, err := d.uint32() + return int(n), err + default: + return 0, fmt.Errorf("msgpack: invalid code=%x decoding ext length", c) + } +} + +func (d *Decoder) extHeader(c codes.Code) (int8, int, error) { + length, err := d.parseExtLen(c) + if err != nil { + return 0, 0, err + } + + typeID, err := d.readCode() + if err != nil { + return 0, 0, err + } + + return int8(typeID), length, nil +} + +func (d *Decoder) DecodeExtHeader() (typeID int8, length int, err error) { + c, err := d.readCode() + if err != nil { + return + } + return d.extHeader(c) +} + +func (d *Decoder) extInterface(c codes.Code) (interface{}, error) { + extID, extLen, err := d.extHeader(c) + if err != nil { + return nil, err + } + + info, ok := extTypes[extID] + if !ok { + return nil, fmt.Errorf("msgpack: unknown ext id=%d", extID) + } + + v := reflect.New(info.Type) + + d.extLen = extLen + err = info.Decoder(d, v.Elem()) + d.extLen = 0 + if err != nil { + return nil, err + } + + return v.Interface(), nil +} + +func (d *Decoder) skipExt(c codes.Code) error { + n, err := d.parseExtLen(c) + if err != nil { + return err + } + return d.skipN(n + 1) +} + +func (d *Decoder) skipExtHeader(c codes.Code) error { + // Read ext type. + _, err := d.readCode() + if err != nil { + return err + } + // Read ext body len. + for i := 0; i < extHeaderLen(c); i++ { + _, err := d.readCode() + if err != nil { + return err + } + } + return nil +} + +func extHeaderLen(c codes.Code) int { + switch c { + case codes.Ext8: + return 1 + case codes.Ext16: + return 2 + case codes.Ext32: + return 4 + } + return 0 +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/go.mod b/vendor/github.com/vmihailenco/msgpack/v4/go.mod new file mode 100644 index 000000000..d935fa654 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/go.mod @@ -0,0 +1,13 @@ +module github.com/vmihailenco/msgpack/v4 + +require ( + github.com/davecgh/go-spew v1.1.1 + github.com/golang/protobuf v1.3.4 // indirect + github.com/kr/pretty v0.1.0 // indirect + github.com/vmihailenco/tagparser v0.1.1 + golang.org/x/net v0.0.0-20200301022130-244492dfa37a // indirect + google.golang.org/appengine v1.6.5 + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 +) + +go 1.11 diff --git a/vendor/github.com/vmihailenco/msgpack/v4/go.sum b/vendor/github.com/vmihailenco/msgpack/v4/go.sum new file mode 100644 index 000000000..be608ec08 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/go.sum @@ -0,0 +1,24 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 
h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= +github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/vmihailenco/msgpack/v4/intern.go b/vendor/github.com/vmihailenco/msgpack/v4/intern.go new file mode 100644 index 000000000..6ca569273 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/intern.go @@ -0,0 +1,236 @@ +package msgpack + +import ( + "encoding/binary" + "errors" + "fmt" + "math" + "reflect" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +var internStringExtID int8 = -128 + +var errUnexpectedCode = errors.New("msgpack: unexpected code") + +func encodeInternInterfaceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + + v = v.Elem() + if v.Kind() == reflect.String { + return encodeInternStringValue(e, v) + } + return e.EncodeValue(v) +} + +func encodeInternStringValue(e *Encoder, v reflect.Value) error { + s := v.String() + + if s != "" { + if idx, ok := e.intern[s]; ok { + return e.internStringIndex(idx) + } + + if e.intern == nil { + e.intern = make(map[string]int) + } + + idx := len(e.intern) + e.intern[s] = idx + } + + return e.EncodeString(s) +} + +func (e *Encoder) internStringIndex(idx int) error { + if idx < math.MaxUint8 { + if err := e.writeCode(codes.FixExt1); err != nil { + return err + } + if err := e.w.WriteByte(byte(internStringExtID)); err != nil { + return err + } + return e.w.WriteByte(byte(idx)) + } + + if idx < math.MaxUint16 { + if err := e.writeCode(codes.FixExt2); err != nil { + return err + } + if err := e.w.WriteByte(byte(internStringExtID)); err != nil { + return err + } + if err := e.w.WriteByte(byte(idx >> 8)); err != nil { + return err + } + return e.w.WriteByte(byte(idx)) + } + + if int64(idx) < math.MaxUint32 { + if err := e.writeCode(codes.FixExt4); err != nil { + return err + } + if err := e.w.WriteByte(byte(internStringExtID)); err != nil { + return err + } + if err := e.w.WriteByte(byte(idx >> 24)); err != nil { + return err + } + if err := e.w.WriteByte(byte(idx >> 16)); err != nil { + return err + } + if err := e.w.WriteByte(byte(idx >> 8)); err != nil { + return err + } + return 
e.w.WriteByte(byte(idx)) + } + + return fmt.Errorf("msgpack: intern string index=%d is too large", idx) +} + +//------------------------------------------------------------------------------ + +func decodeInternInterfaceValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + s, err := d.internString(c) + if err == nil { + v.Set(reflect.ValueOf(s)) + return nil + } + if err != nil && err != errUnexpectedCode { + return err + } + + if err := d.s.UnreadByte(); err != nil { + return err + } + + return decodeInterfaceValue(d, v) +} + +func decodeInternStringValue(d *Decoder, v reflect.Value) error { + if err := mustSet(v); err != nil { + return err + } + + c, err := d.readCode() + if err != nil { + return err + } + + s, err := d.internString(c) + if err != nil { + if err == errUnexpectedCode { + return fmt.Errorf("msgpack: invalid code=%x decoding intern string", c) + } + return err + } + + v.SetString(s) + return nil +} + +func (d *Decoder) internString(c codes.Code) (string, error) { + if codes.IsFixedString(c) { + n := int(c & codes.FixedStrMask) + return d.internStringWithLen(n) + } + + switch c { + case codes.FixExt1, codes.FixExt2, codes.FixExt4: + typeID, length, err := d.extHeader(c) + if err != nil { + return "", err + } + if typeID != internStringExtID { + err := fmt.Errorf("msgpack: got ext type=%d, wanted %d", + typeID, internStringExtID) + return "", err + } + + idx, err := d.internStringIndex(length) + if err != nil { + return "", err + } + + return d.internStringAtIndex(idx) + case codes.Str8, codes.Bin8: + n, err := d.uint8() + if err != nil { + return "", err + } + return d.internStringWithLen(int(n)) + case codes.Str16, codes.Bin16: + n, err := d.uint16() + if err != nil { + return "", err + } + return d.internStringWithLen(int(n)) + case codes.Str32, codes.Bin32: + n, err := d.uint32() + if err != nil { + return "", err + } + return d.internStringWithLen(int(n)) + } + + return "", errUnexpectedCode +} + +func (d *Decoder) internStringIndex(length int) (int, error) { + switch length { + case 1: + c, err := d.s.ReadByte() + if err != nil { + return 0, err + } + return int(c), nil + case 2: + b, err := d.readN(2) + if err != nil { + return 0, err + } + n := binary.BigEndian.Uint16(b) + return int(n), nil + case 4: + b, err := d.readN(4) + if err != nil { + return 0, err + } + n := binary.BigEndian.Uint32(b) + return int(n), nil + } + + err := fmt.Errorf("msgpack: unsupported intern string index length=%d", length) + return 0, err +} + +func (d *Decoder) internStringAtIndex(idx int) (string, error) { + if idx >= len(d.intern) { + err := fmt.Errorf("msgpack: intern string with index=%d does not exist", idx) + return "", err + } + return d.intern[idx], nil +} + +func (d *Decoder) internStringWithLen(n int) (string, error) { + if n <= 0 { + return "", nil + } + + s, err := d.stringWithLen(n) + if err != nil { + return "", err + } + + d.intern = append(d.intern, s) + + return s, nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/msgpack.go b/vendor/github.com/vmihailenco/msgpack/v4/msgpack.go new file mode 100644 index 000000000..220b43c47 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/msgpack.go @@ -0,0 +1,17 @@ +package msgpack + +type Marshaler interface { + MarshalMsgpack() ([]byte, error) +} + +type Unmarshaler interface { + UnmarshalMsgpack([]byte) error +} + +type CustomEncoder interface { + EncodeMsgpack(*Encoder) error +} + +type CustomDecoder interface { + DecodeMsgpack(*Decoder) error +} diff --git 
a/vendor/github.com/vmihailenco/msgpack/v4/safe.go b/vendor/github.com/vmihailenco/msgpack/v4/safe.go new file mode 100644 index 000000000..8352c9dce --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/safe.go @@ -0,0 +1,13 @@ +// +build appengine + +package msgpack + +// bytesToString converts byte slice to string. +func bytesToString(b []byte) string { + return string(b) +} + +// stringToBytes converts string to byte slice. +func stringToBytes(s string) []byte { + return []byte(s) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/time.go b/vendor/github.com/vmihailenco/msgpack/v4/time.go new file mode 100644 index 000000000..bf53eb2a3 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/time.go @@ -0,0 +1,149 @@ +package msgpack + +import ( + "encoding/binary" + "fmt" + "reflect" + "time" + + "github.com/vmihailenco/msgpack/v4/codes" +) + +var timeExtID int8 = -1 + +var timePtrType = reflect.TypeOf((*time.Time)(nil)) + +//nolint:gochecknoinits +func init() { + registerExt(timeExtID, timePtrType.Elem(), encodeTimeValue, decodeTimeValue) +} + +func (e *Encoder) EncodeTime(tm time.Time) error { + b := e.encodeTime(tm) + if err := e.encodeExtLen(len(b)); err != nil { + return err + } + if err := e.w.WriteByte(byte(timeExtID)); err != nil { + return err + } + return e.write(b) +} + +func (e *Encoder) encodeTime(tm time.Time) []byte { + secs := uint64(tm.Unix()) + if secs>>34 == 0 { + data := uint64(tm.Nanosecond())<<34 | secs + if data&0xffffffff00000000 == 0 { + b := e.timeBuf[:4] + binary.BigEndian.PutUint32(b, uint32(data)) + return b + } + b := e.timeBuf[:8] + binary.BigEndian.PutUint64(b, data) + return b + } + + b := e.timeBuf[:12] + binary.BigEndian.PutUint32(b, uint32(tm.Nanosecond())) + binary.BigEndian.PutUint64(b[4:], secs) + return b +} + +func (d *Decoder) DecodeTime() (time.Time, error) { + tm, err := d.decodeTime() + if err != nil { + return tm, err + } + + if tm.IsZero() { + // Assume that zero time does not have timezone information. + return tm.UTC(), nil + } + return tm, nil +} + +func (d *Decoder) decodeTime() (time.Time, error) { + extLen := d.extLen + d.extLen = 0 + if extLen == 0 { + c, err := d.readCode() + if err != nil { + return time.Time{}, err + } + + // Legacy format. + if c == codes.FixedArrayLow|2 { + sec, err := d.DecodeInt64() + if err != nil { + return time.Time{}, err + } + + nsec, err := d.DecodeInt64() + if err != nil { + return time.Time{}, err + } + + return time.Unix(sec, nsec), nil + } + + if codes.IsString(c) { + s, err := d.string(c) + if err != nil { + return time.Time{}, err + } + return time.Parse(time.RFC3339Nano, s) + } + + extLen, err = d.parseExtLen(c) + if err != nil { + return time.Time{}, err + } + + // Skip ext id. 
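+		// Note: the id byte is discarded without checking that it matches timeExtID.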
+		_, err = d.s.ReadByte()
+		if err != nil {
+			return time.Time{}, err
+		}
+	}
+
+	b, err := d.readN(extLen)
+	if err != nil {
+		return time.Time{}, err
+	}
+
+	switch len(b) {
+	case 4:
+		sec := binary.BigEndian.Uint32(b)
+		return time.Unix(int64(sec), 0), nil
+	case 8:
+		sec := binary.BigEndian.Uint64(b)
+		nsec := int64(sec >> 34)
+		sec &= 0x00000003ffffffff
+		return time.Unix(int64(sec), nsec), nil
+	case 12:
+		nsec := binary.BigEndian.Uint32(b)
+		sec := binary.BigEndian.Uint64(b[4:])
+		return time.Unix(int64(sec), int64(nsec)), nil
+	default:
+		err = fmt.Errorf("msgpack: invalid ext len=%d decoding time", extLen)
+		return time.Time{}, err
+	}
+}
+
+func encodeTimeValue(e *Encoder, v reflect.Value) error {
+	tm := v.Interface().(time.Time)
+	b := e.encodeTime(tm)
+	return e.write(b)
+}
+
+func decodeTimeValue(d *Decoder, v reflect.Value) error {
+	tm, err := d.DecodeTime()
+	if err != nil {
+		return err
+	}
+
+	ptr := v.Addr().Interface().(*time.Time)
+	*ptr = tm
+
+	return nil
+}
diff --git a/vendor/github.com/vmihailenco/msgpack/v4/types.go b/vendor/github.com/vmihailenco/msgpack/v4/types.go
new file mode 100644
index 000000000..08cf099dd
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/v4/types.go
@@ -0,0 +1,382 @@
+package msgpack
+
+import (
+	"encoding"
+	"fmt"
+	"log"
+	"reflect"
+	"sync"
+
+	"github.com/vmihailenco/tagparser"
+)
+
+var errorType = reflect.TypeOf((*error)(nil)).Elem()
+
+var (
+	customEncoderType = reflect.TypeOf((*CustomEncoder)(nil)).Elem()
+	customDecoderType = reflect.TypeOf((*CustomDecoder)(nil)).Elem()
+)
+
+var (
+	marshalerType   = reflect.TypeOf((*Marshaler)(nil)).Elem()
+	unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+var (
+	binaryMarshalerType   = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
+	binaryUnmarshalerType = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
+)
+
+type (
+	encoderFunc func(*Encoder, reflect.Value) error
+	decoderFunc func(*Decoder, reflect.Value) error
+)
+
+var (
+	typeEncMap sync.Map
+	typeDecMap sync.Map
+)
+
+// Register registers encoder and decoder functions for a value.
+// This is a low-level API and in most cases you should prefer implementing
+// the Marshaler/CustomEncoder and Unmarshaler/CustomDecoder interfaces.
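+// Functions stored this way take precedence over the reflection-based
+// encoders and decoders that getEncoder and getDecoder would otherwise pick.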
+func Register(value interface{}, enc encoderFunc, dec decoderFunc) { + typ := reflect.TypeOf(value) + if enc != nil { + typeEncMap.Store(typ, enc) + } + if dec != nil { + typeDecMap.Store(typ, dec) + } +} + +//------------------------------------------------------------------------------ + +var ( + structs = newStructCache(false) + jsonStructs = newStructCache(true) +) + +type structCache struct { + m sync.Map + + useJSONTag bool +} + +func newStructCache(useJSONTag bool) *structCache { + return &structCache{ + useJSONTag: useJSONTag, + } +} + +func (m *structCache) Fields(typ reflect.Type) *fields { + if v, ok := m.m.Load(typ); ok { + return v.(*fields) + } + + fs := getFields(typ, m.useJSONTag) + m.m.Store(typ, fs) + return fs +} + +//------------------------------------------------------------------------------ + +type field struct { + name string + index []int + omitEmpty bool + encoder encoderFunc + decoder decoderFunc +} + +func (f *field) Omit(strct reflect.Value) bool { + v, isNil := fieldByIndex(strct, f.index) + if isNil { + return true + } + return f.omitEmpty && isEmptyValue(v) +} + +func (f *field) EncodeValue(e *Encoder, strct reflect.Value) error { + v, isNil := fieldByIndex(strct, f.index) + if isNil { + return e.EncodeNil() + } + return f.encoder(e, v) +} + +func (f *field) DecodeValue(d *Decoder, strct reflect.Value) error { + v := fieldByIndexAlloc(strct, f.index) + return f.decoder(d, v) +} + +//------------------------------------------------------------------------------ + +type fields struct { + Type reflect.Type + Map map[string]*field + List []*field + AsArray bool + + hasOmitEmpty bool +} + +func newFields(typ reflect.Type) *fields { + return &fields{ + Type: typ, + Map: make(map[string]*field, typ.NumField()), + List: make([]*field, 0, typ.NumField()), + } +} + +func (fs *fields) Add(field *field) { + fs.warnIfFieldExists(field.name) + fs.Map[field.name] = field + fs.List = append(fs.List, field) + if field.omitEmpty { + fs.hasOmitEmpty = true + } +} + +func (fs *fields) warnIfFieldExists(name string) { + if _, ok := fs.Map[name]; ok { + log.Printf("msgpack: %s already has field=%s", fs.Type, name) + } +} + +func (fs *fields) OmitEmpty(strct reflect.Value) []*field { + if !fs.hasOmitEmpty { + return fs.List + } + + fields := make([]*field, 0, len(fs.List)) + + for _, f := range fs.List { + if !f.Omit(strct) { + fields = append(fields, f) + } + } + + return fields +} + +func getFields(typ reflect.Type, useJSONTag bool) *fields { + fs := newFields(typ) + + var omitEmpty bool + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + + tagStr := f.Tag.Get("msgpack") + if useJSONTag && tagStr == "" { + tagStr = f.Tag.Get("json") + } + + tag := tagparser.Parse(tagStr) + if tag.Name == "-" { + continue + } + + if f.Name == "_msgpack" { + if tag.HasOption("asArray") { + fs.AsArray = true + } + if tag.HasOption("omitempty") { + omitEmpty = true + } + } + + if f.PkgPath != "" && !f.Anonymous { + continue + } + + field := &field{ + name: tag.Name, + index: f.Index, + omitEmpty: omitEmpty || tag.HasOption("omitempty"), + } + + if tag.HasOption("intern") { + switch f.Type.Kind() { + case reflect.Interface: + field.encoder = encodeInternInterfaceValue + field.decoder = decodeInternInterfaceValue + case reflect.String: + field.encoder = encodeInternStringValue + field.decoder = decodeInternStringValue + default: + err := fmt.Errorf("msgpack: intern strings are not supported on %s", f.Type) + panic(err) + } + } else { + field.encoder = getEncoder(f.Type) + field.decoder = 
getDecoder(f.Type) + } + + if field.name == "" { + field.name = f.Name + } + + if f.Anonymous && !tag.HasOption("noinline") { + inline := tag.HasOption("inline") + if inline { + inlineFields(fs, f.Type, field, useJSONTag) + } else { + inline = shouldInline(fs, f.Type, field, useJSONTag) + } + + if inline { + if _, ok := fs.Map[field.name]; ok { + log.Printf("msgpack: %s already has field=%s", fs.Type, field.name) + } + fs.Map[field.name] = field + continue + } + } + + fs.Add(field) + + if alias, ok := tag.Options["alias"]; ok { + fs.warnIfFieldExists(alias) + fs.Map[alias] = field + } + } + return fs +} + +var ( + encodeStructValuePtr uintptr + decodeStructValuePtr uintptr +) + +//nolint:gochecknoinits +func init() { + encodeStructValuePtr = reflect.ValueOf(encodeStructValue).Pointer() + decodeStructValuePtr = reflect.ValueOf(decodeStructValue).Pointer() +} + +func inlineFields(fs *fields, typ reflect.Type, f *field, useJSONTag bool) { + inlinedFields := getFields(typ, useJSONTag).List + for _, field := range inlinedFields { + if _, ok := fs.Map[field.name]; ok { + // Don't inline shadowed fields. + continue + } + field.index = append(f.index, field.index...) + fs.Add(field) + } +} + +func shouldInline(fs *fields, typ reflect.Type, f *field, useJSONTag bool) bool { + var encoder encoderFunc + var decoder decoderFunc + + if typ.Kind() == reflect.Struct { + encoder = f.encoder + decoder = f.decoder + } else { + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + encoder = getEncoder(typ) + decoder = getDecoder(typ) + } + if typ.Kind() != reflect.Struct { + return false + } + } + + if reflect.ValueOf(encoder).Pointer() != encodeStructValuePtr { + return false + } + if reflect.ValueOf(decoder).Pointer() != decodeStructValuePtr { + return false + } + + inlinedFields := getFields(typ, useJSONTag).List + for _, field := range inlinedFields { + if _, ok := fs.Map[field.name]; ok { + // Don't auto inline if there are shadowed fields. + return false + } + } + + for _, field := range inlinedFields { + field.index = append(f.index, field.index...) 
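+		// Register the promoted field under the outer struct's index path.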
+ fs.Add(field) + } + return true +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func fieldByIndex(v reflect.Value, index []int) (_ reflect.Value, isNil bool) { + if len(index) == 1 { + return v.Field(index[0]), false + } + + for i, idx := range index { + if i > 0 { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return v, true + } + v = v.Elem() + } + } + v = v.Field(idx) + } + + return v, false +} + +func fieldByIndexAlloc(v reflect.Value, index []int) reflect.Value { + if len(index) == 1 { + return v.Field(index[0]) + } + + for i, idx := range index { + if i > 0 { + var ok bool + v, ok = indirectNew(v) + if !ok { + return v + } + } + v = v.Field(idx) + } + + return v +} + +func indirectNew(v reflect.Value) (reflect.Value, bool) { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + if !v.CanSet() { + return v, false + } + elemType := v.Type().Elem() + if elemType.Kind() != reflect.Struct { + return v, false + } + v.Set(reflect.New(elemType)) + } + v = v.Elem() + } + return v, true +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/unsafe.go b/vendor/github.com/vmihailenco/msgpack/v4/unsafe.go new file mode 100644 index 000000000..50c0da8b5 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v4/unsafe.go @@ -0,0 +1,22 @@ +// +build !appengine + +package msgpack + +import ( + "unsafe" +) + +// bytesToString converts byte slice to string. +func bytesToString(b []byte) string { //nolint:deadcode,unused + return *(*string)(unsafe.Pointer(&b)) +} + +// stringToBytes converts string to byte slice. +func stringToBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer( + &struct { + string + Cap int + }{s, len(s)}, + )) +} diff --git a/vendor/github.com/vmihailenco/tagparser/.travis.yml b/vendor/github.com/vmihailenco/tagparser/.travis.yml new file mode 100644 index 000000000..ec5384523 --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/.travis.yml @@ -0,0 +1,24 @@ +dist: xenial +sudo: false +language: go + +go: + - 1.11.x + - 1.12.x + - tip + +matrix: + allow_failures: + - go: tip + +env: + - GO111MODULE=on + +go_import_path: github.com/vmihailenco/tagparser + +before_install: + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.17.1 + +script: + - make + - golangci-lint run diff --git a/vendor/github.com/vmihailenco/tagparser/LICENSE b/vendor/github.com/vmihailenco/tagparser/LICENSE new file mode 100644 index 000000000..3fc93fdff --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2019 The github.com/vmihailenco/tagparser Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/vmihailenco/tagparser/Makefile b/vendor/github.com/vmihailenco/tagparser/Makefile new file mode 100644 index 000000000..fe9dc5bdb --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/Makefile @@ -0,0 +1,8 @@ +all: + go test ./... + go test ./... -short -race + go test ./... -run=NONE -bench=. -benchmem + env GOOS=linux GOARCH=386 go test ./... + go vet ./... + go get github.com/gordonklaus/ineffassign + ineffassign . diff --git a/vendor/github.com/vmihailenco/tagparser/README.md b/vendor/github.com/vmihailenco/tagparser/README.md new file mode 100644 index 000000000..411aa5444 --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/README.md @@ -0,0 +1,24 @@ +# Opinionated Golang tag parser + +[![Build Status](https://travis-ci.org/vmihailenco/tagparser.png?branch=master)](https://travis-ci.org/vmihailenco/tagparser) +[![GoDoc](https://godoc.org/github.com/vmihailenco/tagparser?status.svg)](https://godoc.org/github.com/vmihailenco/tagparser) + +## Installation + +Install: + +```shell +go get -u github.com/vmihailenco/tagparser +``` + +## Quickstart + +```go +func ExampleParse() { + tag := tagparser.Parse("some_name,key:value,key2:'complex value'") + fmt.Println(tag.Name) + fmt.Println(tag.Options) + // Output: some_name + // map[key:value key2:'complex value'] +} +``` diff --git a/vendor/github.com/vmihailenco/tagparser/go.mod b/vendor/github.com/vmihailenco/tagparser/go.mod new file mode 100644 index 000000000..961a46ddb --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/go.mod @@ -0,0 +1,3 @@ +module github.com/vmihailenco/tagparser + +go 1.13 diff --git a/vendor/github.com/vmihailenco/tagparser/internal/parser/parser.go b/vendor/github.com/vmihailenco/tagparser/internal/parser/parser.go new file mode 100644 index 000000000..2de1c6f7b --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/internal/parser/parser.go @@ -0,0 +1,82 @@ +package parser + +import ( + "bytes" + + "github.com/vmihailenco/tagparser/internal" +) + +type Parser struct { + b []byte + i int +} + +func New(b []byte) *Parser { + return &Parser{ + b: b, + } +} + +func NewString(s string) *Parser { + return New(internal.StringToBytes(s)) +} + +func (p *Parser) Bytes() []byte { + return p.b[p.i:] +} + +func (p *Parser) Valid() bool { + return p.i < len(p.b) +} + +func (p *Parser) Read() byte { + if p.Valid() { + c := p.b[p.i] + p.Advance() + return c + } + return 0 +} + +func (p *Parser) Peek() byte { + if p.Valid() { + return p.b[p.i] + } + return 0 +} + +func (p *Parser) Advance() { + p.i++ +} + +func (p 
*Parser) Skip(skip byte) bool { + if p.Peek() == skip { + p.Advance() + return true + } + return false +} + +func (p *Parser) SkipBytes(skip []byte) bool { + if len(skip) > len(p.b[p.i:]) { + return false + } + if !bytes.Equal(p.b[p.i:p.i+len(skip)], skip) { + return false + } + p.i += len(skip) + return true +} + +func (p *Parser) ReadSep(sep byte) ([]byte, bool) { + ind := bytes.IndexByte(p.b[p.i:], sep) + if ind == -1 { + b := p.b[p.i:] + p.i = len(p.b) + return b, false + } + + b := p.b[p.i : p.i+ind] + p.i += ind + 1 + return b, true +} diff --git a/vendor/github.com/vmihailenco/tagparser/internal/safe.go b/vendor/github.com/vmihailenco/tagparser/internal/safe.go new file mode 100644 index 000000000..870fe541f --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/internal/safe.go @@ -0,0 +1,11 @@ +// +build appengine + +package internal + +func BytesToString(b []byte) string { + return string(b) +} + +func StringToBytes(s string) []byte { + return []byte(s) +} diff --git a/vendor/github.com/vmihailenco/tagparser/internal/unsafe.go b/vendor/github.com/vmihailenco/tagparser/internal/unsafe.go new file mode 100644 index 000000000..f8bc18d91 --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/internal/unsafe.go @@ -0,0 +1,22 @@ +// +build !appengine + +package internal + +import ( + "unsafe" +) + +// BytesToString converts byte slice to string. +func BytesToString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +// StringToBytes converts string to byte slice. +func StringToBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer( + &struct { + string + Cap int + }{s, len(s)}, + )) +} diff --git a/vendor/github.com/vmihailenco/tagparser/tagparser.go b/vendor/github.com/vmihailenco/tagparser/tagparser.go new file mode 100644 index 000000000..431002aef --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/tagparser.go @@ -0,0 +1,181 @@ +package tagparser + +import ( + "strings" + + "github.com/vmihailenco/tagparser/internal/parser" +) + +type Tag struct { + Name string + Options map[string]string +} + +func (t *Tag) HasOption(name string) bool { + _, ok := t.Options[name] + return ok +} + +func Parse(s string) *Tag { + p := &tagParser{ + Parser: parser.NewString(s), + } + p.parseKey() + return &p.Tag +} + +type tagParser struct { + *parser.Parser + + Tag Tag + hasName bool + key string +} + +func (p *tagParser) setTagOption(key, value string) { + key = strings.TrimSpace(key) + value = strings.TrimSpace(value) + + if !p.hasName { + p.hasName = true + if key == "" { + p.Tag.Name = value + return + } + } + if p.Tag.Options == nil { + p.Tag.Options = make(map[string]string) + } + if key == "" { + p.Tag.Options[value] = "" + } else { + p.Tag.Options[key] = value + } +} + +func (p *tagParser) parseKey() { + p.key = "" + + var b []byte + for p.Valid() { + c := p.Read() + switch c { + case ',': + p.Skip(' ') + p.setTagOption("", string(b)) + p.parseKey() + return + case ':': + p.key = string(b) + p.parseValue() + return + case '\'': + p.parseQuotedValue() + return + default: + b = append(b, c) + } + } + + if len(b) > 0 { + p.setTagOption("", string(b)) + } +} + +func (p *tagParser) parseValue() { + const quote = '\'' + + c := p.Peek() + if c == quote { + p.Skip(quote) + p.parseQuotedValue() + return + } + + var b []byte + for p.Valid() { + c = p.Read() + switch c { + case '\\': + b = append(b, p.Read()) + case '(': + b = append(b, c) + b = p.readBrackets(b) + case ',': + p.Skip(' ') + p.setTagOption(p.key, string(b)) + p.parseKey() + return + default: + b = 
append(b, c) + } + } + p.setTagOption(p.key, string(b)) +} + +func (p *tagParser) readBrackets(b []byte) []byte { + var lvl int +loop: + for p.Valid() { + c := p.Read() + switch c { + case '\\': + b = append(b, p.Read()) + case '(': + b = append(b, c) + lvl++ + case ')': + b = append(b, c) + lvl-- + if lvl < 0 { + break loop + } + default: + b = append(b, c) + } + } + return b +} + +func (p *tagParser) parseQuotedValue() { + const quote = '\'' + + var b []byte + b = append(b, quote) + + for p.Valid() { + bb, ok := p.ReadSep(quote) + if !ok { + b = append(b, bb...) + break + } + + if len(bb) > 0 && bb[len(bb)-1] == '\\' { + b = append(b, bb[:len(bb)-1]...) + b = append(b, quote) + continue + } + + b = append(b, bb...) + b = append(b, quote) + break + } + + p.setTagOption(p.key, string(b)) + if p.Skip(',') { + p.Skip(' ') + } + p.parseKey() +} + +func Unquote(s string) (string, bool) { + const quote = '\'' + + if len(s) < 2 { + return s, false + } + if s[0] == quote && s[len(s)-1] == quote { + return s[1 : len(s)-1], true + } + return s, false +} diff --git a/vendor/github.com/xdg-go/scram/CHANGELOG.md b/vendor/github.com/xdg-go/scram/CHANGELOG.md index 425c122fa..21828db14 100644 --- a/vendor/github.com/xdg-go/scram/CHANGELOG.md +++ b/vendor/github.com/xdg-go/scram/CHANGELOG.md @@ -1,5 +1,13 @@ # CHANGELOG +## v1.1.1 - 2022-03-03 + +- Bump stringprep dependency to v1.0.3 for upstream CVE fix. + +## v1.1.0 - 2022-01-16 + +- Add SHA-512 hash generator function for convenience. + ## v1.0.2 - 2021-03-28 - Switch PBKDF2 dependency to github.com/xdg-go/pbkdf2 to diff --git a/vendor/github.com/xdg-go/scram/doc.go b/vendor/github.com/xdg-go/scram/doc.go index d43bee607..82e8aeed8 100644 --- a/vendor/github.com/xdg-go/scram/doc.go +++ b/vendor/github.com/xdg-go/scram/doc.go @@ -10,14 +10,16 @@ // // Usage // -// The scram package provides two variables, `SHA1` and `SHA256`, that are -// used to construct Client or Server objects. +// The scram package provides variables, `SHA1`, `SHA256`, and `SHA512`, that +// are used to construct Client or Server objects. // // clientSHA1, err := scram.SHA1.NewClient(username, password, authID) // clientSHA256, err := scram.SHA256.NewClient(username, password, authID) +// clientSHA512, err := scram.SHA512.NewClient(username, password, authID) // // serverSHA1, err := scram.SHA1.NewServer(credentialLookupFcn) // serverSHA256, err := scram.SHA256.NewServer(credentialLookupFcn) +// serverSHA512, err := scram.SHA512.NewServer(credentialLookupFcn) // // These objects are used to construct ClientConversation or // ServerConversation objects that are used to carry out authentication. 
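The doc.go changes above stop at constructing Client and Server values; an authentication run is then driven through a conversation object. A minimal client-side sketch of the new SHA-512 flow, assuming the package's exported conversation API (the credentials are placeholders):

```go
package main

import (
	"fmt"

	"github.com/xdg-go/scram"
)

func main() {
	client, err := scram.SHA512.NewClient("user", "pencil", "")
	if err != nil {
		panic(err)
	}

	// Each authentication attempt gets its own conversation. The first
	// Step takes no server input and yields the client-first message.
	conv := client.NewConversation()
	clientFirst, err := conv.Step("")
	if err != nil {
		panic(err)
	}
	fmt.Println(clientFirst)

	// Subsequent server messages are fed back into conv.Step until
	// conv.Done() reports that the exchange is complete.
}
```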
diff --git a/vendor/github.com/xdg-go/scram/go.mod b/vendor/github.com/xdg-go/scram/go.mod index ad3763598..53324d05a 100644 --- a/vendor/github.com/xdg-go/scram/go.mod +++ b/vendor/github.com/xdg-go/scram/go.mod @@ -4,5 +4,5 @@ go 1.11 require ( github.com/xdg-go/pbkdf2 v1.0.0 - github.com/xdg-go/stringprep v1.0.2 + github.com/xdg-go/stringprep v1.0.3 ) diff --git a/vendor/github.com/xdg-go/scram/go.sum b/vendor/github.com/xdg-go/scram/go.sum index 04098820a..fa0938ac8 100644 --- a/vendor/github.com/xdg-go/scram/go.sum +++ b/vendor/github.com/xdg-go/scram/go.sum @@ -1,7 +1,7 @@ github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc= -github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCOIs= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/xdg-go/scram/scram.go b/vendor/github.com/xdg-go/scram/scram.go index 927659969..a7b366027 100644 --- a/vendor/github.com/xdg-go/scram/scram.go +++ b/vendor/github.com/xdg-go/scram/scram.go @@ -9,6 +9,7 @@ package scram import ( "crypto/sha1" "crypto/sha256" + "crypto/sha512" "fmt" "hash" @@ -29,6 +30,10 @@ var SHA1 HashGeneratorFcn = func() hash.Hash { return sha1.New() } // to create Client objects configured for SHA-256 hashing. var SHA256 HashGeneratorFcn = func() hash.Hash { return sha256.New() } +// SHA512 is a function that returns a crypto/sha512 hasher and should be used +// to create Client objects configured for SHA-512 hashing. +var SHA512 HashGeneratorFcn = func() hash.Hash { return sha512.New() } + // NewClient constructs a SCRAM client component based on a given hash.Hash // factory receiver. This constructor will normalize the username, password // and authzID via the SASLprep algorithm, as recommended by RFC-5802. 
If diff --git a/vendor/github.com/xdg-go/stringprep/CHANGELOG.md b/vendor/github.com/xdg-go/stringprep/CHANGELOG.md index 2849637ca..e06787fba 100644 --- a/vendor/github.com/xdg-go/stringprep/CHANGELOG.md +++ b/vendor/github.com/xdg-go/stringprep/CHANGELOG.md @@ -1,5 +1,12 @@ # CHANGELOG + +## [v1.0.3] - 2022-03-01 + +### Maintenance + +- Bump golang.org/x/text to v0.3.7 due to CVE-2021-38561 + ## [v1.0.2] - 2021-03-27 diff --git a/vendor/github.com/xdg-go/stringprep/go.mod b/vendor/github.com/xdg-go/stringprep/go.mod index f57123b55..d71c7adbb 100644 --- a/vendor/github.com/xdg-go/stringprep/go.mod +++ b/vendor/github.com/xdg-go/stringprep/go.mod @@ -2,4 +2,4 @@ module github.com/xdg-go/stringprep go 1.11 -require golang.org/x/text v0.3.5 +require golang.org/x/text v0.3.7 diff --git a/vendor/github.com/xdg-go/stringprep/go.sum b/vendor/github.com/xdg-go/stringprep/go.sum index bbd33e892..2274b8038 100644 --- a/vendor/github.com/xdg-go/stringprep/go.sum +++ b/vendor/github.com/xdg-go/stringprep/go.sum @@ -1,3 +1,3 @@ -golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/youmark/pkcs8/.travis.yml b/vendor/github.com/youmark/pkcs8/.travis.yml index 0bceef6fa..3608f7dc7 100644 --- a/vendor/github.com/youmark/pkcs8/.travis.yml +++ b/vendor/github.com/youmark/pkcs8/.travis.yml @@ -1,8 +1,13 @@ +arch: + - amd64 + - ppc64le language: go go: - - "1.9.x" - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" - master script: diff --git a/vendor/github.com/youmark/pkcs8/README.md b/vendor/github.com/youmark/pkcs8/README.md index f2167dbfe..ef6c76257 100644 --- a/vendor/github.com/youmark/pkcs8/README.md +++ b/vendor/github.com/youmark/pkcs8/README.md @@ -8,14 +8,15 @@ pkcs8 package fills the gap here. It implements functions to process private key [**Godoc**](http://godoc.org/github.com/youmark/pkcs8) ## Installation -Supports Go 1.9+ +Supports Go 1.10+. Release v1.1 is the last release supporting Go 1.9 ```text go get github.com/youmark/pkcs8 ``` ## dependency -This package depends on golang.org/x/crypto/pbkdf2 package. Use the following command to retrive pbkdf2 package +This package depends on golang.org/x/crypto/pbkdf2 and golang.org/x/crypto/scrypt packages. 
Use the following command to retrieve them ```text go get golang.org/x/crypto/pbkdf2 +go get golang.org/x/crypto/scrypt ``` diff --git a/vendor/github.com/youmark/pkcs8/cipher.go b/vendor/github.com/youmark/pkcs8/cipher.go new file mode 100644 index 000000000..2946c93e8 --- /dev/null +++ b/vendor/github.com/youmark/pkcs8/cipher.go @@ -0,0 +1,60 @@ +package pkcs8 + +import ( + "bytes" + "crypto/cipher" + "encoding/asn1" +) + +type cipherWithBlock struct { + oid asn1.ObjectIdentifier + ivSize int + keySize int + newBlock func(key []byte) (cipher.Block, error) +} + +func (c cipherWithBlock) IVSize() int { + return c.ivSize +} + +func (c cipherWithBlock) KeySize() int { + return c.keySize +} + +func (c cipherWithBlock) OID() asn1.ObjectIdentifier { + return c.oid +} + +func (c cipherWithBlock) Encrypt(key, iv, plaintext []byte) ([]byte, error) { + block, err := c.newBlock(key) + if err != nil { + return nil, err + } + return cbcEncrypt(block, key, iv, plaintext) +} + +func (c cipherWithBlock) Decrypt(key, iv, ciphertext []byte) ([]byte, error) { + block, err := c.newBlock(key) + if err != nil { + return nil, err + } + return cbcDecrypt(block, key, iv, ciphertext) +} + +func cbcEncrypt(block cipher.Block, key, iv, plaintext []byte) ([]byte, error) { + mode := cipher.NewCBCEncrypter(block, iv) + paddingLen := block.BlockSize() - (len(plaintext) % block.BlockSize()) + ciphertext := make([]byte, len(plaintext)+paddingLen) + copy(ciphertext, plaintext) + copy(ciphertext[len(plaintext):], bytes.Repeat([]byte{byte(paddingLen)}, paddingLen)) + mode.CryptBlocks(ciphertext, ciphertext) + return ciphertext, nil +} + +func cbcDecrypt(block cipher.Block, key, iv, ciphertext []byte) ([]byte, error) { + mode := cipher.NewCBCDecrypter(block, iv) + plaintext := make([]byte, len(ciphertext)) + mode.CryptBlocks(plaintext, ciphertext) + // TODO: remove padding + return plaintext, nil +} diff --git a/vendor/github.com/youmark/pkcs8/cipher_3des.go b/vendor/github.com/youmark/pkcs8/cipher_3des.go new file mode 100644 index 000000000..562966440 --- /dev/null +++ b/vendor/github.com/youmark/pkcs8/cipher_3des.go @@ -0,0 +1,24 @@ +package pkcs8 + +import ( + "crypto/des" + "encoding/asn1" +) + +var ( + oidDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7} +) + +func init() { + RegisterCipher(oidDESEDE3CBC, func() Cipher { + return TripleDESCBC + }) +} + +// TripleDESCBC is the 168-bit key 3DES cipher in CBC mode. 
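+// Its 24-byte key is consumed as three 8-byte DES keys (168 key bits plus parity bits).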
+var TripleDESCBC = cipherWithBlock{
+	ivSize:   des.BlockSize,
+	keySize:  24,
+	newBlock: des.NewTripleDESCipher,
+	oid:      oidDESEDE3CBC,
+}
diff --git a/vendor/github.com/youmark/pkcs8/cipher_aes.go b/vendor/github.com/youmark/pkcs8/cipher_aes.go
new file mode 100644
index 000000000..c0372d1ee
--- /dev/null
+++ b/vendor/github.com/youmark/pkcs8/cipher_aes.go
@@ -0,0 +1,84 @@
+package pkcs8
+
+import (
+	"crypto/aes"
+	"encoding/asn1"
+)
+
+var (
+	oidAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2}
+	oidAES128GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 6}
+	oidAES192CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 22}
+	oidAES192GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 26}
+	oidAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42}
+	oidAES256GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 46}
+)
+
+func init() {
+	RegisterCipher(oidAES128CBC, func() Cipher {
+		return AES128CBC
+	})
+	RegisterCipher(oidAES128GCM, func() Cipher {
+		return AES128GCM
+	})
+	RegisterCipher(oidAES192CBC, func() Cipher {
+		return AES192CBC
+	})
+	RegisterCipher(oidAES192GCM, func() Cipher {
+		return AES192GCM
+	})
+	RegisterCipher(oidAES256CBC, func() Cipher {
+		return AES256CBC
+	})
+	RegisterCipher(oidAES256GCM, func() Cipher {
+		return AES256GCM
+	})
+}
+
+// AES128CBC is the 128-bit key AES cipher in CBC mode.
+var AES128CBC = cipherWithBlock{
+	ivSize:   aes.BlockSize,
+	keySize:  16,
+	newBlock: aes.NewCipher,
+	oid:      oidAES128CBC,
+}
+
+// AES128GCM is the 128-bit key AES cipher in GCM mode.
+var AES128GCM = cipherWithBlock{
+	ivSize:   aes.BlockSize,
+	keySize:  16,
+	newBlock: aes.NewCipher,
+	oid:      oidAES128GCM,
+}
+
+// AES192CBC is the 192-bit key AES cipher in CBC mode.
+var AES192CBC = cipherWithBlock{
+	ivSize:   aes.BlockSize,
+	keySize:  24,
+	newBlock: aes.NewCipher,
+	oid:      oidAES192CBC,
+}
+
+// AES192GCM is the 192-bit key AES cipher in GCM mode.
+var AES192GCM = cipherWithBlock{
+	ivSize:   aes.BlockSize,
+	keySize:  24,
+	newBlock: aes.NewCipher,
+	oid:      oidAES192GCM,
+}
+
+// AES256CBC is the 256-bit key AES cipher in CBC mode.
+var AES256CBC = cipherWithBlock{
+	ivSize:   aes.BlockSize,
+	keySize:  32,
+	newBlock: aes.NewCipher,
+	oid:      oidAES256CBC,
+}
+
+// AES256GCM is the 256-bit key AES cipher in GCM mode.
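+// Note: cipherWithBlock (cipher.go above) encrypts and decrypts in CBC mode,
+// so the GCM-named entries currently share the CBC code path.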
+var AES256GCM = cipherWithBlock{ + ivSize: aes.BlockSize, + keySize: 32, + newBlock: aes.NewCipher, + oid: oidAES256GCM, +} diff --git a/vendor/github.com/youmark/pkcs8/go.mod b/vendor/github.com/youmark/pkcs8/go.mod new file mode 100644 index 000000000..1fda53404 --- /dev/null +++ b/vendor/github.com/youmark/pkcs8/go.mod @@ -0,0 +1,5 @@ +module github.com/youmark/pkcs8 + +go 1.12 + +require golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 diff --git a/vendor/github.com/youmark/pkcs8/go.sum b/vendor/github.com/youmark/pkcs8/go.sum new file mode 100644 index 000000000..d59db129c --- /dev/null +++ b/vendor/github.com/youmark/pkcs8/go.sum @@ -0,0 +1,7 @@ +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 h1:xMPOj6Pz6UipU1wXLkrtqpHbR0AVFnyPEQq/wRWz9lM= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/youmark/pkcs8/kdf_pbkdf2.go b/vendor/github.com/youmark/pkcs8/kdf_pbkdf2.go new file mode 100644 index 000000000..79697dd82 --- /dev/null +++ b/vendor/github.com/youmark/pkcs8/kdf_pbkdf2.go @@ -0,0 +1,91 @@ +package pkcs8 + +import ( + "crypto" + "crypto/sha1" + "crypto/sha256" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "hash" + + "golang.org/x/crypto/pbkdf2" +) + +var ( + oidPKCS5PBKDF2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 12} + oidHMACWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 2, 7} + oidHMACWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 2, 9} +) + +func init() { + RegisterKDF(oidPKCS5PBKDF2, func() KDFParameters { + return new(pbkdf2Params) + }) +} + +func newHashFromPRF(ai pkix.AlgorithmIdentifier) (func() hash.Hash, error) { + switch { + case len(ai.Algorithm) == 0 || ai.Algorithm.Equal(oidHMACWithSHA1): + return sha1.New, nil + case ai.Algorithm.Equal(oidHMACWithSHA256): + return sha256.New, nil + default: + return nil, errors.New("pkcs8: unsupported hash function") + } +} + +func newPRFParamFromHash(h crypto.Hash) (pkix.AlgorithmIdentifier, error) { + switch h { + case crypto.SHA1: + return pkix.AlgorithmIdentifier{ + Algorithm: oidHMACWithSHA1, + Parameters: asn1.RawValue{Tag: asn1.TagNull}}, nil + case crypto.SHA256: + return pkix.AlgorithmIdentifier{ + Algorithm: oidHMACWithSHA256, + Parameters: asn1.RawValue{Tag: asn1.TagNull}}, nil + } + return pkix.AlgorithmIdentifier{}, errors.New("pkcs8: unsupported hash function") +} + +type pbkdf2Params struct { + Salt []byte + IterationCount int + PRF pkix.AlgorithmIdentifier `asn1:"optional"` +} + +func (p pbkdf2Params) DeriveKey(password []byte, size int) (key []byte, err error) { + h, err := newHashFromPRF(p.PRF) + if err != nil { + return nil, err + } + return pbkdf2.Key(password, p.Salt, p.IterationCount, size, h), nil +} + +// PBKDF2Opts contains options for the PBKDF2 key derivation function. 
+type PBKDF2Opts struct { + SaltSize int + IterationCount int + HMACHash crypto.Hash +} + +func (p PBKDF2Opts) DeriveKey(password, salt []byte, size int) ( + key []byte, params KDFParameters, err error) { + + key = pbkdf2.Key(password, salt, p.IterationCount, size, p.HMACHash.New) + prfParam, err := newPRFParamFromHash(p.HMACHash) + if err != nil { + return nil, nil, err + } + params = pbkdf2Params{salt, p.IterationCount, prfParam} + return key, params, nil +} + +func (p PBKDF2Opts) GetSaltSize() int { + return p.SaltSize +} + +func (p PBKDF2Opts) OID() asn1.ObjectIdentifier { + return oidPKCS5PBKDF2 +} diff --git a/vendor/github.com/youmark/pkcs8/kdf_scrypt.go b/vendor/github.com/youmark/pkcs8/kdf_scrypt.go new file mode 100644 index 000000000..36c4f4f59 --- /dev/null +++ b/vendor/github.com/youmark/pkcs8/kdf_scrypt.go @@ -0,0 +1,62 @@ +package pkcs8 + +import ( + "encoding/asn1" + + "golang.org/x/crypto/scrypt" +) + +var ( + oidScrypt = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11591, 4, 11} +) + +func init() { + RegisterKDF(oidScrypt, func() KDFParameters { + return new(scryptParams) + }) +} + +type scryptParams struct { + Salt []byte + CostParameter int + BlockSize int + ParallelizationParameter int +} + +func (p scryptParams) DeriveKey(password []byte, size int) (key []byte, err error) { + return scrypt.Key(password, p.Salt, p.CostParameter, p.BlockSize, + p.ParallelizationParameter, size) +} + +// ScryptOpts contains options for the scrypt key derivation function. +type ScryptOpts struct { + SaltSize int + CostParameter int + BlockSize int + ParallelizationParameter int +} + +func (p ScryptOpts) DeriveKey(password, salt []byte, size int) ( + key []byte, params KDFParameters, err error) { + + key, err = scrypt.Key(password, salt, p.CostParameter, p.BlockSize, + p.ParallelizationParameter, size) + if err != nil { + return nil, nil, err + } + params = scryptParams{ + BlockSize: p.BlockSize, + CostParameter: p.CostParameter, + ParallelizationParameter: p.ParallelizationParameter, + Salt: salt, + } + return key, params, nil +} + +func (p ScryptOpts) GetSaltSize() int { + return p.SaltSize +} + +func (p ScryptOpts) OID() asn1.ObjectIdentifier { + return oidScrypt +} diff --git a/vendor/github.com/youmark/pkcs8/pkcs8.go b/vendor/github.com/youmark/pkcs8/pkcs8.go index 9270a7970..f27f62752 100644 --- a/vendor/github.com/youmark/pkcs8/pkcs8.go +++ b/vendor/github.com/youmark/pkcs8/pkcs8.go @@ -2,304 +2,308 @@ package pkcs8 import ( - "crypto/aes" - "crypto/cipher" - "crypto/des" + "crypto" "crypto/ecdsa" - "crypto/elliptic" "crypto/rand" "crypto/rsa" - "crypto/sha1" - "crypto/sha256" "crypto/x509" + "crypto/x509/pkix" "encoding/asn1" "errors" - - "golang.org/x/crypto/pbkdf2" -) - -// Copy from crypto/x509 -var ( - oidPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} - oidPublicKeyDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1} - oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1} + "fmt" ) -// Copy from crypto/x509 -var ( - oidNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33} - oidNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7} - oidNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34} - oidNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35} -) +// DefaultOpts are the default options for encrypting a key if none are given. +// The defaults can be changed by the library user. 
+var DefaultOpts = &Opts{ + Cipher: AES256CBC, + KDFOpts: PBKDF2Opts{ + SaltSize: 8, + IterationCount: 10000, + HMACHash: crypto.SHA256, + }, +} -// Copy from crypto/x509 -func oidFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) { - switch curve { - case elliptic.P224(): - return oidNamedCurveP224, true - case elliptic.P256(): - return oidNamedCurveP256, true - case elliptic.P384(): - return oidNamedCurveP384, true - case elliptic.P521(): - return oidNamedCurveP521, true - } +// KDFOpts contains options for a key derivation function. +// An implementation of this interface must be specified when encrypting a PKCS#8 key. +type KDFOpts interface { + // DeriveKey derives a key of size bytes from the given password and salt. + // It returns the key and the ASN.1-encodable parameters used. + DeriveKey(password, salt []byte, size int) (key []byte, params KDFParameters, err error) + // GetSaltSize returns the salt size specified. + GetSaltSize() int + // OID returns the OID of the KDF specified. + OID() asn1.ObjectIdentifier +} - return nil, false +// KDFParameters contains parameters (salt, etc.) for a key deriviation function. +// It must be a ASN.1-decodable structure. +// An implementation of this interface is created when decoding an encrypted PKCS#8 key. +type KDFParameters interface { + // DeriveKey derives a key of size bytes from the given password. + // It uses the salt from the decoded parameters. + DeriveKey(password []byte, size int) (key []byte, err error) } -// Unecrypted PKCS8 -var ( - oidPKCS5PBKDF2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 12} - oidPBES2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 13} - oidAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42} - oidAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2} - oidHMACWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 2, 9} - oidDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7} -) +var kdfs = make(map[string]func() KDFParameters) -type ecPrivateKey struct { - Version int - PrivateKey []byte - NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"` - PublicKey asn1.BitString `asn1:"optional,explicit,tag:1"` +// RegisterKDF registers a function that returns a new instance of the given KDF +// parameters. This allows the library to support client-provided KDFs. +func RegisterKDF(oid asn1.ObjectIdentifier, params func() KDFParameters) { + kdfs[oid.String()] = params } -type privateKeyInfo struct { - Version int - PrivateKeyAlgorithm []asn1.ObjectIdentifier - PrivateKey []byte +// Cipher represents a cipher for encrypting the key material. +type Cipher interface { + // IVSize returns the IV size of the cipher, in bytes. + IVSize() int + // KeySize returns the key size of the cipher, in bytes. + KeySize() int + // Encrypt encrypts the key material. + Encrypt(key, iv, plaintext []byte) ([]byte, error) + // Decrypt decrypts the key material. + Decrypt(key, iv, ciphertext []byte) ([]byte, error) + // OID returns the OID of the cipher specified. + OID() asn1.ObjectIdentifier } -// Encrypted PKCS8 -type prfParam struct { - IdPRF asn1.ObjectIdentifier - NullParam asn1.RawValue -} +var ciphers = make(map[string]func() Cipher) -type pbkdf2Params struct { - Salt []byte - IterationCount int - PrfParam prfParam `asn1:"optional"` +// RegisterCipher registers a function that returns a new instance of the given +// cipher. This allows the library to support client-provided ciphers. 
+func RegisterCipher(oid asn1.ObjectIdentifier, cipher func() Cipher) { + ciphers[oid.String()] = cipher } -type pbkdf2Algorithms struct { - IdPBKDF2 asn1.ObjectIdentifier - PBKDF2Params pbkdf2Params +// Opts contains options for encrypting a PKCS#8 key. +type Opts struct { + Cipher Cipher + KDFOpts KDFOpts } -type pbkdf2Encs struct { - EncryAlgo asn1.ObjectIdentifier - IV []byte -} +// Unecrypted PKCS8 +var ( + oidPBES2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 13} +) -type pbes2Params struct { - KeyDerivationFunc pbkdf2Algorithms - EncryptionScheme pbkdf2Encs +type encryptedPrivateKeyInfo struct { + EncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedData []byte } -type pbes2Algorithms struct { - IdPBES2 asn1.ObjectIdentifier - PBES2Params pbes2Params +type pbes2Params struct { + KeyDerivationFunc pkix.AlgorithmIdentifier + EncryptionScheme pkix.AlgorithmIdentifier } -type encryptedPrivateKeyInfo struct { - EncryptionAlgorithm pbes2Algorithms - EncryptedData []byte +type privateKeyInfo struct { + Version int + PrivateKeyAlgorithm pkix.AlgorithmIdentifier + PrivateKey []byte } -// ParsePKCS8PrivateKeyRSA parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter. -// -// The function can decrypt the private key encrypted with AES-256-CBC mode, and stored in PKCS #5 v2.0 format. -func ParsePKCS8PrivateKeyRSA(der []byte, v ...[]byte) (*rsa.PrivateKey, error) { - key, err := ParsePKCS8PrivateKey(der, v...) - if err != nil { - return nil, err - } - typedKey, ok := key.(*rsa.PrivateKey) +func parseKeyDerivationFunc(keyDerivationFunc pkix.AlgorithmIdentifier) (KDFParameters, error) { + oid := keyDerivationFunc.Algorithm.String() + newParams, ok := kdfs[oid] if !ok { - return nil, errors.New("key block is not of type RSA") + return nil, fmt.Errorf("pkcs8: unsupported KDF (OID: %s)", oid) } - return typedKey, nil -} - -// ParsePKCS8PrivateKeyECDSA parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter. -// -// The function can decrypt the private key encrypted with AES-256-CBC mode, and stored in PKCS #5 v2.0 format. -func ParsePKCS8PrivateKeyECDSA(der []byte, v ...[]byte) (*ecdsa.PrivateKey, error) { - key, err := ParsePKCS8PrivateKey(der, v...) + params := newParams() + _, err := asn1.Unmarshal(keyDerivationFunc.Parameters.FullBytes, params) if err != nil { - return nil, err + return nil, errors.New("pkcs8: invalid KDF parameters") } - typedKey, ok := key.(*ecdsa.PrivateKey) + return params, nil +} + +func parseEncryptionScheme(encryptionScheme pkix.AlgorithmIdentifier) (Cipher, []byte, error) { + oid := encryptionScheme.Algorithm.String() + newCipher, ok := ciphers[oid] if !ok { - return nil, errors.New("key block is not of type ECDSA") + return nil, nil, fmt.Errorf("pkcs8: unsupported cipher (OID: %s)", oid) } - return typedKey, nil + cipher := newCipher() + var iv []byte + if _, err := asn1.Unmarshal(encryptionScheme.Parameters.FullBytes, &iv); err != nil { + return nil, nil, errors.New("pkcs8: invalid cipher parameters") + } + return cipher, iv, nil } -// ParsePKCS8PrivateKey parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter. 
-// -// The function can decrypt the private key encrypted with AES-256-CBC mode, and stored in PKCS #5 v2.0 format. -func ParsePKCS8PrivateKey(der []byte, v ...[]byte) (interface{}, error) { +// ParsePrivateKey parses a DER-encoded PKCS#8 private key. +// Password can be nil. +// This is equivalent to ParsePKCS8PrivateKey. +func ParsePrivateKey(der []byte, password []byte) (interface{}, KDFParameters, error) { // No password provided, assume the private key is unencrypted - if v == nil { - return x509.ParsePKCS8PrivateKey(der) + if len(password) == 0 { + privateKey, err := x509.ParsePKCS8PrivateKey(der) + return privateKey, nil, err } // Use the password provided to decrypt the private key - password := v[0] var privKey encryptedPrivateKeyInfo if _, err := asn1.Unmarshal(der, &privKey); err != nil { - return nil, errors.New("pkcs8: only PKCS #5 v2.0 supported") + return nil, nil, errors.New("pkcs8: only PKCS #5 v2.0 supported") } - if !privKey.EncryptionAlgorithm.IdPBES2.Equal(oidPBES2) { - return nil, errors.New("pkcs8: only PBES2 supported") + if !privKey.EncryptionAlgorithm.Algorithm.Equal(oidPBES2) { + return nil, nil, errors.New("pkcs8: only PBES2 supported") } - if !privKey.EncryptionAlgorithm.PBES2Params.KeyDerivationFunc.IdPBKDF2.Equal(oidPKCS5PBKDF2) { - return nil, errors.New("pkcs8: only PBKDF2 supported") + var params pbes2Params + if _, err := asn1.Unmarshal(privKey.EncryptionAlgorithm.Parameters.FullBytes, ¶ms); err != nil { + return nil, nil, errors.New("pkcs8: invalid PBES2 parameters") } - encParam := privKey.EncryptionAlgorithm.PBES2Params.EncryptionScheme - kdfParam := privKey.EncryptionAlgorithm.PBES2Params.KeyDerivationFunc.PBKDF2Params + cipher, iv, err := parseEncryptionScheme(params.EncryptionScheme) + if err != nil { + return nil, nil, err + } - iv := encParam.IV - salt := kdfParam.Salt - iter := kdfParam.IterationCount - keyHash := sha1.New - if kdfParam.PrfParam.IdPRF.Equal(oidHMACWithSHA256) { - keyHash = sha256.New + kdfParams, err := parseKeyDerivationFunc(params.KeyDerivationFunc) + if err != nil { + return nil, nil, err } - encryptedKey := privKey.EncryptedData - var symkey []byte - var block cipher.Block - var err error - switch { - case encParam.EncryAlgo.Equal(oidAES128CBC): - symkey = pbkdf2.Key(password, salt, iter, 16, keyHash) - block, err = aes.NewCipher(symkey) - case encParam.EncryAlgo.Equal(oidAES256CBC): - symkey = pbkdf2.Key(password, salt, iter, 32, keyHash) - block, err = aes.NewCipher(symkey) - case encParam.EncryAlgo.Equal(oidDESEDE3CBC): - symkey = pbkdf2.Key(password, salt, iter, 24, keyHash) - block, err = des.NewTripleDESCipher(symkey) - default: - return nil, errors.New("pkcs8: only AES-256-CBC, AES-128-CBC and DES-EDE3-CBC are supported") + keySize := cipher.KeySize() + symkey, err := kdfParams.DeriveKey(password, keySize) + if err != nil { + return nil, nil, err } + + encryptedKey := privKey.EncryptedData + decryptedKey, err := cipher.Decrypt(symkey, iv, encryptedKey) if err != nil { - return nil, err + return nil, nil, err } - mode := cipher.NewCBCDecrypter(block, iv) - mode.CryptBlocks(encryptedKey, encryptedKey) - key, err := x509.ParsePKCS8PrivateKey(encryptedKey) + key, err := x509.ParsePKCS8PrivateKey(decryptedKey) if err != nil { - return nil, errors.New("pkcs8: incorrect password") + return nil, nil, errors.New("pkcs8: incorrect password") } - return key, nil + return key, kdfParams, nil } -func convertPrivateKeyToPKCS8(priv interface{}) ([]byte, error) { - var pkey privateKeyInfo - - switch priv := priv.(type) { - case 
*ecdsa.PrivateKey: - eckey, err := x509.MarshalECPrivateKey(priv) - if err != nil { - return nil, err - } - - oidNamedCurve, ok := oidFromNamedCurve(priv.Curve) - if !ok { - return nil, errors.New("pkcs8: unknown elliptic curve") - } - - // Per RFC5958, if publicKey is present, then version is set to v2(1) else version is set to v1(0). - // But openssl set to v1 even publicKey is present - pkey.Version = 1 - pkey.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 2) - pkey.PrivateKeyAlgorithm[0] = oidPublicKeyECDSA - pkey.PrivateKeyAlgorithm[1] = oidNamedCurve - pkey.PrivateKey = eckey - case *rsa.PrivateKey: - - // Per RFC5958, if publicKey is present, then version is set to v2(1) else version is set to v1(0). - // But openssl set to v1 even publicKey is present - pkey.Version = 0 - pkey.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 1) - pkey.PrivateKeyAlgorithm[0] = oidPublicKeyRSA - pkey.PrivateKey = x509.MarshalPKCS1PrivateKey(priv) - } - - return asn1.Marshal(pkey) -} +// MarshalPrivateKey encodes a private key into DER-encoded PKCS#8 with the given options. +// Password can be nil. +func MarshalPrivateKey(priv interface{}, password []byte, opts *Opts) ([]byte, error) { + if len(password) == 0 { + return x509.MarshalPKCS8PrivateKey(priv) + } + + if opts == nil { + opts = DefaultOpts + } -func convertPrivateKeyToPKCS8Encrypted(priv interface{}, password []byte) ([]byte, error) { // Convert private key into PKCS8 format - pkey, err := convertPrivateKeyToPKCS8(priv) + pkey, err := x509.MarshalPKCS8PrivateKey(priv) if err != nil { return nil, err } - // Calculate key from password based on PKCS5 algorithm - // Use 8 byte salt, 16 byte IV, and 2048 iteration - iter := 2048 - salt := make([]byte, 8) - iv := make([]byte, 16) + encAlg := opts.Cipher + salt := make([]byte, opts.KDFOpts.GetSaltSize()) _, err = rand.Read(salt) if err != nil { return nil, err } + iv := make([]byte, encAlg.IVSize()) _, err = rand.Read(iv) if err != nil { return nil, err } + key, kdfParams, err := opts.KDFOpts.DeriveKey(password, salt, encAlg.KeySize()) + if err != nil { + return nil, err + } - key := pbkdf2.Key(password, salt, iter, 32, sha256.New) + encryptedKey, err := encAlg.Encrypt(key, iv, pkey) + if err != nil { + return nil, err + } - // Use AES256-CBC mode, pad plaintext with PKCS5 padding scheme - padding := aes.BlockSize - len(pkey)%aes.BlockSize - if padding > 0 { - n := len(pkey) - pkey = append(pkey, make([]byte, padding)...) 
- for i := 0; i < padding; i++ { - pkey[n+i] = byte(padding) - } + marshalledParams, err := asn1.Marshal(kdfParams) + if err != nil { + return nil, err + } + keyDerivationFunc := pkix.AlgorithmIdentifier{ + Algorithm: opts.KDFOpts.OID(), + Parameters: asn1.RawValue{FullBytes: marshalledParams}, + } + marshalledIV, err := asn1.Marshal(iv) + if err != nil { + return nil, err + } + encryptionScheme := pkix.AlgorithmIdentifier{ + Algorithm: encAlg.OID(), + Parameters: asn1.RawValue{FullBytes: marshalledIV}, } - encryptedKey := make([]byte, len(pkey)) - block, err := aes.NewCipher(key) + encryptionAlgorithmParams := pbes2Params{ + EncryptionScheme: encryptionScheme, + KeyDerivationFunc: keyDerivationFunc, + } + marshalledEncryptionAlgorithmParams, err := asn1.Marshal(encryptionAlgorithmParams) if err != nil { return nil, err } - mode := cipher.NewCBCEncrypter(block, iv) - mode.CryptBlocks(encryptedKey, pkey) + encryptionAlgorithm := pkix.AlgorithmIdentifier{ + Algorithm: oidPBES2, + Parameters: asn1.RawValue{FullBytes: marshalledEncryptionAlgorithmParams}, + } - // pbkdf2algo := pbkdf2Algorithms{oidPKCS5PBKDF2, pbkdf2Params{salt, iter, prfParam{oidHMACWithSHA256}}} + encryptedPkey := encryptedPrivateKeyInfo{ + EncryptionAlgorithm: encryptionAlgorithm, + EncryptedData: encryptedKey, + } - pbkdf2algo := pbkdf2Algorithms{oidPKCS5PBKDF2, pbkdf2Params{salt, iter, prfParam{oidHMACWithSHA256, asn1.RawValue{Tag: asn1.TagNull}}}} - pbkdf2encs := pbkdf2Encs{oidAES256CBC, iv} - pbes2algo := pbes2Algorithms{oidPBES2, pbes2Params{pbkdf2algo, pbkdf2encs}} + return asn1.Marshal(encryptedPkey) +} - encryptedPkey := encryptedPrivateKeyInfo{pbes2algo, encryptedKey} +// ParsePKCS8PrivateKey parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter. +func ParsePKCS8PrivateKey(der []byte, v ...[]byte) (interface{}, error) { + var password []byte + if len(v) > 0 { + password = v[0] + } + privateKey, _, err := ParsePrivateKey(der, password) + return privateKey, err +} - return asn1.Marshal(encryptedPkey) +// ParsePKCS8PrivateKeyRSA parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter. +func ParsePKCS8PrivateKeyRSA(der []byte, v ...[]byte) (*rsa.PrivateKey, error) { + key, err := ParsePKCS8PrivateKey(der, v...) + if err != nil { + return nil, err + } + typedKey, ok := key.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("key block is not of type RSA") + } + return typedKey, nil +} + +// ParsePKCS8PrivateKeyECDSA parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter. +func ParsePKCS8PrivateKeyECDSA(der []byte, v ...[]byte) (*ecdsa.PrivateKey, error) { + key, err := ParsePKCS8PrivateKey(der, v...) + if err != nil { + return nil, err + } + typedKey, ok := key.(*ecdsa.PrivateKey) + if !ok { + return nil, errors.New("key block is not of type ECDSA") + } + return typedKey, nil } // ConvertPrivateKeyToPKCS8 converts the private key into PKCS#8 format. // To encrypt the private key, the password of []byte type should be provided as the second parameter. 
// -// The only supported key types are RSA and ECDSA (*rsa.PublicKey or *ecdsa.PublicKey for priv) +// The only supported key types are RSA and ECDSA (*rsa.PrivateKey or *ecdsa.PrivateKey for priv) func ConvertPrivateKeyToPKCS8(priv interface{}, v ...[]byte) ([]byte, error) { - if v == nil { - return convertPrivateKeyToPKCS8(priv) + var password []byte + if len(v) > 0 { + password = v[0] } - - password := string(v[0]) - return convertPrivateKeyToPKCS8Encrypted(priv, []byte(password)) + return MarshalPrivateKey(priv, password, nil) } diff --git a/vendor/github.com/zclconf/go-cty/cty/primitive_type.go b/vendor/github.com/zclconf/go-cty/cty/primitive_type.go index 7b3d1196c..603901732 100644 --- a/vendor/github.com/zclconf/go-cty/cty/primitive_type.go +++ b/vendor/github.com/zclconf/go-cty/cty/primitive_type.go @@ -52,6 +52,51 @@ func (t primitiveType) GoString() string { } } +// rawNumberEqual is our cty-specific definition of whether two big floats +// underlying cty.Number are "equal" for the purposes of the Value.Equals and +// Value.RawEquals methods. +// +// The built-in equality for big.Float is a direct comparison of the mantissa +// bits and the exponent, but that's too precise a check for cty because we +// routinely send numbers through decimal approximations and back and so +// we only promise to accurately represent the subset of binary floating point +// numbers that can be derived from a decimal string representation. +// +// In respect of the fact that cty only tries to preserve numbers that can +// reasonably be written in JSON documents, we use the string representation of +// a decimal approximation of the number as our comparison, relying on the +// big.Float type's heuristic for discarding extraneous mantissa bits that seem +// likely to only be there as a result of an earlier decimal-to-binary +// approximation during parsing, e.g. in ParseNumberVal. +func rawNumberEqual(a, b *big.Float) bool { + switch { + case (a == nil) != (b == nil): + return false + case a == nil: // b == nil too then, due to previous case + return true + default: + // This format and precision matches that used by cty/json.Marshal, + // and thus achieves our definition of "two numbers are equal if + // we'd use the same JSON serialization for both of them". + const format = 'f' + const prec = -1 + aStr := a.Text(format, prec) + bStr := b.Text(format, prec) + + // The one exception to our rule about equality-by-stringification is + // negative zero, because we want -0 to always be equal to +0. + const posZero = "0" + const negZero = "-0" + if aStr == negZero { + aStr = posZero + } + if bStr == negZero { + bStr = posZero + } + return aStr == bStr + } +} + // Number is the numeric type. Number values are arbitrary-precision // decimal numbers, which can then be converted into Go's various numeric // types only if they are in the appropriate range. 
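For orientation, the pkcs8 rewrite above replaces the old hard-coded AES-256-CBC/PBKDF2 path with pluggable Cipher and KDF registries. Below is a minimal usage sketch of the new entry points (not part of the diff; the key type and password are illustrative). It relies only on the signatures visible above; passing nil opts falls back to DefaultOpts, i.e. AES-256-CBC keyed via PBKDF2 with SHA-256 and 10000 iterations:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"github.com/youmark/pkcs8"
)

func main() {
	// Illustrative key; anything accepted by x509.MarshalPKCS8PrivateKey works.
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	password := []byte("correct horse battery staple")

	// nil opts selects DefaultOpts (AES-256-CBC, PBKDF2 with SHA-256).
	der, err := pkcs8.MarshalPrivateKey(priv, password, nil)
	if err != nil {
		panic(err)
	}

	// ParsePrivateKey reverses the process and also returns the KDF
	// parameters decoded from the PBES2 structure.
	key, kdfParams, err := pkcs8.ParsePrivateKey(der, password)
	if err != nil {
		panic(err)
	}
	_ = kdfParams
	fmt.Printf("round-tripped a %T\n", key)
}
```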
diff --git a/vendor/github.com/zclconf/go-cty/cty/value_ops.go b/vendor/github.com/zclconf/go-cty/cty/value_ops.go
index 55bebe036..cdcc1506f 100644
--- a/vendor/github.com/zclconf/go-cty/cty/value_ops.go
+++ b/vendor/github.com/zclconf/go-cty/cty/value_ops.go
@@ -191,7 +191,7 @@ func (val Value) Equals(other Value) Value {
 	switch {
 	case ty == Number:
-		result = val.v.(*big.Float).Cmp(other.v.(*big.Float)) == 0
+		result = rawNumberEqual(val.v.(*big.Float), other.v.(*big.Float))
 	case ty == Bool:
 		result = val.v.(bool) == other.v.(bool)
 	case ty == String:
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson.go b/vendor/go.mongodb.org/mongo-driver/bson/bson.go
index 0c739dd0a..95ffc1078 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/bson.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bson.go
@@ -7,8 +7,6 @@
 // Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer
 // See THIRD-PARTY-NOTICES for original license terms.
 
-// +build go1.9
-
 package bson // import "go.mongodb.org/mongo-driver/bson"
 
 import (
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson_1_8.go b/vendor/go.mongodb.org/mongo-driver/bson/bson_1_8.go
deleted file mode 100644
index bbe779284..000000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bson_1_8.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-// +build !go1.9
-
-package bson // import "go.mongodb.org/mongo-driver/bson"
-
-import (
-	"math"
-	"strconv"
-	"strings"
-)
-
-// Zeroer allows custom struct types to implement a report of zero
-// state. All struct types that don't implement Zeroer or where IsZero
-// returns false are considered to be not zero.
-type Zeroer interface {
-	IsZero() bool
-}
-
-// D is an ordered representation of a BSON document. This type should be used when the order of the elements matters,
-// such as MongoDB command documents. If the order of the elements does not matter, an M should be used instead.
-//
-// Example usage:
-//
-// 	bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
-type D []E
-
-// Map creates a map from the elements of the D.
-func (d D) Map() M {
-	m := make(M, len(d))
-	for _, e := range d {
-		m[e.Key] = e.Value
-	}
-	return m
-}
-
-// E represents a BSON element for a D. It is usually used inside a D.
-type E struct {
-	Key   string
-	Value interface{}
-}
-
-// M is an unordered representation of a BSON document. This type should be used when the order of the elements does not
-// matter. This type is handled as a regular map[string]interface{} when encoding and decoding. Elements will be
-// serialized in an undefined, random order. If the order of the elements matters, a D should be used instead.
-//
-// Example usage:
-//
-// 	bson.M{"foo": "bar", "hello": "world", "pi": 3.14159}
-type M map[string]interface{}
-
-// An A is an ordered representation of a BSON array.
-//
-// Example usage:
-//
-// 	bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}}
-type A []interface{}
-
-func formatDouble(f float64) string {
-	var s string
-	if math.IsInf(f, 1) {
-		s = "Infinity"
-	} else if math.IsInf(f, -1) {
-		s = "-Infinity"
-	} else if math.IsNaN(f) {
-		s = "NaN"
-	} else {
-		// Print exactly one decimalType place for integers; otherwise, print as many are necessary to
-		// perfectly represent it.
-		s = strconv.FormatFloat(f, 'G', -1, 64)
-		if !strings.ContainsRune(s, '.') {
-			s += ".0"
-		}
-	}
-
-	return s
-}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go
index 2c861b5cd..098ed69f9 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go
@@ -13,6 +13,7 @@ import (
 	"go.mongodb.org/mongo-driver/bson/bsonrw"
 	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
 )
 
 var (
@@ -43,7 +44,7 @@ type Unmarshaler interface {
 }
 
 // ValueUnmarshaler is an interface implemented by types that can unmarshal a
-// BSON value representaiton of themselves. The BSON bytes and type can be
+// BSON value representation of themselves. The BSON bytes and type can be
 // assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
 // wishes to retain the data after returning.
 type ValueUnmarshaler interface {
@@ -118,11 +119,32 @@ type EncodeContext struct {
 type DecodeContext struct {
 	*Registry
 	Truncate bool
+
 	// Ancestor is the type of a containing document. This is mainly used to determine what type
 	// should be used when decoding an embedded document into an empty interface. For example, if
 	// Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface
 	// will be decoded into a bson.M.
+	//
+	// Deprecated: Use DefaultDocumentM or DefaultDocumentD instead.
 	Ancestor reflect.Type
+
+	// defaultDocumentType specifies the Go type to decode top-level and nested BSON documents into. In particular, the
+	// usage for this field is restricted to data typed as "interface{}" or "map[string]interface{}". If DocumentType is
+	// set to a type that a BSON document cannot be unmarshaled into (e.g. "string"), unmarshalling will result in an
+	// error. DocumentType overrides the Ancestor field.
+	defaultDocumentType reflect.Type
+}
+
+// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as
+// "interface{}" or "map[string]interface{}".
+func (dc *DecodeContext) DefaultDocumentM() {
+	dc.defaultDocumentType = reflect.TypeOf(primitive.M{})
+}
+
+// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as
+// "interface{}" or "map[string]interface{}".
+func (dc *DecodeContext) DefaultDocumentD() {
+	dc.defaultDocumentType = reflect.TypeOf(primitive.D{})
 }
 
 // ValueCodec is the interface that groups the methods to encode and decode
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go
index 32fd14278..e95cab585 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go
@@ -53,7 +53,7 @@ type DefaultValueDecoders struct{}
 // RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with
 // the provided RegistryBuilder.
 //
-// There is no support for decoding map[string]interface{} becuase there is no decoder for
+// There is no support for decoding map[string]interface{} because there is no decoder for
 // interface{}, so users must either register this decoder themselves or use the
 // EmptyInterfaceDecoder available in the bson package.
 func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) {
@@ -1463,7 +1463,7 @@ func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr
 		if !val.CanAddr() {
 			return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
 		}
-		val = val.Addr() // If they type doesn't implement the interface, a pointer to it must.
+		val = val.Addr() // If the type doesn't implement the interface, a pointer to it must.
 	}
 
 	t, src, err := bsonrw.Copier{}.CopyValueToBytes(vr)
@@ -1492,16 +1492,28 @@ func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bson
 		val.Set(reflect.New(val.Type().Elem()))
 	}
 
+	_, src, err := bsonrw.Copier{}.CopyValueToBytes(vr)
+	if err != nil {
+		return err
+	}
+
+	// If the target Go value is a pointer and the BSON field value is empty, set the value to the
+	// zero value of the pointer (nil) and don't call UnmarshalBSON. UnmarshalBSON has no way to
+	// change the pointer value from within the function (only the value at the pointer address),
+	// so it can't set the pointer to "nil" itself. Since the most common Go value for an empty BSON
+	// field value is "nil", we set "nil" here and don't call UnmarshalBSON. This behavior matches
+	// the behavior of the Go "encoding/json" unmarshaler when the target Go value is a pointer and
+	// the JSON field value is "null".
+	if val.Kind() == reflect.Ptr && len(src) == 0 {
+		val.Set(reflect.Zero(val.Type()))
+		return nil
+	}
+
 	if !val.Type().Implements(tUnmarshaler) {
 		if !val.CanAddr() {
 			return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val}
 		}
-		val = val.Addr() // If they type doesn't implement the interface, a pointer to it must.
-	}
-
-	_, src, err := bsonrw.Copier{}.CopyValueToBytes(vr)
-	if err != nil {
-		return err
+		val = val.Addr() // If the type doesn't implement the interface, a pointer to it must.
 	}
 
 	fn := val.Convert(tUnmarshaler).MethodByName("UnmarshalBSON")
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go
index 49a0c3f14..6bdb43cb4 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go
@@ -30,7 +30,7 @@ var errInvalidValue = errors.New("cannot encode invalid element")
 
 var sliceWriterPool = sync.Pool{
 	New: func() interface{} {
-		sw := make(bsonrw.SliceWriter, 0, 0)
+		sw := make(bsonrw.SliceWriter, 0)
 		return &sw
 	},
 }
@@ -333,14 +333,7 @@ func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.Docum
 			continue
 		}
 
-		if enc, ok := currEncoder.(ValueEncoder); ok {
-			err = enc.EncodeValue(ec, vw, currVal)
-			if err != nil {
-				return err
-			}
-			continue
-		}
-		err = encoder.EncodeValue(ec, vw, currVal)
+		err = currEncoder.EncodeValue(ec, vw, currVal)
 		if err != nil {
 			return err
 		}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go
index c1e20f948..b0ae0e23f 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go
@@ -1,3 +1,9 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
 // Package bsoncodec provides a system for encoding values to BSON representations and decoding
 // values from BSON representations. This package considers both binary BSON and ExtendedJSON as
 // BSON representations. The types in this package enable a flexible system for handling this
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go
index a15636d0a..eda417cff 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go
@@ -57,11 +57,18 @@ func (eic EmptyInterfaceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWrit
 func (eic EmptyInterfaceCodec) getEmptyInterfaceDecodeType(dc DecodeContext, valueType bsontype.Type) (reflect.Type, error) {
 	isDocument := valueType == bsontype.Type(0) || valueType == bsontype.EmbeddedDocument
-	if isDocument && dc.Ancestor != nil {
-		// Using ancestor information rather than looking up the type map entry forces consistent decoding.
-		// If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry
-		// has been registered.
-		return dc.Ancestor, nil
+	if isDocument {
+		if dc.defaultDocumentType != nil {
+			// If the bsontype is an embedded document and the DocumentType is set on the DecodeContext, then return
+			// that type.
+			return dc.defaultDocumentType, nil
+		}
+		if dc.Ancestor != nil {
+			// Using ancestor information rather than looking up the type map entry forces consistent decoding.
+			// If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry
+			// has been registered.
+			return dc.Ancestor, nil
+		}
 	}
 
 	rtype, err := dc.LookupTypeMapEntry(valueType)
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go
index fbb8ef427..e1fbef9c6 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go
@@ -7,6 +7,7 @@
 package bsoncodec
 
 import (
+	"encoding"
 	"fmt"
 	"reflect"
 	"strconv"
@@ -126,14 +127,7 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v
 			continue
 		}
 
-		if enc, ok := currEncoder.(ValueEncoder); ok {
-			err = enc.EncodeValue(ec, vw, currVal)
-			if err != nil {
-				return err
-			}
-			continue
-		}
-		err = encoder.EncodeValue(ec, vw, currVal)
+		err = currEncoder.EncodeValue(ec, vw, currVal)
 		if err != nil {
 			return err
 		}
@@ -237,6 +231,19 @@ func (mc *MapCodec) encodeKey(val reflect.Value) (string, error) {
 		}
 		return "", err
 	}
+	// Keys that implement encoding.TextMarshaler are marshaled.
+	if km, ok := val.Interface().(encoding.TextMarshaler); ok {
+		if val.Kind() == reflect.Ptr && val.IsNil() {
+			return "", nil
+		}
+
+		buf, err := km.MarshalText()
+		if err != nil {
+			return "", err
+		}
+
+		return string(buf), nil
+	}
 
 	switch val.Kind() {
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
@@ -248,6 +255,7 @@ func (mc *MapCodec) encodeKey(val reflect.Value) (string, error) {
 }
 
 var keyUnmarshalerType = reflect.TypeOf((*KeyUnmarshaler)(nil)).Elem()
+var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
 
 func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, error) {
 	keyVal := reflect.ValueOf(key)
@@ -259,23 +267,27 @@ func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value,
 		v := keyVal.Interface().(KeyUnmarshaler)
 		err = v.UnmarshalKey(key)
 		keyVal = keyVal.Elem()
+	// Try to decode encoding.TextUnmarshalers.
+	case reflect.PtrTo(keyType).Implements(textUnmarshalerType):
+		keyVal = reflect.New(keyType)
+		v := keyVal.Interface().(encoding.TextUnmarshaler)
+		err = v.UnmarshalText([]byte(key))
+		keyVal = keyVal.Elem()
 	// Otherwise, go to type specific behavior
 	default:
 		switch keyType.Kind() {
 		case reflect.String:
 			keyVal = reflect.ValueOf(key).Convert(keyType)
 		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-			s := string(key)
-			n, parseErr := strconv.ParseInt(s, 10, 64)
+			n, parseErr := strconv.ParseInt(key, 10, 64)
 			if parseErr != nil || reflect.Zero(keyType).OverflowInt(n) {
-				err = fmt.Errorf("failed to unmarshal number key %v", s)
+				err = fmt.Errorf("failed to unmarshal number key %v", key)
 			}
 			keyVal = reflect.ValueOf(n).Convert(keyType)
 		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-			s := string(key)
-			n, parseErr := strconv.ParseUint(s, 10, 64)
+			n, parseErr := strconv.ParseUint(key, 10, 64)
 			if parseErr != nil || reflect.Zero(keyType).OverflowUint(n) {
-				err = fmt.Errorf("failed to unmarshal number key %v", s)
+				err = fmt.Errorf("failed to unmarshal number key %v", key)
 				break
 			}
 			keyVal = reflect.ValueOf(n).Convert(keyType)
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
index 60abffb24..f6f3800d4 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
@@ -54,12 +54,6 @@ func (entme ErrNoTypeMapEntry) Error() string {
 // ErrNotInterface is returned when the provided type is not an interface.
 var ErrNotInterface = errors.New("The provided type is not an interface")
 
-var defaultRegistry *Registry
-
-func init() {
-	defaultRegistry = buildDefaultRegistry()
-}
-
 // A RegistryBuilder is used to build a Registry. This type is not goroutine
 // safe.
 type RegistryBuilder struct {
@@ -304,7 +298,7 @@ func (rb *RegistryBuilder) Build() *Registry {
 	return registry
 }
 
-// LookupEncoder inspects the registry for an encoder for the given type. The lookup precendence works as follows:
+// LookupEncoder inspects the registry for an encoder for the given type. The lookup precedence works as follows:
 //
 // 1. An encoder registered for the exact type. If the given type represents an interface, an encoder registered using
 // RegisterTypeEncoder for the interface will be selected.
@@ -374,7 +368,7 @@ func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (Value
 			// in interfaceEncoders
 			defaultEnc, found := r.lookupInterfaceEncoder(t, false)
 			if !found {
-				defaultEnc, _ = r.kindEncoders[t.Kind()]
+				defaultEnc = r.kindEncoders[t.Kind()]
 			}
 			return newCondAddrEncoder(ienc.ve, defaultEnc), true
 		}
@@ -382,7 +376,7 @@ func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (Value
 	return nil, false
 }
 
-// LookupDecoder inspects the registry for an decoder for the given type. The lookup precendence works as follows:
+// LookupDecoder inspects the registry for a decoder for the given type. The lookup precedence works as follows:
 //
 // 1. A decoder registered for the exact type. If the given type represents an interface, a decoder registered using
 // RegisterTypeDecoder for the interface will be selected.
@@ -445,7 +439,7 @@ func (r *Registry) lookupInterfaceDecoder(t reflect.Type, allowAddr bool) (Value
 			// in interfaceDecoders
 			defaultDec, found := r.lookupInterfaceDecoder(t, false)
 			if !found {
-				defaultDec, _ = r.kindDecoders[t.Kind()]
+				defaultDec = r.kindDecoders[t.Kind()]
 			}
 			return newCondAddrDecoder(idec.vd, defaultDec), true
 		}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go
index 9ce901782..be3f2081e 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go
@@ -331,14 +331,7 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r
 				return newDecodeError(fd.name, ErrNoDecoder{Type: field.Elem().Type()})
 			}
 
-			if decoder, ok := fd.decoder.(ValueDecoder); ok {
-				err = decoder.DecodeValue(dctx, vr, field.Elem())
-				if err != nil {
-					return newDecodeError(fd.name, err)
-				}
-				continue
-			}
-			err = fd.decoder.DecodeValue(dctx, vr, field)
+			err = fd.decoder.DecodeValue(dctx, vr, field.Elem())
 			if err != nil {
 				return newDecodeError(fd.name, err)
 			}
@@ -567,7 +560,7 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr
 			}
 			dominant, ok := dominantField(fields[i : i+advance])
 			if !ok || !sc.OverwriteDuplicatedInlinedFields {
-				return nil, fmt.Errorf("struct %s) duplicated key %s", t.String(), name)
+				return nil, fmt.Errorf("struct %s has duplicated key %s", t.String(), name)
 			}
 			sd.fl = append(sd.fl, dominant)
 			sd.fm[name] = dominant
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go
index fb5b51084..07f4b70e6 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go
@@ -16,36 +16,12 @@ import (
 	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
 )
 
-var ptBool = reflect.TypeOf((*bool)(nil))
-var ptInt8 = reflect.TypeOf((*int8)(nil))
-var ptInt16 = reflect.TypeOf((*int16)(nil))
-var ptInt32 = reflect.TypeOf((*int32)(nil))
-var ptInt64 = reflect.TypeOf((*int64)(nil))
-var ptInt = reflect.TypeOf((*int)(nil))
-var ptUint8 = reflect.TypeOf((*uint8)(nil))
-var ptUint16 = reflect.TypeOf((*uint16)(nil))
-var ptUint32 = reflect.TypeOf((*uint32)(nil))
-var ptUint64 = reflect.TypeOf((*uint64)(nil))
-var ptUint = reflect.TypeOf((*uint)(nil))
-var ptFloat32 = reflect.TypeOf((*float32)(nil))
-var ptFloat64 = reflect.TypeOf((*float64)(nil))
-var ptString = reflect.TypeOf((*string)(nil))
-
 var tBool = reflect.TypeOf(false)
-var tFloat32 = reflect.TypeOf(float32(0))
 var tFloat64 = reflect.TypeOf(float64(0))
-var tInt = reflect.TypeOf(int(0))
-var tInt8 = reflect.TypeOf(int8(0))
-var tInt16 = reflect.TypeOf(int16(0))
 var tInt32 = reflect.TypeOf(int32(0))
 var tInt64 = reflect.TypeOf(int64(0))
 var tString = reflect.TypeOf("")
 var tTime = reflect.TypeOf(time.Time{})
-var tUint = reflect.TypeOf(uint(0))
-var tUint8 = reflect.TypeOf(uint8(0))
-var tUint16 = reflect.TypeOf(uint16(0))
-var tUint32 = reflect.TypeOf(uint32(0))
-var tUint64 = reflect.TypeOf(uint64(0))
 
 var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem()
 var tByteSlice = reflect.TypeOf([]byte(nil))
@@ -74,7 +50,6 @@ var tDecimal = reflect.TypeOf(primitive.Decimal128{})
 var tMinKey = reflect.TypeOf(primitive.MinKey{})
 var tMaxKey = reflect.TypeOf(primitive.MaxKey{})
 var tD = reflect.TypeOf(primitive.D{})
-var tM = reflect.TypeOf(primitive.M{})
 var tA = reflect.TypeOf(primitive.A{})
 var tE = reflect.TypeOf(primitive.E{})
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go
index 8a690e37c..54c76bf74 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go
@@ -423,7 +423,7 @@ func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) {
 			if ejp.canonical {
 				return nil, invalidJSONErrorForType("object", t)
 			}
-			return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as decribed in RFC-3339", t)
+			return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as described in RFC-3339", t)
 		}
 
 		ejp.advanceState()
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go
index 3af6fcf01..969570424 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go
@@ -19,7 +19,7 @@ import (
 )
 
 func wrapperKeyBSONType(key string) bsontype.Type {
-	switch string(key) {
+	switch key {
 	case "$numberInt":
 		return bsontype.Int32
 	case "$numberLong":
@@ -269,7 +269,7 @@ func (ejv *extJSONValue) parseDouble() (float64, error) {
 		return 0, fmt.Errorf("$numberDouble value should be string, but instead is %s", ejv.t)
 	}
 
-	switch string(ejv.v.(string)) {
+	switch ejv.v.(string) {
 	case "Infinity":
 		return math.Inf(1), nil
 	case "-Infinity":
@@ -364,7 +364,7 @@ func (ejv *extJSONValue) parseRegex() (pattern, options string, err error) {
 	for i, key := range regexObj.keys {
 		val := regexObj.values[i]
 
-		switch string(key) {
+		switch key {
 		case "pattern":
 			if patFound {
 				return "", "", errors.New("duplicate pattern key in $regularExpression")
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go
index 605e41a13..99ed524b7 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go
@@ -10,7 +10,6 @@ import (
 	"bytes"
 	"encoding/base64"
 	"fmt"
-	"go.mongodb.org/mongo-driver/bson/primitive"
 	"io"
 	"math"
 	"sort"
@@ -19,13 +18,9 @@ import (
 	"sync"
 	"time"
 	"unicode/utf8"
-)
 
-var ejvwPool = sync.Pool{
-	New: func() interface{} {
-		return new(extJSONValueWriter)
-	},
-}
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
 
 // ExtJSONValueWriterPool is a pool for ExtJSON ValueWriters.
 type ExtJSONValueWriterPool struct {
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go
index 55378093a..ef5d837c2 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go
@@ -86,12 +86,11 @@ type valueReader struct {
 
 // NewBSONDocumentReader returns a ValueReader using b for the underlying BSON
 // representation. Parameter b must be a BSON Document.
-//
-// TODO(skriptble): There's a lack of symmetry between the reader and writer, since the reader takes
-// a []byte while the writer takes an io.Writer. We should have two versions of each, one that takes
-// a []byte and one that takes an io.Reader or io.Writer. The []byte version will need to return a
-// thing that can return the finished []byte since it might be reallocated when appended to.
 func NewBSONDocumentReader(b []byte) ValueReader {
+	// TODO(skriptble): There's a lack of symmetry between the reader and writer, since the reader takes a []byte while the
+	// TODO writer takes an io.Writer. We should have two versions of each, one that takes a []byte and one that takes an
+	// TODO io.Reader or io.Writer. The []byte version will need to return a thing that can return the finished []byte since
+	// TODO it might be reallocated when appended to.
 	return newValueReader(b)
 }
 
@@ -384,9 +383,13 @@ func (vr *valueReader) ReadBinary() (b []byte, btype byte, err error) {
 	if err != nil {
 		return nil, 0, err
 	}
+	// Make a copy of the returned byte slice because it's just a subslice from the valueReader's
+	// buffer and is not safe to return in the unmarshaled value.
+	cp := make([]byte, len(b))
+	copy(cp, b)
 
 	vr.pop()
-	return b, btype, nil
+	return cp, btype, nil
 }
 
 func (vr *valueReader) ReadBoolean() (bool, error) {
@@ -737,6 +740,9 @@ func (vr *valueReader) ReadValue() (ValueReader, error) {
 	return vr, nil
 }
 
+// readBytes reads length bytes from the valueReader starting at the current offset. Note that the
+// returned byte slice is a subslice from the valueReader buffer and must be converted or copied
+// before returning in an unmarshaled value.
 func (vr *valueReader) readBytes(length int32) ([]byte, error) {
 	if length < 0 {
 		return nil, fmt.Errorf("invalid length: %d", length)
@@ -748,6 +754,7 @@ func (vr *valueReader) readBytes(length int32) ([]byte, error) {
 
 	start := vr.offset
 	vr.offset += int64(length)
+
 	return vr.d[start : start+int64(length)], nil
 }
 
@@ -790,16 +797,6 @@ func (vr *valueReader) readCString() (string, error) {
 	return string(vr.d[start : start+int64(idx)]), nil
 }
 
-func (vr *valueReader) skipCString() error {
-	idx := bytes.IndexByte(vr.d[vr.offset:], 0x00)
-	if idx < 0 {
-		return io.EOF
-	}
-	// idx does not include the null byte
-	vr.offset += int64(idx) + 1
-	return nil
-}
-
 func (vr *valueReader) readString() (string, error) {
 	length, err := vr.readLength()
 	if err != nil {
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go
index a39c4ea4c..f95a08afd 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go
@@ -529,7 +529,7 @@ func (vw *valueWriter) WriteDocumentEnd() error {
 	vw.pop()
 
 	if vw.stack[vw.frame].mode == mCodeWithScope {
-		// We ignore the error here because of the gaurantee of writeLength.
+		// We ignore the error here because of the guarantee of writeLength.
 		// See the docs for writeLength for more info.
 		_ = vw.writeLength()
 		vw.pop()
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go
index 7644df129..dff65f87f 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go
@@ -76,27 +76,3 @@ func (sw *SliceWriter) Write(p []byte) (int, error) {
 	*sw = append(*sw, p...)
 	return written, nil
 }
-
-type writer []byte
-
-func (w *writer) Write(p []byte) (int, error) {
-	index := len(*w)
-	return w.WriteAt(p, int64(index))
-}
-
-func (w *writer) WriteAt(p []byte, off int64) (int, error) {
-	newend := off + int64(len(p))
-	if newend < int64(len(*w)) {
-		newend = int64(len(*w))
-	}
-
-	if newend > int64(cap(*w)) {
-		buf := make([]byte, int64(2*cap(*w))+newend)
-		copy(buf, *w)
-		*w = buf
-	}
-
-	*w = []byte(*w)[:newend]
-	copy([]byte(*w)[off:], p)
-	return len(p), nil
-}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go b/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go
index 63a59ca08..7c91ae518 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go
@@ -38,6 +38,8 @@ const (
 	BinaryUUIDOld     byte = 0x03
 	BinaryUUID        byte = 0x04
 	BinaryMD5         byte = 0x05
+	BinaryEncrypted   byte = 0x06
+	BinaryColumn      byte = 0x07
 	BinaryUserDefined byte = 0x80
 )
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/decoder.go b/vendor/go.mongodb.org/mongo-driver/bson/decoder.go
index 7f6b7694f..6e189fa58 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/decoder.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/decoder.go
@@ -33,6 +33,11 @@ var decPool = sync.Pool{
 type Decoder struct {
 	dc bsoncodec.DecodeContext
 	vr bsonrw.ValueReader
+
+	// We persist defaultDocumentM and defaultDocumentD on the Decoder to prevent overwriting from
+	// (*Decoder).SetContext.
+	defaultDocumentM bool
+	defaultDocumentD bool
 }
 
 // NewDecoder returns a new decoder that uses the DefaultRegistry to read from vr.
@@ -95,6 +100,12 @@ func (d *Decoder) Decode(val interface{}) error {
 	if err != nil {
 		return err
 	}
+	if d.defaultDocumentM {
+		d.dc.DefaultDocumentM()
+	}
+	if d.defaultDocumentD {
+		d.dc.DefaultDocumentD()
+	}
 	return decoder.DecodeValue(d.dc, d.vr, rval)
 }
 
@@ -116,3 +127,15 @@ func (d *Decoder) SetContext(dc bsoncodec.DecodeContext) error {
 	d.dc = dc
 	return nil
 }
+
+// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as
+// "interface{}" or "map[string]interface{}".
+func (d *Decoder) DefaultDocumentM() {
+	d.defaultDocumentM = true
+}
+
+// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as
+// "interface{}" or "map[string]interface{}".
+func (d *Decoder) DefaultDocumentD() {
+	d.defaultDocumentD = true
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/doc.go
index 094be934f..5e3825a23 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/doc.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/doc.go
@@ -118,7 +118,7 @@
 // types, this tag is ignored.
 //
 // 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles unmarshalled
-// into that field will be trucated at the decimal point. For example, if 3.14 is unmarshalled into a field of type int,
+// into that field will be truncated at the decimal point. For example, if 3.14 is unmarshalled into a field of type int,
 // it will be unmarshalled as 3. If this tag is not specified, the decoder will throw an error if the value cannot be
 // decoded without losing precision. For float64 or non-numeric types, this tag is ignored.
 //
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go
index 8d8fc8ce1..db8d8ee92 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go
@@ -7,6 +7,9 @@
 package bson
 
 import (
+	"bytes"
+	"encoding/json"
+
 	"go.mongodb.org/mongo-driver/bson/bsoncodec"
 	"go.mongodb.org/mongo-driver/bson/bsonrw"
 	"go.mongodb.org/mongo-driver/bson/bsontype"
@@ -221,3 +224,25 @@ func MarshalExtJSONAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val
 
 	return *sw, nil
 }
+
+// IndentExtJSON will prefix and indent the provided extended JSON src and append it to dst.
+func IndentExtJSON(dst *bytes.Buffer, src []byte, prefix, indent string) error {
+	return json.Indent(dst, src, prefix, indent)
+}
+
+// MarshalExtJSONIndent returns the extended JSON encoding of val with each line prefixed
+// and indented.
+func MarshalExtJSONIndent(val interface{}, canonical, escapeHTML bool, prefix, indent string) ([]byte, error) {
+	marshaled, err := MarshalExtJSON(val, canonical, escapeHTML)
+	if err != nil {
+		return nil, err
+	}
+
+	var buf bytes.Buffer
+	err = IndentExtJSON(&buf, marshaled, prefix, indent)
+	if err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go
index a57e1d698..ffe4eed07 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go
@@ -133,11 +133,9 @@ Loop:
 }
 
 // BigInt returns significand as big.Int and exponent, bi * 10 ^ exp.
-func (d Decimal128) BigInt() (bi *big.Int, exp int, err error) {
+func (d Decimal128) BigInt() (*big.Int, int, error) {
 	high, low := d.GetBytes()
-	var posSign bool // positive sign
-
-	posSign = high>>63&1 == 0
+	posSign := high>>63&1 == 0 // positive sign
 
 	switch high >> 58 & (1<<5 - 1) {
 	case 0x1F:
@@ -149,6 +147,7 @@ func (d Decimal128) BigInt() (bi *big.Int, exp int, err error) {
 		return nil, 0, ErrParseNegInf
 	}
 
+	var exp int
 	if high>>61&3 == 3 {
 		// Bits: 1*sign 2*ignored 14*exponent 111*significand.
 		// Implicit 0b100 prefix in significand.
@@ -171,7 +170,7 @@ func (d Decimal128) BigInt() (bi *big.Int, exp int, err error) {
 		return new(big.Int), 0, nil
 	}
 
-	bi = big.NewInt(0)
+	bi := big.NewInt(0)
 	const host32bit = ^uint(0)>>32 == 0
 	if host32bit {
 		bi.SetBits([]big.Word{big.Word(low), big.Word(low >> 32), big.Word(high), big.Word(high >> 32)})
@@ -182,7 +181,7 @@ func (d Decimal128) BigInt() (bi *big.Int, exp int, err error) {
 	if !posSign {
 		return bi.Neg(bi), exp, nil
 	}
-	return
+	return bi, exp, nil
 }
 
 // IsNaN returns whether d is NaN.
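As a quick illustration of the MarshalExtJSONIndent helper added in marshal.go above (a sketch, not part of the diff; the document contents are made up): it marshals to extended JSON and then reindents via IndentExtJSON, which simply delegates to encoding/json's json.Indent.

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	doc := bson.D{{Key: "service", Value: "pagerduty"}, {Key: "retries", Value: 3}}

	// canonical=false emits relaxed extended JSON; prefix and indent are passed
	// straight through to json.Indent, so "" and two spaces give conventional output.
	out, err := bson.MarshalExtJSONIndent(doc, false, false, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```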
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go index a5a7d7c69..652898fea 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go @@ -10,8 +10,8 @@ package primitive import ( - "bytes" "crypto/rand" + "encoding" "encoding/binary" "encoding/hex" "encoding/json" @@ -34,6 +34,9 @@ var NilObjectID ObjectID var objectIDCounter = readRandomUint32() var processUnique = processUniqueBytes() +var _ encoding.TextMarshaler = ObjectID{} +var _ encoding.TextUnmarshaler = &ObjectID{} + // NewObjectID generates a new ObjectID. func NewObjectID() ObjectID { return NewObjectIDFromTimestamp(time.Now()) @@ -67,7 +70,7 @@ func (id ObjectID) String() string { // IsZero returns true if id is the empty ObjectID. func (id ObjectID) IsZero() bool { - return bytes.Equal(id[:], NilObjectID[:]) + return id == NilObjectID } // ObjectIDFromHex creates a new ObjectID from a hex string. It returns an error if the hex string is not a @@ -83,7 +86,7 @@ func ObjectIDFromHex(s string) (ObjectID, error) { } var oid [12]byte - copy(oid[:], b[:]) + copy(oid[:], b) return oid, nil } @@ -94,6 +97,23 @@ func IsValidObjectID(s string) bool { return err == nil } +// MarshalText returns the ObjectID as UTF-8-encoded text. Implementing this allows us to use ObjectID +// as a map key when marshalling JSON. See https://pkg.go.dev/encoding#TextMarshaler +func (id ObjectID) MarshalText() ([]byte, error) { + return []byte(id.Hex()), nil +} + +// UnmarshalText populates the byte slice with the ObjectID. Implementing this allows us to use ObjectID +// as a map key when unmarshalling JSON. See https://pkg.go.dev/encoding#TextUnmarshaler +func (id *ObjectID) UnmarshalText(b []byte) error { + oid, err := ObjectIDFromHex(string(b)) + if err != nil { + return err + } + *id = oid + return nil +} + // MarshalJSON returns the ObjectID as a string func (id ObjectID) MarshalJSON() ([]byte, error) { return json.Marshal(id.Hex()) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go index 51984e223..b3cba1bf9 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go @@ -111,7 +111,7 @@ func (d DBPointer) String() string { // Equal compares d to d2 and returns true if they are equal. func (d DBPointer) Equal(d2 DBPointer) bool { - return d.DB == d2.DB && bytes.Equal(d.Pointer[:], d2.Pointer[:]) + return d == d2 } // IsZero returns if d is the empty DBPointer. 
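The TextMarshaler/TextUnmarshaler implementations added above are what make ObjectID usable as a JSON map key. A small sketch, not part of the vendored diff:

package main

import (
	"encoding/json"
	"fmt"

	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	id := primitive.NewObjectID()
	counts := map[primitive.ObjectID]int{id: 1}

	// encoding/json calls MarshalText for non-string map keys, so each key
	// is rendered as its 24-character hex form.
	out, err := json.Marshal(counts)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))

	// UnmarshalText handles the reverse direction.
	decoded := map[primitive.ObjectID]int{}
	if err := json.Unmarshal(out, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded[id]) // 1
}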
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go index f397fa2d5..1cbe3884d 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go @@ -14,6 +14,9 @@ import ( "go.mongodb.org/mongo-driver/bson/bsonrw" ) +var tRawValue = reflect.TypeOf(RawValue{}) +var tRaw = reflect.TypeOf(Raw(nil)) + var primitiveCodecs PrimitiveCodecs // PrimitiveCodecs is a namespace for all of the default bsoncodec.Codecs for the primitive types @@ -87,25 +90,3 @@ func (PrimitiveCodecs) RawDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.Valu val.Set(reflect.ValueOf(rdr)) return err } - -func (pc PrimitiveCodecs) encodeRaw(ec bsoncodec.EncodeContext, dw bsonrw.DocumentWriter, raw Raw) error { - var copier bsonrw.Copier - elems, err := raw.Elements() - if err != nil { - return err - } - for _, elem := range elems { - dvw, err := dw.WriteDocumentElement(elem.Key()) - if err != nil { - return err - } - - val := elem.Value() - err = copier.CopyValueFromBytes(dvw, val.Type, val.Value) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw.go b/vendor/go.mongodb.org/mongo-driver/bson/raw.go index 2aae9f56a..efd705daa 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw.go @@ -15,7 +15,6 @@ import ( // ErrNilReader indicates that an operation was attempted on a nil bson.Reader. var ErrNilReader = errors.New("nil reader") -var errValidateDone = errors.New("validation loop complete") // Raw is a wrapper around a byte slice. It will interpret the slice as a // BSON document. This type is a wrapper around a bsoncore.Document. Errors returned from the @@ -84,9 +83,3 @@ func (r Raw) IndexErr(index uint) (RawElement, error) { // String implements the fmt.Stringer interface. func (r Raw) String() string { return bsoncore.Document(r).String() } - -// readi32 is a helper function for reading an int32 from slice of bytes. -func readi32(b []byte) int32 { - _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 - return int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/registry.go index 09062d208..16d7573e7 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/registry.go @@ -13,7 +13,7 @@ import "go.mongodb.org/mongo-driver/bson/bsoncodec" var DefaultRegistry = NewRegistryBuilder().Build() // NewRegistryBuilder creates a new RegistryBuilder configured with the default encoders and -// deocders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the +// decoders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the // PrimitiveCodecs type in this package. 
func NewRegistryBuilder() *bsoncodec.RegistryBuilder { rb := bsoncodec.NewRegistryBuilder() diff --git a/vendor/go.mongodb.org/mongo-driver/bson/types.go b/vendor/go.mongodb.org/mongo-driver/bson/types.go index bf91f691a..13a1c35cf 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/types.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/types.go @@ -7,11 +7,7 @@ package bson import ( - "reflect" - "time" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" ) // These constants uniquely refer to each BSON type. @@ -38,48 +34,3 @@ const ( TypeMinKey = bsontype.MinKey TypeMaxKey = bsontype.MaxKey ) - -var tBinary = reflect.TypeOf(primitive.Binary{}) -var tBool = reflect.TypeOf(false) -var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{}) -var tDBPointer = reflect.TypeOf(primitive.DBPointer{}) -var tDecimal = reflect.TypeOf(primitive.Decimal128{}) -var tD = reflect.TypeOf(D{}) -var tA = reflect.TypeOf(A{}) -var tDateTime = reflect.TypeOf(primitive.DateTime(0)) -var tUndefined = reflect.TypeOf(primitive.Undefined{}) -var tNull = reflect.TypeOf(primitive.Null{}) -var tRawValue = reflect.TypeOf(RawValue{}) -var tFloat32 = reflect.TypeOf(float32(0)) -var tFloat64 = reflect.TypeOf(float64(0)) -var tInt = reflect.TypeOf(int(0)) -var tInt8 = reflect.TypeOf(int8(0)) -var tInt16 = reflect.TypeOf(int16(0)) -var tInt32 = reflect.TypeOf(int32(0)) -var tInt64 = reflect.TypeOf(int64(0)) -var tJavaScript = reflect.TypeOf(primitive.JavaScript("")) -var tOID = reflect.TypeOf(primitive.ObjectID{}) -var tRaw = reflect.TypeOf(Raw(nil)) -var tRegex = reflect.TypeOf(primitive.Regex{}) -var tString = reflect.TypeOf("") -var tSymbol = reflect.TypeOf(primitive.Symbol("")) -var tTime = reflect.TypeOf(time.Time{}) -var tTimestamp = reflect.TypeOf(primitive.Timestamp{}) -var tUint = reflect.TypeOf(uint(0)) -var tUint8 = reflect.TypeOf(uint8(0)) -var tUint16 = reflect.TypeOf(uint16(0)) -var tUint32 = reflect.TypeOf(uint32(0)) -var tUint64 = reflect.TypeOf(uint64(0)) -var tMinKey = reflect.TypeOf(primitive.MinKey{}) -var tMaxKey = reflect.TypeOf(primitive.MaxKey{}) - -var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem() -var tEmptySlice = reflect.TypeOf([]interface{}(nil)) - -var zeroVal reflect.Value - -// this references the quantity of milliseconds between zero time and -// the unix epoch. useful for making sure that we convert time.Time -// objects correctly to match the legacy bson library's handling of -// time.Time values. -const zeroEpochMs = int64(62135596800000) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go b/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go index 6f9ca04d3..f936ba183 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go @@ -23,7 +23,7 @@ type Unmarshaler interface { } // ValueUnmarshaler is an interface implemented by types that can unmarshal a -// BSON value representaiton of themselves. The BSON bytes and type can be +// BSON value representation of themselves. The BSON bytes and type can be // assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it // wishes to retain the data after returning. 
type ValueUnmarshaler interface { diff --git a/vendor/go.mongodb.org/mongo-driver/event/monitoring.go b/vendor/go.mongodb.org/mongo-driver/event/monitoring.go index bc802f0f9..ac05e401c 100644 --- a/vendor/go.mongodb.org/mongo-driver/event/monitoring.go +++ b/vendor/go.mongodb.org/mongo-driver/event/monitoring.go @@ -22,6 +22,9 @@ type CommandStartedEvent struct { CommandName string RequestID int64 ConnectionID string + // ServerConnectionID contains the connection ID from the server of the operation. If the server does not return + // this value (e.g. on MDB < 4.2), it is unset. + ServerConnectionID *int32 // ServiceID contains the ID of the server to which the command was sent if it is running behind a load balancer. // Otherwise, it is unset. ServiceID *primitive.ObjectID @@ -33,6 +36,9 @@ type CommandFinishedEvent struct { CommandName string RequestID int64 ConnectionID string + // ServerConnectionID contains the connection ID from the server of the operation. If the server does not return + // this value (e.g. on MDB < 4.2), it is unset. + ServerConnectionID *int32 // ServiceID contains the ID of the server to which the command was sent if it is running behind a load balancer. // Otherwise, it is unset. ServiceID *primitive.ObjectID @@ -64,19 +70,22 @@ const ( ReasonStale = "stale" ReasonConnectionErrored = "connectionError" ReasonTimedOut = "timeout" + ReasonError = "error" ) // strings for pool command monitoring types const ( - ConnectionClosed = "ConnectionClosed" PoolCreated = "ConnectionPoolCreated" + PoolReady = "ConnectionPoolReady" + PoolCleared = "ConnectionPoolCleared" + PoolClosedEvent = "ConnectionPoolClosed" ConnectionCreated = "ConnectionCreated" ConnectionReady = "ConnectionReady" + ConnectionClosed = "ConnectionClosed" + GetStarted = "ConnectionCheckOutStarted" GetFailed = "ConnectionCheckOutFailed" GetSucceeded = "ConnectionCheckedOut" ConnectionReturned = "ConnectionCheckedIn" - PoolCleared = "ConnectionPoolCleared" - PoolClosedEvent = "ConnectionPoolClosed" ) // MonitorPoolOptions contains pool options as formatted in pool events diff --git a/vendor/go.mongodb.org/mongo-driver/internal/const.go b/vendor/go.mongodb.org/mongo-driver/internal/const.go index 772053ad2..a7ef69d13 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/const.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/const.go @@ -9,7 +9,11 @@ package internal // import "go.mongodb.org/mongo-driver/internal" // Version is the current version of the driver. var Version = "local build" -// SetMockServiceID enables a mode in which the driver mocks server support for returning a "serviceId" field in "hello" -// command responses by using the value of "topologyVersion.processId". This is used for testing load balancer support -// until an upstream service can support running behind a load balancer. -var SetMockServiceID = false +// LegacyHello is the legacy version of the hello command. +var LegacyHello = "isMaster" + +// LegacyHelloLowercase is the lowercase, legacy version of the hello command. +var LegacyHelloLowercase = "ismaster" + +// LegacyNotPrimary is the legacy version of the "not primary" server error message. +var LegacyNotPrimary = "not master" diff --git a/vendor/go.mongodb.org/mongo-driver/internal/csfle_util.go b/vendor/go.mongodb.org/mongo-driver/internal/csfle_util.go new file mode 100644 index 000000000..635d8e353 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/csfle_util.go @@ -0,0 +1,39 @@ +// Copyright (C) MongoDB, Inc. 2022-present. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package internal + +import ( + "fmt" + + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +const ( + EncryptedCacheCollection = "ecc" + EncryptedStateCollection = "esc" + EncryptedCompactionCollection = "ecoc" +) + +// GetEncryptedStateCollectionName returns the encrypted state collection name associated with dataCollectionName. +func GetEncryptedStateCollectionName(efBSON bsoncore.Document, dataCollectionName string, stateCollection string) (string, error) { + fieldName := stateCollection + "Collection" + val, err := efBSON.LookupErr(fieldName) + if err != nil { + if err != bsoncore.ErrElementNotFound { + return "", err + } + // Return default name. + defaultName := "enxcol_." + dataCollectionName + "." + stateCollection + return defaultName, nil + } + + stateCollectionName, ok := val.StringValueOK() + if !ok { + return "", fmt.Errorf("expected string for '%v', got: %v", fieldName, val.Type) + } + return stateCollectionName, nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/csot_util.go b/vendor/go.mongodb.org/mongo-driver/internal/csot_util.go new file mode 100644 index 000000000..ea07637bc --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/csot_util.go @@ -0,0 +1,34 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package internal + +import ( + "context" + "time" +) + +type timeoutKey struct{} + +// MakeTimeoutContext returns a new context with Client-Side Operation Timeout (CSOT) feature-gated behavior +// and a Timeout set to the passed in Duration. Setting a Timeout on a single operation is not supported in +// public API. +// +// TODO(GODRIVER-2348) We may be able to remove this function once CSOT feature-gated behavior becomes the +// TODO default behavior. +func MakeTimeoutContext(ctx context.Context, to time.Duration) (context.Context, context.CancelFunc) { + // Only use the passed in Duration as a timeout on the Context if it + // is non-zero. + cancelFunc := func() {} + if to != 0 { + ctx, cancelFunc = context.WithTimeout(ctx, to) + } + return context.WithValue(ctx, timeoutKey{}, true), cancelFunc +} + +func IsTimeoutContext(ctx context.Context) bool { + return ctx.Value(timeoutKey{}) != nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/error.go b/vendor/go.mongodb.org/mongo-driver/internal/error.go index 6a105af4f..1fec3f183 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/error.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/error.go @@ -117,3 +117,7 @@ func (e *wrappedError) Error() string { func (e *wrappedError) Inner() error { return e.inner } + +func (e *wrappedError) Unwrap() error { + return e.inner +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/bits.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/bits.go new file mode 100644 index 000000000..447900914 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/bits.go @@ -0,0 +1,38 @@ +// Copied from https://cs.opensource.google/go/go/+/946b4baaf6521d521928500b2b57429c149854e7:src/math/bits.go + +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rand + +// Add64 returns the sum with carry of x, y and carry: sum = x + y + carry. +// The carry input must be 0 or 1; otherwise the behavior is undefined. +// The carryOut output is guaranteed to be 0 or 1. +func Add64(x, y, carry uint64) (sum, carryOut uint64) { + yc := y + carry + sum = x + yc + if sum < x || yc < y { + carryOut = 1 + } + return +} + +// Mul64 returns the 128-bit product of x and y: (hi, lo) = x * y +// with the product bits' upper half returned in hi and the lower +// half returned in lo. +func Mul64(x, y uint64) (hi, lo uint64) { + const mask32 = 1<<32 - 1 + x0 := x & mask32 + x1 := x >> 32 + y0 := y & mask32 + y1 := y >> 32 + w0 := x0 * y0 + t := x1*y0 + w0>>32 + w1 := t & mask32 + w2 := t >> 32 + w1 += x0 * y1 + hi = x1*y1 + w2 + w1>>32 + lo = x * y + return +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/exp.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/exp.go new file mode 100644 index 000000000..859e4e0e4 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/exp.go @@ -0,0 +1,223 @@ +// Copied from https://cs.opensource.google/go/x/exp/+/24438e51023af3bfc1db8aed43c1342817e8cfcd:rand/exp.go + +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rand + +import ( + "math" +) + +/* + * Exponential distribution + * + * See "The Ziggurat Method for Generating Random Variables" + * (Marsaglia & Tsang, 2000) + * http://www.jstatsoft.org/v05/i08/paper [pdf] + */ + +const ( + re = 7.69711747013104972 +) + +// ExpFloat64 returns an exponentially distributed float64 in the range +// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter +// (lambda) is 1 and whose mean is 1/lambda (1). 
+// To produce a distribution with a different rate parameter, +// callers can adjust the output using: +// +// sample = ExpFloat64() / desiredRateParameter +func (r *Rand) ExpFloat64() float64 { + for { + j := r.Uint32() + i := j & 0xFF + x := float64(j) * float64(we[i]) + if j < ke[i] { + return x + } + if i == 0 { + return re - math.Log(r.Float64()) + } + if fe[i]+float32(r.Float64())*(fe[i-1]-fe[i]) < float32(math.Exp(-x)) { + return x + } + } +} + +var ke = [256]uint32{ + 0xe290a139, 0x0, 0x9beadebc, 0xc377ac71, 0xd4ddb990, + 0xde893fb8, 0xe4a8e87c, 0xe8dff16a, 0xebf2deab, 0xee49a6e8, + 0xf0204efd, 0xf19bdb8e, 0xf2d458bb, 0xf3da104b, 0xf4b86d78, + 0xf577ad8a, 0xf61de83d, 0xf6afb784, 0xf730a573, 0xf7a37651, + 0xf80a5bb6, 0xf867189d, 0xf8bb1b4f, 0xf9079062, 0xf94d70ca, + 0xf98d8c7d, 0xf9c8928a, 0xf9ff175b, 0xfa319996, 0xfa6085f8, + 0xfa8c3a62, 0xfab5084e, 0xfadb36c8, 0xfaff0410, 0xfb20a6ea, + 0xfb404fb4, 0xfb5e2951, 0xfb7a59e9, 0xfb95038c, 0xfbae44ba, + 0xfbc638d8, 0xfbdcf892, 0xfbf29a30, 0xfc0731df, 0xfc1ad1ed, + 0xfc2d8b02, 0xfc3f6c4d, 0xfc5083ac, 0xfc60ddd1, 0xfc708662, + 0xfc7f8810, 0xfc8decb4, 0xfc9bbd62, 0xfca9027c, 0xfcb5c3c3, + 0xfcc20864, 0xfccdd70a, 0xfcd935e3, 0xfce42ab0, 0xfceebace, + 0xfcf8eb3b, 0xfd02c0a0, 0xfd0c3f59, 0xfd156b7b, 0xfd1e48d6, + 0xfd26daff, 0xfd2f2552, 0xfd372af7, 0xfd3eeee5, 0xfd4673e7, + 0xfd4dbc9e, 0xfd54cb85, 0xfd5ba2f2, 0xfd62451b, 0xfd68b415, + 0xfd6ef1da, 0xfd750047, 0xfd7ae120, 0xfd809612, 0xfd8620b4, + 0xfd8b8285, 0xfd90bcf5, 0xfd95d15e, 0xfd9ac10b, 0xfd9f8d36, + 0xfda43708, 0xfda8bf9e, 0xfdad2806, 0xfdb17141, 0xfdb59c46, + 0xfdb9a9fd, 0xfdbd9b46, 0xfdc170f6, 0xfdc52bd8, 0xfdc8ccac, + 0xfdcc542d, 0xfdcfc30b, 0xfdd319ef, 0xfdd6597a, 0xfdd98245, + 0xfddc94e5, 0xfddf91e6, 0xfde279ce, 0xfde54d1f, 0xfde80c52, + 0xfdeab7de, 0xfded5034, 0xfdefd5be, 0xfdf248e3, 0xfdf4aa06, + 0xfdf6f984, 0xfdf937b6, 0xfdfb64f4, 0xfdfd818d, 0xfdff8dd0, + 0xfe018a08, 0xfe03767a, 0xfe05536c, 0xfe07211c, 0xfe08dfc9, + 0xfe0a8fab, 0xfe0c30fb, 0xfe0dc3ec, 0xfe0f48b1, 0xfe10bf76, + 0xfe122869, 0xfe1383b4, 0xfe14d17c, 0xfe1611e7, 0xfe174516, + 0xfe186b2a, 0xfe19843e, 0xfe1a9070, 0xfe1b8fd6, 0xfe1c8289, + 0xfe1d689b, 0xfe1e4220, 0xfe1f0f26, 0xfe1fcfbc, 0xfe2083ed, + 0xfe212bc3, 0xfe21c745, 0xfe225678, 0xfe22d95f, 0xfe234ffb, + 0xfe23ba4a, 0xfe241849, 0xfe2469f2, 0xfe24af3c, 0xfe24e81e, + 0xfe25148b, 0xfe253474, 0xfe2547c7, 0xfe254e70, 0xfe25485a, + 0xfe25356a, 0xfe251586, 0xfe24e88f, 0xfe24ae64, 0xfe2466e1, + 0xfe2411df, 0xfe23af34, 0xfe233eb4, 0xfe22c02c, 0xfe22336b, + 0xfe219838, 0xfe20ee58, 0xfe20358c, 0xfe1f6d92, 0xfe1e9621, + 0xfe1daef0, 0xfe1cb7ac, 0xfe1bb002, 0xfe1a9798, 0xfe196e0d, + 0xfe1832fd, 0xfe16e5fe, 0xfe15869d, 0xfe141464, 0xfe128ed3, + 0xfe10f565, 0xfe0f478c, 0xfe0d84b1, 0xfe0bac36, 0xfe09bd73, + 0xfe07b7b5, 0xfe059a40, 0xfe03644c, 0xfe011504, 0xfdfeab88, + 0xfdfc26e9, 0xfdf98629, 0xfdf6c83b, 0xfdf3ec01, 0xfdf0f04a, + 0xfdedd3d1, 0xfdea953d, 0xfde7331e, 0xfde3abe9, 0xfddffdfb, + 0xfddc2791, 0xfdd826cd, 0xfdd3f9a8, 0xfdcf9dfc, 0xfdcb1176, + 0xfdc65198, 0xfdc15bb3, 0xfdbc2ce2, 0xfdb6c206, 0xfdb117be, + 0xfdab2a63, 0xfda4f5fd, 0xfd9e7640, 0xfd97a67a, 0xfd908192, + 0xfd8901f2, 0xfd812182, 0xfd78d98e, 0xfd7022bb, 0xfd66f4ed, + 0xfd5d4732, 0xfd530f9c, 0xfd48432b, 0xfd3cd59a, 0xfd30b936, + 0xfd23dea4, 0xfd16349e, 0xfd07a7a3, 0xfcf8219b, 0xfce7895b, + 0xfcd5c220, 0xfcc2aadb, 0xfcae1d5e, 0xfc97ed4e, 0xfc7fe6d4, + 0xfc65ccf3, 0xfc495762, 0xfc2a2fc8, 0xfc07ee19, 0xfbe213c1, + 0xfbb8051a, 0xfb890078, 0xfb5411a5, 0xfb180005, 0xfad33482, + 0xfa839276, 0xfa263b32, 0xf9b72d1c, 
0xf930a1a2, 0xf889f023, + 0xf7b577d2, 0xf69c650c, 0xf51530f0, 0xf2cb0e3c, 0xeeefb15d, + 0xe6da6ecf, +} +var we = [256]float32{ + 2.0249555e-09, 1.486674e-11, 2.4409617e-11, 3.1968806e-11, + 3.844677e-11, 4.4228204e-11, 4.9516443e-11, 5.443359e-11, + 5.905944e-11, 6.344942e-11, 6.7643814e-11, 7.1672945e-11, + 7.556032e-11, 7.932458e-11, 8.298079e-11, 8.654132e-11, + 9.0016515e-11, 9.3415074e-11, 9.674443e-11, 1.0001099e-10, + 1.03220314e-10, 1.06377254e-10, 1.09486115e-10, 1.1255068e-10, + 1.1557435e-10, 1.1856015e-10, 1.2151083e-10, 1.2442886e-10, + 1.2731648e-10, 1.3017575e-10, 1.3300853e-10, 1.3581657e-10, + 1.3860142e-10, 1.4136457e-10, 1.4410738e-10, 1.4683108e-10, + 1.4953687e-10, 1.5222583e-10, 1.54899e-10, 1.5755733e-10, + 1.6020171e-10, 1.6283301e-10, 1.6545203e-10, 1.6805951e-10, + 1.7065617e-10, 1.732427e-10, 1.7581973e-10, 1.7838787e-10, + 1.8094774e-10, 1.8349985e-10, 1.8604476e-10, 1.8858298e-10, + 1.9111498e-10, 1.9364126e-10, 1.9616223e-10, 1.9867835e-10, + 2.0119004e-10, 2.0369768e-10, 2.0620168e-10, 2.087024e-10, + 2.1120022e-10, 2.136955e-10, 2.1618855e-10, 2.1867974e-10, + 2.2116936e-10, 2.2365775e-10, 2.261452e-10, 2.2863202e-10, + 2.311185e-10, 2.3360494e-10, 2.360916e-10, 2.3857874e-10, + 2.4106667e-10, 2.4355562e-10, 2.4604588e-10, 2.485377e-10, + 2.5103128e-10, 2.5352695e-10, 2.560249e-10, 2.585254e-10, + 2.6102867e-10, 2.6353494e-10, 2.6604446e-10, 2.6855745e-10, + 2.7107416e-10, 2.7359479e-10, 2.761196e-10, 2.7864877e-10, + 2.8118255e-10, 2.8372119e-10, 2.8626485e-10, 2.888138e-10, + 2.9136826e-10, 2.939284e-10, 2.9649452e-10, 2.9906677e-10, + 3.016454e-10, 3.0423064e-10, 3.0682268e-10, 3.0942177e-10, + 3.1202813e-10, 3.1464195e-10, 3.1726352e-10, 3.19893e-10, + 3.2253064e-10, 3.251767e-10, 3.2783135e-10, 3.3049485e-10, + 3.3316744e-10, 3.3584938e-10, 3.3854083e-10, 3.4124212e-10, + 3.4395342e-10, 3.46675e-10, 3.4940711e-10, 3.5215003e-10, + 3.5490397e-10, 3.5766917e-10, 3.6044595e-10, 3.6323455e-10, + 3.660352e-10, 3.6884823e-10, 3.7167386e-10, 3.745124e-10, + 3.773641e-10, 3.802293e-10, 3.8310827e-10, 3.860013e-10, + 3.8890866e-10, 3.918307e-10, 3.9476775e-10, 3.9772008e-10, + 4.0068804e-10, 4.0367196e-10, 4.0667217e-10, 4.09689e-10, + 4.1272286e-10, 4.1577405e-10, 4.1884296e-10, 4.2192994e-10, + 4.250354e-10, 4.281597e-10, 4.313033e-10, 4.3446652e-10, + 4.3764986e-10, 4.408537e-10, 4.4407847e-10, 4.4732465e-10, + 4.5059267e-10, 4.5388301e-10, 4.571962e-10, 4.6053267e-10, + 4.6389292e-10, 4.6727755e-10, 4.70687e-10, 4.741219e-10, + 4.7758275e-10, 4.810702e-10, 4.845848e-10, 4.8812715e-10, + 4.9169796e-10, 4.9529775e-10, 4.989273e-10, 5.0258725e-10, + 5.0627835e-10, 5.100013e-10, 5.1375687e-10, 5.1754584e-10, + 5.21369e-10, 5.2522725e-10, 5.2912136e-10, 5.330522e-10, + 5.370208e-10, 5.4102806e-10, 5.45075e-10, 5.491625e-10, + 5.532918e-10, 5.5746385e-10, 5.616799e-10, 5.6594107e-10, + 5.7024857e-10, 5.746037e-10, 5.7900773e-10, 5.834621e-10, + 5.8796823e-10, 5.925276e-10, 5.971417e-10, 6.018122e-10, + 6.065408e-10, 6.113292e-10, 6.1617933e-10, 6.2109295e-10, + 6.260722e-10, 6.3111916e-10, 6.3623595e-10, 6.4142497e-10, + 6.4668854e-10, 6.5202926e-10, 6.5744976e-10, 6.6295286e-10, + 6.6854156e-10, 6.742188e-10, 6.79988e-10, 6.858526e-10, + 6.9181616e-10, 6.978826e-10, 7.04056e-10, 7.103407e-10, + 7.167412e-10, 7.2326256e-10, 7.2990985e-10, 7.366886e-10, + 7.4360473e-10, 7.5066453e-10, 7.5787476e-10, 7.6524265e-10, + 7.7277595e-10, 7.80483e-10, 7.883728e-10, 7.9645507e-10, + 8.047402e-10, 8.1323964e-10, 8.219657e-10, 8.309319e-10, + 8.401528e-10, 8.496445e-10, 
8.594247e-10, 8.6951274e-10, + 8.799301e-10, 8.9070046e-10, 9.018503e-10, 9.134092e-10, + 9.254101e-10, 9.378904e-10, 9.508923e-10, 9.644638e-10, + 9.786603e-10, 9.935448e-10, 1.0091913e-09, 1.025686e-09, + 1.0431306e-09, 1.0616465e-09, 1.08138e-09, 1.1025096e-09, + 1.1252564e-09, 1.1498986e-09, 1.1767932e-09, 1.206409e-09, + 1.2393786e-09, 1.276585e-09, 1.3193139e-09, 1.3695435e-09, + 1.4305498e-09, 1.508365e-09, 1.6160854e-09, 1.7921248e-09, +} +var fe = [256]float32{ + 1, 0.9381437, 0.90046996, 0.87170434, 0.8477855, 0.8269933, + 0.8084217, 0.7915276, 0.77595687, 0.7614634, 0.7478686, + 0.7350381, 0.72286767, 0.71127474, 0.70019263, 0.6895665, + 0.67935055, 0.6695063, 0.66000086, 0.65080583, 0.6418967, + 0.63325197, 0.6248527, 0.6166822, 0.60872537, 0.60096896, + 0.5934009, 0.58601034, 0.5787874, 0.57172304, 0.5648092, + 0.5580383, 0.5514034, 0.5448982, 0.5385169, 0.53225386, + 0.5261042, 0.52006316, 0.5141264, 0.50828975, 0.5025495, + 0.496902, 0.49134386, 0.485872, 0.48048335, 0.4751752, + 0.46994483, 0.46478975, 0.45970762, 0.45469615, 0.44975325, + 0.44487688, 0.44006512, 0.43531612, 0.43062815, 0.42599955, + 0.42142874, 0.4169142, 0.41245446, 0.40804818, 0.403694, + 0.3993907, 0.39513698, 0.39093173, 0.38677382, 0.38266218, + 0.37859577, 0.37457356, 0.37059465, 0.3666581, 0.362763, + 0.35890847, 0.35509375, 0.351318, 0.3475805, 0.34388044, + 0.34021714, 0.3365899, 0.33299807, 0.32944095, 0.32591796, + 0.3224285, 0.3189719, 0.31554767, 0.31215525, 0.30879408, + 0.3054636, 0.3021634, 0.29889292, 0.2956517, 0.29243928, + 0.28925523, 0.28609908, 0.28297043, 0.27986884, 0.27679393, + 0.2737453, 0.2707226, 0.2677254, 0.26475343, 0.26180625, + 0.25888354, 0.25598502, 0.2531103, 0.25025907, 0.24743107, + 0.24462597, 0.24184346, 0.23908329, 0.23634516, 0.23362878, + 0.23093392, 0.2282603, 0.22560766, 0.22297576, 0.22036438, + 0.21777324, 0.21520215, 0.21265087, 0.21011916, 0.20760682, + 0.20511365, 0.20263945, 0.20018397, 0.19774707, 0.19532852, + 0.19292815, 0.19054577, 0.1881812, 0.18583426, 0.18350479, + 0.1811926, 0.17889754, 0.17661946, 0.17435817, 0.17211354, + 0.1698854, 0.16767362, 0.16547804, 0.16329853, 0.16113494, + 0.15898713, 0.15685499, 0.15473837, 0.15263714, 0.15055119, + 0.14848037, 0.14642459, 0.14438373, 0.14235765, 0.14034624, + 0.13834943, 0.13636707, 0.13439907, 0.13244532, 0.13050574, + 0.1285802, 0.12666863, 0.12477092, 0.12288698, 0.12101672, + 0.119160056, 0.1173169, 0.115487166, 0.11367077, 0.11186763, + 0.11007768, 0.10830083, 0.10653701, 0.10478614, 0.10304816, + 0.101323, 0.09961058, 0.09791085, 0.09622374, 0.09454919, + 0.09288713, 0.091237515, 0.08960028, 0.087975375, 0.08636274, + 0.08476233, 0.083174095, 0.081597984, 0.08003395, 0.07848195, + 0.076941945, 0.07541389, 0.07389775, 0.072393484, 0.07090106, + 0.069420435, 0.06795159, 0.066494495, 0.06504912, 0.063615434, + 0.062193416, 0.060783047, 0.059384305, 0.057997175, + 0.05662164, 0.05525769, 0.053905312, 0.052564494, 0.051235236, + 0.049917534, 0.048611384, 0.047316793, 0.046033762, 0.0447623, + 0.043502413, 0.042254124, 0.041017443, 0.039792392, + 0.038578995, 0.037377283, 0.036187284, 0.035009038, + 0.033842582, 0.032687962, 0.031545233, 0.030414443, 0.02929566, + 0.02818895, 0.027094385, 0.026012046, 0.024942026, 0.023884421, + 0.022839336, 0.021806888, 0.020787204, 0.019780423, 0.0187867, + 0.0178062, 0.016839107, 0.015885621, 0.014945968, 0.014020392, + 0.013109165, 0.012212592, 0.011331013, 0.01046481, 0.009614414, + 0.008780315, 0.007963077, 0.0071633533, 0.006381906, + 0.0056196423, 
0.0048776558, 0.004157295, 0.0034602648, + 0.0027887989, 0.0021459677, 0.0015362998, 0.0009672693, + 0.00045413437, +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/normal.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/normal.go new file mode 100644 index 000000000..8c74a358d --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/normal.go @@ -0,0 +1,158 @@ +// Copied from https://cs.opensource.google/go/x/exp/+/24438e51023af3bfc1db8aed43c1342817e8cfcd:rand/normal.go + +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rand + +import ( + "math" +) + +/* + * Normal distribution + * + * See "The Ziggurat Method for Generating Random Variables" + * (Marsaglia & Tsang, 2000) + * http://www.jstatsoft.org/v05/i08/paper [pdf] + */ + +const ( + rn = 3.442619855899 +) + +func absInt32(i int32) uint32 { + if i < 0 { + return uint32(-i) + } + return uint32(i) +} + +// NormFloat64 returns a normally distributed float64 in the range +// [-math.MaxFloat64, +math.MaxFloat64] with +// standard normal distribution (mean = 0, stddev = 1). +// To produce a different normal distribution, callers can +// adjust the output using: +// +// sample = NormFloat64() * desiredStdDev + desiredMean +func (r *Rand) NormFloat64() float64 { + for { + j := int32(r.Uint32()) // Possibly negative + i := j & 0x7F + x := float64(j) * float64(wn[i]) + if absInt32(j) < kn[i] { + // This case should be hit better than 99% of the time. + return x + } + + if i == 0 { + // This extra work is only required for the base strip. + for { + x = -math.Log(r.Float64()) * (1.0 / rn) + y := -math.Log(r.Float64()) + if y+y >= x*x { + break + } + } + if j > 0 { + return rn + x + } + return -rn - x + } + if fn[i]+float32(r.Float64())*(fn[i-1]-fn[i]) < float32(math.Exp(-.5*x*x)) { + return x + } + } +} + +var kn = [128]uint32{ + 0x76ad2212, 0x0, 0x600f1b53, 0x6ce447a6, 0x725b46a2, + 0x7560051d, 0x774921eb, 0x789a25bd, 0x799045c3, 0x7a4bce5d, + 0x7adf629f, 0x7b5682a6, 0x7bb8a8c6, 0x7c0ae722, 0x7c50cce7, + 0x7c8cec5b, 0x7cc12cd6, 0x7ceefed2, 0x7d177e0b, 0x7d3b8883, + 0x7d5bce6c, 0x7d78dd64, 0x7d932886, 0x7dab0e57, 0x7dc0dd30, + 0x7dd4d688, 0x7de73185, 0x7df81cea, 0x7e07c0a3, 0x7e163efa, + 0x7e23b587, 0x7e303dfd, 0x7e3beec2, 0x7e46db77, 0x7e51155d, + 0x7e5aabb3, 0x7e63abf7, 0x7e6c222c, 0x7e741906, 0x7e7b9a18, + 0x7e82adfa, 0x7e895c63, 0x7e8fac4b, 0x7e95a3fb, 0x7e9b4924, + 0x7ea0a0ef, 0x7ea5b00d, 0x7eaa7ac3, 0x7eaf04f3, 0x7eb3522a, + 0x7eb765a5, 0x7ebb4259, 0x7ebeeafd, 0x7ec2620a, 0x7ec5a9c4, + 0x7ec8c441, 0x7ecbb365, 0x7ece78ed, 0x7ed11671, 0x7ed38d62, + 0x7ed5df12, 0x7ed80cb4, 0x7eda175c, 0x7edc0005, 0x7eddc78e, + 0x7edf6ebf, 0x7ee0f647, 0x7ee25ebe, 0x7ee3a8a9, 0x7ee4d473, + 0x7ee5e276, 0x7ee6d2f5, 0x7ee7a620, 0x7ee85c10, 0x7ee8f4cd, + 0x7ee97047, 0x7ee9ce59, 0x7eea0eca, 0x7eea3147, 0x7eea3568, + 0x7eea1aab, 0x7ee9e071, 0x7ee98602, 0x7ee90a88, 0x7ee86d08, + 0x7ee7ac6a, 0x7ee6c769, 0x7ee5bc9c, 0x7ee48a67, 0x7ee32efc, + 0x7ee1a857, 0x7edff42f, 0x7ede0ffa, 0x7edbf8d9, 0x7ed9ab94, + 0x7ed7248d, 0x7ed45fae, 0x7ed1585c, 0x7ece095f, 0x7eca6ccb, + 0x7ec67be2, 0x7ec22eee, 0x7ebd7d1a, 0x7eb85c35, 0x7eb2c075, + 0x7eac9c20, 0x7ea5df27, 0x7e9e769f, 0x7e964c16, 0x7e8d44ba, + 0x7e834033, 0x7e781728, 0x7e6b9933, 0x7e5d8a1a, 0x7e4d9ded, + 0x7e3b737a, 0x7e268c2f, 0x7e0e3ff5, 0x7df1aa5d, 0x7dcf8c72, + 0x7da61a1e, 0x7d72a0fb, 0x7d30e097, 0x7cd9b4ab, 0x7c600f1a, + 0x7ba90bdc, 0x7a722176, 
0x77d664e5, +} +var wn = [128]float32{ + 1.7290405e-09, 1.2680929e-10, 1.6897518e-10, 1.9862688e-10, + 2.2232431e-10, 2.4244937e-10, 2.601613e-10, 2.7611988e-10, + 2.9073963e-10, 3.042997e-10, 3.1699796e-10, 3.289802e-10, + 3.4035738e-10, 3.5121603e-10, 3.616251e-10, 3.7164058e-10, + 3.8130857e-10, 3.9066758e-10, 3.9975012e-10, 4.08584e-10, + 4.1719309e-10, 4.2559822e-10, 4.338176e-10, 4.418672e-10, + 4.497613e-10, 4.5751258e-10, 4.651324e-10, 4.7263105e-10, + 4.8001775e-10, 4.87301e-10, 4.944885e-10, 5.015873e-10, + 5.0860405e-10, 5.155446e-10, 5.2241467e-10, 5.2921934e-10, + 5.359635e-10, 5.426517e-10, 5.4928817e-10, 5.5587696e-10, + 5.624219e-10, 5.6892646e-10, 5.753941e-10, 5.818282e-10, + 5.882317e-10, 5.946077e-10, 6.00959e-10, 6.072884e-10, + 6.135985e-10, 6.19892e-10, 6.2617134e-10, 6.3243905e-10, + 6.386974e-10, 6.449488e-10, 6.511956e-10, 6.5744005e-10, + 6.6368433e-10, 6.699307e-10, 6.7618144e-10, 6.824387e-10, + 6.8870465e-10, 6.949815e-10, 7.012715e-10, 7.075768e-10, + 7.1389966e-10, 7.202424e-10, 7.266073e-10, 7.329966e-10, + 7.394128e-10, 7.4585826e-10, 7.5233547e-10, 7.58847e-10, + 7.653954e-10, 7.719835e-10, 7.7861395e-10, 7.852897e-10, + 7.920138e-10, 7.987892e-10, 8.0561924e-10, 8.125073e-10, + 8.194569e-10, 8.2647167e-10, 8.3355556e-10, 8.407127e-10, + 8.479473e-10, 8.55264e-10, 8.6266755e-10, 8.7016316e-10, + 8.777562e-10, 8.8545243e-10, 8.932582e-10, 9.0117996e-10, + 9.09225e-10, 9.174008e-10, 9.2571584e-10, 9.341788e-10, + 9.427997e-10, 9.515889e-10, 9.605579e-10, 9.697193e-10, + 9.790869e-10, 9.88676e-10, 9.985036e-10, 1.0085882e-09, + 1.0189509e-09, 1.0296151e-09, 1.0406069e-09, 1.0519566e-09, + 1.063698e-09, 1.0758702e-09, 1.0885183e-09, 1.1016947e-09, + 1.1154611e-09, 1.1298902e-09, 1.1450696e-09, 1.1611052e-09, + 1.1781276e-09, 1.1962995e-09, 1.2158287e-09, 1.2369856e-09, + 1.2601323e-09, 1.2857697e-09, 1.3146202e-09, 1.347784e-09, + 1.3870636e-09, 1.4357403e-09, 1.5008659e-09, 1.6030948e-09, +} +var fn = [128]float32{ + 1, 0.9635997, 0.9362827, 0.9130436, 0.89228165, 0.87324303, + 0.8555006, 0.8387836, 0.8229072, 0.8077383, 0.793177, + 0.7791461, 0.7655842, 0.7524416, 0.73967725, 0.7272569, + 0.7151515, 0.7033361, 0.69178915, 0.68049186, 0.6694277, + 0.658582, 0.6479418, 0.63749546, 0.6272325, 0.6171434, + 0.6072195, 0.5974532, 0.58783704, 0.5783647, 0.56903, + 0.5598274, 0.5507518, 0.54179835, 0.5329627, 0.52424055, + 0.5156282, 0.50712204, 0.49871865, 0.49041483, 0.48220766, + 0.4740943, 0.46607214, 0.4581387, 0.45029163, 0.44252872, + 0.43484783, 0.427247, 0.41972435, 0.41227803, 0.40490642, + 0.39760786, 0.3903808, 0.3832238, 0.37613547, 0.36911446, + 0.3621595, 0.35526937, 0.34844297, 0.34167916, 0.33497685, + 0.3283351, 0.3217529, 0.3152294, 0.30876362, 0.30235484, + 0.29600215, 0.28970486, 0.2834622, 0.2772735, 0.27113807, + 0.2650553, 0.25902456, 0.2530453, 0.24711695, 0.241239, + 0.23541094, 0.22963232, 0.2239027, 0.21822165, 0.21258877, + 0.20700371, 0.20146611, 0.19597565, 0.19053204, 0.18513499, + 0.17978427, 0.17447963, 0.1692209, 0.16400786, 0.15884037, + 0.15371831, 0.14864157, 0.14361008, 0.13862377, 0.13368265, + 0.12878671, 0.12393598, 0.119130544, 0.11437051, 0.10965602, + 0.104987256, 0.10036444, 0.095787846, 0.0912578, 0.08677467, + 0.0823389, 0.077950984, 0.073611505, 0.06932112, 0.06508058, + 0.06089077, 0.056752663, 0.0526674, 0.048636295, 0.044660863, + 0.040742867, 0.03688439, 0.033087887, 0.029356318, + 0.025693292, 0.022103304, 0.018592102, 0.015167298, + 0.011839478, 0.008624485, 0.005548995, 0.0026696292, +} diff --git 
a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rand.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rand.go
new file mode 100644
index 000000000..ffd0509bd
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rand.go
@@ -0,0 +1,374 @@
+// Copied from https://cs.opensource.google/go/x/exp/+/24438e51023af3bfc1db8aed43c1342817e8cfcd:rand/rand.go
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package rand implements pseudo-random number generators.
+//
+// Random numbers are generated by a Source. Top-level functions, such as
+// Float64 and Int, use a default shared Source that produces a deterministic
+// sequence of values each time a program is run. Use the Seed function to
+// initialize the default Source if different behavior is required for each run.
+// The default Source, a LockedSource, is safe for concurrent use by multiple
+// goroutines, but Sources created by NewSource are not. However, Sources are small
+// and it is reasonable to have a separate Source for each goroutine, seeded
+// differently, to avoid locking.
+//
+// For random numbers suitable for security-sensitive work, see the crypto/rand
+// package.
+package rand
+
+import "sync"
+
+// A Source represents a source of uniformly-distributed
+// pseudo-random uint64 values in the range [0, 1<<64).
+type Source interface {
+	Uint64() uint64
+	Seed(seed uint64)
+}
+
+// NewSource returns a new pseudo-random Source seeded with the given value.
+func NewSource(seed uint64) Source {
+	var rng PCGSource
+	rng.Seed(seed)
+	return &rng
+}
+
+// A Rand is a source of random numbers.
+type Rand struct {
+	src Source
+
+	// readVal contains remainder of 64-bit integer used for bytes
+	// generation during most recent Read call.
+	// It is saved so next Read call can start where the previous
+	// one finished.
+	readVal uint64
+	// readPos indicates the number of low-order bytes of readVal
+	// that are still valid.
+	readPos int8
+}
+
+// New returns a new Rand that uses random values from src
+// to generate other random values.
+func New(src Source) *Rand {
+	return &Rand{src: src}
+}
+
+// Seed uses the provided seed value to initialize the generator to a deterministic state.
+// Seed should not be called concurrently with any other Rand method.
+func (r *Rand) Seed(seed uint64) {
+	if lk, ok := r.src.(*LockedSource); ok {
+		lk.seedPos(seed, &r.readPos)
+		return
+	}
+
+	r.src.Seed(seed)
+	r.readPos = 0
+}
+
+// Uint64 returns a pseudo-random 64-bit integer as a uint64.
+func (r *Rand) Uint64() uint64 { return r.src.Uint64() }
+
+// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
+func (r *Rand) Int63() int64 { return int64(r.src.Uint64() &^ (1 << 63)) }
+
+// Uint32 returns a pseudo-random 32-bit value as a uint32.
+func (r *Rand) Uint32() uint32 { return uint32(r.Uint64() >> 32) }
+
+// Int31 returns a non-negative pseudo-random 31-bit integer as an int32.
+func (r *Rand) Int31() int32 { return int32(r.Uint64() >> 33) }
+
+// Int returns a non-negative pseudo-random int.
+func (r *Rand) Int() int {
+	u := uint(r.Uint64())
+	return int(u << 1 >> 1) // clear sign bit.
+}
+
+const maxUint64 = (1 << 64) - 1
+
+// Uint64n returns, as a uint64, a pseudo-random number in [0,n).
+// It is guaranteed more uniform than taking a Source value mod n
+// for any n that is not a power of 2.
+func (r *Rand) Uint64n(n uint64) uint64 {
+	if n&(n-1) == 0 { // n is power of two, can mask
+		if n == 0 {
+			panic("invalid argument to Uint64n")
+		}
+		return r.Uint64() & (n - 1)
+	}
+	// If n does not divide v, to avoid bias we must not use
+	// a v that is within maxUint64%n of the top of the range.
+	v := r.Uint64()
+	if v > maxUint64-n { // Fast check.
+		ceiling := maxUint64 - maxUint64%n
+		for v >= ceiling {
+			v = r.Uint64()
+		}
+	}
+
+	return v % n
+}
+
+// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func (r *Rand) Int63n(n int64) int64 {
+	if n <= 0 {
+		panic("invalid argument to Int63n")
+	}
+	return int64(r.Uint64n(uint64(n)))
+}
+
+// Int31n returns, as an int32, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func (r *Rand) Int31n(n int32) int32 {
+	if n <= 0 {
+		panic("invalid argument to Int31n")
+	}
+	// TODO: Avoid some 64-bit ops to make it more efficient on 32-bit machines.
+	return int32(r.Uint64n(uint64(n)))
+}
+
+// Intn returns, as an int, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func (r *Rand) Intn(n int) int {
+	if n <= 0 {
+		panic("invalid argument to Intn")
+	}
+	// TODO: Avoid some 64-bit ops to make it more efficient on 32-bit machines.
+	return int(r.Uint64n(uint64(n)))
+}
+
+// Float64 returns, as a float64, a pseudo-random number in [0.0,1.0).
+func (r *Rand) Float64() float64 {
+	// There is one bug in the value stream: r.Int63() may be so close
+	// to 1<<63 that the division rounds up to 1.0, and we've guaranteed
+	// that the result is always less than 1.0.
+	//
+	// We tried to fix this by mapping 1.0 back to 0.0, but since float64
+	// values near 0 are much denser than near 1, mapping 1 to 0 caused
+	// a theoretically significant overshoot in the probability of returning 0.
+	// Instead of that, if we round up to 1, just try again.
+	// Getting 1 only happens 1/2⁵³ of the time, so most clients
+	// will not observe it anyway.
+again:
+	f := float64(r.Uint64n(1<<53)) / (1 << 53)
+	if f == 1.0 {
+		goto again // resample; this branch is taken O(never)
+	}
+	return f
+}
+
+// Float32 returns, as a float32, a pseudo-random number in [0.0,1.0).
+func (r *Rand) Float32() float32 {
+	// We do not want to return 1.0.
+	// This only happens 1/2²⁴ of the time (plus the 1/2⁵³ of the time in Float64).
+again:
+	f := float32(r.Float64())
+	if f == 1 {
+		goto again // resample; this branch is taken O(very rarely)
+	}
+	return f
+}
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n).
+func (r *Rand) Perm(n int) []int {
+	m := make([]int, n)
+	// In the following loop, the iteration when i=0 always swaps m[0] with m[0].
+	// A change to remove this useless iteration is to assign 1 to i in the init
+	// statement. But Perm also affects r. Making this change will affect
+	// the final state of r. So this change can't be made for compatibility
+	// reasons for Go 1.
+	for i := 0; i < n; i++ {
+		j := r.Intn(i + 1)
+		m[i] = m[j]
+		m[j] = i
+	}
+	return m
+}
+
+// Shuffle pseudo-randomizes the order of elements.
+// n is the number of elements. Shuffle panics if n < 0.
+// swap swaps the elements with indexes i and j.
+func (r *Rand) Shuffle(n int, swap func(i, j int)) {
+	if n < 0 {
+		panic("invalid argument to Shuffle")
+	}
+
+	// Fisher-Yates shuffle: https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle
+	// Shuffle really ought not be called with n that doesn't fit in 32 bits.
+ // Not only will it take a very long time, but with 2³¹! possible permutations, + // there's no way that any PRNG can have a big enough internal state to + // generate even a minuscule percentage of the possible permutations. + // Nevertheless, the right API signature accepts an int n, so handle it as best we can. + i := n - 1 + for ; i > 1<<31-1-1; i-- { + j := int(r.Int63n(int64(i + 1))) + swap(i, j) + } + for ; i > 0; i-- { + j := int(r.Int31n(int32(i + 1))) + swap(i, j) + } +} + +// Read generates len(p) random bytes and writes them into p. It +// always returns len(p) and a nil error. +// Read should not be called concurrently with any other Rand method unless +// the underlying source is a LockedSource. +func (r *Rand) Read(p []byte) (n int, err error) { + if lk, ok := r.src.(*LockedSource); ok { + return lk.Read(p, &r.readVal, &r.readPos) + } + return read(p, r.src, &r.readVal, &r.readPos) +} + +func read(p []byte, src Source, readVal *uint64, readPos *int8) (n int, err error) { + pos := *readPos + val := *readVal + rng, _ := src.(*PCGSource) + for n = 0; n < len(p); n++ { + if pos == 0 { + if rng != nil { + val = rng.Uint64() + } else { + val = src.Uint64() + } + pos = 8 + } + p[n] = byte(val) + val >>= 8 + pos-- + } + *readPos = pos + *readVal = val + return +} + +/* + * Top-level convenience functions + */ + +var globalRand = New(&LockedSource{src: *NewSource(1).(*PCGSource)}) + +// Type assert that globalRand's source is a LockedSource whose src is a PCGSource. +var _ PCGSource = globalRand.src.(*LockedSource).src + +// Seed uses the provided seed value to initialize the default Source to a +// deterministic state. If Seed is not called, the generator behaves as +// if seeded by Seed(1). +// Seed, unlike the Rand.Seed method, is safe for concurrent use. +func Seed(seed uint64) { globalRand.Seed(seed) } + +// Int63 returns a non-negative pseudo-random 63-bit integer as an int64 +// from the default Source. +func Int63() int64 { return globalRand.Int63() } + +// Uint32 returns a pseudo-random 32-bit value as a uint32 +// from the default Source. +func Uint32() uint32 { return globalRand.Uint32() } + +// Uint64 returns a pseudo-random 64-bit value as a uint64 +// from the default Source. +func Uint64() uint64 { return globalRand.Uint64() } + +// Int31 returns a non-negative pseudo-random 31-bit integer as an int32 +// from the default Source. +func Int31() int32 { return globalRand.Int31() } + +// Int returns a non-negative pseudo-random int from the default Source. +func Int() int { return globalRand.Int() } + +// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n) +// from the default Source. +// It panics if n <= 0. +func Int63n(n int64) int64 { return globalRand.Int63n(n) } + +// Int31n returns, as an int32, a non-negative pseudo-random number in [0,n) +// from the default Source. +// It panics if n <= 0. +func Int31n(n int32) int32 { return globalRand.Int31n(n) } + +// Intn returns, as an int, a non-negative pseudo-random number in [0,n) +// from the default Source. +// It panics if n <= 0. +func Intn(n int) int { return globalRand.Intn(n) } + +// Float64 returns, as a float64, a pseudo-random number in [0.0,1.0) +// from the default Source. +func Float64() float64 { return globalRand.Float64() } + +// Float32 returns, as a float32, a pseudo-random number in [0.0,1.0) +// from the default Source. 
+func Float32() float32 { return globalRand.Float32() }
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n)
+// from the default Source.
+func Perm(n int) []int { return globalRand.Perm(n) }
+
+// Shuffle pseudo-randomizes the order of elements using the default Source.
+// n is the number of elements. Shuffle panics if n < 0.
+// swap swaps the elements with indexes i and j.
+func Shuffle(n int, swap func(i, j int)) { globalRand.Shuffle(n, swap) }
+
+// Read generates len(p) random bytes from the default Source and
+// writes them into p. It always returns len(p) and a nil error.
+// Read, unlike the Rand.Read method, is safe for concurrent use.
+func Read(p []byte) (n int, err error) { return globalRand.Read(p) }
+
+// NormFloat64 returns a normally distributed float64 in the range
+// [-math.MaxFloat64, +math.MaxFloat64] with
+// standard normal distribution (mean = 0, stddev = 1)
+// from the default Source.
+// To produce a different normal distribution, callers can
+// adjust the output using:
+//
+//	sample = NormFloat64() * desiredStdDev + desiredMean
+func NormFloat64() float64 { return globalRand.NormFloat64() }
+
+// ExpFloat64 returns an exponentially distributed float64 in the range
+// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter
+// (lambda) is 1 and whose mean is 1/lambda (1) from the default Source.
+// To produce a distribution with a different rate parameter,
+// callers can adjust the output using:
+//
+//	sample = ExpFloat64() / desiredRateParameter
+func ExpFloat64() float64 { return globalRand.ExpFloat64() }
+
+// LockedSource is an implementation of Source that is concurrency-safe.
+// A Rand using a LockedSource is safe for concurrent use.
+//
+// The zero value of LockedSource is valid, but should be seeded before use.
+type LockedSource struct {
+	lk  sync.Mutex
+	src PCGSource
+}
+
+func (s *LockedSource) Uint64() (n uint64) {
+	s.lk.Lock()
+	n = s.src.Uint64()
+	s.lk.Unlock()
+	return
+}
+
+func (s *LockedSource) Seed(seed uint64) {
+	s.lk.Lock()
+	s.src.Seed(seed)
+	s.lk.Unlock()
+}
+
+// seedPos implements Seed for a LockedSource without a race condition.
+func (s *LockedSource) seedPos(seed uint64, readPos *int8) {
+	s.lk.Lock()
+	s.src.Seed(seed)
+	*readPos = 0
+	s.lk.Unlock()
+}
+
+// Read implements Read for a LockedSource.
+func (s *LockedSource) Read(p []byte, readVal *uint64, readPos *int8) (n int, err error) {
+	s.lk.Lock()
+	n, err = read(p, &s.src, readVal, readPos)
+	s.lk.Unlock()
+	return
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rng.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rng.go
new file mode 100644
index 000000000..f04f98798
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rng.go
@@ -0,0 +1,93 @@
+// Copied from https://cs.opensource.google/go/x/exp/+/24438e51023af3bfc1db8aed43c1342817e8cfcd:rand/rng.go
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand
+
+import (
+	"encoding/binary"
+	"io"
+	"math/bits"
+)
+
+// PCGSource is an implementation of a 64-bit permuted congruential
+// generator as defined in
+//
+//	PCG: A Family of Simple Fast Space-Efficient Statistically Good
+//	Algorithms for Random Number Generation
+//	Melissa E. O’Neill, Harvey Mudd College
+//	http://www.pcg-random.org/pdf/toms-oneill-pcg-family-v1.02.pdf
+//
+// The generator here is the congruential generator PCG XSL RR 128/64 (LCG)
+// as found in the software available at http://www.pcg-random.org/.
+// It has period 2^128 with 128 bits of state, producing 64-bit values.
+// Its state is represented by two uint64 words.
+type PCGSource struct {
+	low  uint64
+	high uint64
+}
+
+const (
+	maxUint32 = (1 << 32) - 1
+
+	multiplier = 47026247687942121848144207491837523525
+	mulHigh    = multiplier >> 64
+	mulLow     = multiplier & maxUint64
+
+	increment = 117397592171526113268558934119004209487
+	incHigh   = increment >> 64
+	incLow    = increment & maxUint64
+
+	// TODO: Use these?
+	initializer = 245720598905631564143578724636268694099
+	initHigh    = initializer >> 64
+	initLow     = initializer & maxUint64
+)
+
+// Seed uses the provided seed value to initialize the generator to a deterministic state.
+func (pcg *PCGSource) Seed(seed uint64) {
+	pcg.low = seed
+	pcg.high = seed // TODO: What is right?
+}
+
+// Uint64 returns a pseudo-random 64-bit unsigned integer as a uint64.
+func (pcg *PCGSource) Uint64() uint64 {
+	pcg.multiply()
+	pcg.add()
+	// XOR high and low 64 bits together and rotate right by high 6 bits of state.
+	return bits.RotateLeft64(pcg.high^pcg.low, -int(pcg.high>>58))
+}
+
+func (pcg *PCGSource) add() {
+	var carry uint64
+	pcg.low, carry = Add64(pcg.low, incLow, 0)
+	pcg.high, _ = Add64(pcg.high, incHigh, carry)
+}
+
+func (pcg *PCGSource) multiply() {
+	hi, lo := Mul64(pcg.low, mulLow)
+	hi += pcg.high * mulLow
+	hi += pcg.low * mulHigh
+	pcg.low = lo
+	pcg.high = hi
+}
+
+// MarshalBinary returns the binary representation of the current state of the generator.
+func (pcg *PCGSource) MarshalBinary() ([]byte, error) {
+	var buf [16]byte
+	binary.BigEndian.PutUint64(buf[:8], pcg.high)
+	binary.BigEndian.PutUint64(buf[8:], pcg.low)
+	return buf[:], nil
+}
+
+// UnmarshalBinary sets the state of the generator to the state represented in data.
+func (pcg *PCGSource) UnmarshalBinary(data []byte) error {
+	if len(data) < 16 {
+		return io.ErrUnexpectedEOF
+	}
+	pcg.low = binary.BigEndian.Uint64(data[8:])
+	pcg.high = binary.BigEndian.Uint64(data[:8])
+	return nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go
new file mode 100644
index 000000000..961607432
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go
@@ -0,0 +1,39 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package randutil provides common random number utilities.
+package randutil
+
+import (
+	crand "crypto/rand"
+	"fmt"
+	"io"
+
+	xrand "go.mongodb.org/mongo-driver/internal/randutil/rand"
+)
+
+// NewLockedRand returns a new "x/exp/rand" pseudo-random number generator seeded with a
+// cryptographically-secure random number.
+// It is safe to use from multiple goroutines.
+func NewLockedRand() *xrand.Rand {
+	var randSrc = new(xrand.LockedSource)
+	randSrc.Seed(cryptoSeed())
+	return xrand.New(randSrc)
+}
+
+// cryptoSeed returns a random uint64 read from the "crypto/rand" random number generator. It is
+// intended to be used to seed pseudorandom number generators at package initialization.
It panics +// if it encounters any errors. +func cryptoSeed() uint64 { + var b [8]byte + _, err := io.ReadFull(crand.Reader, b[:]) + if err != nil { + panic(fmt.Errorf("failed to read 8 bytes from a \"crypto/rand\".Reader: %v", err)) + } + + return (uint64(b[0]) << 0) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) | + (uint64(b[4]) << 32) | (uint64(b[5]) << 40) | (uint64(b[6]) << 48) | (uint64(b[7]) << 56) +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/semaphore.go b/vendor/go.mongodb.org/mongo-driver/internal/semaphore.go deleted file mode 100644 index 792e531a5..000000000 --- a/vendor/go.mongodb.org/mongo-driver/internal/semaphore.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package internal - -import ( - "context" - "errors" -) - -// NewSemaphore creates a new semaphore. -func NewSemaphore(slots uint64) *Semaphore { - ch := make(chan struct{}, slots) - for i := uint64(0); i < slots; i++ { - ch <- struct{}{} - } - - return &Semaphore{ - permits: ch, - } -} - -// Semaphore is a synchronization primitive that controls access -// to a common resource. -type Semaphore struct { - permits chan struct{} -} - -// Len gets the number of permits available. -func (s *Semaphore) Len() uint64 { - return uint64(len(s.permits)) -} - -// Wait waits until a resource is available or until the context -// is done. -func (s *Semaphore) Wait(ctx context.Context) error { - select { - case <-s.permits: - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -// Release releases a resource back into the pool. -func (s *Semaphore) Release() error { - select { - case s.permits <- struct{}{}: - default: - return errors.New("internal.Semaphore.Release: attempt to release more resources than are available") - } - - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/string_util.go b/vendor/go.mongodb.org/mongo-driver/internal/string_util.go index db1e1890e..6cafa791d 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/string_util.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/string_util.go @@ -33,7 +33,7 @@ func StringSliceFromRawValue(name string, val bson.RawValue) ([]string, error) { return nil, err } - var strs []string + strs := make([]string, 0, len(arrayValues)) for _, arrayVal := range arrayValues { str, ok := arrayVal.StringValueOK() if !ok { diff --git a/vendor/go.mongodb.org/mongo-driver/internal/uri_validation_errors.go b/vendor/go.mongodb.org/mongo-driver/internal/uri_validation_errors.go index 8d1fcdc75..21e73002a 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/uri_validation_errors.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/uri_validation_errors.go @@ -15,4 +15,8 @@ var ( ErrLoadBalancedWithReplicaSet = errors.New("loadBalanced cannot be set to true if a replica set name is specified") // ErrLoadBalancedWithDirectConnection is returned when loadBalanced=true is specified in a URI with the directConnection option. ErrLoadBalancedWithDirectConnection = errors.New("loadBalanced cannot be set to true if the direct connection option is specified") + // ErrSRVMaxHostsWithReplicaSet is returned when srvMaxHosts > 0 is specified in a URI with the replicaSet option. 
+	ErrSRVMaxHostsWithReplicaSet = errors.New("srvMaxHosts cannot be a positive value if a replica set name is specified")
+	// ErrSRVMaxHostsWithLoadBalanced is returned when srvMaxHosts > 0 is specified in a URI with loadBalanced=true.
+	ErrSRVMaxHostsWithLoadBalanced = errors.New("srvMaxHosts cannot be a positive value if loadBalanced is set to true")
 )
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/uuid/uuid.go b/vendor/go.mongodb.org/mongo-driver/internal/uuid/uuid.go
new file mode 100644
index 000000000..78f16645d
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/uuid/uuid.go
@@ -0,0 +1,53 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package uuid
+
+import (
+	"io"
+
+	"go.mongodb.org/mongo-driver/internal/randutil"
+)
+
+// UUID represents a UUID.
+type UUID [16]byte
+
+// A source is a UUID generator that reads random values from an io.Reader.
+// It should be safe to use from multiple goroutines.
+type source struct {
+	random io.Reader
+}
+
+// new returns a random UUIDv4 with bytes read from the source's random number generator.
+func (s *source) new() (UUID, error) {
+	var uuid UUID
+	_, err := io.ReadFull(s.random, uuid[:])
+	if err != nil {
+		return UUID{}, err
+	}
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return uuid, nil
+}
+
+// newSource returns a source that uses a pseudo-random number generator from the randutil package.
+// It is intended to be used to initialize the package-global UUID generator.
+func newSource() *source {
+	return &source{
+		random: randutil.NewLockedRand(),
+	}
+}
+
+// globalSource is a package-global pseudo-random UUID generator.
+var globalSource = newSource()
+
+// New returns a random UUIDv4. It uses a global pseudo-random number generator in randutil
+// that is seeded at package initialization.
+//
+// New should not be used to generate cryptographically-secure random UUIDs.
+func New() (UUID, error) {
+	return globalSource.new()
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go b/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go
index 0b7432f40..966e43cda 100644
--- a/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go
@@ -1,3 +1,9 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
 package mongo
 
 import (
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go
index 56736a238..2c58f2229 100644
--- a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go
@@ -27,6 +27,7 @@ type bulkWriteBatch struct {
 // bulkWrite performs a bulk write operation
 type bulkWrite struct {
+	comment interface{}
 	ordered *bool
 	bypassDocumentValidation *bool
 	models []WriteModel
@@ -35,6 +36,7 @@ type bulkWrite struct {
 	selector description.ServerSelector
 	writeConcern *writeconcern.WriteConcern
 	result BulkWriteResult
+	let interface{}
 }
 
 func (bw *bulkWrite) execute(ctx context.Context) error {
@@ -59,11 +61,6 @@ func (bw *bulkWrite) execute(ctx context.Context) error {
 			continue
 		}
 
-		bypassDocValidation := bw.bypassDocumentValidation
-		if bypassDocValidation != nil && !*bypassDocValidation {
-			bypassDocValidation = nil
-		}
-
 		batchRes, batchErr, err := bw.runBatch(ctx, batch)
 
 		bw.mergeResults(batchRes)
@@ -118,7 +115,7 @@ func (bw *bulkWrite) runBatch(ctx context.Context, batch bulkWriteBatch) (BulkWr
 			batchErr.Labels = writeErr.Labels
 			batchErr.WriteConcernError = convertDriverWriteConcernError(writeErr.WriteConcernError)
 		}
-		batchRes.InsertedCount = int64(res.N)
+		batchRes.InsertedCount = res.N
 	case *DeleteOneModel, *DeleteManyModel:
 		res, err := bw.runDelete(ctx, batch)
 		if err != nil {
@@ -130,7 +127,7 @@ func (bw *bulkWrite) runBatch(ctx context.Context, batch bulkWriteBatch) (BulkWr
 			batchErr.Labels = writeErr.Labels
 			batchErr.WriteConcernError = convertDriverWriteConcernError(writeErr.WriteConcernError)
 		}
-		batchRes.DeletedCount = int64(res.N)
+		batchRes.DeletedCount = res.N
 	case *ReplaceOneModel, *UpdateOneModel, *UpdateManyModel:
 		res, err := bw.runUpdate(ctx, batch)
 		if err != nil {
@@ -142,8 +139,8 @@ func (bw *bulkWrite) runBatch(ctx context.Context, batch bulkWriteBatch) (BulkWr
 			batchErr.Labels = writeErr.Labels
 			batchErr.WriteConcernError = convertDriverWriteConcernError(writeErr.WriteConcernError)
 		}
-		batchRes.MatchedCount = int64(res.N)
-		batchRes.ModifiedCount = int64(res.NModified)
+		batchRes.MatchedCount = res.N
+		batchRes.ModifiedCount = res.NModified
 		batchRes.UpsertedCount = int64(len(res.Upserted))
 		for _, upsert := range res.Upserted {
 			batchRes.UpsertedIDs[int64(batch.indexes[upsert.Index])] = upsert.ID
@@ -182,7 +179,14 @@ func (bw *bulkWrite) runInsert(ctx context.Context, batch bulkWriteBatch) (opera
 		ServerSelector(bw.selector).ClusterClock(bw.collection.client.clock).
 		Database(bw.collection.db.name).Collection(bw.collection.name).
 		Deployment(bw.collection.client.deployment).Crypt(bw.collection.client.cryptFLE).
-		ServerAPI(bw.collection.client.serverAPI)
+		ServerAPI(bw.collection.client.serverAPI).Timeout(bw.collection.client.timeout)
+	if bw.comment != nil {
+		comment, err := transformValue(bw.collection.registry, bw.comment, true, "comment")
+		if err != nil {
+			return op.Result(), err
+		}
+		op.Comment(comment)
+	}
 	if bw.bypassDocumentValidation != nil && *bw.bypassDocumentValidation {
 		op = op.BypassDocumentValidation(*bw.bypassDocumentValidation)
 	}
@@ -232,7 +236,21 @@ func (bw *bulkWrite) runDelete(ctx context.Context, batch bulkWriteBatch) (opera
 		ServerSelector(bw.selector).ClusterClock(bw.collection.client.clock).
 		Database(bw.collection.db.name).Collection(bw.collection.name).
Deployment(bw.collection.client.deployment).Crypt(bw.collection.client.cryptFLE).Hint(hasHint). - ServerAPI(bw.collection.client.serverAPI) + ServerAPI(bw.collection.client.serverAPI).Timeout(bw.collection.client.timeout) + if bw.comment != nil { + comment, err := transformValue(bw.collection.registry, bw.comment, true, "comment") + if err != nil { + return op.Result(), err + } + op.Comment(comment) + } + if bw.let != nil { + let, err := transformBsoncoreDocument(bw.collection.registry, bw.let, true, "let") + if err != nil { + return operation.DeleteResult{}, err + } + op = op.Let(let) + } if bw.ordered != nil { op = op.Ordered(*bw.ordered) } @@ -313,7 +331,21 @@ func (bw *bulkWrite) runUpdate(ctx context.Context, batch bulkWriteBatch) (opera ServerSelector(bw.selector).ClusterClock(bw.collection.client.clock). Database(bw.collection.db.name).Collection(bw.collection.name). Deployment(bw.collection.client.deployment).Crypt(bw.collection.client.cryptFLE).Hint(hasHint). - ArrayFilters(hasArrayFilters).ServerAPI(bw.collection.client.serverAPI) + ArrayFilters(hasArrayFilters).ServerAPI(bw.collection.client.serverAPI).Timeout(bw.collection.client.timeout) + if bw.comment != nil { + comment, err := transformValue(bw.collection.registry, bw.comment, true, "comment") + if err != nil { + return op.Result(), err + } + op.Comment(comment) + } + if bw.let != nil { + let, err := transformBsoncoreDocument(bw.collection.registry, bw.let, true, "let") + if err != nil { + return operation.UpdateResult{}, err + } + op = op.Let(let) + } if bw.ordered != nil { op = op.Ordered(*bw.ordered) } @@ -385,7 +417,6 @@ func createUpdateDoc( } updateDoc, _ = bsoncore.AppendDocumentEnd(updateDoc, uidx) - return updateDoc, nil } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write_models.go b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write_models.go index b4b8e3ef8..64f458918 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write_models.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write_models.go @@ -152,7 +152,7 @@ func (rom *ReplaceOneModel) SetFilter(filter interface{}) *ReplaceOneModel { } // SetReplacement specifies a document that will be used to replace the selected document. It cannot be nil and cannot -// contain any update operators (https://docs.mongodb.com/manual/reference/operator/update/). +// contain any update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). func (rom *ReplaceOneModel) SetReplacement(rep interface{}) *ReplaceOneModel { rom.Replacement = rep return rom @@ -210,7 +210,7 @@ func (uom *UpdateOneModel) SetFilter(filter interface{}) *UpdateOneModel { } // SetUpdate specifies the modifications to be made to the selected document. The value must be a document containing -// update operators (https://docs.mongodb.com/manual/reference/operator/update/). It cannot be nil or empty. +// update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). It cannot be nil or empty. func (uom *UpdateOneModel) SetUpdate(update interface{}) *UpdateOneModel { uom.Update = update return uom @@ -274,7 +274,7 @@ func (umm *UpdateManyModel) SetFilter(filter interface{}) *UpdateManyModel { } // SetUpdate specifies the modifications to be made to the selected documents. The value must be a document containing -// update operators (https://docs.mongodb.com/manual/reference/operator/update/). It cannot be nil or empty. +// update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). It cannot be nil or empty. 
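The comment and let fields threaded through runInsert/runDelete/runUpdate above surface on BulkWriteOptions. A short caller-side sketch, assuming the v1.10 SetComment/SetLet setters; the collection, comment string, and $$target variable are illustrative only:

```go
package example

import (
	"context"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// bulkCleanup: the comment is attached to each insert/update/delete batch, and
// the let document defines variables ($$target below) available to update and
// delete filters within the bulk write.
func bulkCleanup(ctx context.Context, coll *mongo.Collection) (*mongo.BulkWriteResult, error) {
	models := []mongo.WriteModel{
		mongo.NewInsertOneModel().SetDocument(bson.D{{"status", "new"}}),
		mongo.NewDeleteManyModel().SetFilter(
			bson.D{{"$expr", bson.D{{"$eq", bson.A{"$status", "$$target"}}}}},
		),
	}
	opts := options.BulkWrite().
		SetComment("cleanup-job").          // echoed in logs and the profiler
		SetLet(bson.D{{"target", "stale"}}) // bound once for the whole bulk write
	return coll.BulkWrite(ctx, models, opts)
}
```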
func (umm *UpdateManyModel) SetUpdate(update interface{}) *UpdateManyModel { umm.Update = update return umm diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go index c10c0396b..c809002ab 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go @@ -17,6 +17,7 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/internal" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readconcern" @@ -38,7 +39,7 @@ var ( resumableErrorLabel = "ResumableChangeStreamError" errorCursorNotFound int32 = 43 // CursorNotFound error code - // Whitelist of error codes that are considered resumable. + // Allowlist of error codes that are considered resumable. resumableChangeStreamErrors = map[int32]struct{}{ 6: {}, // HostUnreachable 7: {}, // HostNotFound @@ -47,11 +48,11 @@ var ( 189: {}, // PrimarySteppedDown 262: {}, // ExceededTimeLimit 9001: {}, // SocketException - 10107: {}, // NotMaster + 10107: {}, // NotPrimary 11600: {}, // InterruptedAtShutdown 11602: {}, // InterruptedDueToReplStateChange - 13435: {}, // NotMasterNoSlaveOk - 13436: {}, // NotMasterOrSecondary + 13435: {}, // NotPrimaryNoSecondaryOK + 13436: {}, // NotPrimaryOrSecondary 63: {}, // StaleShardVersion 150: {}, // StaleEpoch 13388: {}, // StaleConfig @@ -63,27 +64,28 @@ var ( // ChangeStream is used to iterate over a stream of events. Each event can be decoded into a Go type via the Decode // method or accessed as raw BSON via the Current field. This type is not goroutine safe and must not be used // concurrently by multiple goroutines. For more information about change streams, see -// https://docs.mongodb.com/manual/changeStreams/. +// https://www.mongodb.com/docs/manual/changeStreams/. type ChangeStream struct { // Current is the BSON bytes of the current event. This property is only valid until the next call to Next or // TryNext. If continued access is required, a copy must be made. 
Current bson.Raw - aggregate *operation.Aggregate - pipelineSlice []bsoncore.Document - cursor changeStreamCursor - cursorOptions driver.CursorOptions - batch []bsoncore.Document - resumeToken bson.Raw - err error - sess *session.Client - client *Client - registry *bsoncodec.Registry - streamType StreamType - options *options.ChangeStreamOptions - selector description.ServerSelector - operationTime *primitive.Timestamp - wireVersion *description.VersionRange + aggregate *operation.Aggregate + pipelineSlice []bsoncore.Document + pipelineOptions map[string]bsoncore.Value + cursor changeStreamCursor + cursorOptions driver.CursorOptions + batch []bsoncore.Document + resumeToken bson.Raw + err error + sess *session.Client + client *Client + registry *bsoncodec.Registry + streamType StreamType + options *options.ChangeStreamOptions + selector description.ServerSelector + operationTime *primitive.Timestamp + wireVersion *description.VersionRange } type changeStreamConfig struct { @@ -94,7 +96,7 @@ type changeStreamConfig struct { streamType StreamType collectionName string databaseName string - crypt *driver.Crypt + crypt driver.Crypt } func newChangeStream(ctx context.Context, config changeStreamConfig, pipeline interface{}, @@ -131,17 +133,57 @@ func newChangeStream(ctx context.Context, config changeStreamConfig, pipeline in ReadPreference(config.readPreference).ReadConcern(config.readConcern). Deployment(cs.client.deployment).ClusterClock(cs.client.clock). CommandMonitor(cs.client.monitor).Session(cs.sess).ServerSelector(cs.selector).Retry(driver.RetryNone). - ServerAPI(cs.client.serverAPI).Crypt(config.crypt) + ServerAPI(cs.client.serverAPI).Crypt(config.crypt).Timeout(cs.client.timeout) if cs.options.Collation != nil { cs.aggregate.Collation(bsoncore.Document(cs.options.Collation.ToDocument())) } + if comment := cs.options.Comment; comment != nil { + cs.aggregate.Comment(*comment) + + commentVal, err := transformValue(cs.registry, comment, true, "comment") + if err != nil { + return nil, err + } + cs.cursorOptions.Comment = commentVal + } if cs.options.BatchSize != nil { cs.aggregate.BatchSize(*cs.options.BatchSize) cs.cursorOptions.BatchSize = *cs.options.BatchSize } if cs.options.MaxAwaitTime != nil { - cs.cursorOptions.MaxTimeMS = int64(time.Duration(*cs.options.MaxAwaitTime) / time.Millisecond) + cs.cursorOptions.MaxTimeMS = int64(*cs.options.MaxAwaitTime / time.Millisecond) + } + if cs.options.Custom != nil { + // Marshal all custom options before passing to the initial aggregate. Return + // any errors from Marshaling. + customOptions := make(map[string]bsoncore.Value) + for optionName, optionValue := range cs.options.Custom { + bsonType, bsonData, err := bson.MarshalValueWithRegistry(cs.registry, optionValue) + if err != nil { + cs.err = err + closeImplicitSession(cs.sess) + return nil, cs.Err() + } + optionValueBSON := bsoncore.Value{Type: bsonType, Data: bsonData} + customOptions[optionName] = optionValueBSON + } + cs.aggregate.CustomOptions(customOptions) + } + if cs.options.CustomPipeline != nil { + // Marshal all custom pipeline options before building pipeline slice. Return + // any errors from Marshaling. 
+ cs.pipelineOptions = make(map[string]bsoncore.Value) + for optionName, optionValue := range cs.options.CustomPipeline { + bsonType, bsonData, err := bson.MarshalValueWithRegistry(cs.registry, optionValue) + if err != nil { + cs.err = err + closeImplicitSession(cs.sess) + return nil, cs.Err() + } + optionValueBSON := bsoncore.Value{Type: bsonType, Data: bsonData} + cs.pipelineOptions[optionName] = optionValueBSON + } } switch cs.streamType { @@ -212,7 +254,7 @@ func (cs *ChangeStream) executeOperation(ctx context.Context, resuming bool) err cs.aggregate.Deployment(cs.createOperationDeployment(server, conn)) if resuming { - cs.replaceOptions(ctx, cs.wireVersion) + cs.replaceOptions(cs.wireVersion) csOptDoc := cs.createPipelineOptionsDoc() pipIdx, pipDoc := bsoncore.AppendDocumentStart(nil) @@ -229,6 +271,16 @@ func (cs *ChangeStream) executeOperation(ctx context.Context, resuming bool) err cs.aggregate.Pipeline(plArr) } + // If no deadline is set on the passed-in context, cs.client.timeout is set, and context is not already + // a Timeout context, honor cs.client.timeout in new Timeout context for change stream operation execution + // and potential retry. + if _, deadlineSet := ctx.Deadline(); !deadlineSet && cs.client.timeout != nil && !internal.IsTimeoutContext(ctx) { + newCtx, cancelFunc := internal.MakeTimeoutContext(ctx, *cs.client.timeout) + // Redefine ctx to be the new timeout-derived context. + ctx = newCtx + // Cancel the timeout-derived context at the end of executeOperation to avoid a context leak. + defer cancelFunc() + } if original := cs.aggregate.Execute(ctx); original != nil { retryableRead := cs.client.retryReads && cs.wireVersion != nil && cs.wireVersion.Max >= 6 if !retryableRead { @@ -363,7 +415,16 @@ func (cs *ChangeStream) createPipelineOptionsDoc() bsoncore.Document { } if cs.options.FullDocument != nil { - plDoc = bsoncore.AppendStringElement(plDoc, "fullDocument", string(*cs.options.FullDocument)) + // Only append a default "fullDocument" field if wire version is less than 6 (3.6). Otherwise, + // the server will assume users want the default behavior, and "fullDocument" does not need to be + // specified. + if *cs.options.FullDocument != options.Default || (cs.wireVersion != nil && cs.wireVersion.Max < 6) { + plDoc = bsoncore.AppendStringElement(plDoc, "fullDocument", string(*cs.options.FullDocument)) + } + } + + if cs.options.FullDocumentBeforeChange != nil { + plDoc = bsoncore.AppendStringElement(plDoc, "fullDocumentBeforeChange", string(*cs.options.FullDocumentBeforeChange)) } if cs.options.ResumeAfter != nil { @@ -376,6 +437,10 @@ func (cs *ChangeStream) createPipelineOptionsDoc() bsoncore.Document { plDoc = bsoncore.AppendDocumentElement(plDoc, "resumeAfter", raDoc) } + if cs.options.ShowExpandedEvents != nil { + plDoc = bsoncore.AppendBooleanElement(plDoc, "showExpandedEvents", *cs.options.ShowExpandedEvents) + } + if cs.options.StartAfter != nil { var saDoc bsoncore.Document saDoc, cs.err = transformBsoncoreDocument(cs.registry, cs.options.StartAfter, true, "startAfter") @@ -390,6 +455,11 @@ func (cs *ChangeStream) createPipelineOptionsDoc() bsoncore.Document { plDoc = bsoncore.AppendTimestampElement(plDoc, "startAtOperationTime", cs.options.StartAtOperationTime.T, cs.options.StartAtOperationTime.I) } + // Append custom pipeline options. 
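Custom options added above are marshaled onto the initial aggregate command, while CustomPipeline options land inside the $changeStream stage itself. A hedged caller-side sketch, assuming the v1.10 change-stream setters (SetCustom, SetCustomPipeline, SetShowExpandedEvents, SetFullDocumentBeforeChange); the specific fields shown are illustrative, not recommendations:

```go
package example

import (
	"context"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// watchWithExtras: Custom fields ride on the aggregate command, CustomPipeline
// fields on the $changeStream stage; the pre-image and expanded-event switches
// assume a MongoDB 6.0+ deployment.
func watchWithExtras(ctx context.Context, coll *mongo.Collection) (*mongo.ChangeStream, error) {
	opts := options.ChangeStream().
		SetFullDocumentBeforeChange(options.WhenAvailable).
		SetShowExpandedEvents(true).
		SetCustom(bson.M{"maxTimeMS": 5000}).                    // extra aggregate command field
		SetCustomPipeline(bson.M{"allChangesForCluster": false}) // extra $changeStream stage field
	return coll.Watch(ctx, mongo.Pipeline{}, opts)
}
```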
+ for optionName, optionValue := range cs.pipelineOptions { + plDoc = bsoncore.AppendValueElement(plDoc, optionName, optionValue) + } + if plDoc, cs.err = bsoncore.AppendDocumentEnd(plDoc, plDocIdx); cs.err != nil { return nil } @@ -408,7 +478,7 @@ func (cs *ChangeStream) pipelineToBSON() (bsoncore.Document, error) { return pipelineArr, cs.err } -func (cs *ChangeStream) replaceOptions(ctx context.Context, wireVersion *description.VersionRange) { +func (cs *ChangeStream) replaceOptions(wireVersion *description.VersionRange) { // Cached resume token: use the resume token as the resumeAfter option and set no other resume options if cs.resumeToken != nil { cs.options.SetResumeAfter(cs.resumeToken) @@ -604,7 +674,7 @@ func (cs *ChangeStream) isResumableError() bool { return commandErr.HasErrorLabel(resumableErrorLabel) } - // For wire versions below 9, a server error is resumable if its code is on the whitelist. + // For wire versions below 9, a server error is resumable if its code is on the allowlist. _, resumable := resumableChangeStreamErrors[commandErr.Code] return resumable } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go index 5ff82c5f5..9c61123c3 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go @@ -8,6 +8,7 @@ package mongo import ( "context" + "time" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/x/mongo/driver" @@ -35,6 +36,14 @@ func (c *changeStreamDeployment) Connection(context.Context) (driver.Connection, return c.conn, nil } +func (c *changeStreamDeployment) MinRTT() time.Duration { + return c.server.MinRTT() +} + +func (c *changeStreamDeployment) RTT90() time.Duration { + return c.server.RTT90() +} + func (c *changeStreamDeployment) ProcessError(err error, conn driver.Connection) driver.ProcessErrorResult { ep, ok := c.server.(driver.ErrorProcessor) if !ok { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/client.go b/vendor/go.mongodb.org/mongo-driver/mongo/client.go index 4f4292079..d409135a7 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/client.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/client.go @@ -17,6 +17,7 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/uuid" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readconcern" @@ -25,15 +26,18 @@ import ( "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/auth" - "go.mongodb.org/mongo-driver/x/mongo/driver/connstring" + "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt" + mcopts "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options" "go.mongodb.org/mongo-driver/x/mongo/driver/ocsp" "go.mongodb.org/mongo-driver/x/mongo/driver/operation" "go.mongodb.org/mongo-driver/x/mongo/driver/session" "go.mongodb.org/mongo-driver/x/mongo/driver/topology" - "go.mongodb.org/mongo-driver/x/mongo/driver/uuid" ) -const defaultLocalThreshold = 15 * time.Millisecond +const ( + defaultLocalThreshold = 15 * time.Millisecond + defaultMaxPoolSize uint64 = 100 +) var ( // keyVaultCollOpts specifies options used to communicate with the key vault collection @@ -52,7 +56,6 @@ type Client struct { id 
uuid.UUID topologyOptions []topology.Option deployment driver.Deployment - connString connstring.ConnString localThreshold time.Duration retryWrites bool retryReads bool @@ -61,19 +64,20 @@ type Client struct { readConcern *readconcern.ReadConcern writeConcern *writeconcern.WriteConcern registry *bsoncodec.Registry - marshaller BSONAppender monitor *event.CommandMonitor serverAPI *driver.ServerAPIOptions serverMonitor *event.ServerMonitor sessionPool *session.Pool + timeout *time.Duration // client-side encryption fields - keyVaultClientFLE *Client - keyVaultCollFLE *Collection - mongocryptdFLE *mcryptClient - cryptFLE *driver.Crypt - metadataClientFLE *Client - internalClientFLE *Client + keyVaultClientFLE *Client + keyVaultCollFLE *Collection + mongocryptdFLE *mongocryptdClient + cryptFLE driver.Crypt + metadataClientFLE *Client + internalClientFLE *Client + encryptedFieldsMap map[string]interface{} } // Connect creates a new Client and then initializes it using the Connect method. This is equivalent to calling @@ -274,6 +278,9 @@ func (c *Client) Ping(ctx context.Context, rp *readpref.ReadPref) error { // StartSession does not actually communicate with the server and will not error if the client is // disconnected. // +// StartSession is safe to call from multiple goroutines concurrently. However, Sessions returned by StartSession are +// not safe for concurrent use by multiple goroutines. +// // If the DefaultReadConcern, DefaultWriteConcern, or DefaultReadPreference options are not set, the client's read // concern, write concern, or read preference will be used, respectively. func (c *Client) StartSession(opts ...*options.SessionOptions) (Session, error) { @@ -351,6 +358,12 @@ func (c *Client) endSessions(ctx context.Context) { } func (c *Client) configure(opts *options.ClientOptions) error { + var defaultOptions int + // Set default options + if opts.MaxPoolSize == nil { + defaultOptions++ + opts.SetMaxPoolSize(defaultMaxPoolSize) + } if err := opts.Validate(); err != nil { return err } @@ -375,10 +388,22 @@ func (c *Client) configure(opts *options.ClientOptions) error { // ClusterClock c.clock = new(session.ClusterClock) - // Pass down URI so topology can determine whether or not SRV polling is required - topologyOpts = append(topologyOpts, topology.WithURI(func(uri string) string { - return opts.GetURI() - })) + // Pass down URI, SRV service name, and SRV max hosts so topology can poll SRV records correctly. + topologyOpts = append(topologyOpts, + topology.WithURI(func(uri string) string { return opts.GetURI() }), + topology.WithSRVServiceName(func(srvName string) string { + if opts.SRVServiceName != nil { + return *opts.SRVServiceName + } + return "" + }), + topology.WithSRVMaxHosts(func(srvMaxHosts int) int { + if opts.SRVMaxHosts != nil { + return *opts.SRVMaxHosts + } + return 0 + }), + ) // AppName var appName string @@ -425,7 +450,7 @@ func (c *Client) configure(opts *options.ClientOptions) error { // Handshaker var handshaker = func(driver.Handshaker) driver.Handshaker { - return operation.NewIsMaster().AppName(appName).Compressors(comps).ClusterClock(c.clock). + return operation.NewHello().AppName(appName).Compressors(comps).ClusterClock(c.clock). 
ServerAPI(c.serverAPI).LoadBalanced(loadBalanced) } // Auth & Database & Password & Username @@ -537,6 +562,13 @@ func (c *Client) configure(opts *options.ClientOptions) error { topology.WithMinConnections(func(uint64) uint64 { return *opts.MinPoolSize }), ) } + // MaxConnecting + if opts.MaxConnecting != nil { + serverOpts = append( + serverOpts, + topology.WithMaxConnecting(func(uint64) uint64 { return *opts.MaxConnecting }), + ) + } // PoolMonitor if opts.PoolMonitor != nil { serverOpts = append( @@ -608,6 +640,8 @@ func (c *Client) configure(opts *options.ClientOptions) error { topology.WithWriteTimeout(func(time.Duration) time.Duration { return *opts.SocketTimeout }), ) } + // Timeout + c.timeout = opts.Timeout // TLSConfig if opts.TLSConfig != nil { connOpts = append(connOpts, topology.WithTLSConfig( @@ -625,6 +659,8 @@ func (c *Client) configure(opts *options.ClientOptions) error { if err := c.configureAutoEncryption(opts); err != nil { return err } + } else { + c.cryptFLE = opts.Crypt } // OCSP cache @@ -663,15 +699,16 @@ func (c *Client) configure(opts *options.ClientOptions) error { topology.WithClock(func(*session.ClusterClock) *session.ClusterClock { return c.clock }), topology.WithConnectionOptions(func(...topology.ConnectionOption) []topology.ConnectionOption { return connOpts }), ) - c.topologyOptions = append(topologyOpts, topology.WithServerOptions( + topologyOpts = append(topologyOpts, topology.WithServerOptions( func(...topology.ServerOption) []topology.ServerOption { return serverOpts }, )) + c.topologyOptions = topologyOpts // Deployment if opts.Deployment != nil { - // topology options: WithSeedlist and WithURI - // server options: WithClock and WithConnectionOptions - if len(serverOpts) > 2 || len(topologyOpts) > 2 { + // topology options: WithSeedlist, WithURI, WithSRVServiceName, WithSRVMaxHosts, and WithServerOptions + // server options: WithClock and WithConnectionOptions + default maxPoolSize + if len(serverOpts) > 2+defaultOptions || len(topologyOpts) > 5 { return errors.New("cannot specify topology or server options with a deployment") } c.deployment = opts.Deployment @@ -681,16 +718,30 @@ func (c *Client) configure(opts *options.ClientOptions) error { } func (c *Client) configureAutoEncryption(clientOpts *options.ClientOptions) error { + c.encryptedFieldsMap = clientOpts.AutoEncryptionOptions.EncryptedFieldsMap if err := c.configureKeyVaultClientFLE(clientOpts); err != nil { return err } if err := c.configureMetadataClientFLE(clientOpts); err != nil { return err } - if err := c.configureMongocryptdClientFLE(clientOpts.AutoEncryptionOptions); err != nil { + + mc, err := c.newMongoCrypt(clientOpts.AutoEncryptionOptions) + if err != nil { + return err + } + + // If the crypt_shared library was loaded successfully, signal to the mongocryptd client creator + // that it can bypass spawning mongocryptd. 
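Two client-level defaults are threaded through configure above: Timeout, which operations fall back to when the caller's context carries no deadline, and MaxPoolSize, now defaulted to 100 when left unset. A minimal sketch, assuming the v1.10 SetTimeout setter; the URI is a placeholder:

```go
package example

import (
	"context"
	"time"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// newClient sets a client-wide timeout, which the vendored code propagates to
// every operation via .Timeout(c.timeout) as seen throughout this diff.
func newClient(ctx context.Context) (*mongo.Client, error) {
	opts := options.Client().
		ApplyURI("mongodb://localhost:27017"). // placeholder URI
		SetTimeout(5 * time.Second)            // applies when a context has no deadline
	return mongo.Connect(ctx, opts)
}
```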
+ cryptSharedLibAvailable := mc.CryptSharedLibVersionString() != "" + mongocryptdFLE, err := newMongocryptdClient(cryptSharedLibAvailable, clientOpts.AutoEncryptionOptions) + if err != nil { return err } - return c.configureCryptFLE(clientOpts.AutoEncryptionOptions) + c.mongocryptdFLE = mongocryptdFLE + + c.configureCryptFLE(mc, clientOpts.AutoEncryptionOptions) + return nil } func (c *Client) getOrCreateInternalClient(clientOpts *options.ClientOptions) (*Client, error) { @@ -745,32 +796,90 @@ func (c *Client) configureMetadataClientFLE(clientOpts *options.ClientOptions) e return err } -func (c *Client) configureMongocryptdClientFLE(opts *options.AutoEncryptionOptions) error { - var err error - c.mongocryptdFLE, err = newMcryptClient(opts) - return err -} - -func (c *Client) configureCryptFLE(opts *options.AutoEncryptionOptions) error { +func (c *Client) newMongoCrypt(opts *options.AutoEncryptionOptions) (*mongocrypt.MongoCrypt, error) { // convert schemas in SchemaMap to bsoncore documents cryptSchemaMap := make(map[string]bsoncore.Document) for k, v := range opts.SchemaMap { schema, err := transformBsoncoreDocument(c.registry, v, true, "schemaMap") if err != nil { - return err + return nil, err } cryptSchemaMap[k] = schema } + + // convert schemas in EncryptedFieldsMap to bsoncore documents + cryptEncryptedFieldsMap := make(map[string]bsoncore.Document) + for k, v := range opts.EncryptedFieldsMap { + encryptedFields, err := transformBsoncoreDocument(c.registry, v, true, "encryptedFieldsMap") + if err != nil { + return nil, err + } + cryptEncryptedFieldsMap[k] = encryptedFields + } + kmsProviders, err := transformBsoncoreDocument(c.registry, opts.KmsProviders, true, "kmsProviders") if err != nil { - return fmt.Errorf("error creating KMS providers document: %v", err) + return nil, fmt.Errorf("error creating KMS providers document: %v", err) } - // configure options - var bypass bool - if opts.BypassAutoEncryption != nil { - bypass = *opts.BypassAutoEncryption + // Set the crypt_shared library override path from the "cryptSharedLibPath" extra option if one + // was set. + cryptSharedLibPath := "" + if val, ok := opts.ExtraOptions["cryptSharedLibPath"]; ok { + str, ok := val.(string) + if !ok { + return nil, fmt.Errorf( + `expected AutoEncryption extra option "cryptSharedLibPath" to be a string, but is a %T`, val) + } + cryptSharedLibPath = str + } + + // Explicitly disable loading the crypt_shared library if requested. Note that this is ONLY + // intended for use from tests; there is no supported public API for explicitly disabling + // loading the crypt_shared library. + cryptSharedLibDisabled := false + if v, ok := opts.ExtraOptions["__cryptSharedLibDisabledForTestOnly"]; ok { + cryptSharedLibDisabled = v.(bool) + } + + bypassAutoEncryption := opts.BypassAutoEncryption != nil && *opts.BypassAutoEncryption + bypassQueryAnalysis := opts.BypassQueryAnalysis != nil && *opts.BypassQueryAnalysis + + mc, err := mongocrypt.NewMongoCrypt(mcopts.MongoCrypt(). + SetKmsProviders(kmsProviders). + SetLocalSchemaMap(cryptSchemaMap). + SetBypassQueryAnalysis(bypassQueryAnalysis). + SetEncryptedFieldsMap(cryptEncryptedFieldsMap). + SetCryptSharedLibDisabled(cryptSharedLibDisabled || bypassAutoEncryption). 
+ SetCryptSharedLibOverridePath(cryptSharedLibPath)) + if err != nil { + return nil, err + } + + var cryptSharedLibRequired bool + if val, ok := opts.ExtraOptions["cryptSharedLibRequired"]; ok { + b, ok := val.(bool) + if !ok { + return nil, fmt.Errorf( + `expected AutoEncryption extra option "cryptSharedLibRequired" to be a bool, but is a %T`, val) + } + cryptSharedLibRequired = b } + + // If the "cryptSharedLibRequired" extra option is set to true, check the MongoCrypt version + // string to confirm that the library was successfully loaded. If the version string is empty, + // return an error indicating that we couldn't load the crypt_shared library. + if cryptSharedLibRequired && mc.CryptSharedLibVersionString() == "" { + return nil, errors.New( + `AutoEncryption extra option "cryptSharedLibRequired" is true, but we failed to load the crypt_shared library`) + } + + return mc, nil +} + +//nolint:unused // the unused linter thinks that this function is unreachable because "c.newMongoCrypt" always panics without the "cse" build tag set. +func (c *Client) configureCryptFLE(mc *mongocrypt.MongoCrypt, opts *options.AutoEncryptionOptions) { + bypass := opts.BypassAutoEncryption != nil && *opts.BypassAutoEncryption kr := keyRetriever{coll: c.keyVaultCollFLE} var cir collInfoRetriever // If bypass is true, c.metadataClientFLE is nil and the collInfoRetriever @@ -779,22 +888,20 @@ func (c *Client) configureCryptFLE(opts *options.AutoEncryptionOptions) error { if !bypass { cir = collInfoRetriever{client: c.metadataClientFLE} } - cryptOpts := &driver.CryptOptions{ + + c.cryptFLE = driver.NewCrypt(&driver.CryptOptions{ + MongoCrypt: mc, CollInfoFn: cir.cryptCollInfo, KeyFn: kr.cryptKeys, MarkFn: c.mongocryptdFLE.markCommand, - KmsProviders: kmsProviders, + TLSConfig: opts.TLSConfig, BypassAutoEncryption: bypass, - SchemaMap: cryptSchemaMap, - } - - c.cryptFLE, err = driver.NewCrypt(cryptOpts) - return err + }) } // validSession returns an error if the session doesn't belong to the client func (c *Client) validSession(sess *session.Client) error { - if sess != nil && !uuid.Equal(sess.ClientID, c.id) { + if sess != nil && sess.ClientID != c.id { return ErrWrongClient } return nil @@ -823,9 +930,9 @@ func (c *Client) Database(name string, opts ...*options.DatabaseOptions) *Databa // databases are included in the result. It cannot be nil. An empty document (e.g. bson.D{}) should be used to include // all databases. // -// The opts paramter can be used to specify options for this operation (see the options.ListDatabasesOptions documentation). +// The opts parameter can be used to specify options for this operation (see the options.ListDatabasesOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listDatabases/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listDatabases/. func (c *Client) ListDatabases(ctx context.Context, filter interface{}, opts ...*options.ListDatabasesOptions) (ListDatabasesResult, error) { if ctx == nil { ctx = context.Background() @@ -834,6 +941,9 @@ func (c *Client) ListDatabases(ctx context.Context, filter interface{}, opts ... 
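newMongoCrypt above reads two extra options, cryptSharedLibPath and cryptSharedLibRequired, to control crypt_shared loading. A sketch of opting in from the public API; the option keys come straight from the vendored code, while the library path and the local KMS key are placeholders:

```go
package example

import (
	"go.mongodb.org/mongo-driver/mongo/options"
)

// autoEncryptionOpts opts into the crypt_shared library. With
// cryptSharedLibRequired set, client construction errors out if the library
// fails to load; if it loads, spawning mongocryptd is bypassed.
func autoEncryptionOpts() *options.AutoEncryptionOptions {
	kms := map[string]map[string]interface{}{
		"local": {"key": make([]byte, 96)}, // placeholder 96-byte local master key
	}
	return options.AutoEncryption().
		SetKeyVaultNamespace("encryption.__keyVault").
		SetKmsProviders(kms).
		SetExtraOptions(map[string]interface{}{
			"cryptSharedLibPath":     "/opt/mongodb/lib/mongo_crypt_v1.so", // placeholder override path
			"cryptSharedLibRequired": true,                                 // fail fast if not loaded
		})
}
```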
sess := sessionFromContext(ctx) err := c.validSession(sess) + if err != nil { + return ListDatabasesResult{}, err + } if sess == nil && c.sessionPool != nil { sess, err = session.NewClientSession(c.sessionPool, c.id, session.Implicit) if err != nil { @@ -862,7 +972,7 @@ func (c *Client) ListDatabases(ctx context.Context, filter interface{}, opts ... op := operation.NewListDatabases(filterDoc). Session(sess).ReadPreference(c.readPreference).CommandMonitor(c.monitor). ServerSelector(selector).ClusterClock(c.clock).Database("admin").Deployment(c.deployment).Crypt(c.cryptFLE). - ServerAPI(c.serverAPI) + ServerAPI(c.serverAPI).Timeout(c.timeout) if ldo.NameOnly != nil { op = op.NameOnly(*ldo.NameOnly) @@ -895,7 +1005,7 @@ func (c *Client) ListDatabases(ctx context.Context, filter interface{}, opts ... // The opts parameter can be used to specify options for this operation (see the options.ListDatabasesOptions // documentation.) // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listDatabases/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listDatabases/. func (c *Client) ListDatabaseNames(ctx context.Context, filter interface{}, opts ...*options.ListDatabasesOptions) ([]string, error) { opts = append(opts, options.ListDatabases().SetNameOnly(true)) @@ -916,6 +1026,9 @@ func (c *Client) ListDatabaseNames(ctx context.Context, filter interface{}, opts // SessionContext must be used as the Context parameter for any operations in the fn callback that should be executed // under the session. // +// WithSession is safe to call from multiple goroutines concurrently. However, the SessionContext passed to the +// WithSession callback function is not safe for concurrent use by multiple goroutines. +// // If the ctx parameter already contains a Session, that Session will be replaced with the one provided. // // Any error returned by the fn callback will be returned without any modifications. @@ -928,6 +1041,9 @@ func WithSession(ctx context.Context, sess Session, fn func(SessionContext) erro // be executed under a session. After the callback returns, the created Session is ended, meaning that any in-progress // transactions started by fn will be aborted even if fn returns an error. // +// UseSession is safe to call from multiple goroutines concurrently. However, the SessionContext passed to the +// UseSession callback function is not safe for concurrent use by multiple goroutines. +// // If the ctx parameter already contains a Session, that Session will be replaced with the newly created one. // // Any error returned by the fn callback will be returned without any modifications. @@ -936,6 +1052,9 @@ func (c *Client) UseSession(ctx context.Context, fn func(SessionContext) error) } // UseSessionWithOptions operates like UseSession but uses the given SessionOptions to create the Session. +// +// UseSessionWithOptions is safe to call from multiple goroutines concurrently. However, the SessionContext passed to +// the UseSessionWithOptions callback function is not safe for concurrent use by multiple goroutines. func (c *Client) UseSessionWithOptions(ctx context.Context, opts *options.SessionOptions, fn func(SessionContext) error) error { defaultSess, err := c.StartSession(opts) if err != nil { @@ -947,13 +1066,13 @@ func (c *Client) UseSessionWithOptions(ctx context.Context, opts *options.Sessio } // Watch returns a change stream for all changes on the deployment. 
See -// https://docs.mongodb.com/manual/changeStreams/ for more information about change streams. +// https://www.mongodb.com/docs/manual/changeStreams/ for more information about change streams. // // The client must be configured with read concern majority or no read concern for a change stream to be created // successfully. // // The pipeline parameter must be an array of documents, each representing a pipeline stage. The pipeline cannot be -// nil or empty. The stage documents must all be non-nil. See https://docs.mongodb.com/manual/changeStreams/ for a list +// nil or empty. The stage documents must all be non-nil. See https://www.mongodb.com/docs/manual/changeStreams/ for a list // of pipeline stages that can be used with change streams. For a pipeline of bson.D documents, the mongo.Pipeline{} // type can be used. // diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go b/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go index 4b1f12d39..f88b7bede 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go @@ -17,12 +17,13 @@ import ( "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" - cryptOpts "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options" + "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt" + mcopts "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options" ) // ClientEncryption is used to create data keys and explicitly encrypt and decrypt BSON values. type ClientEncryption struct { - crypt *driver.Crypt + crypt driver.Crypt keyVaultClient *Client keyVaultColl *Collection } @@ -47,35 +48,56 @@ func NewClientEncryption(keyVaultClient *Client, opts ...*options.ClientEncrypti return nil, fmt.Errorf("error creating KMS providers map: %v", err) } + mc, err := mongocrypt.NewMongoCrypt(mcopts.MongoCrypt(). + SetKmsProviders(kmsProviders). + // Explicitly disable loading the crypt_shared library for the Crypt used for + // ClientEncryption because it's only needed for AutoEncryption and we don't expect users to + // have the crypt_shared library installed if they're using ClientEncryption. + SetCryptSharedLibDisabled(true)) + if err != nil { + return nil, err + } + // create Crypt kr := keyRetriever{coll: ce.keyVaultColl} cir := collInfoRetriever{client: ce.keyVaultClient} - ce.crypt, err = driver.NewCrypt(&driver.CryptOptions{ - KeyFn: kr.cryptKeys, - CollInfoFn: cir.cryptCollInfo, - KmsProviders: kmsProviders, + ce.crypt = driver.NewCrypt(&driver.CryptOptions{ + MongoCrypt: mc, + KeyFn: kr.cryptKeys, + CollInfoFn: cir.cryptCollInfo, + TLSConfig: ceo.TLSConfig, }) - if err != nil { - return nil, err - } return ce, nil } -// CreateDataKey creates a new key document and inserts it into the key vault collection. Returns the _id of the -// created document. -func (ce *ClientEncryption) CreateDataKey(ctx context.Context, kmsProvider string, opts ...*options.DataKeyOptions) (primitive.Binary, error) { - // translate opts to cryptOpts.DataKeyOptions +// AddKeyAltName adds a keyAltName to the keyAltNames array of the key document in the key vault collection with the +// given UUID (BSON binary subtype 0x04). Returns the previous version of the key document. 
+func (ce *ClientEncryption) AddKeyAltName(ctx context.Context, id primitive.Binary, keyAltName string) *SingleResult { + filter := bsoncore.NewDocumentBuilder().AppendBinary("_id", id.Subtype, id.Data).Build() + keyAltNameDoc := bsoncore.NewDocumentBuilder().AppendString("keyAltNames", keyAltName).Build() + update := bsoncore.NewDocumentBuilder().AppendDocument("$addToSet", keyAltNameDoc).Build() + return ce.keyVaultColl.FindOneAndUpdate(ctx, filter, update) +} + +// CreateDataKey creates a new key document and inserts into the key vault collection. Returns the _id of the created +// document as a UUID (BSON binary subtype 0x04). +func (ce *ClientEncryption) CreateDataKey(ctx context.Context, kmsProvider string, + opts ...*options.DataKeyOptions) (primitive.Binary, error) { + + // translate opts to mcopts.DataKeyOptions dko := options.MergeDataKeyOptions(opts...) - co := cryptOpts.DataKey().SetKeyAltNames(dko.KeyAltNames) + co := mcopts.DataKey().SetKeyAltNames(dko.KeyAltNames) if dko.MasterKey != nil { keyDoc, err := transformBsoncoreDocument(ce.keyVaultClient.registry, dko.MasterKey, true, "masterKey") if err != nil { return primitive.Binary{}, err } - co.SetMasterKey(keyDoc) } + if dko.KeyMaterial != nil { + co.SetKeyMaterial(dko.KeyMaterial) + } // create data key document dataKeyDoc, err := ce.crypt.CreateDataKey(ctx, kmsProvider, co) @@ -94,9 +116,11 @@ func (ce *ClientEncryption) CreateDataKey(ctx context.Context, kmsProvider strin } // Encrypt encrypts a BSON value with the given key and algorithm. Returns an encrypted value (BSON binary of subtype 6). -func (ce *ClientEncryption) Encrypt(ctx context.Context, val bson.RawValue, opts ...*options.EncryptOptions) (primitive.Binary, error) { +func (ce *ClientEncryption) Encrypt(ctx context.Context, val bson.RawValue, + opts ...*options.EncryptOptions) (primitive.Binary, error) { + eo := options.MergeEncryptOptions(opts...) - transformed := cryptOpts.ExplicitEncryption() + transformed := mcopts.ExplicitEncryption() if eo.KeyID != nil { transformed.SetKeyID(*eo.KeyID) } @@ -104,6 +128,11 @@ func (ce *ClientEncryption) Encrypt(ctx context.Context, val bson.RawValue, opts transformed.SetKeyAltName(*eo.KeyAltName) } transformed.SetAlgorithm(eo.Algorithm) + transformed.SetQueryType(eo.QueryType) + + if eo.ContentionFactor != nil { + transformed.SetContentionFactor(*eo.ContentionFactor) + } subtype, data, err := ce.crypt.EncryptExplicit(ctx, bsoncore.Value{Type: val.Type, Data: val.Value}, transformed) if err != nil { @@ -129,6 +158,143 @@ func (ce *ClientEncryption) Close(ctx context.Context) error { return ce.keyVaultClient.Disconnect(ctx) } +// DeleteKey removes the key document with the given UUID (BSON binary subtype 0x04) from the key vault collection. +// Returns the result of the internal deleteOne() operation on the key vault collection. +func (ce *ClientEncryption) DeleteKey(ctx context.Context, id primitive.Binary) (*DeleteResult, error) { + filter := bsoncore.NewDocumentBuilder().AppendBinary("_id", id.Subtype, id.Data).Build() + return ce.keyVaultColl.DeleteOne(ctx, filter) +} + +// GetKeyByAltName returns a key document in the key vault collection with the given keyAltName. +func (ce *ClientEncryption) GetKeyByAltName(ctx context.Context, keyAltName string) *SingleResult { + filter := bsoncore.NewDocumentBuilder().AppendString("keyAltNames", keyAltName).Build() + return ce.keyVaultColl.FindOne(ctx, filter) +} + +// GetKey finds a single key document with the given UUID (BSON binary subtype 0x04). 
Returns the result of the +// internal find() operation on the key vault collection. +func (ce *ClientEncryption) GetKey(ctx context.Context, id primitive.Binary) *SingleResult { + filter := bsoncore.NewDocumentBuilder().AppendBinary("_id", id.Subtype, id.Data).Build() + return ce.keyVaultColl.FindOne(ctx, filter) +} + +// GetKeys finds all documents in the key vault collection. Returns the result of the internal find() operation on the +// key vault collection. +func (ce *ClientEncryption) GetKeys(ctx context.Context) (*Cursor, error) { + return ce.keyVaultColl.Find(ctx, bson.D{}) +} + +// RemoveKeyAltName removes a keyAltName from the keyAltNames array of the key document in the key vault collection with +// the given UUID (BSON binary subtype 0x04). Returns the previous version of the key document. +func (ce *ClientEncryption) RemoveKeyAltName(ctx context.Context, id primitive.Binary, keyAltName string) *SingleResult { + filter := bsoncore.NewDocumentBuilder().AppendBinary("_id", id.Subtype, id.Data).Build() + update := bson.A{bson.D{{"$set", bson.D{{"keyAltNames", bson.D{{"$cond", bson.A{bson.D{{"$eq", + bson.A{"$keyAltNames", bson.A{keyAltName}}}}, "$$REMOVE", bson.D{{"$filter", + bson.D{{"input", "$keyAltNames"}, {"cond", bson.D{{"$ne", bson.A{"$$this", keyAltName}}}}}}}}}}}}}}} + return ce.keyVaultColl.FindOneAndUpdate(ctx, filter, update) +} + +// setRewrapManyDataKeyWriteModels prepares the WriteModel slice for bulk updating rewrapped documents. +func setRewrapManyDataKeyWriteModels(rewrappedDocuments []bsoncore.Document, writeModels *[]WriteModel) error { + const idKey = "_id" + const keyMaterial = "keyMaterial" + const masterKey = "masterKey" + + if writeModels == nil { + return fmt.Errorf("writeModels pointer not set for location referenced") + } + + // Append a WriteModel with the update document for each rewrapped document's _id filter. + for _, rewrappedDocument := range rewrappedDocuments { + // Prepare the new master key for update. + masterKeyValue, err := rewrappedDocument.LookupErr(masterKey) + if err != nil { + return err + } + masterKeyDoc := masterKeyValue.Document() + + // Prepare the new material key for update. + keyMaterialValue, err := rewrappedDocument.LookupErr(keyMaterial) + if err != nil { + return err + } + keyMaterialSubtype, keyMaterialData := keyMaterialValue.Binary() + keyMaterialBinary := primitive.Binary{Subtype: keyMaterialSubtype, Data: keyMaterialData} + + // Prepare the _id filter for documents to update. + id, err := rewrappedDocument.LookupErr(idKey) + if err != nil { + return err + } + + idSubtype, idData, ok := id.BinaryOK() + if !ok { + return fmt.Errorf("expected to assert %q as binary, got type %T", idKey, id) + } + binaryID := primitive.Binary{Subtype: idSubtype, Data: idData} + + // Append the mutable document to the slice for bulk update. + *writeModels = append(*writeModels, NewUpdateOneModel(). + SetFilter(bson.D{{idKey, binaryID}}). + SetUpdate( + bson.D{ + {"$set", bson.D{{keyMaterial, keyMaterialBinary}, {masterKey, masterKeyDoc}}}, + {"$currentDate", bson.D{{"updateDate", true}}}, + }, + )) + } + return nil +} + +// RewrapManyDataKey decrypts and encrypts all matching data keys with a possibly new masterKey value. For all +// matching documents, this method will overwrite the "masterKey", "updateDate", and "keyMaterial". On error, some +// matching data keys may have been rewrapped.
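A usage sketch for the rewrap flow above: an empty filter rewraps every data key under the named provider. The provider name is a placeholder and the SetProvider call is assumed from the merged RewrapManyDataKeyOptions; as the doc comment warns, a mid-flight error can leave some keys already rewrapped:

```go
package example

import (
	"context"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// rotateDataKeys rewraps all data keys in the key vault under the "local"
// provider (placeholder). The result wraps the BulkWriteResult of the
// key vault update shown above.
func rotateDataKeys(ctx context.Context, ce *mongo.ClientEncryption) (*mongo.RewrapManyDataKeyResult, error) {
	opts := options.RewrapManyDataKey().SetProvider("local")
	return ce.RewrapManyDataKey(ctx, bson.D{}, opts)
}
```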
+func (ce *ClientEncryption) RewrapManyDataKey(ctx context.Context, filter interface{}, + opts ...*options.RewrapManyDataKeyOptions) (*RewrapManyDataKeyResult, error) { + + rmdko := options.MergeRewrapManyDataKeyOptions(opts...) + if ctx == nil { + ctx = context.Background() + } + + // Transfer rmdko options to /x/ package options to publish the mongocrypt feed. + co := mcopts.RewrapManyDataKey() + if rmdko.MasterKey != nil { + keyDoc, err := transformBsoncoreDocument(ce.keyVaultClient.registry, rmdko.MasterKey, true, "masterKey") + if err != nil { + return nil, err + } + co.SetMasterKey(keyDoc) + } + if rmdko.Provider != nil { + co.SetProvider(*rmdko.Provider) + } + + // Prepare the filters and rewrap the data key using mongocrypt. + filterdoc, err := transformBsoncoreDocument(ce.keyVaultClient.registry, filter, true, "filter") + if err != nil { + return nil, err + } + + rewrappedDocuments, err := ce.crypt.RewrapDataKey(ctx, filterdoc, co) + if err != nil { + return nil, err + } + if len(rewrappedDocuments) == 0 { + // If there are no documents to rewrap, then do nothing. + return new(RewrapManyDataKeyResult), nil + } + + // Prepare the WriteModel slice for bulk updating the rewrapped data keys. + models := []WriteModel{} + if err := setRewrapManyDataKeyWriteModels(rewrappedDocuments, &models); err != nil { + return nil, err + } + + bulkWriteResults, err := ce.keyVaultColl.BulkWrite(ctx, models) + return &RewrapManyDataKeyResult{BulkWriteResult: bulkWriteResults}, err +} + // splitNamespace takes a namespace in the form "database.collection" and returns (database name, collection name) func splitNamespace(ns string) (string, string) { firstDot := strings.Index(ns, ".") diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/collection.go b/vendor/go.mongodb.org/mongo-driver/mongo/collection.go index 02e197d5a..aa3ffbe95 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/collection.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/collection.go @@ -16,6 +16,7 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/internal" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readconcern" @@ -166,7 +167,7 @@ func (coll *Collection) Database() *Database { return coll.db } -// BulkWrite performs a bulk write operation (https://docs.mongodb.com/manual/core/bulk-write-operations/). +// BulkWrite performs a bulk write operation (https://www.mongodb.com/docs/manual/core/bulk-write-operations/). // // The models parameter must be a slice of operations to be executed in this bulk write. It cannot be nil or empty. // All of the models must be non-nil. See the mongo.WriteModel documentation for a list of valid model types and @@ -218,6 +219,7 @@ func (coll *Collection) BulkWrite(ctx context.Context, models []WriteModel, bwo := options.MergeBulkWriteOptions(opts...) op := bulkWrite{ + comment: bwo.Comment, ordered: bwo.Ordered, bypassDocumentValidation: bwo.BypassDocumentValidation, models: models, @@ -225,6 +227,7 @@ func (coll *Collection) BulkWrite(ctx context.Context, models []WriteModel, collection: coll, selector: selector, writeConcern: wc, + let: bwo.Let, } err = op.execute(ctx) @@ -280,11 +283,18 @@ func (coll *Collection) insert(ctx context.Context, documents []interface{}, ServerSelector(selector).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name). 
Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).Ordered(true). - ServerAPI(coll.client.serverAPI) + ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout) imo := options.MergeInsertManyOptions(opts...) if imo.BypassDocumentValidation != nil && *imo.BypassDocumentValidation { op = op.BypassDocumentValidation(*imo.BypassDocumentValidation) } + if imo.Comment != nil { + comment, err := transformValue(coll.registry, imo.Comment, true, "comment") + if err != nil { + return nil, err + } + op = op.Comment(comment) + } if imo.Ordered != nil { op = op.Ordered(*imo.Ordered) } @@ -323,7 +333,7 @@ func (coll *Collection) insert(ctx context.Context, documents []interface{}, // // The opts parameter can be used to specify options for the operation (see the options.InsertOneOptions documentation.) // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/insert/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/insert/. func (coll *Collection) InsertOne(ctx context.Context, document interface{}, opts ...*options.InsertOneOptions) (*InsertOneResult, error) { @@ -333,6 +343,9 @@ func (coll *Collection) InsertOne(ctx context.Context, document interface{}, if ioOpts.BypassDocumentValidation != nil && *ioOpts.BypassDocumentValidation { imOpts.SetBypassDocumentValidation(*ioOpts.BypassDocumentValidation) } + if ioOpts.Comment != nil { + imOpts.SetComment(ioOpts.Comment) + } res, err := coll.insert(ctx, []interface{}{document}, imOpts) rr, err := processWriteError(err) @@ -352,7 +365,7 @@ func (coll *Collection) InsertOne(ctx context.Context, document interface{}, // // The opts parameter can be used to specify options for the operation (see the options.InsertManyOptions documentation.) // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/insert/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/insert/. func (coll *Collection) InsertMany(ctx context.Context, documents []interface{}, opts ...*options.InsertManyOptions) (*InsertManyResult, error) { @@ -450,10 +463,24 @@ func (coll *Collection) delete(ctx context.Context, filter interface{}, deleteOn ServerSelector(selector).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name). Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).Ordered(true). - ServerAPI(coll.client.serverAPI) + ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout) + if do.Comment != nil { + comment, err := transformValue(coll.registry, do.Comment, true, "comment") + if err != nil { + return nil, err + } + op = op.Comment(comment) + } if do.Hint != nil { op = op.Hint(true) } + if do.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, do.Let, true, "let") + if err != nil { + return nil, err + } + op = op.Let(let) + } // deleteMany cannot be retried retryMode := driver.RetryNone @@ -465,7 +492,7 @@ func (coll *Collection) delete(ctx context.Context, filter interface{}, deleteOn if rr&expectedRr == 0 { return nil, err } - return &DeleteResult{DeletedCount: int64(op.Result().N)}, err + return &DeleteResult{DeletedCount: op.Result().N}, err } // DeleteOne executes a delete command to delete at most one document from the collection. 
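The delete path now accepts comment and let the same way. A caller-side sketch, assuming the v1.10 SetComment/SetLet setters on DeleteOptions; $$threshold resolves server-side against the let document:

```go
package example

import (
	"context"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// deleteBelowThreshold: the comment tags the operation in logs and the
// profiler, and the let variable keeps the threshold out of the filter itself.
func deleteBelowThreshold(ctx context.Context, coll *mongo.Collection) (*mongo.DeleteResult, error) {
	filter := bson.D{{"$expr", bson.D{{"$lt", bson.A{"$score", "$$threshold"}}}}}
	opts := options.Delete().
		SetComment("ttl-sweep").
		SetLet(bson.D{{"threshold", 10}})
	return coll.DeleteMany(ctx, filter, opts)
}
```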
@@ -477,7 +504,7 @@ func (coll *Collection) delete(ctx context.Context, filter interface{}, deleteOn // // The opts parameter can be used to specify options for the operation (see the options.DeleteOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/delete/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/delete/. func (coll *Collection) DeleteOne(ctx context.Context, filter interface{}, opts ...*options.DeleteOptions) (*DeleteResult, error) { @@ -493,7 +520,7 @@ func (coll *Collection) DeleteOne(ctx context.Context, filter interface{}, // // The opts parameter can be used to specify options for the operation (see the options.DeleteOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/delete/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/delete/. func (coll *Collection) DeleteMany(ctx context.Context, filter interface{}, opts ...*options.DeleteOptions) (*DeleteResult, error) { @@ -547,11 +574,26 @@ func (coll *Collection) updateOrReplace(ctx context.Context, filter bsoncore.Doc ServerSelector(selector).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name). Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).Hint(uo.Hint != nil). - ArrayFilters(uo.ArrayFilters != nil).Ordered(true).ServerAPI(coll.client.serverAPI) + ArrayFilters(uo.ArrayFilters != nil).Ordered(true).ServerAPI(coll.client.serverAPI). + Timeout(coll.client.timeout) + if uo.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, uo.Let, true, "let") + if err != nil { + return nil, err + } + op = op.Let(let) + } if uo.BypassDocumentValidation != nil && *uo.BypassDocumentValidation { op = op.BypassDocumentValidation(*uo.BypassDocumentValidation) } + if uo.Comment != nil { + comment, err := transformValue(coll.registry, uo.Comment, true, "comment") + if err != nil { + return nil, err + } + op = op.Comment(comment) + } retry := driver.RetryNone // retryable writes are only enabled updateOne/replaceOne operations if !multi && coll.client.retryWrites { @@ -567,8 +609,8 @@ func (coll *Collection) updateOrReplace(ctx context.Context, filter bsoncore.Doc opRes := op.Result() res := &UpdateResult{ - MatchedCount: int64(opRes.N), - ModifiedCount: int64(opRes.NModified), + MatchedCount: opRes.N, + ModifiedCount: opRes.NModified, UpsertedCount: int64(len(opRes.Upserted)), } if len(opRes.Upserted) > 0 { @@ -586,12 +628,12 @@ func (coll *Collection) updateOrReplace(ctx context.Context, filter bsoncore.Doc // the operation will succeed and an UpdateResult with a MatchedCount of 0 will be returned. // // The update parameter must be a document containing update operators -// (https://docs.mongodb.com/manual/reference/operator/update/) and can be used to specify the modifications to be +// (https://www.mongodb.com/docs/manual/reference/operator/update/) and can be used to specify the modifications to be // made to the selected document. It cannot be nil or empty. // // The opts parameter can be used to specify options for the operation (see the options.UpdateOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/update/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/update/. 
func (coll *Collection) UpdateByID(ctx context.Context, id interface{}, update interface{}, opts ...*options.UpdateOptions) (*UpdateResult, error) { if id == nil { @@ -608,12 +650,12 @@ func (coll *Collection) UpdateByID(ctx context.Context, id interface{}, update i // matched set and MatchedCount will equal 1. // // The update parameter must be a document containing update operators -// (https://docs.mongodb.com/manual/reference/operator/update/) and can be used to specify the modifications to be +// (https://www.mongodb.com/docs/manual/reference/operator/update/) and can be used to specify the modifications to be // made to the selected document. It cannot be nil or empty. // // The opts parameter can be used to specify options for the operation (see the options.UpdateOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/update/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/update/. func (coll *Collection) UpdateOne(ctx context.Context, filter interface{}, update interface{}, opts ...*options.UpdateOptions) (*UpdateResult, error) { @@ -636,12 +678,12 @@ func (coll *Collection) UpdateOne(ctx context.Context, filter interface{}, updat // with a MatchedCount of 0 will be returned. // // The update parameter must be a document containing update operators -// (https://docs.mongodb.com/manual/reference/operator/update/) and can be used to specify the modifications to be made +// (https://www.mongodb.com/docs/manual/reference/operator/update/) and can be used to specify the modifications to be made // to the selected documents. It cannot be nil or empty. // // The opts parameter can be used to specify options for the operation (see the options.UpdateOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/update/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/update/. func (coll *Collection) UpdateMany(ctx context.Context, filter interface{}, update interface{}, opts ...*options.UpdateOptions) (*UpdateResult, error) { @@ -665,11 +707,11 @@ func (coll *Collection) UpdateMany(ctx context.Context, filter interface{}, upda // selected from the matched set and MatchedCount will equal 1. // // The replacement parameter must be a document that will be used to replace the selected document. It cannot be nil -// and cannot contain any update operators (https://docs.mongodb.com/manual/reference/operator/update/). +// and cannot contain any update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). // // The opts parameter can be used to specify options for the operation (see the options.ReplaceOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/update/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/update/. 
func (coll *Collection) ReplaceOne(ctx context.Context, filter interface{}, replacement interface{}, opts ...*options.ReplaceOptions) (*UpdateResult, error) { @@ -693,11 +735,16 @@ func (coll *Collection) ReplaceOne(ctx context.Context, filter interface{}, updateOptions := make([]*options.UpdateOptions, 0, len(opts)) for _, opt := range opts { + if opt == nil { + continue + } uOpts := options.Update() uOpts.BypassDocumentValidation = opt.BypassDocumentValidation uOpts.Collation = opt.Collation uOpts.Upsert = opt.Upsert uOpts.Hint = opt.Hint + uOpts.Let = opt.Let + uOpts.Comment = opt.Comment updateOptions = append(updateOptions, uOpts) } @@ -709,12 +756,12 @@ func (coll *Collection) ReplaceOne(ctx context.Context, filter interface{}, // The pipeline parameter must be an array of documents, each representing an aggregation stage. The pipeline cannot // be nil but can be empty. The stage documents must all be non-nil. For a pipeline of bson.D documents, the // mongo.Pipeline type can be used. See -// https://docs.mongodb.com/manual/reference/operator/aggregation-pipeline/#db-collection-aggregate-stages for a list of +// https://www.mongodb.com/docs/manual/reference/operator/aggregation-pipeline/#db-collection-aggregate-stages for a list of // valid stages in aggregations. // // The opts parameter can be used to specify options for the operation (see the options.AggregateOptions documentation.) // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/aggregate/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/aggregate/. func (coll *Collection) Aggregate(ctx context.Context, pipeline interface{}, opts ...*options.AggregateOptions) (*Cursor, error) { a := aggregateParams{ @@ -735,9 +782,8 @@ func (coll *Collection) Aggregate(ctx context.Context, pipeline interface{}, return aggregate(a) } -// aggreate is the helper method for Aggregate -func aggregate(a aggregateParams) (*Cursor, error) { - +// aggregate is the helper method for Aggregate +func aggregate(a aggregateParams) (cur *Cursor, err error) { if a.ctx == nil { a.ctx = context.Background() } @@ -748,6 +794,12 @@ func aggregate(a aggregateParams) (*Cursor, error) { } sess := sessionFromContext(a.ctx) + // Always close any created implicit sessions if aggregate returns an error. + defer func() { + if err != nil && sess != nil { + closeImplicitSession(sess) + } + }() if sess == nil && a.client.sessionPool != nil { sess, err = session.NewClientSession(a.client.sessionPool, a.client.id, session.Implicit) if err != nil { @@ -772,9 +824,9 @@ func aggregate(a aggregateParams) (*Cursor, error) { sess = nil } - selector := makePinnedSelector(sess, a.writeSelector) - if !hasOutputStage { - selector = makeReadPrefSelector(sess, a.readSelector, a.client.localThreshold) + selector := makeReadPrefSelector(sess, a.readSelector, a.client.localThreshold) + if hasOutputStage { + selector = makeOutputAggregateSelector(sess, a.readPreference, a.client.localThreshold) } ao := options.MergeAggregateOptions(a.opts...) @@ -784,6 +836,7 @@ func aggregate(a aggregateParams) (*Cursor, error) { Session(sess). WriteConcern(wc). ReadConcern(rc). + ReadPreference(a.readPreference). CommandMonitor(a.client.monitor). ServerSelector(selector). ClusterClock(a.client.clock). @@ -791,13 +844,9 @@ func aggregate(a aggregateParams) (*Cursor, error) { Collection(a.col). Deployment(a.client.deployment). Crypt(a.client.cryptFLE). 
- ServerAPI(a.client.serverAPI) - if !hasOutputStage { - // Only pass the user-specified read preference if the aggregation doesn't have a $out or $merge stage. - // Otherwise, the read preference could be forwarded to a mongos, which would error if the aggregation were - // executed against a non-primary node. - op.ReadPreference(a.readPreference) - } + ServerAPI(a.client.serverAPI). + HasOutputStage(hasOutputStage). + Timeout(a.client.timeout) if ao.AllowDiskUse != nil { op.AllowDiskUse(*ao.AllowDiskUse) @@ -821,11 +870,16 @@ func aggregate(a aggregateParams) (*Cursor, error) { } if ao.Comment != nil { op.Comment(*ao.Comment) + + commentVal, err := transformValue(a.registry, ao.Comment, true, "comment") + if err != nil { + return nil, err + } + cursorOpts.Comment = commentVal } if ao.Hint != nil { hintVal, err := transformValue(a.registry, ao.Hint, false, "hint") if err != nil { - closeImplicitSession(sess) return nil, err } op.Hint(hintVal) @@ -833,11 +887,24 @@ func aggregate(a aggregateParams) (*Cursor, error) { if ao.Let != nil { let, err := transformBsoncoreDocument(a.registry, ao.Let, true, "let") if err != nil { - closeImplicitSession(sess) return nil, err } op.Let(let) } + if ao.Custom != nil { + // Marshal all custom options before passing to the aggregate operation. Return + // any errors from Marshaling. + customOptions := make(map[string]bsoncore.Value) + for optionName, optionValue := range ao.Custom { + bsonType, bsonData, err := bson.MarshalValueWithRegistry(a.registry, optionValue) + if err != nil { + return nil, err + } + optionValueBSON := bsoncore.Value{Type: bsonType, Data: bsonData} + customOptions[optionName] = optionValueBSON + } + op.CustomOptions(customOptions) + } retry := driver.RetryNone if a.retryRead && !hasOutputStage { @@ -847,7 +914,6 @@ func aggregate(a aggregateParams) (*Cursor, error) { err = op.Execute(a.ctx) if err != nil { - closeImplicitSession(sess) if wce, ok := err.(driver.WriteCommandError); ok && wce.WriteConcernError != nil { return nil, *convertDriverWriteConcernError(wce.WriteConcernError) } @@ -856,7 +922,6 @@ func aggregate(a aggregateParams) (*Cursor, error) { bc, err := op.Result(cursorOpts) if err != nil { - closeImplicitSession(sess) return nil, replaceErrors(err) } cursor, err := newCursorWithSession(bc, a.registry, sess) @@ -905,10 +970,14 @@ func (coll *Collection) CountDocuments(ctx context.Context, filter interface{}, selector := makeReadPrefSelector(sess, coll.readSelector, coll.client.localThreshold) op := operation.NewAggregate(pipelineArr).Session(sess).ReadConcern(rc).ReadPreference(coll.readPreference). CommandMonitor(coll.client.monitor).ServerSelector(selector).ClusterClock(coll.client.clock).Database(coll.db.name). - Collection(coll.name).Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI) + Collection(coll.name).Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI). + Timeout(coll.client.timeout) if countOpts.Collation != nil { op.Collation(bsoncore.Document(countOpts.Collation.ToDocument())) } + if countOpts.Comment != nil { + op.Comment(*countOpts.Comment) + } if countOpts.MaxTime != nil { op.MaxTimeMS(int64(*countOpts.MaxTime / time.Millisecond)) } @@ -954,7 +1023,7 @@ func (coll *Collection) CountDocuments(ctx context.Context, filter interface{}, // The opts parameter can be used to specify options for the operation (see the options.EstimatedDocumentCountOptions // documentation). 
// -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/count/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/count/. func (coll *Collection) EstimatedDocumentCount(ctx context.Context, opts ...*options.EstimatedDocumentCountOptions) (int64, error) { @@ -987,9 +1056,17 @@ func (coll *Collection) EstimatedDocumentCount(ctx context.Context, op := operation.NewCount().Session(sess).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name).CommandMonitor(coll.client.monitor). Deployment(coll.client.deployment).ReadConcern(rc).ReadPreference(coll.readPreference). - ServerSelector(selector).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI) + ServerSelector(selector).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI). + Timeout(coll.client.timeout) co := options.MergeEstimatedDocumentCountOptions(opts...) + if co.Comment != nil { + comment, err := transformValue(coll.registry, co.Comment, false, "comment") + if err != nil { + return 0, err + } + op = op.Comment(comment) + } if co.MaxTime != nil { op = op.MaxTimeMS(int64(*co.MaxTime / time.Millisecond)) } @@ -1013,7 +1090,7 @@ func (coll *Collection) EstimatedDocumentCount(ctx context.Context, // // The opts parameter can be used to specify options for the operation (see the options.DistinctOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/distinct/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/distinct/. func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter interface{}, opts ...*options.DistinctOptions) ([]interface{}, error) { @@ -1049,15 +1126,23 @@ func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter i selector := makeReadPrefSelector(sess, coll.readSelector, coll.client.localThreshold) option := options.MergeDistinctOptions(opts...) - op := operation.NewDistinct(fieldName, bsoncore.Document(f)). + op := operation.NewDistinct(fieldName, f). Session(sess).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name).CommandMonitor(coll.client.monitor). Deployment(coll.client.deployment).ReadConcern(rc).ReadPreference(coll.readPreference). - ServerSelector(selector).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI) + ServerSelector(selector).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI). + Timeout(coll.client.timeout) if option.Collation != nil { op.Collation(bsoncore.Document(option.Collation.ToDocument())) } + if option.Comment != nil { + comment, err := transformValue(coll.registry, option.Comment, true, "comment") + if err != nil { + return nil, err + } + op.Comment(comment) + } if option.MaxTime != nil { op.MaxTimeMS(int64(*option.MaxTime / time.Millisecond)) } @@ -1102,9 +1187,9 @@ func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter i // // The opts parameter can be used to specify options for the operation (see the options.FindOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/find/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/find/. 
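The comment support added above for the count and distinct paths can be driven the same way; a sketch under the same assumptions (the Set* option methods follow the driver's usual builder convention):

func reportCounts(ctx context.Context, coll *mongo.Collection) error {
	n, err := coll.EstimatedDocumentCount(ctx,
		options.EstimatedDocumentCount().SetComment("nightly-report"))
	if err != nil {
		return err
	}
	statuses, err := coll.Distinct(ctx, "status", bson.D{},
		options.Distinct().SetComment("nightly-report"))
	if err != nil {
		return err
	}
	fmt.Println(n, len(statuses))
	return nil
}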
func (coll *Collection) Find(ctx context.Context, filter interface{}, - opts ...*options.FindOptions) (*Cursor, error) { + opts ...*options.FindOptions) (cur *Cursor, err error) { if ctx == nil { ctx = context.Background() @@ -1116,6 +1201,12 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, } sess := sessionFromContext(ctx) + // Always close any created implicit sessions if Find returns an error. + defer func() { + if err != nil && sess != nil { + closeImplicitSession(sess) + } + }() if sess == nil && coll.client.sessionPool != nil { var err error sess, err = session.NewClientSession(coll.client.sessionPool, coll.client.id, session.Implicit) @@ -1126,7 +1217,6 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, err = coll.client.validSession(sess) if err != nil { - closeImplicitSession(sess) return nil, err } @@ -1140,7 +1230,8 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, Session(sess).ReadConcern(rc).ReadPreference(coll.readPreference). CommandMonitor(coll.client.monitor).ServerSelector(selector). ClusterClock(coll.client.clock).Database(coll.db.name).Collection(coll.name). - Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI) + Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI). + Timeout(coll.client.timeout) fo := options.MergeFindOptions(opts...) cursorOpts := coll.client.createBaseCursorOptions() @@ -1160,6 +1251,12 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, } if fo.Comment != nil { op.Comment(*fo.Comment) + + commentVal, err := transformValue(coll.registry, fo.Comment, true, "comment") + if err != nil { + return nil, err + } + cursorOpts.Comment = commentVal } if fo.CursorType != nil { switch *fo.CursorType { @@ -1173,11 +1270,17 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, if fo.Hint != nil { hint, err := transformValue(coll.registry, fo.Hint, false, "hint") if err != nil { - closeImplicitSession(sess) return nil, err } op.Hint(hint) } + if fo.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, fo.Let, true, "let") + if err != nil { + return nil, err + } + op.Let(let) + } if fo.Limit != nil { limit := *fo.Limit if limit < 0 { @@ -1190,7 +1293,6 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, if fo.Max != nil { max, err := transformBsoncoreDocument(coll.registry, fo.Max, true, "max") if err != nil { - closeImplicitSession(sess) return nil, err } op.Max(max) @@ -1204,7 +1306,6 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, if fo.Min != nil { min, err := transformBsoncoreDocument(coll.registry, fo.Min, true, "min") if err != nil { - closeImplicitSession(sess) return nil, err } op.Min(min) @@ -1218,7 +1319,6 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, if fo.Projection != nil { proj, err := transformBsoncoreDocument(coll.registry, fo.Projection, true, "projection") if err != nil { - closeImplicitSession(sess) return nil, err } op.Projection(proj) @@ -1238,7 +1338,6 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, if fo.Sort != nil { sort, err := transformBsoncoreDocument(coll.registry, fo.Sort, false, "sort") if err != nil { - closeImplicitSession(sess) return nil, err } op.Sort(sort) @@ -1250,13 +1349,11 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, op = op.Retry(retry) if err = op.Execute(ctx); err != nil { - 
closeImplicitSession(sess) return nil, replaceErrors(err) } bc, err := op.Result(cursorOpts) if err != nil { - closeImplicitSession(sess) return nil, replaceErrors(err) } return newCursorWithSession(bc, coll.registry, sess) @@ -1270,7 +1367,7 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, // // The opts parameter can be used to specify options for this operation (see the options.FindOneOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/find/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/find/. func (coll *Collection) FindOne(ctx context.Context, filter interface{}, opts ...*options.FindOneOptions) *SingleResult { @@ -1278,9 +1375,12 @@ func (coll *Collection) FindOne(ctx context.Context, filter interface{}, ctx = context.Background() } - findOpts := make([]*options.FindOptions, len(opts)) - for i, opt := range opts { - findOpts[i] = &options.FindOptions{ + findOpts := make([]*options.FindOptions, 0, len(opts)) + for _, opt := range opts { + if opt == nil { + continue + } + findOpts = append(findOpts, &options.FindOptions{ AllowPartialResults: opt.AllowPartialResults, BatchSize: opt.BatchSize, Collation: opt.Collation, @@ -1299,7 +1399,7 @@ func (coll *Collection) FindOne(ctx context.Context, filter interface{}, Skip: opt.Skip, Snapshot: opt.Snapshot, Sort: opt.Sort, - } + }) } // Unconditionally send a limit to make sure only one document is returned and the cursor is not kept open // by the server. @@ -1373,7 +1473,7 @@ func (coll *Collection) findAndModify(ctx context.Context, op *operation.FindAnd // The opts parameter can be used to specify options for the operation (see the options.FindOneAndDeleteOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/findAndModify/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/findAndModify/. func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{}, opts ...*options.FindOneAndDeleteOptions) *SingleResult { @@ -1382,10 +1482,17 @@ func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{} return &SingleResult{err: err} } fod := options.MergeFindOneAndDeleteOptions(opts...) - op := operation.NewFindAndModify(f).Remove(true).ServerAPI(coll.client.serverAPI) + op := operation.NewFindAndModify(f).Remove(true).ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout) if fod.Collation != nil { op = op.Collation(bsoncore.Document(fod.Collation.ToDocument())) } + if fod.Comment != nil { + comment, err := transformValue(coll.registry, fod.Comment, true, "comment") + if err != nil { + return &SingleResult{err: err} + } + op = op.Comment(comment) + } if fod.MaxTime != nil { op = op.MaxTimeMS(int64(*fod.MaxTime / time.Millisecond)) } @@ -1410,6 +1517,13 @@ func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{} } op = op.Hint(hint) } + if fod.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, fod.Let, true, "let") + if err != nil { + return &SingleResult{err: err} + } + op = op.Let(let) + } return coll.findAndModify(ctx, op) } @@ -1422,12 +1536,12 @@ func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{} // ErrNoDocuments wil be returned. If the filter matches multiple documents, one will be selected from the matched set. 
// // The replacement parameter must be a document that will be used to replace the selected document. It cannot be nil -// and cannot contain any update operators (https://docs.mongodb.com/manual/reference/operator/update/). +// and cannot contain any update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). // // The opts parameter can be used to specify options for the operation (see the options.FindOneAndReplaceOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/findAndModify/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/findAndModify/. func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{}, replacement interface{}, opts ...*options.FindOneAndReplaceOptions) *SingleResult { @@ -1445,13 +1559,20 @@ func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{ fo := options.MergeFindOneAndReplaceOptions(opts...) op := operation.NewFindAndModify(f).Update(bsoncore.Value{Type: bsontype.EmbeddedDocument, Data: r}). - ServerAPI(coll.client.serverAPI) + ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout) if fo.BypassDocumentValidation != nil && *fo.BypassDocumentValidation { op = op.BypassDocumentValidation(*fo.BypassDocumentValidation) } if fo.Collation != nil { op = op.Collation(bsoncore.Document(fo.Collation.ToDocument())) } + if fo.Comment != nil { + comment, err := transformValue(coll.registry, fo.Comment, true, "comment") + if err != nil { + return &SingleResult{err: err} + } + op = op.Comment(comment) + } if fo.MaxTime != nil { op = op.MaxTimeMS(int64(*fo.MaxTime / time.Millisecond)) } @@ -1482,6 +1603,13 @@ func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{ } op = op.Hint(hint) } + if fo.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, fo.Let, true, "let") + if err != nil { + return &SingleResult{err: err} + } + op = op.Let(let) + } return coll.findAndModify(ctx, op) } @@ -1494,13 +1622,13 @@ func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{ // ErrNoDocuments wil be returned. If the filter matches multiple documents, one will be selected from the matched set. // // The update parameter must be a document containing update operators -// (https://docs.mongodb.com/manual/reference/operator/update/) and can be used to specify the modifications to be made +// (https://www.mongodb.com/docs/manual/reference/operator/update/) and can be used to specify the modifications to be made // to the selected document. It cannot be nil or empty. // // The opts parameter can be used to specify options for the operation (see the options.FindOneAndUpdateOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/findAndModify/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/findAndModify/. func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{}, update interface{}, opts ...*options.FindOneAndUpdateOptions) *SingleResult { @@ -1514,7 +1642,7 @@ func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{} } fo := options.MergeFindOneAndUpdateOptions(opts...) 
- op := operation.NewFindAndModify(f).ServerAPI(coll.client.serverAPI) + op := operation.NewFindAndModify(f).ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout) u, err := transformUpdateValue(coll.registry, update, true) if err != nil { @@ -1535,6 +1663,13 @@ func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{} if fo.Collation != nil { op = op.Collation(bsoncore.Document(fo.Collation.ToDocument())) } + if fo.Comment != nil { + comment, err := transformValue(coll.registry, fo.Comment, true, "comment") + if err != nil { + return &SingleResult{err: err} + } + op = op.Comment(comment) + } if fo.MaxTime != nil { op = op.MaxTimeMS(int64(*fo.MaxTime / time.Millisecond)) } @@ -1565,18 +1700,25 @@ func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{} } op = op.Hint(hint) } + if fo.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, fo.Let, true, "let") + if err != nil { + return &SingleResult{err: err} + } + op = op.Let(let) + } return coll.findAndModify(ctx, op) } // Watch returns a change stream for all changes on the corresponding collection. See -// https://docs.mongodb.com/manual/changeStreams/ for more information about change streams. +// https://www.mongodb.com/docs/manual/changeStreams/ for more information about change streams. // // The Collection must be configured with read concern majority or no read concern for a change stream to be created // successfully. // // The pipeline parameter must be an array of documents, each representing a pipeline stage. The pipeline cannot be -// nil but can be empty. The stage documents must all be non-nil. See https://docs.mongodb.com/manual/changeStreams/ for +// nil but can be empty. The stage documents must all be non-nil. See https://www.mongodb.com/docs/manual/changeStreams/ for // a list of pipeline stages that can be used with change streams. For a pipeline of bson.D documents, the // mongo.Pipeline{} type can be used. // @@ -1606,6 +1748,69 @@ func (coll *Collection) Indexes() IndexView { // Drop drops the collection on the server. This method ignores "namespace not found" errors so it is safe to drop // a collection that does not exist on the server. func (coll *Collection) Drop(ctx context.Context) error { + // Follow Client-Side Encryption specification to check for encryptedFields. + // Drop does not have an encryptedFields option. See: GODRIVER-2413. + // Check for encryptedFields from the client EncryptedFieldsMap. + // Check for encryptedFields from the server if EncryptedFieldsMap is set. + ef := coll.db.getEncryptedFieldsFromMap(coll.name) + if ef == nil && coll.db.client.encryptedFieldsMap != nil { + var err error + if ef, err = coll.db.getEncryptedFieldsFromServer(ctx, coll.name); err != nil { + return err + } + } + + if ef != nil { + return coll.dropEncryptedCollection(ctx, ef) + } + + return coll.drop(ctx) +} + +// dropEncryptedCollection drops a collection with EncryptedFields. +func (coll *Collection) dropEncryptedCollection(ctx context.Context, ef interface{}) error { + efBSON, err := transformBsoncoreDocument(coll.registry, ef, true /* mapAllowed */, "encryptedFields") + if err != nil { + return fmt.Errorf("error transforming document: %v", err) + } + + // Drop the three encryption-related, associated collections: `escCollection`, `eccCollection` and `ecocCollection`. + // Drop ESCCollection. 
+ escCollection, err := internal.GetEncryptedStateCollectionName(efBSON, coll.name, internal.EncryptedStateCollection) + if err != nil { + return err + } + if err := coll.db.Collection(escCollection).drop(ctx); err != nil { + return err + } + + // Drop ECCCollection. + eccCollection, err := internal.GetEncryptedStateCollectionName(efBSON, coll.name, internal.EncryptedCacheCollection) + if err != nil { + return err + } + if err := coll.db.Collection(eccCollection).drop(ctx); err != nil { + return err + } + + // Drop ECOCCollection. + ecocCollection, err := internal.GetEncryptedStateCollectionName(efBSON, coll.name, internal.EncryptedCompactionCollection) + if err != nil { + return err + } + if err := coll.db.Collection(ecocCollection).drop(ctx); err != nil { + return err + } + + // Drop the data collection. + if err := coll.drop(ctx); err != nil { + return err + } + return nil +} + +// drop drops a collection without EncryptedFields. +func (coll *Collection) drop(ctx context.Context) error { if ctx == nil { ctx = context.Background() } @@ -1680,3 +1885,16 @@ func makeReadPrefSelector(sess *session.Client, selector description.ServerSelec return makePinnedSelector(sess, selector) } + +func makeOutputAggregateSelector(sess *session.Client, rp *readpref.ReadPref, localThreshold time.Duration) description.ServerSelectorFunc { + if sess != nil && sess.TransactionRunning() { + // Use current transaction's read preference if available + rp = sess.CurrentRp + } + + selector := description.CompositeSelector([]description.ServerSelector{ + description.OutputAggregateSelector(rp), + description.LatencySelector(localThreshold), + }) + return makePinnedSelector(sess, selector) +} diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/crypt_retrievers.go b/vendor/go.mongodb.org/mongo-driver/mongo/crypt_retrievers.go index e29523e48..5e96da731 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/crypt_retrievers.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/crypt_retrievers.go @@ -18,6 +18,9 @@ type keyRetriever struct { } func (kr *keyRetriever) cryptKeys(ctx context.Context, filter bsoncore.Document) ([]bsoncore.Document, error) { + // Remove the explicit session from the context if one is set. + // The explicit session may be from a different client. + ctx = NewSessionContext(ctx, nil) cursor, err := kr.coll.Find(ctx, filter) if err != nil { return nil, EncryptionKeyVaultError{Wrapped: err} @@ -43,6 +46,9 @@ type collInfoRetriever struct { } func (cir *collInfoRetriever) cryptCollInfo(ctx context.Context, db string, filter bsoncore.Document) (bsoncore.Document, error) { + // Remove the explicit session from the context if one is set. + // The explicit session may be from a different client. 
+ ctx = NewSessionContext(ctx, nil) cursor, err := cir.client.Database(db).ListCollections(ctx, filter) if err != nil { return nil, err diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go b/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go index 3ec03baf4..d21005fed 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go @@ -15,6 +15,7 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" + "go.mongodb.org/mongo-driver/x/bsonx" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/session" @@ -67,6 +68,47 @@ func newEmptyCursor() *Cursor { return &Cursor{bc: driver.NewEmptyBatchCursor()} } +// NewCursorFromDocuments creates a new Cursor pre-loaded with the provided documents, error and registry. If no registry is provided, +// bson.DefaultRegistry will be used. +// +// The documents parameter must be a slice of documents. The slice may be nil or empty, but all elements must be non-nil. +func NewCursorFromDocuments(documents []interface{}, err error, registry *bsoncodec.Registry) (*Cursor, error) { + if registry == nil { + registry = bson.DefaultRegistry + } + + // Convert documents slice to a sequence-style byte array. + var docsBytes []byte + for _, doc := range documents { + switch t := doc.(type) { + case nil: + return nil, ErrNilDocument + case bsonx.Doc: + doc = t.Copy() + case []byte: + // Slight optimization so we'll just use MarshalBSON and not go through the codec machinery. + doc = bson.Raw(t) + } + var marshalErr error + docsBytes, marshalErr = bson.MarshalAppendWithRegistry(registry, docsBytes, doc) + if marshalErr != nil { + return nil, marshalErr + } + } + + c := &Cursor{ + bc: driver.NewBatchCursorFromDocuments(docsBytes), + registry: registry, + err: err, + } + + // Initialize batch and batchLength here. The underlying batch cursor will be preloaded with the + // provided contents, and thus already has a batch before calls to Next/TryNext. + c.batch = c.bc.Batch() + c.batchLength = c.bc.Batch().DocumentCount() + return c, nil +} + // ID returns the ID of this cursor, or 0 if the cursor has been closed or exhausted. func (c *Cursor) ID() int64 { return c.bc.ID() } @@ -83,7 +125,7 @@ func (c *Cursor) Next(ctx context.Context) bool { // TryNext attempts to get the next document for this cursor. It returns true if there were no errors and the next // document is available. This is only recommended for use with tailable cursors as a non-blocking alternative to -// Next. See https://docs.mongodb.com/manual/core/tailable-cursors/ for more information about tailable cursors. +// Next. See https://www.mongodb.com/docs/manual/core/tailable-cursors/ for more information about tailable cursors. // // TryNext returns false if the cursor is exhausted, an error occurs when getting results from the server, the next // document is not yet available, or ctx expires. If ctx expires, the error will be set to ctx.Err(). 
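NewCursorFromDocuments, added above, makes it easy to fabricate query results (for example in unit tests) without a server round trip. A minimal sketch; per the doc comment, a nil registry falls back to bson.DefaultRegistry:

func fakeCursor() (*mongo.Cursor, error) {
	docs := []interface{}{
		bson.D{{"_id", 1}, {"name", "a"}},
		bson.D{{"_id", 2}, {"name", "b"}},
	}
	// nil error and nil registry: the cursor is pre-loaded with both documents
	// and iterates them via Next/TryNext like a server-backed cursor.
	return mongo.NewCursorFromDocuments(docs, nil, nil)
}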
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/database.go b/vendor/go.mongodb.org/mongo-driver/mongo/database.go index 207873344..57b5417fd 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/database.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/database.go @@ -13,12 +13,12 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" + "go.mongodb.org/mongo-driver/internal" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readconcern" "go.mongodb.org/mongo-driver/mongo/readpref" "go.mongodb.org/mongo-driver/mongo/writeconcern" - "go.mongodb.org/mongo-driver/x/bsonx" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/operation" @@ -107,12 +107,12 @@ func (db *Database) Collection(name string, opts ...*options.CollectionOptions) // The pipeline parameter must be a slice of documents, each representing an aggregation stage. The pipeline // cannot be nil but can be empty. The stage documents must all be non-nil. For a pipeline of bson.D documents, the // mongo.Pipeline type can be used. See -// https://docs.mongodb.com/manual/reference/operator/aggregation-pipeline/#db-aggregate-stages for a list of valid +// https://www.mongodb.com/docs/manual/reference/operator/aggregation-pipeline/#db-aggregate-stages for a list of valid // stages in database-level aggregations. // // The opts parameter can be used to specify options for this operation (see the options.AggregateOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/aggregate/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/aggregate/. func (db *Database) Aggregate(ctx context.Context, pipeline interface{}, opts ...*options.AggregateOptions) (*Cursor, error) { a := aggregateParams{ @@ -176,7 +176,8 @@ func (db *Database) processRunCommand(ctx context.Context, cmd interface{}, return op.Session(sess).CommandMonitor(db.client.monitor). ServerSelector(readSelect).ClusterClock(db.client.clock). Database(db.name).Deployment(db.client.deployment).ReadConcern(db.readConcern). - Crypt(db.client.cryptFLE).ReadPreference(ro.ReadPreference).ServerAPI(db.client.serverAPI), sess, nil + Crypt(db.client.cryptFLE).ReadPreference(ro.ReadPreference).ServerAPI(db.client.serverAPI). + Timeout(db.client.timeout), sess, nil } // RunCommand executes the given command against the database. This function does not obey the Database's read @@ -184,11 +185,13 @@ func (db *Database) processRunCommand(ctx context.Context, cmd interface{}, // // The runCommand parameter must be a document for the command to be executed. It cannot be nil. // This must be an order-preserving type such as bson.D. Map types such as bson.M are not valid. -// If the command document contains a session ID or any transaction-specific fields, the behavior is undefined. -// Specifying API versioning options in the command document and declaring an API version on the client is not supported. -// The behavior of RunCommand is undefined in this case. // // The opts parameter can be used to specify options for this operation (see the options.RunCmdOptions documentation). 
+// +// The behavior of RunCommand is undefined if the command document contains any of the following: +// - A session ID or any transaction-specific fields +// - API versioning options when an API version is already declared on the Client +// - maxTimeMS when Timeout is set on the Client func (db *Database) RunCommand(ctx context.Context, runCommand interface{}, opts ...*options.RunCmdOptions) *SingleResult { if ctx == nil { ctx = context.Background() @@ -217,9 +220,13 @@ func (db *Database) RunCommand(ctx context.Context, runCommand interface{}, opts // // The runCommand parameter must be a document for the command to be executed. It cannot be nil. // This must be an order-preserving type such as bson.D. Map types such as bson.M are not valid. -// If the command document contains a session ID or any transaction-specific fields, the behavior is undefined. // // The opts parameter can be used to specify options for this operation (see the options.RunCmdOptions documentation). +// +// The behavior of RunCommandCursor is undefined if the command document contains any of the following: +// - A session ID or any transaction-specific fields +// - API versioning options when an API version is already declared on the Client +// - maxTimeMS when Timeout is set on the Client func (db *Database) RunCommandCursor(ctx context.Context, runCommand interface{}, opts ...*options.RunCmdOptions) (*Cursor, error) { if ctx == nil { ctx = context.Background() @@ -302,7 +309,10 @@ func (db *Database) Drop(ctx context.Context) error { // The opts parameter can be used to specify options for the operation (see the options.ListCollectionsOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listCollections/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listCollections/. +// +// BUG(benjirewis): ListCollectionSpecifications prevents listing more than 100 collections per database when running +// against MongoDB version 2.6. func (db *Database) ListCollectionSpecifications(ctx context.Context, filter interface{}, opts ...*options.ListCollectionsOptions) ([]*CollectionSpecification, error) { @@ -336,7 +346,10 @@ func (db *Database) ListCollectionSpecifications(ctx context.Context, filter int // The opts parameter can be used to specify options for the operation (see the options.ListCollectionsOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listCollections/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listCollections/. +// +// BUG(benjirewis): ListCollections prevents listing more than 100 collections per database when running against +// MongoDB version 2.6. func (db *Database) ListCollections(ctx context.Context, filter interface{}, opts ...*options.ListCollectionsOptions) (*Cursor, error) { if ctx == nil { ctx = context.Background() @@ -372,7 +385,7 @@ func (db *Database) ListCollections(ctx context.Context, filter interface{}, opt Session(sess).ReadPreference(db.readPreference).CommandMonitor(db.client.monitor). ServerSelector(selector).ClusterClock(db.client.clock). Database(db.name).Deployment(db.client.deployment).Crypt(db.client.cryptFLE). 
- ServerAPI(db.client.serverAPI) + ServerAPI(db.client.serverAPI).Timeout(db.client.timeout) cursorOpts := db.client.createBaseCursorOptions() if lco.NameOnly != nil { @@ -382,6 +395,9 @@ func (db *Database) ListCollections(ctx context.Context, filter interface{}, opt cursorOpts.BatchSize = *lco.BatchSize op = op.BatchSize(*lco.BatchSize) } + if lco.AuthorizedCollections != nil { + op = op.AuthorizedCollections(*lco.AuthorizedCollections) + } retry := driver.RetryNone if db.client.retryReads { @@ -414,7 +430,10 @@ func (db *Database) ListCollections(ctx context.Context, filter interface{}, opt // The opts parameter can be used to specify options for the operation (see the options.ListCollectionsOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listCollections/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listCollections/. +// +// BUG(benjirewis): ListCollectionNames prevents listing more than 100 collections per database when running against +// MongoDB version 2.6. func (db *Database) ListCollectionNames(ctx context.Context, filter interface{}, opts ...*options.ListCollectionsOptions) ([]string, error) { opts = append(opts, options.ListCollections().SetNameOnly(true)) @@ -427,19 +446,13 @@ func (db *Database) ListCollectionNames(ctx context.Context, filter interface{}, names := make([]string, 0) for res.Next(ctx) { - next := &bsonx.Doc{} - err = res.Decode(next) - if err != nil { - return nil, err - } - - elem, err := next.LookupErr("name") + elem, err := res.Current.LookupErr("name") if err != nil { return nil, err } - if elem.Type() != bson.TypeString { - return nil, fmt.Errorf("incorrect type for 'name'. got %v. want %v", elem.Type(), bson.TypeString) + if elem.Type != bson.TypeString { + return nil, fmt.Errorf("incorrect type for 'name'. got %v. want %v", elem.Type, bson.TypeString) } elemName := elem.StringValue() @@ -466,13 +479,13 @@ func (db *Database) WriteConcern() *writeconcern.WriteConcern { } // Watch returns a change stream for all changes to the corresponding database. See -// https://docs.mongodb.com/manual/changeStreams/ for more information about change streams. +// https://www.mongodb.com/docs/manual/changeStreams/ for more information about change streams. // // The Database must be configured with read concern majority or no read concern for a change stream to be created // successfully. // // The pipeline parameter must be a slice of documents, each representing a pipeline stage. The pipeline cannot be -// nil but can be empty. The stage documents must all be non-nil. See https://docs.mongodb.com/manual/changeStreams/ for +// nil but can be empty. The stage documents must all be non-nil. See https://www.mongodb.com/docs/manual/changeStreams/ for // a list of pipeline stages that can be used with change streams. For a pipeline of bson.D documents, the // mongo.Pipeline{} type can be used. // @@ -500,8 +513,141 @@ func (db *Database) Watch(ctx context.Context, pipeline interface{}, // The opts parameter can be used to specify options for the operation (see the options.CreateCollectionOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/create/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/create/. 
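A short sketch of the AuthorizedCollections pass-through added above (names assumed): combined with the nameOnly flag that ListCollectionNames always sets, this lets a user list only the collections they are authorized to act on.

func authorizedNames(ctx context.Context, db *mongo.Database) ([]string, error) {
	return db.ListCollectionNames(ctx, bson.D{},
		options.ListCollections().SetAuthorizedCollections(true))
}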
func (db *Database) CreateCollection(ctx context.Context, name string, opts ...*options.CreateCollectionOptions) error { + cco := options.MergeCreateCollectionOptions(opts...) + // Follow Client-Side Encryption specification to check for encryptedFields. + // Check for encryptedFields from create options. + ef := cco.EncryptedFields + // Check for encryptedFields from the client EncryptedFieldsMap. + if ef == nil { + ef = db.getEncryptedFieldsFromMap(name) + } + if ef != nil { + return db.createCollectionWithEncryptedFields(ctx, name, ef, opts...) + } + + return db.createCollection(ctx, name, opts...) +} + +// getEncryptedFieldsFromServer tries to get an "encryptedFields" document associated with collectionName by running the "listCollections" command. +// Returns nil and no error if the listCollections command succeeds, but "encryptedFields" is not present. +func (db *Database) getEncryptedFieldsFromServer(ctx context.Context, collectionName string) (interface{}, error) { + // Check if collection has an EncryptedFields configured server-side. + collSpecs, err := db.ListCollectionSpecifications(ctx, bson.D{{"name", collectionName}}) + if err != nil { + return nil, err + } + if len(collSpecs) == 0 { + return nil, nil + } + if len(collSpecs) > 1 { + return nil, fmt.Errorf("expected 1 or 0 results from listCollections, got %v", len(collSpecs)) + } + collSpec := collSpecs[0] + rawValue, err := collSpec.Options.LookupErr("encryptedFields") + if err == bsoncore.ErrElementNotFound { + return nil, nil + } else if err != nil { + return nil, err + } + + encryptedFields, ok := rawValue.DocumentOK() + if !ok { + return nil, fmt.Errorf("expected encryptedFields of %v to be document, got %v", collectionName, rawValue.Type) + } + + return encryptedFields, nil +} + +// getEncryptedFieldsFromServer tries to get an "encryptedFields" document associated with collectionName by checking the client EncryptedFieldsMap. +// Returns nil and no error if an EncryptedFieldsMap is not configured, or does not contain an entry for collectionName. +func (db *Database) getEncryptedFieldsFromMap(collectionName string) interface{} { + // Check the EncryptedFieldsMap + efMap := db.client.encryptedFieldsMap + if efMap == nil { + return nil + } + + namespace := db.name + "." + collectionName + + ef, ok := efMap[namespace] + if ok { + return ef + } + return nil +} + +// createCollectionWithEncryptedFields creates a collection with an EncryptedFields. +func (db *Database) createCollectionWithEncryptedFields(ctx context.Context, name string, ef interface{}, opts ...*options.CreateCollectionOptions) error { + efBSON, err := transformBsoncoreDocument(db.registry, ef, true /* mapAllowed */, "encryptedFields") + if err != nil { + return fmt.Errorf("error transforming document: %v", err) + } + + // Create the three encryption-related, associated collections: `escCollection`, `eccCollection` and `ecocCollection`. + + stateCollectionOpts := options.CreateCollection(). + SetClusteredIndex(bson.D{{"key", bson.D{{"_id", 1}}}, {"unique", true}}) + // Create ESCCollection. + escCollection, err := internal.GetEncryptedStateCollectionName(efBSON, name, internal.EncryptedStateCollection) + if err != nil { + return err + } + + if err := db.createCollection(ctx, escCollection, stateCollectionOpts); err != nil { + return err + } + + // Create ECCCollection. 
+ eccCollection, err := internal.GetEncryptedStateCollectionName(efBSON, name, internal.EncryptedCacheCollection) + if err != nil { + return err + } + + if err := db.createCollection(ctx, eccCollection, stateCollectionOpts); err != nil { + return err + } + + // Create ECOCCollection. + ecocCollection, err := internal.GetEncryptedStateCollectionName(efBSON, name, internal.EncryptedCompactionCollection) + if err != nil { + return err + } + + if err := db.createCollection(ctx, ecocCollection, stateCollectionOpts); err != nil { + return err + } + + // Create a data collection with the 'encryptedFields' option. + op, err := db.createCollectionOperation(name, opts...) + if err != nil { + return err + } + + op.EncryptedFields(efBSON) + if err := db.executeCreateOperation(ctx, op); err != nil { + return err + } + + // Create an index on the __safeContent__ field in the collection @collectionName. + if _, err := db.Collection(name).Indexes().CreateOne(ctx, IndexModel{Keys: bson.D{{"__safeContent__", 1}}}); err != nil { + return fmt.Errorf("error creating safeContent index: %v", err) + } + + return nil +} + +// createCollection creates a collection without EncryptedFields. +func (db *Database) createCollection(ctx context.Context, name string, opts ...*options.CreateCollectionOptions) error { + op, err := db.createCollectionOperation(name, opts...) + if err != nil { + return err + } + return db.executeCreateOperation(ctx, op) +} + +func (db *Database) createCollectionOperation(name string, opts ...*options.CreateCollectionOptions) (*operation.Create, error) { cco := options.MergeCreateCollectionOptions(opts...) op := operation.NewCreate(name).ServerAPI(db.client.serverAPI) @@ -511,19 +657,26 @@ func (db *Database) CreateCollection(ctx context.Context, name string, opts ...* if cco.Collation != nil { op.Collation(bsoncore.Document(cco.Collation.ToDocument())) } + if cco.ChangeStreamPreAndPostImages != nil { + csppi, err := transformBsoncoreDocument(db.registry, cco.ChangeStreamPreAndPostImages, true, "changeStreamPreAndPostImages") + if err != nil { + return nil, err + } + op.ChangeStreamPreAndPostImages(csppi) + } if cco.DefaultIndexOptions != nil { idx, doc := bsoncore.AppendDocumentStart(nil) if cco.DefaultIndexOptions.StorageEngine != nil { storageEngine, err := transformBsoncoreDocument(db.registry, cco.DefaultIndexOptions.StorageEngine, true, "storageEngine") if err != nil { - return err + return nil, err } doc = bsoncore.AppendDocumentElement(doc, "storageEngine", storageEngine) } doc, err := bsoncore.AppendDocumentEnd(doc, idx) if err != nil { - return err + return nil, err } op.IndexOptionDefaults(doc) @@ -537,7 +690,7 @@ func (db *Database) CreateCollection(ctx context.Context, name string, opts ...* if cco.StorageEngine != nil { storageEngine, err := transformBsoncoreDocument(db.registry, cco.StorageEngine, true, "storageEngine") if err != nil { - return err + return nil, err } op.StorageEngine(storageEngine) } @@ -550,7 +703,7 @@ func (db *Database) CreateCollection(ctx context.Context, name string, opts ...* if cco.Validator != nil { validator, err := transformBsoncoreDocument(db.registry, cco.Validator, true, "validator") if err != nil { - return err + return nil, err } op.Validator(validator) } @@ -570,17 +723,24 @@ func (db *Database) CreateCollection(ctx context.Context, name string, opts ...* doc, err := bsoncore.AppendDocumentEnd(doc, idx) if err != nil { - return err + return nil, err } op.TimeSeries(doc) } + if cco.ClusteredIndex != nil { + clusteredIndex, err := 
transformBsoncoreDocument(db.registry, cco.ClusteredIndex, true, "clusteredIndex") + if err != nil { + return nil, err + } + op.ClusteredIndex(clusteredIndex) + } - return db.executeCreateOperation(ctx, op) + return op, nil } // CreateView executes a create command to explicitly create a view on the server. See -// https://docs.mongodb.com/manual/core/views/ for more information about views. This method requires driver version >= +// https://www.mongodb.com/docs/manual/core/views/ for more information about views. This method requires driver version >= // 1.4.0 and MongoDB version >= 3.4. // // The viewName parameter specifies the name of the view to create. diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/description/server.go b/vendor/go.mongodb.org/mongo-driver/mongo/description/server.go index 7de7df5e6..405efe94b 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/description/server.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/description/server.go @@ -25,7 +25,7 @@ type SelectedServer struct { Kind TopologyKind } -// Server contains information about a node in a cluster. This is created from isMaster command responses. If the value +// Server contains information about a node in a cluster. This is created from hello command responses. If the value // of the Kind field is LoadBalancer, only the Addr and Kind fields will be set. All other fields will be set to the // zero value of the field's type. type Server struct { @@ -38,6 +38,7 @@ type Server struct { CanonicalAddr address.Address ElectionID primitive.ObjectID HeartbeatInterval time.Duration + HelloOK bool Hosts []string LastError error LastUpdateTime time.Time @@ -47,6 +48,7 @@ type Server struct { MaxMessageSize uint32 Members []address.Address Passives []string + Passive bool Primary address.Address ReadOnly bool ServiceID *primitive.ObjectID // Only set for servers that are deployed behind a load balancer. @@ -59,7 +61,7 @@ type Server struct { WireVersion *VersionRange } -// NewServer creates a new server description from the given isMaster command response. +// NewServer creates a new server description from the given hello command response. 
func NewServer(addr address.Address, response bson.Raw) Server { desc := Server{Addr: addr, CanonicalAddr: addr, LastUpdateTime: time.Now().UTC()} elements, err := response.Elements() @@ -99,6 +101,12 @@ func NewServer(addr address.Address, response bson.Raw) Server { desc.LastError = fmt.Errorf("expected 'electionId' to be a objectID but it's a BSON %s", element.Value().Type) return desc } + case "helloOk": + desc.HelloOK, ok = element.Value().BooleanOK() + if !ok { + desc.LastError = fmt.Errorf("expected 'helloOk' to be a boolean but it's a BSON %s", element.Value().Type) + return desc + } case "hidden": hidden, ok = element.Value().BooleanOK() if !ok { @@ -118,10 +126,10 @@ func NewServer(addr address.Address, response bson.Raw) Server { desc.LastError = fmt.Errorf("expected 'isWritablePrimary' to be a boolean but it's a BSON %s", element.Value().Type) return desc } - case "ismaster": + case internal.LegacyHelloLowercase: isWritablePrimary, ok = element.Value().BooleanOK() if !ok { - desc.LastError = fmt.Errorf("expected 'ismaster' to be a boolean but it's a BSON %s", element.Value().Type) + desc.LastError = fmt.Errorf("expected legacy hello to be a boolean but it's a BSON %s", element.Value().Type) return desc } case "isreplicaset": @@ -215,6 +223,12 @@ func NewServer(addr address.Address, response bson.Raw) Server { desc.LastError = err return desc } + case "passive": + desc.Passive, ok = element.Value().BooleanOK() + if !ok { + desc.LastError = fmt.Errorf("expected 'passive' to be a boolean but it's a BSON %s", element.Value().Type) + return desc + } case "primary": primary, ok := element.Value().StringValueOK() if !ok { @@ -272,10 +286,6 @@ func NewServer(addr address.Address, response bson.Raw) Server { desc.LastError = err return desc } - - if internal.SetMockServiceID { - desc.ServiceID = &desc.TopologyVersion.ProcessID - } } } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go b/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go index f0c728e4b..8e810cb9c 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go @@ -130,10 +130,20 @@ func WriteSelector() ServerSelector { // ReadPrefSelector selects servers based on the provided read preference. func ReadPrefSelector(rp *readpref.ReadPref) ServerSelector { + return readPrefSelector(rp, false) +} + +// OutputAggregateSelector selects servers based on the provided read preference given that the underlying operation is +// aggregate with an output stage. +func OutputAggregateSelector(rp *readpref.ReadPref) ServerSelector { + return readPrefSelector(rp, true) +} + +func readPrefSelector(rp *readpref.ReadPref, isOutputAggregate bool) ServerSelector { return ServerSelectorFunc(func(t Topology, candidates []Server) ([]Server, error) { if t.Kind == LoadBalanced { // In LoadBalanced mode, there should only be one server in the topology and it must be selected. We check - // this before checking MaxStaleness support becuase there's no monitoring in this mode, so the candidate + // this before checking MaxStaleness support because there's no monitoring in this mode, so the candidate // server wouldn't have a wire version set, which would result in an error. 
return candidates, nil } @@ -152,7 +162,7 @@ func ReadPrefSelector(rp *readpref.ReadPref) ServerSelector { case Single: return candidates, nil case ReplicaSetNoPrimary, ReplicaSetWithPrimary: - return selectForReplicaSet(rp, t, candidates) + return selectForReplicaSet(rp, isOutputAggregate, t, candidates) case Sharded: return selectByKind(candidates, Mongos), nil } @@ -170,11 +180,21 @@ func maxStalenessSupported(wireVersion *VersionRange) error { return nil } -func selectForReplicaSet(rp *readpref.ReadPref, t Topology, candidates []Server) ([]Server, error) { +func selectForReplicaSet(rp *readpref.ReadPref, isOutputAggregate bool, t Topology, candidates []Server) ([]Server, error) { if err := verifyMaxStaleness(rp, t); err != nil { return nil, err } + // If underlying operation is an aggregate with an output stage, only apply read preference + // if all candidates are 5.0+. Otherwise, operate under primary read preference. + if isOutputAggregate { + for _, s := range candidates { + if s.WireVersion.Max < 13 { + return selectByKind(candidates, RSPrimary), nil + } + } + } + switch rp.Mode() { case readpref.PrimaryMode: return selectByKind(candidates, RSPrimary), nil diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/doc.go b/vendor/go.mongodb.org/mongo-driver/mongo/doc.go index d184a575a..76a063fac 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/doc.go @@ -105,8 +105,14 @@ // // Note: Auto encryption is an enterprise-only feature. // -// The libmongocrypt C library is required when using client-side encryption. libmongocrypt version 1.1.0 or higher is -// required when using driver version 1.5.0 or higher. To install libmongocrypt, follow the instructions for your +// The libmongocrypt C library is required when using client-side encryption. Specific versions of libmongocrypt +// are required for different versions of the Go Driver: +// - Go Driver v1.2.0 requires libmongocrypt v1.0.0 or higher +// - Go Driver v1.5.0 requires libmongocrypt v1.1.0 or higher +// - Go Driver v1.8.0 requires libmongocrypt v1.3.0 or higher +// - Go Driver v1.10.0 requires libmongocrypt v1.5.0 or higher +// +// To install libmongocrypt, follow the instructions for your // operating system: // // 1. Linux: follow the instructions listed at @@ -117,6 +123,7 @@ // to install packages via brew and compile the libmongocrypt source code. // // 3. Windows: +// // mkdir -p c:/libmongocrypt/bin // mkdir -p c:/libmongocrypt/include // @@ -128,18 +135,8 @@ // cp ./include/mongocrypt/*.h c:/libmongocrypt/include // export PATH=$PATH:/cygdrive/c/libmongocrypt/bin // -// libmongocrypt communicates with the mongocryptd process for automatic encryption. This process can be started manually -// or auto-spawned by the driver itself. To enable auto-spawning, ensure the process binary is on the PATH. To start it -// manually, use AutoEncryptionOptions: -// -// aeo := options.AutoEncryption() -// mongocryptdOpts := map[string]interface{}{ -// "mongocryptdBypassSpawn": true, -// } -// aeo.SetExtraOptions(mongocryptdOpts) -// To specify a process URI for mongocryptd, the "mongocryptdURI" option can be passed in the ExtraOptions map as well. -// See the ClientSideEncryption and ClientSideEncryptionCreateKey examples below for code samples about using this -// feature. +// libmongocrypt communicates with the mongocryptd process or mongo_crypt shared library for automatic encryption. 
+// See AutoEncryptionOpts.SetExtraOptions for options to configure use of mongocryptd or mongo_crypt. // -// [1] See https://docs.mongodb.com/manual/reference/connection-string/#dns-seedlist-connection-format +// [1] See https://www.mongodb.com/docs/manual/reference/connection-string/#dns-seedlist-connection-format package mongo diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/errors.go b/vendor/go.mongodb.org/mongo-driver/mongo/errors.go index 2c3ae1579..33e23573f 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/errors.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/errors.go @@ -56,6 +56,7 @@ func replaceErrors(err error) error { Labels: de.Labels, Name: de.Name, Wrapped: de.Wrapped, + Raw: bson.Raw(de.Raw), } } if qe, ok := err.(driver.QueryFailureError); ok { @@ -63,6 +64,7 @@ func replaceErrors(err error) error { ce := CommandError{ Name: qe.Message, Wrapped: qe.Wrapped, + Raw: bson.Raw(qe.Response), } dollarErr, err := qe.Response.LookupErr("$err") @@ -102,6 +104,9 @@ func IsTimeout(err error) bool { if err == context.DeadlineExceeded { return true } + if err == driver.ErrDeadlineWouldBeExceeded { + return true + } if ne, ok := err.(net.Error); ok { return ne.Timeout() } @@ -207,6 +212,7 @@ type ServerError interface { } var _ ServerError = CommandError{} +var _ ServerError = WriteError{} var _ ServerError = WriteException{} var _ ServerError = BulkWriteException{} @@ -217,6 +223,7 @@ type CommandError struct { Labels []string // Categories to which the error belongs Name string // A human-readable name corresponding to the error code Wrapped error // The underlying error, if one exists. + Raw bson.Raw // The original server response containing the error. } // Error implements the error interface. @@ -276,6 +283,9 @@ type WriteError struct { Code int Message string Details bson.Raw + + // The original write error from the server response. + Raw bson.Raw } func (we WriteError) Error() string { @@ -286,6 +296,30 @@ func (we WriteError) Error() string { return msg } +// HasErrorCode returns true if the error has the specified code. +func (we WriteError) HasErrorCode(code int) bool { + return we.Code == code +} + +// HasErrorLabel returns true if the error contains the specified label. WriteErrors do not contain labels, +// so we always return false. +func (we WriteError) HasErrorLabel(label string) bool { + return false +} + +// HasErrorMessage returns true if the error contains the specified message. +func (we WriteError) HasErrorMessage(message string) bool { + return strings.Contains(we.Message, message) +} + +// HasErrorCodeWithMessage returns true if the error has the specified code and Message contains the specified message. +func (we WriteError) HasErrorCodeWithMessage(code int, message string) bool { + return we.Code == code && strings.Contains(we.Message, message) +} + +// serverError implements the ServerError interface. +func (we WriteError) serverError() {} + // WriteErrors is a group of write errors that occurred during execution of a write operation. type WriteErrors []WriteError @@ -307,6 +341,7 @@ func writeErrorsFromDriverWriteErrors(errs driver.WriteErrors) WriteErrors { Code: int(err.Code), Message: err.Message, Details: bson.Raw(err.Details), + Raw: bson.Raw(err.Raw), }) } return wes @@ -319,6 +354,7 @@ type WriteConcernError struct { Code int Message string Details bson.Raw + Raw bson.Raw // The original write concern error from the server response. } // Error implements the error interface. 
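Because WriteError now satisfies ServerError (and carries the Raw server response), individual write errors can be inspected with the same helpers as CommandError. A hedged sketch, assuming err came from an insert or update and that "errors" is imported:

func isDuplicateKey(err error) bool {
	var we mongo.WriteException
	if errors.As(err, &we) {
		for _, e := range we.WriteErrors {
			if e.HasErrorCode(11000) { // E11000: duplicate key
				return true
			}
		}
	}
	return false
}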
@@ -340,6 +376,9 @@ type WriteException struct { // The categories to which the exception belongs. Labels []string + + // The original server response containing the error. + Raw bson.Raw } // Error implements the error interface. @@ -426,6 +465,7 @@ func convertDriverWriteConcernError(wce *driver.WriteConcernError) *WriteConcern Code: int(wce.Code), Message: wce.Message, Details: bson.Raw(wce.Details), + Raw: bson.Raw(wce.Raw), } } @@ -559,6 +599,7 @@ func processWriteError(err error) (returnResult, error) { WriteConcernError: convertDriverWriteConcernError(tt.WriteConcernError), WriteErrors: writeErrorsFromDriverWriteErrors(tt.WriteErrors), Labels: tt.Labels, + Raw: bson.Raw(tt.Raw), } default: return rrNone, replaceErrors(err) diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go b/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go index 18fc52443..a393c7e7c 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go @@ -45,7 +45,7 @@ type IndexView struct { // IndexModel represents a new index to be created. type IndexModel struct { // A document describing which keys should be used for the index. It cannot be nil. This must be an order-preserving - // type such as bson.D. Map types such as bson.M are not valid. See https://docs.mongodb.com/manual/indexes/#indexes + // type such as bson.D. Map types such as bson.M are not valid. See https://www.mongodb.com/docs/manual/indexes/#indexes // for examples of valid documents. Keys interface{} @@ -65,7 +65,7 @@ func isNamespaceNotFoundError(err error) bool { // The opts parameter can be used to specify options for this operation (see the options.ListIndexesOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listIndexes/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listIndexes/. func (iv IndexView) List(ctx context.Context, opts ...*options.ListIndexesOptions) (*Cursor, error) { if ctx == nil { ctx = context.Background() @@ -95,7 +95,8 @@ func (iv IndexView) List(ctx context.Context, opts ...*options.ListIndexesOption Session(sess).CommandMonitor(iv.coll.client.monitor). ServerSelector(selector).ClusterClock(iv.coll.client.clock). Database(iv.coll.db.name).Collection(iv.coll.name). - Deployment(iv.coll.client.deployment).ServerAPI(iv.coll.client.serverAPI) + Deployment(iv.coll.client.deployment).ServerAPI(iv.coll.client.serverAPI). + Timeout(iv.coll.client.timeout) cursorOpts := iv.coll.client.createBaseCursorOptions() lio := options.MergeListIndexesOptions(opts...) @@ -175,7 +176,7 @@ func (iv IndexView) CreateOne(ctx context.Context, model IndexModel, opts ...*op // The opts parameter can be used to specify options for this operation (see the options.CreateIndexesOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/createIndexes/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/createIndexes/. func (iv IndexView) CreateMany(ctx context.Context, models []IndexModel, opts ...*options.CreateIndexesOptions) ([]string, error) { names := make([]string, 0, len(models)) @@ -256,7 +257,8 @@ func (iv IndexView) CreateMany(ctx context.Context, models []IndexModel, opts .. op := operation.NewCreateIndexes(indexes). Session(sess).WriteConcern(wc).ClusterClock(iv.coll.client.clock). 
Database(iv.coll.db.name).Collection(iv.coll.name).CommandMonitor(iv.coll.client.monitor). - Deployment(iv.coll.client.deployment).ServerSelector(selector).ServerAPI(iv.coll.client.serverAPI) + Deployment(iv.coll.client.deployment).ServerSelector(selector).ServerAPI(iv.coll.client.serverAPI). + Timeout(iv.coll.client.timeout) if option.MaxTime != nil { op.MaxTimeMS(int64(*option.MaxTime / time.Millisecond)) @@ -272,7 +274,8 @@ func (iv IndexView) CreateMany(ctx context.Context, models []IndexModel, opts .. err = op.Execute(ctx) if err != nil { - return nil, replaceErrors(err) + _, err = processWriteError(err) + return nil, err } return names, nil @@ -399,7 +402,8 @@ func (iv IndexView) drop(ctx context.Context, name string, opts ...*options.Drop Session(sess).WriteConcern(wc).CommandMonitor(iv.coll.client.monitor). ServerSelector(selector).ClusterClock(iv.coll.client.clock). Database(iv.coll.db.name).Collection(iv.coll.name). - Deployment(iv.coll.client.deployment).ServerAPI(iv.coll.client.serverAPI) + Deployment(iv.coll.client.deployment).ServerAPI(iv.coll.client.serverAPI). + Timeout(iv.coll.client.timeout) if dio.MaxTime != nil { op.MaxTimeMS(int64(*dio.MaxTime / time.Millisecond)) } @@ -426,7 +430,7 @@ func (iv IndexView) drop(ctx context.Context, name string, opts ...*options.Drop // The opts parameter can be used to specify options for this operation (see the options.DropIndexesOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/dropIndexes/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/dropIndexes/. func (iv IndexView) DropOne(ctx context.Context, name string, opts ...*options.DropIndexesOptions) (bson.Raw, error) { if name == "*" { return nil, ErrMultipleIndexDrop @@ -442,7 +446,7 @@ func (iv IndexView) DropOne(ctx context.Context, name string, opts ...*options.D // The opts parameter can be used to specify options for this operation (see the options.DropIndexesOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/dropIndexes/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/dropIndexes/. func (iv IndexView) DropAll(ctx context.Context, opts ...*options.DropIndexesOptions) (bson.Raw, error) { return iv.drop(ctx, "*", opts...) 
} diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go b/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go index 3ceb7aa97..80282527e 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go @@ -123,17 +123,6 @@ func transformAndEnsureID(registry *bsoncodec.Registry, val interface{}) (bsonco return doc, id, nil } -func transformDocument(registry *bsoncodec.Registry, val interface{}) (bsonx.Doc, error) { - if doc, ok := val.(bsonx.Doc); ok { - return doc.Copy(), nil - } - b, err := transformBsoncoreDocument(registry, val, true, "document") - if err != nil { - return nil, err - } - return bsonx.ReadDoc(b) -} - func transformBsoncoreDocument(registry *bsoncodec.Registry, val interface{}, mapAllowed bool, paramName string) (bsoncore.Document, error) { if registry == nil { registry = bson.DefaultRegistry @@ -161,21 +150,6 @@ func transformBsoncoreDocument(registry *bsoncodec.Registry, val interface{}, ma return b, nil } -func ensureID(d bsonx.Doc) (bsonx.Doc, interface{}) { - var id interface{} - - elem, err := d.LookupElementErr("_id") - switch err.(type) { - case nil: - id = elem - default: - oid := primitive.NewObjectID() - d = append(d, bsonx.Elem{"_id", bsonx.ObjectID(oid)}) - id = oid - } - return d, id -} - func ensureDollarKey(doc bsoncore.Document) error { firstElem, err := doc.IndexErr(0) if err != nil { @@ -190,7 +164,7 @@ func ensureDollarKey(doc bsoncore.Document) error { func ensureNoDollarKey(doc bsoncore.Document) error { if elem, err := doc.IndexErr(0); err == nil && strings.HasPrefix(elem.Key(), "$") { - return errors.New("replacement document cannot contains keys beginning with '$") + return errors.New("replacement document cannot contain keys beginning with '$'") } return nil @@ -225,20 +199,43 @@ func transformAggregatePipeline(registry *bsoncodec.Registry, pipeline interface return nil, false, fmt.Errorf("can only transform slices and arrays into aggregation pipelines, but got %v", val.Kind()) } - aidx, arr := bsoncore.AppendArrayStart(nil) var hasOutputStage bool valLen := val.Len() + switch t := pipeline.(type) { // Explicitly forbid non-empty pipelines that are semantically single documents // and are implemented as slices. - switch t := pipeline.(type) { case bson.D, bson.Raw, bsoncore.Document: if valLen > 0 { return nil, false, fmt.Errorf("%T is not an allowed pipeline type as it represents a single document. Use bson.A or mongo.Pipeline instead", t) } + // bsoncore.Arrays do not need to be transformed. Only check validity and presence of output stage. + case bsoncore.Array: + if err := t.Validate(); err != nil { + return nil, false, err + } + + values, err := t.Values() + if err != nil { + return nil, false, err + } + + numVals := len(values) + if numVals == 0 { + return bsoncore.Document(t), false, nil + } + + // If not empty, check if first value of the last stage is $out or $merge. 
+ if lastStage, ok := values[numVals-1].DocumentOK(); ok { + if elem, err := lastStage.IndexErr(0); err == nil && (elem.Key() == "$out" || elem.Key() == "$merge") { + hasOutputStage = true + } + } + return bsoncore.Document(t), hasOutputStage, nil } + aidx, arr := bsoncore.AppendArrayStart(nil) for idx := 0; idx < valLen; idx++ { doc, err := transformBsoncoreDocument(registry, val.Index(idx).Interface(), true, fmt.Sprintf("pipeline stage :%v", idx)) if err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go b/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go index 2eab62f97..016ccef62 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go @@ -28,17 +28,18 @@ const ( var defaultTimeoutArgs = []string{"--idleShutdownTimeoutSecs=60"} var databaseOpts = options.Database().SetReadConcern(readconcern.New()).SetReadPreference(readpref.Primary()) -type mcryptClient struct { +type mongocryptdClient struct { bypassSpawn bool client *Client path string spawnArgs []string } -func newMcryptClient(opts *options.AutoEncryptionOptions) (*mcryptClient, error) { +func newMongocryptdClient(cryptSharedLibAvailable bool, opts *options.AutoEncryptionOptions) (*mongocryptdClient, error) { // create mcryptClient instance and spawn process if necessary var bypassSpawn bool var bypassAutoEncryption bool + if bypass, ok := opts.ExtraOptions["mongocryptdBypassSpawn"]; ok { bypassSpawn = bypass.(bool) } @@ -46,10 +47,15 @@ func newMcryptClient(opts *options.AutoEncryptionOptions) (*mcryptClient, error) bypassAutoEncryption = *opts.BypassAutoEncryption } - mc := &mcryptClient{ - // mongocryptd should not be spawned if mongocryptdBypassSpawn is passed or if bypassAutoEncryption is - // specified because it is not used during decryption - bypassSpawn: bypassSpawn || bypassAutoEncryption, + bypassQueryAnalysis := opts.BypassQueryAnalysis != nil && *opts.BypassQueryAnalysis + + mc := &mongocryptdClient{ + // mongocryptd should not be spawned if any of these conditions are true: + // - mongocryptdBypassSpawn is passed + // - bypassAutoEncryption is true because mongocryptd is not used during decryption + // - bypassQueryAnalysis is true because mongocryptd is not used during decryption + // - the crypt_shared library is available because it replaces all mongocryptd functionality. + bypassSpawn: bypassSpawn || bypassAutoEncryption || bypassQueryAnalysis || cryptSharedLibAvailable, } if !mc.bypassSpawn { @@ -76,7 +82,11 @@ func newMcryptClient(opts *options.AutoEncryptionOptions) (*mcryptClient, error) } // markCommand executes the given command on mongocryptd. -func (mc *mcryptClient) markCommand(ctx context.Context, dbName string, cmd bsoncore.Document) (bsoncore.Document, error) { +func (mc *mongocryptdClient) markCommand(ctx context.Context, dbName string, cmd bsoncore.Document) (bsoncore.Document, error) { + // Remove the explicit session from the context if one is set. + // The explicit session will be from a different client. + // If an explicit session is set, it is applied after automatic encryption. + ctx = NewSessionContext(ctx, nil) db := mc.client.Database(dbName, databaseOpts) res, err := db.RunCommand(ctx, cmd).DecodeBytes() @@ -101,16 +111,18 @@ func (mc *mcryptClient) markCommand(ctx context.Context, dbName string, cmd bson } // connect connects the underlying Client instance. This must be called before performing any mark operations. 
-func (mc *mcryptClient) connect(ctx context.Context) error { +func (mc *mongocryptdClient) connect(ctx context.Context) error { return mc.client.Connect(ctx) } // disconnect disconnects the underlying Client instance. This should be called after all operations have completed. -func (mc *mcryptClient) disconnect(ctx context.Context) error { +func (mc *mongocryptdClient) disconnect(ctx context.Context) error { return mc.client.Disconnect(ctx) } -func (mc *mcryptClient) spawnProcess() error { +func (mc *mongocryptdClient) spawnProcess() error { + // Ignore gosec warning about subprocess launched with externally-provided path variable. + /* #nosec G204 */ cmd := exec.Command(mc.path, mc.spawnArgs...) cmd.Stdout = nil cmd.Stderr = nil diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go index e1f710fd2..983eba24f 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go @@ -6,7 +6,11 @@ package options -import "time" +import ( + "time" + + "go.mongodb.org/mongo-driver/bson" +) // AggregateOptions represents options that can be used to configure an Aggregate operation. type AggregateOptions struct { @@ -19,7 +23,7 @@ type AggregateOptions struct { // If true, writes executed as part of the operation will opt out of document-level validation on the server. This // option is valid for MongoDB versions >= 3.2 and is ignored for previous server versions. The default value is - // false. See https://docs.mongodb.com/manual/core/schema-validation/ for more information about document + // false. See https://www.mongodb.com/docs/manual/core/schema-validation/ for more information about document // validation. BypassDocumentValidation *bool @@ -30,6 +34,10 @@ type AggregateOptions struct { // The maximum amount of time that the query can run on the server. The default value is nil, meaning that there // is no time limit for query execution. + // + // Deprecated: This option is deprecated and will eventually be removed in version 2.0 of the driver. The more general + // Timeout option should be used in its place to control the amount of time that the Aggregate operation can run before + // returning an error. MaxTime is still usable through the deprecated setter. MaxTime *time.Duration // The maximum amount of time that the server should wait for new documents to satisfy a tailable cursor query. @@ -37,7 +45,7 @@ type AggregateOptions struct { MaxAwaitTime *time.Duration // A string that will be included in server logs, profiling logs, and currentOp queries to help trace the operation. - // The default is the empty string, which means that no comment will be included in the logs. + // The default is nil, which means that no comment will be included in the logs. Comment *string // The index to use for the aggregation. This should either be the index name as a string or the index specification @@ -50,6 +58,11 @@ type AggregateOptions struct { // Values must be constant or closed expressions that do not reference document fields. Parameters can then be // accessed as variables in an aggregate expression context (e.g. "$$var"). Let interface{} + + // Custom options to be added to aggregate expression. Key-value pairs of the BSON map should correlate with desired + // option names and values. Values must be Marshalable. 
Custom options may conflict with non-custom options, and custom + // options bypass client-side validation. Prefer using non-custom options where possible. + Custom bson.M } // Aggregate creates a new AggregateOptions instance. @@ -82,6 +95,10 @@ func (ao *AggregateOptions) SetCollation(c *Collation) *AggregateOptions { } // SetMaxTime sets the value for the MaxTime field. +// +// Deprecated: This option is deprecated and will eventually be removed in version 2.0 of the driver. +// The more general Timeout option should be used in its place to control the amount of time that the +// Aggregate operation can run before returning an error. func (ao *AggregateOptions) SetMaxTime(d time.Duration) *AggregateOptions { ao.MaxTime = &d return ao @@ -111,6 +128,15 @@ func (ao *AggregateOptions) SetLet(let interface{}) *AggregateOptions { return ao } +// SetCustom sets the value for the Custom field. Key-value pairs of the BSON map should correlate +// with desired option names and values. Values must be Marshalable. Custom options may conflict +// with non-custom options, and custom options bypass client-side validation. Prefer using non-custom +// options where possible. +func (ao *AggregateOptions) SetCustom(c bson.M) *AggregateOptions { + ao.Custom = c + return ao +} + // MergeAggregateOptions combines the given AggregateOptions instances into a single AggregateOptions in a last-one-wins // fashion. func MergeAggregateOptions(opts ...*AggregateOptions) *AggregateOptions { @@ -146,6 +172,9 @@ func MergeAggregateOptions(opts ...*AggregateOptions) *AggregateOptions { if ao.Let != nil { aggOpts.Let = ao.Let } + if ao.Custom != nil { + aggOpts.Custom = ao.Custom + } } return aggOpts diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go index 517b69e6c..375d89991 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go @@ -6,6 +6,10 @@ package options +import ( + "crypto/tls" +) + // AutoEncryptionOptions represents options used to configure auto encryption/decryption behavior for a mongo.Client // instance. // @@ -27,6 +31,9 @@ type AutoEncryptionOptions struct { SchemaMap map[string]interface{} BypassAutoEncryption *bool ExtraOptions map[string]interface{} + TLSConfig map[string]*tls.Config + EncryptedFieldsMap map[string]interface{} + BypassQueryAnalysis *bool } // AutoEncryption creates a new AutoEncryptionOptions configured with default values. @@ -85,12 +92,73 @@ func (a *AutoEncryptionOptions) SetBypassAutoEncryption(bypass bool) *AutoEncryp return a } -// SetExtraOptions specifies a map of options to configure the mongocryptd process. +// SetExtraOptions specifies a map of options to configure the mongocryptd process or mongo_crypt shared library. +// +// Supported Extra Options +// +// "mongocryptdURI" - The mongocryptd URI. Allows setting a custom URI used to communicate with the +// mongocryptd process. The default is "mongodb://localhost:27020", which works with the default +// mongocryptd process spawned by the Client. Must be a string. +// +// "mongocryptdBypassSpawn" - If set to true, the Client will not attempt to spawn a mongocryptd +// process. Must be a bool. +// +// "mongocryptdSpawnPath" - The path used when spawning mongocryptd. +// Defaults to empty string and spawns mongocryptd from system path. Must be a string. 
+// +// "mongocryptdSpawnArgs" - Command line arguments passed when spawning mongocryptd. +// Defaults to ["--idleShutdownTimeoutSecs=60"]. Must be an array of strings. +// +// "cryptSharedLibRequired" - If set to true, Client creation will return an error if the +// crypt_shared library is not loaded. If unset or set to false, Client creation will not return an +// error if the crypt_shared library is not loaded. The default is unset. Must be a bool. +// +// "cryptSharedLibPath" - The crypt_shared library override path. This must be the path to the +// crypt_shared dynamic library file (for example, a .so, .dll, or .dylib file), not the directory +// that contains it. If the override path is a relative path, it will be resolved relative to the +// working directory of the process. If the override path is a relative path and the first path +// component is the literal string "$ORIGIN", the "$ORIGIN" component will be replaced by the +// absolute path to the directory containing the linked libmongocrypt library. Setting an override +// path disables the default system library search path. If an override path is specified but the +// crypt_shared library cannot be loaded, Client creation will return an error. Must be a string. func (a *AutoEncryptionOptions) SetExtraOptions(extraOpts map[string]interface{}) *AutoEncryptionOptions { a.ExtraOptions = extraOpts return a } +// SetTLSConfig specifies tls.Config instances for each KMS provider to use to configure TLS on all connections created +// to the KMS provider. +// +// This should only be used to set custom TLS configurations. By default, the connection will use an empty tls.Config{} with MinVersion set to tls.VersionTLS12. +func (a *AutoEncryptionOptions) SetTLSConfig(tlsOpts map[string]*tls.Config) *AutoEncryptionOptions { + tlsConfigs := make(map[string]*tls.Config) + for provider, config := range tlsOpts { + // use TLS min version 1.2 to enforce more secure hash algorithms and advanced cipher suites + if config.MinVersion == 0 { + config.MinVersion = tls.VersionTLS12 + } + tlsConfigs[provider] = config + } + a.TLSConfig = tlsConfigs + return a +} + +// SetEncryptedFieldsMap specifies a map from namespace to local EncryptedFieldsMap document. +// EncryptedFieldsMap is used for Queryable Encryption. +// Queryable Encryption is in Public Technical Preview. Queryable Encryption should not be used in production and is subject to backwards breaking changes. +func (a *AutoEncryptionOptions) SetEncryptedFieldsMap(ef map[string]interface{}) *AutoEncryptionOptions { + a.EncryptedFieldsMap = ef + return a +} + +// SetBypassQueryAnalysis specifies whether or not query analysis should be used for automatic encryption. +// Use this option when using explicit encryption with Queryable Encryption. +// Queryable Encryption is in Public Technical Preview. Queryable Encryption should not be used in production and is subject to backwards breaking changes. +func (a *AutoEncryptionOptions) SetBypassQueryAnalysis(bypass bool) *AutoEncryptionOptions { + a.BypassQueryAnalysis = &bypass + return a +} + // MergeAutoEncryptionOptions combines the argued AutoEncryptionOptions in a last-one wins fashion. 
func MergeAutoEncryptionOptions(opts ...*AutoEncryptionOptions) *AutoEncryptionOptions { aeo := AutoEncryption() @@ -117,6 +185,15 @@ func MergeAutoEncryptionOptions(opts ...*AutoEncryptionOptions) *AutoEncryptionO if opt.ExtraOptions != nil { aeo.ExtraOptions = opt.ExtraOptions } + if opt.TLSConfig != nil { + aeo.TLSConfig = opt.TLSConfig + } + if opt.EncryptedFieldsMap != nil { + aeo.EncryptedFieldsMap = opt.EncryptedFieldsMap + } + if opt.BypassQueryAnalysis != nil { + aeo.BypassQueryAnalysis = opt.BypassQueryAnalysis + } } return aeo diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go index 57f98f83d..0c36d0b7b 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go @@ -13,12 +13,22 @@ var DefaultOrdered = true type BulkWriteOptions struct { // If true, writes executed as part of the operation will opt out of document-level validation on the server. This // option is valid for MongoDB versions >= 3.2 and is ignored for previous server versions. The default value is - // false. See https://docs.mongodb.com/manual/core/schema-validation/ for more information about document + // false. See https://www.mongodb.com/docs/manual/core/schema-validation/ for more information about document // validation. BypassDocumentValidation *bool + // A string or document that will be included in server logs, profiling logs, and currentOp queries to help trace + // the operation. The default value is nil, which means that no comment will be included in the logs. + Comment interface{} + // If true, no writes will be executed after one fails. The default value is true. Ordered *bool + + // Specifies parameters for all update and delete commands in the BulkWrite. This option is only valid for MongoDB + // versions >= 5.0. Older servers will report an error for using this option. This must be a document mapping + // parameter names to values. Values must be constant or closed expressions that do not reference document fields. + // Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). + Let interface{} } // BulkWrite creates a new *BulkWriteOptions instance. @@ -28,6 +38,12 @@ func BulkWrite() *BulkWriteOptions { } } +// SetComment sets the value for the Comment field. +func (b *BulkWriteOptions) SetComment(comment interface{}) *BulkWriteOptions { + b.Comment = comment + return b +} + // SetOrdered sets the value for the Ordered field. func (b *BulkWriteOptions) SetOrdered(ordered bool) *BulkWriteOptions { b.Ordered = &ordered @@ -40,6 +56,15 @@ func (b *BulkWriteOptions) SetBypassDocumentValidation(bypass bool) *BulkWriteOp return b } +// SetLet sets the value for the Let field. Let specifies parameters for all update and delete commands in the BulkWrite. +// This option is only valid for MongoDB versions >= 5.0. Older servers will report an error for using this option. +// This must be a document mapping parameter names to values. Values must be constant or closed expressions that do not +// reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). +func (b *BulkWriteOptions) SetLet(let interface{}) *BulkWriteOptions { + b.Let = &let + return b +} + // MergeBulkWriteOptions combines the given BulkWriteOptions instances into a single BulkWriteOptions in a last-one-wins // fashion. 
func MergeBulkWriteOptions(opts ...*BulkWriteOptions) *BulkWriteOptions { @@ -48,12 +73,18 @@ func MergeBulkWriteOptions(opts ...*BulkWriteOptions) *BulkWriteOptions { if opt == nil { continue } + if opt.Comment != nil { + b.Comment = opt.Comment + } if opt.Ordered != nil { b.Ordered = opt.Ordered } if opt.BypassDocumentValidation != nil { b.BypassDocumentValidation = opt.BypassDocumentValidation } + if opt.Let != nil { + b.Let = opt.Let + } } return b diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go index fe19f45eb..862abcd34 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go @@ -9,6 +9,7 @@ package options import ( "time" + "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/primitive" ) @@ -22,11 +23,18 @@ type ChangeStreamOptions struct { // default value is nil, which means the default collation of the collection will be used. Collation *Collation - // Specifies whether the updated document should be returned in change notifications for update operations along - // with the deltas describing the changes made to the document. The default is options.Default, which means that - // the updated document will not be included in the change notification. + // A string that will be included in server logs, profiling logs, and currentOp queries to help trace the operation. + // The default is nil, which means that no comment will be included in the logs. + Comment *string + + // Specifies how the updated document should be returned in change notifications for update operations. The default + // is options.Default, which means that only partial update deltas will be included in the change notification. FullDocument *FullDocument + // Specifies how the pre-update document should be returned in change notifications for update operations. The default + // is options.Off, which means that the pre-update document will not be included in the change notification. + FullDocumentBeforeChange *FullDocument + // The maximum amount of time that the server should wait for new documents to satisfy a tailable cursor query. MaxAwaitTime *time.Duration @@ -35,6 +43,11 @@ type ChangeStreamOptions struct { // StartAfter must not be set. ResumeAfter interface{} + // ShowExpandedEvents specifies whether the server will return an expanded list of change stream events. Additional + // events include: createIndexes, dropIndexes, modify, create, shardCollection, reshardCollection and + // refineCollectionShardKey. This option is only valid for MongoDB versions >= 6.0. + ShowExpandedEvents *bool + // If specified, the change stream will only return changes that occurred at or after the given timestamp. This // option is only valid for MongoDB versions >= 4.0. If this is specified, ResumeAfter and StartAfter must not be // set. @@ -46,6 +59,16 @@ type ChangeStreamOptions struct { // corresponding to an oplog entry immediately after the specified token will be returned. If this is specified, // ResumeAfter and StartAtOperationTime must not be set. This option is only valid for MongoDB versions >= 4.1.1. StartAfter interface{} + + // Custom options to be added to the initial aggregate for the change stream. Key-value pairs of the BSON map should + // correlate with desired option names and values. Values must be Marshalable. 
Custom options may conflict with + // non-custom options, and custom options bypass client-side validation. Prefer using non-custom options where possible. + Custom bson.M + + // Custom options to be added to the $changeStream stage in the initial aggregate. Key-value pairs of the BSON map should + // correlate with desired option names and values. Values must be Marshalable. Custom pipeline options bypass client-side + // validation. Prefer using non-custom options where possible. + CustomPipeline bson.M } // ChangeStream creates a new ChangeStreamOptions instance. @@ -67,12 +90,24 @@ func (cso *ChangeStreamOptions) SetCollation(c Collation) *ChangeStreamOptions { return cso } +// SetComment sets the value for the Comment field. +func (cso *ChangeStreamOptions) SetComment(comment string) *ChangeStreamOptions { + cso.Comment = &comment + return cso +} + // SetFullDocument sets the value for the FullDocument field. func (cso *ChangeStreamOptions) SetFullDocument(fd FullDocument) *ChangeStreamOptions { cso.FullDocument = &fd return cso } +// SetFullDocumentBeforeChange sets the value for the FullDocumentBeforeChange field. +func (cso *ChangeStreamOptions) SetFullDocumentBeforeChange(fdbc FullDocument) *ChangeStreamOptions { + cso.FullDocumentBeforeChange = &fdbc + return cso +} + // SetMaxAwaitTime sets the value for the MaxAwaitTime field. func (cso *ChangeStreamOptions) SetMaxAwaitTime(d time.Duration) *ChangeStreamOptions { cso.MaxAwaitTime = &d @@ -85,6 +120,12 @@ func (cso *ChangeStreamOptions) SetResumeAfter(rt interface{}) *ChangeStreamOpti return cso } +// SetShowExpandedEvents sets the value for the ShowExpandedEvents field. +func (cso *ChangeStreamOptions) SetShowExpandedEvents(see bool) *ChangeStreamOptions { + cso.ShowExpandedEvents = &see + return cso +} + // SetStartAtOperationTime sets the value for the StartAtOperationTime field. func (cso *ChangeStreamOptions) SetStartAtOperationTime(t *primitive.Timestamp) *ChangeStreamOptions { cso.StartAtOperationTime = t @@ -97,6 +138,23 @@ func (cso *ChangeStreamOptions) SetStartAfter(sa interface{}) *ChangeStreamOptio return cso } +// SetCustom sets the value for the Custom field. Key-value pairs of the BSON map should correlate +// with desired option names and values. Values must be Marshalable. Custom options may conflict +// with non-custom options, and custom options bypass client-side validation. Prefer using non-custom +// options where possible. +func (cso *ChangeStreamOptions) SetCustom(c bson.M) *ChangeStreamOptions { + cso.Custom = c + return cso +} + +// SetCustomPipeline sets the value for the CustomPipeline field. Key-value pairs of the BSON map +// should correlate with desired option names and values. Values must be Marshalable. Custom pipeline +// options bypass client-side validation. Prefer using non-custom options where possible. +func (cso *ChangeStreamOptions) SetCustomPipeline(cp bson.M) *ChangeStreamOptions { + cso.CustomPipeline = cp + return cso +} + // MergeChangeStreamOptions combines the given ChangeStreamOptions instances into a single ChangeStreamOptions in a // last-one-wins fashion. 
func MergeChangeStreamOptions(opts ...*ChangeStreamOptions) *ChangeStreamOptions { @@ -111,21 +169,36 @@ func MergeChangeStreamOptions(opts ...*ChangeStreamOptions) *ChangeStreamOptions if cso.Collation != nil { csOpts.Collation = cso.Collation } + if cso.Comment != nil { + csOpts.Comment = cso.Comment + } if cso.FullDocument != nil { csOpts.FullDocument = cso.FullDocument } + if cso.FullDocumentBeforeChange != nil { + csOpts.FullDocumentBeforeChange = cso.FullDocumentBeforeChange + } if cso.MaxAwaitTime != nil { csOpts.MaxAwaitTime = cso.MaxAwaitTime } if cso.ResumeAfter != nil { csOpts.ResumeAfter = cso.ResumeAfter } + if cso.ShowExpandedEvents != nil { + csOpts.ShowExpandedEvents = cso.ShowExpandedEvents + } if cso.StartAtOperationTime != nil { csOpts.StartAtOperationTime = cso.StartAtOperationTime } if cso.StartAfter != nil { csOpts.StartAfter = cso.StartAfter } + if cso.Custom != nil { + csOpts.Custom = cso.Custom + } + if cso.CustomPipeline != nil { + csOpts.CustomPipeline = cso.CustomPipeline + } } return csOpts diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientencryptionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientencryptionoptions.go index ea770413a..b8f6e8710 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientencryptionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientencryptionoptions.go @@ -6,10 +6,16 @@ package options +import ( + "crypto/tls" + "fmt" +) + // ClientEncryptionOptions represents all possible options used to configure a ClientEncryption instance. type ClientEncryptionOptions struct { KeyVaultNamespace string KmsProviders map[string]map[string]interface{} + TLSConfig map[string]*tls.Config } // ClientEncryption creates a new ClientEncryptionOptions instance. @@ -29,6 +35,86 @@ func (c *ClientEncryptionOptions) SetKmsProviders(providers map[string]map[strin return c } +// SetTLSConfig specifies tls.Config instances for each KMS provider to use to configure TLS on all connections created +// to the KMS provider. +// +// This should only be used to set custom TLS configurations. By default, the connection will use an empty tls.Config{} with MinVersion set to tls.VersionTLS12. +func (c *ClientEncryptionOptions) SetTLSConfig(tlsOpts map[string]*tls.Config) *ClientEncryptionOptions { + tlsConfigs := make(map[string]*tls.Config) + for provider, config := range tlsOpts { + // use TLS min version 1.2 to enforce more secure hash algorithms and advanced cipher suites + if config.MinVersion == 0 { + config.MinVersion = tls.VersionTLS12 + } + tlsConfigs[provider] = config + } + c.TLSConfig = tlsConfigs + return c +} + +// BuildTLSConfig specifies tls.Config options for each KMS provider to use to configure TLS on all connections created +// to the KMS provider. The input map should contain a mapping from each KMS provider to a document containing the necessary +// options, as follows: +// +// { +// "kmip": { +// "tlsCertificateKeyFile": "foo.pem", +// "tlsCAFile": "fooCA.pem" +// } +// } +// +// Currently, the following TLS options are supported: +// +// 1. "tlsCertificateKeyFile" (or "sslClientCertificateKeyFile"): The "tlsCertificateKeyFile" option specifies a path to +// the client certificate and private key, which must be concatenated into one file. +// +// 2. "tlsCertificateKeyFilePassword" (or "sslClientCertificateKeyPassword"): Specify the password to decrypt the client +// private key file (e.g. "tlsCertificateKeyFilePassword=password"). +// +// 3. 
"tlsCaFile" (or "sslCertificateAuthorityFile"): Specify the path to a single or bundle of certificate authorities +// to be considered trusted when making a TLS connection (e.g. "tlsCaFile=/path/to/caFile"). +// +// This should only be used to set custom TLS options. By default, the connection will use an empty tls.Config{} with MinVersion set to tls.VersionTLS12. +func BuildTLSConfig(tlsOpts map[string]interface{}) (*tls.Config, error) { + // use TLS min version 1.2 to enforce more secure hash algorithms and advanced cipher suites + cfg := &tls.Config{MinVersion: tls.VersionTLS12} + + for name := range tlsOpts { + var err error + switch name { + case "tlsCertificateKeyFile", "sslClientCertificateKeyFile": + clientCertPath, ok := tlsOpts[name].(string) + if !ok { + return nil, fmt.Errorf("expected %q value to be of type string, got %T", name, tlsOpts[name]) + } + // apply custom key file password if found, otherwise use empty string + if keyPwd, found := tlsOpts["tlsCertificateKeyFilePassword"].(string); found { + _, err = addClientCertFromConcatenatedFile(cfg, clientCertPath, keyPwd) + } else if keyPwd, found := tlsOpts["sslClientCertificateKeyPassword"].(string); found { + _, err = addClientCertFromConcatenatedFile(cfg, clientCertPath, keyPwd) + } else { + _, err = addClientCertFromConcatenatedFile(cfg, clientCertPath, "") + } + case "tlsCertificateKeyFilePassword", "sslClientCertificateKeyPassword": + continue + case "tlsCAFile", "sslCertificateAuthorityFile": + caPath, ok := tlsOpts[name].(string) + if !ok { + return nil, fmt.Errorf("expected %q value to be of type string, got %T", name, tlsOpts[name]) + } + err = addCACertFromFile(cfg, caPath) + default: + return nil, fmt.Errorf("unrecognized TLS option %v", name) + } + + if err != nil { + return nil, err + } + } + + return cfg, nil +} + // MergeClientEncryptionOptions combines the argued ClientEncryptionOptions in a last-one wins fashion. func MergeClientEncryptionOptions(opts ...*ClientEncryptionOptions) *ClientEncryptionOptions { ceo := ClientEncryption() @@ -43,6 +129,9 @@ func MergeClientEncryptionOptions(opts ...*ClientEncryptionOptions) *ClientEncry if opt.KmsProviders != nil { ceo.KmsProviders = opt.KmsProviders } + if opt.TLSConfig != nil { + ceo.TLSConfig = opt.TLSConfig + } } return ceo diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go index d66114fad..05f974f50 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go @@ -45,7 +45,7 @@ type ContextDialer interface { // AuthMechanism: the mechanism to use for authentication. Supported values include "SCRAM-SHA-256", "SCRAM-SHA-1", // "MONGODB-CR", "PLAIN", "GSSAPI", "MONGODB-X509", and "MONGODB-AWS". This can also be set through the "authMechanism" // URI option. (e.g. "authMechanism=PLAIN"). For more information, see -// https://docs.mongodb.com/manual/core/authentication-mechanisms/. +// https://www.mongodb.com/docs/manual/core/authentication-mechanisms/. // // AuthMechanismProperties can be used to specify additional configuration options for certain mechanisms. 
They can also // be set through the "authMechanismProperites" URI option @@ -109,6 +109,7 @@ type ClientOptions struct { MaxConnIdleTime *time.Duration MaxPoolSize *uint64 MinPoolSize *uint64 + MaxConnecting *uint64 PoolMonitor *event.PoolMonitor Monitor *event.CommandMonitor ServerMonitor *event.ServerMonitor @@ -120,7 +121,9 @@ type ClientOptions struct { RetryWrites *bool ServerAPIOptions *ServerAPIOptions ServerSelectionTimeout *time.Duration - SocketTimeout *time.Duration + SRVMaxHosts *int + SRVServiceName *string + Timeout *time.Duration TLSConfig *tls.Config WriteConcern *writeconcern.WriteConcern ZlibLevel *int @@ -136,11 +139,25 @@ type ClientOptions struct { // release. AuthenticateToAnything *bool + // Crypt specifies a custom driver.Crypt to be used to encrypt and decrypt documents. The default is no + // encryption. + // + // Deprecated: This option is for internal use only and should not be set (see GODRIVER-2149). It may be + // changed or removed in any release. + Crypt driver.Crypt + // Deployment specifies a custom deployment to use for the new Client. // // Deprecated: This option is for internal use only and should not be set. It may be changed or removed in any // release. Deployment driver.Deployment + + // SocketTimeout specifies the timeout to be used for the Client's socket reads and writes. + // + // Deprecated: This option is deprecated and will eventually be removed in version 2.0 of the driver. The more general + // Timeout option should be used in its place to control the amount of time that a single operation can run on the Client + // before returning an error. SocketTimeout is still usable through the deprecated setter. + SocketTimeout *time.Duration } // Client creates a new ClientOptions instance. @@ -150,44 +167,58 @@ func Client() *ClientOptions { // Validate validates the client options. This method will return the first error found. func (c *ClientOptions) Validate() error { - c.validateAndSetError() - return c.err -} - -func (c *ClientOptions) validateAndSetError() { if c.err != nil { - return + return c.err } + c.err = c.validate() + return c.err +} +func (c *ClientOptions) validate() error { // Direct connections cannot be made if multiple hosts are specified or an SRV URI is used. if c.Direct != nil && *c.Direct { if len(c.Hosts) > 1 { - c.err = errors.New("a direct connection cannot be made if multiple hosts are specified") - return + return errors.New("a direct connection cannot be made if multiple hosts are specified") } if c.cs != nil && c.cs.Scheme == connstring.SchemeMongoDBSRV { - c.err = errors.New("a direct connection cannot be made if an SRV URI is used") - return + return errors.New("a direct connection cannot be made if an SRV URI is used") } } + if c.MaxPoolSize != nil && c.MinPoolSize != nil && *c.MaxPoolSize != 0 && *c.MinPoolSize > *c.MaxPoolSize { + return fmt.Errorf("minPoolSize must be less than or equal to maxPoolSize, got minPoolSize=%d maxPoolSize=%d", *c.MinPoolSize, *c.MaxPoolSize) + } + // verify server API version if ServerAPIOptions are passed in. if c.ServerAPIOptions != nil { - c.err = c.ServerAPIOptions.ServerAPIVersion.Validate() + if err := c.ServerAPIOptions.ServerAPIVersion.Validate(); err != nil { + return err + } } // Validation for load-balanced mode. 
if c.LoadBalanced != nil && *c.LoadBalanced { if len(c.Hosts) > 1 { - c.err = internal.ErrLoadBalancedWithMultipleHosts + return internal.ErrLoadBalancedWithMultipleHosts } if c.ReplicaSet != nil { - c.err = internal.ErrLoadBalancedWithReplicaSet + return internal.ErrLoadBalancedWithReplicaSet } if c.Direct != nil { - c.err = internal.ErrLoadBalancedWithDirectConnection + return internal.ErrLoadBalancedWithDirectConnection } } + + // Validation for srvMaxHosts. + if c.SRVMaxHosts != nil && *c.SRVMaxHosts > 0 { + if c.ReplicaSet != nil { + return internal.ErrSRVMaxHostsWithReplicaSet + } + if c.LoadBalanced != nil && *c.LoadBalanced { + return internal.ErrSRVMaxHostsWithLoadBalanced + } + } + return nil } // GetURI returns the original URI used to configure the ClientOptions instance. If ApplyURI was not called during @@ -208,7 +239,7 @@ func (c *ClientOptions) GetURI() string { // If the URI format is incorrect or there are conflicting options specified in the URI an error will be recorded and // can be retrieved by calling Validate. // -// For more information about the URI format, see https://docs.mongodb.com/manual/reference/connection-string/. See +// For more information about the URI format, see https://www.mongodb.com/docs/manual/reference/connection-string/. See // mongo.Connect documentation for examples of using URIs for different Client configurations. func (c *ClientOptions) ApplyURI(uri string) *ClientOptions { if c.err != nil { @@ -290,6 +321,10 @@ func (c *ClientOptions) ApplyURI(uri string) *ClientOptions { c.MinPoolSize = &cs.MinPoolSize } + if cs.MaxConnectingSet { + c.MaxConnecting = &cs.MaxConnecting + } + if cs.ReadConcernLevel != "" { c.ReadConcern = readconcern.New(readconcern.Level(cs.ReadConcernLevel)) } @@ -338,6 +373,14 @@ func (c *ClientOptions) ApplyURI(uri string) *ClientOptions { c.SocketTimeout = &cs.SocketTimeout } + if cs.SRVMaxHosts != 0 { + c.SRVMaxHosts = &cs.SRVMaxHosts + } + + if cs.SRVServiceName != "" { + c.SRVServiceName = &cs.SRVServiceName + } + if cs.SSL { tlsConfig := new(tls.Config) @@ -410,6 +453,10 @@ func (c *ClientOptions) ApplyURI(uri string) *ClientOptions { c.DisableOCSPEndpointCheck = &cs.SSLDisableOCSPEndpointCheck } + if cs.TimeoutSet { + c.Timeout = &cs.Timeout + } + return c } @@ -435,12 +482,12 @@ func (c *ClientOptions) SetAuth(auth Credential) *ClientOptions { // // 2. "zlib" - requires server version >= 3.6 // -// 3. "zstd" - requires server version >= 4.2, and driver version >= 1.2.0 with cgo support enabled or driver version >= 1.3.0 -// without cgo +// 3. "zstd" - requires server version >= 4.2, and driver version >= 1.2.0 with cgo support enabled or driver +// version >= 1.3.0 without cgo. // // If this option is specified, the driver will perform a negotiation with the server to determine a common list of of // compressors and will use the first one in that list when performing operations. See -// https://docs.mongodb.com/manual/reference/program/mongod/#cmdoption-mongod-networkmessagecompressors for more +// https://www.mongodb.com/docs/manual/reference/program/mongod/#cmdoption-mongod-networkmessagecompressors for more // information about configuring compression on the server and the server-side defaults. // // This can also be set through the "compressors" URI option (e.g. "compressors=zstd,zlib,snappy"). 
The default is @@ -539,7 +586,7 @@ func (c *ClientOptions) SetMaxConnIdleTime(d time.Duration) *ClientOptions { // SetMaxPoolSize specifies that maximum number of connections allowed in the driver's connection pool to each server. // Requests to a server will block if this maximum is reached. This can also be set through the "maxPoolSize" URI option -// (e.g. "maxPoolSize=100"). The default is 100. If this is 0, it will be set to math.MaxInt64. +// (e.g. "maxPoolSize=100"). If this is 0, maximum connection pool size is not limited. The default is 100. func (c *ClientOptions) SetMaxPoolSize(u uint64) *ClientOptions { c.MaxPoolSize = &u return c @@ -553,6 +600,14 @@ func (c *ClientOptions) SetMinPoolSize(u uint64) *ClientOptions { return c } +// SetMaxConnecting specifies the maximum number of connections a connection pool may establish simultaneously. This can +// also be set through the "maxConnecting" URI option (e.g. "maxConnecting=2"). If this is 0, the default is used. The +// default is 2. Values greater than 100 are not recommended. +func (c *ClientOptions) SetMaxConnecting(u uint64) *ClientOptions { + c.MaxConnecting = &u + return c +} + // SetPoolMonitor specifies a PoolMonitor to receive connection pool events. See the event.PoolMonitor documentation // for more information about the structure of the monitor and events that can be received. func (c *ClientOptions) SetPoolMonitor(m *event.PoolMonitor) *ClientOptions { @@ -593,7 +648,7 @@ func (c *ClientOptions) SetReadConcern(rc *readconcern.ReadConcern) *ClientOptio // 3. "maxStalenessSeconds" (or "maxStaleness"): Specify a maximum replication lag for reads from secondaries in a // replica set (e.g. "maxStalenessSeconds=10"). // -// The default is readpref.Primary(). See https://docs.mongodb.com/manual/core/read-preference/#read-preference for +// The default is readpref.Primary(). See https://www.mongodb.com/docs/manual/core/read-preference/#read-preference for // more information about read preferences. func (c *ClientOptions) SetReadPreference(rp *readpref.ReadPref) *ClientOptions { c.ReadPreference = rp @@ -659,11 +714,30 @@ func (c *ClientOptions) SetServerSelectionTimeout(d time.Duration) *ClientOption // SetSocketTimeout specifies how long the driver will wait for a socket read or write to return before returning a // network error. This can also be set through the "socketTimeoutMS" URI option (e.g. "socketTimeoutMS=1000"). The // default value is 0, meaning no timeout is used and socket operations can block indefinitely. +// +// Deprecated: This option is deprecated and will eventually be removed in version 2.0 of the driver. The more general +// Timeout option should be used in its place to control the amount of time that a single operation can run on the Client +// before returning an error. func (c *ClientOptions) SetSocketTimeout(d time.Duration) *ClientOptions { c.SocketTimeout = &d return c } +// SetTimeout specifies the amount of time that a single operation run on this Client can execute before returning an error. +// The deadline of any operation run through the Client will be honored above any Timeout set on the Client; Timeout will only +// be honored if there is no deadline on the operation Context. Timeout can also be set through the "timeoutMS" URI option +// (e.g. "timeoutMS=1000"). The default value is nil, meaning operations do not inherit a timeout from the Client. +// +// If any Timeout is set (even 0) on the Client, the values of other, deprecated timeout-related options will be ignored. 
+// In particular: ClientOptions.SocketTimeout, WriteConcern.wTimeout, MaxTime on operations, and TransactionOptions.MaxCommitTime. +// +// NOTE(benjirewis): SetTimeout represents unstable, provisional API. The behavior of the driver when a Timeout is specified is +// subject to change. +func (c *ClientOptions) SetTimeout(d time.Duration) *ClientOptions { + c.Timeout = &d + return c +} + // SetTLSConfig specifies a tls.Config instance to use use to configure TLS on all connections created to the cluster. // This can also be set through the following URI options: // @@ -760,6 +834,22 @@ func (c *ClientOptions) SetServerAPIOptions(opts *ServerAPIOptions) *ClientOptio return c } +// SetSRVMaxHosts specifies the maximum number of SRV results to randomly select during polling. To limit the number +// of hosts selected in SRV discovery, this function must be called before ApplyURI. This can also be set through +// the "srvMaxHosts" URI option. +func (c *ClientOptions) SetSRVMaxHosts(srvMaxHosts int) *ClientOptions { + c.SRVMaxHosts = &srvMaxHosts + return c +} + +// SetSRVServiceName specifies a custom SRV service name to use in SRV polling. To use a custom SRV service name +// in SRV discovery, this function must be called before ApplyURI. This can also be set through the "srvServiceName" +// URI option. +func (c *ClientOptions) SetSRVServiceName(srvName string) *ClientOptions { + c.SRVServiceName = &srvName + return c +} + // MergeClientOptions combines the given *ClientOptions into a single *ClientOptions in a last one wins fashion. // The specified options are merged with the existing options on the client, with the specified options taking // precedence. @@ -789,6 +879,9 @@ func MergeClientOptions(opts ...*ClientOptions) *ClientOptions { if opt.ConnectTimeout != nil { c.ConnectTimeout = opt.ConnectTimeout } + if opt.Crypt != nil { + c.Crypt = opt.Crypt + } if opt.HeartbeatInterval != nil { c.HeartbeatInterval = opt.HeartbeatInterval } @@ -810,6 +903,9 @@ func MergeClientOptions(opts ...*ClientOptions) *ClientOptions { if opt.MinPoolSize != nil { c.MinPoolSize = opt.MinPoolSize } + if opt.MaxConnecting != nil { + c.MaxConnecting = opt.MaxConnecting + } if opt.PoolMonitor != nil { c.PoolMonitor = opt.PoolMonitor } @@ -849,6 +945,15 @@ func MergeClientOptions(opts ...*ClientOptions) *ClientOptions { if opt.SocketTimeout != nil { c.SocketTimeout = opt.SocketTimeout } + if opt.SRVMaxHosts != nil { + c.SRVMaxHosts = opt.SRVMaxHosts + } + if opt.SRVServiceName != nil { + c.SRVServiceName = opt.SRVServiceName + } + if opt.Timeout != nil { + c.Timeout = opt.Timeout + } if opt.TLSConfig != nil { c.TLSConfig = opt.TLSConfig } @@ -912,7 +1017,9 @@ func addClientCertFromSeparateFiles(cfg *tls.Config, keyFile, certFile, keyPassw return "", err } - data := append(keyData, '\n') + data := make([]byte, 0, len(keyData)+len(certData)+1) + data = append(data, keyData...) + data = append(data, '\n') data = append(data, certData...) return addClientCertFromBytes(cfg, data, keyPassword) } @@ -930,7 +1037,8 @@ func addClientCertFromConcatenatedFile(cfg *tls.Config, certKeyFile, keyPassword // containing file and returns the certificate's subject name. 
func addClientCertFromBytes(cfg *tls.Config, data []byte, keyPasswd string) (string, error) { var currentBlock *pem.Block - var certBlock, certDecodedBlock, keyBlock []byte + var certDecodedBlock []byte + var certBlocks, keyBlocks [][]byte remaining := data start := 0 @@ -941,7 +1049,8 @@ func addClientCertFromBytes(cfg *tls.Config, data []byte, keyPasswd string) (str } if currentBlock.Type == "CERTIFICATE" { - certBlock = data[start : len(data)-len(remaining)] + certBlock := data[start : len(data)-len(remaining)] + certBlocks = append(certBlocks, certBlock) certDecodedBlock = currentBlock.Bytes start += len(certBlock) } else if strings.HasSuffix(currentBlock.Type, "PRIVATE KEY") { @@ -966,29 +1075,31 @@ func addClientCertFromBytes(cfg *tls.Config, data []byte, keyPasswd string) (str if err != nil { return "", err } - keyBytes, err = x509MarshalPKCS8PrivateKey(decrypted) + keyBytes, err = x509.MarshalPKCS8PrivateKey(decrypted) if err != nil { return "", err } } var encoded bytes.Buffer pem.Encode(&encoded, &pem.Block{Type: currentBlock.Type, Bytes: keyBytes}) - keyBlock = encoded.Bytes() + keyBlock := encoded.Bytes() + keyBlocks = append(keyBlocks, keyBlock) start = len(data) - len(remaining) } else { - keyBlock = data[start : len(data)-len(remaining)] + keyBlock := data[start : len(data)-len(remaining)] + keyBlocks = append(keyBlocks, keyBlock) start += len(keyBlock) } } } - if len(certBlock) == 0 { + if len(certBlocks) == 0 { return "", fmt.Errorf("failed to find CERTIFICATE") } - if len(keyBlock) == 0 { + if len(keyBlocks) == 0 { return "", fmt.Errorf("failed to find PRIVATE KEY") } - cert, err := tls.X509KeyPair(certBlock, keyBlock) + cert, err := tls.X509KeyPair(bytes.Join(certBlocks, []byte("\n")), bytes.Join(keyBlocks, []byte("\n"))) if err != nil { return "", err } @@ -1002,7 +1113,7 @@ func addClientCertFromBytes(cfg *tls.Config, data []byte, keyPasswd string) (str return "", err } - return x509CertSubject(crt), nil + return crt.Subject.String(), nil } func stringSliceContains(source []string, target string) bool { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions_1_10.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions_1_10.go deleted file mode 100644 index 1943d9c50..000000000 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions_1_10.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build go1.10 - -package options - -import "crypto/x509" - -func x509CertSubject(cert *x509.Certificate) string { - return cert.Subject.String() -} - -func x509MarshalPKCS8PrivateKey(pkcs8 interface{}) ([]byte, error) { - return x509.MarshalPKCS8PrivateKey(pkcs8) -} diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions_1_9.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions_1_9.go deleted file mode 100644 index e10992296..000000000 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions_1_9.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !go1.10 - -package options - -import ( - "crypto/x509" - "fmt" -) - -// We don't support Go versions less than 1.10, but Evergreen needs to be able to compile the driver -// using version 1.9 and cert.Subject -func x509CertSubject(cert *x509.Certificate) string { - return "" -} - -// We don't support Go versions less than 1.10, but Evergreen needs to be able to compile the driver -// using version 1.9 and x509.MarshalPKCS8PrivateKey() -func x509MarshalPKCS8PrivateKey(pkcs8 interface{}) ([]byte, error) { - return nil, fmt.Errorf("PKCS8-encrypted client private keys are only 
supported with go1.10+") -} diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go index 5c8111471..e8b68a270 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go @@ -15,20 +15,20 @@ import ( // CollectionOptions represents options that can be used to configure a Collection. type CollectionOptions struct { - // The read concern to use for operations executed on the Collection. The default value is nil, which means that - // the read concern of the database used to configure the Collection will be used. + // ReadConcern is the read concern to use for operations executed on the Collection. The default value is nil, which means that + // the read concern of the Database used to configure the Collection will be used. ReadConcern *readconcern.ReadConcern - // The write concern to use for operations executed on the Collection. The default value is nil, which means that - // the write concern of the database used to configure the Collection will be used. + // WriteConcern is the write concern to use for operations executed on the Collection. The default value is nil, which means that + // the write concern of the Database used to configure the Collection will be used. WriteConcern *writeconcern.WriteConcern - // The read preference to use for operations executed on the Collection. The default value is nil, which means that - // the read preference of the database used to configure the Collection will be used. + // ReadPreference is the read preference to use for operations executed on the Collection. The default value is nil, which means that + // the read preference of the Database used to configure the Collection will be used. ReadPreference *readpref.ReadPref - // The BSON registry to marshal and unmarshal documents for operations executed on the Collection. The default value - // is nil, which means that the registry of the database used to configure the Collection will be used. + // Registry is the BSON registry to marshal and unmarshal documents for operations executed on the Collection. The default value + // is nil, which means that the registry of the Database used to configure the Collection will be used. Registry *bsoncodec.Registry } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go index 094524c10..06f5dce76 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go @@ -15,6 +15,13 @@ type CountOptions struct { // default value is nil, which means the default collation of the collection will be used. Collation *Collation + // TODO(GODRIVER-2386): CountOptions executor uses aggregation under the hood, which means this type has to be + // TODO a string for now. This can be replaced with `Comment interface{}` once 2386 is implemented. + + // A string or document that will be included in server logs, profiling logs, and currentOp queries to help trace + // the operation. The default is nil, which means that no comment will be included in the logs. + Comment *string + // The index to use for the aggregation. This should either be the index name as a string or the index specification // as a document. The driver will return an error if the hint parameter is a multi-key map. 
The default value is nil, // which means that no hint will be sent. @@ -26,6 +33,10 @@ type CountOptions struct { // The maximum amount of time that the query can run on the server. The default value is nil, meaning that there is // no time limit for query execution. + // + // Deprecated: This option is deprecated and will eventually be removed in version 2.0 of the driver. The more general + // Timeout option should be used in its place to control the amount of time that the count operation can run before + // returning an error. MaxTime is still usable through the deprecated setter. MaxTime *time.Duration // The number of documents to skip before counting. The default value is 0. @@ -43,6 +54,12 @@ func (co *CountOptions) SetCollation(c *Collation) *CountOptions { return co } +// SetComment sets the value for the Comment field. +func (co *CountOptions) SetComment(c string) *CountOptions { + co.Comment = &c + return co +} + // SetHint sets the value for the Hint field. func (co *CountOptions) SetHint(h interface{}) *CountOptions { co.Hint = h @@ -56,6 +73,10 @@ func (co *CountOptions) SetLimit(i int64) *CountOptions { } // SetMaxTime sets the value for the MaxTime field. +// +// Deprecated: This option is deprecated and will eventually be removed in version 2.0 of the driver. The more general +// Timeout option should be used in its place to control the amount of time that the count operation can run before +// returning an error. func (co *CountOptions) SetMaxTime(d time.Duration) *CountOptions { co.MaxTime = &d return co @@ -77,6 +98,9 @@ func MergeCountOptions(opts ...*CountOptions) *CountOptions { if co.Collation != nil { countOpts.Collation = co.Collation } + if co.Comment != nil { + countOpts.Comment = co.Comment + } if co.Hint != nil { countOpts.Hint = co.Hint } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go index 3fe63c323..6fc7d066a 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go @@ -67,7 +67,7 @@ func (tso *TimeSeriesOptions) SetGranularity(granularity string) *TimeSeriesOpti // CreateCollectionOptions represents options that can be used to configure a CreateCollection operation. type CreateCollectionOptions struct { - // Specifies if the collection is capped (see https://docs.mongodb.com/manual/core/capped-collections/). If true, + // Specifies if the collection is capped (see https://www.mongodb.com/docs/manual/core/capped-collections/). If true, // the SizeInBytes option must also be specified. The default value is false. Capped *bool @@ -75,6 +75,12 @@ type CreateCollectionOptions struct { // For previous server versions, the driver will return an error if this option is used. The default value is nil. Collation *Collation + // Specifies how change streams opened against the collection can return pre- and post-images of updated + // documents. The value must be a document in the form {