diff --git a/Gopkg.lock b/Gopkg.lock index 5c8dfd904e..5ce9b81bf4 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -3,95 +3,74 @@ [[projects]] branch = "master" - digest = "1:81f8c061c3d18ed1710957910542bc17d2b789c6cd19e0f654c30b35fd255ca5" name = "github.com/Azure/go-ansiterm" packages = [ ".", - "winterm", + "winterm" ] - pruneopts = "NUT" revision = "d6e3b3328b783f23731bc4d058875b0371ff8109" [[projects]] - digest = "1:d848e2bdc690ea54c4b49894b67a05db318a97ee6561879b814c2c1f82f61406" name = "github.com/Sirupsen/logrus" packages = ["."] - pruneopts = "NUT" revision = "bcd833dfe83d3cebad139e4a29ed79cb2318bf95" version = "v1.2.0" [[projects]] branch = "master" - digest = "1:707ebe952a8b3d00b343c01536c79c73771d100f63ec6babeaed5c79e2b8a8dd" name = "github.com/beorn7/perks" packages = ["quantile"] - pruneopts = "NUT" revision = "3a771d992973f24aa725d07868b467d1ddfceafb" [[projects]] - digest = "1:94ffc0947c337d618b6ff5ed9abaddc1217b090c1b3a1ae4739b35b7b25851d5" name = "github.com/container-storage-interface/spec" packages = ["lib/go/csi"] - pruneopts = "NUT" revision = "ed0bb0e1557548aa028307f48728767cfe8f6345" version = "v1.0.0" [[projects]] - digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" name = "github.com/davecgh/go-spew" packages = ["spew"] - pruneopts = "NUT" revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" version = "v1.1.1" [[projects]] - digest = "1:53e99d883df3e940f5f0223795f300eb32b8c044f226132bfc0e74930f24ea4b" name = "github.com/docker/docker" packages = [ "pkg/term", - "pkg/term/windows", + "pkg/term/windows" ] - pruneopts = "NUT" revision = "092cba3727bb9b4a2f0e922cd6c0f93ea270e363" version = "v1.13.1" [[projects]] - digest = "1:32598368f409bbee79deb9d43569fcd92b9fb27f39155f5e166b3371217f051f" name = "github.com/evanphx/json-patch" packages = ["."] - pruneopts = "NUT" revision = "72bf35d0ff611848c1dc9df0f976c81192392fa5" version = "v4.1.0" [[projects]] - digest = "1:8679b8a64f3613e9749c5640c3535c83399b8e69f67ce54d91dc73f6d77373af" name = "github.com/gogo/protobuf" packages = [ "proto", - "sortkeys", + "sortkeys" ] - pruneopts = "NUT" revision = "636bf0302bc95575d69441b25a2603156ffdddf1" version = "v1.1.1" [[projects]] branch = "master" - digest = "1:3fb07f8e222402962fa190eb060608b34eddfb64562a18e2167df2de0ece85d8" name = "github.com/golang/groupcache" packages = ["lru"] - pruneopts = "NUT" revision = "c65c006176ff7ff98bb916961c7abbc6b0afc0aa" [[projects]] - digest = "1:bc38c7c481812e178d85160472e231c5e1c9a7f5845d67e23ee4e706933c10d8" name = "github.com/golang/mock" packages = ["gomock"] - pruneopts = "NUT" revision = "c34cdb4725f4c3844d095133c6e40e448b86589b" version = "v1.1.1" [[projects]] - digest = "1:bff0ce7c8e3d6357fa5a8549bbe4bdb620bddc13c11ae569aa7248ea92e2139f" name = "github.com/golang/protobuf" packages = [ "descriptor", @@ -101,118 +80,94 @@ "ptypes/any", "ptypes/duration", "ptypes/timestamp", - "ptypes/wrappers", + "ptypes/wrappers" ] - pruneopts = "NUT" revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" version = "v1.2.0" [[projects]] branch = "master" - digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107" name = "github.com/google/btree" packages = ["."] - pruneopts = "NUT" revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306" [[projects]] branch = "master" - digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc" name = "github.com/google/gofuzz" packages = ["."] - pruneopts = "NUT" revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" [[projects]] - digest = 
"1:56a1f3949ebb7fa22fa6b4e4ac0fe0f77cc4faee5b57413e6fa9199a8458faf1" name = "github.com/google/uuid" packages = ["."] - pruneopts = "NUT" revision = "9b3b1e0f5f99ae461456d768e7d301a7acdaa2d8" version = "v1.1.0" [[projects]] - digest = "1:06a7dadb7b760767341ffb6c8d377238d68a1226f2b21b5d497d2e3f6ecf6b4e" name = "github.com/googleapis/gnostic" packages = [ "OpenAPIv2", "compiler", - "extensions", + "extensions" ] - pruneopts = "NUT" revision = "7c663266750e7d82587642f65e60bc4083f1f84e" version = "v0.2.0" [[projects]] branch = "master" - digest = "1:7fdf3223c7372d1ced0b98bf53457c5e89d89aecbad9a77ba9fcc6e01f9e5621" name = "github.com/gregjones/httpcache" packages = [ ".", - "diskcache", + "diskcache" ] - pruneopts = "NUT" revision = "c63ab54fda8f77302f8d414e19933f2b6026a089" [[projects]] - digest = "1:b42cde0e1f3c816dd57f57f7bbcf05ca40263ad96f168714c130c611fc0856a6" name = "github.com/hashicorp/golang-lru" packages = [ ".", - "simplelru", + "simplelru" ] - pruneopts = "NUT" revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768" version = "v0.5.0" [[projects]] - digest = "1:9a52adf44086cead3b384e5d0dbf7a1c1cce65e67552ee3383a8561c42a18cd3" name = "github.com/imdario/mergo" packages = ["."] - pruneopts = "NUT" revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4" version = "v0.3.6" [[projects]] - digest = "1:8e36686e8b139f8fe240c1d5cf3a145bc675c22ff8e707857cdd3ae17b00d728" name = "github.com/json-iterator/go" packages = ["."] - pruneopts = "NUT" revision = "1624edc4454b8682399def8740d46db5e4362ba4" version = "v1.1.5" [[projects]] - digest = "1:4059c14e87a2de3a434430340521b5feece186c1469eff0834c29a63870de3ed" name = "github.com/konsorten/go-windows-terminal-sequences" packages = ["."] - pruneopts = "NUT" revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242" version = "v1.0.1" [[projects]] - digest = "1:b09c9ac14d93f481c768e73a12d4e8527ac25232d593f2ad918ffe462901b654" name = "github.com/kubernetes-csi/csi-lib-utils" packages = [ "connection", - "protosanitizer", + "protosanitizer" ] - pruneopts = "NUT" revision = "8053f37bf1d11d769c20f9514538c4b3b906e1f7" version = "v0.4.0-rc1" [[projects]] - digest = "1:0f47ba38b647bb8e7cddb71c3341134b2c2eaa8cef2af82291b5c9870ee7f572" name = "github.com/kubernetes-csi/csi-test" packages = [ "driver", - "utils", + "utils" ] - pruneopts = "NUT" revision = "722eead38c269060656e0fc91f280610ea56f19b" version = "v1.0.3" [[projects]] - digest = "1:70e121796a8b5b538b08bf54c8d34245069a82300d8f6e8476874e4dfa450bf6" name = "github.com/kubernetes-csi/external-snapshotter" packages = [ "pkg/apis/volumesnapshot/v1alpha1", @@ -220,148 +175,108 @@ "pkg/client/clientset/versioned/fake", "pkg/client/clientset/versioned/scheme", "pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1", - "pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake", + "pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake" ] - pruneopts = "NUT" revision = "7f0070ed917ebab6e931cfa403e7e3e0e4490138" version = "v1.0.0-rc4" [[projects]] - digest = "1:c20dc06168b9d2cc276b0ba050c0154dfe37b30e382c7049716af2c7cf1fe83e" - name = "github.com/kubernetes-sigs/sig-storage-lib-external-provisioner" - packages = [ - "controller", - "controller/metrics", - "util", - ] - pruneopts = "NUT" - revision = "99ac517d9c047238af8b8a6c411084b9f7888a87" - version = "v2.2.0" - -[[projects]] - digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6" name = "github.com/matttproud/golang_protobuf_extensions" packages = ["pbutil"] - pruneopts = "NUT" revision = 
"c12348ce28de40eed0136aa2b644d0ee0650e56c" version = "v1.0.1" [[projects]] - digest = "1:c56487cd684ec90714710940143f021e5866ccda72b872d74ac579f45b7fa3d3" name = "github.com/miekg/dns" packages = ["."] - pruneopts = "NUT" revision = "7064f7248f5fa5fd79382a76328b4e200b79e4ae" version = "v1.0.15" [[projects]] - digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f" name = "github.com/modern-go/concurrent" packages = ["."] - pruneopts = "NUT" revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" version = "1.0.3" [[projects]] - digest = "1:c6aca19413b13dc59c220ad7430329e2ec454cc310bc6d8de2c7e2b93c18a0f6" name = "github.com/modern-go/reflect2" packages = ["."] - pruneopts = "NUT" revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" version = "1.0.1" [[projects]] - digest = "1:93b1d84c5fa6d1ea52f4114c37714cddd84d5b78f151b62bb101128dd51399bf" name = "github.com/pborman/uuid" packages = ["."] - pruneopts = "NUT" revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1" version = "v1.2" [[projects]] branch = "master" - digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2" name = "github.com/petar/GoLLRB" packages = ["llrb"] - pruneopts = "NUT" revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" [[projects]] - digest = "1:6c6d91dc326ed6778783cff869c49fb2f61303cdd2ebbcf90abe53505793f3b6" name = "github.com/peterbourgon/diskv" packages = ["."] - pruneopts = "NUT" revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" version = "v2.0.1" [[projects]] - digest = "1:a8512715947d49f24e6aa2d0fdd06f46bdd8d2e792517ba0ac95a2c089eaae28" name = "github.com/prometheus/client_golang" packages = [ "prometheus", "prometheus/internal", - "prometheus/promhttp", + "prometheus/promhttp" ] - pruneopts = "NUT" revision = "abad2d1bd44235a26707c172eab6bca5bf2dbad3" version = "v0.9.1" [[projects]] branch = "master" - digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" name = "github.com/prometheus/client_model" packages = ["go"] - pruneopts = "NUT" revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" [[projects]] branch = "master" - digest = "1:06375f3b602de9c99fa99b8484f0e949fd5273e6e9c6592b5a0dd4cd9085f3ea" name = "github.com/prometheus/common" packages = [ "expfmt", "internal/bitbucket.org/ww/goautoneg", - "model", + "model" ] - pruneopts = "NUT" revision = "aeab699e26f4d4089dba5e522e5d9babd2adbaf7" [[projects]] branch = "master" - digest = "1:102dea0c03a915acfc634b7c67f2662012b5483b56d9025e33f5188e112759b6" name = "github.com/prometheus/procfs" packages = [ ".", "internal/util", "nfs", - "xfs", + "xfs" ] - pruneopts = "NUT" revision = "185b4288413d2a0dd0806f78c90dde719829e5ae" [[projects]] - digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779" name = "github.com/spf13/pflag" packages = ["."] - pruneopts = "NUT" revision = "298182f68c66c05229eb03ac171abe6e309ee79a" version = "v1.0.3" [[projects]] branch = "master" - digest = "1:583f2f436bab7b59a7bd3e759f1375b06f460760ed1f9235604d143eaab83009" name = "golang.org/x/crypto" packages = [ "ed25519", "ed25519/internal/edwards25519", - "ssh/terminal", + "ssh/terminal" ] - pruneopts = "NUT" revision = "3d3f9f413869b949e48070b5bc593aa22cc2b8f2" [[projects]] branch = "master" - digest = "1:c1117b8501c7d848516d1db06f1375cd0913b353763d5a40982344f3d5bb3dc6" name = "golang.org/x/net" packages = [ "bpf", @@ -376,35 +291,29 @@ "internal/timeseries", "ipv4", "ipv6", - "trace", + "trace" ] - pruneopts = "NUT" revision = "adae6a3d119ae4890b46832a2e88a95adc62b8e7" [[projects]] branch = "master" 
- digest = "1:eaed6885bd08c25f569dea6ced07a0730623466e27e6c41639fa4a3e1cbaaa76" name = "golang.org/x/oauth2" packages = [ ".", - "internal", + "internal" ] - pruneopts = "NUT" revision = "8f65e3013ebad444f13bc19536f7865efc793816" [[projects]] branch = "master" - digest = "1:1435bb97e37d0ba44239bcaf413539d3bcfc37098c24f1c795c49d654af5e956" name = "golang.org/x/sys" packages = [ "unix", - "windows", + "windows" ] - pruneopts = "NUT" revision = "ec83556a53fe16b65c452a104ea9d1e86a671852" [[projects]] - digest = "1:e7071ed636b5422cc51c0e3a6cebc229d6c9fffc528814b519a980641422d619" name = "golang.org/x/text" packages = [ "collate", @@ -420,22 +329,18 @@ "unicode/bidi", "unicode/cldr", "unicode/norm", - "unicode/rangetable", + "unicode/rangetable" ] - pruneopts = "NUT" revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" version = "v0.3.0" [[projects]] branch = "master" - digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90" name = "golang.org/x/time" packages = ["rate"] - pruneopts = "NUT" revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd" [[projects]] - digest = "1:655acbef8faad33db29b6a35655341fb4a594e87e56635f05a3e70ed0bd7bd95" name = "google.golang.org/appengine" packages = [ "internal", @@ -444,22 +349,18 @@ "internal/log", "internal/remote_api", "internal/urlfetch", - "urlfetch", + "urlfetch" ] - pruneopts = "NUT" revision = "4a4468ece617fc8205e99368fa2200e9d1fad421" version = "v1.3.0" [[projects]] branch = "master" - digest = "1:56b0bca90b7e5d1facf5fbdacba23e4e0ce069d25381b8e2f70ef1e7ebfb9c1a" name = "google.golang.org/genproto" packages = ["googleapis/rpc/status"] - pruneopts = "NUT" revision = "b5d43981345bdb2c233eb4bf3277847b48c6fdc6" [[projects]] - digest = "1:d0b680e0e10bfbcbf2e3fed13f7e2ba52096518341a176cce93400e23f427b0d" name = "google.golang.org/grpc" packages = [ ".", @@ -489,30 +390,24 @@ "resolver/passthrough", "stats", "status", - "tap", + "tap" ] - pruneopts = "NUT" revision = "2e463a05d100327ca47ac218281906921038fd95" version = "v1.16.0" [[projects]] - digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" name = "gopkg.in/inf.v0" packages = ["."] - pruneopts = "NUT" revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" version = "v0.9.1" [[projects]] - digest = "1:7c95b35057a0ff2e19f707173cc1a947fa43a6eb5c4d300d196ece0334046082" name = "gopkg.in/yaml.v2" packages = ["."] - pruneopts = "NUT" revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" version = "v2.2.1" [[projects]] - digest = "1:c453ddc26bdab1e4267683a588ad9046e48d803a73f124fe2927adbab6ff02a5" name = "k8s.io/api" packages = [ "admissionregistration/v1alpha1", @@ -546,14 +441,12 @@ "settings/v1alpha1", "storage/v1", "storage/v1alpha1", - "storage/v1beta1", + "storage/v1beta1" ] - pruneopts = "NUT" revision = "d01564359763a39d310efc27866b63d4f5c92f1d" version = "kubernetes-1.13.0-beta.1" [[projects]] - digest = "1:b81d0fe2db41d154077527c004654d59d4f5bfd45d79aa159e777a2d44c336f0" name = "k8s.io/apimachinery" packages = [ "pkg/api/errors", @@ -599,26 +492,22 @@ "pkg/version", "pkg/watch", "third_party/forked/golang/json", - "third_party/forked/golang/reflect", + "third_party/forked/golang/reflect" ] - pruneopts = "NUT" revision = "0028e7a3cc82b29fea214c5793c77c24a23bb3ef" version = "kubernetes-1.13.0-beta.1" [[projects]] - digest = "1:fd571a0b0a945e8f8cddda7a33b600cc4d0328be52dc9420b758324d217fca31" name = "k8s.io/apiserver" packages = [ "pkg/util/feature", "pkg/util/feature/testing", - "pkg/util/flag", + "pkg/util/flag" ] - pruneopts = "NUT" revision = 
"6b360527ed84a1a6bb3883faef76c71cc17499ba" version = "kubernetes-1.13.0-beta.1" [[projects]] - digest = "1:f6a1f2ac66fd4a03e9dda70c91b42c18db4830c262b41a2c6a537852e792af89" name = "k8s.io/client-go" packages = [ "discovery", @@ -792,14 +681,12 @@ "util/homedir", "util/integer", "util/retry", - "util/workqueue", + "util/workqueue" ] - pruneopts = "NUT" revision = "46a8dc78ec29761d6cb89a9eb50ddde947c84030" version = "kubernetes-1.13.0-beta.1" [[projects]] - digest = "1:2a675cf943e6dc6375162a5c7bdb35ddced6dc04a2f84f82faee7817ea46a97c" name = "k8s.io/csi-api" packages = [ "pkg/apis/csi/v1alpha1", @@ -807,87 +694,51 @@ "pkg/client/clientset/versioned/fake", "pkg/client/clientset/versioned/scheme", "pkg/client/clientset/versioned/typed/csi/v1alpha1", - "pkg/client/clientset/versioned/typed/csi/v1alpha1/fake", + "pkg/client/clientset/versioned/typed/csi/v1alpha1/fake" ] - pruneopts = "NUT" revision = "504ff913b8b7b6ee9cca269105b6c4a4c7923655" version = "kubernetes-1.13.0-beta.1" [[projects]] - digest = "1:9cc257b3c9ff6a0158c9c661ab6eebda1fe8a4a4453cd5c4044dc9a2ebfb992b" name = "k8s.io/klog" packages = ["."] - pruneopts = "NUT" revision = "a5bc97fbc634d635061f3146511332c7e313a55a" version = "v0.1.0" [[projects]] branch = "master" - digest = "1:03a96603922fc1f6895ae083e1e16d943b55ef0656b56965351bd87e7d90485f" name = "k8s.io/kube-openapi" packages = ["pkg/util/proto"] - pruneopts = "NUT" revision = "0317810137be915b9cf888946c6e115c1bfac693" [[projects]] - digest = "1:c7b9c8307ce2aa9f3d067c6d03e811b3a1c54bc42257f75a92a59f449d31679f" name = "k8s.io/kubernetes" packages = [ "pkg/apis/core", - "pkg/apis/core/helper", + "pkg/apis/core/helper" ] - pruneopts = "NUT" revision = "17c77c7898218073f14c8d573582e8d2313dc740" version = "v1.12.2" [[projects]] - digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c" + name = "sigs.k8s.io/sig-storage-lib-external-provisioner" + packages = [ + "controller", + "controller/metrics", + "util" + ] + revision = "0fcd2a19e047b6a1e9203b8ed3511ed8679b23f4" + version = "v3.0.0-beta" + +[[projects]] name = "sigs.k8s.io/yaml" packages = ["."] - pruneopts = "NUT" revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480" version = "v1.1.0" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - input-imports = [ - "github.com/container-storage-interface/spec/lib/go/csi", - "github.com/golang/mock/gomock", - "github.com/kubernetes-csi/csi-lib-utils/connection", - "github.com/kubernetes-csi/csi-lib-utils/protosanitizer", - "github.com/kubernetes-csi/csi-test/driver", - "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1", - "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned", - "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/fake", - "github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller", - "github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/util", - "github.com/spf13/pflag", - "google.golang.org/grpc", - "k8s.io/api/core/v1", - "k8s.io/api/storage/v1beta1", - "k8s.io/apimachinery/pkg/api/resource", - "k8s.io/apimachinery/pkg/apis/meta/v1", - "k8s.io/apimachinery/pkg/runtime", - "k8s.io/apimachinery/pkg/types", - "k8s.io/apimachinery/pkg/util/json", - "k8s.io/apimachinery/pkg/util/sets", - "k8s.io/apimachinery/pkg/util/validation", - "k8s.io/apimachinery/pkg/util/wait", - "k8s.io/apiserver/pkg/util/feature", - "k8s.io/apiserver/pkg/util/feature/testing", - "k8s.io/apiserver/pkg/util/flag", - "k8s.io/client-go/kubernetes", - 
"k8s.io/client-go/kubernetes/fake", - "k8s.io/client-go/rest", - "k8s.io/client-go/testing", - "k8s.io/client-go/tools/clientcmd", - "k8s.io/client-go/util/workqueue", - "k8s.io/csi-api/pkg/apis/csi/v1alpha1", - "k8s.io/csi-api/pkg/client/clientset/versioned", - "k8s.io/csi-api/pkg/client/clientset/versioned/fake", - "k8s.io/klog", - "k8s.io/kubernetes/pkg/apis/core/helper", - ] + inputs-digest = "ffbfc2ba22dffd5761b35f3f3f8a6e0989cb2f0f6c1a67390fe9c20133254060" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 0be166930f..d3b45d46c3 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -25,40 +25,8 @@ version = "1.0.0" [[constraint]] - name = "github.com/kubernetes-csi/csi-test" - version = "~v1.0.3" - -[[constraint]] - name = "github.com/kubernetes-csi/external-snapshotter" - version = "1.0.0-rc4" - -[[constraint]] - name = "github.com/kubernetes-sigs/sig-storage-lib-external-provisioner" - version = "v2.2.0" - -[[constraint]] - name = "google.golang.org/grpc" - version = "1.9.2" - -[[constraint]] - name = "k8s.io/apimachinery" - version = "kubernetes-1.13.0-beta.1" - -[[constraint]] - name = "k8s.io/client-go" - version = "kubernetes-1.13.0-beta.1" - -[[constraint]] - name = "k8s.io/api" - version = "kubernetes-1.13.0-beta.1" - -[[constraint]] - name = "k8s.io/csi-api" - version = "kubernetes-1.13.0-beta.1" - -[[constraint]] - name = "k8s.io/apiserver" - version = "kubernetes-1.13.0-beta.1" + name = "sigs.k8s.io/sig-storage-lib-external-provisioner" + version = ">=v3.0.0-beta" [[constraint]] name = "github.com/kubernetes-csi/csi-lib-utils" diff --git a/cmd/csi-provisioner/csi-provisioner.go b/cmd/csi-provisioner/csi-provisioner.go index e0b9d04c9c..4cfff64bb8 100644 --- a/cmd/csi-provisioner/csi-provisioner.go +++ b/cmd/csi-provisioner/csi-provisioner.go @@ -31,8 +31,8 @@ import ( ctrl "github.com/kubernetes-csi/external-provisioner/pkg/controller" snapclientset "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned" - "github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller" csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned" + "sigs.k8s.io/sig-storage-lib-external-provisioner/controller" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 170ccf03f8..1bd6b4dfcc 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -30,8 +30,8 @@ import ( "github.com/kubernetes-csi/external-provisioner/pkg/features" snapapi "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" snapclientset "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned" - "github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller" - "github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/util" + "sigs.k8s.io/sig-storage-lib-external-provisioner/controller" + "sigs.k8s.io/sig-storage-lib-external-provisioner/util" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" diff --git a/pkg/controller/controller_test.go b/pkg/controller/controller_test.go index a066ccb2dc..f83f677110 100644 --- a/pkg/controller/controller_test.go +++ b/pkg/controller/controller_test.go @@ -35,7 +35,6 @@ import ( "github.com/kubernetes-csi/external-provisioner/pkg/features" crdv1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/fake" - 
"github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller" "google.golang.org/grpc" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" @@ -50,6 +49,7 @@ import ( fakeclientset "k8s.io/client-go/kubernetes/fake" k8stesting "k8s.io/client-go/testing" fakecsiclientset "k8s.io/csi-api/pkg/client/clientset/versioned/fake" + "sigs.k8s.io/sig-storage-lib-external-provisioner/controller" ) const ( diff --git a/vendor/github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/LICENSE b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/LICENSE similarity index 100% rename from vendor/github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/LICENSE rename to vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/LICENSE diff --git a/vendor/github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller/controller.go b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/controller.go similarity index 81% rename from vendor/github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller/controller.go rename to vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/controller.go index 96d2d242ca..a988f06e04 100644 --- a/vendor/github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller/controller.go +++ b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/controller.go @@ -28,8 +28,6 @@ import ( "sync" "time" - "github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller/metrics" - "github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/util" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "golang.org/x/time/rate" @@ -53,6 +51,8 @@ import ( ref "k8s.io/client-go/tools/reference" "k8s.io/client-go/util/workqueue" glog "k8s.io/klog" + "sigs.k8s.io/sig-storage-lib-external-provisioner/controller/metrics" + "sigs.k8s.io/sig-storage-lib-external-provisioner/util" ) // annClass annotation represents the storage class associated with a resource: @@ -80,6 +80,8 @@ const annSelectedNode = "volume.kubernetes.io/selected-node" // Finalizer for PVs so we know to clean them up const finalizerPV = "external-provisioner.volume.kubernetes.io/finalizer" +const uidIndex = "uid" + // ProvisionController is a controller that provisions PersistentVolumes for // PersistentVolumeClaims. type ProvisionController struct { @@ -90,6 +92,10 @@ type ProvisionController struct { // annStorageProvisioner to set & watch for, respectively provisionerName string + // additional provisioner names (beyond provisionerName) that the + // provisioner should watch for and handle in annStorageProvisioner + additionalProvisionerNames []string + // The provisioner the controller will use to provision and delete volumes. 
// Presumably this implementer of Provisioner carries its own // volume-specific options and such that it needs in order to provision @@ -104,8 +110,8 @@ type ProvisionController struct { // * 1.6: storage classes enter GA kubeVersion *utilversion.Version - claimInformer cache.SharedInformer - claims cache.Store + claimInformer cache.SharedIndexInformer + claimsIndexer cache.Indexer volumeInformer cache.SharedInformer volumes cache.Store classInformer cache.SharedInformer @@ -134,6 +140,7 @@ type ProvisionController struct { createProvisionedPVBackoff *wait.Backoff createProvisionedPVRetryCount int createProvisionedPVInterval time.Duration + createProvisionerPVLimiter workqueue.RateLimiter failedProvisionThreshold, failedDeleteThreshold int @@ -153,6 +160,11 @@ type ProvisionController struct { hasRun bool hasRunLock *sync.Mutex + + // Map UID -> *PVC with all claims that may be provisioned in the background. + claimsInProgress sync.Map + + volumeStore VolumeStore } const ( @@ -240,6 +252,10 @@ func ExponentialBackOffOnError(exponentialBackOffOnError bool) func(*ProvisionCo // CreateProvisionedPVRetryCount is the number of retries when we create a PV // object for a provisioned volume. Defaults to 5. +// If PV is not saved after given number of retries, corresponding storage asset (volume) is deleted! +// Only one of CreateProvisionedPVInterval+CreateProvisionedPVRetryCount or CreateProvisionedPVBackoff or +// CreateProvisionedPVLimiter can be used. +// Deprecated: Use CreateProvisionedPVLimiter instead, it tries indefinitely. func CreateProvisionedPVRetryCount(createProvisionedPVRetryCount int) func(*ProvisionController) error { return func(c *ProvisionController) error { if c.HasRun() { @@ -248,6 +264,9 @@ func CreateProvisionedPVRetryCount(createProvisionedPVRetryCount int) func(*Prov if c.createProvisionedPVBackoff != nil { return fmt.Errorf("CreateProvisionedPVBackoff cannot be used together with CreateProvisionedPVRetryCount") } + if c.createProvisionerPVLimiter != nil { + return fmt.Errorf("CreateProvisionedPVBackoff cannot be used together with CreateProvisionedPVLimiter") + } c.createProvisionedPVRetryCount = createProvisionedPVRetryCount return nil } @@ -255,6 +274,10 @@ func CreateProvisionedPVRetryCount(createProvisionedPVRetryCount int) func(*Prov // CreateProvisionedPVInterval is the interval between retries when we create a // PV object for a provisioned volume. Defaults to 10 seconds. +// If PV is not saved after given number of retries, corresponding storage asset (volume) is deleted! +// Only one of CreateProvisionedPVInterval+CreateProvisionedPVRetryCount or CreateProvisionedPVBackoff or +// CreateProvisionedPVLimiter can be used. +// Deprecated: Use CreateProvisionedPVLimiter instead, it tries indefinitely. 
func CreateProvisionedPVInterval(createProvisionedPVInterval time.Duration) func(*ProvisionController) error { return func(c *ProvisionController) error { if c.HasRun() { @@ -263,6 +286,9 @@ func CreateProvisionedPVInterval(createProvisionedPVInterval time.Duration) func if c.createProvisionedPVBackoff != nil { return fmt.Errorf("CreateProvisionedPVBackoff cannot be used together with CreateProvisionedPVInterval") } + if c.createProvisionerPVLimiter != nil { + return fmt.Errorf("CreateProvisionedPVInterval cannot be used together with CreateProvisionedPVLimiter") + } c.createProvisionedPVInterval = createProvisionedPVInterval return nil } @@ -270,8 +296,10 @@ func CreateProvisionedPVInterval(createProvisionedPVInterval time.Duration) func // CreateProvisionedPVBackoff is the configuration of exponential backoff between retries when we create a // PV object for a provisioned volume. Defaults to linear backoff, 10 seconds 5 times. -// Only one of CreateProvisionedPVInterval+CreateProvisionedPVRetryCount or CreateProvisionedPVBackoff -// can be used. +// If PV is not saved after given number of retries, corresponding storage asset (volume) is deleted! +// Only one of CreateProvisionedPVInterval+CreateProvisionedPVRetryCount or CreateProvisionedPVBackoff or +// CreateProvisionedPVLimiter can be used. +// Deprecated: Use CreateProvisionedPVLimiter instead, it tries indefinitely. func CreateProvisionedPVBackoff(backoff wait.Backoff) func(*ProvisionController) error { return func(c *ProvisionController) error { if c.HasRun() { @@ -283,11 +311,40 @@ func CreateProvisionedPVBackoff(backoff wait.Backoff) func(*ProvisionController) if c.createProvisionedPVInterval != 0 { return fmt.Errorf("CreateProvisionedPVBackoff cannot be used together with CreateProvisionedPVInterval") } + if c.createProvisionerPVLimiter != nil { + return fmt.Errorf("CreateProvisionedPVBackoff cannot be used together with CreateProvisionedPVLimiter") + } c.createProvisionedPVBackoff = &backoff return nil } } +// CreateProvisionedPVLimiter is the configuration of rate limiter for queue of unsaved PersistentVolumes. +// If set, PVs that fail to be saved to Kubernetes API server will be re-enqueued to a separate workqueue +// with this limiter and re-tried until they are saved to API server. There is no limit of retries. +// The main difference to other CreateProvisionedPV* option is that the storage asset is never deleted +// and the controller continues saving PV to API server indefinitely. +// This option cannot be used with CreateProvisionedPVBackoff or CreateProvisionedPVInterval +// or CreateProvisionedPVRetryCount. +func CreateProvisionedPVLimiter(limiter workqueue.RateLimiter) func(*ProvisionController) error { + return func(c *ProvisionController) error { + if c.HasRun() { + return errRuntime + } + if c.createProvisionedPVRetryCount != 0 { + return fmt.Errorf("CreateProvisionedPVLimiter cannot be used together with CreateProvisionedPVRetryCount") + } + if c.createProvisionedPVInterval != 0 { + return fmt.Errorf("CreateProvisionedPVLimiter cannot be used together with CreateProvisionedPVInterval") + } + if c.createProvisionedPVBackoff != nil { + return fmt.Errorf("CreateProvisionedPVLimiter cannot be used together with CreateProvisionedPVBackoff") + } + c.createProvisionerPVLimiter = limiter + return nil + } +} + // FailedProvisionThreshold is the threshold for max number of retries on // failures of Provision. Set to 0 to retry indefinitely. Defaults to 15. 
func FailedProvisionThreshold(failedProvisionThreshold int) func(*ProvisionController) error { @@ -376,7 +433,7 @@ func RetryPeriod(retryPeriod time.Duration) func(*ProvisionController) error { // ClaimsInformer sets the informer to use for accessing PersistentVolumeClaims. // Defaults to using a internal informer. -func ClaimsInformer(informer cache.SharedInformer) func(*ProvisionController) error { +func ClaimsInformer(informer cache.SharedIndexInformer) func(*ProvisionController) error { return func(c *ProvisionController) error { if c.HasRun() { return errRuntime @@ -448,6 +505,17 @@ func MetricsPath(metricsPath string) func(*ProvisionController) error { } } +// AdditionalProvisionerNames sets additional names for the provisioner +func AdditionalProvisionerNames(additionalProvisionerNames []string) func(*ProvisionController) error { + return func(c *ProvisionController) error { + if c.HasRun() { + return errRuntime + } + c.additionalProvisionerNames = additionalProvisionerNames + return nil + } +} + // HasRun returns whether the controller has Run func (ctrl *ProvisionController) HasRun() bool { ctrl.hasRunLock.Lock() @@ -528,19 +596,27 @@ func NewProvisionController( controller.claimQueue = workqueue.NewNamedRateLimitingQueue(rateLimiter, "claims") controller.volumeQueue = workqueue.NewNamedRateLimitingQueue(rateLimiter, "volumes") - if controller.createProvisionedPVBackoff == nil { - // Use linear backoff with createProvisionedPVInterval and createProvisionedPVRetryCount by default. - if controller.createProvisionedPVInterval == 0 { - controller.createProvisionedPVInterval = DefaultCreateProvisionedPVInterval - } - if controller.createProvisionedPVRetryCount == 0 { - controller.createProvisionedPVRetryCount = DefaultCreateProvisionedPVRetryCount - } - controller.createProvisionedPVBackoff = &wait.Backoff{ - Duration: controller.createProvisionedPVInterval, - Factor: 1, // linear backoff - Steps: controller.createProvisionedPVRetryCount, + if controller.createProvisionerPVLimiter != nil { + glog.V(2).Infof("Using saving PVs to API server in background") + controller.volumeStore = NewVolumeStoreQueue(client, controller.createProvisionerPVLimiter) + } else { + if controller.createProvisionedPVBackoff == nil { + // Use linear backoff with createProvisionedPVInterval and createProvisionedPVRetryCount by default. 
+ if controller.createProvisionedPVInterval == 0 { + controller.createProvisionedPVInterval = DefaultCreateProvisionedPVInterval + } + if controller.createProvisionedPVRetryCount == 0 { + controller.createProvisionedPVRetryCount = DefaultCreateProvisionedPVRetryCount + } + controller.createProvisionedPVBackoff = &wait.Backoff{ + Duration: controller.createProvisionedPVInterval, + Factor: 1, // linear backoff + Steps: controller.createProvisionedPVRetryCount, + //Cap: controller.createProvisionedPVInterval, + } } + glog.V(2).Infof("Using blocking saving PVs to API server") + controller.volumeStore = NewBackoffStore(client, controller.eventRecorder, controller.createProvisionedPVBackoff, controller) } informer := informers.NewSharedInformerFactory(client, controller.resyncPeriod) @@ -549,9 +625,12 @@ func NewProvisionController( // PersistentVolumeClaims claimHandler := cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { controller.enqueueWork(controller.claimQueue, obj) }, - UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(controller.claimQueue, newObj) }, - DeleteFunc: func(obj interface{}) { controller.forgetWork(controller.claimQueue, obj) }, + AddFunc: func(obj interface{}) { controller.enqueueClaim(obj) }, + UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueClaim(newObj) }, + DeleteFunc: func(obj interface{}) { + // NOOP. The claim is either in claimsInProgress and in the queue, so it will be processed as usual + // or it's not in claimsInProgress and then we don't care + }, } if controller.claimInformer != nil { @@ -560,15 +639,22 @@ func NewProvisionController( controller.claimInformer = informer.Core().V1().PersistentVolumeClaims().Informer() controller.claimInformer.AddEventHandler(claimHandler) } - controller.claims = controller.claimInformer.GetStore() + controller.claimInformer.AddIndexers(cache.Indexers{uidIndex: func(obj interface{}) ([]string, error) { + uid, err := getObjectUID(obj) + if err != nil { + return nil, err + } + return []string{uid}, nil + }}) + controller.claimsIndexer = controller.claimInformer.GetIndexer() // ----------------- // PersistentVolumes volumeHandler := cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { controller.enqueueWork(controller.volumeQueue, obj) }, - UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(controller.volumeQueue, newObj) }, - DeleteFunc: func(obj interface{}) { controller.forgetWork(controller.volumeQueue, obj) }, + AddFunc: func(obj interface{}) { controller.enqueueVolume(obj) }, + UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueVolume(newObj) }, + DeleteFunc: func(obj interface{}) { controller.forgetVolume(obj) }, } if controller.volumeInformer != nil { @@ -591,13 +677,40 @@ func NewProvisionController( } } controller.classes = controller.classInformer.GetStore() - return controller } -// enqueueWork takes an obj and converts it into a namespace/name string which +func getObjectUID(obj interface{}) (string, error) { + var object metav1.Object + var ok bool + if object, ok = obj.(metav1.Object); !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + return "", fmt.Errorf("error decoding object, invalid type") + } + object, ok = tombstone.Obj.(metav1.Object) + if !ok { + return "", fmt.Errorf("error decoding object tombstone, invalid type") + } + } + return string(object.GetUID()), nil +} + +// enqueueClaim takes an obj and converts it into UID that is then put onto claim work queue. 
+func (ctrl *ProvisionController) enqueueClaim(obj interface{}) { + uid, err := getObjectUID(obj) + if err != nil { + utilruntime.HandleError(err) + return + } + if ctrl.claimQueue.NumRequeues(uid) == 0 { + ctrl.claimQueue.Add(uid) + } +} + +// enqueueVolume takes an obj and converts it into a namespace/name string which // is then put onto the given work queue. -func (ctrl *ProvisionController) enqueueWork(queue workqueue.RateLimitingInterface, obj interface{}) { +func (ctrl *ProvisionController) enqueueVolume(obj interface{}) { var key string var err error if key, err = cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err != nil { @@ -606,22 +719,22 @@ func (ctrl *ProvisionController) enqueueWork(queue workqueue.RateLimitingInterfa } // Re-Adding is harmless but try to add it to the queue only if it is not // already there, because if it is already there we *must* be retrying it - if queue.NumRequeues(key) == 0 { - queue.Add(key) + if ctrl.volumeQueue.NumRequeues(key) == 0 { + ctrl.volumeQueue.Add(key) } } -// forgetWork Forgets an obj from the given work queue, telling the queue to +// forgetVolume Forgets an obj from the given work queue, telling the queue to // stop tracking its retries because e.g. the obj was deleted -func (ctrl *ProvisionController) forgetWork(queue workqueue.RateLimitingInterface, obj interface{}) { +func (ctrl *ProvisionController) forgetVolume(obj interface{}) { var key string var err error if key, err = cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err != nil { utilruntime.HandleError(err) return } - queue.Forget(key) - queue.Done(key) + ctrl.volumeQueue.Forget(key) + ctrl.volumeQueue.Done(key) } // Run starts all of this controller's control loops @@ -686,11 +799,14 @@ func (ctrl *ProvisionController) Run(_ <-chan struct{}) { select {} } + go ctrl.volumeStore.Run(context.TODO(), DefaultThreadiness) + if ctrl.leaderElection { rl, err := resourcelock.New("endpoints", ctrl.leaderElectionNamespace, strings.Replace(ctrl.provisionerName, "/", "-", -1), ctrl.client.CoreV1(), + nil, resourcelock.ResourceLockConfig{ Identity: ctrl.id, EventRecorder: ctrl.eventRecorder, @@ -744,7 +860,7 @@ func (ctrl *ProvisionController) processNextClaimWorkItem() bool { return fmt.Errorf("expected string in workqueue but got %#v", obj) } - if err := ctrl.syncClaimHandler(key); err != nil { + if _, err := ctrl.syncClaimHandler(key); err != nil { if ctrl.failedProvisionThreshold == 0 { glog.Warningf("Retrying syncing claim %q, failure %v", key, ctrl.claimQueue.NumRequeues(obj)) ctrl.claimQueue.AddRateLimited(obj) @@ -753,6 +869,8 @@ func (ctrl *ProvisionController) processNextClaimWorkItem() bool { ctrl.claimQueue.AddRateLimited(obj) } else { glog.Errorf("Giving up syncing claim %q because failures %v >= threshold %v", key, ctrl.claimQueue.NumRequeues(obj), ctrl.failedProvisionThreshold) + glog.V(2).Infof("Removing PVC %s from claims in progress", key) + ctrl.claimsInProgress.Delete(key) // This can leak a volume that's being provisioned in the background! 
// Done but do not Forget: it will not be in the queue but NumRequeues // will be saved until the obj is deleted from kubernetes } @@ -760,6 +878,8 @@ func (ctrl *ProvisionController) processNextClaimWorkItem() bool { } ctrl.claimQueue.Forget(obj) + glog.V(2).Infof("Provisioning succeeded, removing PVC %s from claims in progress", key) + ctrl.claimsInProgress.Delete(key) return nil }(obj) @@ -816,17 +936,40 @@ func (ctrl *ProvisionController) processNextVolumeWorkItem() bool { } // syncClaimHandler gets the claim from informer's cache then calls syncClaim -func (ctrl *ProvisionController) syncClaimHandler(key string) error { - claimObj, exists, err := ctrl.claims.GetByKey(key) +func (ctrl *ProvisionController) syncClaimHandler(key string) (ProvisioningState, error) { + objs, err := ctrl.claimsIndexer.ByIndex(uidIndex, key) if err != nil { - return err + return ProvisioningFinished, err } - if !exists { - utilruntime.HandleError(fmt.Errorf("claim %q in work queue no longer exists", key)) - return nil + var claimObj interface{} + if len(objs) > 0 { + claimObj = objs[0] + } else { + obj, found := ctrl.claimsInProgress.Load(key) + if !found { + utilruntime.HandleError(fmt.Errorf("claim %q in work queue no longer exists", key)) + return ProvisioningFinished, nil + } + claimObj = obj + } + status, err := ctrl.syncClaim(claimObj) + if err == nil || status == ProvisioningFinished { + // Provisioning is 100% finished / not in progress. + glog.V(2).Infof("Final error received, removing PVC %s from claims in progress", key) + ctrl.claimsInProgress.Delete(key) + return status, err + } + if status == ProvisioningInBackground { + // Provisioning is in progress in background. + glog.V(2).Infof("Temporary error received, adding PVC %s to claims in progress", key) + ctrl.claimsInProgress.Store(key, claimObj) + } else { + // status == ProvisioningNoChange. + // Don't change claimsInProgress: + // - the claim is already there if previous status was ProvisioningInBackground. + // - the claim is not there if if previous status was ProvisioningFinished. } - - return ctrl.syncClaim(claimObj) + return status, err } // syncVolumeHandler gets the volume from informer's cache then calls syncVolume @@ -845,19 +988,22 @@ func (ctrl *ProvisionController) syncVolumeHandler(key string) error { // syncClaim checks if the claim should have a volume provisioned for it and // provisions one if so. 
-func (ctrl *ProvisionController) syncClaim(obj interface{}) error { +func (ctrl *ProvisionController) syncClaim(obj interface{}) (ProvisioningState, error) { claim, ok := obj.(*v1.PersistentVolumeClaim) if !ok { - return fmt.Errorf("expected claim but got %+v", obj) + return ProvisioningFinished, fmt.Errorf("expected claim but got %+v", obj) } if ctrl.shouldProvision(claim) { startTime := time.Now() - err := ctrl.provisionClaimOperation(claim) + + var status ProvisioningState + var err error + status, err = ctrl.provisionClaimOperation(claim) ctrl.updateProvisionStats(claim, err, startTime) - return err + return status, err } - return nil + return ProvisioningFinished, nil } // syncVolume checks if the volume should be deleted and deletes if so @@ -876,6 +1022,20 @@ func (ctrl *ProvisionController) syncVolume(obj interface{}) error { return nil } +// knownProvisioner checks if provisioner name has been +// configured to provision volumes for +func (ctrl *ProvisionController) knownProvisioner(provisioner string) bool { + if provisioner == ctrl.provisionerName { + return true + } + for _, p := range ctrl.additionalProvisionerNames { + if p == provisioner { + return true + } + } + return false +} + // shouldProvision returns whether a claim should have a volume provisioned for // it, i.e. whether a Provision is "desired" func (ctrl *ProvisionController) shouldProvision(claim *v1.PersistentVolumeClaim) bool { @@ -892,10 +1052,9 @@ func (ctrl *ProvisionController) shouldProvision(claim *v1.PersistentVolumeClaim // Kubernetes 1.5 provisioning with annStorageProvisioner if ctrl.kubeVersion.AtLeast(utilversion.MustParseSemantic("v1.5.0")) { if provisioner, found := claim.Annotations[annStorageProvisioner]; found { - if provisioner == ctrl.provisionerName { + if ctrl.knownProvisioner(provisioner) { return true } - return false } } else { // Kubernetes 1.4 provisioning, evaluating class.Provisioner @@ -1004,7 +1163,7 @@ func (ctrl *ProvisionController) updateDeleteStats(volume *v1.PersistentVolume, // provisionClaimOperation attempts to provision a volume for the given claim. // Returns error, which indicates whether provisioning should be retried // (requeue the claim) or not -func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVolumeClaim) error { +func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVolumeClaim) (ProvisioningState, error) { // Most code here is identical to that found in controller.go of kube's PV controller... claimClass := util.GetPersistentVolumeClaimClass(claim) operation := fmt.Sprintf("provision %q class %q", claimToClaimKey(claim), claimClass) @@ -1018,7 +1177,7 @@ func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVol if err == nil && volume != nil { // Volume has been already provisioned, nothing to do. 
glog.Info(logOperation(operation, "persistentvolume %q already exists, skipping", pvName)) - return nil + return ProvisioningFinished, nil } // Prepare a claimRef to the claim early (to fail before a volume is @@ -1026,40 +1185,40 @@ func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVol claimRef, err := ref.GetReference(scheme.Scheme, claim) if err != nil { glog.Error(logOperation(operation, "unexpected error getting claim reference: %v", err)) - return nil + return ProvisioningNoChange, nil } provisioner, parameters, err := ctrl.getStorageClassFields(claimClass) if err != nil { glog.Error(logOperation(operation, "error getting claim's StorageClass's fields: %v", err)) - return nil + return ProvisioningFinished, nil } - if provisioner != ctrl.provisionerName { + if !ctrl.knownProvisioner(provisioner) { // class.Provisioner has either changed since shouldProvision() or // annDynamicallyProvisioned contains different provisioner than // class.Provisioner. glog.Error(logOperation(operation, "unknown provisioner %q requested in claim's StorageClass", provisioner)) - return nil + return ProvisioningFinished, nil } // Check if this provisioner can provision this claim. if err = ctrl.canProvision(claim); err != nil { ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error()) glog.Error(logOperation(operation, "failed to provision volume: %v", err)) - return nil + return ProvisioningFinished, nil } reclaimPolicy := v1.PersistentVolumeReclaimDelete if ctrl.kubeVersion.AtLeast(utilversion.MustParseSemantic("v1.8.0")) { reclaimPolicy, err = ctrl.fetchReclaimPolicy(claimClass) if err != nil { - return err + return ProvisioningFinished, err } } mountOptions, err := ctrl.fetchMountOptions(claimClass) if err != nil { - return err + return ProvisioningFinished, err } var selectedNode *v1.Node @@ -1071,7 +1230,7 @@ func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVol if err != nil { err = fmt.Errorf("failed to get target node: %v", err) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error()) - return err + return ProvisioningNoChange, err } } @@ -1080,7 +1239,7 @@ func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVol if err != nil { err = fmt.Errorf("failed to get AllowedTopologies from StorageClass: %v", err) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error()) - return err + return ProvisioningNoChange, err } } @@ -1096,16 +1255,21 @@ func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVol ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, "Provisioning", fmt.Sprintf("External provisioner is provisioning volume for claim %q", claimToClaimKey(claim))) - volume, err = ctrl.provisioner.Provision(options) + result := ProvisioningFinished + if p, ok := ctrl.provisioner.(ProvisionerExt); ok { + volume, result, err = p.ProvisionExt(options) + } else { + volume, err = ctrl.provisioner.Provision(options) + } if err != nil { if ierr, ok := err.(*IgnoredError); ok { // Provision ignored, do nothing and hope another provisioner will provision it. 
glog.Info(logOperation(operation, "volume provision ignored: %v", ierr)) - return nil + return ProvisioningFinished, nil } err = fmt.Errorf("failed to provision volume with StorageClass %q: %v", claimClass, err) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error()) - return err + return result, err } glog.Info(logOperation(operation, "volume %q provisioned", volume.Name)) @@ -1125,60 +1289,12 @@ func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVol metav1.SetMetaDataAnnotation(&volume.ObjectMeta, annClass, claimClass) } - // Try to create the PV object several times - var lastSaveError error - err = wait.ExponentialBackoff(*ctrl.createProvisionedPVBackoff, func() (bool, error) { - glog.Info(logOperation(operation, "trying to save persistentvolume %q", volume.Name)) - if _, err = ctrl.client.CoreV1().PersistentVolumes().Create(volume); err == nil || apierrs.IsAlreadyExists(err) { - // Save succeeded. - if err != nil { - glog.Info(logOperation(operation, "persistentvolume %q already exists, reusing", volume.Name)) - } else { - glog.Info(logOperation(operation, "persistentvolume %q saved", volume.Name)) - } - return true, nil - } - // Save failed, try again after a while. - glog.Info(logOperation(operation, "failed to save persistentvolume %q: %v", volume.Name, err)) - lastSaveError = err - return false, nil - }) + glog.Info(logOperation(operation, "succeeded")) - if err != nil { - // Save failed. Now we have a storage asset outside of Kubernetes, - // but we don't have appropriate PV object for it. - // Emit some event here and try to delete the storage asset several - // times. - strerr := fmt.Sprintf("Error creating provisioned PV object for claim %s: %v. Deleting the volume.", claimToClaimKey(claim), lastSaveError) - glog.Error(logOperation(operation, strerr)) - ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", strerr) - - var lastDeleteError error - err = wait.ExponentialBackoff(*ctrl.createProvisionedPVBackoff, func() (bool, error) { - if err = ctrl.provisioner.Delete(volume); err == nil { - // Delete succeeded - glog.Info(logOperation(operation, "cleaning volume %q succeeded", volume.Name)) - return true, nil - } - // Delete failed, try again after a while. - glog.Info(logOperation(operation, "failed to clean volume %q: %v", volume.Name, err)) - lastDeleteError = err - return false, nil - }) - if err != nil { - // Delete failed several times. There is an orphaned volume and there - // is nothing we can do about it. - strerr := fmt.Sprintf("Error cleaning provisioned volume for claim %s: %v. 
Please delete manually.", claimToClaimKey(claim), lastDeleteError) - glog.Error(logOperation(operation, strerr)) - ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningCleanupFailed", strerr) - } - } else { - msg := fmt.Sprintf("Successfully provisioned volume %s", volume.Name) - ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, "ProvisioningSucceeded", msg) + if err := ctrl.volumeStore.StoreVolume(claim, volume); err != nil { + return ProvisioningFinished, err } - - glog.Info(logOperation(operation, "succeeded")) - return nil + return ProvisioningFinished, nil } // deleteVolumeOperation attempts to delete the volume backing the given diff --git a/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/doc.go b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/doc.go new file mode 100644 index 0000000000..f1db463b9a --- /dev/null +++ b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller // import "sigs.k8s.io/sig-storage-lib-external-provisioner/controller" diff --git a/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/metrics/doc.go b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/metrics/doc.go new file mode 100644 index 0000000000..eac1f359c0 --- /dev/null +++ b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/metrics/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metrics // import "sigs.k8s.io/sig-storage-lib-external-provisioner/controller/metrics" diff --git a/vendor/github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller/metrics/metrics.go b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/metrics/metrics.go similarity index 100% rename from vendor/github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller/metrics/metrics.go rename to vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/metrics/metrics.go diff --git a/vendor/github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller/volume.go b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/volume.go similarity index 63% rename from vendor/github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller/volume.go rename to vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/volume.go index f1d42b7fce..368d0c3f70 100644 --- a/vendor/github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller/volume.go +++ b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/volume.go @@ -62,6 +62,42 @@ type BlockProvisioner interface { SupportsBlock() bool } +// ProvisionerExt is an optional interface implemented by provisioners that +// can return enhanced error code from provisioner. +type ProvisionerExt interface { + // ProvisionExt creates a volume i.e. the storage asset and returns a PV object + // for the volume. The provisioner can return an error (e.g. timeout) and state + // ProvisioningInBackground to tell the controller that provisioning may be in + // progress after ProvisionExt() finishes. The controller will call ProvisionExt() + // again with the same parameters, assuming that the provisioner continues + // provisioning the volume. The provisioner must return either final error (with + // ProvisioningFinished) or success eventually, otherwise the controller will try + // forever (unless FailedProvisionThreshold is set). + ProvisionExt(options VolumeOptions) (*v1.PersistentVolume, ProvisioningState, error) +} + +// ProvisioningState is state of volume provisioning. It tells the controller if +// provisioning could be in progress in the background after ProvisionExt() call +// returns or the provisioning is 100% finished (either with success or error). +type ProvisioningState string + +const ( + // ProvisioningInBackground tells the controller that provisioning may be in + // progress in background after ProvisionExt call finished. + ProvisioningInBackground ProvisioningState = "Background" + // ProvisioningFinished tells the controller that provisioning for sure does + // not continue in background, error code of ProvisionExt() is final. + ProvisioningFinished ProvisioningState = "Finished" + // ProvisioningNoChange tells the controller that provisioning state is the same as + // before the call - either ProvisioningInBackground or ProvisioningFinished from + // the previous ProvisionExt(). This state is typically returned by a provisioner + // before it could reach storage backend - the provisioner could not check status + // of provisioning and previous state applies. If this state is returned from the + // first ProvisionExt call, ProvisioningFinished is assumed (the provisioning + // could not even start). + ProvisioningNoChange ProvisioningState = "NoChange" +) + // IgnoredError is the value for Delete to return to indicate that the call has // been ignored and no action taken. 
In case multiple provisioners are serving // the same storage class, provisioners may ignore PVs they are not responsible diff --git a/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/volume_store.go b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/volume_store.go new file mode 100644 index 0000000000..1d47ca490a --- /dev/null +++ b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/volume_store.go @@ -0,0 +1,244 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + "sync" + "time" + + "k8s.io/client-go/tools/record" + + "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" +) + +// VolumeStore is an interface that's used to save PersistentVolumes to API server. +// Implementation of the interface add custom error recovery policy. +// A volume is added via StoreVolume(). It's enough to store the volume only once. +// It is not possible to remove a volume, even when corresponding PVC is deleted +// and PV is not necessary any longer. PV will be always created. +// If corresponding PVC is deleted, the PV will be deleted by Kubernetes using +// standard deletion procedure. It saves us some code here. +type VolumeStore interface { + // StoreVolume makes sure a volume is saved to Kubernetes API server. + // If no error is returned, caller can assume that PV was saved or + // is being saved in background. + // In error is returned, no PV was saved and corresponding PVC needs + // to be re-queued (so whole provisioning needs to be done again). + StoreVolume(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) error + + // Runs any background goroutines for implementation of the interface. + Run(ctx context.Context, threadiness int) +} + +// queueStore is implementation of VolumeStore that re-tries saving +// PVs to API server using a workqueue running in its own goroutine(s). +// After failed save, volume is re-qeueued with exponential backoff. +type queueStore struct { + client kubernetes.Interface + queue workqueue.RateLimitingInterface + + volumes sync.Map +} + +var _ VolumeStore = &queueStore{} + +// NewVolumeStoreQueue returns VolumeStore that uses asynchronous workqueue to save PVs. +func NewVolumeStoreQueue( + client kubernetes.Interface, + limiter workqueue.RateLimiter, +) VolumeStore { + + return &queueStore{ + client: client, + queue: workqueue.NewNamedRateLimitingQueue(limiter, "unsavedpvs"), + } +} + +func (q *queueStore) StoreVolume(_ *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) error { + if err := q.doSaveVolume(volume); err != nil { + q.volumes.Store(volume.Name, volume) + q.queue.Add(volume.Name) + klog.Errorf("Failed to save volume %s: %s", volume.Name, err) + } + // Consume any error, this Store will retry in background. 
+	return nil
+}
+
+func (q *queueStore) Run(ctx context.Context, threadiness int) {
+	klog.Infof("Starting save volume queue")
+	defer q.queue.ShutDown()
+
+	for i := 0; i < threadiness; i++ {
+		go wait.Until(q.saveVolumeWorker, time.Second, ctx.Done())
+	}
+	<-ctx.Done()
+	klog.Infof("Stopped save volume queue")
+}
+
+func (q *queueStore) saveVolumeWorker() {
+	for q.processNextWorkItem() {
+	}
+}
+
+func (q *queueStore) processNextWorkItem() bool {
+	obj, shutdown := q.queue.Get()
+	defer q.queue.Done(obj)
+
+	if shutdown {
+		return false
+	}
+
+	var volumeName string
+	var ok bool
+	if volumeName, ok = obj.(string); !ok {
+		q.queue.Forget(obj)
+		utilruntime.HandleError(fmt.Errorf("expected string in save workqueue but got %#v", obj))
+		return true
+	}
+
+	volumeObj, found := q.volumes.Load(volumeName)
+	if !found {
+		q.queue.Forget(volumeName)
+		utilruntime.HandleError(fmt.Errorf("did not find saved volume %s", volumeName))
+		return true
+	}
+
+	volume, ok := volumeObj.(*v1.PersistentVolume)
+	if !ok {
+		q.queue.Forget(volumeName)
+		utilruntime.HandleError(fmt.Errorf("saved object is not volume: %+v", volumeObj))
+		return true
+	}
+
+	if err := q.doSaveVolume(volume); err != nil {
+		q.queue.AddRateLimited(volumeName)
+		utilruntime.HandleError(err)
+		klog.V(5).Infof("Volume %s enqueued", volume.Name)
+		return true
+	}
+	q.volumes.Delete(volumeName)
+	q.queue.Forget(volumeName)
+	return true
+}
+
+func (q *queueStore) doSaveVolume(volume *v1.PersistentVolume) error {
+	klog.V(5).Infof("Saving volume %s", volume.Name)
+	_, err := q.client.CoreV1().PersistentVolumes().Create(volume)
+	if err == nil || apierrs.IsAlreadyExists(err) {
+		klog.V(5).Infof("Volume %s saved", volume.Name)
+		return nil
+	}
+	return fmt.Errorf("error saving volume %s: %s", volume.Name, err)
+}
+
+// backoffStore is implementation of VolumeStore that blocks and tries to save
+// a volume to API server with configurable backoff. If saving fails,
+// StoreVolume() deletes the storage asset in the end and returns appropriate
+// error code.
+type backoffStore struct {
+	client        kubernetes.Interface
+	eventRecorder record.EventRecorder
+	backoff       *wait.Backoff
+	ctrl          *ProvisionController
+}
+
+var _ VolumeStore = &backoffStore{}
+
+// NewBackoffStore returns VolumeStore that uses blocking exponential backoff to save PVs.
+func NewBackoffStore(client kubernetes.Interface,
+	eventRecorder record.EventRecorder,
+	backoff *wait.Backoff,
+	ctrl *ProvisionController,
+) VolumeStore {
+	return &backoffStore{
+		client:        client,
+		eventRecorder: eventRecorder,
+		backoff:       backoff,
+		ctrl:          ctrl,
+	}
+}
+
+func (b *backoffStore) StoreVolume(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) error {
+	// Try to create the PV object several times
+	var lastSaveError error
+	err := wait.ExponentialBackoff(*b.backoff, func() (bool, error) {
+		klog.Infof("Trying to save persistentvolume %q", volume.Name)
+		var err error
+		if _, err = b.client.CoreV1().PersistentVolumes().Create(volume); err == nil || apierrs.IsAlreadyExists(err) {
+			// Save succeeded.
+			if err != nil {
+				klog.Infof("persistentvolume %q already exists, reusing", volume.Name)
+			} else {
+				klog.Infof("persistentvolume %q saved", volume.Name)
+			}
+			return true, nil
+		}
+		// Save failed, try again after a while.
+ klog.Infof("Failed to save persistentvolume %q: %v", volume.Name, err) + lastSaveError = err + return false, nil + }) + + if err == nil { + // Save succeeded + msg := fmt.Sprintf("Successfully provisioned volume %s", volume.Name) + b.eventRecorder.Event(claim, v1.EventTypeNormal, "ProvisioningSucceeded", msg) + return nil + } + + // Save failed. Now we have a storage asset outside of Kubernetes, + // but we don't have appropriate PV object for it. + // Emit some event here and try to delete the storage asset several + // times. + strerr := fmt.Sprintf("Error creating provisioned PV object for claim %s: %v. Deleting the volume.", claimToClaimKey(claim), lastSaveError) + klog.Error(strerr) + b.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", strerr) + + var lastDeleteError error + err = wait.ExponentialBackoff(*b.backoff, func() (bool, error) { + if err = b.ctrl.provisioner.Delete(volume); err == nil { + // Delete succeeded + klog.Infof("Cleaning volume %q succeeded", volume.Name) + return true, nil + } + // Delete failed, try again after a while. + klog.Infof("Failed to clean volume %q: %v", volume.Name, err) + lastDeleteError = err + return false, nil + }) + if err != nil { + // Delete failed several times. There is an orphaned volume and there + // is nothing we can do about it. + strerr := fmt.Sprintf("Error cleaning provisioned volume for claim %s: %v. Please delete manually.", claimToClaimKey(claim), lastDeleteError) + klog.Error(strerr) + b.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningCleanupFailed", strerr) + } + + return lastSaveError +} + +func (b *backoffStore) Run(ctx context.Context, threadiness int) { + // There is not background processing +} diff --git a/vendor/github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/mount/LICENSE b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/mount/LICENSE similarity index 100% rename from vendor/github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/mount/LICENSE rename to vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/mount/LICENSE diff --git a/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/util/doc.go b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/util/doc.go new file mode 100644 index 0000000000..3697f68612 --- /dev/null +++ b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/util/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util // import "sigs.k8s.io/sig-storage-lib-external-provisioner/util" diff --git a/vendor/github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/util/util.go b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/util/util.go similarity index 100% rename from vendor/github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/util/util.go rename to vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/util/util.go