diff --git a/apis/cluster/constants.go b/apis/cluster/constants.go
index 2a22087f..355839b1 100644
--- a/apis/cluster/constants.go
+++ b/apis/cluster/constants.go
@@ -26,6 +26,7 @@ const (
LabelClusterExternalID = "byte.builders/cluster-external-id"
LabelClusterConnectorLinkID = "byte.builders/cluster-connector-link-id"
+ LabelTricksterReference = "byte.builders/cluster"
)
const (
diff --git a/apis/cluster/v1alpha1/cluster_info_types.go b/apis/cluster/v1alpha1/cluster_info_types.go
index 5c41c64c..e858487b 100644
--- a/apis/cluster/v1alpha1/cluster_info_types.go
+++ b/apis/cluster/v1alpha1/cluster_info_types.go
@@ -18,6 +18,7 @@ package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ kmapi "kmodules.xyz/client-go/api/v1"
)
const (
@@ -140,4 +141,9 @@ type ClusterInfoStatus struct {
// Message specifies additional information regarding the possible actions for the user
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
+ // +optional
+ ClusterManagers []string `json:"clusterManagers,omitempty"`
+ // CAPIClusterInfo contains CAPI cluster information if the cluster was created by cluster-api
+ // +optional
+ CAPIClusterInfo *kmapi.CAPIClusterInfo `json:"capiClusterInfo,omitempty"`
}
diff --git a/apis/cluster/v1alpha1/zz_generated.deepcopy.go b/apis/cluster/v1alpha1/zz_generated.deepcopy.go
index dcdcebc3..2d0d2eee 100644
--- a/apis/cluster/v1alpha1/zz_generated.deepcopy.go
+++ b/apis/cluster/v1alpha1/zz_generated.deepcopy.go
@@ -23,6 +23,7 @@ package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
+ v1 "kmodules.xyz/client-go/api/v1"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -203,7 +204,7 @@ func (in *ClusterInfo) DeepCopyInto(out *ClusterInfo) {
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
- out.Status = in.Status
+ in.Status.DeepCopyInto(&out.Status)
return
}
@@ -277,6 +278,16 @@ func (in *ClusterInfoSpec) DeepCopy() *ClusterInfoSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterInfoStatus) DeepCopyInto(out *ClusterInfoStatus) {
*out = *in
+ if in.ClusterManagers != nil {
+ in, out := &in.ClusterManagers, &out.ClusterManagers
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.CAPIClusterInfo != nil {
+ in, out := &in.CAPIClusterInfo, &out.CAPIClusterInfo
+ *out = new(v1.CAPIClusterInfo)
+ **out = **in
+ }
return
}
diff --git a/crds/cluster.bytebuilders.dev_clusterinfos.yaml b/crds/cluster.bytebuilders.dev_clusterinfos.yaml
index 663befda..b4758531 100644
--- a/crds/cluster.bytebuilders.dev_clusterinfos.yaml
+++ b/crds/cluster.bytebuilders.dev_clusterinfos.yaml
@@ -88,6 +88,21 @@ spec:
type: object
status:
properties:
+ capiClusterInfo:
+ description: CAPIClusterInfo contains CAPI cluster information if
+ the cluster was created by cluster-api
+ properties:
+ clusterName:
+ type: string
+ namespace:
+ type: string
+ provider:
+ type: string
+ type: object
+ clusterManagers:
+ items:
+ type: string
+ type: array
message:
description: Message specifies additional information regarding the
possible actions for the user
diff --git a/go.mod b/go.mod
index 942df007..3342c43c 100644
--- a/go.mod
+++ b/go.mod
@@ -25,7 +25,7 @@ require (
k8s.io/apimachinery v0.25.3
k8s.io/client-go v0.25.3
k8s.io/klog/v2 v2.80.1
- kmodules.xyz/client-go v0.25.24
+ kmodules.xyz/client-go v0.25.34-0.20230920051128-f758ec0276ab
kmodules.xyz/crd-schema-fuzz v0.25.0
sigs.k8s.io/yaml v1.3.0
)
@@ -45,6 +45,7 @@ require (
github.com/dustin/go-humanize v1.0.1-0.20220316001817-d5090ed65664 // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
+ github.com/evanphx/json-patch/v5 v5.6.0 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/fatih/structs v1.1.0 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
@@ -65,6 +66,7 @@ require (
github.com/googleapis/gax-go/v2 v2.7.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef // indirect
+ github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
@@ -79,7 +81,6 @@ require (
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/moul/gotty-client v1.10.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
- github.com/onsi/ginkgo v1.16.5 // indirect
github.com/onsi/gomega v1.20.2 // indirect
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect
github.com/renstrom/fuzzysearch v0.0.0-00010101000000-000000000000 // indirect
@@ -98,7 +99,7 @@ require (
golang.org/x/text v0.7.0 // indirect
golang.org/x/time v0.3.0 // indirect
gomodules.xyz/clock v0.0.0-20200817085942-06523dba733f // indirect
- gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect
+ gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
gomodules.xyz/sets v0.2.1 // indirect
gomodules.xyz/wait v0.2.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
@@ -115,6 +116,7 @@ require (
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 // indirect
moul.io/anonuuid v1.3.2 // indirect
moul.io/srand v1.6.1 // indirect
+ sigs.k8s.io/controller-runtime v0.13.1 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
)
diff --git a/go.sum b/go.sum
index 3e90a64a..ce0f1afe 100644
--- a/go.sum
+++ b/go.sum
@@ -49,11 +49,14 @@ github.com/aws/aws-sdk-go v1.44.171 h1:maREiPAmibvuONMOEZIkCH2OTosLRnDelceTtH3SY
github.com/aws/aws-sdk-go v1.44.171/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -97,6 +100,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
+github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
@@ -104,7 +109,6 @@ github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -116,6 +120,7 @@ github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
@@ -127,7 +132,6 @@ github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+
github.com/go-resty/resty/v2 v2.6.0 h1:joIR5PNLM2EFqqESUjCMGXrWmXNHEU9CEiK813oKYS4=
github.com/go-resty/resty/v2 v2.6.0/go.mod h1:PwvJS6hvaPkjtjNg9ph+VrSD92bi5Zq73w/BIH7cC3Q=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
@@ -224,10 +228,12 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM=
github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
+github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
@@ -278,6 +284,7 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -302,17 +309,10 @@ github.com/moul/gotty-client v1.10.0/go.mod h1:IUVfEVjudY9hu9Zf0i7+6tOggGt+fN5aN
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
-github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.20.2 h1:8uQq0zMgLEfa0vRrrBgaJF2gyW9Da9BmfGV+OyUzfkY=
github.com/onsi/gomega v1.20.2/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc=
github.com/packethost/packngo v0.13.0 h1:VIeDY/Uju53v8LAKxiqTrfR9jkpX5PhWdnQC0h3aUU8=
@@ -331,13 +331,17 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
@@ -410,8 +414,11 @@ go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -446,7 +453,6 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -459,7 +465,6 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
@@ -489,7 +494,6 @@ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -499,17 +503,13 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210326220804-49726bf1d181/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -564,7 +564,6 @@ golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -576,8 +575,8 @@ gomodules.xyz/clock v0.0.0-20200817085942-06523dba733f h1:hTyhR4r+tj1Uq7/PpFxLTz
gomodules.xyz/clock v0.0.0-20200817085942-06523dba733f/go.mod h1:K3m7N+nBOlf91/tpv8REUGwsAgaKFwElQCuiLhm12AQ=
gomodules.xyz/flags v0.1.3 h1:jQ06+EfmoMv5NvjXvJon03dOhLU+FF0TQMWN7I6qpzs=
gomodules.xyz/flags v0.1.3/go.mod h1:e+kvBLnqdEWGG670SKOYag1CXStM2Slrxq01OIK3tFs=
-gomodules.xyz/jsonpatch/v2 v2.3.0 h1:8NFhfS6gzxNqjLIYnZxg319wZ5Qjnx4m/CcX+Klzazc=
-gomodules.xyz/jsonpatch/v2 v2.3.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
+gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
+gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
gomodules.xyz/logs v0.0.6 h1:8+9Wkud5yBPtIvkVszubyTeFxNII30lWODom0+GZD8U=
gomodules.xyz/logs v0.0.6/go.mod h1:Q+fFtZFLEB5q86KmDehXCGuMP72Rv+Rwz0KuVxK+Gi4=
gomodules.xyz/pointer v0.1.0 h1:sG2UKrYVSo6E3r4itAjXfPfe4fuXMi0KdyTHpR3vGCg=
@@ -646,7 +645,6 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
@@ -654,14 +652,12 @@ gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -686,8 +682,8 @@ k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715 h1:tBEbstoM+K0FiBV5KGAKQ0
k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y=
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-kmodules.xyz/client-go v0.25.24 h1:h8B36CSZDnANXbVIVRnwGkkav6By3Qpe6F4ZFJxjVUU=
-kmodules.xyz/client-go v0.25.24/go.mod h1:wbdzLEoDYiCPI6dTW0mIAGNwkwFV4lC5BN1FJxiDsbw=
+kmodules.xyz/client-go v0.25.34-0.20230920051128-f758ec0276ab h1:Al8RDDn7TNBozXvqApo2BAU2lUxDkYAPDRC1FBA2DQ8=
+kmodules.xyz/client-go v0.25.34-0.20230920051128-f758ec0276ab/go.mod h1:r/Va2Y6t1G8X1sPRjrQC6FWB3oh/i6rjssmlfJnbCmg=
kmodules.xyz/crd-schema-fuzz v0.25.0 h1:c5ZxNRqJak1bkGhECmyrKpzKGThFMB4088Kynyvngbc=
kmodules.xyz/crd-schema-fuzz v0.25.0/go.mod h1:VigFz19GwCxMGhb3YjCtlSXmfXb0J/g9du1So6rvqsk=
moul.io/anonuuid v1.3.2 h1:0iPzohbBNJvqVKv2VsqeN8YsDJhJymW/HiytSK2Rf0g=
@@ -695,6 +691,8 @@ moul.io/anonuuid v1.3.2/go.mod h1:fijmP4WPvGy80Mn0PJVItKXgMt+bcOXHMzItOZu0emI=
moul.io/srand v1.6.1 h1:SJ335F+54ivLdlH7wH52Rtyv0Ffos6DpsF5wu3ZVMXU=
moul.io/srand v1.6.1/go.mod h1:P2uaZB+GFstFNo8sEj6/U8FRV1n25kD0LLckFpJ+qvc=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+sigs.k8s.io/controller-runtime v0.13.1 h1:tUsRCSJVM1QQOOeViGeX3GMT3dQF1eePPw6sEE3xSlg=
+sigs.k8s.io/controller-runtime v0.13.1/go.mod h1:Zbz+el8Yg31jubvAEyglRZGdLAjplZl+PgtYNI6WNTI=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
diff --git a/vendor/github.com/evanphx/json-patch/v5/LICENSE b/vendor/github.com/evanphx/json-patch/v5/LICENSE
new file mode 100644
index 00000000..df76d7d7
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/v5/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2014, Evan Phoenix
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+* Neither the name of the Evan Phoenix nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/evanphx/json-patch/v5/errors.go b/vendor/github.com/evanphx/json-patch/v5/errors.go
new file mode 100644
index 00000000..75304b44
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/v5/errors.go
@@ -0,0 +1,38 @@
+package jsonpatch
+
+import "fmt"
+
+// AccumulatedCopySizeError is an error type returned when the accumulated size
+// increase caused by copy operations in a patch operation has exceeded the
+// limit.
+type AccumulatedCopySizeError struct {
+ limit int64
+ accumulated int64
+}
+
+// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError.
+func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError {
+ return &AccumulatedCopySizeError{limit: l, accumulated: a}
+}
+
+// Error implements the error interface.
+func (a *AccumulatedCopySizeError) Error() string {
+ return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit)
+}
+
+// ArraySizeError is an error type returned when the array size has exceeded
+// the limit.
+type ArraySizeError struct {
+ limit int
+ size int
+}
+
+// NewArraySizeError returns an ArraySizeError.
+func NewArraySizeError(l, s int) *ArraySizeError {
+ return &ArraySizeError{limit: l, size: s}
+}
+
+// Error implements the error interface.
+func (a *ArraySizeError) Error() string {
+ return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit)
+}
diff --git a/vendor/github.com/evanphx/json-patch/v5/merge.go b/vendor/github.com/evanphx/json-patch/v5/merge.go
new file mode 100644
index 00000000..a7c45734
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/v5/merge.go
@@ -0,0 +1,408 @@
+package jsonpatch
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+)
+
+func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
+ curDoc, err := cur.intoDoc()
+
+ if err != nil {
+ pruneNulls(patch)
+ return patch
+ }
+
+ patchDoc, err := patch.intoDoc()
+
+ if err != nil {
+ return patch
+ }
+
+ mergeDocs(curDoc, patchDoc, mergeMerge)
+
+ return cur
+}
+
+func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
+ for k, v := range patch.obj {
+ if v == nil {
+ if mergeMerge {
+ idx := -1
+ for i, key := range doc.keys {
+ if key == k {
+ idx = i
+ break
+ }
+ }
+ if idx == -1 {
+ doc.keys = append(doc.keys, k)
+ }
+ doc.obj[k] = nil
+ } else {
+ _ = doc.remove(k, &ApplyOptions{})
+ }
+ } else {
+ cur, ok := doc.obj[k]
+
+ if !ok || cur == nil {
+ if !mergeMerge {
+ pruneNulls(v)
+ }
+ _ = doc.set(k, v, &ApplyOptions{})
+ } else {
+ _ = doc.set(k, merge(cur, v, mergeMerge), &ApplyOptions{})
+ }
+ }
+ }
+}
+
+func pruneNulls(n *lazyNode) {
+ sub, err := n.intoDoc()
+
+ if err == nil {
+ pruneDocNulls(sub)
+ } else {
+ ary, err := n.intoAry()
+
+ if err == nil {
+ pruneAryNulls(ary)
+ }
+ }
+}
+
+func pruneDocNulls(doc *partialDoc) *partialDoc {
+ for k, v := range doc.obj {
+ if v == nil {
+ _ = doc.remove(k, &ApplyOptions{})
+ } else {
+ pruneNulls(v)
+ }
+ }
+
+ return doc
+}
+
+func pruneAryNulls(ary *partialArray) *partialArray {
+ newAry := []*lazyNode{}
+
+ for _, v := range *ary {
+ if v != nil {
+ pruneNulls(v)
+ }
+ newAry = append(newAry, v)
+ }
+
+ *ary = newAry
+
+ return ary
+}
+
+var errBadJSONDoc = fmt.Errorf("Invalid JSON Document")
+var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch")
+var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents")
+
+// MergeMergePatches merges two merge patches together, such that
+// applying this resulting merged merge patch to a document yields the same
+// as merging each merge patch to the document in succession.
+func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) {
+ return doMergePatch(patch1Data, patch2Data, true)
+}
+
+// MergePatch merges the patchData into the docData.
+func MergePatch(docData, patchData []byte) ([]byte, error) {
+ return doMergePatch(docData, patchData, false)
+}
+
+func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
+ doc := &partialDoc{}
+
+ docErr := json.Unmarshal(docData, doc)
+
+ patch := &partialDoc{}
+
+ patchErr := json.Unmarshal(patchData, patch)
+
+ if isSyntaxError(docErr) {
+ return nil, errBadJSONDoc
+ }
+
+ if isSyntaxError(patchErr) {
+ return nil, errBadJSONPatch
+ }
+
+ if docErr == nil && doc.obj == nil {
+ return nil, errBadJSONDoc
+ }
+
+ if patchErr == nil && patch.obj == nil {
+ return nil, errBadJSONPatch
+ }
+
+ if docErr != nil || patchErr != nil {
+ // Not an error, just not a doc, so we turn straight into the patch
+ if patchErr == nil {
+ if mergeMerge {
+ doc = patch
+ } else {
+ doc = pruneDocNulls(patch)
+ }
+ } else {
+ patchAry := &partialArray{}
+ patchErr = json.Unmarshal(patchData, patchAry)
+
+ if patchErr != nil {
+ return nil, errBadJSONPatch
+ }
+
+ pruneAryNulls(patchAry)
+
+ out, patchErr := json.Marshal(patchAry)
+
+ if patchErr != nil {
+ return nil, errBadJSONPatch
+ }
+
+ return out, nil
+ }
+ } else {
+ mergeDocs(doc, patch, mergeMerge)
+ }
+
+ return json.Marshal(doc)
+}
+
+func isSyntaxError(err error) bool {
+ if _, ok := err.(*json.SyntaxError); ok {
+ return true
+ }
+ if _, ok := err.(*syntaxError); ok {
+ return true
+ }
+ return false
+}
+
+// resemblesJSONArray indicates whether the byte-slice "appears" to be
+// a JSON array or not.
+// False-positives are possible, as this function does not check the internal
+// structure of the array. It only checks that the outer syntax is present and
+// correct.
+func resemblesJSONArray(input []byte) bool {
+ input = bytes.TrimSpace(input)
+
+ hasPrefix := bytes.HasPrefix(input, []byte("["))
+ hasSuffix := bytes.HasSuffix(input, []byte("]"))
+
+ return hasPrefix && hasSuffix
+}
+
+// CreateMergePatch will return a merge patch document capable of converting
+// the original document(s) to the modified document(s).
+// The parameters can be bytes of either two JSON Documents, or two arrays of
+// JSON documents.
+// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07
+func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
+ originalResemblesArray := resemblesJSONArray(originalJSON)
+ modifiedResemblesArray := resemblesJSONArray(modifiedJSON)
+
+ // Do both byte-slices seem like JSON arrays?
+ if originalResemblesArray && modifiedResemblesArray {
+ return createArrayMergePatch(originalJSON, modifiedJSON)
+ }
+
+ // Are both byte-slices are not arrays? Then they are likely JSON objects...
+ if !originalResemblesArray && !modifiedResemblesArray {
+ return createObjectMergePatch(originalJSON, modifiedJSON)
+ }
+
+ // None of the above? Then return an error because of mismatched types.
+ return nil, errBadMergeTypes
+}
+
+// createObjectMergePatch will return a merge-patch document capable of
+// converting the original document to the modified document.
+func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
+ originalDoc := map[string]interface{}{}
+ modifiedDoc := map[string]interface{}{}
+
+ err := json.Unmarshal(originalJSON, &originalDoc)
+ if err != nil {
+ return nil, errBadJSONDoc
+ }
+
+ err = json.Unmarshal(modifiedJSON, &modifiedDoc)
+ if err != nil {
+ return nil, errBadJSONDoc
+ }
+
+ dest, err := getDiff(originalDoc, modifiedDoc)
+ if err != nil {
+ return nil, err
+ }
+
+ return json.Marshal(dest)
+}
+
+// createArrayMergePatch will return an array of merge-patch documents capable
+// of converting the original document to the modified document for each
+// pair of JSON documents provided in the arrays.
+// Arrays of mismatched sizes will result in an error.
+func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
+ originalDocs := []json.RawMessage{}
+ modifiedDocs := []json.RawMessage{}
+
+ err := json.Unmarshal(originalJSON, &originalDocs)
+ if err != nil {
+ return nil, errBadJSONDoc
+ }
+
+ err = json.Unmarshal(modifiedJSON, &modifiedDocs)
+ if err != nil {
+ return nil, errBadJSONDoc
+ }
+
+ total := len(originalDocs)
+ if len(modifiedDocs) != total {
+ return nil, errBadJSONDoc
+ }
+
+ result := []json.RawMessage{}
+ for i := 0; i < len(originalDocs); i++ {
+ original := originalDocs[i]
+ modified := modifiedDocs[i]
+
+ patch, err := createObjectMergePatch(original, modified)
+ if err != nil {
+ return nil, err
+ }
+
+ result = append(result, json.RawMessage(patch))
+ }
+
+ return json.Marshal(result)
+}
+
+// Returns true if the array matches (must be json types).
+// As is idiomatic for go, an empty array is not the same as a nil array.
+func matchesArray(a, b []interface{}) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ if (a == nil && b != nil) || (a != nil && b == nil) {
+ return false
+ }
+ for i := range a {
+ if !matchesValue(a[i], b[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Returns true if the values matches (must be json types)
+// The types of the values must match, otherwise it will always return false
+// If two map[string]interface{} are given, all elements must match.
+func matchesValue(av, bv interface{}) bool {
+ if reflect.TypeOf(av) != reflect.TypeOf(bv) {
+ return false
+ }
+ switch at := av.(type) {
+ case string:
+ bt := bv.(string)
+ if bt == at {
+ return true
+ }
+ case float64:
+ bt := bv.(float64)
+ if bt == at {
+ return true
+ }
+ case bool:
+ bt := bv.(bool)
+ if bt == at {
+ return true
+ }
+ case nil:
+ // Both nil, fine.
+ return true
+ case map[string]interface{}:
+ bt := bv.(map[string]interface{})
+ if len(bt) != len(at) {
+ return false
+ }
+ for key := range bt {
+ av, aOK := at[key]
+ bv, bOK := bt[key]
+ if aOK != bOK {
+ return false
+ }
+ if !matchesValue(av, bv) {
+ return false
+ }
+ }
+ return true
+ case []interface{}:
+ bt := bv.([]interface{})
+ return matchesArray(at, bt)
+ }
+ return false
+}
+
+// getDiff returns the (recursive) difference between a and b as a map[string]interface{}.
+func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
+ into := map[string]interface{}{}
+ for key, bv := range b {
+ av, ok := a[key]
+ // value was added
+ if !ok {
+ into[key] = bv
+ continue
+ }
+ // If types have changed, replace completely
+ if reflect.TypeOf(av) != reflect.TypeOf(bv) {
+ into[key] = bv
+ continue
+ }
+ // Types are the same, compare values
+ switch at := av.(type) {
+ case map[string]interface{}:
+ bt := bv.(map[string]interface{})
+ dst := make(map[string]interface{}, len(bt))
+ dst, err := getDiff(at, bt)
+ if err != nil {
+ return nil, err
+ }
+ if len(dst) > 0 {
+ into[key] = dst
+ }
+ case string, float64, bool:
+ if !matchesValue(av, bv) {
+ into[key] = bv
+ }
+ case []interface{}:
+ bt := bv.([]interface{})
+ if !matchesArray(at, bt) {
+ into[key] = bv
+ }
+ case nil:
+ switch bv.(type) {
+ case nil:
+ // Both nil, fine.
+ default:
+ into[key] = bv
+ }
+ default:
+ panic(fmt.Sprintf("Unknown type:%T in key %s", av, key))
+ }
+ }
+ // Now add all deleted values as nil
+ for key := range a {
+ _, found := b[key]
+ if !found {
+ into[key] = nil
+ }
+ }
+ return into, nil
+}
diff --git a/vendor/github.com/evanphx/json-patch/v5/patch.go b/vendor/github.com/evanphx/json-patch/v5/patch.go
new file mode 100644
index 00000000..117f2c00
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/v5/patch.go
@@ -0,0 +1,1135 @@
+package jsonpatch
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/pkg/errors"
+)
+
+const (
+ eRaw = iota
+ eDoc
+ eAry
+)
+
+var (
+ // SupportNegativeIndices decides whether to support non-standard practice of
+ // allowing negative indices to mean indices starting at the end of an array.
+ // Default to true.
+ SupportNegativeIndices bool = true
+ // AccumulatedCopySizeLimit limits the total size increase in bytes caused by
+ // "copy" operations in a patch.
+ AccumulatedCopySizeLimit int64 = 0
+ startObject = json.Delim('{')
+ endObject = json.Delim('}')
+ startArray = json.Delim('[')
+ endArray = json.Delim(']')
+)
+
+var (
+ ErrTestFailed = errors.New("test failed")
+ ErrMissing = errors.New("missing value")
+ ErrUnknownType = errors.New("unknown object type")
+ ErrInvalid = errors.New("invalid state detected")
+ ErrInvalidIndex = errors.New("invalid index referenced")
+
+ rawJSONArray = []byte("[]")
+ rawJSONObject = []byte("{}")
+ rawJSONNull = []byte("null")
+)
+
+type lazyNode struct {
+ raw *json.RawMessage
+ doc *partialDoc
+ ary partialArray
+ which int
+}
+
+// Operation is a single JSON-Patch step, such as a single 'add' operation.
+type Operation map[string]*json.RawMessage
+
+// Patch is an ordered collection of Operations.
+type Patch []Operation
+
+type partialDoc struct {
+ keys []string
+ obj map[string]*lazyNode
+}
+
+type partialArray []*lazyNode
+
+type container interface {
+ get(key string, options *ApplyOptions) (*lazyNode, error)
+ set(key string, val *lazyNode, options *ApplyOptions) error
+ add(key string, val *lazyNode, options *ApplyOptions) error
+ remove(key string, options *ApplyOptions) error
+}
+
+// ApplyOptions specifies options for calls to ApplyWithOptions.
+// Use NewApplyOptions to obtain default values for ApplyOptions.
+type ApplyOptions struct {
+ // SupportNegativeIndices decides whether to support non-standard practice of
+ // allowing negative indices to mean indices starting at the end of an array.
+ // Default to true.
+ SupportNegativeIndices bool
+ // AccumulatedCopySizeLimit limits the total size increase in bytes caused by
+ // "copy" operations in a patch.
+ AccumulatedCopySizeLimit int64
+ // AllowMissingPathOnRemove indicates whether to fail "remove" operations when the target path is missing.
+ // Default to false.
+ AllowMissingPathOnRemove bool
+ // EnsurePathExistsOnAdd instructs json-patch to recursively create the missing parts of path on "add" operation.
+ // Default to false.
+ EnsurePathExistsOnAdd bool
+}
+
+// NewApplyOptions creates a default set of options for calls to ApplyWithOptions.
+func NewApplyOptions() *ApplyOptions {
+ return &ApplyOptions{
+ SupportNegativeIndices: SupportNegativeIndices,
+ AccumulatedCopySizeLimit: AccumulatedCopySizeLimit,
+ AllowMissingPathOnRemove: false,
+ EnsurePathExistsOnAdd: false,
+ }
+}
+
+func newLazyNode(raw *json.RawMessage) *lazyNode {
+ return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw}
+}
+
+func newRawMessage(buf []byte) *json.RawMessage {
+ ra := make(json.RawMessage, len(buf))
+ copy(ra, buf)
+ return &ra
+}
+
+func (n *lazyNode) MarshalJSON() ([]byte, error) {
+ switch n.which {
+ case eRaw:
+ return json.Marshal(n.raw)
+ case eDoc:
+ return json.Marshal(n.doc)
+ case eAry:
+ return json.Marshal(n.ary)
+ default:
+ return nil, ErrUnknownType
+ }
+}
+
+func (n *lazyNode) UnmarshalJSON(data []byte) error {
+ dest := make(json.RawMessage, len(data))
+ copy(dest, data)
+ n.raw = &dest
+ n.which = eRaw
+ return nil
+}
+
+func (n *partialDoc) MarshalJSON() ([]byte, error) {
+ var buf bytes.Buffer
+ if _, err := buf.WriteString("{"); err != nil {
+ return nil, err
+ }
+ for i, k := range n.keys {
+ if i > 0 {
+ if _, err := buf.WriteString(", "); err != nil {
+ return nil, err
+ }
+ }
+ key, err := json.Marshal(k)
+ if err != nil {
+ return nil, err
+ }
+ if _, err := buf.Write(key); err != nil {
+ return nil, err
+ }
+ if _, err := buf.WriteString(": "); err != nil {
+ return nil, err
+ }
+ value, err := json.Marshal(n.obj[k])
+ if err != nil {
+ return nil, err
+ }
+ if _, err := buf.Write(value); err != nil {
+ return nil, err
+ }
+ }
+ if _, err := buf.WriteString("}"); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+type syntaxError struct {
+ msg string
+}
+
+func (err *syntaxError) Error() string {
+ return err.msg
+}
+
+func (n *partialDoc) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &n.obj); err != nil {
+ return err
+ }
+ buffer := bytes.NewBuffer(data)
+ d := json.NewDecoder(buffer)
+ if t, err := d.Token(); err != nil {
+ return err
+ } else if t != startObject {
+ return &syntaxError{fmt.Sprintf("unexpected JSON token in document node: %s", t)}
+ }
+ for d.More() {
+ k, err := d.Token()
+ if err != nil {
+ return err
+ }
+ key, ok := k.(string)
+ if !ok {
+ return &syntaxError{fmt.Sprintf("unexpected JSON token as document node key: %s", k)}
+ }
+ if err := skipValue(d); err != nil {
+ return err
+ }
+ n.keys = append(n.keys, key)
+ }
+ return nil
+}
+
+func skipValue(d *json.Decoder) error {
+ t, err := d.Token()
+ if err != nil {
+ return err
+ }
+ if t != startObject && t != startArray {
+ return nil
+ }
+ for d.More() {
+ if t == startObject {
+ // consume key token
+ if _, err := d.Token(); err != nil {
+ return err
+ }
+ }
+ if err := skipValue(d); err != nil {
+ return err
+ }
+ }
+ end, err := d.Token()
+ if err != nil {
+ return err
+ }
+ if t == startObject && end != endObject {
+ return &syntaxError{msg: "expected close object token"}
+ }
+ if t == startArray && end != endArray {
+ return &syntaxError{msg: "expected close object token"}
+ }
+ return nil
+}
+
+func deepCopy(src *lazyNode) (*lazyNode, int, error) {
+ if src == nil {
+ return nil, 0, nil
+ }
+ a, err := src.MarshalJSON()
+ if err != nil {
+ return nil, 0, err
+ }
+ sz := len(a)
+ return newLazyNode(newRawMessage(a)), sz, nil
+}
+
+func (n *lazyNode) intoDoc() (*partialDoc, error) {
+ if n.which == eDoc {
+ return n.doc, nil
+ }
+
+ if n.raw == nil {
+ return nil, ErrInvalid
+ }
+
+ err := json.Unmarshal(*n.raw, &n.doc)
+
+ if err != nil {
+ return nil, err
+ }
+
+ n.which = eDoc
+ return n.doc, nil
+}
+
+func (n *lazyNode) intoAry() (*partialArray, error) {
+ if n.which == eAry {
+ return &n.ary, nil
+ }
+
+ if n.raw == nil {
+ return nil, ErrInvalid
+ }
+
+ err := json.Unmarshal(*n.raw, &n.ary)
+
+ if err != nil {
+ return nil, err
+ }
+
+ n.which = eAry
+ return &n.ary, nil
+}
+
+func (n *lazyNode) compact() []byte {
+ buf := &bytes.Buffer{}
+
+ if n.raw == nil {
+ return nil
+ }
+
+ err := json.Compact(buf, *n.raw)
+
+ if err != nil {
+ return *n.raw
+ }
+
+ return buf.Bytes()
+}
+
+func (n *lazyNode) tryDoc() bool {
+ if n.raw == nil {
+ return false
+ }
+
+ err := json.Unmarshal(*n.raw, &n.doc)
+
+ if err != nil {
+ return false
+ }
+
+ n.which = eDoc
+ return true
+}
+
+func (n *lazyNode) tryAry() bool {
+ if n.raw == nil {
+ return false
+ }
+
+ err := json.Unmarshal(*n.raw, &n.ary)
+
+ if err != nil {
+ return false
+ }
+
+ n.which = eAry
+ return true
+}
+
+func (n *lazyNode) equal(o *lazyNode) bool {
+ if n.which == eRaw {
+ if !n.tryDoc() && !n.tryAry() {
+ if o.which != eRaw {
+ return false
+ }
+
+ return bytes.Equal(n.compact(), o.compact())
+ }
+ }
+
+ if n.which == eDoc {
+ if o.which == eRaw {
+ if !o.tryDoc() {
+ return false
+ }
+ }
+
+ if o.which != eDoc {
+ return false
+ }
+
+ if len(n.doc.obj) != len(o.doc.obj) {
+ return false
+ }
+
+ for k, v := range n.doc.obj {
+ ov, ok := o.doc.obj[k]
+
+ if !ok {
+ return false
+ }
+
+ if (v == nil) != (ov == nil) {
+ return false
+ }
+
+ if v == nil && ov == nil {
+ continue
+ }
+
+ if !v.equal(ov) {
+ return false
+ }
+ }
+
+ return true
+ }
+
+ if o.which != eAry && !o.tryAry() {
+ return false
+ }
+
+ if len(n.ary) != len(o.ary) {
+ return false
+ }
+
+ for idx, val := range n.ary {
+ if !val.equal(o.ary[idx]) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Kind reads the "op" field of the Operation.
+func (o Operation) Kind() string {
+ if obj, ok := o["op"]; ok && obj != nil {
+ var op string
+
+ err := json.Unmarshal(*obj, &op)
+
+ if err != nil {
+ return "unknown"
+ }
+
+ return op
+ }
+
+ return "unknown"
+}
+
+// Path reads the "path" field of the Operation.
+func (o Operation) Path() (string, error) {
+ if obj, ok := o["path"]; ok && obj != nil {
+ var op string
+
+ err := json.Unmarshal(*obj, &op)
+
+ if err != nil {
+ return "unknown", err
+ }
+
+ return op, nil
+ }
+
+ return "unknown", errors.Wrapf(ErrMissing, "operation missing path field")
+}
+
+// From reads the "from" field of the Operation.
+func (o Operation) From() (string, error) {
+ if obj, ok := o["from"]; ok && obj != nil {
+ var op string
+
+ err := json.Unmarshal(*obj, &op)
+
+ if err != nil {
+ return "unknown", err
+ }
+
+ return op, nil
+ }
+
+ return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field")
+}
+
+func (o Operation) value() *lazyNode {
+ if obj, ok := o["value"]; ok {
+ return newLazyNode(obj)
+ }
+
+ return nil
+}
+
+// ValueInterface decodes the operation value into an interface.
+func (o Operation) ValueInterface() (interface{}, error) {
+ if obj, ok := o["value"]; ok && obj != nil {
+ var v interface{}
+
+ err := json.Unmarshal(*obj, &v)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return v, nil
+ }
+
+ return nil, errors.Wrapf(ErrMissing, "operation, missing value field")
+}
+
+func isArray(buf []byte) bool {
+Loop:
+ for _, c := range buf {
+ switch c {
+ case ' ':
+ case '\n':
+ case '\t':
+ continue
+ case '[':
+ return true
+ default:
+ break Loop
+ }
+ }
+
+ return false
+}
+
+func findObject(pd *container, path string, options *ApplyOptions) (container, string) {
+ doc := *pd
+
+ split := strings.Split(path, "/")
+
+ if len(split) < 2 {
+ return nil, ""
+ }
+
+ parts := split[1 : len(split)-1]
+
+ key := split[len(split)-1]
+
+ var err error
+
+ for _, part := range parts {
+
+ next, ok := doc.get(decodePatchKey(part), options)
+
+ if next == nil || ok != nil {
+ return nil, ""
+ }
+
+ if isArray(*next.raw) {
+ doc, err = next.intoAry()
+
+ if err != nil {
+ return nil, ""
+ }
+ } else {
+ doc, err = next.intoDoc()
+
+ if err != nil {
+ return nil, ""
+ }
+ }
+ }
+
+ return doc, decodePatchKey(key)
+}
+
+func (d *partialDoc) set(key string, val *lazyNode, options *ApplyOptions) error {
+ found := false
+ for _, k := range d.keys {
+ if k == key {
+ found = true
+ break
+ }
+ }
+ if !found {
+ d.keys = append(d.keys, key)
+ }
+ d.obj[key] = val
+ return nil
+}
+
+func (d *partialDoc) add(key string, val *lazyNode, options *ApplyOptions) error {
+ return d.set(key, val, options)
+}
+
+func (d *partialDoc) get(key string, options *ApplyOptions) (*lazyNode, error) {
+ v, ok := d.obj[key]
+ if !ok {
+ return v, errors.Wrapf(ErrMissing, "unable to get nonexistent key: %s", key)
+ }
+ return v, nil
+}
+
+func (d *partialDoc) remove(key string, options *ApplyOptions) error {
+ _, ok := d.obj[key]
+ if !ok {
+ if options.AllowMissingPathOnRemove {
+ return nil
+ }
+ return errors.Wrapf(ErrMissing, "unable to remove nonexistent key: %s", key)
+ }
+ idx := -1
+ for i, k := range d.keys {
+ if k == key {
+ idx = i
+ break
+ }
+ }
+ d.keys = append(d.keys[0:idx], d.keys[idx+1:]...)
+ delete(d.obj, key)
+ return nil
+}
+
+// set should only be used to implement the "replace" operation, so "key" must
+// be an already existing index in "d".
+func (d *partialArray) set(key string, val *lazyNode, options *ApplyOptions) error {
+ idx, err := strconv.Atoi(key)
+ if err != nil {
+ return err
+ }
+
+ if idx < 0 {
+ if !options.SupportNegativeIndices {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ if idx < -len(*d) {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ idx += len(*d)
+ }
+
+ (*d)[idx] = val
+ return nil
+}
+
+func (d *partialArray) add(key string, val *lazyNode, options *ApplyOptions) error {
+ if key == "-" {
+ *d = append(*d, val)
+ return nil
+ }
+
+ idx, err := strconv.Atoi(key)
+ if err != nil {
+ return errors.Wrapf(err, "value was not a proper array index: '%s'", key)
+ }
+
+ sz := len(*d) + 1
+
+ ary := make([]*lazyNode, sz)
+
+ cur := *d
+
+ if idx >= len(ary) {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+
+ if idx < 0 {
+ if !options.SupportNegativeIndices {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ if idx < -len(ary) {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ idx += len(ary)
+ }
+
+ copy(ary[0:idx], cur[0:idx])
+ ary[idx] = val
+ copy(ary[idx+1:], cur[idx:])
+
+ *d = ary
+ return nil
+}
+
+func (d *partialArray) get(key string, options *ApplyOptions) (*lazyNode, error) {
+ idx, err := strconv.Atoi(key)
+
+ if err != nil {
+ return nil, err
+ }
+
+ if idx < 0 {
+ if !options.SupportNegativeIndices {
+ return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ if idx < -len(*d) {
+ return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ idx += len(*d)
+ }
+
+ if idx >= len(*d) {
+ return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+
+ return (*d)[idx], nil
+}
+
+func (d *partialArray) remove(key string, options *ApplyOptions) error {
+ idx, err := strconv.Atoi(key)
+ if err != nil {
+ return err
+ }
+
+ cur := *d
+
+ if idx >= len(cur) {
+ if options.AllowMissingPathOnRemove {
+ return nil
+ }
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+
+ if idx < 0 {
+ if !options.SupportNegativeIndices {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ if idx < -len(cur) {
+ if options.AllowMissingPathOnRemove {
+ return nil
+ }
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ idx += len(cur)
+ }
+
+ ary := make([]*lazyNode, len(cur)-1)
+
+ copy(ary[0:idx], cur[0:idx])
+ copy(ary[idx:], cur[idx+1:])
+
+ *d = ary
+ return nil
+}
+
+func (p Patch) add(doc *container, op Operation, options *ApplyOptions) error {
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(ErrMissing, "add operation failed to decode path")
+ }
+
+ if options.EnsurePathExistsOnAdd {
+ err = ensurePathExists(doc, path, options)
+
+ if err != nil {
+ return err
+ }
+ }
+
+ con, key := findObject(doc, path, options)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path)
+ }
+
+ err = con.add(key, op.value(), options)
+ if err != nil {
+ return errors.Wrapf(err, "error in add for path: '%s'", path)
+ }
+
+ return nil
+}
+
+// Given a document and a path to a key, walk the path and create all missing elements
+// creating objects and arrays as needed.
+func ensurePathExists(pd *container, path string, options *ApplyOptions) error {
+ doc := *pd
+
+ var err error
+ var arrIndex int
+
+ split := strings.Split(path, "/")
+
+ if len(split) < 2 {
+ return nil
+ }
+
+ parts := split[1:]
+
+ for pi, part := range parts {
+
+ // Have we reached the key part of the path?
+ // If yes, we're done.
+ if pi == len(parts)-1 {
+ return nil
+ }
+
+ target, ok := doc.get(decodePatchKey(part), options)
+
+ if target == nil || ok != nil {
+
+ // If the current container is an array which has fewer elements than our target index,
+ // pad the current container with nulls.
+ if arrIndex, err = strconv.Atoi(part); err == nil {
+ pa, ok := doc.(*partialArray)
+
+ if ok && arrIndex >= len(*pa)+1 {
+ // Pad the array with null values up to the required index.
+ for i := len(*pa); i <= arrIndex-1; i++ {
+ doc.add(strconv.Itoa(i), newLazyNode(newRawMessage(rawJSONNull)), options)
+ }
+ }
+ }
+
+ // Check if the next part is a numeric index or "-".
+ // If yes, then create an array, otherwise, create an object.
+ if arrIndex, err = strconv.Atoi(parts[pi+1]); err == nil || parts[pi+1] == "-" {
+ if arrIndex < 0 {
+
+ if !options.SupportNegativeIndices {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to ensure path for invalid index: %d", arrIndex)
+ }
+
+ if arrIndex < -1 {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to ensure path for negative index other than -1: %d", arrIndex)
+ }
+
+ arrIndex = 0
+ }
+
+ newNode := newLazyNode(newRawMessage(rawJSONArray))
+ doc.add(part, newNode, options)
+ doc, _ = newNode.intoAry()
+
+ // Pad the new array with null values up to the required index.
+ for i := 0; i < arrIndex; i++ {
+ doc.add(strconv.Itoa(i), newLazyNode(newRawMessage(rawJSONNull)), options)
+ }
+ } else {
+ newNode := newLazyNode(newRawMessage(rawJSONObject))
+
+ doc.add(part, newNode, options)
+ doc, _ = newNode.intoDoc()
+ }
+ } else {
+ if isArray(*target.raw) {
+ doc, err = target.intoAry()
+
+ if err != nil {
+ return err
+ }
+ } else {
+ doc, err = target.intoDoc()
+
+ if err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+func (p Patch) remove(doc *container, op Operation, options *ApplyOptions) error {
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(ErrMissing, "remove operation failed to decode path")
+ }
+
+ con, key := findObject(doc, path, options)
+
+ if con == nil {
+ if options.AllowMissingPathOnRemove {
+ return nil
+ }
+ return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path)
+ }
+
+ err = con.remove(key, options)
+ if err != nil {
+ return errors.Wrapf(err, "error in remove for path: '%s'", path)
+ }
+
+ return nil
+}
+
+func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) error {
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(err, "replace operation failed to decode path")
+ }
+
+ if path == "" {
+ val := op.value()
+
+ if val.which == eRaw {
+ if !val.tryDoc() {
+ if !val.tryAry() {
+ return errors.Wrapf(err, "replace operation value must be object or array")
+ }
+ }
+ }
+
+ switch val.which {
+ case eAry:
+ *doc = &val.ary
+ case eDoc:
+ *doc = val.doc
+ case eRaw:
+ return errors.Wrapf(err, "replace operation hit impossible case")
+ }
+
+ return nil
+ }
+
+ con, key := findObject(doc, path, options)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path)
+ }
+
+ _, ok := con.get(key, options)
+ if ok != nil {
+ return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path)
+ }
+
+ err = con.set(key, op.value(), options)
+ if err != nil {
+ return errors.Wrapf(err, "error in remove for path: '%s'", path)
+ }
+
+ return nil
+}
+
+func (p Patch) move(doc *container, op Operation, options *ApplyOptions) error {
+ from, err := op.From()
+ if err != nil {
+ return errors.Wrapf(err, "move operation failed to decode from")
+ }
+
+ con, key := findObject(doc, from, options)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from)
+ }
+
+ val, err := con.get(key, options)
+ if err != nil {
+ return errors.Wrapf(err, "error in move for path: '%s'", key)
+ }
+
+ err = con.remove(key, options)
+ if err != nil {
+ return errors.Wrapf(err, "error in move for path: '%s'", key)
+ }
+
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(err, "move operation failed to decode path")
+ }
+
+ con, key = findObject(doc, path, options)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path)
+ }
+
+ err = con.add(key, val, options)
+ if err != nil {
+ return errors.Wrapf(err, "error in move for path: '%s'", path)
+ }
+
+ return nil
+}
+
+func (p Patch) test(doc *container, op Operation, options *ApplyOptions) error {
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(err, "test operation failed to decode path")
+ }
+
+ if path == "" {
+ var self lazyNode
+
+ switch sv := (*doc).(type) {
+ case *partialDoc:
+ self.doc = sv
+ self.which = eDoc
+ case *partialArray:
+ self.ary = *sv
+ self.which = eAry
+ }
+
+ if self.equal(op.value()) {
+ return nil
+ }
+
+ return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+ }
+
+ con, key := findObject(doc, path, options)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path)
+ }
+
+ val, err := con.get(key, options)
+ if err != nil && errors.Cause(err) != ErrMissing {
+ return errors.Wrapf(err, "error in test for path: '%s'", path)
+ }
+
+ if val == nil {
+ if op.value().raw == nil {
+ return nil
+ }
+ return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+ } else if op.value() == nil {
+ return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+ }
+
+ if val.equal(op.value()) {
+ return nil
+ }
+
+ return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+}
+
+func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64, options *ApplyOptions) error {
+ from, err := op.From()
+ if err != nil {
+ return errors.Wrapf(err, "copy operation failed to decode from")
+ }
+
+ con, key := findObject(doc, from, options)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from)
+ }
+
+ val, err := con.get(key, options)
+ if err != nil {
+ return errors.Wrapf(err, "error in copy for from: '%s'", from)
+ }
+
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(ErrMissing, "copy operation failed to decode path")
+ }
+
+ con, key = findObject(doc, path, options)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path)
+ }
+
+ valCopy, sz, err := deepCopy(val)
+ if err != nil {
+ return errors.Wrapf(err, "error while performing deep copy")
+ }
+
+ (*accumulatedCopySize) += int64(sz)
+ if options.AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > options.AccumulatedCopySizeLimit {
+ return NewAccumulatedCopySizeError(options.AccumulatedCopySizeLimit, *accumulatedCopySize)
+ }
+
+ err = con.add(key, valCopy, options)
+ if err != nil {
+ return errors.Wrapf(err, "error while adding value during copy")
+ }
+
+ return nil
+}
+
+// Equal reports whether two JSON documents are structurally equal.
+func Equal(a, b []byte) bool {
+ la := newLazyNode(newRawMessage(a))
+ lb := newLazyNode(newRawMessage(b))
+
+ return la.equal(lb)
+}
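+
+// For example (an illustrative note, not part of the vendored source):
+//
+//	Equal([]byte(`{"a": 1, "b": 2}`), []byte(`{"b": 2, "a": 1}`)) // true: key order does not matter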
+
+// DecodePatch decodes the passed JSON document as an RFC 6902 patch.
+func DecodePatch(buf []byte) (Patch, error) {
+ var p Patch
+
+ err := json.Unmarshal(buf, &p)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+// Apply mutates a JSON document according to the patch, and returns the new
+// document.
+func (p Patch) Apply(doc []byte) ([]byte, error) {
+ return p.ApplyWithOptions(doc, NewApplyOptions())
+}
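+
+// A minimal usage sketch (illustrative only, not part of the vendored source):
+//
+//	patch, err := DecodePatch([]byte(`[{"op": "replace", "path": "/name", "value": "jane"}]`))
+//	if err != nil {
+//		// handle the malformed patch
+//	}
+//	modified, err := patch.Apply([]byte(`{"name": "john"}`))
+//	// on success, modified is `{"name":"jane"}`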
+
+// ApplyWithOptions mutates a JSON document according to the patch and the passed in ApplyOptions.
+// It returns the new document.
+func (p Patch) ApplyWithOptions(doc []byte, options *ApplyOptions) ([]byte, error) {
+ return p.ApplyIndentWithOptions(doc, "", options)
+}
+
+// ApplyIndent mutates a JSON document according to the patch, and returns the new
+// document indented.
+func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
+ return p.ApplyIndentWithOptions(doc, indent, NewApplyOptions())
+}
+
+// ApplyIndentWithOptions mutates a JSON document according to the patch and the passed in ApplyOptions.
+// It returns the new document indented.
+func (p Patch) ApplyIndentWithOptions(doc []byte, indent string, options *ApplyOptions) ([]byte, error) {
+ if len(doc) == 0 {
+ return doc, nil
+ }
+
+ var pd container
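+ // The root container type is inferred from the first byte of the document:
+ // '[' means a JSON array, anything else is treated as an object.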
+ if doc[0] == '[' {
+ pd = &partialArray{}
+ } else {
+ pd = &partialDoc{}
+ }
+
+ err := json.Unmarshal(doc, pd)
+
+ if err != nil {
+ return nil, err
+ }
+
+ err = nil
+
+ var accumulatedCopySize int64
+
+ for _, op := range p {
+ switch op.Kind() {
+ case "add":
+ err = p.add(&pd, op, options)
+ case "remove":
+ err = p.remove(&pd, op, options)
+ case "replace":
+ err = p.replace(&pd, op, options)
+ case "move":
+ err = p.move(&pd, op, options)
+ case "test":
+ err = p.test(&pd, op, options)
+ case "copy":
+ err = p.copy(&pd, op, &accumulatedCopySize, options)
+ default:
+ err = fmt.Errorf("Unexpected kind: %s", op.Kind())
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if indent != "" {
+ return json.MarshalIndent(pd, "", indent)
+ }
+
+ return json.Marshal(pd)
+}
+
+// From http://tools.ietf.org/html/rfc6901#section-4 :
+//
+// Evaluation of each reference token begins by decoding any escaped
+// character sequence. This is performed by first transforming any
+// occurrence of the sequence '~1' to '/', and then transforming any
+// occurrence of the sequence '~0' to '~'.
+
+var (
+ rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~")
+)
+
+func decodePatchKey(k string) string {
+ return rfc6901Decoder.Replace(k)
+}
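+
+// For example (an illustrative note, not part of the vendored source):
+//
+//	decodePatchKey("a~1b~0c") // returns "a/b~c"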
diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/github.com/imdario/mergo/.gitignore
new file mode 100644
index 00000000..529c3412
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/.gitignore
@@ -0,0 +1,33 @@
+#### joe made this: http://goel.io/joe
+
+#### go ####
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+#### vim ####
+# Swap
+[._]*.s[a-v][a-z]
+[._]*.sw[a-p]
+[._]s[a-v][a-z]
+[._]sw[a-p]
+
+# Session
+Session.vim
+
+# Temporary
+.netrwhist
+*~
+# Auto-generated tag files
+tags
diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml
new file mode 100644
index 00000000..b13a50ed
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+install:
+ - go get -t
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+script:
+ - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN
diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..469b4490
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE
new file mode 100644
index 00000000..68668029
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2013 Dario Castañé. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md
new file mode 100644
index 00000000..8b76f1fb
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/README.md
@@ -0,0 +1,222 @@
+# Mergo
+
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
+
+## Status
+
+It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
+
+[![GoDoc][3]][4]
+[![GoCard][5]][6]
+[![Build Status][1]][2]
+[![Coverage Status][7]][8]
+[![Sourcegraph][9]][10]
+
+[1]: https://travis-ci.org/imdario/mergo.png
+[2]: https://travis-ci.org/imdario/mergo
+[3]: https://godoc.org/github.com/imdario/mergo?status.svg
+[4]: https://godoc.org/github.com/imdario/mergo
+[5]: https://goreportcard.com/badge/imdario/mergo
+[6]: https://goreportcard.com/report/github.com/imdario/mergo
+[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
+[8]: https://coveralls.io/github/imdario/mergo?branch=master
+[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
+[10]: https://sourcegraph.com/github.com/imdario/mergo?badge
+
+### Latest release
+
+[Release v0.3.6](https://github.com/imdario/mergo/releases/tag/v0.3.6).
+
+### Important note
+
+Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()` and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code.
+
+If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0).
+
+### Donations
+
+If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes:
+
+
+[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo)
+[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo)
+
+
+### Mergo in the wild
+
+- [moby/moby](https://github.com/moby/moby)
+- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
+- [vmware/dispatch](https://github.com/vmware/dispatch)
+- [Shopify/themekit](https://github.com/Shopify/themekit)
+- [imdario/zas](https://github.com/imdario/zas)
+- [matcornic/hermes](https://github.com/matcornic/hermes)
+- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
+- [kataras/iris](https://github.com/kataras/iris)
+- [michaelsauter/crane](https://github.com/michaelsauter/crane)
+- [go-task/task](https://github.com/go-task/task)
+- [sensu/uchiwa](https://github.com/sensu/uchiwa)
+- [ory/hydra](https://github.com/ory/hydra)
+- [sisatech/vcli](https://github.com/sisatech/vcli)
+- [dairycart/dairycart](https://github.com/dairycart/dairycart)
+- [projectcalico/felix](https://github.com/projectcalico/felix)
+- [resin-os/balena](https://github.com/resin-os/balena)
+- [go-kivik/kivik](https://github.com/go-kivik/kivik)
+- [Telefonica/govice](https://github.com/Telefonica/govice)
+- [supergiant/supergiant](https://github.com/supergiant/supergiant)
+- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
+- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
+- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
+- [EagerIO/Stout](https://github.com/EagerIO/Stout)
+- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
+- [russross/canvasassignments](https://github.com/russross/canvasassignments)
+- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
+- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
+- [divshot/gitling](https://github.com/divshot/gitling)
+- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
+- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
+- [elwinar/rambler](https://github.com/elwinar/rambler)
+- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
+- [jfbus/impressionist](https://github.com/jfbus/impressionist)
+- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
+- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
+- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
+- [thoas/picfit](https://github.com/thoas/picfit)
+- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
+- [jnuthong/item_search](https://github.com/jnuthong/item_search)
+- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
+
+## Installation
+
+ go get github.com/imdario/mergo
+
+ // use in your .go code
+ import (
+ "github.com/imdario/mergo"
+ )
+
+## Usage
+
+You can only merge same-type structs with exported fields initialized to their type's zero value, and same-type maps. Mergo won't merge unexported (private) fields, but it will recursively merge any exported ones. It won't merge empty struct values either, as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value). Maps are merged recursively, except for structs inside maps (because they are not addressable using Go reflection).
+
+```go
+if err := mergo.Merge(&dst, src); err != nil {
+ // ...
+}
+```
+
+Also, you can merge overwriting values using the option `WithOverride`.
+
+```go
+if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+ // ...
+}
+```
+
+Additionally, you can map a `map[string]interface{}` to a struct (and vice versa, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
+
+```go
+if err := mergo.Map(&dst, srcMap); err != nil {
+ // ...
+}
+```
+
+Warning: mapping a struct to a map is not done recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`; they will simply be assigned as values.
+
+More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo).
+
+### Nice example
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/imdario/mergo"
+)
+
+type Foo struct {
+ A string
+ B int64
+}
+
+func main() {
+ src := Foo{
+ A: "one",
+ B: 2,
+ }
+ dest := Foo{
+ A: "two",
+ }
+ mergo.Merge(&dest, src)
+ fmt.Println(dest)
+ // Will print
+ // {two 2}
+}
+```
+
+Note: if tests are failing due to a missing package, please execute:
+
+ go get gopkg.in/yaml.v2
+
+### Transformers
+
+Transformers allow specific types to be merged differently from the default behavior. In other words, you can customize how some types are merged. For example, `time.Time` is a struct; it is never considered an empty value by Mergo, but its `IsZero` method can return true because all of its fields hold zero values. How can we merge a non-zero `time.Time`?
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/imdario/mergo"
+ "reflect"
+ "time"
+)
+
+type timeTransfomer struct {
+}
+
+func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+ if typ == reflect.TypeOf(time.Time{}) {
+ return func(dst, src reflect.Value) error {
+ if dst.CanSet() {
+ isZero := dst.MethodByName("IsZero")
+ result := isZero.Call([]reflect.Value{})
+ if result[0].Bool() {
+ dst.Set(src)
+ }
+ }
+ return nil
+ }
+ }
+ return nil
+}
+
+type Snapshot struct {
+ Time time.Time
+ // ...
+}
+
+func main() {
+ src := Snapshot{time.Now()}
+ dest := Snapshot{}
+ mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{}))
+ fmt.Println(dest)
+ // Will print
+ // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
+}
+```
+
+
+## Contact me
+
+If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
+
+## About
+
+Written by [Dario Castañé](http://dario.im).
+
+## License
+
+[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go
new file mode 100644
index 00000000..6e9aa7ba
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/doc.go
@@ -0,0 +1,44 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package mergo merges same-type structs and maps by setting default values in zero-value fields.
+
+Mergo won't merge unexported (private) fields, but it will recursively merge any exported ones. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Usage
+
+From my own work-in-progress project:
+
+ type networkConfig struct {
+ Protocol string
+ Address string
+ ServerType string `json:"server_type"`
+ Port uint16
+ }
+
+ type FssnConfig struct {
+ Network networkConfig
+ }
+
+ var fssnDefault = FssnConfig {
+ networkConfig {
+ "tcp",
+ "127.0.0.1",
+ "http",
+ 31560,
+ },
+ }
+
+ // Inside a function [...]
+
+ if err := mergo.Merge(&config, fssnDefault); err != nil {
+ log.Fatal(err)
+ }
+
+ // More code [...]
+
+*/
+package mergo
diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go
new file mode 100644
index 00000000..6ea38e63
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/map.go
@@ -0,0 +1,174 @@
+// Copyright 2014 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "fmt"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+)
+
+func changeInitialCase(s string, mapper func(rune) rune) string {
+ if s == "" {
+ return s
+ }
+ r, n := utf8.DecodeRuneInString(s)
+ return string(mapper(r)) + s[n:]
+}
+
+func isExported(field reflect.StructField) bool {
+ r, _ := utf8.DecodeRuneInString(field.Name)
+ return r >= 'A' && r <= 'Z'
+}
+
+// Traverses recursively both values, assigning src's fields values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
+ overwrite := config.Overwrite
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{addr, typ, seen}
+ }
+ zeroValue := reflect.Value{}
+ switch dst.Kind() {
+ case reflect.Map:
+ dstMap := dst.Interface().(map[string]interface{})
+ for i, n := 0, src.NumField(); i < n; i++ {
+ srcType := src.Type()
+ field := srcType.Field(i)
+ if !isExported(field) {
+ continue
+ }
+ fieldName := field.Name
+ fieldName = changeInitialCase(fieldName, unicode.ToLower)
+ if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) {
+ dstMap[fieldName] = src.Field(i).Interface()
+ }
+ }
+ case reflect.Ptr:
+ if dst.IsNil() {
+ v := reflect.New(dst.Type().Elem())
+ dst.Set(v)
+ }
+ dst = dst.Elem()
+ fallthrough
+ case reflect.Struct:
+ srcMap := src.Interface().(map[string]interface{})
+ for key := range srcMap {
+ srcValue := srcMap[key]
+ fieldName := changeInitialCase(key, unicode.ToUpper)
+ dstElement := dst.FieldByName(fieldName)
+ if dstElement == zeroValue {
+ // We discard it because the field doesn't exist.
+ continue
+ }
+ srcElement := reflect.ValueOf(srcValue)
+ dstKind := dstElement.Kind()
+ srcKind := srcElement.Kind()
+ if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
+ srcElement = srcElement.Elem()
+ srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
+ } else if dstKind == reflect.Ptr {
+ // Can this work? I guess it can't.
+ if srcKind != reflect.Ptr && srcElement.CanAddr() {
+ srcPtr := srcElement.Addr()
+ srcElement = reflect.ValueOf(srcPtr)
+ srcKind = reflect.Ptr
+ }
+ }
+
+ if !srcElement.IsValid() {
+ continue
+ }
+ if srcKind == dstKind {
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ } else if srcKind == reflect.Map {
+ if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ } else {
+ return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
+ }
+ }
+ }
+ return
+}
+
+// Map sets fields' values in dst from src.
+// src can be a map with string keys or a struct. dst must be the opposite:
+// if src is a map, dst must be a valid pointer to struct. If src is a struct,
+// dst must be map[string]interface{}.
+// It won't merge unexported (private) fields and will recursively merge any
+// exported field.
+// If dst is a map, keys will be src fields' names in lower camel case.
+// Keys in src that don't match a field in dst will be skipped. This
+// doesn't apply if dst is a map.
+// This is a separate method from Merge because it is cleaner and it keeps sane
+// semantics: merging equal types, mapping different (restricted) types.
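+//
+// A minimal sketch (illustrative, not part of the upstream documentation):
+//
+//	var dst struct{ Name string }
+//	src := map[string]interface{}{"name": "mergo"}
+//	_ = Map(&dst, src) // dst.Name == "mergo"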
+func Map(dst, src interface{}, opts ...func(*Config)) error {
+ return _map(dst, src, opts...)
+}
+
+// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+// Deprecated: Use Map(…) with WithOverride
+func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
+ return _map(dst, src, append(opts, WithOverride)...)
+}
+
+func _map(dst, src interface{}, opts ...func(*Config)) error {
+ var (
+ vDst, vSrc reflect.Value
+ err error
+ )
+ config := &Config{}
+
+ for _, opt := range opts {
+ opt(config)
+ }
+
+ if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+ return err
+ }
+ // To be friction-less, we redirect equal-type arguments
+ // to deepMerge. Only because arguments can be anything.
+ if vSrc.Kind() == vDst.Kind() {
+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+ }
+ switch vSrc.Kind() {
+ case reflect.Struct:
+ if vDst.Kind() != reflect.Map {
+ return ErrExpectedMapAsDestination
+ }
+ case reflect.Map:
+ if vDst.Kind() != reflect.Struct {
+ return ErrExpectedStructAsDestination
+ }
+ default:
+ return ErrNotSupported
+ }
+ return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+}
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go
new file mode 100644
index 00000000..44f70a89
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/merge.go
@@ -0,0 +1,252 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "fmt"
+ "reflect"
+)
+
+func hasExportedField(dst reflect.Value) (exported bool) {
+ for i, n := 0, dst.NumField(); i < n; i++ {
+ field := dst.Type().Field(i)
+ if field.Anonymous && dst.Field(i).Kind() == reflect.Struct {
+ exported = exported || hasExportedField(dst.Field(i))
+ } else {
+ exported = exported || len(field.PkgPath) == 0
+ }
+ }
+ return
+}
+
+type Config struct {
+ Overwrite bool
+ AppendSlice bool
+ Transformers Transformers
+}
+
+type Transformers interface {
+ Transformer(reflect.Type) func(dst, src reflect.Value) error
+}
+
+// Traverses recursively both values, assigning src's fields values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
+ overwrite := config.Overwrite
+
+ if !src.IsValid() {
+ return
+ }
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{addr, typ, seen}
+ }
+
+ if config.Transformers != nil && !isEmptyValue(dst) {
+ if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
+ err = fn(dst, src)
+ return
+ }
+ }
+
+ switch dst.Kind() {
+ case reflect.Struct:
+ if hasExportedField(dst) {
+ for i, n := 0, dst.NumField(); i < n; i++ {
+ if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil {
+ return
+ }
+ }
+ } else {
+ if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) {
+ dst.Set(src)
+ }
+ }
+ case reflect.Map:
+ if dst.IsNil() && !src.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ for _, key := range src.MapKeys() {
+ srcElement := src.MapIndex(key)
+ if !srcElement.IsValid() {
+ continue
+ }
+ dstElement := dst.MapIndex(key)
+ switch srcElement.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
+ if srcElement.IsNil() {
+ continue
+ }
+ fallthrough
+ default:
+ if !srcElement.CanInterface() {
+ continue
+ }
+ switch reflect.TypeOf(srcElement.Interface()).Kind() {
+ case reflect.Struct:
+ fallthrough
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Map:
+ srcMapElm := srcElement
+ dstMapElm := dstElement
+ if srcMapElm.CanInterface() {
+ srcMapElm = reflect.ValueOf(srcMapElm.Interface())
+ if dstMapElm.IsValid() {
+ dstMapElm = reflect.ValueOf(dstMapElm.Interface())
+ }
+ }
+ if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil {
+ return
+ }
+ case reflect.Slice:
+ srcSlice := reflect.ValueOf(srcElement.Interface())
+
+ var dstSlice reflect.Value
+ if !dstElement.IsValid() || dstElement.IsNil() {
+ dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len())
+ } else {
+ dstSlice = reflect.ValueOf(dstElement.Interface())
+ }
+
+ if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
+ dstSlice = srcSlice
+ } else if config.AppendSlice {
+ if srcSlice.Type() != dstSlice.Type() {
+ return fmt.Errorf("cannot append two slice with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
+ }
+ dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
+ }
+ dst.SetMapIndex(key, dstSlice)
+ }
+ }
+ if dstElement.IsValid() && reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map {
+ continue
+ }
+
+ if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) {
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ dst.SetMapIndex(key, srcElement)
+ }
+ }
+ case reflect.Slice:
+ if !dst.CanSet() {
+ break
+ }
+ if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
+ dst.Set(src)
+ } else if config.AppendSlice {
+ if src.Type() != dst.Type() {
+ return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
+ }
+ dst.Set(reflect.AppendSlice(dst, src))
+ }
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Interface:
+ if src.IsNil() {
+ break
+ }
+ if src.Kind() != reflect.Interface {
+ if dst.IsNil() || overwrite {
+ if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+ dst.Set(src)
+ }
+ } else if src.Kind() == reflect.Ptr {
+ if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+ return
+ }
+ } else if dst.Elem().Type() == src.Type() {
+ if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
+ return
+ }
+ } else {
+ return ErrDifferentArgumentsTypes
+ }
+ break
+ }
+ if dst.IsNil() || overwrite {
+ if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+ dst.Set(src)
+ }
+ } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+ return
+ }
+ default:
+ if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) {
+ dst.Set(src)
+ }
+ }
+ return
+}
+
+// Merge will fill any empty value-type attributes on the dst struct using the corresponding
+// src attributes, if those are not empty themselves. dst and src must be valid same-type structs
+// and dst must be a pointer to a struct.
+// It won't merge unexported (private) fields and will recursively merge any exported field.
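+//
+// A minimal sketch (illustrative, not part of the upstream documentation):
+//
+//	type config struct{ Host string; Port int }
+//	dst := config{Port: 8080}
+//	_ = Merge(&dst, config{Host: "localhost", Port: 9090})
+//	// dst is config{Host: "localhost", Port: 8080}: only the empty field was filled in.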
+func Merge(dst, src interface{}, opts ...func(*Config)) error {
+ return merge(dst, src, opts...)
+}
+
+// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+// Deprecated: use Merge(…) with WithOverride
+func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
+ return merge(dst, src, append(opts, WithOverride)...)
+}
+
+// WithTransformers adds transformers to merge, allowing the merging of some types to be customized.
+func WithTransformers(transformers Transformers) func(*Config) {
+ return func(config *Config) {
+ config.Transformers = transformers
+ }
+}
+
+// WithOverride will make merge override non-empty dst attributes with non-empty src attribute values.
+func WithOverride(config *Config) {
+ config.Overwrite = true
+}
+
+// WithAppendSlice will make merge append slices instead of overwriting them.
+func WithAppendSlice(config *Config) {
+ config.AppendSlice = true
+}
+
+func merge(dst, src interface{}, opts ...func(*Config)) error {
+ var (
+ vDst, vSrc reflect.Value
+ err error
+ )
+
+ config := &Config{}
+
+ for _, opt := range opts {
+ opt(config)
+ }
+
+ if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+ return err
+ }
+ if vDst.Type() != vSrc.Type() {
+ return ErrDifferentArgumentsTypes
+ }
+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+}
diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go
new file mode 100644
index 00000000..a82fea2f
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/mergo.go
@@ -0,0 +1,97 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "errors"
+ "reflect"
+)
+
+// Errors reported by Mergo when it finds invalid arguments.
+var (
+ ErrNilArguments = errors.New("src and dst must not be nil")
+ ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
+ ErrNotSupported = errors.New("only structs and maps are supported")
+ ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
+ ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
+)
+
+// During deepMerge, must keep track of checks that are
+// in progress. The comparison algorithm assumes that all
+// checks in progress are true when it reencounters them.
+// Visited are stored in a map indexed by 17 * a1 + a2;
+type visit struct {
+ ptr uintptr
+ typ reflect.Type
+ next *visit
+}
+
+// From src/pkg/encoding/json/encode.go.
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ if v.IsNil() {
+ return true
+ }
+ return isEmptyValue(v.Elem())
+ case reflect.Func:
+ return v.IsNil()
+ case reflect.Invalid:
+ return true
+ }
+ return false
+}
+
+func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
+ if dst == nil || src == nil {
+ err = ErrNilArguments
+ return
+ }
+ vDst = reflect.ValueOf(dst).Elem()
+ if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
+ err = ErrNotSupported
+ return
+ }
+ vSrc = reflect.ValueOf(src)
+ // We check if vSrc is a pointer to dereference it.
+ if vSrc.Kind() == reflect.Ptr {
+ vSrc = vSrc.Elem()
+ }
+ return
+}
+
+// Traverses recursively both values, assigning src's fields values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{addr, typ, seen}
+ }
+ return // TODO refactor
+}
diff --git a/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go
index a411d542..0d7823b3 100644
--- a/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go
+++ b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go
@@ -1,6 +1,7 @@
package jsonpatch
import (
+ "bytes"
"encoding/json"
"fmt"
"reflect"
@@ -64,6 +65,9 @@ func NewOperation(op, path string, value interface{}) Operation {
//
// An error will be returned if any of the two documents are invalid.
func CreatePatch(a, b []byte) ([]Operation, error) {
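+ // Identical documents trivially produce an empty patch, so short-circuit
+ // before unmarshalling either one.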
+ if bytes.Equal(a, b) {
+ return []Operation{}, nil
+ }
var aI interface{}
var bI interface{}
err := json.Unmarshal(a, &aI)
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go
new file mode 100644
index 00000000..2741ee2c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:conversion-gen=k8s.io/apimachinery/pkg/apis/meta/v1
+
+package internalversion // import "k8s.io/apimachinery/pkg/apis/meta/internalversion"
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go
new file mode 100644
index 00000000..a59ac712
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go
@@ -0,0 +1,88 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internalversion
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name for this API.
+const GroupName = "meta.k8s.io"
+
+var (
+ // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+ // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+ SchemeBuilder runtime.SchemeBuilder
+ localSchemeBuilder = &SchemeBuilder
+ AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// addToGroupVersion registers common meta types into schemas.
+func addToGroupVersion(scheme *runtime.Scheme) error {
+ if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil {
+ return err
+ }
+ // ListOptions is the only options struct which needs conversion (it exposes labels and fields
+ // as selectors for convenience). The other types have only a single representation today.
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &ListOptions{},
+ &metav1.GetOptions{},
+ &metav1.DeleteOptions{},
+ &metav1.CreateOptions{},
+ &metav1.UpdateOptions{},
+ )
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &metav1.Table{},
+ &metav1.TableOptions{},
+ &metav1beta1.PartialObjectMetadata{},
+ &metav1beta1.PartialObjectMetadataList{},
+ )
+ if err := metav1beta1.AddMetaToScheme(scheme); err != nil {
+ return err
+ }
+ if err := metav1.AddMetaToScheme(scheme); err != nil {
+ return err
+ }
+ // Allow delete options to be decoded across all version in this scheme (we may want to be more clever than this)
+ scheme.AddUnversionedTypes(SchemeGroupVersion,
+ &metav1.DeleteOptions{},
+ &metav1.CreateOptions{},
+ &metav1.UpdateOptions{})
+
+ metav1.AddToGroupVersion(scheme, metav1.SchemeGroupVersion)
+ if err := metav1beta1.RegisterConversions(scheme); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Unlike other API groups, meta internal knows about all meta external versions, but keeps
+// the logic for conversion private.
+func init() {
+ localSchemeBuilder.Register(addToGroupVersion)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go
new file mode 100644
index 00000000..a45fa2a8
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go
@@ -0,0 +1,17 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scheme // import "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme"
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go
new file mode 100644
index 00000000..472a9aeb
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scheme
+
+import (
+ "k8s.io/apimachinery/pkg/apis/meta/internalversion"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// Scheme is the registry for any type that adheres to the meta API spec.
+var scheme = runtime.NewScheme()
+
+// Codecs provides access to encoding and decoding for the scheme.
+var Codecs = serializer.NewCodecFactory(scheme)
+
+// ParameterCodec handles versioning of objects that are converted to query parameters.
+var ParameterCodec = runtime.NewParameterCodec(scheme)
+
+// Unlike other API groups, meta internal knows about all meta external versions, but keeps
+// the logic for conversion private.
+func init() {
+ utilruntime.Must(internalversion.AddToScheme(scheme))
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go
new file mode 100644
index 00000000..a49b5f2b
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go
@@ -0,0 +1,80 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internalversion
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/fields"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ListOptions is the query options to a standard REST list call.
+type ListOptions struct {
+ metav1.TypeMeta
+
+ // A selector based on labels
+ LabelSelector labels.Selector
+ // A selector based on fields
+ FieldSelector fields.Selector
+ // If true, watch for changes to this list
+ Watch bool
+ // allowWatchBookmarks requests watch events with type "BOOKMARK".
+ // Servers that do not implement bookmarks may ignore this flag and
+ // bookmarks are sent at the server's discretion. Clients should not
+ // assume bookmarks are returned at any specific interval, nor may they
+ // assume the server will send any BOOKMARK event during a session.
+ // If this is not a watch, this field is ignored.
+ // If the feature gate WatchBookmarks is not enabled in apiserver,
+ // this field is ignored.
+ AllowWatchBookmarks bool
+ // resourceVersion sets a constraint on what resource versions a request may be served from.
+ // See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
+ // details.
+ ResourceVersion string
+ // resourceVersionMatch determines how resourceVersion is applied to list calls.
+ // It is highly recommended that resourceVersionMatch be set for list calls where
+ // resourceVersion is set.
+ // See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
+ // details.
+ ResourceVersionMatch metav1.ResourceVersionMatch
+
+ // Timeout for the list/watch call.
+ TimeoutSeconds *int64
+ // Limit specifies the maximum number of results to return from the server. The server may
+ // not support this field on all resource types, but if it does and more results remain it
+ // will set the continue field on the returned list object.
+ Limit int64
+ // Continue is a token returned by the server that lets a client retrieve chunks of results
+ // from the server by specifying limit. The server may reject requests for continuation tokens
+ // it does not recognize and will return a 410 error if the token can no longer be used because
+ // it has expired.
+ Continue string
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// List holds a list of objects, which may not be known by the server.
+type List struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ListMeta
+
+ Items []runtime.Object
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go
new file mode 100644
index 00000000..6d212b84
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go
@@ -0,0 +1,146 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package internalversion
+
+import (
+ unsafe "unsafe"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ conversion "k8s.io/apimachinery/pkg/conversion"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+func init() {
+ localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+ if err := s.AddGeneratedConversionFunc((*List)(nil), (*v1.List)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_internalversion_List_To_v1_List(a.(*List), b.(*v1.List), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1.List)(nil), (*List)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1_List_To_internalversion_List(a.(*v1.List), b.(*List), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ListOptions)(nil), (*v1.ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_internalversion_ListOptions_To_v1_ListOptions(a.(*ListOptions), b.(*v1.ListOptions), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1.ListOptions)(nil), (*ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1_ListOptions_To_internalversion_ListOptions(a.(*v1.ListOptions), b.(*ListOptions), scope)
+ }); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_internalversion_List_To_v1_List(in *List, out *v1.List, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]runtime.RawExtension, len(*in))
+ for i := range *in {
+ if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_internalversion_List_To_v1_List is an autogenerated conversion function.
+func Convert_internalversion_List_To_v1_List(in *List, out *v1.List, s conversion.Scope) error {
+ return autoConvert_internalversion_List_To_v1_List(in, out, s)
+}
+
+func autoConvert_v1_List_To_internalversion_List(in *v1.List, out *List, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]runtime.Object, len(*in))
+ for i := range *in {
+ if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1_List_To_internalversion_List is an autogenerated conversion function.
+func Convert_v1_List_To_internalversion_List(in *v1.List, out *List, s conversion.Scope) error {
+ return autoConvert_v1_List_To_internalversion_List(in, out, s)
+}
+
+func autoConvert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out *v1.ListOptions, s conversion.Scope) error {
+ if err := v1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil {
+ return err
+ }
+ if err := v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil {
+ return err
+ }
+ out.Watch = in.Watch
+ out.AllowWatchBookmarks = in.AllowWatchBookmarks
+ out.ResourceVersion = in.ResourceVersion
+ out.ResourceVersionMatch = v1.ResourceVersionMatch(in.ResourceVersionMatch)
+ out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds))
+ out.Limit = in.Limit
+ out.Continue = in.Continue
+ return nil
+}
+
+// Convert_internalversion_ListOptions_To_v1_ListOptions is an autogenerated conversion function.
+func Convert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out *v1.ListOptions, s conversion.Scope) error {
+ return autoConvert_internalversion_ListOptions_To_v1_ListOptions(in, out, s)
+}
+
+func autoConvert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOptions, out *ListOptions, s conversion.Scope) error {
+ if err := v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil {
+ return err
+ }
+ if err := v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil {
+ return err
+ }
+ out.Watch = in.Watch
+ out.AllowWatchBookmarks = in.AllowWatchBookmarks
+ out.ResourceVersion = in.ResourceVersion
+ out.ResourceVersionMatch = v1.ResourceVersionMatch(in.ResourceVersionMatch)
+ out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds))
+ out.Limit = in.Limit
+ out.Continue = in.Continue
+ return nil
+}
+
+// Convert_v1_ListOptions_To_internalversion_ListOptions is an autogenerated conversion function.
+func Convert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOptions, out *ListOptions, s conversion.Scope) error {
+ return autoConvert_v1_ListOptions_To_internalversion_ListOptions(in, out, s)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go
new file mode 100644
index 00000000..6e1eac5c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go
@@ -0,0 +1,97 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package internalversion
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *List) DeepCopyInto(out *List) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]runtime.Object, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ (*out)[i] = (*in)[i].DeepCopyObject()
+ }
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List.
+func (in *List) DeepCopy() *List {
+ if in == nil {
+ return nil
+ }
+ out := new(List)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *List) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ListOptions) DeepCopyInto(out *ListOptions) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.LabelSelector != nil {
+ out.LabelSelector = in.LabelSelector.DeepCopySelector()
+ }
+ if in.FieldSelector != nil {
+ out.FieldSelector = in.FieldSelector.DeepCopySelector()
+ }
+ if in.TimeoutSeconds != nil {
+ in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
+ *out = new(int64)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListOptions.
+func (in *ListOptions) DeepCopy() *ListOptions {
+ if in == nil {
+ return nil
+ }
+ out := new(ListOptions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ListOptions) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
diff --git a/vendor/k8s.io/client-go/dynamic/interface.go b/vendor/k8s.io/client-go/dynamic/interface.go
new file mode 100644
index 00000000..a310b63e
--- /dev/null
+++ b/vendor/k8s.io/client-go/dynamic/interface.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dynamic
+
+import (
+ "context"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/watch"
+)
+
+type Interface interface {
+ Resource(resource schema.GroupVersionResource) NamespaceableResourceInterface
+}
+
+type ResourceInterface interface {
+ Create(ctx context.Context, obj *unstructured.Unstructured, options metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error)
+ Update(ctx context.Context, obj *unstructured.Unstructured, options metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error)
+ UpdateStatus(ctx context.Context, obj *unstructured.Unstructured, options metav1.UpdateOptions) (*unstructured.Unstructured, error)
+ Delete(ctx context.Context, name string, options metav1.DeleteOptions, subresources ...string) error
+ DeleteCollection(ctx context.Context, options metav1.DeleteOptions, listOptions metav1.ListOptions) error
+ Get(ctx context.Context, name string, options metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, options metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error)
+ Apply(ctx context.Context, name string, obj *unstructured.Unstructured, options metav1.ApplyOptions, subresources ...string) (*unstructured.Unstructured, error)
+ ApplyStatus(ctx context.Context, name string, obj *unstructured.Unstructured, options metav1.ApplyOptions) (*unstructured.Unstructured, error)
+}
+
+type NamespaceableResourceInterface interface {
+ Namespace(string) ResourceInterface
+ ResourceInterface
+}
+
+// APIPathResolverFunc knows how to convert a groupVersion to its API path. The Kind field is optional.
+// TODO find a better place to move this for existing callers
+type APIPathResolverFunc func(kind schema.GroupVersionKind) string
+
+// LegacyAPIPathResolverFunc can resolve paths properly with the legacy API.
+// TODO find a better place to move this for existing callers
+func LegacyAPIPathResolverFunc(kind schema.GroupVersionKind) string {
+ if len(kind.Group) == 0 {
+ return "/api"
+ }
+ return "/apis"
+}
diff --git a/vendor/k8s.io/client-go/dynamic/scheme.go b/vendor/k8s.io/client-go/dynamic/scheme.go
new file mode 100644
index 00000000..3168c872
--- /dev/null
+++ b/vendor/k8s.io/client-go/dynamic/scheme.go
@@ -0,0 +1,108 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dynamic
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ "k8s.io/apimachinery/pkg/runtime/serializer/json"
+)
+
+var watchScheme = runtime.NewScheme()
+var basicScheme = runtime.NewScheme()
+var deleteScheme = runtime.NewScheme()
+var parameterScheme = runtime.NewScheme()
+var deleteOptionsCodec = serializer.NewCodecFactory(deleteScheme)
+var dynamicParameterCodec = runtime.NewParameterCodec(parameterScheme)
+
+var versionV1 = schema.GroupVersion{Version: "v1"}
+
+func init() {
+ metav1.AddToGroupVersion(watchScheme, versionV1)
+ metav1.AddToGroupVersion(basicScheme, versionV1)
+ metav1.AddToGroupVersion(parameterScheme, versionV1)
+ metav1.AddToGroupVersion(deleteScheme, versionV1)
+}
+
+// basicNegotiatedSerializer is used to handle discovery and error handling serialization
+type basicNegotiatedSerializer struct{}
+
+func (s basicNegotiatedSerializer) SupportedMediaTypes() []runtime.SerializerInfo {
+ return []runtime.SerializerInfo{
+ {
+ MediaType: "application/json",
+ MediaTypeType: "application",
+ MediaTypeSubType: "json",
+ EncodesAsText: true,
+ Serializer: json.NewSerializer(json.DefaultMetaFactory, unstructuredCreater{basicScheme}, unstructuredTyper{basicScheme}, false),
+ PrettySerializer: json.NewSerializer(json.DefaultMetaFactory, unstructuredCreater{basicScheme}, unstructuredTyper{basicScheme}, true),
+ StreamSerializer: &runtime.StreamSerializerInfo{
+ EncodesAsText: true,
+ Serializer: json.NewSerializer(json.DefaultMetaFactory, basicScheme, basicScheme, false),
+ Framer: json.Framer,
+ },
+ },
+ }
+}
+
+func (s basicNegotiatedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder {
+ return runtime.WithVersionEncoder{
+ Version: gv,
+ Encoder: encoder,
+ ObjectTyper: unstructuredTyper{basicScheme},
+ }
+}
+
+func (s basicNegotiatedSerializer) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder {
+ return decoder
+}
+
+type unstructuredCreater struct {
+ nested runtime.ObjectCreater
+}
+
+func (c unstructuredCreater) New(kind schema.GroupVersionKind) (runtime.Object, error) {
+ out, err := c.nested.New(kind)
+ if err == nil {
+ return out, nil
+ }
+ out = &unstructured.Unstructured{}
+ out.GetObjectKind().SetGroupVersionKind(kind)
+ return out, nil
+}
+
+type unstructuredTyper struct {
+ nested runtime.ObjectTyper
+}
+
+func (t unstructuredTyper) ObjectKinds(obj runtime.Object) ([]schema.GroupVersionKind, bool, error) {
+ kinds, unversioned, err := t.nested.ObjectKinds(obj)
+ if err == nil {
+ return kinds, unversioned, nil
+ }
+ if _, ok := obj.(runtime.Unstructured); ok && !obj.GetObjectKind().GroupVersionKind().Empty() {
+ return []schema.GroupVersionKind{obj.GetObjectKind().GroupVersionKind()}, false, nil
+ }
+ return nil, false, err
+}
+
+func (t unstructuredTyper) Recognizes(gvk schema.GroupVersionKind) bool {
+ return true
+}
diff --git a/vendor/k8s.io/client-go/dynamic/simple.go b/vendor/k8s.io/client-go/dynamic/simple.go
new file mode 100644
index 00000000..9dc0fb5c
--- /dev/null
+++ b/vendor/k8s.io/client-go/dynamic/simple.go
@@ -0,0 +1,388 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dynamic
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/client-go/rest"
+)
+
+type dynamicClient struct {
+ client *rest.RESTClient
+}
+
+var _ Interface = &dynamicClient{}
+
+// ConfigFor returns a copy of the provided config with the
+// appropriate dynamic client defaults set.
+func ConfigFor(inConfig *rest.Config) *rest.Config {
+ config := rest.CopyConfig(inConfig)
+ config.AcceptContentTypes = "application/json"
+ config.ContentType = "application/json"
+ config.NegotiatedSerializer = basicNegotiatedSerializer{} // this gets used for discovery and error handling types
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+ return config
+}
+
+// NewForConfigOrDie creates a new Interface for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) Interface {
+ ret, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return ret
+}
+
+// NewForConfig creates a new dynamic client or returns an error.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(inConfig *rest.Config) (Interface, error) {
+ config := ConfigFor(inConfig)
+
+ httpClient, err := rest.HTTPClientFor(config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(config, httpClient)
+}
+
+// NewForConfigAndClient creates a new dynamic client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(inConfig *rest.Config, h *http.Client) (Interface, error) {
+ config := ConfigFor(inConfig)
+ // for serializing the options
+ config.GroupVersion = &schema.GroupVersion{}
+ config.APIPath = "/if-you-see-this-search-for-the-break"
+
+ restClient, err := rest.RESTClientForConfigAndClient(config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &dynamicClient{client: restClient}, nil
+}
+
+type dynamicResourceClient struct {
+ client *dynamicClient
+ namespace string
+ resource schema.GroupVersionResource
+}
+
+func (c *dynamicClient) Resource(resource schema.GroupVersionResource) NamespaceableResourceInterface {
+ return &dynamicResourceClient{client: c, resource: resource}
+}
+
+func (c *dynamicResourceClient) Namespace(ns string) ResourceInterface {
+ ret := *c
+ ret.namespace = ns
+ return &ret
+}
+
+func (c *dynamicResourceClient) Create(ctx context.Context, obj *unstructured.Unstructured, opts metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) {
+ outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
+ if err != nil {
+ return nil, err
+ }
+ name := ""
+ if len(subresources) > 0 {
+ accessor, err := meta.Accessor(obj)
+ if err != nil {
+ return nil, err
+ }
+ name = accessor.GetName()
+ if len(name) == 0 {
+ return nil, fmt.Errorf("name is required")
+ }
+ }
+
+ result := c.client.client.
+ Post().
+ AbsPath(append(c.makeURLSegments(name), subresources...)...).
+ SetHeader("Content-Type", runtime.ContentTypeJSON).
+ Body(outBytes).
+ SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
+ Do(ctx)
+ if err := result.Error(); err != nil {
+ return nil, err
+ }
+
+ retBytes, err := result.Raw()
+ if err != nil {
+ return nil, err
+ }
+ uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes)
+ if err != nil {
+ return nil, err
+ }
+ return uncastObj.(*unstructured.Unstructured), nil
+}
+
+func (c *dynamicResourceClient) Update(ctx context.Context, obj *unstructured.Unstructured, opts metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) {
+ accessor, err := meta.Accessor(obj)
+ if err != nil {
+ return nil, err
+ }
+ name := accessor.GetName()
+ if len(name) == 0 {
+ return nil, fmt.Errorf("name is required")
+ }
+ outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
+ if err != nil {
+ return nil, err
+ }
+
+ result := c.client.client.
+ Put().
+ AbsPath(append(c.makeURLSegments(name), subresources...)...).
+ SetHeader("Content-Type", runtime.ContentTypeJSON).
+ Body(outBytes).
+ SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
+ Do(ctx)
+ if err := result.Error(); err != nil {
+ return nil, err
+ }
+
+ retBytes, err := result.Raw()
+ if err != nil {
+ return nil, err
+ }
+ uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes)
+ if err != nil {
+ return nil, err
+ }
+ return uncastObj.(*unstructured.Unstructured), nil
+}
+
+func (c *dynamicResourceClient) UpdateStatus(ctx context.Context, obj *unstructured.Unstructured, opts metav1.UpdateOptions) (*unstructured.Unstructured, error) {
+ accessor, err := meta.Accessor(obj)
+ if err != nil {
+ return nil, err
+ }
+ name := accessor.GetName()
+ if len(name) == 0 {
+ return nil, fmt.Errorf("name is required")
+ }
+
+ outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
+ if err != nil {
+ return nil, err
+ }
+
+ result := c.client.client.
+ Put().
+ AbsPath(append(c.makeURLSegments(name), "status")...).
+ SetHeader("Content-Type", runtime.ContentTypeJSON).
+ Body(outBytes).
+ SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
+ Do(ctx)
+ if err := result.Error(); err != nil {
+ return nil, err
+ }
+
+ retBytes, err := result.Raw()
+ if err != nil {
+ return nil, err
+ }
+ uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes)
+ if err != nil {
+ return nil, err
+ }
+ return uncastObj.(*unstructured.Unstructured), nil
+}
+
+func (c *dynamicResourceClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions, subresources ...string) error {
+ if len(name) == 0 {
+ return fmt.Errorf("name is required")
+ }
+ deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts)
+ if err != nil {
+ return err
+ }
+
+ result := c.client.client.
+ Delete().
+ AbsPath(append(c.makeURLSegments(name), subresources...)...).
+ SetHeader("Content-Type", runtime.ContentTypeJSON).
+ Body(deleteOptionsByte).
+ Do(ctx)
+ return result.Error()
+}
+
+func (c *dynamicResourceClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+ deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts)
+ if err != nil {
+ return err
+ }
+
+ result := c.client.client.
+ Delete().
+ AbsPath(c.makeURLSegments("")...).
+ SetHeader("Content-Type", runtime.ContentTypeJSON).
+ Body(deleteOptionsByte).
+ SpecificallyVersionedParams(&listOptions, dynamicParameterCodec, versionV1).
+ Do(ctx)
+ return result.Error()
+}
+
+func (c *dynamicResourceClient) Get(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) {
+ if len(name) == 0 {
+ return nil, fmt.Errorf("name is required")
+ }
+ result := c.client.client.Get().AbsPath(append(c.makeURLSegments(name), subresources...)...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do(ctx)
+ if err := result.Error(); err != nil {
+ return nil, err
+ }
+ retBytes, err := result.Raw()
+ if err != nil {
+ return nil, err
+ }
+ uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes)
+ if err != nil {
+ return nil, err
+ }
+ return uncastObj.(*unstructured.Unstructured), nil
+}
+
+func (c *dynamicResourceClient) List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) {
+ result := c.client.client.Get().AbsPath(c.makeURLSegments("")...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do(ctx)
+ if err := result.Error(); err != nil {
+ return nil, err
+ }
+ retBytes, err := result.Raw()
+ if err != nil {
+ return nil, err
+ }
+ uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes)
+ if err != nil {
+ return nil, err
+ }
+ if list, ok := uncastObj.(*unstructured.UnstructuredList); ok {
+ return list, nil
+ }
+
+ list, err := uncastObj.(*unstructured.Unstructured).ToList()
+ if err != nil {
+ return nil, err
+ }
+ return list, nil
+}
+
+func (c *dynamicResourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ opts.Watch = true
+ return c.client.client.Get().AbsPath(c.makeURLSegments("")...).
+ SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
+ Watch(ctx)
+}
+
+func (c *dynamicResourceClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) {
+ if len(name) == 0 {
+ return nil, fmt.Errorf("name is required")
+ }
+ result := c.client.client.
+ Patch(pt).
+ AbsPath(append(c.makeURLSegments(name), subresources...)...).
+ Body(data).
+ SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
+ Do(ctx)
+ if err := result.Error(); err != nil {
+ return nil, err
+ }
+ retBytes, err := result.Raw()
+ if err != nil {
+ return nil, err
+ }
+ uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes)
+ if err != nil {
+ return nil, err
+ }
+ return uncastObj.(*unstructured.Unstructured), nil
+}
+
+func (c *dynamicResourceClient) Apply(ctx context.Context, name string, obj *unstructured.Unstructured, opts metav1.ApplyOptions, subresources ...string) (*unstructured.Unstructured, error) {
+ if len(name) == 0 {
+ return nil, fmt.Errorf("name is required")
+ }
+ outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
+ if err != nil {
+ return nil, err
+ }
+ accessor, err := meta.Accessor(obj)
+ if err != nil {
+ return nil, err
+ }
+ managedFields := accessor.GetManagedFields()
+ if len(managedFields) > 0 {
+ return nil, fmt.Errorf(`cannot apply an object with managed fields already set.
+ Use the client-go/applyconfigurations "UnstructuredExtractor" to obtain the unstructured ApplyConfiguration for the given field manager that you can use/modify here to apply`)
+ }
+ patchOpts := opts.ToPatchOptions()
+
+ result := c.client.client.
+ Patch(types.ApplyPatchType).
+ AbsPath(append(c.makeURLSegments(name), subresources...)...).
+ Body(outBytes).
+ SpecificallyVersionedParams(&patchOpts, dynamicParameterCodec, versionV1).
+ Do(ctx)
+ if err := result.Error(); err != nil {
+ return nil, err
+ }
+ retBytes, err := result.Raw()
+ if err != nil {
+ return nil, err
+ }
+ uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes)
+ if err != nil {
+ return nil, err
+ }
+ return uncastObj.(*unstructured.Unstructured), nil
+}
+func (c *dynamicResourceClient) ApplyStatus(ctx context.Context, name string, obj *unstructured.Unstructured, opts metav1.ApplyOptions) (*unstructured.Unstructured, error) {
+ return c.Apply(ctx, name, obj, opts, "status")
+}
+
+func (c *dynamicResourceClient) makeURLSegments(name string) []string {
+ url := []string{}
+ if len(c.resource.Group) == 0 {
+ url = append(url, "api")
+ } else {
+ url = append(url, "apis", c.resource.Group)
+ }
+ url = append(url, c.resource.Version)
+
+ if len(c.namespace) > 0 {
+ url = append(url, "namespaces", c.namespace)
+ }
+ url = append(url, c.resource.Resource)
+
+ if len(name) > 0 {
+ url = append(url, name)
+ }
+
+ return url
+}
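For context, here is a minimal, illustrative sketch (not part of the vendored file) of how the dynamic client above is typically constructed and used by a consumer of this repo. The kubeconfig loading and the GroupVersionResource are assumptions made for the example only.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from the default kubeconfig location (assumed setup).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	dc, err := dynamic.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Hypothetical GVR used only for illustration.
	gvr := schema.GroupVersionResource{Group: "cluster.bytebuilders.dev", Version: "v1alpha1", Resource: "clusterinfos"}
	list, err := dc.Resource(gvr).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, item := range list.Items {
		fmt.Println(item.GetName())
	}
}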
diff --git a/vendor/k8s.io/client-go/metadata/interface.go b/vendor/k8s.io/client-go/metadata/interface.go
new file mode 100644
index 00000000..127c3950
--- /dev/null
+++ b/vendor/k8s.io/client-go/metadata/interface.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metadata
+
+import (
+ "context"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/watch"
+)
+
+// Interface allows a caller to get the metadata (in the form of PartialObjectMetadata objects)
+// from any Kubernetes compatible resource API.
+type Interface interface {
+ Resource(resource schema.GroupVersionResource) Getter
+}
+
+// ResourceInterface contains the set of methods that may be invoked on objects by their metadata.
+// Update is not supported by the server, but Patch can be used for the actions Update would handle.
+type ResourceInterface interface {
+ Delete(ctx context.Context, name string, options metav1.DeleteOptions, subresources ...string) error
+ DeleteCollection(ctx context.Context, options metav1.DeleteOptions, listOptions metav1.ListOptions) error
+ Get(ctx context.Context, name string, options metav1.GetOptions, subresources ...string) (*metav1.PartialObjectMetadata, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*metav1.PartialObjectMetadataList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, options metav1.PatchOptions, subresources ...string) (*metav1.PartialObjectMetadata, error)
+}
+
+// Getter handles both namespaced and non-namespaced resource types consistently.
+type Getter interface {
+ Namespace(string) ResourceInterface
+ ResourceInterface
+}
diff --git a/vendor/k8s.io/client-go/metadata/metadata.go b/vendor/k8s.io/client-go/metadata/metadata.go
new file mode 100644
index 00000000..8152aa12
--- /dev/null
+++ b/vendor/k8s.io/client-go/metadata/metadata.go
@@ -0,0 +1,331 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metadata
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "time"
+
+ "k8s.io/klog/v2"
+
+ metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/client-go/rest"
+)
+
+var deleteScheme = runtime.NewScheme()
+var parameterScheme = runtime.NewScheme()
+var deleteOptionsCodec = serializer.NewCodecFactory(deleteScheme)
+var dynamicParameterCodec = runtime.NewParameterCodec(parameterScheme)
+
+var versionV1 = schema.GroupVersion{Version: "v1"}
+
+func init() {
+ metav1.AddToGroupVersion(parameterScheme, versionV1)
+ metav1.AddToGroupVersion(deleteScheme, versionV1)
+}
+
+// Client allows callers to retrieve the object metadata for any
+// Kubernetes-compatible API endpoint. The client uses the
+// meta.k8s.io/v1 PartialObjectMetadata resource to more efficiently
+// retrieve just the necessary metadata, but on older servers
+// (Kubernetes 1.14 and before) will retrieve the object and then
+// convert the metadata.
+type Client struct {
+ client *rest.RESTClient
+}
+
+var _ Interface = &Client{}
+
+// ConfigFor returns a copy of the provided config with the
+// appropriate metadata client defaults set.
+func ConfigFor(inConfig *rest.Config) *rest.Config {
+ config := rest.CopyConfig(inConfig)
+ config.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json"
+ config.ContentType = "application/vnd.kubernetes.protobuf"
+ config.NegotiatedSerializer = metainternalversionscheme.Codecs.WithoutConversion()
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+ return config
+}
+
+// NewForConfigOrDie creates a new metadata client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) Interface {
+ ret, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return ret
+}
+
+// NewForConfig creates a new metadata client that can retrieve object
+// metadata details about any Kubernetes object (core, aggregated, or custom
+// resource based) in the form of PartialObjectMetadata objects, or returns
+// an error.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(inConfig *rest.Config) (Interface, error) {
+ config := ConfigFor(inConfig)
+
+ httpClient, err := rest.HTTPClientFor(config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(config, httpClient)
+}
+
+// NewForConfigAndClient creates a new metadata client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(inConfig *rest.Config, h *http.Client) (Interface, error) {
+ config := ConfigFor(inConfig)
+ // for serializing the options
+ config.GroupVersion = &schema.GroupVersion{}
+ config.APIPath = "/this-value-should-never-be-sent"
+
+ restClient, err := rest.RESTClientForConfigAndClient(config, h)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Client{client: restClient}, nil
+}
+
+type client struct {
+ client *Client
+ namespace string
+ resource schema.GroupVersionResource
+}
+
+// Resource returns an interface that can access cluster or namespace
+// scoped instances of resource.
+func (c *Client) Resource(resource schema.GroupVersionResource) Getter {
+ return &client{client: c, resource: resource}
+}
+
+// Namespace returns an interface that can access namespace-scoped instances of the
+// provided resource.
+func (c *client) Namespace(ns string) ResourceInterface {
+ ret := *c
+ ret.namespace = ns
+ return &ret
+}
+
+// Delete removes the provided resource from the server.
+func (c *client) Delete(ctx context.Context, name string, opts metav1.DeleteOptions, subresources ...string) error {
+ if len(name) == 0 {
+ return fmt.Errorf("name is required")
+ }
+ // if DeleteOptions are delivered to Negotiator for serialization,
+ // HTTP-Request header will bring "Content-Type: application/vnd.kubernetes.protobuf"
+ // apiextensions-apiserver uses unstructuredNegotiatedSerializer to decode the input,
+ // server-side will reply with 406 errors.
+ // The special treatment here is to be compatible with CRD Handler
+ // see: https://github.com/kubernetes/kubernetes/blob/1a845ccd076bbf1b03420fe694c85a5cd3bd6bed/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go#L843
+ deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts)
+ if err != nil {
+ return err
+ }
+
+ result := c.client.client.
+ Delete().
+ AbsPath(append(c.makeURLSegments(name), subresources...)...).
+ SetHeader("Content-Type", runtime.ContentTypeJSON).
+ Body(deleteOptionsByte).
+ Do(ctx)
+ return result.Error()
+}
+
+// DeleteCollection triggers deletion of all resources in the specified scope (namespace or cluster).
+func (c *client) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+ // See comment on Delete
+ deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts)
+ if err != nil {
+ return err
+ }
+
+ result := c.client.client.
+ Delete().
+ AbsPath(c.makeURLSegments("")...).
+ SetHeader("Content-Type", runtime.ContentTypeJSON).
+ Body(deleteOptionsByte).
+ SpecificallyVersionedParams(&listOptions, dynamicParameterCodec, versionV1).
+ Do(ctx)
+ return result.Error()
+}
+
+// Get returns the resource with name from the specified scope (namespace or cluster).
+func (c *client) Get(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) {
+ if len(name) == 0 {
+ return nil, fmt.Errorf("name is required")
+ }
+ result := c.client.client.Get().AbsPath(append(c.makeURLSegments(name), subresources...)...).
+ SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json").
+ SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
+ Do(ctx)
+ if err := result.Error(); err != nil {
+ return nil, err
+ }
+ obj, err := result.Get()
+ if runtime.IsNotRegisteredError(err) {
+ klog.V(5).Infof("Unable to retrieve PartialObjectMetadata: %#v", err)
+ rawBytes, err := result.Raw()
+ if err != nil {
+ return nil, err
+ }
+ var partial metav1.PartialObjectMetadata
+ if err := json.Unmarshal(rawBytes, &partial); err != nil {
+ return nil, fmt.Errorf("unable to decode returned object as PartialObjectMetadata: %v", err)
+ }
+ if !isLikelyObjectMetadata(&partial) {
+ return nil, fmt.Errorf("object does not appear to match the ObjectMeta schema: %#v", partial)
+ }
+ partial.TypeMeta = metav1.TypeMeta{}
+ return &partial, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ partial, ok := obj.(*metav1.PartialObjectMetadata)
+ if !ok {
+ return nil, fmt.Errorf("unexpected object, expected PartialObjectMetadata but got %T", obj)
+ }
+ return partial, nil
+}
+
+// List returns all resources within the specified scope (namespace or cluster).
+func (c *client) List(ctx context.Context, opts metav1.ListOptions) (*metav1.PartialObjectMetadataList, error) {
+ result := c.client.client.Get().AbsPath(c.makeURLSegments("")...).
+ SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json").
+ SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
+ Do(ctx)
+ if err := result.Error(); err != nil {
+ return nil, err
+ }
+ obj, err := result.Get()
+ if runtime.IsNotRegisteredError(err) {
+ klog.V(5).Infof("Unable to retrieve PartialObjectMetadataList: %#v", err)
+ rawBytes, err := result.Raw()
+ if err != nil {
+ return nil, err
+ }
+ var partial metav1.PartialObjectMetadataList
+ if err := json.Unmarshal(rawBytes, &partial); err != nil {
+ return nil, fmt.Errorf("unable to decode returned object as PartialObjectMetadataList: %v", err)
+ }
+ partial.TypeMeta = metav1.TypeMeta{}
+ return &partial, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ partial, ok := obj.(*metav1.PartialObjectMetadataList)
+ if !ok {
+ return nil, fmt.Errorf("unexpected object, expected PartialObjectMetadata but got %T", obj)
+ }
+ return partial, nil
+}
+
+// Watch finds all changes to the resources in the specified scope (namespace or cluster).
+func (c *client) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.client.Get().
+ AbsPath(c.makeURLSegments("")...).
+ SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json").
+ SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Patch modifies the named resource in the specified scope (namespace or cluster).
+func (c *client) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) {
+ if len(name) == 0 {
+ return nil, fmt.Errorf("name is required")
+ }
+ result := c.client.client.
+ Patch(pt).
+ AbsPath(append(c.makeURLSegments(name), subresources...)...).
+ Body(data).
+ SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json").
+ SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
+ Do(ctx)
+ if err := result.Error(); err != nil {
+ return nil, err
+ }
+ obj, err := result.Get()
+ if runtime.IsNotRegisteredError(err) {
+ rawBytes, err := result.Raw()
+ if err != nil {
+ return nil, err
+ }
+ var partial metav1.PartialObjectMetadata
+ if err := json.Unmarshal(rawBytes, &partial); err != nil {
+ return nil, fmt.Errorf("unable to decode returned object as PartialObjectMetadata: %v", err)
+ }
+ if !isLikelyObjectMetadata(&partial) {
+ return nil, fmt.Errorf("object does not appear to match the ObjectMeta schema")
+ }
+ partial.TypeMeta = metav1.TypeMeta{}
+ return &partial, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ partial, ok := obj.(*metav1.PartialObjectMetadata)
+ if !ok {
+ return nil, fmt.Errorf("unexpected object, expected PartialObjectMetadata but got %T", obj)
+ }
+ return partial, nil
+}
+
+func (c *client) makeURLSegments(name string) []string {
+ url := []string{}
+ if len(c.resource.Group) == 0 {
+ url = append(url, "api")
+ } else {
+ url = append(url, "apis", c.resource.Group)
+ }
+ url = append(url, c.resource.Version)
+
+ if len(c.namespace) > 0 {
+ url = append(url, "namespaces", c.namespace)
+ }
+ url = append(url, c.resource.Resource)
+
+ if len(name) > 0 {
+ url = append(url, name)
+ }
+
+ return url
+}
+
+func isLikelyObjectMetadata(meta *metav1.PartialObjectMetadata) bool {
+ return len(meta.UID) > 0 || !meta.CreationTimestamp.IsZero() || len(meta.Name) > 0 || len(meta.GenerateName) > 0
+}
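As a usage sketch (again, not part of the vendored file): the metadata client retrieves only ObjectMeta for any resource, which keeps list and watch traffic small. The GVR and namespace below are assumptions for illustration.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/metadata"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes the caller runs inside a cluster
	if err != nil {
		panic(err)
	}
	mc, err := metadata.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
	// Only ObjectMeta travels over the wire for each item.
	objs, err := mc.Resource(gvr).Namespace("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, m := range objs.Items {
		fmt.Println(m.Namespace, m.Name, m.UID)
	}
}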
diff --git a/vendor/k8s.io/client-go/restmapper/category_expansion.go b/vendor/k8s.io/client-go/restmapper/category_expansion.go
new file mode 100644
index 00000000..484e4c83
--- /dev/null
+++ b/vendor/k8s.io/client-go/restmapper/category_expansion.go
@@ -0,0 +1,119 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package restmapper
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/discovery"
+)
+
+// CategoryExpander maps category strings to GroupResources.
+// Categories are classification or 'tag' of a group of resources.
+type CategoryExpander interface {
+ Expand(category string) ([]schema.GroupResource, bool)
+}
+
+// SimpleCategoryExpander implements CategoryExpander interface
+// using a static mapping of categories to GroupResource mapping.
+type SimpleCategoryExpander struct {
+ Expansions map[string][]schema.GroupResource
+}
+
+// Expand fulfills CategoryExpander
+func (e SimpleCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) {
+ ret, ok := e.Expansions[category]
+ return ret, ok
+}
+
+// discoveryCategoryExpander lets a REST client wrapper (discoveryClient) retrieve the list of APIResourceList
+// and build category expansions from it.
+type discoveryCategoryExpander struct {
+ discoveryClient discovery.DiscoveryInterface
+}
+
+// NewDiscoveryCategoryExpander returns a category expander that makes use of the "categories" fields from
+// the API, found through the discovery client. In case of any error, or when no category is found (which
+// likely means we're on a cluster prior to categories support), expansion simply reports no match.
+func NewDiscoveryCategoryExpander(client discovery.DiscoveryInterface) CategoryExpander {
+ if client == nil {
+ panic("Please provide discovery client to shortcut expander")
+ }
+ return discoveryCategoryExpander{discoveryClient: client}
+}
+
+// Expand fulfills CategoryExpander
+func (e discoveryCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) {
+ // Get all supported resources for groups and versions from the server; if none are found, report no match.
+ _, apiResourceLists, _ := e.discoveryClient.ServerGroupsAndResources()
+ if len(apiResourceLists) == 0 {
+ return nil, false
+ }
+
+ discoveredExpansions := map[string][]schema.GroupResource{}
+ for _, apiResourceList := range apiResourceLists {
+ gv, err := schema.ParseGroupVersion(apiResourceList.GroupVersion)
+ if err != nil {
+ continue
+ }
+ // Collect GroupVersions by categories
+ for _, apiResource := range apiResourceList.APIResources {
+ if categories := apiResource.Categories; len(categories) > 0 {
+ for _, category := range categories {
+ groupResource := schema.GroupResource{
+ Group: gv.Group,
+ Resource: apiResource.Name,
+ }
+ discoveredExpansions[category] = append(discoveredExpansions[category], groupResource)
+ }
+ }
+ }
+ }
+
+ ret, ok := discoveredExpansions[category]
+ return ret, ok
+}
+
+// UnionCategoryExpander implements CategoryExpander interface.
+// It maps given category string to union of expansions returned by all the CategoryExpanders in the list.
+type UnionCategoryExpander []CategoryExpander
+
+// Expand fulfills CategoryExpander
+func (u UnionCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) {
+ ret := []schema.GroupResource{}
+ ok := false
+
+ // Expand the category for each CategoryExpander in the list and merge/combine the results.
+ for _, expansion := range u {
+ curr, currOk := expansion.Expand(category)
+
+ for _, currGR := range curr {
+ found := false
+ for _, existing := range ret {
+ if existing == currGR {
+ found = true
+ break
+ }
+ }
+ if !found {
+ ret = append(ret, currGR)
+ }
+ }
+ ok = ok || currOk
+ }
+
+ return ret, ok
+}
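Purely illustrative: a sketch of how a caller could expand a category such as "all" with the discovery-backed expander above. The discovery client construction is assumed and not part of this patch; cfg is a *rest.Config obtained elsewhere.

	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		panic(err)
	}
	expander := restmapper.NewDiscoveryCategoryExpander(dc)
	if grs, ok := expander.Expand("all"); ok {
		for _, gr := range grs {
			fmt.Println(gr.String()) // e.g. "deployments.apps"
		}
	}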
diff --git a/vendor/k8s.io/client-go/restmapper/discovery.go b/vendor/k8s.io/client-go/restmapper/discovery.go
new file mode 100644
index 00000000..3505178b
--- /dev/null
+++ b/vendor/k8s.io/client-go/restmapper/discovery.go
@@ -0,0 +1,338 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package restmapper
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/discovery"
+
+ "k8s.io/klog/v2"
+)
+
+// APIGroupResources is an API group with a mapping of versions to
+// resources.
+type APIGroupResources struct {
+ Group metav1.APIGroup
+ // A mapping of version string to a slice of APIResources for
+ // that version.
+ VersionedResources map[string][]metav1.APIResource
+}
+
+// NewDiscoveryRESTMapper returns a PriorityRESTMapper based on the discovered
+// groups and resources passed in.
+func NewDiscoveryRESTMapper(groupResources []*APIGroupResources) meta.RESTMapper {
+ unionMapper := meta.MultiRESTMapper{}
+
+ var groupPriority []string
+ // /v1 is special. It should always come first
+ resourcePriority := []schema.GroupVersionResource{{Group: "", Version: "v1", Resource: meta.AnyResource}}
+ kindPriority := []schema.GroupVersionKind{{Group: "", Version: "v1", Kind: meta.AnyKind}}
+
+ for _, group := range groupResources {
+ groupPriority = append(groupPriority, group.Group.Name)
+
+ // Make sure the preferred version comes first
+ if len(group.Group.PreferredVersion.Version) != 0 {
+ preferred := group.Group.PreferredVersion.Version
+ if _, ok := group.VersionedResources[preferred]; ok {
+ resourcePriority = append(resourcePriority, schema.GroupVersionResource{
+ Group: group.Group.Name,
+ Version: group.Group.PreferredVersion.Version,
+ Resource: meta.AnyResource,
+ })
+
+ kindPriority = append(kindPriority, schema.GroupVersionKind{
+ Group: group.Group.Name,
+ Version: group.Group.PreferredVersion.Version,
+ Kind: meta.AnyKind,
+ })
+ }
+ }
+
+ for _, discoveryVersion := range group.Group.Versions {
+ resources, ok := group.VersionedResources[discoveryVersion.Version]
+ if !ok {
+ continue
+ }
+
+ // Add non-preferred versions after the preferred version, in case there are resources that only exist in those versions
+ if discoveryVersion.Version != group.Group.PreferredVersion.Version {
+ resourcePriority = append(resourcePriority, schema.GroupVersionResource{
+ Group: group.Group.Name,
+ Version: discoveryVersion.Version,
+ Resource: meta.AnyResource,
+ })
+
+ kindPriority = append(kindPriority, schema.GroupVersionKind{
+ Group: group.Group.Name,
+ Version: discoveryVersion.Version,
+ Kind: meta.AnyKind,
+ })
+ }
+
+ gv := schema.GroupVersion{Group: group.Group.Name, Version: discoveryVersion.Version}
+ versionMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{gv})
+
+ for _, resource := range resources {
+ scope := meta.RESTScopeNamespace
+ if !resource.Namespaced {
+ scope = meta.RESTScopeRoot
+ }
+
+ // if we have a slash, then this is a subresource and we shouldn't create mappings for those.
+ if strings.Contains(resource.Name, "/") {
+ continue
+ }
+
+ plural := gv.WithResource(resource.Name)
+ singular := gv.WithResource(resource.SingularName)
+ // this is for legacy resources and servers which don't list singular forms. For those we must still guess.
+ if len(resource.SingularName) == 0 {
+ _, singular = meta.UnsafeGuessKindToResource(gv.WithKind(resource.Kind))
+ }
+
+ versionMapper.AddSpecific(gv.WithKind(strings.ToLower(resource.Kind)), plural, singular, scope)
+ versionMapper.AddSpecific(gv.WithKind(resource.Kind), plural, singular, scope)
+ // TODO this is producing unsafe guesses that don't actually work, but it matches previous behavior
+ versionMapper.Add(gv.WithKind(resource.Kind+"List"), scope)
+ }
+ // TODO why is this type not in discovery (at least for "v1")
+ versionMapper.Add(gv.WithKind("List"), meta.RESTScopeRoot)
+ unionMapper = append(unionMapper, versionMapper)
+ }
+ }
+
+ for _, group := range groupPriority {
+ resourcePriority = append(resourcePriority, schema.GroupVersionResource{
+ Group: group,
+ Version: meta.AnyVersion,
+ Resource: meta.AnyResource,
+ })
+ kindPriority = append(kindPriority, schema.GroupVersionKind{
+ Group: group,
+ Version: meta.AnyVersion,
+ Kind: meta.AnyKind,
+ })
+ }
+
+ return meta.PriorityRESTMapper{
+ Delegate: unionMapper,
+ ResourcePriority: resourcePriority,
+ KindPriority: kindPriority,
+ }
+}
+
+// GetAPIGroupResources uses the provided discovery client to gather
+// discovery information and populate a slice of APIGroupResources.
+func GetAPIGroupResources(cl discovery.DiscoveryInterface) ([]*APIGroupResources, error) {
+ gs, rs, err := cl.ServerGroupsAndResources()
+ if rs == nil || gs == nil {
+ return nil, err
+ // TODO track the errors and update callers to handle partial errors.
+ }
+ rsm := map[string]*metav1.APIResourceList{}
+ for _, r := range rs {
+ rsm[r.GroupVersion] = r
+ }
+
+ var result []*APIGroupResources
+ for _, group := range gs {
+ groupResources := &APIGroupResources{
+ Group: *group,
+ VersionedResources: make(map[string][]metav1.APIResource),
+ }
+ for _, version := range group.Versions {
+ resources, ok := rsm[version.GroupVersion]
+ if !ok {
+ continue
+ }
+ groupResources.VersionedResources[version.Version] = resources.APIResources
+ }
+ result = append(result, groupResources)
+ }
+ return result, nil
+}
+
+// DeferredDiscoveryRESTMapper is a RESTMapper that will defer
+// initialization of the RESTMapper until the first mapping is
+// requested.
+type DeferredDiscoveryRESTMapper struct {
+ initMu sync.Mutex
+ delegate meta.RESTMapper
+ cl discovery.CachedDiscoveryInterface
+}
+
+// NewDeferredDiscoveryRESTMapper returns a
+// DeferredDiscoveryRESTMapper that will lazily query the provided
+// client for discovery information to do REST mappings.
+func NewDeferredDiscoveryRESTMapper(cl discovery.CachedDiscoveryInterface) *DeferredDiscoveryRESTMapper {
+ return &DeferredDiscoveryRESTMapper{
+ cl: cl,
+ }
+}
+
+func (d *DeferredDiscoveryRESTMapper) getDelegate() (meta.RESTMapper, error) {
+ d.initMu.Lock()
+ defer d.initMu.Unlock()
+
+ if d.delegate != nil {
+ return d.delegate, nil
+ }
+
+ groupResources, err := GetAPIGroupResources(d.cl)
+ if err != nil {
+ return nil, err
+ }
+
+ d.delegate = NewDiscoveryRESTMapper(groupResources)
+ return d.delegate, nil
+}
+
+// Reset resets the internally cached Discovery information and will
+// cause the next mapping request to re-discover.
+func (d *DeferredDiscoveryRESTMapper) Reset() {
+ klog.V(5).Info("Invalidating discovery information")
+
+ d.initMu.Lock()
+ defer d.initMu.Unlock()
+
+ d.cl.Invalidate()
+ d.delegate = nil
+}
+
+// KindFor takes a partial resource and returns back the single match.
+// It returns an error if there are multiple matches.
+func (d *DeferredDiscoveryRESTMapper) KindFor(resource schema.GroupVersionResource) (gvk schema.GroupVersionKind, err error) {
+ del, err := d.getDelegate()
+ if err != nil {
+ return schema.GroupVersionKind{}, err
+ }
+ gvk, err = del.KindFor(resource)
+ if err != nil && !d.cl.Fresh() {
+ d.Reset()
+ gvk, err = d.KindFor(resource)
+ }
+ return
+}
+
+// KindsFor takes a partial resource and returns back the list of
+// potential kinds in priority order.
+func (d *DeferredDiscoveryRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvks []schema.GroupVersionKind, err error) {
+ del, err := d.getDelegate()
+ if err != nil {
+ return nil, err
+ }
+ gvks, err = del.KindsFor(resource)
+ if len(gvks) == 0 && !d.cl.Fresh() {
+ d.Reset()
+ gvks, err = d.KindsFor(resource)
+ }
+ return
+}
+
+// ResourceFor takes a partial resource and returns back the single
+// match. It returns an error if there are multiple matches.
+func (d *DeferredDiscoveryRESTMapper) ResourceFor(input schema.GroupVersionResource) (gvr schema.GroupVersionResource, err error) {
+ del, err := d.getDelegate()
+ if err != nil {
+ return schema.GroupVersionResource{}, err
+ }
+ gvr, err = del.ResourceFor(input)
+ if err != nil && !d.cl.Fresh() {
+ d.Reset()
+ gvr, err = d.ResourceFor(input)
+ }
+ return
+}
+
+// ResourcesFor takes a partial resource and returns back the list of
+// potential resource in priority order.
+func (d *DeferredDiscoveryRESTMapper) ResourcesFor(input schema.GroupVersionResource) (gvrs []schema.GroupVersionResource, err error) {
+ del, err := d.getDelegate()
+ if err != nil {
+ return nil, err
+ }
+ gvrs, err = del.ResourcesFor(input)
+ if len(gvrs) == 0 && !d.cl.Fresh() {
+ d.Reset()
+ gvrs, err = d.ResourcesFor(input)
+ }
+ return
+}
+
+// RESTMapping identifies a preferred resource mapping for the
+// provided group kind.
+func (d *DeferredDiscoveryRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (m *meta.RESTMapping, err error) {
+ del, err := d.getDelegate()
+ if err != nil {
+ return nil, err
+ }
+ m, err = del.RESTMapping(gk, versions...)
+ if err != nil && !d.cl.Fresh() {
+ d.Reset()
+ m, err = d.RESTMapping(gk, versions...)
+ }
+ return
+}
+
+// RESTMappings returns the RESTMappings for the provided group kind
+// in a rough internal preferred order. If no kind is found, it will
+// return a NoResourceMatchError.
+func (d *DeferredDiscoveryRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) (ms []*meta.RESTMapping, err error) {
+ del, err := d.getDelegate()
+ if err != nil {
+ return nil, err
+ }
+ ms, err = del.RESTMappings(gk, versions...)
+ if len(ms) == 0 && !d.cl.Fresh() {
+ d.Reset()
+ ms, err = d.RESTMappings(gk, versions...)
+ }
+ return
+}
+
+// ResourceSingularizer converts a resource name from plural to
+// singular (e.g., from pods to pod).
+func (d *DeferredDiscoveryRESTMapper) ResourceSingularizer(resource string) (singular string, err error) {
+ del, err := d.getDelegate()
+ if err != nil {
+ return resource, err
+ }
+ singular, err = del.ResourceSingularizer(resource)
+ if err != nil && !d.cl.Fresh() {
+ d.Reset()
+ singular, err = d.ResourceSingularizer(resource)
+ }
+ return
+}
+
+func (d *DeferredDiscoveryRESTMapper) String() string {
+ del, err := d.getDelegate()
+ if err != nil {
+ return fmt.Sprintf("DeferredDiscoveryRESTMapper{%v}", err)
+ }
+ return fmt.Sprintf("DeferredDiscoveryRESTMapper{\n\t%v\n}", del)
+}
+
+// Make sure it satisfies the interface
+var _ meta.ResettableRESTMapper = &DeferredDiscoveryRESTMapper{}
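A small sketch of wiring the deferred REST mapper to a cached discovery client. The in-memory cache package used here is an assumption about how a consumer composes these pieces, not something introduced by this patch.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/discovery/cached/memory"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/restmapper"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Discovery results are cached in memory; Reset() invalidates them when a mapping goes stale.
	mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(dc))
	mapping, err := mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"}, "v1")
	if err != nil {
		panic(err)
	}
	fmt.Println(mapping.Resource) // apps/v1, Resource=deployments
}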
diff --git a/vendor/k8s.io/client-go/restmapper/shortcut.go b/vendor/k8s.io/client-go/restmapper/shortcut.go
new file mode 100644
index 00000000..7ab3cd46
--- /dev/null
+++ b/vendor/k8s.io/client-go/restmapper/shortcut.go
@@ -0,0 +1,187 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package restmapper
+
+import (
+ "strings"
+
+ "k8s.io/klog/v2"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/discovery"
+)
+
+// shortcutExpander is a RESTMapper that can be used for Kubernetes resources. It expands the resource first, then invokes the wrapped RESTMapper.
+type shortcutExpander struct {
+ RESTMapper meta.RESTMapper
+
+ discoveryClient discovery.DiscoveryInterface
+}
+
+var _ meta.ResettableRESTMapper = shortcutExpander{}
+
+// NewShortcutExpander wraps a restmapper in a layer that expands shortcuts found via discovery
+func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface) meta.RESTMapper {
+ return shortcutExpander{RESTMapper: delegate, discoveryClient: client}
+}
+
+// KindFor fulfills meta.RESTMapper
+func (e shortcutExpander) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+ // expandResourceShortcut works with current API resources as read from discovery cache.
+ // In case of new CRDs this means we potentially don't have current state of discovery.
+ // In the current wiring in k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go#toRESTMapper,
+ // we are using DeferredDiscoveryRESTMapper which on KindFor failure will clear the
+ // cache and fetch all data from a cluster (see vendor/k8s.io/client-go/restmapper/discovery.go#KindFor).
+// Thus another call to expandResourceShortcut, after a NoMatchError, should successfully
+// return either the Kind or an error.
+ gvk, err := e.RESTMapper.KindFor(e.expandResourceShortcut(resource))
+ if meta.IsNoMatchError(err) {
+ return e.RESTMapper.KindFor(e.expandResourceShortcut(resource))
+ }
+ return gvk, err
+}
+
+// KindsFor fulfills meta.RESTMapper
+func (e shortcutExpander) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
+ return e.RESTMapper.KindsFor(e.expandResourceShortcut(resource))
+}
+
+// ResourcesFor fulfills meta.RESTMapper
+func (e shortcutExpander) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
+ return e.RESTMapper.ResourcesFor(e.expandResourceShortcut(resource))
+}
+
+// ResourceFor fulfills meta.RESTMapper
+func (e shortcutExpander) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+ return e.RESTMapper.ResourceFor(e.expandResourceShortcut(resource))
+}
+
+// ResourceSingularizer fulfills meta.RESTMapper
+func (e shortcutExpander) ResourceSingularizer(resource string) (string, error) {
+ return e.RESTMapper.ResourceSingularizer(e.expandResourceShortcut(schema.GroupVersionResource{Resource: resource}).Resource)
+}
+
+// RESTMapping fulfills meta.RESTMapper
+func (e shortcutExpander) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) {
+ return e.RESTMapper.RESTMapping(gk, versions...)
+}
+
+// RESTMappings fulfills meta.RESTMapper
+func (e shortcutExpander) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) {
+ return e.RESTMapper.RESTMappings(gk, versions...)
+}
+
+// getShortcutMappings returns a set of tuples which hold the short names for resources.
+// The list of potential resources is read from the API server via discovery.
+// NOTE that the list is ordered by group priority.
+func (e shortcutExpander) getShortcutMappings() ([]*metav1.APIResourceList, []resourceShortcuts, error) {
+ res := []resourceShortcuts{}
+ // get server resources
+ // This can return an error *and* the results it was able to find. We don't need to fail on the error.
+ _, apiResList, err := e.discoveryClient.ServerGroupsAndResources()
+ if err != nil {
+ klog.V(1).Infof("Error loading discovery information: %v", err)
+ }
+ for _, apiResources := range apiResList {
+ gv, err := schema.ParseGroupVersion(apiResources.GroupVersion)
+ if err != nil {
+ klog.V(1).Infof("Unable to parse groupversion = %s due to = %s", apiResources.GroupVersion, err.Error())
+ continue
+ }
+ for _, apiRes := range apiResources.APIResources {
+ for _, shortName := range apiRes.ShortNames {
+ rs := resourceShortcuts{
+ ShortForm: schema.GroupResource{Group: gv.Group, Resource: shortName},
+ LongForm: schema.GroupResource{Group: gv.Group, Resource: apiRes.Name},
+ }
+ res = append(res, rs)
+ }
+ }
+ }
+
+ return apiResList, res, nil
+}
+
+// expandResourceShortcut will return the expanded version of resource
+// (something that a pkg/api/meta.RESTMapper can understand), if it is
+// indeed a shortcut. If no match has been found, we will match on group prefixing.
+// Lastly we will return resource unmodified.
+func (e shortcutExpander) expandResourceShortcut(resource schema.GroupVersionResource) schema.GroupVersionResource {
+ // get the shortcut mappings and return on first match.
+ if allResources, shortcutResources, err := e.getShortcutMappings(); err == nil {
+ // avoid expanding if there's an exact match to a full resource name
+ for _, apiResources := range allResources {
+ gv, err := schema.ParseGroupVersion(apiResources.GroupVersion)
+ if err != nil {
+ continue
+ }
+ if len(resource.Group) != 0 && resource.Group != gv.Group {
+ continue
+ }
+ for _, apiRes := range apiResources.APIResources {
+ if resource.Resource == apiRes.Name {
+ return resource
+ }
+ if resource.Resource == apiRes.SingularName {
+ return resource
+ }
+ }
+ }
+
+ for _, item := range shortcutResources {
+ if len(resource.Group) != 0 && resource.Group != item.ShortForm.Group {
+ continue
+ }
+ if resource.Resource == item.ShortForm.Resource {
+ resource.Resource = item.LongForm.Resource
+ resource.Group = item.LongForm.Group
+ return resource
+ }
+ }
+
+ // we didn't find an exact match, so match on group prefixing. This allows a prefix like "autoscal" to match the "autoscaling" group
+ if len(resource.Group) == 0 {
+ return resource
+ }
+ for _, item := range shortcutResources {
+ if !strings.HasPrefix(item.ShortForm.Group, resource.Group) {
+ continue
+ }
+ if resource.Resource == item.ShortForm.Resource {
+ resource.Resource = item.LongForm.Resource
+ resource.Group = item.LongForm.Group
+ return resource
+ }
+ }
+ }
+
+ return resource
+}
+
+func (e shortcutExpander) Reset() {
+ meta.MaybeResetRESTMapper(e.RESTMapper)
+}
+
+// resourceShortcuts represents a structure that holds the information on how to
+// transition from a resource's shortcut to its full name.
+type resourceShortcuts struct {
+ ShortForm schema.GroupResource
+ LongForm schema.GroupResource
+}
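A minimal usage sketch for the shortcut expander above, assuming a kubeconfig-based rest.Config; the kmodules.xyz/client-go/restmapper import path and the surrounding wiring are illustrative assumptions, not part of this change:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/runtime/schema"
        "k8s.io/client-go/discovery"
        "k8s.io/client-go/restmapper"
        "k8s.io/client-go/tools/clientcmd"

        kmrestmapper "kmodules.xyz/client-go/restmapper" // assumed import path for the package above
    )

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
        if err != nil {
            panic(err)
        }
        dc, err := discovery.NewDiscoveryClientForConfig(cfg)
        if err != nil {
            panic(err)
        }
        // Build a discovery-backed RESTMapper and wrap it so short names
        // (e.g. "deploy") are expanded before the delegate is consulted.
        groups, err := restmapper.GetAPIGroupResources(dc)
        if err != nil {
            panic(err)
        }
        mapper := kmrestmapper.NewShortcutExpander(restmapper.NewDiscoveryRESTMapper(groups), dc)

        gvk, err := mapper.KindFor(schema.GroupVersionResource{Resource: "deploy"})
        if err != nil {
            panic(err)
        }
        fmt.Println(gvk) // typically apps/v1, Kind=Deployment
    }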
diff --git a/vendor/kmodules.xyz/client-go/Makefile b/vendor/kmodules.xyz/client-go/Makefile
index 441dd38e..a57078a0 100644
--- a/vendor/kmodules.xyz/client-go/Makefile
+++ b/vendor/kmodules.xyz/client-go/Makefile
@@ -18,8 +18,10 @@ GO_PKG := kmodules.xyz
REPO := $(notdir $(shell pwd))
BIN := client-go
+CRD_OPTIONS ?= "crd:maxDescLen=0,generateEmbeddedObjectMeta=true,allowDangerousTypes=true"
# https://github.com/appscodelabs/gengo-builder
CODE_GENERATOR_IMAGE ?= ghcr.io/appscode/gengo:release-1.25
+API_GROUPS ?= management:v1alpha1
# This version-strategy uses git tags to set the version string
git_branch := $(shell git rev-parse --abbrev-ref HEAD)
@@ -45,7 +47,7 @@ endif
### These variables should not need tweaking.
###
-SRC_PKGS := admissionregistration api apiextensions apiregistration apps batch certificates client core discovery dynamic extensions meta networking openapi policy rbac storage tools
+SRC_PKGS := admissionregistration api apis apiextensions apiregistration apps batch certificates client cluster conditions core discovery dynamic extensions meta networking openapi policy rbac storage tools
SRC_DIRS := $(SRC_PKGS) *.go
DOCKER_PLATFORMS := linux/amd64 linux/arm linux/arm64
@@ -113,9 +115,23 @@ clientset:
--env HTTPS_PROXY=$(HTTPS_PROXY) \
$(CODE_GENERATOR_IMAGE) \
deepcopy-gen \
- --go-header-file "./hack/license/go.txt" \
- --input-dirs "$(GO_PKG)/$(REPO)/api/v1" \
+ --go-header-file "./hack/license/go.txt" \
+ --input-dirs "$(GO_PKG)/$(REPO)/api/v1" \
--output-file-base zz_generated.deepcopy
+ @docker run --rm \
+ -u $$(id -u):$$(id -g) \
+ -v /tmp:/.cache \
+ -v $$(pwd):$(DOCKER_REPO_ROOT) \
+ -w $(DOCKER_REPO_ROOT) \
+ --env HTTP_PROXY=$(HTTP_PROXY) \
+ --env HTTPS_PROXY=$(HTTPS_PROXY) \
+ $(CODE_GENERATOR_IMAGE) \
+ /go/src/k8s.io/code-generator/generate-groups.sh \
+ deepcopy \
+ $(GO_PKG)/$(REPO)/client \
+ $(GO_PKG)/$(REPO)/apis \
+ "$(API_GROUPS)" \
+ --go-header-file "./hack/license/go.txt"
# Generate openapi schema
.PHONY: openapi
@@ -154,7 +170,7 @@ gen-crd-protos:
--packages=-k8s.io/api/core/v1,kmodules.xyz/client-go/api/v1
.PHONY: gen-enum
-gen-enum:
+gen-enum: $(BUILD_DIRS)
@docker run \
-i \
--rm \
@@ -171,8 +187,28 @@ gen-enum:
go generate ./api/... \
"
+# Generate CRD manifests
+.PHONY: gen-crds
+gen-crds:
+ @echo "Generating CRD manifests"
+ @docker run --rm \
+ -u $$(id -u):$$(id -g) \
+ -v /tmp:/.cache \
+ -v $$(pwd):$(DOCKER_REPO_ROOT) \
+ -w $(DOCKER_REPO_ROOT) \
+ --env HTTP_PROXY=$(HTTP_PROXY) \
+ --env HTTPS_PROXY=$(HTTPS_PROXY) \
+ $(CODE_GENERATOR_IMAGE) \
+ controller-gen \
+ $(CRD_OPTIONS) \
+ paths="./apis/..." \
+ output:crd:artifacts:config=crds
+
+.PHONY: manifests
+manifests: gen-crds
+
.PHONY: gen
-gen: clientset gen-enum openapi gen-crd-protos
+gen: clientset manifests gen-enum # openapi gen-crd-protos
fmt: $(BUILD_DIRS)
@docker run \
diff --git a/vendor/kmodules.xyz/client-go/api/v1/certificates.go b/vendor/kmodules.xyz/client-go/api/v1/certificates.go
new file mode 100644
index 00000000..c72d55df
--- /dev/null
+++ b/vendor/kmodules.xyz/client-go/api/v1/certificates.go
@@ -0,0 +1,284 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "reflect"
+
+ "github.com/imdario/mergo"
+ core "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
+type TLSConfig struct {
+ // IssuerRef is a reference to a Certificate Issuer.
+ // +optional
+ IssuerRef *core.TypedLocalObjectReference `json:"issuerRef,omitempty" protobuf:"bytes,1,opt,name=issuerRef"`
+
+ // Certificate provides server and/or client certificate options used by application pods.
+ // These options are passed to a cert-manager Certificate object.
+ // xref: https://github.com/jetstack/cert-manager/blob/v0.16.0/pkg/apis/certmanager/v1beta1/types_certificate.go#L82-L162
+ // +optional
+ Certificates []CertificateSpec `json:"certificates,omitempty" protobuf:"bytes,2,rep,name=certificates"`
+}
+
+type CertificateSpec struct {
+ // Alias represents the identifier of the certificate.
+ Alias string `json:"alias" protobuf:"bytes,1,opt,name=alias"`
+
+ // IssuerRef is a reference to a Certificate Issuer.
+ // +optional
+ IssuerRef *core.TypedLocalObjectReference `json:"issuerRef,omitempty" protobuf:"bytes,2,opt,name=issuerRef"`
+
+ // Specifies the k8s secret name that holds the certificates.
+ // Default to <resource-name>-<cert-alias>-cert.
+ // +optional
+ SecretName string `json:"secretName,omitempty" protobuf:"bytes,3,opt,name=secretName"`
+
+ // Full X509 name specification (https://golang.org/pkg/crypto/x509/pkix/#Name).
+ // +optional
+ Subject *X509Subject `json:"subject,omitempty" protobuf:"bytes,4,opt,name=subject"`
+
+ // Certificate default Duration
+ // +optional
+ Duration *metav1.Duration `json:"duration,omitempty" protobuf:"bytes,5,opt,name=duration"`
+
+ // Certificate renew before expiration duration
+ // +optional
+ RenewBefore *metav1.Duration `json:"renewBefore,omitempty" protobuf:"bytes,6,opt,name=renewBefore"`
+
+ // DNSNames is a list of subject alt names to be used on the Certificate.
+ // +optional
+ DNSNames []string `json:"dnsNames,omitempty" protobuf:"bytes,7,rep,name=dnsNames"`
+
+ // IPAddresses is a list of IP addresses to be used on the Certificate
+ // +optional
+ IPAddresses []string `json:"ipAddresses,omitempty" protobuf:"bytes,8,rep,name=ipAddresses"`
+
+ // URIs is a list of URI subjectAltNames to be set on the Certificate.
+ // +optional
+ URIs []string `json:"uris,omitempty" protobuf:"bytes,9,rep,name=uris"`
+
+ // EmailAddresses is a list of email subjectAltNames to be set on the Certificate.
+ // +optional
+ EmailAddresses []string `json:"emailAddresses,omitempty" protobuf:"bytes,10,rep,name=emailAddresses"`
+
+ // Options to control private keys used for the Certificate.
+ // +optional
+ PrivateKey *CertificatePrivateKey `json:"privateKey,omitempty" protobuf:"bytes,11,opt,name=privateKey"`
+}
+
+// X509Subject Full X509 name specification
+type X509Subject struct {
+ // Organizations to be used on the Certificate.
+ // +optional
+ Organizations []string `json:"organizations,omitempty" protobuf:"bytes,1,rep,name=organizations"`
+ // Countries to be used on the CertificateSpec.
+ // +optional
+ Countries []string `json:"countries,omitempty" protobuf:"bytes,2,rep,name=countries"`
+ // Organizational Units to be used on the CertificateSpec.
+ // +optional
+ OrganizationalUnits []string `json:"organizationalUnits,omitempty" protobuf:"bytes,3,rep,name=organizationalUnits"`
+ // Cities to be used on the CertificateSpec.
+ // +optional
+ Localities []string `json:"localities,omitempty" protobuf:"bytes,4,rep,name=localities"`
+ // State/Provinces to be used on the CertificateSpec.
+ // +optional
+ Provinces []string `json:"provinces,omitempty" protobuf:"bytes,5,rep,name=provinces"`
+ // Street addresses to be used on the CertificateSpec.
+ // +optional
+ StreetAddresses []string `json:"streetAddresses,omitempty" protobuf:"bytes,6,rep,name=streetAddresses"`
+ // Postal codes to be used on the CertificateSpec.
+ // +optional
+ PostalCodes []string `json:"postalCodes,omitempty" protobuf:"bytes,7,rep,name=postalCodes"`
+ // Serial number to be used on the CertificateSpec.
+ // +optional
+ SerialNumber string `json:"serialNumber,omitempty" protobuf:"bytes,8,opt,name=serialNumber"`
+}
+
+// +kubebuilder:validation:Enum=PKCS1;PKCS8
+type PrivateKeyEncoding string
+
+const (
+ // PKCS1 key encoding will produce PEM files that include the type of
+ // private key as part of the PEM header, e.g. "BEGIN RSA PRIVATE KEY".
+ // If the keyAlgorithm is set to 'ECDSA', this will produce private keys
+ // that use the "BEGIN EC PRIVATE KEY" header.
+ PKCS1 PrivateKeyEncoding = "PKCS1"
+
+ // PKCS8 key encoding will produce PEM files with the "BEGIN PRIVATE KEY"
+ // header. It encodes the keyAlgorithm of the private key as part of the
+ // DER encoded PEM block.
+ PKCS8 PrivateKeyEncoding = "PKCS8"
+)
+
+// CertificatePrivateKey contains configuration options for private keys
+// used by the Certificate controller.
+// This allows control of how private keys are rotated.
+type CertificatePrivateKey struct {
+ // The private key cryptography standards (PKCS) encoding for this
+ // certificate's private key to be encoded in.
+ // If provided, allowed values are "pkcs1" and "pkcs8" standing for PKCS#1
+ // and PKCS#8, respectively.
+ // Defaults to PKCS#1 if not specified.
+ // See here for the difference between the formats: https://stackoverflow.com/a/48960291
+ // +optional
+ Encoding PrivateKeyEncoding `json:"encoding,omitempty" protobuf:"bytes,1,opt,name=encoding,casttype=PrivateKeyEncoding"`
+}
+
+// HasCertificate returns true if the certificate referred to by "alias" is present in the certificate list.
+// Otherwise, it returns false.
+func HasCertificate(certificates []CertificateSpec, alias string) bool {
+ for i := range certificates {
+ if certificates[i].Alias == alias {
+ return true
+ }
+ }
+ return false
+}
+
+// GetCertificate returns the index and a pointer to the desired certificate referred to by "alias". If not found, it returns -1 and nil.
+func GetCertificate(certificates []CertificateSpec, alias string) (int, *CertificateSpec) {
+ for i := range certificates {
+ c := certificates[i]
+ if c.Alias == alias {
+ return i, &c
+ }
+ }
+ return -1, nil
+}
+
+// SetCertificate adds or updates the desired certificate in the certificate list.
+func SetCertificate(certificates []CertificateSpec, newCertificate CertificateSpec) []CertificateSpec {
+ idx, _ := GetCertificate(certificates, newCertificate.Alias)
+ if idx != -1 {
+ certificates[idx] = newCertificate
+ } else {
+ certificates = append(certificates, newCertificate)
+ }
+ return certificates
+}
+
+// GetCertificateSecretName returns the name of the secret for a certificate alias.
+func GetCertificateSecretName(certificates []CertificateSpec, alias string) (string, bool) {
+ idx, cert := GetCertificate(certificates, alias)
+ if idx == -1 {
+ return "", false
+ }
+ return cert.SecretName, cert.SecretName != ""
+}
+
+// SetMissingSpecForCertificate sets the missing spec fields for a certificate.
+// If the certificate does not exist, it will add a new certificate with the desired spec.
+func SetMissingSpecForCertificate(certificates []CertificateSpec, spec CertificateSpec) []CertificateSpec {
+ idx, _ := GetCertificate(certificates, spec.Alias)
+ if idx != -1 {
+ err := mergo.Merge(&certificates[idx], spec, mergo.WithTransformers(stringSetMerger{}))
+ if err != nil {
+ panic(err)
+ }
+ } else {
+ certificates = append(certificates, spec)
+ }
+ return certificates
+}
+
+// SetSpecForCertificate sets the spec for a certificate.
+// If the certificate does not exist, it will add a new certificate with the desired spec.
+// Otherwise, the spec will be overwritten.
+func SetSpecForCertificate(certificates []CertificateSpec, spec CertificateSpec) []CertificateSpec {
+ idx, _ := GetCertificate(certificates, spec.Alias)
+ if idx != -1 {
+ certificates[idx] = spec
+ } else {
+ certificates = append(certificates, spec)
+ }
+ return certificates
+}
+
+// SetMissingSecretNameForCertificate sets the missing secret name for a certificate.
+// If the certificate does not exist, it will add a new certificate with the desired secret name.
+func SetMissingSecretNameForCertificate(certificates []CertificateSpec, alias, secretName string) []CertificateSpec {
+ idx, _ := GetCertificate(certificates, alias)
+ if idx != -1 {
+ if certificates[idx].SecretName == "" {
+ certificates[idx].SecretName = secretName
+ }
+ } else {
+ certificates = append(certificates, CertificateSpec{
+ Alias: alias,
+ SecretName: secretName,
+ })
+ }
+ return certificates
+}
+
+// SetSecretNameForCertificate sets the secret name for a certificate.
+// If the certificate does not exist, it will add a new certificate with the desired secret name.
+// Otherwise, the secret name will be overwritten.
+func SetSecretNameForCertificate(certificates []CertificateSpec, alias, secretName string) []CertificateSpec {
+ idx, _ := GetCertificate(certificates, alias)
+ if idx != -1 {
+ certificates[idx].SecretName = secretName
+ } else {
+ certificates = append(certificates, CertificateSpec{
+ Alias: alias,
+ SecretName: secretName,
+ })
+ }
+ return certificates
+}
+
+// RemoveCertificate removes the certificate referred to by the "alias" parameter from the certificate list.
+func RemoveCertificate(certificates []CertificateSpec, alias string) []CertificateSpec {
+ idx, _ := GetCertificate(certificates, alias)
+ if idx == -1 {
+ // The desired certificate is not present in the certificate list. So, nothing to do.
+ return certificates
+ }
+ return append(certificates[:idx], certificates[idx+1:]...)
+}
+
+type stringSetMerger struct{}
+
+func (t stringSetMerger) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+ if typ == reflect.TypeOf([]string{}) {
+ return func(dst, src reflect.Value) error {
+ if dst.CanSet() {
+ if dst.Len() <= 1 && src.Len() == 0 {
+ return nil
+ }
+ if dst.Len() == 0 && src.Len() == 1 {
+ dst.Set(src)
+ return nil
+ }
+
+ out := sets.NewString()
+ for i := 0; i < dst.Len(); i++ {
+ out.Insert(dst.Index(i).String())
+ }
+ for i := 0; i < src.Len(); i++ {
+ out.Insert(src.Index(i).String())
+ }
+ dst.Set(reflect.ValueOf(out.List()))
+ }
+ return nil
+ }
+ }
+ return nil
+}
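A short sketch of how the certificate helpers above compose; the alias and secret names are made up for illustration:

    package main

    import (
        "fmt"

        kmapi "kmodules.xyz/client-go/api/v1"
    )

    func main() {
        var certs []kmapi.CertificateSpec

        // Add (or overwrite) the "server" certificate entry.
        certs = kmapi.SetCertificate(certs, kmapi.CertificateSpec{
            Alias:    "server",
            DNSNames: []string{"db.example.com"},
        })

        // Fill in the secret name only if the user has not already set one.
        certs = kmapi.SetMissingSecretNameForCertificate(certs, "server", "my-db-server-cert")

        if name, ok := kmapi.GetCertificateSecretName(certs, "server"); ok {
            fmt.Println("server cert secret:", name) // my-db-server-cert
        }

        // Remove the entry again.
        certs = kmapi.RemoveCertificate(certs, "server")
        fmt.Println(kmapi.HasCertificate(certs, "server")) // false
    }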
diff --git a/vendor/kmodules.xyz/client-go/api/v1/cluster.go b/vendor/kmodules.xyz/client-go/api/v1/cluster.go
new file mode 100644
index 00000000..064d3838
--- /dev/null
+++ b/vendor/kmodules.xyz/client-go/api/v1/cluster.go
@@ -0,0 +1,102 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import "strings"
+
+// +kubebuilder:validation:Enum=Aws;Azure;DigitalOcean;GoogleCloud;Linode;Packet;Scaleway;Vultr;BareMetal;KIND;Generic;Private
+type HostingProvider string
+
+const (
+ HostingProviderAWS HostingProvider = "Aws"
+ HostingProviderAzure HostingProvider = "Azure"
+ HostingProviderDigitalOcean HostingProvider = "DigitalOcean"
+ HostingProviderGoogleCloud HostingProvider = "GoogleCloud"
+ HostingProviderLinode HostingProvider = "Linode"
+ HostingProviderPacket HostingProvider = "Packet"
+ HostingProviderScaleway HostingProvider = "Scaleway"
+ HostingProviderVultr HostingProvider = "Vultr"
+ HostingProviderBareMetal HostingProvider = "BareMetal"
+ HostingProviderKIND HostingProvider = "KIND"
+ HostingProviderGeneric HostingProvider = "Generic"
+ HostingProviderPrivate HostingProvider = "Private"
+)
+
+const (
+ ClusterNameKey string = "cluster.appscode.com/name"
+ ClusterDisplayNameKey string = "cluster.appscode.com/display-name"
+ ClusterProviderNameKey string = "cluster.appscode.com/provider"
+)
+
+type ClusterMetadata struct {
+ UID string `json:"uid" protobuf:"bytes,1,opt,name=uid"`
+ Name string `json:"name,omitempty" protobuf:"bytes,2,opt,name=name"`
+ DisplayName string `json:"displayName,omitempty" protobuf:"bytes,3,opt,name=displayName"`
+ Provider HostingProvider `json:"provider,omitempty" protobuf:"bytes,4,opt,name=provider,casttype=HostingProvider"`
+}
+
+type ClusterManager int
+
+const (
+ ClusterManagerACE ClusterManager = 1 << iota
+ ClusterManagerOCM
+ ClusterManagerRancher
+ ClusterManagerOpenShift
+)
+
+func (cm ClusterManager) ManagedByACE() bool {
+ return cm&ClusterManagerACE == ClusterManagerACE
+}
+
+func (cm ClusterManager) ManagedByOCM() bool {
+ return cm&ClusterManagerOCM == ClusterManagerOCM
+}
+
+func (cm ClusterManager) ManagedByRancher() bool {
+ return cm&ClusterManagerRancher == ClusterManagerRancher
+}
+
+func (cm ClusterManager) ManagedByOpenShift() bool {
+ return cm&ClusterManagerOpenShift == ClusterManagerOpenShift
+}
+
+func (cm ClusterManager) Strings() []string {
+ out := make([]string, 0, 4)
+ if cm.ManagedByACE() {
+ out = append(out, "ACE")
+ }
+ if cm.ManagedByOCM() {
+ out = append(out, "OCM")
+ }
+ if cm.ManagedByRancher() {
+ out = append(out, "Rancher")
+ }
+ if cm.ManagedByOpenShift() {
+ out = append(out, "OpenShift")
+ }
+ return out
+}
+
+func (cm ClusterManager) String() string {
+ return strings.Join(cm.Strings(), ",")
+}
+
+type CAPIClusterInfo struct {
+ Provider string `json:"provider,omitempty"`
+ Namespace string `json:"namespace,omitempty"`
+ ClusterName string `json:"clusterName,omitempty"`
+}
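ClusterManager is a bit mask, so a cluster managed by several systems is represented by OR-ing the constants. A small sketch of deriving the string form (the kind of value that feeds a clusterManagers status field):

    package main

    import (
        "fmt"

        kmapi "kmodules.xyz/client-go/api/v1"
    )

    func main() {
        // A cluster imported into ACE that is also an OCM managed cluster.
        cm := kmapi.ClusterManagerACE | kmapi.ClusterManagerOCM

        fmt.Println(cm.ManagedByACE())     // true
        fmt.Println(cm.ManagedByRancher()) // false
        fmt.Println(cm.Strings())          // [ACE OCM]
        fmt.Println(cm.String())           // ACE,OCM
    }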
diff --git a/vendor/kmodules.xyz/client-go/api/v1/conditions.go b/vendor/kmodules.xyz/client-go/api/v1/conditions.go
new file mode 100644
index 00000000..148531c8
--- /dev/null
+++ b/vendor/kmodules.xyz/client-go/api/v1/conditions.go
@@ -0,0 +1,88 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ConditionSeverity expresses the severity of a Condition Type failing.
+type ConditionSeverity string
+
+const (
+ // ConditionSeverityError specifies that a condition with `Status=False` is an error.
+ ConditionSeverityError ConditionSeverity = "Error"
+
+ // ConditionSeverityWarning specifies that a condition with `Status=False` is a warning.
+ ConditionSeverityWarning ConditionSeverity = "Warning"
+
+ // ConditionSeverityInfo specifies that a condition with `Status=False` is informative.
+ ConditionSeverityInfo ConditionSeverity = "Info"
+
+ // ConditionSeverityNone should apply only to conditions with `Status=True`.
+ ConditionSeverityNone ConditionSeverity = ""
+)
+
+// ConditionType is a valid value for Condition.Type.
+type ConditionType string
+
+const (
+ // ReadyCondition defines the Ready condition type that summarizes the operational state of an object.
+ ReadyCondition ConditionType = "Ready"
+)
+
+// Condition defines an observation of an object's operational state.
+type Condition struct {
+ // Type of condition in CamelCase or in foo.example.com/CamelCase.
+ // Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ // can be useful (see .node.status.conditions), the ability to deconflict is important.
+ Type ConditionType `json:"type" protobuf:"bytes,4,opt,name=type,casttype=ConditionType"`
+
+ // Status of the condition, one of True, False, Unknown.
+ Status metav1.ConditionStatus `json:"status" protobuf:"bytes,5,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+
+ // If set, this represents the .metadata.generation that the condition was set based upon.
+ // For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date
+ // with respect to the current state of the instance.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
+
+ // Severity provides an explicit classification of the Reason code, so users or machines can immediately
+ // understand the current situation and act accordingly.
+ // The Severity field MUST be set only when Status=False.
+ // +optional
+ Severity ConditionSeverity `json:"severity,omitempty" protobuf:"bytes,6,opt,name=severity,casttype=ConditionSeverity"`
+
+ // Last time the condition transitioned from one status to another.
+ // This should be when the underlying condition changed. If that is not known, then using the time when
+ // the API field changed is acceptable.
+ LastTransitionTime metav1.Time `json:"lastTransitionTime" protobuf:"bytes,7,opt,name=lastTransitionTime"`
+
+ // The reason for the condition's last transition in CamelCase.
+ // The specific API may choose whether this field is considered a guaranteed API.
+ // This field may not be empty.
+ // +optional
+ Reason string `json:"reason,omitempty" protobuf:"bytes,8,opt,name=reason"`
+
+ // A human-readable message indicating details about the transition.
+ // This field may be empty.
+ // +optional
+ Message string `json:"message,omitempty" protobuf:"bytes,9,opt,name=message"`
+}
+
+// Conditions provide observations of the operational state of an object.
+type Conditions []Condition
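For completeness, a minimal sketch of constructing a Ready condition with these types (the reason and message strings are illustrative):

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        kmapi "kmodules.xyz/client-go/api/v1"
    )

    func main() {
        cond := kmapi.Condition{
            Type:               kmapi.ReadyCondition,
            Status:             metav1.ConditionTrue,
            Severity:           kmapi.ConditionSeverityNone, // Severity is meaningful only when Status=False
            LastTransitionTime: metav1.Now(),
            Reason:             "WorkloadReady",
            Message:            "all replicas are available",
            ObservedGeneration: 1,
        }
        conds := kmapi.Conditions{cond}
        fmt.Println(conds[0].Type, conds[0].Status) // Ready True
    }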
diff --git a/vendor/kmodules.xyz/client-go/api/v1/doc.go b/vendor/kmodules.xyz/client-go/api/v1/doc.go
new file mode 100644
index 00000000..9fbacac4
--- /dev/null
+++ b/vendor/kmodules.xyz/client-go/api/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+// +gencrdrefdocs:force=true
+package v1 // import "kmodules.xyz/client-go/api/v1"
diff --git a/vendor/kmodules.xyz/client-go/api/v1/generated.pb.go b/vendor/kmodules.xyz/client-go/api/v1/generated.pb.go
new file mode 100644
index 00000000..afb78575
--- /dev/null
+++ b/vendor/kmodules.xyz/client-go/api/v1/generated.pb.go
@@ -0,0 +1,4970 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: kmodules.xyz/client-go/api/v1/generated.proto
+
+package v1
+
+import (
+ fmt "fmt"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+
+ proto "github.com/gogo/protobuf/proto"
+ v1 "k8s.io/api/core/v1"
+ k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ v11 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *CertificatePrivateKey) Reset() { *m = CertificatePrivateKey{} }
+func (*CertificatePrivateKey) ProtoMessage() {}
+func (*CertificatePrivateKey) Descriptor() ([]byte, []int) {
+ return fileDescriptor_af8e7a11c7a1ccd9, []int{0}
+}
+func (m *CertificatePrivateKey) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CertificatePrivateKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CertificatePrivateKey) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CertificatePrivateKey.Merge(m, src)
+}
+func (m *CertificatePrivateKey) XXX_Size() int {
+ return m.Size()
+}
+func (m *CertificatePrivateKey) XXX_DiscardUnknown() {
+ xxx_messageInfo_CertificatePrivateKey.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CertificatePrivateKey proto.InternalMessageInfo
+
+func (m *CertificateSpec) Reset() { *m = CertificateSpec{} }
+func (*CertificateSpec) ProtoMessage() {}
+func (*CertificateSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_af8e7a11c7a1ccd9, []int{1}
+}
+func (m *CertificateSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CertificateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CertificateSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CertificateSpec.Merge(m, src)
+}
+func (m *CertificateSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *CertificateSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_CertificateSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CertificateSpec proto.InternalMessageInfo
+
+func (m *ClusterMetadata) Reset() { *m = ClusterMetadata{} }
+func (*ClusterMetadata) ProtoMessage() {}
+func (*ClusterMetadata) Descriptor() ([]byte, []int) {
+ return fileDescriptor_af8e7a11c7a1ccd9, []int{2}
+}
+func (m *ClusterMetadata) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ClusterMetadata) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterMetadata.Merge(m, src)
+}
+func (m *ClusterMetadata) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterMetadata) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterMetadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterMetadata proto.InternalMessageInfo
+
+func (m *Condition) Reset() { *m = Condition{} }
+func (*Condition) ProtoMessage() {}
+func (*Condition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_af8e7a11c7a1ccd9, []int{3}
+}
+func (m *Condition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Condition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Condition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Condition.Merge(m, src)
+}
+func (m *Condition) XXX_Size() int {
+ return m.Size()
+}
+func (m *Condition) XXX_DiscardUnknown() {
+ xxx_messageInfo_Condition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Condition proto.InternalMessageInfo
+
+func (m *HealthCheckSpec) Reset() { *m = HealthCheckSpec{} }
+func (*HealthCheckSpec) ProtoMessage() {}
+func (*HealthCheckSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_af8e7a11c7a1ccd9, []int{4}
+}
+func (m *HealthCheckSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *HealthCheckSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *HealthCheckSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HealthCheckSpec.Merge(m, src)
+}
+func (m *HealthCheckSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *HealthCheckSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_HealthCheckSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HealthCheckSpec proto.InternalMessageInfo
+
+func (m *ImageInfo) Reset() { *m = ImageInfo{} }
+func (*ImageInfo) ProtoMessage() {}
+func (*ImageInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_af8e7a11c7a1ccd9, []int{5}
+}
+func (m *ImageInfo) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageInfo.Merge(m, src)
+}
+func (m *ImageInfo) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageInfo proto.InternalMessageInfo
+
+func (m *Lineage) Reset() { *m = Lineage{} }
+func (*Lineage) ProtoMessage() {}
+func (*Lineage) Descriptor() ([]byte, []int) {
+ return fileDescriptor_af8e7a11c7a1ccd9, []int{6}
+}
+func (m *Lineage) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Lineage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Lineage) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Lineage.Merge(m, src)
+}
+func (m *Lineage) XXX_Size() int {
+ return m.Size()
+}
+func (m *Lineage) XXX_DiscardUnknown() {
+ xxx_messageInfo_Lineage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Lineage proto.InternalMessageInfo
+
+func (m *ObjectID) Reset() { *m = ObjectID{} }
+func (*ObjectID) ProtoMessage() {}
+func (*ObjectID) Descriptor() ([]byte, []int) {
+ return fileDescriptor_af8e7a11c7a1ccd9, []int{7}
+}
+func (m *ObjectID) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ObjectID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ObjectID) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ObjectID.Merge(m, src)
+}
+func (m *ObjectID) XXX_Size() int {
+ return m.Size()
+}
+func (m *ObjectID) XXX_DiscardUnknown() {
+ xxx_messageInfo_ObjectID.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ObjectID proto.InternalMessageInfo
+
+func (m *ObjectInfo) Reset() { *m = ObjectInfo{} }
+func (*ObjectInfo) ProtoMessage() {}
+func (*ObjectInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_af8e7a11c7a1ccd9, []int{8}
+}
+func (m *ObjectInfo) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ObjectInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ObjectInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ObjectInfo.Merge(m, src)
+}
+func (m *ObjectInfo) XXX_Size() int {
+ return m.Size()
+}
+func (m *ObjectInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_ObjectInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ObjectInfo proto.InternalMessageInfo
+
+func (m *ObjectReference) Reset() { *m = ObjectReference{} }
+func (*ObjectReference) ProtoMessage() {}
+func (*ObjectReference) Descriptor() ([]byte, []int) {
+ return fileDescriptor_af8e7a11c7a1ccd9, []int{9}
+}
+func (m *ObjectReference) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ObjectReference) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ObjectReference.Merge(m, src)
+}
+func (m *ObjectReference) XXX_Size() int {
+ return m.Size()
+}
+func (m *ObjectReference) XXX_DiscardUnknown() {
+ xxx_messageInfo_ObjectReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ObjectReference proto.InternalMessageInfo
+
+func (m *PullCredentials) Reset() { *m = PullCredentials{} }
+func (*PullCredentials) ProtoMessage() {}
+func (*PullCredentials) Descriptor() ([]byte, []int) {
+ return fileDescriptor_af8e7a11c7a1ccd9, []int{10}
+}
+func (m *PullCredentials) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PullCredentials) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *PullCredentials) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PullCredentials.Merge(m, src)
+}
+func (m *PullCredentials) XXX_Size() int {
+ return m.Size()
+}
+func (m *PullCredentials) XXX_DiscardUnknown() {
+ xxx_messageInfo_PullCredentials.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PullCredentials proto.InternalMessageInfo
+
+func (m *ReadonlyHealthCheckSpec) Reset() { *m = ReadonlyHealthCheckSpec{} }
+func (*ReadonlyHealthCheckSpec) ProtoMessage() {}
+func (*ReadonlyHealthCheckSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_af8e7a11c7a1ccd9, []int{11}
+}
+func (m *ReadonlyHealthCheckSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ReadonlyHealthCheckSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ReadonlyHealthCheckSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ReadonlyHealthCheckSpec.Merge(m, src)
+}
+func (m *ReadonlyHealthCheckSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ReadonlyHealthCheckSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ReadonlyHealthCheckSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReadonlyHealthCheckSpec proto.InternalMessageInfo
+
+func (m *ResourceID) Reset() { *m = ResourceID{} }
+func (*ResourceID) ProtoMessage() {}
+func (*ResourceID) Descriptor() ([]byte, []int) {
+ return fileDescriptor_af8e7a11c7a1ccd9, []int{12}
+}
+func (m *ResourceID) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResourceID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ResourceID) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourceID.Merge(m, src)
+}
+func (m *ResourceID) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResourceID) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourceID.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceID proto.InternalMessageInfo
+
+func (m *TLSConfig) Reset() { *m = TLSConfig{} }
+func (*TLSConfig) ProtoMessage() {}
+func (*TLSConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_af8e7a11c7a1ccd9, []int{13}
+}
+func (m *TLSConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TLSConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *TLSConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TLSConfig.Merge(m, src)
+}
+func (m *TLSConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *TLSConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_TLSConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TLSConfig proto.InternalMessageInfo
+
+func (m *TimeOfDay) Reset() { *m = TimeOfDay{} }
+func (*TimeOfDay) ProtoMessage() {}
+func (*TimeOfDay) Descriptor() ([]byte, []int) {
+ return fileDescriptor_af8e7a11c7a1ccd9, []int{14}
+}
+func (m *TimeOfDay) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_TimeOfDay.Unmarshal(m, b)
+}
+func (m *TimeOfDay) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_TimeOfDay.Marshal(b, m, deterministic)
+}
+func (m *TimeOfDay) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TimeOfDay.Merge(m, src)
+}
+func (m *TimeOfDay) XXX_Size() int {
+ return xxx_messageInfo_TimeOfDay.Size(m)
+}
+func (m *TimeOfDay) XXX_DiscardUnknown() {
+ xxx_messageInfo_TimeOfDay.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TimeOfDay proto.InternalMessageInfo
+
+func (m *TypedObjectReference) Reset() { *m = TypedObjectReference{} }
+func (*TypedObjectReference) ProtoMessage() {}
+func (*TypedObjectReference) Descriptor() ([]byte, []int) {
+ return fileDescriptor_af8e7a11c7a1ccd9, []int{15}
+}
+func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TypedObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *TypedObjectReference) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TypedObjectReference.Merge(m, src)
+}
+func (m *TypedObjectReference) XXX_Size() int {
+ return m.Size()
+}
+func (m *TypedObjectReference) XXX_DiscardUnknown() {
+ xxx_messageInfo_TypedObjectReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo
+
+func (m *X509Subject) Reset() { *m = X509Subject{} }
+func (*X509Subject) ProtoMessage() {}
+func (*X509Subject) Descriptor() ([]byte, []int) {
+ return fileDescriptor_af8e7a11c7a1ccd9, []int{16}
+}
+func (m *X509Subject) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *X509Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *X509Subject) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_X509Subject.Merge(m, src)
+}
+func (m *X509Subject) XXX_Size() int {
+ return m.Size()
+}
+func (m *X509Subject) XXX_DiscardUnknown() {
+ xxx_messageInfo_X509Subject.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_X509Subject proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*CertificatePrivateKey)(nil), "kmodules.xyz.client_go.api.v1.CertificatePrivateKey")
+ proto.RegisterType((*CertificateSpec)(nil), "kmodules.xyz.client_go.api.v1.CertificateSpec")
+ proto.RegisterType((*ClusterMetadata)(nil), "kmodules.xyz.client_go.api.v1.ClusterMetadata")
+ proto.RegisterType((*Condition)(nil), "kmodules.xyz.client_go.api.v1.Condition")
+ proto.RegisterType((*HealthCheckSpec)(nil), "kmodules.xyz.client_go.api.v1.HealthCheckSpec")
+ proto.RegisterType((*ImageInfo)(nil), "kmodules.xyz.client_go.api.v1.ImageInfo")
+ proto.RegisterType((*Lineage)(nil), "kmodules.xyz.client_go.api.v1.Lineage")
+ proto.RegisterType((*ObjectID)(nil), "kmodules.xyz.client_go.api.v1.ObjectID")
+ proto.RegisterType((*ObjectInfo)(nil), "kmodules.xyz.client_go.api.v1.ObjectInfo")
+ proto.RegisterType((*ObjectReference)(nil), "kmodules.xyz.client_go.api.v1.ObjectReference")
+ proto.RegisterType((*PullCredentials)(nil), "kmodules.xyz.client_go.api.v1.PullCredentials")
+ proto.RegisterType((*ReadonlyHealthCheckSpec)(nil), "kmodules.xyz.client_go.api.v1.ReadonlyHealthCheckSpec")
+ proto.RegisterType((*ResourceID)(nil), "kmodules.xyz.client_go.api.v1.ResourceID")
+ proto.RegisterType((*TLSConfig)(nil), "kmodules.xyz.client_go.api.v1.TLSConfig")
+ proto.RegisterType((*TimeOfDay)(nil), "kmodules.xyz.client_go.api.v1.TimeOfDay")
+ proto.RegisterType((*TypedObjectReference)(nil), "kmodules.xyz.client_go.api.v1.TypedObjectReference")
+ proto.RegisterType((*X509Subject)(nil), "kmodules.xyz.client_go.api.v1.X509Subject")
+}
+
+func init() {
+ proto.RegisterFile("kmodules.xyz/client-go/api/v1/generated.proto", fileDescriptor_af8e7a11c7a1ccd9)
+}
+
+var fileDescriptor_af8e7a11c7a1ccd9 = []byte{
+ // 1728 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x58, 0xcf, 0x6f, 0x23, 0x49,
+ 0x15, 0x76, 0xc7, 0x4e, 0xe2, 0x2e, 0x27, 0xe3, 0x49, 0xcd, 0xa0, 0x31, 0x23, 0xc6, 0x1d, 0xbc,
+ 0x62, 0x94, 0x01, 0xa6, 0x4d, 0x46, 0xb3, 0xb0, 0xac, 0x84, 0x20, 0xed, 0xec, 0x66, 0xbd, 0x9b,
+ 0x49, 0x42, 0x39, 0xc3, 0xae, 0x16, 0x04, 0xaa, 0x74, 0x3f, 0x3b, 0x45, 0xda, 0xdd, 0xad, 0xaa,
+ 0x6e, 0x83, 0xf7, 0xb4, 0x47, 0xb8, 0x2d, 0x37, 0x8e, 0x3b, 0x12, 0x7f, 0x02, 0x17, 0xfe, 0x02,
+ 0x46, 0x9c, 0x86, 0x0b, 0xda, 0x03, 0xb2, 0x18, 0x73, 0x45, 0x5c, 0x10, 0x12, 0xca, 0x09, 0x55,
+ 0xf5, 0x4f, 0x3b, 0xce, 0x24, 0x8b, 0x90, 0xf6, 0xe6, 0xfe, 0xde, 0xf7, 0xbe, 0xaa, 0xae, 0x7a,
+ 0xbf, 0xda, 0xe8, 0xe1, 0xd9, 0xd0, 0x77, 0x22, 0x17, 0x84, 0xf9, 0xcb, 0xf1, 0x47, 0x6d, 0xdb,
+ 0x65, 0xe0, 0x85, 0x0f, 0x07, 0x7e, 0x9b, 0x06, 0xac, 0x3d, 0xda, 0x6e, 0x0f, 0xc0, 0x03, 0x4e,
+ 0x43, 0x70, 0xcc, 0x80, 0xfb, 0xa1, 0x8f, 0xef, 0x15, 0xe9, 0x66, 0x4c, 0xff, 0xd9, 0xc0, 0x37,
+ 0x69, 0xc0, 0xcc, 0xd1, 0xf6, 0xdd, 0x87, 0x03, 0x16, 0x9e, 0x46, 0x27, 0xa6, 0xed, 0x0f, 0xdb,
+ 0x03, 0x7f, 0xe0, 0xb7, 0x95, 0xd7, 0x49, 0xd4, 0x57, 0x4f, 0xea, 0x41, 0xfd, 0x8a, 0xd5, 0xee,
+ 0xb6, 0xce, 0xde, 0x10, 0x26, 0x8b, 0x17, 0xb3, 0x7d, 0x0e, 0x0b, 0x56, 0xbc, 0xfb, 0x38, 0xe7,
+ 0x0c, 0xa9, 0x7d, 0xca, 0x3c, 0xe0, 0xe3, 0x76, 0x70, 0x36, 0x90, 0x80, 0x68, 0x0f, 0x21, 0xa4,
+ 0x0b, 0xbc, 0x5a, 0x3f, 0x46, 0x5f, 0xea, 0x00, 0x0f, 0x59, 0x9f, 0xd9, 0x34, 0x84, 0x23, 0xce,
+ 0x46, 0x34, 0x84, 0xf7, 0x60, 0x8c, 0x2d, 0x54, 0x05, 0xcf, 0xf6, 0x1d, 0xe6, 0x0d, 0x1a, 0xda,
+ 0xa6, 0xb6, 0xa5, 0x5b, 0xf7, 0x9f, 0x4f, 0x8c, 0xd2, 0x74, 0x62, 0x54, 0xdf, 0x4a, 0xf0, 0xf3,
+ 0x89, 0x81, 0x73, 0x8f, 0x14, 0x25, 0x99, 0x5f, 0xeb, 0x5f, 0xcb, 0xa8, 0x5e, 0x50, 0xef, 0x05,
+ 0x60, 0xe3, 0xd7, 0xd0, 0x32, 0x75, 0x19, 0x15, 0x89, 0xe8, 0x7a, 0x22, 0xba, 0xbc, 0x23, 0x41,
+ 0x12, 0xdb, 0xf0, 0x87, 0x48, 0x67, 0x42, 0x44, 0xc0, 0x09, 0xf4, 0x1b, 0x4b, 0x9b, 0xda, 0x56,
+ 0xed, 0xd1, 0x43, 0x33, 0x7e, 0x3f, 0x75, 0x82, 0xf2, 0x0c, 0xcc, 0xd1, 0xb6, 0x79, 0x3c, 0x0e,
+ 0xc0, 0xd9, 0xf7, 0x6d, 0xea, 0x1e, 0x9e, 0xfc, 0x1c, 0xec, 0x90, 0x40, 0x1f, 0x38, 0x78, 0x36,
+ 0x58, 0xeb, 0xd3, 0x89, 0xa1, 0x77, 0x53, 0x0d, 0x92, 0xcb, 0xe1, 0x47, 0x08, 0x09, 0xb0, 0x39,
+ 0x84, 0x07, 0x74, 0x08, 0x8d, 0xb2, 0xda, 0x05, 0x4e, 0x76, 0x81, 0x7a, 0x99, 0x85, 0x14, 0x58,
+ 0xf8, 0x87, 0x68, 0x55, 0x44, 0x6a, 0x85, 0x46, 0x45, 0xed, 0xe6, 0xeb, 0xe6, 0x2b, 0xef, 0xd7,
+ 0xfc, 0xe0, 0xf5, 0x6f, 0x7d, 0xb7, 0x17, 0x7b, 0x58, 0xb5, 0xe9, 0xc4, 0x58, 0x4d, 0x1e, 0x48,
+ 0xaa, 0x83, 0x3f, 0x40, 0x55, 0x27, 0xe2, 0x34, 0x64, 0xbe, 0xd7, 0x58, 0x56, 0x9a, 0x66, 0xe1,
+ 0x0d, 0xb3, 0x1b, 0x34, 0x83, 0xb3, 0x81, 0x04, 0x84, 0x29, 0x6f, 0x50, 0x4a, 0xef, 0x26, 0x5e,
+ 0xd6, 0x9a, 0xbc, 0x8b, 0xf4, 0x89, 0x64, 0x6a, 0x98, 0xa2, 0x1a, 0x07, 0x0f, 0x7e, 0x61, 0x41,
+ 0xdf, 0xe7, 0xd0, 0x58, 0xf9, 0x9f, 0xc4, 0xeb, 0xd3, 0x89, 0x51, 0x23, 0xb9, 0x0c, 0x29, 0x6a,
+ 0xe2, 0x2d, 0x54, 0x75, 0x3c, 0x21, 0x8f, 0x46, 0x34, 0x56, 0x37, 0xcb, 0x5b, 0x7a, 0xb2, 0x99,
+ 0x83, 0x9e, 0xc2, 0x48, 0x66, 0xc5, 0xdb, 0xa8, 0xc6, 0x82, 0x1d, 0xc7, 0xe1, 0x20, 0x04, 0x88,
+ 0x46, 0x55, 0x91, 0x95, 0x78, 0xf7, 0x28, 0x83, 0x49, 0x91, 0x83, 0xbf, 0x82, 0x2a, 0x11, 0x67,
+ 0xa2, 0xa1, 0x2b, 0x6e, 0x75, 0x3a, 0x31, 0x2a, 0x4f, 0x49, 0x57, 0x10, 0x85, 0xe2, 0x37, 0xd1,
+ 0x0d, 0x18, 0x52, 0xe6, 0xe6, 0x9a, 0x48, 0xf1, 0xf0, 0x74, 0x62, 0xdc, 0x78, 0x6b, 0xc6, 0x42,
+ 0xe6, 0x98, 0xd8, 0x41, 0x28, 0xc8, 0xe2, 0xb5, 0x51, 0x53, 0x07, 0xf3, 0xf8, 0x8a, 0x9b, 0x5c,
+ 0x98, 0x1d, 0xd6, 0x0d, 0x19, 0x2c, 0xf9, 0x33, 0x29, 0xe8, 0xb6, 0xfe, 0xa4, 0xa1, 0x7a, 0xc7,
+ 0x8d, 0x44, 0x08, 0xfc, 0x09, 0x84, 0xd4, 0xa1, 0x21, 0xc5, 0xf7, 0x50, 0x39, 0x62, 0x4e, 0x12,
+ 0xf3, 0xb5, 0x24, 0xda, 0xca, 0x4f, 0xbb, 0xbb, 0x44, 0xe2, 0x78, 0x13, 0x55, 0x3c, 0x19, 0x8d,
+ 0x4b, 0xca, 0xbe, 0x96, 0xd8, 0x2b, 0x2a, 0x0e, 0x95, 0x05, 0xbf, 0x8e, 0x6a, 0x0e, 0x13, 0x81,
+ 0x4b, 0xc7, 0x85, 0xb0, 0xbd, 0x95, 0x10, 0x6b, 0xbb, 0xb9, 0x89, 0x14, 0x79, 0xf8, 0xfb, 0xa8,
+ 0x1a, 0x70, 0x7f, 0xc4, 0x1c, 0xe0, 0x2a, 0x72, 0x75, 0xeb, 0xb5, 0x34, 0x8b, 0x8f, 0x12, 0xfc,
+ 0x7c, 0x62, 0xd4, 0xdf, 0xf1, 0x45, 0xc8, 0xbc, 0x41, 0x0a, 0x91, 0xcc, 0xa9, 0xf5, 0x9f, 0x32,
+ 0xd2, 0x3b, 0xbe, 0xe7, 0x30, 0x15, 0x5a, 0xdb, 0xa8, 0x12, 0x8e, 0x03, 0x48, 0xa4, 0xee, 0xa5,
+ 0xfb, 0x94, 0x69, 0x78, 0x3e, 0x31, 0xd6, 0x33, 0xa2, 0x04, 0x88, 0xa2, 0xe2, 0x9f, 0xa2, 0x15,
+ 0x11, 0xd2, 0x30, 0x12, 0x2a, 0xca, 0x75, 0xeb, 0xed, 0xc4, 0x69, 0xa5, 0xa7, 0xd0, 0xf3, 0x89,
+ 0x71, 0xad, 0xc2, 0x65, 0x66, 0xda, 0xb1, 0x1f, 0x49, 0x54, 0xf1, 0xbb, 0x08, 0xfb, 0x27, 0x02,
+ 0xf8, 0x08, 0x9c, 0xbd, 0xb8, 0xb6, 0xc9, 0x8c, 0x92, 0xe7, 0x53, 0xb6, 0xee, 0x26, 0x6b, 0xe1,
+ 0xc3, 0x0b, 0x0c, 0xb2, 0xc0, 0x0b, 0xef, 0xa0, 0xaa, 0x80, 0x11, 0x70, 0x16, 0x8e, 0x55, 0xda,
+ 0xe8, 0xd6, 0xd7, 0xd2, 0xd3, 0xea, 0x25, 0xf8, 0xf9, 0xc4, 0xd8, 0xc8, 0xb7, 0x92, 0x80, 0x24,
+ 0x73, 0xc3, 0x23, 0x84, 0x5d, 0x2a, 0xc2, 0x63, 0x4e, 0x3d, 0x11, 0x1f, 0x05, 0x1b, 0x42, 0x63,
+ 0x35, 0x2d, 0x1a, 0xd7, 0xc9, 0x41, 0xe9, 0x91, 0x6f, 0x7d, 0xff, 0x82, 0x1a, 0x59, 0xb0, 0x02,
+ 0xbe, 0x8f, 0x56, 0x38, 0x50, 0xe1, 0x7b, 0x8d, 0xaa, 0xda, 0xf8, 0x8d, 0xf4, 0x98, 0x89, 0x42,
+ 0x49, 0x62, 0xc5, 0x0f, 0xd0, 0xea, 0x10, 0x84, 0xa0, 0x03, 0x68, 0xe8, 0x8a, 0x58, 0x4f, 0x88,
+ 0xab, 0x4f, 0x62, 0x98, 0xa4, 0xf6, 0xd6, 0x3f, 0x35, 0x54, 0x7f, 0x07, 0xa8, 0x1b, 0x9e, 0x76,
+ 0x4e, 0xc1, 0x3e, 0x53, 0xd5, 0xfb, 0x37, 0x1a, 0xba, 0xc3, 0x81, 0x3a, 0xbe, 0xe7, 0x8e, 0xe7,
+ 0x6c, 0x2a, 0xb8, 0x6b, 0x8f, 0xbe, 0x7d, 0x45, 0x3e, 0x91, 0xc5, 0xde, 0x96, 0x91, 0xec, 0xe3,
+ 0xce, 0x25, 0x04, 0x72, 0xd9, 0xba, 0x78, 0x0f, 0x6d, 0x38, 0x4c, 0xd0, 0x13, 0x17, 0xde, 0xe7,
+ 0x2c, 0x04, 0x65, 0x50, 0x99, 0x54, 0xb5, 0xbe, 0x9c, 0x88, 0x6e, 0xec, 0xce, 0x13, 0xc8, 0x45,
+ 0x9f, 0xd6, 0xbf, 0x35, 0xa4, 0x77, 0x87, 0x74, 0x00, 0x5d, 0xaf, 0xef, 0xcb, 0x46, 0xc5, 0xe4,
+ 0xc3, 0x7c, 0xa3, 0x52, 0x0c, 0x12, 0xdb, 0xf0, 0x31, 0xaa, 0xba, 0xcc, 0x03, 0x3a, 0x00, 0xd1,
+ 0x58, 0xda, 0x2c, 0x6f, 0xd5, 0x1e, 0xdd, 0xbf, 0xe2, 0xfd, 0xf7, 0x63, 0xba, 0x75, 0x33, 0x8d,
+ 0xac, 0x04, 0x10, 0x24, 0x53, 0xc2, 0x43, 0x54, 0x0f, 0x22, 0xd7, 0xed, 0x70, 0x70, 0xc0, 0x0b,
+ 0x19, 0x75, 0x85, 0x0a, 0x68, 0x55, 0xc5, 0x5f, 0x29, 0x7e, 0x34, 0xeb, 0x65, 0xdd, 0x9a, 0x4e,
+ 0x8c, 0xfa, 0x1c, 0x48, 0xe6, 0xb5, 0x5b, 0xbf, 0xd6, 0xd0, 0x6a, 0xb2, 0x0b, 0x7c, 0x80, 0x96,
+ 0xed, 0x53, 0xca, 0xbc, 0x86, 0xa6, 0xde, 0xe6, 0xc1, 0x15, 0x0b, 0xc6, 0x6d, 0x57, 0x9e, 0x57,
+ 0x7e, 0x40, 0x1d, 0xe9, 0x4f, 0x62, 0x19, 0x6c, 0x22, 0x64, 0xfb, 0x5e, 0x48, 0x65, 0xac, 0xc7,
+ 0x47, 0xa4, 0xc7, 0xc5, 0xb3, 0x93, 0xa1, 0xa4, 0xc0, 0x68, 0xfd, 0x4e, 0x43, 0xd5, 0x44, 0x74,
+ 0x57, 0x5e, 0xc1, 0x80, 0xfb, 0x51, 0x30, 0x7f, 0x05, 0x7b, 0x12, 0x24, 0xb1, 0x4d, 0xd6, 0xce,
+ 0x33, 0xe6, 0x39, 0xf3, 0xb5, 0xf3, 0x3d, 0xe6, 0x39, 0x44, 0x59, 0x70, 0x1b, 0xe9, 0xb2, 0x86,
+ 0x8a, 0x80, 0xda, 0x69, 0xe5, 0xdc, 0x48, 0x68, 0xfa, 0x41, 0x6a, 0x20, 0x39, 0x27, 0x2b, 0xc7,
+ 0x95, 0xcb, 0xca, 0x71, 0xeb, 0xf7, 0x1a, 0x42, 0xf9, 0xbb, 0xe3, 0xf7, 0x51, 0x95, 0x83, 0xf0,
+ 0x23, 0x6e, 0x43, 0x92, 0x06, 0x0f, 0xae, 0x4c, 0x83, 0x98, 0xde, 0xdd, 0xcd, 0x23, 0x21, 0xc5,
+ 0x48, 0x26, 0x86, 0x9f, 0xa0, 0x32, 0xcf, 0x46, 0x20, 0xf3, 0x5a, 0x97, 0x91, 0xcf, 0x40, 0x59,
+ 0x9f, 0x91, 0x13, 0x90, 0xd4, 0x69, 0x39, 0xa8, 0x3e, 0x47, 0x9a, 0x3d, 0x1c, 0xed, 0x73, 0x1c,
+ 0xce, 0xa5, 0xbd, 0xaa, 0xf5, 0x0f, 0x0d, 0xcd, 0x07, 0xdd, 0xe7, 0x5f, 0xe6, 0x5d, 0x84, 0x65,
+ 0x7d, 0x66, 0x36, 0xec, 0xd8, 0xb6, 0x1f, 0x79, 0xf1, 0xb8, 0x16, 0x2f, 0x9a, 0x15, 0xc7, 0xde,
+ 0x05, 0x06, 0x59, 0xe0, 0x85, 0x7f, 0x92, 0x8e, 0x7c, 0x04, 0xfa, 0x32, 0x95, 0x64, 0x64, 0x6f,
+ 0x2d, 0x9a, 0x27, 0x17, 0x8e, 0x92, 0x73, 0xc3, 0xa1, 0xd4, 0x20, 0x05, 0xbd, 0xd6, 0x0b, 0x0d,
+ 0x5d, 0x56, 0xb4, 0xf0, 0x77, 0xd0, 0x7a, 0x00, 0x9c, 0xf9, 0x4e, 0x0f, 0x6c, 0xdf, 0x73, 0xe2,
+ 0xa9, 0x77, 0xd9, 0xda, 0x98, 0x4e, 0x8c, 0xf5, 0xa3, 0xa2, 0x81, 0xcc, 0xf2, 0xe4, 0x98, 0x13,
+ 0xb2, 0x21, 0xf8, 0x51, 0x98, 0x7a, 0x2e, 0x29, 0x4f, 0x35, 0xe6, 0x1c, 0xcf, 0x58, 0xc8, 0x1c,
+ 0x13, 0xff, 0x00, 0xdd, 0xec, 0x53, 0xe6, 0x46, 0x1c, 0x8e, 0x4f, 0x39, 0x88, 0x53, 0xdf, 0x75,
+ 0x54, 0xd8, 0x2f, 0x5b, 0xb7, 0xa7, 0x13, 0xe3, 0xe6, 0xdb, 0x73, 0x36, 0x72, 0x81, 0xdd, 0xfa,
+ 0x8b, 0x86, 0x50, 0x1e, 0xa1, 0xd7, 0xcb, 0xc3, 0x07, 0x68, 0x75, 0x04, 0x5c, 0xc8, 0xee, 0xbb,
+ 0x34, 0xdb, 0x59, 0x7e, 0x14, 0xc3, 0x24, 0xb5, 0x67, 0x21, 0x54, 0xbe, 0x74, 0xdc, 0x49, 0x93,
+ 0xba, 0x72, 0x69, 0x52, 0x3f, 0x46, 0xcb, 0xc2, 0xf6, 0x03, 0x48, 0xc6, 0x8a, 0x66, 0xba, 0xa7,
+ 0x9e, 0x04, 0xe5, 0x30, 0x92, 0xee, 0x5f, 0x01, 0x24, 0x26, 0xb7, 0xfe, 0xac, 0x21, 0xfd, 0x78,
+ 0xbf, 0xd7, 0xf1, 0xbd, 0x3e, 0x1b, 0xcc, 0x7e, 0x66, 0x68, 0xff, 0xdf, 0xcf, 0x8c, 0x53, 0xb4,
+ 0x66, 0xe7, 0xa3, 0x63, 0xda, 0x1d, 0xcc, 0xeb, 0x4f, 0x9b, 0xaa, 0x2b, 0xde, 0x4e, 0x5e, 0x6b,
+ 0xad, 0x60, 0x10, 0x64, 0x46, 0xb9, 0xf5, 0x55, 0xa4, 0xcb, 0x80, 0x38, 0xec, 0xef, 0xd2, 0xf1,
+ 0x9b, 0xb7, 0x7f, 0xfb, 0xa9, 0x51, 0xfa, 0xd5, 0x33, 0xa3, 0xf4, 0xc9, 0x33, 0xa3, 0xf4, 0xe9,
+ 0x33, 0xa3, 0xf4, 0xf1, 0x5f, 0x37, 0x4b, 0xad, 0x3f, 0x68, 0xe8, 0xb6, 0x7a, 0x89, 0xf9, 0xec,
+ 0xff, 0x26, 0xaa, 0xd2, 0x80, 0xed, 0x15, 0x2e, 0x37, 0xab, 0x46, 0x3b, 0x47, 0xdd, 0xf8, 0x7e,
+ 0x33, 0xc6, 0x17, 0x53, 0x6a, 0xff, 0x58, 0x46, 0xb5, 0xc2, 0xe7, 0x94, 0x4c, 0x29, 0x9f, 0x0f,
+ 0xa8, 0xc7, 0x3e, 0x52, 0x43, 0x9b, 0x50, 0x9d, 0x4a, 0x8f, 0x53, 0xea, 0xb0, 0x68, 0x20, 0xb3,
+ 0x3c, 0xfc, 0x0d, 0xa4, 0xab, 0x92, 0xc0, 0x19, 0xa4, 0x9d, 0x48, 0x5d, 0x5f, 0x27, 0x05, 0x49,
+ 0x6e, 0xc7, 0x5d, 0x74, 0xab, 0xe8, 0x4d, 0xdd, 0xa7, 0x1e, 0x0b, 0xe3, 0xda, 0xa1, 0x5b, 0x77,
+ 0xa6, 0x13, 0xe3, 0xd6, 0xe1, 0x45, 0x33, 0x59, 0xe4, 0x23, 0x5b, 0xa0, 0x2b, 0x63, 0x87, 0x85,
+ 0x72, 0xe1, 0x4a, 0xde, 0x02, 0xf7, 0x33, 0x94, 0x14, 0x18, 0x72, 0x9f, 0x6a, 0xfc, 0xf6, 0x6c,
+ 0x90, 0x43, 0x73, 0xb6, 0xcf, 0xa3, 0x14, 0x24, 0xb9, 0x1d, 0x7f, 0x0f, 0xd5, 0x45, 0xc8, 0x01,
+ 0xc2, 0xfc, 0x7b, 0x68, 0x45, 0xb9, 0xa8, 0xd6, 0xdf, 0x9b, 0x35, 0x91, 0x79, 0xae, 0xfc, 0x3c,
+ 0x0b, 0x7c, 0x11, 0x52, 0xb7, 0xe3, 0x3b, 0xd9, 0xb7, 0x9c, 0xfa, 0x3c, 0x3b, 0xca, 0x61, 0x52,
+ 0xe4, 0xe0, 0x37, 0xd0, 0x9a, 0x00, 0xce, 0xa8, 0x7b, 0x10, 0x0d, 0x4f, 0x80, 0x27, 0xf3, 0x66,
+ 0x16, 0xa8, 0xbd, 0x82, 0x8d, 0xcc, 0x30, 0xad, 0xce, 0xf3, 0x97, 0xcd, 0xd2, 0x8b, 0x97, 0xcd,
+ 0xd2, 0x67, 0x2f, 0x9b, 0xa5, 0x8f, 0xa7, 0x4d, 0xed, 0xf9, 0xb4, 0xa9, 0xbd, 0x98, 0x36, 0xb5,
+ 0xcf, 0xa6, 0x4d, 0xed, 0x6f, 0xd3, 0xa6, 0xf6, 0xc9, 0xdf, 0x9b, 0xa5, 0x0f, 0xef, 0xbd, 0xf2,
+ 0x8f, 0x96, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xe4, 0x6b, 0xda, 0x16, 0x88, 0x11, 0x00, 0x00,
+}
+
+func (m *CertificatePrivateKey) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CertificatePrivateKey) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CertificatePrivateKey) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Encoding)
+ copy(dAtA[i:], m.Encoding)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Encoding)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *CertificateSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CertificateSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CertificateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.PrivateKey != nil {
+ {
+ size, err := m.PrivateKey.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x5a
+ }
+ if len(m.EmailAddresses) > 0 {
+ for iNdEx := len(m.EmailAddresses) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.EmailAddresses[iNdEx])
+ copy(dAtA[i:], m.EmailAddresses[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.EmailAddresses[iNdEx])))
+ i--
+ dAtA[i] = 0x52
+ }
+ }
+ if len(m.URIs) > 0 {
+ for iNdEx := len(m.URIs) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.URIs[iNdEx])
+ copy(dAtA[i:], m.URIs[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.URIs[iNdEx])))
+ i--
+ dAtA[i] = 0x4a
+ }
+ }
+ if len(m.IPAddresses) > 0 {
+ for iNdEx := len(m.IPAddresses) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.IPAddresses[iNdEx])
+ copy(dAtA[i:], m.IPAddresses[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPAddresses[iNdEx])))
+ i--
+ dAtA[i] = 0x42
+ }
+ }
+ if len(m.DNSNames) > 0 {
+ for iNdEx := len(m.DNSNames) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.DNSNames[iNdEx])
+ copy(dAtA[i:], m.DNSNames[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSNames[iNdEx])))
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ if m.RenewBefore != nil {
+ {
+ size, err := m.RenewBefore.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.Duration != nil {
+ {
+ size, err := m.Duration.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.Subject != nil {
+ {
+ size, err := m.Subject.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ i -= len(m.SecretName)
+ copy(dAtA[i:], m.SecretName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName)))
+ i--
+ dAtA[i] = 0x1a
+ if m.IssuerRef != nil {
+ {
+ size, err := m.IssuerRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Alias)
+ copy(dAtA[i:], m.Alias)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Alias)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterMetadata) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterMetadata) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Provider)
+ copy(dAtA[i:], m.Provider)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provider)))
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.DisplayName)
+ copy(dAtA[i:], m.DisplayName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DisplayName)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.UID)
+ copy(dAtA[i:], m.UID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Condition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Condition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Condition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x4a
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x42
+ {
+ size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ i -= len(m.Severity)
+ copy(dAtA[i:], m.Severity)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Severity)))
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0x22
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x18
+ return len(dAtA) - i, nil
+}
+
+func (m *HealthCheckSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HealthCheckSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HealthCheckSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.DisableWriteCheck {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ {
+ size, err := m.ReadonlyHealthCheckSpec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageInfo) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageInfo) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.PullCredentials != nil {
+ {
+ size, err := m.PullCredentials.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Lineages) > 0 {
+ for iNdEx := len(m.Lineages) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Lineages[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.Image)
+ copy(dAtA[i:], m.Image)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Lineage) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Lineage) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Lineage) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Containers) > 0 {
+ for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Containers[iNdEx])
+ copy(dAtA[i:], m.Containers[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Containers[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Chain) > 0 {
+ for iNdEx := len(m.Chain) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Chain[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ObjectID) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ObjectID) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ObjectID) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Kind)
+ copy(dAtA[i:], m.Kind)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Group)
+ copy(dAtA[i:], m.Group)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ObjectInfo) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ObjectInfo) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ObjectInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Ref.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ObjectReference) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ObjectReference) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *PullCredentials) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PullCredentials) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PullCredentials) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.SecretRefs) > 0 {
+ for iNdEx := len(m.SecretRefs) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.SecretRefs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ i -= len(m.ServiceAccountName)
+ copy(dAtA[i:], m.ServiceAccountName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ReadonlyHealthCheckSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ReadonlyHealthCheckSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ReadonlyHealthCheckSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.FailureThreshold != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.FailureThreshold))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.TimeoutSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.PeriodSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.PeriodSeconds))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourceID) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourceID) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceID) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Scope)
+ copy(dAtA[i:], m.Scope)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scope)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Kind)
+ copy(dAtA[i:], m.Kind)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Group)
+ copy(dAtA[i:], m.Group)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *TLSConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TLSConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TLSConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Certificates) > 0 {
+ for iNdEx := len(m.Certificates) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Certificates[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.IssuerRef != nil {
+ {
+ size, err := m.IssuerRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *TypedObjectReference) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TypedObjectReference) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TypedObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Kind)
+ copy(dAtA[i:], m.Kind)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.APIGroup)
+ copy(dAtA[i:], m.APIGroup)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *X509Subject) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *X509Subject) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *X509Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.SerialNumber)
+ copy(dAtA[i:], m.SerialNumber)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.SerialNumber)))
+ i--
+ dAtA[i] = 0x42
+ if len(m.PostalCodes) > 0 {
+ for iNdEx := len(m.PostalCodes) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.PostalCodes[iNdEx])
+ copy(dAtA[i:], m.PostalCodes[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.PostalCodes[iNdEx])))
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ if len(m.StreetAddresses) > 0 {
+ for iNdEx := len(m.StreetAddresses) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.StreetAddresses[iNdEx])
+ copy(dAtA[i:], m.StreetAddresses[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.StreetAddresses[iNdEx])))
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ if len(m.Provinces) > 0 {
+ for iNdEx := len(m.Provinces) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Provinces[iNdEx])
+ copy(dAtA[i:], m.Provinces[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provinces[iNdEx])))
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if len(m.Localities) > 0 {
+ for iNdEx := len(m.Localities) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Localities[iNdEx])
+ copy(dAtA[i:], m.Localities[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Localities[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.OrganizationalUnits) > 0 {
+ for iNdEx := len(m.OrganizationalUnits) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.OrganizationalUnits[iNdEx])
+ copy(dAtA[i:], m.OrganizationalUnits[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.OrganizationalUnits[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Countries) > 0 {
+ for iNdEx := len(m.Countries) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Countries[iNdEx])
+ copy(dAtA[i:], m.Countries[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Countries[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Organizations) > 0 {
+ for iNdEx := len(m.Organizations) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Organizations[iNdEx])
+ copy(dAtA[i:], m.Organizations[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Organizations[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
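+// encodeVarintGenerated encodes v as a protobuf base-128 varint ending just
+// before offset in dAtA and returns the new, lower write offset.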
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *CertificatePrivateKey) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Encoding)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *CertificateSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Alias)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.IssuerRef != nil {
+ l = m.IssuerRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.SecretName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Subject != nil {
+ l = m.Subject.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Duration != nil {
+ l = m.Duration.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.RenewBefore != nil {
+ l = m.RenewBefore.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.DNSNames) > 0 {
+ for _, s := range m.DNSNames {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.IPAddresses) > 0 {
+ for _, s := range m.IPAddresses {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.URIs) > 0 {
+ for _, s := range m.URIs {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.EmailAddresses) > 0 {
+ for _, s := range m.EmailAddresses {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.PrivateKey != nil {
+ l = m.PrivateKey.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ClusterMetadata) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.UID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.DisplayName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Provider)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Condition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Severity)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *HealthCheckSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ReadonlyHealthCheckSpec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ return n
+}
+
+func (m *ImageInfo) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Image)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Lineages) > 0 {
+ for _, e := range m.Lineages {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.PullCredentials != nil {
+ l = m.PullCredentials.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Lineage) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Chain) > 0 {
+ for _, e := range m.Chain {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Containers) > 0 {
+ for _, s := range m.Containers {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ObjectID) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Group)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ObjectInfo) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Resource.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Ref.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ObjectReference) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *PullCredentials) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ServiceAccountName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.SecretRefs) > 0 {
+ for _, e := range m.SecretRefs {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ReadonlyHealthCheckSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.PeriodSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.PeriodSeconds))
+ }
+ if m.TimeoutSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
+ }
+ if m.FailureThreshold != nil {
+ n += 1 + sovGenerated(uint64(*m.FailureThreshold))
+ }
+ return n
+}
+
+func (m *ResourceID) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Group)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Version)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Scope)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *TLSConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.IssuerRef != nil {
+ l = m.IssuerRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Certificates) > 0 {
+ for _, e := range m.Certificates {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *TypedObjectReference) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.APIGroup)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *X509Subject) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Organizations) > 0 {
+ for _, s := range m.Organizations {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Countries) > 0 {
+ for _, s := range m.Countries {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.OrganizationalUnits) > 0 {
+ for _, s := range m.OrganizationalUnits {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Localities) > 0 {
+ for _, s := range m.Localities {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Provinces) > 0 {
+ for _, s := range m.Provinces {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.StreetAddresses) > 0 {
+ for _, s := range m.StreetAddresses {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.PostalCodes) > 0 {
+ for _, s := range m.PostalCodes {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.SerialNumber)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
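+// sovGenerated returns the number of bytes needed to encode x as a varint;
+// sozGenerated does the same for zigzag-encoded (signed) values.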
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
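+
+// The String methods below produce a compact, human-readable dump of each type,
+// used mainly for debugging and error messages.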
+func (this *CertificatePrivateKey) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CertificatePrivateKey{`,
+ `Encoding:` + fmt.Sprintf("%v", this.Encoding) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CertificateSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CertificateSpec{`,
+ `Alias:` + fmt.Sprintf("%v", this.Alias) + `,`,
+ `IssuerRef:` + strings.Replace(fmt.Sprintf("%v", this.IssuerRef), "TypedLocalObjectReference", "v1.TypedLocalObjectReference", 1) + `,`,
+ `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`,
+ `Subject:` + strings.Replace(this.Subject.String(), "X509Subject", "X509Subject", 1) + `,`,
+ `Duration:` + strings.Replace(fmt.Sprintf("%v", this.Duration), "Duration", "v11.Duration", 1) + `,`,
+ `RenewBefore:` + strings.Replace(fmt.Sprintf("%v", this.RenewBefore), "Duration", "v11.Duration", 1) + `,`,
+ `DNSNames:` + fmt.Sprintf("%v", this.DNSNames) + `,`,
+ `IPAddresses:` + fmt.Sprintf("%v", this.IPAddresses) + `,`,
+ `URIs:` + fmt.Sprintf("%v", this.URIs) + `,`,
+ `EmailAddresses:` + fmt.Sprintf("%v", this.EmailAddresses) + `,`,
+ `PrivateKey:` + strings.Replace(this.PrivateKey.String(), "CertificatePrivateKey", "CertificatePrivateKey", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterMetadata) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ClusterMetadata{`,
+ `UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `DisplayName:` + fmt.Sprintf("%v", this.DisplayName) + `,`,
+ `Provider:` + fmt.Sprintf("%v", this.Provider) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Condition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Condition{`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `Severity:` + fmt.Sprintf("%v", this.Severity) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *HealthCheckSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&HealthCheckSpec{`,
+ `ReadonlyHealthCheckSpec:` + strings.Replace(strings.Replace(this.ReadonlyHealthCheckSpec.String(), "ReadonlyHealthCheckSpec", "ReadonlyHealthCheckSpec", 1), `&`, ``, 1) + `,`,
+ `DisableWriteCheck:` + fmt.Sprintf("%v", this.DisableWriteCheck) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageInfo) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForLineages := "[]Lineage{"
+ for _, f := range this.Lineages {
+ repeatedStringForLineages += strings.Replace(strings.Replace(f.String(), "Lineage", "Lineage", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForLineages += "}"
+ s := strings.Join([]string{`&ImageInfo{`,
+ `Image:` + fmt.Sprintf("%v", this.Image) + `,`,
+ `Lineages:` + repeatedStringForLineages + `,`,
+ `PullCredentials:` + strings.Replace(this.PullCredentials.String(), "PullCredentials", "PullCredentials", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Lineage) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForChain := "[]ObjectInfo{"
+ for _, f := range this.Chain {
+ repeatedStringForChain += strings.Replace(strings.Replace(f.String(), "ObjectInfo", "ObjectInfo", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForChain += "}"
+ s := strings.Join([]string{`&Lineage{`,
+ `Chain:` + repeatedStringForChain + `,`,
+ `Containers:` + fmt.Sprintf("%v", this.Containers) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ObjectID) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ObjectID{`,
+ `Group:` + fmt.Sprintf("%v", this.Group) + `,`,
+ `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+ `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ObjectInfo) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ObjectInfo{`,
+ `Resource:` + strings.Replace(strings.Replace(this.Resource.String(), "ResourceID", "ResourceID", 1), `&`, ``, 1) + `,`,
+ `Ref:` + strings.Replace(strings.Replace(this.Ref.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ObjectReference) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ObjectReference{`,
+ `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PullCredentials) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForSecretRefs := "[]LocalObjectReference{"
+ for _, f := range this.SecretRefs {
+ repeatedStringForSecretRefs += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForSecretRefs += "}"
+ s := strings.Join([]string{`&PullCredentials{`,
+ `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`,
+ `SecretRefs:` + repeatedStringForSecretRefs + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ReadonlyHealthCheckSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ReadonlyHealthCheckSpec{`,
+ `PeriodSeconds:` + valueToStringGenerated(this.PeriodSeconds) + `,`,
+ `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
+ `FailureThreshold:` + valueToStringGenerated(this.FailureThreshold) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ResourceID) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ResourceID{`,
+ `Group:` + fmt.Sprintf("%v", this.Group) + `,`,
+ `Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+ `Scope:` + fmt.Sprintf("%v", this.Scope) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TLSConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForCertificates := "[]CertificateSpec{"
+ for _, f := range this.Certificates {
+ repeatedStringForCertificates += strings.Replace(strings.Replace(f.String(), "CertificateSpec", "CertificateSpec", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForCertificates += "}"
+ s := strings.Join([]string{`&TLSConfig{`,
+ `IssuerRef:` + strings.Replace(fmt.Sprintf("%v", this.IssuerRef), "TypedLocalObjectReference", "v1.TypedLocalObjectReference", 1) + `,`,
+ `Certificates:` + repeatedStringForCertificates + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TypedObjectReference) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TypedObjectReference{`,
+ `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`,
+ `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+ `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *X509Subject) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&X509Subject{`,
+ `Organizations:` + fmt.Sprintf("%v", this.Organizations) + `,`,
+ `Countries:` + fmt.Sprintf("%v", this.Countries) + `,`,
+ `OrganizationalUnits:` + fmt.Sprintf("%v", this.OrganizationalUnits) + `,`,
+ `Localities:` + fmt.Sprintf("%v", this.Localities) + `,`,
+ `Provinces:` + fmt.Sprintf("%v", this.Provinces) + `,`,
+ `StreetAddresses:` + fmt.Sprintf("%v", this.StreetAddresses) + `,`,
+ `PostalCodes:` + fmt.Sprintf("%v", this.PostalCodes) + `,`,
+ `SerialNumber:` + fmt.Sprintf("%v", this.SerialNumber) + `,`,
+ `}`,
+ }, "")
+ return s
+}
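+
+// valueToStringGenerated renders a pointer field as "*value", or "nil" when the
+// pointer is unset.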
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
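+
+// The Unmarshal methods below decode the protobuf wire format: each key varint
+// is split into a field number and wire type, the matching struct field is
+// filled in, and unknown fields are skipped via skipGenerated.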
+func (m *CertificatePrivateKey) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CertificatePrivateKey: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CertificatePrivateKey: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Encoding", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Encoding = PrivateKeyEncoding(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CertificateSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CertificateSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CertificateSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Alias", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Alias = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IssuerRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.IssuerRef == nil {
+ m.IssuerRef = &v1.TypedLocalObjectReference{}
+ }
+ if err := m.IssuerRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SecretName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Subject == nil {
+ m.Subject = &X509Subject{}
+ }
+ if err := m.Subject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Duration == nil {
+ m.Duration = &v11.Duration{}
+ }
+ if err := m.Duration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RenewBefore", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RenewBefore == nil {
+ m.RenewBefore = &v11.Duration{}
+ }
+ if err := m.RenewBefore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DNSNames", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DNSNames = append(m.DNSNames, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IPAddresses", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.IPAddresses = append(m.IPAddresses, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field URIs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.URIs = append(m.URIs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EmailAddresses", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.EmailAddresses = append(m.EmailAddresses, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrivateKey", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PrivateKey == nil {
+ m.PrivateKey = &CertificatePrivateKey{}
+ }
+ if err := m.PrivateKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterMetadata) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterMetadata: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterMetadata: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DisplayName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DisplayName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Provider = HostingProvider(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Condition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Condition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Condition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = ConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = k8s_io_apimachinery_pkg_apis_meta_v1.ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Severity", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Severity = ConditionSeverity(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HealthCheckSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HealthCheckSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HealthCheckSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadonlyHealthCheckSpec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ReadonlyHealthCheckSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DisableWriteCheck", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.DisableWriteCheck = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageInfo) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageInfo: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Image = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Lineages", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Lineages = append(m.Lineages, Lineage{})
+ if err := m.Lineages[len(m.Lineages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PullCredentials", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PullCredentials == nil {
+ m.PullCredentials = &PullCredentials{}
+ }
+ if err := m.PullCredentials.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Lineage) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Lineage: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Lineage: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Chain", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Chain = append(m.Chain, ObjectInfo{})
+ if err := m.Chain[len(m.Chain)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Containers = append(m.Containers, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ObjectID) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ObjectID: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ObjectID: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Group = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ObjectInfo) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ObjectInfo: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ObjectInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Ref.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ObjectReference) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ObjectReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ObjectReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PullCredentials) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PullCredentials: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PullCredentials: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServiceAccountName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretRefs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SecretRefs = append(m.SecretRefs, v1.LocalObjectReference{})
+ if err := m.SecretRefs[len(m.SecretRefs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReadonlyHealthCheckSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReadonlyHealthCheckSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReadonlyHealthCheckSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PeriodSeconds", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.PeriodSeconds = &v
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TimeoutSeconds = &v
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FailureThreshold", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.FailureThreshold = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceID) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceID: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceID: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Group = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Scope = ResourceScope(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TLSConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TLSConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TLSConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IssuerRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.IssuerRef == nil {
+ m.IssuerRef = &v1.TypedLocalObjectReference{}
+ }
+ if err := m.IssuerRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Certificates", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Certificates = append(m.Certificates, CertificateSpec{})
+ if err := m.Certificates[len(m.Certificates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TypedObjectReference) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TypedObjectReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TypedObjectReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.APIGroup = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *X509Subject) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: X509Subject: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: X509Subject: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Organizations", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Organizations = append(m.Organizations, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Countries", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Countries = append(m.Countries, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field OrganizationalUnits", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.OrganizationalUnits = append(m.OrganizationalUnits, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Localities", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Localities = append(m.Localities, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Provinces", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Provinces = append(m.Provinces, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StreetAddresses", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.StreetAddresses = append(m.StreetAddresses, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PostalCodes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PostalCodes = append(m.PostalCodes, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SerialNumber", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SerialNumber = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/kmodules.xyz/client-go/api/v1/generated.proto b/vendor/kmodules.xyz/client-go/api/v1/generated.proto
new file mode 100644
index 00000000..d8fc1a44
--- /dev/null
+++ b/vendor/kmodules.xyz/client-go/api/v1/generated.proto
@@ -0,0 +1,309 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package kmodules.xyz.client_go.api.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "kmodules.xyz/client-go/api/v1";
+
+// CertificatePrivateKey contains configuration options for private keys
+// used by the Certificate controller.
+// This allows control of how private keys are rotated.
+message CertificatePrivateKey {
+ // The private key cryptography standards (PKCS) encoding for this
+ // certificate's private key to be encoded in.
+ // If provided, allowed values are "pkcs1" and "pkcs8" standing for PKCS#1
+ // and PKCS#8, respectively.
+ // Defaults to PKCS#1 if not specified.
+ // See here for the difference between the formats: https://stackoverflow.com/a/48960291
+ // +optional
+ optional string encoding = 1;
+}
+
+message CertificateSpec {
+ // Alias represents the identifier of the certificate.
+ optional string alias = 1;
+
+ // IssuerRef is a reference to a Certificate Issuer.
+ // +optional
+ optional k8s.io.api.core.v1.TypedLocalObjectReference issuerRef = 2;
+
+ // Specifies the k8s secret name that holds the certificates.
+ // Default to <resource-name>-<cert-alias>-cert.
+ // +optional
+ optional string secretName = 3;
+
+ // Full X509 name specification (https://golang.org/pkg/crypto/x509/pkix/#Name).
+ // +optional
+ optional X509Subject subject = 4;
+
+ // Certificate default Duration
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration duration = 5;
+
+ // Certificate renew before expiration duration
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration renewBefore = 6;
+
+ // DNSNames is a list of subject alt names to be used on the Certificate.
+ // +optional
+ repeated string dnsNames = 7;
+
+ // IPAddresses is a list of IP addresses to be used on the Certificate
+ // +optional
+ repeated string ipAddresses = 8;
+
+ // URIs is a list of URI subjectAltNames to be set on the Certificate.
+ // +optional
+ repeated string uris = 9;
+
+ // EmailAddresses is a list of email subjectAltNames to be set on the Certificate.
+ // +optional
+ repeated string emailAddresses = 10;
+
+ // Options to control private keys used for the Certificate.
+ // +optional
+ optional CertificatePrivateKey privateKey = 11;
+}
+
+message ClusterMetadata {
+ optional string uid = 1;
+
+ optional string name = 2;
+
+ optional string displayName = 3;
+
+ optional string provider = 4;
+}
+
+// Condition defines an observation of a object operational state.
+message Condition {
+ // Type of condition in CamelCase or in foo.example.com/CamelCase.
+ // Many .condition.type values are consistent across resources like Available, but because arbitrary util
+ // can be useful (see .node.status.util), the ability to deconflict is important.
+ optional string type = 4;
+
+ // Status of the condition, one of True, False, Unknown.
+ optional string status = 5;
+
+ // If set, this represents the .metadata.generation that the condition was set based upon.
+ // For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date
+ // with respect to the current state of the instance.
+ // +optional
+ optional int64 observedGeneration = 3;
+
+ // Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ // understand the current situation and act accordingly.
+ // The Severity field MUST be set only when Status=False.
+ // +optional
+ optional string severity = 6;
+
+ // Last time the condition transitioned from one status to another.
+ // This should be when the underlying condition changed. If that is not known, then using the time when
+ // the API field changed is acceptable.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7;
+
+ // The reason for the condition's last transition in CamelCase.
+ // The specific API may choose whether this field is considered a guaranteed API.
+ // This field may not be empty.
+ // +optional
+ optional string reason = 8;
+
+ // A human-readable message indicating details about the transition.
+ // This field may be empty.
+ // +optional
+ optional string message = 9;
+}
+
+// HealthCheckSpec defines attributes of the health check
+message HealthCheckSpec {
+ optional ReadonlyHealthCheckSpec readonlyHealthCheckSpec = 1;
+
+ // Whether to disable write check on database.
+ // Defaults to false.
+ // +optional
+ optional bool disableWriteCheck = 2;
+}
+
+message ImageInfo {
+ optional string image = 1;
+
+ repeated Lineage lineages = 2;
+
+ optional PullCredentials pullCredentials = 3;
+}
+
+message Lineage {
+ repeated ObjectInfo chain = 1;
+
+ repeated string containers = 2;
+}
+
+message ObjectID {
+ optional string group = 1;
+
+ optional string kind = 2;
+
+ optional string namespace = 3;
+
+ optional string name = 4;
+}
+
+message ObjectInfo {
+ optional ResourceID resource = 1;
+
+ optional ObjectReference ref = 2;
+}
+
+// ObjectReference contains enough information to let you inspect or modify the referred object.
+message ObjectReference {
+ // Namespace of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ // +optional
+ optional string namespace = 1;
+
+ // Name of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ optional string name = 2;
+}
+
+message PullCredentials {
+ optional string namespace = 1;
+
+ optional string serviceAccountName = 2;
+
+ repeated k8s.io.api.core.v1.LocalObjectReference secretRefs = 3;
+}
+
+// ReadonlyHealthCheckSpec defines attributes of the health check using only read-only checks
+message ReadonlyHealthCheckSpec {
+ // How often (in seconds) to perform the health check.
+ // Default to 10 seconds. Minimum value is 1.
+ // +optional
+ // +kubebuilder:default=10
+ optional int32 periodSeconds = 1;
+
+ // Number of seconds after which the probe times out.
+ // Defaults to 10 second. Minimum value is 1.
+ // It should be less than the periodSeconds.
+ // +optional
+ // +kubebuilder:default=10
+ optional int32 timeoutSeconds = 2;
+
+ // Minimum consecutive failures for the health check to be considered failed after having succeeded.
+ // Defaults to 1. Minimum value is 1.
+ // +optional
+ // +kubebuilder:default=1
+ optional int32 failureThreshold = 3;
+}
+
+// ResourceID identifies a resource
+message ResourceID {
+ optional string group = 1;
+
+ optional string version = 2;
+
+ // Name is the plural name of the resource to serve. It must match the name of the CustomResourceDefinition-registration
+ // too: plural.group and it must be all lowercase.
+ optional string name = 3;
+
+ // Kind is the serialized kind of the resource. It is normally CamelCase and singular.
+ optional string kind = 4;
+
+ optional string scope = 5;
+}
+
+message TLSConfig {
+ // IssuerRef is a reference to a Certificate Issuer.
+ // +optional
+ optional k8s.io.api.core.v1.TypedLocalObjectReference issuerRef = 1;
+
+ // Certificate provides server and/or client certificate options used by application pods.
+ // These options are passed to a cert-manager Certificate object.
+ // xref: https://github.com/jetstack/cert-manager/blob/v0.16.0/pkg/apis/certmanager/v1beta1/types_certificate.go#L82-L162
+ // +optional
+ repeated CertificateSpec certificates = 2;
+}
+
+// TimeOfDay is a wrapper around time.Time which supports correct
+// marshaling to YAML and JSON. Wrappers are provided for many
+// of the factory methods that the time package offers.
+//
+// +protobuf.options.marshal=false
+// +protobuf.as=Timestamp
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message TimeOfDay {
+}
+
+// TypedObjectReference represents an typed namespaced object.
+message TypedObjectReference {
+ optional string apiGroup = 1;
+
+ optional string kind = 2;
+
+ // Namespace of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ // +optional
+ optional string namespace = 3;
+
+ // Name of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ optional string name = 4;
+}
+
+// X509Subject Full X509 name specification
+message X509Subject {
+ // Organizations to be used on the Certificate.
+ // +optional
+ repeated string organizations = 1;
+
+ // Countries to be used on the CertificateSpec.
+ // +optional
+ repeated string countries = 2;
+
+ // Organizational Units to be used on the CertificateSpec.
+ // +optional
+ repeated string organizationalUnits = 3;
+
+ // Cities to be used on the CertificateSpec.
+ // +optional
+ repeated string localities = 4;
+
+ // State/Provinces to be used on the CertificateSpec.
+ // +optional
+ repeated string provinces = 5;
+
+ // Street addresses to be used on the CertificateSpec.
+ // +optional
+ repeated string streetAddresses = 6;
+
+ // Postal codes to be used on the CertificateSpec.
+ // +optional
+ repeated string postalCodes = 7;
+
+ // Serial number to be used on the CertificateSpec.
+ // +optional
+ optional string serialNumber = 8;
+}
+
diff --git a/vendor/kmodules.xyz/client-go/api/v1/healthchecker.go b/vendor/kmodules.xyz/client-go/api/v1/healthchecker.go
new file mode 100644
index 00000000..eac53958
--- /dev/null
+++ b/vendor/kmodules.xyz/client-go/api/v1/healthchecker.go
@@ -0,0 +1,46 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// ReadonlyHealthCheckSpec defines attributes of the health check using only read-only checks
+type ReadonlyHealthCheckSpec struct {
+ // How often (in seconds) to perform the health check.
+ // Default to 10 seconds. Minimum value is 1.
+ // +optional
+ // +kubebuilder:default=10
+ PeriodSeconds *int32 `json:"periodSeconds,omitempty" protobuf:"varint,1,opt,name=periodSeconds"`
+ // Number of seconds after which the probe times out.
+ // Defaults to 10 second. Minimum value is 1.
+ // It should be less than the periodSeconds.
+ // +optional
+ // +kubebuilder:default=10
+ TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,2,opt,name=timeoutSeconds"`
+ // Minimum consecutive failures for the health check to be considered failed after having succeeded.
+ // Defaults to 1. Minimum value is 1.
+ // +optional
+ // +kubebuilder:default=1
+ FailureThreshold *int32 `json:"failureThreshold,omitempty" protobuf:"varint,3,opt,name=failureThreshold"`
+}
+
+// HealthCheckSpec defines attributes of the health check
+type HealthCheckSpec struct {
+ ReadonlyHealthCheckSpec `json:",inline" protobuf:"bytes,1,opt,name=readonlyHealthCheckSpec"`
+ // Whether to disable write check on database.
+ // Defaults to false.
+ // +optional
+ DisableWriteCheck bool `json:"disableWriteCheck,omitempty" protobuf:"varint,2,opt,name=disableWriteCheck"`
+}
diff --git a/vendor/kmodules.xyz/client-go/api/v1/image.go b/vendor/kmodules.xyz/client-go/api/v1/image.go
new file mode 100644
index 00000000..48a1ace1
--- /dev/null
+++ b/vendor/kmodules.xyz/client-go/api/v1/image.go
@@ -0,0 +1,38 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ core "k8s.io/api/core/v1"
+)
+
+type Lineage struct {
+ Chain []ObjectInfo `json:"chain,omitempty" protobuf:"bytes,1,rep,name=chain"`
+ Containers []string `json:"containers,omitempty" protobuf:"bytes,2,rep,name=containers"`
+}
+
+type ImageInfo struct {
+ Image string `json:"image" protobuf:"bytes,1,opt,name=image"`
+ Lineages []Lineage `json:"lineages,omitempty" protobuf:"bytes,2,rep,name=lineages"`
+ PullCredentials *PullCredentials `json:"pullCredentials,omitempty" protobuf:"bytes,3,opt,name=pullCredentials"`
+}
+
+type PullCredentials struct {
+ Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
+ ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,2,opt,name=serviceAccountName"`
+ SecretRefs []core.LocalObjectReference `json:"secretRefs,omitempty" protobuf:"bytes,3,rep,name=secretRefs"`
+}
diff --git a/vendor/kmodules.xyz/client-go/api/v1/object.go b/vendor/kmodules.xyz/client-go/api/v1/object.go
new file mode 100644
index 00000000..4e16224e
--- /dev/null
+++ b/vendor/kmodules.xyz/client-go/api/v1/object.go
@@ -0,0 +1,228 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+//go:generate go-enum --mustparse --names --values
+package v1
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// TypedObjectReference represents an typed namespaced object.
+type TypedObjectReference struct {
+ APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"`
+ Kind string `json:"kind,omitempty" protobuf:"bytes,2,opt,name=kind"`
+ // Namespace of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ // +optional
+ Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
+ // Name of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ Name string `json:"name" protobuf:"bytes,4,opt,name=name"`
+}
+
+// ObjectReference contains enough information to let you inspect or modify the referred object.
+type ObjectReference struct {
+ // Namespace of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ // +optional
+ Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"`
+ // Name of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+}
+
+// WithNamespace sets the namespace if original namespace is empty.
+// Never changes the original ObjectReference.
+func (ref *ObjectReference) WithNamespace(fallback string) *ObjectReference {
+ if ref == nil {
+ return nil
+ }
+ if ref.Namespace != "" {
+ return ref
+ }
+ out := *ref
+ out.Namespace = fallback
+ return &out
+}
+
+func (ref ObjectReference) ObjectKey() client.ObjectKey {
+ return client.ObjectKey{Namespace: ref.Namespace, Name: ref.Name}
+}
+
+type OID string
+
+type ObjectID struct {
+ Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"`
+ Kind string `json:"kind,omitempty" protobuf:"bytes,2,opt,name=kind"`
+ Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
+ Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"`
+}
+
+func (oid *ObjectID) OID() OID {
+ return OID(fmt.Sprintf("G=%s,K=%s,NS=%s,N=%s", oid.Group, oid.Kind, oid.Namespace, oid.Name))
+}
+
+// WithNamespace sets the namespace if original namespace is empty.
+// Never changes the original ObjectID.
+func (oid *ObjectID) WithNamespace(fallback string) *ObjectID {
+ if oid == nil {
+ return nil
+ }
+ if oid.Namespace != "" {
+ return oid
+ }
+ out := *oid
+ out.Namespace = fallback
+ return &out
+}
+
+func NewObjectID(obj client.Object) *ObjectID {
+ gvk := obj.GetObjectKind().GroupVersionKind()
+ return &ObjectID{
+ Group: gvk.Group,
+ Kind: gvk.Kind,
+ Namespace: obj.GetNamespace(),
+ Name: obj.GetName(),
+ }
+}
+
+func ParseObjectID(key OID) (*ObjectID, error) {
+ var id ObjectID
+
+ chunks := strings.Split(string(key), ",")
+ for _, chunk := range chunks {
+ parts := strings.FieldsFunc(chunk, func(r rune) bool {
+ return r == '=' || unicode.IsSpace(r)
+ })
+ if len(parts) == 0 || len(parts) > 2 {
+ return nil, fmt.Errorf("invalid chunk %s", chunk)
+ }
+
+ switch parts[0] {
+ case "G":
+ if len(parts) == 2 {
+ id.Group = parts[1]
+ }
+ case "K":
+ if len(parts) == 1 {
+ return nil, fmt.Errorf("kind not set")
+ }
+ id.Kind = parts[1]
+ case "NS":
+ if len(parts) == 2 {
+ id.Namespace = parts[1]
+ }
+ case "N":
+ if len(parts) == 1 {
+ return nil, fmt.Errorf("name not set")
+ }
+ id.Name = parts[1]
+ default:
+ return nil, fmt.Errorf("unknown key %s", parts[0])
+ }
+ }
+ return &id, nil
+}
+
+func MustParseObjectID(key OID) *ObjectID {
+ oid, err := ParseObjectID(key)
+ if err != nil {
+ panic(err)
+ }
+ return oid
+}
+
+func ObjectIDMap(key OID) (map[string]interface{}, error) {
+ id := map[string]interface{}{
+ "group": "",
+ "kind": "",
+ "namespace": "",
+ "name": "",
+ }
+
+ chunks := strings.Split(string(key), ",")
+ for _, chunk := range chunks {
+ parts := strings.FieldsFunc(chunk, func(r rune) bool {
+ return r == '=' || unicode.IsSpace(r)
+ })
+ if len(parts) == 0 || len(parts) > 2 {
+ return nil, fmt.Errorf("invalid chunk %s", chunk)
+ }
+
+ switch parts[0] {
+ case "G":
+ if len(parts) == 2 {
+ id["group"] = parts[1]
+ }
+ case "K":
+ if len(parts) == 1 {
+ return nil, fmt.Errorf("kind not set")
+ }
+ id["kind"] = parts[1]
+ case "NS":
+ if len(parts) == 2 {
+ id["namespace"] = parts[1]
+ }
+ case "N":
+ if len(parts) == 1 {
+ return nil, fmt.Errorf("name not set")
+ }
+ id["name"] = parts[1]
+ default:
+ return nil, fmt.Errorf("unknown key %s", parts[0])
+ }
+ }
+ return id, nil
+}
+
+func (oid *ObjectID) GroupKind() schema.GroupKind {
+ return schema.GroupKind{Group: oid.Group, Kind: oid.Kind}
+}
+
+func (oid *ObjectID) MetaGroupKind() metav1.GroupKind {
+ return metav1.GroupKind{Group: oid.Group, Kind: oid.Kind}
+}
+
+func (oid *ObjectID) ObjectReference() ObjectReference {
+ return ObjectReference{Namespace: oid.Namespace, Name: oid.Name}
+}
+
+func (oid *ObjectID) ObjectKey() client.ObjectKey {
+ return client.ObjectKey{Namespace: oid.Namespace, Name: oid.Name}
+}
+
+type ObjectInfo struct {
+ Resource ResourceID `json:"resource" protobuf:"bytes,1,opt,name=resource"`
+ Ref ObjectReference `json:"ref" protobuf:"bytes,2,opt,name=ref"`
+}
+
+// +kubebuilder:validation:Enum=authn;authz;auth_secret;backup_via;catalog;cert_issuer;config;connect_via;exposed_by;event;located_on;monitored_by;ocm_bind;offshoot;ops;placed_into;policy;recommended_for;restore_into;scaled_by;storage;view
+// ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,storage,view)
+type EdgeLabel string
+
+func (e EdgeLabel) Direct() bool {
+ return e == EdgeLabelOffshoot ||
+ e == EdgeLabelView ||
+ e == EdgeLabelOps ||
+ e == EdgeLabelRecommendedFor
+}
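Usage sketch (illustrative only, not part of this patch) for the OID helpers added above; the kmapi import alias and the sample Deployment values are assumptions:

package main

import (
	"fmt"

	kmapi "kmodules.xyz/client-go/api/v1"
)

func main() {
	// Build an ObjectID, serialize it to the "G=...,K=...,NS=...,N=..." key format,
	// then parse it back into a structured ID.
	id := kmapi.ObjectID{Group: "apps", Kind: "Deployment", Namespace: "demo", Name: "web"}
	key := id.OID()

	parsed, err := kmapi.ParseObjectID(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.GroupKind(), parsed.ObjectKey()) // Deployment.apps demo/web
}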
diff --git a/vendor/kmodules.xyz/client-go/api/v1/object_enum.go b/vendor/kmodules.xyz/client-go/api/v1/object_enum.go
new file mode 100644
index 00000000..403d3aed
--- /dev/null
+++ b/vendor/kmodules.xyz/client-go/api/v1/object_enum.go
@@ -0,0 +1,175 @@
+// Code generated by go-enum DO NOT EDIT.
+// Version:
+// Revision:
+// Build Date:
+// Built By:
+
+package v1
+
+import (
+ "fmt"
+ "strings"
+)
+
+const (
+ // EdgeLabelAuthn is a EdgeLabel of type authn.
+ EdgeLabelAuthn EdgeLabel = "authn"
+ // EdgeLabelAuthz is a EdgeLabel of type authz.
+ EdgeLabelAuthz EdgeLabel = "authz"
+ // EdgeLabelAuthSecret is a EdgeLabel of type auth_secret.
+ EdgeLabelAuthSecret EdgeLabel = "auth_secret"
+ // EdgeLabelBackupVia is a EdgeLabel of type backup_via.
+ EdgeLabelBackupVia EdgeLabel = "backup_via"
+ // EdgeLabelCatalog is a EdgeLabel of type catalog.
+ EdgeLabelCatalog EdgeLabel = "catalog"
+ // EdgeLabelCertIssuer is a EdgeLabel of type cert_issuer.
+ EdgeLabelCertIssuer EdgeLabel = "cert_issuer"
+ // EdgeLabelConfig is a EdgeLabel of type config.
+ EdgeLabelConfig EdgeLabel = "config"
+ // EdgeLabelConnectVia is a EdgeLabel of type connect_via.
+ EdgeLabelConnectVia EdgeLabel = "connect_via"
+ // EdgeLabelExposedBy is a EdgeLabel of type exposed_by.
+ EdgeLabelExposedBy EdgeLabel = "exposed_by"
+ // EdgeLabelEvent is a EdgeLabel of type event.
+ EdgeLabelEvent EdgeLabel = "event"
+ // EdgeLabelLocatedOn is a EdgeLabel of type located_on.
+ EdgeLabelLocatedOn EdgeLabel = "located_on"
+ // EdgeLabelMonitoredBy is a EdgeLabel of type monitored_by.
+ EdgeLabelMonitoredBy EdgeLabel = "monitored_by"
+ // EdgeLabelOcmBind is a EdgeLabel of type ocm_bind.
+ EdgeLabelOcmBind EdgeLabel = "ocm_bind"
+ // EdgeLabelOffshoot is a EdgeLabel of type offshoot.
+ EdgeLabelOffshoot EdgeLabel = "offshoot"
+ // EdgeLabelOps is a EdgeLabel of type ops.
+ EdgeLabelOps EdgeLabel = "ops"
+ // EdgeLabelPlacedInto is a EdgeLabel of type placed_into.
+ EdgeLabelPlacedInto EdgeLabel = "placed_into"
+ // EdgeLabelPolicy is a EdgeLabel of type policy.
+ EdgeLabelPolicy EdgeLabel = "policy"
+ // EdgeLabelRecommendedFor is a EdgeLabel of type recommended_for.
+ EdgeLabelRecommendedFor EdgeLabel = "recommended_for"
+ // EdgeLabelRestoreInto is a EdgeLabel of type restore_into.
+ EdgeLabelRestoreInto EdgeLabel = "restore_into"
+ // EdgeLabelScaledBy is a EdgeLabel of type scaled_by.
+ EdgeLabelScaledBy EdgeLabel = "scaled_by"
+ // EdgeLabelStorage is a EdgeLabel of type storage.
+ EdgeLabelStorage EdgeLabel = "storage"
+ // EdgeLabelView is a EdgeLabel of type view.
+ EdgeLabelView EdgeLabel = "view"
+)
+
+var ErrInvalidEdgeLabel = fmt.Errorf("not a valid EdgeLabel, try [%s]", strings.Join(_EdgeLabelNames, ", "))
+
+var _EdgeLabelNames = []string{
+ string(EdgeLabelAuthn),
+ string(EdgeLabelAuthz),
+ string(EdgeLabelAuthSecret),
+ string(EdgeLabelBackupVia),
+ string(EdgeLabelCatalog),
+ string(EdgeLabelCertIssuer),
+ string(EdgeLabelConfig),
+ string(EdgeLabelConnectVia),
+ string(EdgeLabelExposedBy),
+ string(EdgeLabelEvent),
+ string(EdgeLabelLocatedOn),
+ string(EdgeLabelMonitoredBy),
+ string(EdgeLabelOcmBind),
+ string(EdgeLabelOffshoot),
+ string(EdgeLabelOps),
+ string(EdgeLabelPlacedInto),
+ string(EdgeLabelPolicy),
+ string(EdgeLabelRecommendedFor),
+ string(EdgeLabelRestoreInto),
+ string(EdgeLabelScaledBy),
+ string(EdgeLabelStorage),
+ string(EdgeLabelView),
+}
+
+// EdgeLabelNames returns a list of possible string values of EdgeLabel.
+func EdgeLabelNames() []string {
+ tmp := make([]string, len(_EdgeLabelNames))
+ copy(tmp, _EdgeLabelNames)
+ return tmp
+}
+
+// EdgeLabelValues returns a list of the values for EdgeLabel
+func EdgeLabelValues() []EdgeLabel {
+ return []EdgeLabel{
+ EdgeLabelAuthn,
+ EdgeLabelAuthz,
+ EdgeLabelAuthSecret,
+ EdgeLabelBackupVia,
+ EdgeLabelCatalog,
+ EdgeLabelCertIssuer,
+ EdgeLabelConfig,
+ EdgeLabelConnectVia,
+ EdgeLabelExposedBy,
+ EdgeLabelEvent,
+ EdgeLabelLocatedOn,
+ EdgeLabelMonitoredBy,
+ EdgeLabelOcmBind,
+ EdgeLabelOffshoot,
+ EdgeLabelOps,
+ EdgeLabelPlacedInto,
+ EdgeLabelPolicy,
+ EdgeLabelRecommendedFor,
+ EdgeLabelRestoreInto,
+ EdgeLabelScaledBy,
+ EdgeLabelStorage,
+ EdgeLabelView,
+ }
+}
+
+// String implements the Stringer interface.
+func (x EdgeLabel) String() string {
+ return string(x)
+}
+
+// IsValid provides a quick way to determine if the typed value is
+// part of the allowed enumerated values
+func (x EdgeLabel) IsValid() bool {
+ _, err := ParseEdgeLabel(string(x))
+ return err == nil
+}
+
+var _EdgeLabelValue = map[string]EdgeLabel{
+ "authn": EdgeLabelAuthn,
+ "authz": EdgeLabelAuthz,
+ "auth_secret": EdgeLabelAuthSecret,
+ "backup_via": EdgeLabelBackupVia,
+ "catalog": EdgeLabelCatalog,
+ "cert_issuer": EdgeLabelCertIssuer,
+ "config": EdgeLabelConfig,
+ "connect_via": EdgeLabelConnectVia,
+ "exposed_by": EdgeLabelExposedBy,
+ "event": EdgeLabelEvent,
+ "located_on": EdgeLabelLocatedOn,
+ "monitored_by": EdgeLabelMonitoredBy,
+ "ocm_bind": EdgeLabelOcmBind,
+ "offshoot": EdgeLabelOffshoot,
+ "ops": EdgeLabelOps,
+ "placed_into": EdgeLabelPlacedInto,
+ "policy": EdgeLabelPolicy,
+ "recommended_for": EdgeLabelRecommendedFor,
+ "restore_into": EdgeLabelRestoreInto,
+ "scaled_by": EdgeLabelScaledBy,
+ "storage": EdgeLabelStorage,
+ "view": EdgeLabelView,
+}
+
+// ParseEdgeLabel attempts to convert a string to a EdgeLabel.
+func ParseEdgeLabel(name string) (EdgeLabel, error) {
+ if x, ok := _EdgeLabelValue[name]; ok {
+ return x, nil
+ }
+ return EdgeLabel(""), fmt.Errorf("%s is %w", name, ErrInvalidEdgeLabel)
+}
+
+// MustParseEdgeLabel converts a string to a EdgeLabel, and panics if is not valid.
+func MustParseEdgeLabel(name string) EdgeLabel {
+ val, err := ParseEdgeLabel(name)
+ if err != nil {
+ panic(err)
+ }
+ return val
+}
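Usage sketch (illustrative only, not part of this patch) for the generated enum helpers; the input strings are examples:

package main

import (
	"fmt"

	kmapi "kmodules.xyz/client-go/api/v1"
)

func main() {
	// Valid labels parse cleanly and expose helpers such as Direct().
	if e, err := kmapi.ParseEdgeLabel("backup_via"); err == nil {
		fmt.Println(e, e.Direct()) // backup_via false
	}
	// Unknown labels return ErrInvalidEdgeLabel wrapped with the offending value.
	if _, err := kmapi.ParseEdgeLabel("bogus"); err != nil {
		fmt.Println(err)
	}
}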
diff --git a/vendor/kmodules.xyz/client-go/api/v1/openapi_generated.go b/vendor/kmodules.xyz/client-go/api/v1/openapi_generated.go
new file mode 100644
index 00000000..e33f3756
--- /dev/null
+++ b/vendor/kmodules.xyz/client-go/api/v1/openapi_generated.go
@@ -0,0 +1,855 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+// This file was autogenerated by openapi-gen. Do not edit it manually!
+
+package v1
+
+import (
+ common "k8s.io/kube-openapi/pkg/common"
+ spec "k8s.io/kube-openapi/pkg/validation/spec"
+)
+
+func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
+ return map[string]common.OpenAPIDefinition{
+ "kmodules.xyz/client-go/api/v1.CertificatePrivateKey": schema_kmodulesxyz_client_go_api_v1_CertificatePrivateKey(ref),
+ "kmodules.xyz/client-go/api/v1.CertificateSpec": schema_kmodulesxyz_client_go_api_v1_CertificateSpec(ref),
+ "kmodules.xyz/client-go/api/v1.ClusterMetadata": schema_kmodulesxyz_client_go_api_v1_ClusterMetadata(ref),
+ "kmodules.xyz/client-go/api/v1.Condition": schema_kmodulesxyz_client_go_api_v1_Condition(ref),
+ "kmodules.xyz/client-go/api/v1.HealthCheckSpec": schema_kmodulesxyz_client_go_api_v1_HealthCheckSpec(ref),
+ "kmodules.xyz/client-go/api/v1.ImageInfo": schema_kmodulesxyz_client_go_api_v1_ImageInfo(ref),
+ "kmodules.xyz/client-go/api/v1.Lineage": schema_kmodulesxyz_client_go_api_v1_Lineage(ref),
+ "kmodules.xyz/client-go/api/v1.ObjectID": schema_kmodulesxyz_client_go_api_v1_ObjectID(ref),
+ "kmodules.xyz/client-go/api/v1.ObjectInfo": schema_kmodulesxyz_client_go_api_v1_ObjectInfo(ref),
+ "kmodules.xyz/client-go/api/v1.ObjectReference": schema_kmodulesxyz_client_go_api_v1_ObjectReference(ref),
+ "kmodules.xyz/client-go/api/v1.PullCredentials": schema_kmodulesxyz_client_go_api_v1_PullCredentials(ref),
+ "kmodules.xyz/client-go/api/v1.ReadonlyHealthCheckSpec": schema_kmodulesxyz_client_go_api_v1_ReadonlyHealthCheckSpec(ref),
+ "kmodules.xyz/client-go/api/v1.ResourceID": schema_kmodulesxyz_client_go_api_v1_ResourceID(ref),
+ "kmodules.xyz/client-go/api/v1.TLSConfig": schema_kmodulesxyz_client_go_api_v1_TLSConfig(ref),
+ "kmodules.xyz/client-go/api/v1.TimeOfDay": schema_kmodulesxyz_client_go_api_v1_TimeOfDay(ref),
+ "kmodules.xyz/client-go/api/v1.TypedObjectReference": schema_kmodulesxyz_client_go_api_v1_TypedObjectReference(ref),
+ "kmodules.xyz/client-go/api/v1.X509Subject": schema_kmodulesxyz_client_go_api_v1_X509Subject(ref),
+ "kmodules.xyz/client-go/api/v1.stringSetMerger": schema_kmodulesxyz_client_go_api_v1_stringSetMerger(ref),
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_CertificatePrivateKey(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "CertificatePrivateKey contains configuration options for private keys used by the Certificate controller. This allows control of how private keys are rotated.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "encoding": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. If provided, allowed values are \"pkcs1\" and \"pkcs8\" standing for PKCS#1 and PKCS#8, respectively. Defaults to PKCS#1 if not specified. See here for the difference between the formats: https://stackoverflow.com/a/48960291",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_CertificateSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "alias": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Alias represents the identifier of the certificate.",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "issuerRef": {
+ SchemaProps: spec.SchemaProps{
+ Description: "IssuerRef is a reference to a Certificate Issuer.",
+ Ref: ref("k8s.io/api/core/v1.TypedLocalObjectReference"),
+ },
+ },
+ "secretName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Specifies the k8s secret name that holds the certificates. Default to --cert.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "subject": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Full X509 name specification (https://golang.org/pkg/crypto/x509/pkix/#Name).",
+ Ref: ref("kmodules.xyz/client-go/api/v1.X509Subject"),
+ },
+ },
+ "duration": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Certificate default Duration",
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"),
+ },
+ },
+ "renewBefore": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Certificate renew before expiration duration",
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"),
+ },
+ },
+ "dnsNames": {
+ SchemaProps: spec.SchemaProps{
+ Description: "DNSNames is a list of subject alt names to be used on the Certificate.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "ipAddresses": {
+ SchemaProps: spec.SchemaProps{
+ Description: "IPAddresses is a list of IP addresses to be used on the Certificate",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "uris": {
+ SchemaProps: spec.SchemaProps{
+ Description: "URIs is a list of URI subjectAltNames to be set on the Certificate.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "emailAddresses": {
+ SchemaProps: spec.SchemaProps{
+ Description: "EmailAddresses is a list of email subjectAltNames to be set on the Certificate.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "privateKey": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Options to control private keys used for the Certificate.",
+ Ref: ref("kmodules.xyz/client-go/api/v1.CertificatePrivateKey"),
+ },
+ },
+ },
+ Required: []string{"alias"},
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.TypedLocalObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "kmodules.xyz/client-go/api/v1.CertificatePrivateKey", "kmodules.xyz/client-go/api/v1.X509Subject"},
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_ClusterMetadata(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "uid": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "displayName": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "provider": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"uid"},
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_Condition(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "Condition defines an observation of a object operational state.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "type": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important.",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "status": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Status of the condition, one of True, False, Unknown.",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "observedGeneration": {
+ SchemaProps: spec.SchemaProps{
+ Description: "If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.",
+ Type: []string{"integer"},
+ Format: "int64",
+ },
+ },
+ "severity": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "lastTransitionTime": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.",
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
+ },
+ },
+ "reason": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The reason for the condition's last transition in CamelCase. The specific API may choose whether this field is considered a guaranteed API. This field may not be empty.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "message": {
+ SchemaProps: spec.SchemaProps{
+ Description: "A human-readable message indicating details about the transition. This field may be empty.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"type", "status", "lastTransitionTime"},
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_HealthCheckSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "HealthCheckSpec defines attributes of the health check",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "periodSeconds": {
+ SchemaProps: spec.SchemaProps{
+ Description: "How often (in seconds) to perform the health check. Defaults to 10 seconds. Minimum value is 1.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "timeoutSeconds": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Number of seconds after which the probe times out. Defaults to 10 seconds. Minimum value is 1. It should be less than the periodSeconds.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "failureThreshold": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Minimum consecutive failures for the health check to be considered failed after having succeeded. Defaults to 1. Minimum value is 1.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "disableWriteCheck": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Whether to disable write check on database. Defaults to false.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_ImageInfo(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "image": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "lineages": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/client-go/api/v1.Lineage"),
+ },
+ },
+ },
+ },
+ },
+ "pullCredentials": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("kmodules.xyz/client-go/api/v1.PullCredentials"),
+ },
+ },
+ },
+ Required: []string{"image"},
+ },
+ },
+ Dependencies: []string{
+ "kmodules.xyz/client-go/api/v1.Lineage", "kmodules.xyz/client-go/api/v1.PullCredentials"},
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_Lineage(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "chain": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/client-go/api/v1.ObjectInfo"),
+ },
+ },
+ },
+ },
+ },
+ "containers": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "kmodules.xyz/client-go/api/v1.ObjectInfo"},
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_ObjectID(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "group": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "kind": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "namespace": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_ObjectInfo(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "resource": {
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/client-go/api/v1.ResourceID"),
+ },
+ },
+ "ref": {
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/client-go/api/v1.ObjectReference"),
+ },
+ },
+ },
+ Required: []string{"resource", "ref"},
+ },
+ },
+ Dependencies: []string{
+ "kmodules.xyz/client-go/api/v1.ObjectReference", "kmodules.xyz/client-go/api/v1.ResourceID"},
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_ObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "ObjectReference contains enough information to let you inspect or modify the referred object.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "namespace": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"name"},
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_PullCredentials(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "namespace": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "serviceAccountName": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "secretRefs": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
+ },
+ },
+ },
+ },
+ },
+ },
+ Required: []string{"namespace"},
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.LocalObjectReference"},
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_ReadonlyHealthCheckSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "ReadonlyHealthCheckSpec defines attributes of the health check using only read-only checks",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "periodSeconds": {
+ SchemaProps: spec.SchemaProps{
+ Description: "How often (in seconds) to perform the health check. Defaults to 10 seconds. Minimum value is 1.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "timeoutSeconds": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Number of seconds after which the probe times out. Defaults to 10 seconds. Minimum value is 1. It should be less than the periodSeconds.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "failureThreshold": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Minimum consecutive failures for the health check to be considered failed after having succeeded. Defaults to 1. Minimum value is 1.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_ResourceID(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "ResourceID identifies a resource",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "group": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "version": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Name is the plural name of the resource to serve. It must match the name of the CustomResourceDefinition-registration too: plural.group and it must be all lowercase.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "kind": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Kind is the serialized kind of the resource. It is normally CamelCase and singular.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "scope": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"group"},
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_TLSConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "issuerRef": {
+ SchemaProps: spec.SchemaProps{
+ Description: "IssuerRef is a reference to a Certificate Issuer.",
+ Ref: ref("k8s.io/api/core/v1.TypedLocalObjectReference"),
+ },
+ },
+ "certificates": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Certificate provides server and/or client certificate options used by application pods. These options are passed to a cert-manager Certificate object. xref: https://github.com/jetstack/cert-manager/blob/v0.16.0/pkg/apis/certmanager/v1beta1/types_certificate.go#L82-L162",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/client-go/api/v1.CertificateSpec"),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.TypedLocalObjectReference", "kmodules.xyz/client-go/api/v1.CertificateSpec"},
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_TimeOfDay(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "TimeOfDay is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.",
+ Type: TimeOfDay{}.OpenAPISchemaType(),
+ Format: TimeOfDay{}.OpenAPISchemaFormat(),
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_TypedObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "TypedObjectReference represents a typed namespaced object.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "apiGroup": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "kind": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "namespace": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"name"},
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_X509Subject(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "X509Subject Full X509 name specification",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "organizations": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Organizations to be used on the Certificate.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "countries": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Countries to be used on the CertificateSpec.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "organizationalUnits": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Organizational Units to be used on the CertificateSpec.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "localities": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Cities to be used on the CertificateSpec.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "provinces": {
+ SchemaProps: spec.SchemaProps{
+ Description: "State/Provinces to be used on the CertificateSpec.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "streetAddresses": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Street addresses to be used on the CertificateSpec.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "postalCodes": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Postal codes to be used on the CertificateSpec.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "serialNumber": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Serial number to be used on the CertificateSpec.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_stringSetMerger(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ },
+ },
+ }
+}
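Sketch (illustrative only, not part of this patch) of how the generated definitions are typically consumed: merged with a project's own OpenAPI definitions before being handed to kube-openapi. The mergedOpenAPIDefinitions name is an assumption:

package openapi

import (
	common "k8s.io/kube-openapi/pkg/common"
	kmapi "kmodules.xyz/client-go/api/v1"
)

// mergedOpenAPIDefinitions combines the kmodules.xyz/client-go/api/v1 definitions
// with any project-specific definition maps supplied by the caller.
func mergedOpenAPIDefinitions(ref common.ReferenceCallback, extra ...map[string]common.OpenAPIDefinition) map[string]common.OpenAPIDefinition {
	out := kmapi.GetOpenAPIDefinitions(ref) // returns a fresh map, safe to extend
	for _, defs := range extra {
		for name, def := range defs {
			out[name] = def
		}
	}
	return out
}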
diff --git a/vendor/kmodules.xyz/client-go/api/v1/resource.go b/vendor/kmodules.xyz/client-go/api/v1/resource.go
new file mode 100644
index 00000000..ec85b0aa
--- /dev/null
+++ b/vendor/kmodules.xyz/client-go/api/v1/resource.go
@@ -0,0 +1,192 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// ResourceID identifies a resource
+type ResourceID struct {
+ Group string `json:"group" protobuf:"bytes,1,opt,name=group"`
+ Version string `json:"version,omitempty" protobuf:"bytes,2,opt,name=version"`
+ // Name is the plural name of the resource to serve. It must match the name of the CustomResourceDefinition-registration
+ // too: plural.group and it must be all lowercase.
+ Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"`
+ // Kind is the serialized kind of the resource. It is normally CamelCase and singular.
+ Kind string `json:"kind,omitempty" protobuf:"bytes,4,opt,name=kind"`
+ Scope ResourceScope `json:"scope,omitempty" protobuf:"bytes,5,opt,name=scope,casttype=ResourceScope"`
+}
+
+// ResourceScope is an enum defining the different scopes available to a custom resource
+type ResourceScope string
+
+const (
+ ClusterScoped ResourceScope = "Cluster"
+ NamespaceScoped ResourceScope = "Namespaced"
+)
+
+func (r ResourceID) GroupVersion() schema.GroupVersion {
+ return schema.GroupVersion{Group: r.Group, Version: r.Version}
+}
+
+func (r ResourceID) GroupKind() schema.GroupKind {
+ return schema.GroupKind{Group: r.Group, Kind: r.Kind}
+}
+
+func (r ResourceID) GroupResource() schema.GroupResource {
+ return schema.GroupResource{Group: r.Group, Resource: r.Name}
+}
+
+func (r ResourceID) TypeMeta() metav1.TypeMeta {
+ return metav1.TypeMeta{APIVersion: r.GroupVersion().String(), Kind: r.Kind}
+}
+
+func (r ResourceID) GroupVersionResource() schema.GroupVersionResource {
+ return schema.GroupVersionResource{Group: r.Group, Version: r.Version, Resource: r.Name}
+}
+
+func (r ResourceID) GroupVersionKind() schema.GroupVersionKind {
+ return schema.GroupVersionKind{Group: r.Group, Version: r.Version, Kind: r.Kind}
+}
+
+func (r ResourceID) MetaGVR() metav1.GroupVersionResource {
+ return metav1.GroupVersionResource{Group: r.Group, Version: r.Version, Resource: r.Name}
+}
+
+func (r ResourceID) MetaGVK() metav1.GroupVersionKind {
+ return metav1.GroupVersionKind{Group: r.Group, Version: r.Version, Kind: r.Kind}
+}
+
+func NewResourceID(mapping *meta.RESTMapping) *ResourceID {
+ scope := ClusterScoped
+ if mapping.Scope == meta.RESTScopeNamespace {
+ scope = NamespaceScoped
+ }
+ return &ResourceID{
+ Group: mapping.Resource.Group,
+ Version: mapping.Resource.Version,
+ Name: mapping.Resource.Resource,
+ Kind: mapping.GroupVersionKind.Kind,
+ Scope: scope,
+ }
+}
+
+func FromMetaGVR(in metav1.GroupVersionResource) schema.GroupVersionResource {
+ return schema.GroupVersionResource{
+ Group: in.Group,
+ Version: in.Version,
+ Resource: in.Resource,
+ }
+}
+
+func ToMetaGVR(in schema.GroupVersionResource) metav1.GroupVersionResource {
+ return metav1.GroupVersionResource{
+ Group: in.Group,
+ Version: in.Version,
+ Resource: in.Resource,
+ }
+}
+
+func FromMetaGVK(in metav1.GroupVersionKind) schema.GroupVersionKind {
+ return schema.GroupVersionKind{
+ Group: in.Group,
+ Version: in.Version,
+ Kind: in.Kind,
+ }
+}
+
+func ToMetaGVK(in schema.GroupVersionKind) metav1.GroupVersionKind {
+ return metav1.GroupVersionKind{
+ Group: in.Group,
+ Version: in.Version,
+ Kind: in.Kind,
+ }
+}
+
+// FromAPIVersionAndKind returns a GVK representing the provided fields for types that
+// do not use TypeMeta. This method exists to support test types and legacy serializations
+// that have a distinct group and kind.
+func FromAPIVersionAndKind(apiVersion, kind string) metav1.GroupVersionKind {
+ if gv, err := schema.ParseGroupVersion(apiVersion); err == nil {
+ return metav1.GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind}
+ }
+ return metav1.GroupVersionKind{Kind: kind}
+}
+
+func EqualsGVK(a schema.GroupVersionKind, b metav1.GroupVersionKind) bool {
+ return a.Group == b.Group &&
+ a.Version == b.Version &&
+ a.Kind == b.Kind
+}
+
+func EqualsGVR(a schema.GroupVersionResource, b metav1.GroupVersionResource) bool {
+ return a.Group == b.Group &&
+ a.Version == b.Version &&
+ a.Resource == b.Resource
+}
+
+func ExtractResourceID(mapper meta.RESTMapper, in ResourceID) (*ResourceID, error) {
+ if in.Group == "core" {
+ in.Group = ""
+ }
+ if in.Version != "" &&
+ in.Kind != "" &&
+ in.Name != "" &&
+ in.Scope != "" {
+ return &in, nil
+ }
+
+ kindFound := in.Kind != ""
+ resFound := in.Name != ""
+ if kindFound {
+ if resFound {
+ return &in, nil
+ } else {
+ var versions []string
+ if in.Version != "" {
+ versions = append(versions, in.Version)
+ }
+ mapping, err := mapper.RESTMapping(schema.GroupKind{
+ Group: in.Group,
+ Kind: in.Kind,
+ }, versions...)
+ if err != nil {
+ return nil, err
+ }
+ return NewResourceID(mapping), nil
+ }
+ } else {
+ if resFound {
+ gvk, err := mapper.KindFor(in.GroupVersionResource())
+ if err != nil {
+ return nil, err
+ }
+ mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+ if err != nil {
+ return nil, err
+ }
+ return NewResourceID(mapping), nil
+ } else {
+ return nil, fmt.Errorf("missing both Kind and Resource name for %+v", in)
+ }
+ }
+}
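Usage sketch (illustrative only, not part of this patch): resolving a partially specified ResourceID with ExtractResourceID; the RESTMapper is assumed to be supplied by the caller, for example from a controller-runtime manager:

package sketch

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	kmapi "kmodules.xyz/client-go/api/v1"
)

func resolveDeployments(mapper meta.RESTMapper) (*kmapi.ResourceID, error) {
	// Only Group and Kind are known; ExtractResourceID fills in Version,
	// Name (the plural resource) and Scope via the RESTMapper.
	rid, err := kmapi.ExtractResourceID(mapper, kmapi.ResourceID{Group: "apps", Kind: "Deployment"})
	if err != nil {
		return nil, err
	}
	fmt.Println(rid.GroupVersionResource()) // apps/v1, Resource=deployments
	return rid, nil
}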
diff --git a/vendor/kmodules.xyz/client-go/api/v1/timeofday.go b/vendor/kmodules.xyz/client-go/api/v1/timeofday.go
new file mode 100644
index 00000000..88ea1a8b
--- /dev/null
+++ b/vendor/kmodules.xyz/client-go/api/v1/timeofday.go
@@ -0,0 +1,208 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "encoding/json"
+ "time"
+
+ fuzz "github.com/google/gofuzz"
+)
+
+// TimeOfDay is a wrapper around time.Time which supports correct
+// marshaling to YAML and JSON. Wrappers are provided for many
+// of the factory methods that the time package offers.
+//
+// +protobuf.options.marshal=false
+// +protobuf.as=Timestamp
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type TimeOfDay struct {
+ time.Time `protobuf:"-"`
+}
+
+// DeepCopyInto creates a deep-copy of the TimeOfDay value. The underlying time.Time
+// type is effectively immutable in the time API, so it is safe to
+// copy-by-assign, despite the presence of (unexported) Pointer fields.
+func (t *TimeOfDay) DeepCopyInto(out *TimeOfDay) {
+ *out = *t
+}
+
+// NewTime returns a wrapped instance of the provided time
+func NewTime(t time.Time) TimeOfDay {
+ return TimeOfDay{time.Date(0, 0, 0, t.Hour(), t.Minute(), t.Second(), 0, time.UTC)}
+}
+
+// NewTimeInLocation returns a wrapped instance of the provided time according to location
+func NewTimeInLocation(t time.Time, loc *time.Location) TimeOfDay {
+ if loc == nil {
+ loc = time.UTC
+ }
+ t = t.In(loc)
+ return TimeOfDay{time.Date(0, 0, 0, t.Hour(), t.Minute(), t.Second(), 0, loc)}
+}
+
+// Date returns the TimeOfDay corresponding to the supplied parameters
+// by wrapping time.Date.
+func Date(hour, min, sec int) TimeOfDay {
+ return TimeOfDay{time.Date(0, 0, 0, hour, min, sec, 0, time.UTC)}
+}
+
+// Now returns the current time of day in UTC.
+func Now() TimeOfDay {
+ utc := time.Now().UTC()
+ return TimeOfDay{time.Date(0, 0, 0, utc.Hour(), utc.Minute(), utc.Second(), 0, time.UTC)}
+}
+
+// IsZero returns true if the value is nil or time is zero.
+func (t *TimeOfDay) IsZero() bool {
+ if t == nil {
+ return true
+ }
+ return t.Time.IsZero()
+}
+
+// Before reports whether the time instant t is before u.
+func (t *TimeOfDay) Before(u *TimeOfDay) bool {
+ if t != nil && u != nil {
+ return t.Time.Before(u.Time)
+ }
+ return false
+}
+
+// Equal reports whether the time instant t is equal to u.
+func (t *TimeOfDay) Equal(u *TimeOfDay) bool {
+ if t == nil && u == nil {
+ return true
+ }
+ if t != nil && u != nil {
+ return t.Time.Equal(u.Time)
+ }
+ return false
+}
+
+// Unix returns the local time corresponding to the given Unix time
+// by wrapping time.Unix.
+func Unix(sec int64, nsec int64) TimeOfDay {
+ return TimeOfDay{time.Unix(sec, nsec)}
+}
+
+// Copy returns a copy of the TimeOfDay at second-level precision.
+func (t TimeOfDay) Copy() TimeOfDay {
+ copied, _ := time.Parse(time.Kitchen, t.Format(time.Kitchen))
+ return TimeOfDay{copied}
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (t *TimeOfDay) UnmarshalJSON(b []byte) error {
+ if len(b) == 4 && string(b) == "null" {
+ t.Time = time.Time{}
+ return nil
+ }
+
+ var str string
+ err := json.Unmarshal(b, &str)
+ if err != nil {
+ return err
+ }
+
+ pt, err := time.Parse(time.Kitchen, str)
+ if err != nil {
+ return err
+ }
+
+ t.Time = pt.Local()
+ return nil
+}
+
+// UnmarshalQueryParameter converts from a URL query parameter value to an object
+func (t *TimeOfDay) UnmarshalQueryParameter(str string) error {
+ if len(str) == 0 {
+ t.Time = time.Time{}
+ return nil
+ }
+ // Tolerate requests from older clients that used JSON serialization to build query params
+ if len(str) == 4 && str == "null" {
+ t.Time = time.Time{}
+ return nil
+ }
+
+ pt, err := time.Parse(time.Kitchen, str)
+ if err != nil {
+ return err
+ }
+
+ t.Time = pt.Local()
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t TimeOfDay) MarshalJSON() ([]byte, error) {
+ if t.IsZero() {
+ // Encode unset/nil objects as JSON's "null".
+ return []byte("null"), nil
+ }
+ buf := make([]byte, 0, len(time.Kitchen)+2)
+ buf = append(buf, '"')
+ // time cannot contain non escapable JSON characters
+ buf = t.UTC().AppendFormat(buf, time.Kitchen)
+ buf = append(buf, '"')
+ return buf, nil
+}
+
+// ToUnstructured implements the value.UnstructuredConverter interface.
+func (t TimeOfDay) ToUnstructured() interface{} {
+ if t.IsZero() {
+ return nil
+ }
+ buf := make([]byte, 0, len(time.Kitchen))
+ buf = t.UTC().AppendFormat(buf, time.Kitchen)
+ return string(buf)
+}
+
+// OpenAPISchemaType is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+//
+// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
+func (_ TimeOfDay) OpenAPISchemaType() []string { return []string{"string"} }
+
+// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+func (_ TimeOfDay) OpenAPISchemaFormat() string { return "time" }
+
+// MarshalQueryParameter converts to a URL query parameter value
+func (t TimeOfDay) MarshalQueryParameter() (string, error) {
+ if t.IsZero() {
+ // Encode unset/nil objects as an empty string
+ return "", nil
+ }
+
+ return t.UTC().Format(time.Kitchen), nil
+}
+
+// Fuzz satisfies fuzz.Interface.
+func (t *TimeOfDay) Fuzz(c fuzz.Continue) {
+ if t == nil {
+ return
+ }
+ // Allow for about 1000 years of randomness. Leave off nanoseconds
+ // because JSON doesn't represent them so they can't round-trip
+ // properly.
+ t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 0)
+}
+
+// ensure TimeOfDay implements fuzz.Interface
+var _ fuzz.Interface = &TimeOfDay{}
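Usage sketch (illustrative only, not part of this patch): TimeOfDay marshals to the kitchen-clock format ("3:04PM"), which is the point of the wrapper:

package main

import (
	"encoding/json"
	"fmt"

	kmapi "kmodules.xyz/client-go/api/v1"
)

func main() {
	// Marshal a 14:30 time of day; the JSON form is the kitchen format.
	b, _ := json.Marshal(kmapi.Date(14, 30, 0))
	fmt.Println(string(b)) // "2:30PM"

	// Unmarshal it back and print it in UTC to avoid local-zone shifts.
	var t kmapi.TimeOfDay
	if err := json.Unmarshal([]byte(`"2:30PM"`), &t); err != nil {
		panic(err)
	}
	fmt.Println(t.UTC().Format("3:04PM")) // 2:30PM
}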
diff --git a/vendor/kmodules.xyz/client-go/api/v1/zz_generated.deepcopy.go b/vendor/kmodules.xyz/client-go/api/v1/zz_generated.deepcopy.go
new file mode 100644
index 00000000..fd286ac4
--- /dev/null
+++ b/vendor/kmodules.xyz/client-go/api/v1/zz_generated.deepcopy.go
@@ -0,0 +1,469 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CAPIClusterInfo) DeepCopyInto(out *CAPIClusterInfo) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CAPIClusterInfo.
+func (in *CAPIClusterInfo) DeepCopy() *CAPIClusterInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(CAPIClusterInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CertificatePrivateKey) DeepCopyInto(out *CertificatePrivateKey) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificatePrivateKey.
+func (in *CertificatePrivateKey) DeepCopy() *CertificatePrivateKey {
+ if in == nil {
+ return nil
+ }
+ out := new(CertificatePrivateKey)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CertificateSpec) DeepCopyInto(out *CertificateSpec) {
+ *out = *in
+ if in.IssuerRef != nil {
+ in, out := &in.IssuerRef, &out.IssuerRef
+ *out = new(corev1.TypedLocalObjectReference)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Subject != nil {
+ in, out := &in.Subject, &out.Subject
+ *out = new(X509Subject)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Duration != nil {
+ in, out := &in.Duration, &out.Duration
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.RenewBefore != nil {
+ in, out := &in.RenewBefore, &out.RenewBefore
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.DNSNames != nil {
+ in, out := &in.DNSNames, &out.DNSNames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.IPAddresses != nil {
+ in, out := &in.IPAddresses, &out.IPAddresses
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.URIs != nil {
+ in, out := &in.URIs, &out.URIs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.EmailAddresses != nil {
+ in, out := &in.EmailAddresses, &out.EmailAddresses
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.PrivateKey != nil {
+ in, out := &in.PrivateKey, &out.PrivateKey
+ *out = new(CertificatePrivateKey)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSpec.
+func (in *CertificateSpec) DeepCopy() *CertificateSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(CertificateSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterMetadata) DeepCopyInto(out *ClusterMetadata) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMetadata.
+func (in *ClusterMetadata) DeepCopy() *ClusterMetadata {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterMetadata)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Condition) DeepCopyInto(out *Condition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
+func (in *Condition) DeepCopy() *Condition {
+ if in == nil {
+ return nil
+ }
+ out := new(Condition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in Conditions) DeepCopyInto(out *Conditions) {
+ {
+ in := &in
+ *out = make(Conditions, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Conditions.
+func (in Conditions) DeepCopy() Conditions {
+ if in == nil {
+ return nil
+ }
+ out := new(Conditions)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HealthCheckSpec) DeepCopyInto(out *HealthCheckSpec) {
+ *out = *in
+ in.ReadonlyHealthCheckSpec.DeepCopyInto(&out.ReadonlyHealthCheckSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckSpec.
+func (in *HealthCheckSpec) DeepCopy() *HealthCheckSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(HealthCheckSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageInfo) DeepCopyInto(out *ImageInfo) {
+ *out = *in
+ if in.Lineages != nil {
+ in, out := &in.Lineages, &out.Lineages
+ *out = make([]Lineage, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.PullCredentials != nil {
+ in, out := &in.PullCredentials, &out.PullCredentials
+ *out = new(PullCredentials)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageInfo.
+func (in *ImageInfo) DeepCopy() *ImageInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Lineage) DeepCopyInto(out *Lineage) {
+ *out = *in
+ if in.Chain != nil {
+ in, out := &in.Chain, &out.Chain
+ *out = make([]ObjectInfo, len(*in))
+ copy(*out, *in)
+ }
+ if in.Containers != nil {
+ in, out := &in.Containers, &out.Containers
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lineage.
+func (in *Lineage) DeepCopy() *Lineage {
+ if in == nil {
+ return nil
+ }
+ out := new(Lineage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectID) DeepCopyInto(out *ObjectID) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectID.
+func (in *ObjectID) DeepCopy() *ObjectID {
+ if in == nil {
+ return nil
+ }
+ out := new(ObjectID)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectInfo) DeepCopyInto(out *ObjectInfo) {
+ *out = *in
+ out.Resource = in.Resource
+ out.Ref = in.Ref
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectInfo.
+func (in *ObjectInfo) DeepCopy() *ObjectInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(ObjectInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectReference) DeepCopyInto(out *ObjectReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference.
+func (in *ObjectReference) DeepCopy() *ObjectReference {
+ if in == nil {
+ return nil
+ }
+ out := new(ObjectReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PullCredentials) DeepCopyInto(out *PullCredentials) {
+ *out = *in
+ if in.SecretRefs != nil {
+ in, out := &in.SecretRefs, &out.SecretRefs
+ *out = make([]corev1.LocalObjectReference, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PullCredentials.
+func (in *PullCredentials) DeepCopy() *PullCredentials {
+ if in == nil {
+ return nil
+ }
+ out := new(PullCredentials)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReadonlyHealthCheckSpec) DeepCopyInto(out *ReadonlyHealthCheckSpec) {
+ *out = *in
+ if in.PeriodSeconds != nil {
+ in, out := &in.PeriodSeconds, &out.PeriodSeconds
+ *out = new(int32)
+ **out = **in
+ }
+ if in.TimeoutSeconds != nil {
+ in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
+ *out = new(int32)
+ **out = **in
+ }
+ if in.FailureThreshold != nil {
+ in, out := &in.FailureThreshold, &out.FailureThreshold
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReadonlyHealthCheckSpec.
+func (in *ReadonlyHealthCheckSpec) DeepCopy() *ReadonlyHealthCheckSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ReadonlyHealthCheckSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceID) DeepCopyInto(out *ResourceID) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceID.
+func (in *ResourceID) DeepCopy() *ResourceID {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceID)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSConfig) DeepCopyInto(out *TLSConfig) {
+ *out = *in
+ if in.IssuerRef != nil {
+ in, out := &in.IssuerRef, &out.IssuerRef
+ *out = new(corev1.TypedLocalObjectReference)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Certificates != nil {
+ in, out := &in.Certificates, &out.Certificates
+ *out = make([]CertificateSpec, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig.
+func (in *TLSConfig) DeepCopy() *TLSConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(TLSConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeOfDay.
+func (in *TimeOfDay) DeepCopy() *TimeOfDay {
+ if in == nil {
+ return nil
+ }
+ out := new(TimeOfDay)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TypedObjectReference) DeepCopyInto(out *TypedObjectReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedObjectReference.
+func (in *TypedObjectReference) DeepCopy() *TypedObjectReference {
+ if in == nil {
+ return nil
+ }
+ out := new(TypedObjectReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *X509Subject) DeepCopyInto(out *X509Subject) {
+ *out = *in
+ if in.Organizations != nil {
+ in, out := &in.Organizations, &out.Organizations
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Countries != nil {
+ in, out := &in.Countries, &out.Countries
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.OrganizationalUnits != nil {
+ in, out := &in.OrganizationalUnits, &out.OrganizationalUnits
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Localities != nil {
+ in, out := &in.Localities, &out.Localities
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Provinces != nil {
+ in, out := &in.Provinces, &out.Provinces
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.StreetAddresses != nil {
+ in, out := &in.StreetAddresses, &out.StreetAddresses
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.PostalCodes != nil {
+ in, out := &in.PostalCodes, &out.PostalCodes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new X509Subject.
+func (in *X509Subject) DeepCopy() *X509Subject {
+ if in == nil {
+ return nil
+ }
+ out := new(X509Subject)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/kmodules.xyz/client-go/meta/conditions.go b/vendor/kmodules.xyz/client-go/meta/conditions.go
index c78172e3..2c72db47 100644
--- a/vendor/kmodules.xyz/client-go/meta/conditions.go
+++ b/vendor/kmodules.xyz/client-go/meta/conditions.go
@@ -65,13 +65,14 @@ func GetCondition(conditions []metav1.Condition, condType string) (int, *metav1.
// SetCondition adds/updates the desired condition to the condition list. It does nothing if the condition is already in
// its desired state.
func SetCondition(conditions []metav1.Condition, newCondition metav1.Condition) []metav1.Condition {
+ isGenerationUnset := newCondition.ObservedGeneration <= 0
idx, curCond := GetCondition(conditions, newCondition.Type)
// If the current condition is in its desired state, we have nothing to do. Just return the original condition list.
if curCond != nil &&
curCond.Status == newCondition.Status &&
curCond.Reason == newCondition.Reason &&
curCond.Message == newCondition.Message &&
- curCond.ObservedGeneration == newCondition.ObservedGeneration {
+ (isGenerationUnset || curCond.ObservedGeneration == newCondition.ObservedGeneration) {
return conditions
}
// The desired conditions is not in the condition list or is not in its desired state.
@@ -79,7 +80,7 @@ func SetCondition(conditions []metav1.Condition, newCondition metav1.Condition)
newCondition.LastTransitionTime = metav1.Now()
if idx == -1 {
conditions = append(conditions, newCondition)
- } else if newCondition.ObservedGeneration >= curCond.ObservedGeneration {
+ } else if isGenerationUnset || newCondition.ObservedGeneration >= curCond.ObservedGeneration {
// only update if the new condition is based on observed generation at least as updated as the current condition
conditions[idx] = newCondition
}
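
Note on the SetCondition change above: when the caller leaves ObservedGeneration unset (<= 0), the generation comparison is now skipped both in the "already in desired state" check and in the in-place update, so such conditions are always reconciled. A minimal sketch of the intended behavior; only meta.SetCondition itself comes from this diff, the condition values are made up for illustration:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kmmeta "kmodules.xyz/client-go/meta"
)

func main() {
	conds := []metav1.Condition{{
		Type:               "Ready",
		Status:             metav1.ConditionTrue,
		Reason:             "Provisioned",
		ObservedGeneration: 3,
	}}

	// ObservedGeneration is deliberately left at 0, so the patched helper
	// skips the generation comparison and still updates the condition.
	conds = kmmeta.SetCondition(conds, metav1.Condition{
		Type:   "Ready",
		Status: metav1.ConditionFalse,
		Reason: "NodeNotReady",
	})
	_ = conds // conds[0].Status is now ConditionFalse
}
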
diff --git a/vendor/kmodules.xyz/client-go/meta/labels.go b/vendor/kmodules.xyz/client-go/meta/labels.go
new file mode 100644
index 00000000..41b1bc21
--- /dev/null
+++ b/vendor/kmodules.xyz/client-go/meta/labels.go
@@ -0,0 +1,46 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+func LabelsForLabelSelector(sel *metav1.LabelSelector) (map[string]string, bool) {
+ if sel != nil {
+ if len(sel.MatchExpressions) > 0 {
+ expr := sel.MatchExpressions[0]
+ switch expr.Operator {
+ case metav1.LabelSelectorOpIn:
+ return map[string]string{
+ expr.Key: expr.Values[0],
+ }, false
+ case metav1.LabelSelectorOpNotIn:
+ return map[string]string{
+ expr.Key: "not-" + expr.Values[0],
+ }, false
+ case metav1.LabelSelectorOpExists:
+ return map[string]string{
+ expr.Key: "",
+ }, false
+ case metav1.LabelSelectorOpDoesNotExist:
+ return make(map[string]string), false
+ }
+ } else {
+ return sel.MatchLabels, true
+ }
+ }
+ return make(map[string]string), true
+}
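
Note on the new LabelsForLabelSelector helper: it flattens a LabelSelector into a plain label map, and as I read the code the returned bool reports whether that map represents the selector exactly (true for MatchLabels or a nil selector, false whenever the first MatchExpressions entry had to be approximated). A hedged usage sketch with made-up selector values:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kmmeta "kmodules.xyz/client-go/meta"
)

func main() {
	exactSel := &metav1.LabelSelector{MatchLabels: map[string]string{"app": "trickster"}}
	labels, exact := kmmeta.LabelsForLabelSelector(exactSel)
	fmt.Println(labels, exact) // map[app:trickster] true

	exprSel := &metav1.LabelSelector{
		MatchExpressions: []metav1.LabelSelectorRequirement{{
			Key:      "tier",
			Operator: metav1.LabelSelectorOpIn,
			Values:   []string{"backend", "frontend"},
		}},
	}
	labels, exact = kmmeta.LabelsForLabelSelector(exprSel)
	fmt.Println(labels, exact) // map[tier:backend] false (only the first value survives)
}
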
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 89d76cea..f01117a9 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -138,6 +138,9 @@ github.com/emicklei/go-restful/v3/log
# github.com/evanphx/json-patch v5.6.0+incompatible
## explicit
github.com/evanphx/json-patch
+# github.com/evanphx/json-patch/v5 v5.6.0
+## explicit; go 1.12
+github.com/evanphx/json-patch/v5
# github.com/fatih/color v1.13.0
## explicit; go 1.13
github.com/fatih/color
@@ -224,6 +227,9 @@ github.com/gorilla/websocket
# github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef
## explicit
github.com/howeyc/gopass
+# github.com/imdario/mergo v0.3.13 => github.com/imdario/mergo v0.3.6
+## explicit
+github.com/imdario/mergo
# github.com/inconshreveable/mousetrap v1.0.1
## explicit; go 1.18
github.com/inconshreveable/mousetrap
@@ -273,8 +279,6 @@ github.com/moul/gotty-client
# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
## explicit
github.com/munnerz/goautoneg
-# github.com/onsi/ginkgo v1.16.5
-## explicit; go 1.16
# github.com/onsi/gomega v1.20.2
## explicit; go 1.18
# github.com/packethost/packngo v0.13.0
@@ -396,7 +400,7 @@ gomodules.xyz/clock
# gomodules.xyz/flags v0.1.3
## explicit; go 1.16
gomodules.xyz/flags
-# gomodules.xyz/jsonpatch/v2 v2.3.0
+# gomodules.xyz/jsonpatch/v2 v2.4.0
## explicit; go 1.20
gomodules.xyz/jsonpatch/v2
# gomodules.xyz/logs v0.0.6
@@ -611,6 +615,8 @@ k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/meta
k8s.io/apimachinery/pkg/api/resource
k8s.io/apimachinery/pkg/apis/meta/fuzzer
+k8s.io/apimachinery/pkg/apis/meta/internalversion
+k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme
k8s.io/apimachinery/pkg/apis/meta/v1
k8s.io/apimachinery/pkg/apis/meta/v1/unstructured
k8s.io/apimachinery/pkg/apis/meta/v1beta1
@@ -695,6 +701,7 @@ k8s.io/client-go/applyconfigurations/storage/v1
k8s.io/client-go/applyconfigurations/storage/v1alpha1
k8s.io/client-go/applyconfigurations/storage/v1beta1
k8s.io/client-go/discovery
+k8s.io/client-go/dynamic
k8s.io/client-go/kubernetes
k8s.io/client-go/kubernetes/scheme
k8s.io/client-go/kubernetes/typed/admissionregistration/v1
@@ -743,6 +750,7 @@ k8s.io/client-go/kubernetes/typed/scheduling/v1beta1
k8s.io/client-go/kubernetes/typed/storage/v1
k8s.io/client-go/kubernetes/typed/storage/v1alpha1
k8s.io/client-go/kubernetes/typed/storage/v1beta1
+k8s.io/client-go/metadata
k8s.io/client-go/openapi
k8s.io/client-go/pkg/apis/clientauthentication
k8s.io/client-go/pkg/apis/clientauthentication/install
@@ -752,6 +760,7 @@ k8s.io/client-go/pkg/version
k8s.io/client-go/plugin/pkg/client/auth/exec
k8s.io/client-go/rest
k8s.io/client-go/rest/watch
+k8s.io/client-go/restmapper
k8s.io/client-go/tools/clientcmd/api
k8s.io/client-go/tools/metrics
k8s.io/client-go/tools/reference
@@ -792,9 +801,10 @@ k8s.io/utils/internal/third_party/forked/golang/net
k8s.io/utils/net
k8s.io/utils/pointer
k8s.io/utils/strings/slices
-# kmodules.xyz/client-go v0.25.24
+# kmodules.xyz/client-go v0.25.34-0.20230920051128-f758ec0276ab
## explicit; go 1.18
kmodules.xyz/client-go
+kmodules.xyz/client-go/api/v1
kmodules.xyz/client-go/apiextensions
kmodules.xyz/client-go/apiextensions/v1
kmodules.xyz/client-go/meta
@@ -807,6 +817,12 @@ moul.io/anonuuid
# moul.io/srand v1.6.1
## explicit; go 1.13
moul.io/srand
+# sigs.k8s.io/controller-runtime v0.13.1
+## explicit; go 1.17
+sigs.k8s.io/controller-runtime/pkg/client
+sigs.k8s.io/controller-runtime/pkg/client/apiutil
+sigs.k8s.io/controller-runtime/pkg/internal/objectutil
+sigs.k8s.io/controller-runtime/pkg/log
# sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd
## explicit; go 1.18
sigs.k8s.io/json
diff --git a/vendor/sigs.k8s.io/controller-runtime/LICENSE b/vendor/sigs.k8s.io/controller-runtime/LICENSE
new file mode 100644
index 00000000..8dada3ed
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go
new file mode 100644
index 00000000..c92b0eaa
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go
@@ -0,0 +1,196 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package apiutil contains utilities for working with raw Kubernetes
+// API machinery, such as creating RESTMappers and raw REST clients,
+// and extracting the GVK of an object.
+package apiutil
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ "k8s.io/client-go/discovery"
+ clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/restmapper"
+)
+
+var (
+ protobufScheme = runtime.NewScheme()
+ protobufSchemeLock sync.RWMutex
+)
+
+func init() {
+ // Currently only enabled for built-in resources which are guaranteed to implement Protocol Buffers.
+ // For custom resources, CRDs can not support Protocol Buffers but Aggregated API can.
+ // See doc: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility
+ if err := clientgoscheme.AddToScheme(protobufScheme); err != nil {
+ panic(err)
+ }
+}
+
+// AddToProtobufScheme adds the given SchemeBuilder into protobufScheme, which should
+// be additional types that do support protobuf.
+func AddToProtobufScheme(addToScheme func(*runtime.Scheme) error) error {
+ protobufSchemeLock.Lock()
+ defer protobufSchemeLock.Unlock()
+ return addToScheme(protobufScheme)
+}
+
+// NewDiscoveryRESTMapper constructs a new RESTMapper based on discovery
+// information fetched by a new client with the given config.
+func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) {
+ // Get a mapper
+ dc, err := discovery.NewDiscoveryClientForConfig(c)
+ if err != nil {
+ return nil, err
+ }
+ gr, err := restmapper.GetAPIGroupResources(dc)
+ if err != nil {
+ return nil, err
+ }
+ return restmapper.NewDiscoveryRESTMapper(gr), nil
+}
+
+// GVKForObject finds the GroupVersionKind associated with the given object, if there is only a single such GVK.
+func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionKind, error) {
+ // TODO(directxman12): do we want to generalize this to arbitrary container types?
+ // I think we'd need a generalized form of scheme or something. It's a
+ // shame there's not a reliable "GetGVK" interface that works by default
+ // for unpopulated static types and populated "dynamic" types
+ // (unstructured, partial, etc)
+
+ // check for PartialObjectMetadata, which is analogous to unstructured, but isn't handled by ObjectKinds
+ _, isPartial := obj.(*metav1.PartialObjectMetadata) //nolint:ifshort
+ _, isPartialList := obj.(*metav1.PartialObjectMetadataList)
+ if isPartial || isPartialList {
+ // we require that the GVK be populated in order to recognize the object
+ gvk := obj.GetObjectKind().GroupVersionKind()
+ if len(gvk.Kind) == 0 {
+ return schema.GroupVersionKind{}, runtime.NewMissingKindErr("unstructured object has no kind")
+ }
+ if len(gvk.Version) == 0 {
+ return schema.GroupVersionKind{}, runtime.NewMissingVersionErr("unstructured object has no version")
+ }
+ return gvk, nil
+ }
+
+ gvks, isUnversioned, err := scheme.ObjectKinds(obj)
+ if err != nil {
+ return schema.GroupVersionKind{}, err
+ }
+ if isUnversioned {
+ return schema.GroupVersionKind{}, fmt.Errorf("cannot create group-version-kind for unversioned type %T", obj)
+ }
+
+ if len(gvks) < 1 {
+ return schema.GroupVersionKind{}, fmt.Errorf("no group-version-kinds associated with type %T", obj)
+ }
+ if len(gvks) > 1 {
+ // this should only trigger for things like metav1.XYZ --
+ // normal versioned types should be fine
+ return schema.GroupVersionKind{}, fmt.Errorf(
+ "multiple group-version-kinds associated with type %T, refusing to guess at one", obj)
+ }
+ return gvks[0], nil
+}
+
+// RESTClientForGVK constructs a new rest.Interface capable of accessing the resource associated
+// with the given GroupVersionKind. The REST client will be configured to use the negotiated serializer from
+// baseConfig, if set, otherwise a default serializer will be set.
+func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) {
+ return rest.RESTClientFor(createRestConfig(gvk, isUnstructured, baseConfig, codecs))
+}
+
+// serializerWithDecodedGVK is a CodecFactory that overrides the DecoderToVersion of a WithoutConversionCodecFactory
+// in order to avoid clearing the GVK from the decoded object.
+//
+// See https://github.com/kubernetes/kubernetes/issues/80609.
+type serializerWithDecodedGVK struct {
+ serializer.WithoutConversionCodecFactory
+}
+
+// DecoderToVersion returns a decoder that does not do conversion.
+func (f serializerWithDecodedGVK) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder {
+ return serializer
+}
+
+// createRestConfig copies the base config and updates needed fields for a new rest config.
+func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) *rest.Config {
+ gv := gvk.GroupVersion()
+
+ cfg := rest.CopyConfig(baseConfig)
+ cfg.GroupVersion = &gv
+ if gvk.Group == "" {
+ cfg.APIPath = "/api"
+ } else {
+ cfg.APIPath = "/apis"
+ }
+ if cfg.UserAgent == "" {
+ cfg.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+ // TODO(FillZpp): In the long run, we want to check discovery or something to make sure that this is actually true.
+ if cfg.ContentType == "" && !isUnstructured {
+ protobufSchemeLock.RLock()
+ if protobufScheme.Recognizes(gvk) {
+ cfg.ContentType = runtime.ContentTypeProtobuf
+ }
+ protobufSchemeLock.RUnlock()
+ }
+
+ if isUnstructured {
+ // If the object is unstructured, we need to preserve the GVK information.
+ // Use our own custom serializer.
+ cfg.NegotiatedSerializer = serializerWithDecodedGVK{serializer.WithoutConversionCodecFactory{CodecFactory: codecs}}
+ } else {
+ cfg.NegotiatedSerializer = serializerWithTargetZeroingDecode{NegotiatedSerializer: serializer.WithoutConversionCodecFactory{CodecFactory: codecs}}
+ }
+
+ return cfg
+}
+
+type serializerWithTargetZeroingDecode struct {
+ runtime.NegotiatedSerializer
+}
+
+func (s serializerWithTargetZeroingDecode) DecoderToVersion(serializer runtime.Decoder, r runtime.GroupVersioner) runtime.Decoder {
+ return targetZeroingDecoder{upstream: s.NegotiatedSerializer.DecoderToVersion(serializer, r)}
+}
+
+type targetZeroingDecoder struct {
+ upstream runtime.Decoder
+}
+
+func (t targetZeroingDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+ zero(into)
+ return t.upstream.Decode(data, defaults, into)
+}
+
+// zero zeros the value of a pointer.
+func zero(x interface{}) {
+ if x == nil {
+ return
+ }
+ res := reflect.ValueOf(x).Elem()
+ res.Set(reflect.Zero(res.Type()))
+}
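
Note on the vendored apiutil helpers above: GVKForObject resolves a typed object to a single GroupVersionKind via the scheme, and RESTClientForGVK then builds a rest.Interface for that GVK (protobuf is negotiated only for kinds registered in the protobuf scheme). A rough sketch of how they compose; the ConfigMap type and the function name are illustrative only:

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

func restClientForConfigMaps(cfg *rest.Config) (rest.Interface, error) {
	// Resolve the GVK of the typed object from the client-go scheme.
	gvk, err := apiutil.GVKForObject(&corev1.ConfigMap{}, clientgoscheme.Scheme)
	if err != nil {
		return nil, err
	}
	// Build a REST client for that GVK; false means the caller works with typed objects.
	codecs := serializer.NewCodecFactory(clientgoscheme.Scheme)
	return apiutil.RESTClientForGVK(gvk, false, cfg, codecs)
}
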
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go
new file mode 100644
index 00000000..8b7c1c4b
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go
@@ -0,0 +1,290 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apiutil
+
+import (
+ "sync"
+ "sync/atomic"
+
+ "golang.org/x/time/rate"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/discovery"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/restmapper"
+)
+
+// dynamicRESTMapper is a RESTMapper that dynamically discovers resource
+// types at runtime.
+type dynamicRESTMapper struct {
+ mu sync.RWMutex // protects the following fields
+ staticMapper meta.RESTMapper
+ limiter *rate.Limiter
+ newMapper func() (meta.RESTMapper, error)
+
+ lazy bool
+ // Used for lazy init.
+ inited uint32
+ initMtx sync.Mutex
+}
+
+// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper.
+type DynamicRESTMapperOption func(*dynamicRESTMapper) error
+
+// WithLimiter sets the RESTMapper's underlying limiter to lim.
+func WithLimiter(lim *rate.Limiter) DynamicRESTMapperOption {
+ return func(drm *dynamicRESTMapper) error {
+ drm.limiter = lim
+ return nil
+ }
+}
+
+// WithLazyDiscovery prevents the RESTMapper from discovering REST mappings
+// until an API call is made.
+var WithLazyDiscovery DynamicRESTMapperOption = func(drm *dynamicRESTMapper) error {
+ drm.lazy = true
+ return nil
+}
+
+// WithCustomMapper supports setting a custom RESTMapper refresher instead of
+// the default method, which uses a discovery client.
+//
+// This exists mainly for testing, but can be useful if you need tighter control
+// over how discovery is performed, which discovery endpoints are queried, etc.
+func WithCustomMapper(newMapper func() (meta.RESTMapper, error)) DynamicRESTMapperOption {
+ return func(drm *dynamicRESTMapper) error {
+ drm.newMapper = newMapper
+ return nil
+ }
+}
+
+// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic
+// RESTMapper dynamically discovers resource types at runtime. opts
+// configure the RESTMapper.
+func NewDynamicRESTMapper(cfg *rest.Config, opts ...DynamicRESTMapperOption) (meta.RESTMapper, error) {
+ client, err := discovery.NewDiscoveryClientForConfig(cfg)
+ if err != nil {
+ return nil, err
+ }
+ drm := &dynamicRESTMapper{
+ limiter: rate.NewLimiter(rate.Limit(defaultRefillRate), defaultLimitSize),
+ newMapper: func() (meta.RESTMapper, error) {
+ groupResources, err := restmapper.GetAPIGroupResources(client)
+ if err != nil {
+ return nil, err
+ }
+ return restmapper.NewDiscoveryRESTMapper(groupResources), nil
+ },
+ }
+ for _, opt := range opts {
+ if err = opt(drm); err != nil {
+ return nil, err
+ }
+ }
+ if !drm.lazy {
+ if err := drm.setStaticMapper(); err != nil {
+ return nil, err
+ }
+ }
+ return drm, nil
+}
+
+var (
+	// defaultRefillRate is the default rate at which potential calls are
+ // added back to the "bucket" of allowed calls.
+ defaultRefillRate = 5
+ // defaultLimitSize is the default starting/max number of potential calls
+ // per second. Once a call is used, it's added back to the bucket at a rate
+ // of defaultRefillRate per second.
+ defaultLimitSize = 5
+)
+
+// setStaticMapper sets drm's staticMapper by querying its client, regardless
+// of reload backoff.
+func (drm *dynamicRESTMapper) setStaticMapper() error {
+ newMapper, err := drm.newMapper()
+ if err != nil {
+ return err
+ }
+ drm.staticMapper = newMapper
+ return nil
+}
+
+// init initializes drm only once if drm is lazy.
+func (drm *dynamicRESTMapper) init() (err error) {
+ // skip init if drm is not lazy or has initialized
+ if !drm.lazy || atomic.LoadUint32(&drm.inited) != 0 {
+ return nil
+ }
+
+ drm.initMtx.Lock()
+ defer drm.initMtx.Unlock()
+ if drm.inited == 0 {
+ if err = drm.setStaticMapper(); err == nil {
+ atomic.StoreUint32(&drm.inited, 1)
+ }
+ }
+ return err
+}
+
+// checkAndReload attempts to call the given callback, which is assumed to be dependent
+// on the data in the restmapper.
+//
+// If the callback returns an error matching meta.IsNoMatchErr, it will attempt to reload
+// the RESTMapper's data and re-call the callback once that's occurred.
+// If the callback returns any other error, the function will return immediately regardless.
+//
+// It will take care of ensuring that reloads are rate-limited and that extraneous calls
+// aren't made. If a reload would exceed the limiter's rate, it returns the error returned by
+// the callback.
+// It's thread-safe, and worries about thread-safety for the callback (so the callback does
+// not need to attempt to lock the restmapper).
+func (drm *dynamicRESTMapper) checkAndReload(checkNeedsReload func() error) error {
+ // first, check the common path -- data is fresh enough
+ // (use an IIFE for the lock's defer)
+ err := func() error {
+ drm.mu.RLock()
+ defer drm.mu.RUnlock()
+
+ return checkNeedsReload()
+ }()
+
+ needsReload := meta.IsNoMatchError(err)
+ if !needsReload {
+ return err
+ }
+
+ // if the data wasn't fresh, we'll need to try and update it, so grab the lock...
+ drm.mu.Lock()
+ defer drm.mu.Unlock()
+
+ // ... and double-check that we didn't reload in the meantime
+ err = checkNeedsReload()
+ needsReload = meta.IsNoMatchError(err)
+ if !needsReload {
+ return err
+ }
+
+ // we're still stale, so grab a rate-limit token if we can...
+ if !drm.limiter.Allow() {
+ // return error from static mapper here, we have refreshed often enough (exceeding rate of provided limiter)
+		// so that clients can handle this the same way as a "normal" NoResourceMatchError / NoKindMatchError
+ return err
+ }
+
+ // ...reload...
+ if err := drm.setStaticMapper(); err != nil {
+ return err
+ }
+
+ // ...and return the results of the closure regardless
+ return checkNeedsReload()
+}
+
+// TODO: wrap reload errors on NoKindMatchError with go 1.13 errors.
+
+func (drm *dynamicRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+ if err := drm.init(); err != nil {
+ return schema.GroupVersionKind{}, err
+ }
+ var gvk schema.GroupVersionKind
+ err := drm.checkAndReload(func() error {
+ var err error
+ gvk, err = drm.staticMapper.KindFor(resource)
+ return err
+ })
+ return gvk, err
+}
+
+func (drm *dynamicRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
+ if err := drm.init(); err != nil {
+ return nil, err
+ }
+ var gvks []schema.GroupVersionKind
+ err := drm.checkAndReload(func() error {
+ var err error
+ gvks, err = drm.staticMapper.KindsFor(resource)
+ return err
+ })
+ return gvks, err
+}
+
+func (drm *dynamicRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+ if err := drm.init(); err != nil {
+ return schema.GroupVersionResource{}, err
+ }
+
+ var gvr schema.GroupVersionResource
+ err := drm.checkAndReload(func() error {
+ var err error
+ gvr, err = drm.staticMapper.ResourceFor(input)
+ return err
+ })
+ return gvr, err
+}
+
+func (drm *dynamicRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
+ if err := drm.init(); err != nil {
+ return nil, err
+ }
+ var gvrs []schema.GroupVersionResource
+ err := drm.checkAndReload(func() error {
+ var err error
+ gvrs, err = drm.staticMapper.ResourcesFor(input)
+ return err
+ })
+ return gvrs, err
+}
+
+func (drm *dynamicRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) {
+ if err := drm.init(); err != nil {
+ return nil, err
+ }
+ var mapping *meta.RESTMapping
+ err := drm.checkAndReload(func() error {
+ var err error
+ mapping, err = drm.staticMapper.RESTMapping(gk, versions...)
+ return err
+ })
+ return mapping, err
+}
+
+func (drm *dynamicRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) {
+ if err := drm.init(); err != nil {
+ return nil, err
+ }
+ var mappings []*meta.RESTMapping
+ err := drm.checkAndReload(func() error {
+ var err error
+ mappings, err = drm.staticMapper.RESTMappings(gk, versions...)
+ return err
+ })
+ return mappings, err
+}
+
+func (drm *dynamicRESTMapper) ResourceSingularizer(resource string) (string, error) {
+ if err := drm.init(); err != nil {
+ return "", err
+ }
+ var singular string
+ err := drm.checkAndReload(func() error {
+ var err error
+ singular, err = drm.staticMapper.ResourceSingularizer(resource)
+ return err
+ })
+ return singular, err
+}
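
Note on the dynamic RESTMapper above: it re-runs discovery whenever a lookup fails with a no-match error, rate-limited by defaultRefillRate/defaultLimitSize unless a custom limiter is supplied. A hedged sketch of constructing one lazily with an explicit limiter (the limits here are arbitrary):

import (
	"golang.org/x/time/rate"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

func newMapper(cfg *rest.Config) (meta.RESTMapper, error) {
	return apiutil.NewDynamicRESTMapper(cfg,
		apiutil.WithLazyDiscovery, // defer the first discovery call until the mapper is used
		apiutil.WithLimiter(rate.NewLimiter(rate.Limit(2), 2)), // allow roughly two re-discoveries per second
	)
}
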
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go
new file mode 100644
index 00000000..730e0ba9
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go
@@ -0,0 +1,327 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/metadata"
+ "k8s.io/client-go/rest"
+
+ "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+// WarningHandlerOptions are options for configuring a
+// warning handler for the client which is responsible
+// for surfacing API Server warnings.
+type WarningHandlerOptions struct {
+ // SuppressWarnings decides if the warnings from the
+ // API server are suppressed or surfaced in the client.
+ SuppressWarnings bool
+ // AllowDuplicateLogs does not deduplicate the to-be
+	// logged surfaced warning messages. See
+ // log.WarningHandlerOptions for considerations
+ // regarding deduplication
+ AllowDuplicateLogs bool
+}
+
+// Options are creation options for a Client.
+type Options struct {
+ // Scheme, if provided, will be used to map go structs to GroupVersionKinds
+ Scheme *runtime.Scheme
+
+ // Mapper, if provided, will be used to map GroupVersionKinds to Resources
+ Mapper meta.RESTMapper
+
+ // Opts is used to configure the warning handler responsible for
+	// surfacing and handling warning messages sent by the API server.
+ Opts WarningHandlerOptions
+}
+
+// New returns a new Client using the provided config and Options.
+// The returned client reads *and* writes directly from the server
+// (it doesn't use object caches). It understands how to work with
+// normal types (both custom resources and aggregated/built-in resources),
+// as well as unstructured types.
+//
+// In the case of normal types, the scheme will be used to look up the
+// corresponding group, version, and kind for the given type. In the
+// case of unstructured types, the group, version, and kind will be extracted
+// from the corresponding fields on the object.
+func New(config *rest.Config, options Options) (Client, error) {
+ return newClient(config, options)
+}
+
+func newClient(config *rest.Config, options Options) (*client, error) {
+ if config == nil {
+ return nil, fmt.Errorf("must provide non-nil rest.Config to client.New")
+ }
+
+ if !options.Opts.SuppressWarnings {
+ // surface warnings
+ logger := log.Log.WithName("KubeAPIWarningLogger")
+ // Set a WarningHandler, the default WarningHandler
+ // is log.KubeAPIWarningLogger with deduplication enabled.
+ // See log.KubeAPIWarningLoggerOptions for considerations
+ // regarding deduplication.
+ config = rest.CopyConfig(config)
+ config.WarningHandler = log.NewKubeAPIWarningLogger(
+ logger,
+ log.KubeAPIWarningLoggerOptions{
+ Deduplicate: !options.Opts.AllowDuplicateLogs,
+ },
+ )
+ }
+
+ // Init a scheme if none provided
+ if options.Scheme == nil {
+ options.Scheme = scheme.Scheme
+ }
+
+ // Init a Mapper if none provided
+ if options.Mapper == nil {
+ var err error
+ options.Mapper, err = apiutil.NewDynamicRESTMapper(config)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ clientcache := &clientCache{
+ config: config,
+ scheme: options.Scheme,
+ mapper: options.Mapper,
+ codecs: serializer.NewCodecFactory(options.Scheme),
+
+ structuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta),
+ unstructuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta),
+ }
+
+ rawMetaClient, err := metadata.NewForConfig(config)
+ if err != nil {
+ return nil, fmt.Errorf("unable to construct metadata-only client for use as part of client: %w", err)
+ }
+
+ c := &client{
+ typedClient: typedClient{
+ cache: clientcache,
+ paramCodec: runtime.NewParameterCodec(options.Scheme),
+ },
+ unstructuredClient: unstructuredClient{
+ cache: clientcache,
+ paramCodec: noConversionParamCodec{},
+ },
+ metadataClient: metadataClient{
+ client: rawMetaClient,
+ restMapper: options.Mapper,
+ },
+ scheme: options.Scheme,
+ mapper: options.Mapper,
+ }
+
+ return c, nil
+}
+
+var _ Client = &client{}
+
+// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes
+// new clients at the time they are used, and caches the client.
+type client struct {
+ typedClient typedClient
+ unstructuredClient unstructuredClient
+ metadataClient metadataClient
+ scheme *runtime.Scheme
+ mapper meta.RESTMapper
+}
+
+// resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object.
+func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersionKind) {
+ if gvk != schema.EmptyObjectKind.GroupVersionKind() {
+ if v, ok := obj.(schema.ObjectKind); ok {
+ v.SetGroupVersionKind(gvk)
+ }
+ }
+}
+
+// Scheme returns the scheme this client is using.
+func (c *client) Scheme() *runtime.Scheme {
+ return c.scheme
+}
+
+// RESTMapper returns the rest mapper this client is using.
+func (c *client) RESTMapper() meta.RESTMapper {
+ return c.mapper
+}
+
+// Create implements client.Client.
+func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
+ switch obj.(type) {
+ case *unstructured.Unstructured:
+ return c.unstructuredClient.Create(ctx, obj, opts...)
+ case *metav1.PartialObjectMetadata:
+ return fmt.Errorf("cannot create using only metadata")
+ default:
+ return c.typedClient.Create(ctx, obj, opts...)
+ }
+}
+
+// Update implements client.Client.
+func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
+ defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
+ switch obj.(type) {
+ case *unstructured.Unstructured:
+ return c.unstructuredClient.Update(ctx, obj, opts...)
+ case *metav1.PartialObjectMetadata:
+ return fmt.Errorf("cannot update using only metadata -- did you mean to patch?")
+ default:
+ return c.typedClient.Update(ctx, obj, opts...)
+ }
+}
+
+// Delete implements client.Client.
+func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
+ switch obj.(type) {
+ case *unstructured.Unstructured:
+ return c.unstructuredClient.Delete(ctx, obj, opts...)
+ case *metav1.PartialObjectMetadata:
+ return c.metadataClient.Delete(ctx, obj, opts...)
+ default:
+ return c.typedClient.Delete(ctx, obj, opts...)
+ }
+}
+
+// DeleteAllOf implements client.Client.
+func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
+ switch obj.(type) {
+ case *unstructured.Unstructured:
+ return c.unstructuredClient.DeleteAllOf(ctx, obj, opts...)
+ case *metav1.PartialObjectMetadata:
+ return c.metadataClient.DeleteAllOf(ctx, obj, opts...)
+ default:
+ return c.typedClient.DeleteAllOf(ctx, obj, opts...)
+ }
+}
+
+// Patch implements client.Client.
+func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
+ defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
+ switch obj.(type) {
+ case *unstructured.Unstructured:
+ return c.unstructuredClient.Patch(ctx, obj, patch, opts...)
+ case *metav1.PartialObjectMetadata:
+ return c.metadataClient.Patch(ctx, obj, patch, opts...)
+ default:
+ return c.typedClient.Patch(ctx, obj, patch, opts...)
+ }
+}
+
+// Get implements client.Client.
+func (c *client) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
+ switch obj.(type) {
+ case *unstructured.Unstructured:
+ return c.unstructuredClient.Get(ctx, key, obj, opts...)
+ case *metav1.PartialObjectMetadata:
+ // Metadata only object should always preserve the GVK coming in from the caller.
+ defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
+ return c.metadataClient.Get(ctx, key, obj, opts...)
+ default:
+ return c.typedClient.Get(ctx, key, obj, opts...)
+ }
+}
+
+// List implements client.Client.
+func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
+ switch x := obj.(type) {
+ case *unstructured.UnstructuredList:
+ return c.unstructuredClient.List(ctx, obj, opts...)
+ case *metav1.PartialObjectMetadataList:
+ // Metadata only object should always preserve the GVK.
+ gvk := obj.GetObjectKind().GroupVersionKind()
+ defer c.resetGroupVersionKind(obj, gvk)
+
+ // Call the list client.
+ if err := c.metadataClient.List(ctx, obj, opts...); err != nil {
+ return err
+ }
+
+ // Restore the GVK for each item in the list.
+ itemGVK := schema.GroupVersionKind{
+ Group: gvk.Group,
+ Version: gvk.Version,
+ // TODO: this is producing unsafe guesses that don't actually work,
+ // but it matches ~99% of the cases out there.
+ Kind: strings.TrimSuffix(gvk.Kind, "List"),
+ }
+ for i := range x.Items {
+ item := &x.Items[i]
+ item.SetGroupVersionKind(itemGVK)
+ }
+
+ return nil
+ default:
+ return c.typedClient.List(ctx, obj, opts...)
+ }
+}
+
+// Status implements client.StatusClient.
+func (c *client) Status() StatusWriter {
+ return &statusWriter{client: c}
+}
+
+// statusWriter is client.StatusWriter that writes status subresource.
+type statusWriter struct {
+ client *client
+}
+
+// ensure statusWriter implements client.StatusWriter.
+var _ StatusWriter = &statusWriter{}
+
+// Update implements client.StatusWriter.
+func (sw *statusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
+ defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
+ switch obj.(type) {
+ case *unstructured.Unstructured:
+ return sw.client.unstructuredClient.UpdateStatus(ctx, obj, opts...)
+ case *metav1.PartialObjectMetadata:
+ return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?")
+ default:
+ return sw.client.typedClient.UpdateStatus(ctx, obj, opts...)
+ }
+}
+
+// Patch implements client.Client.
+func (sw *statusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
+ defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
+ switch obj.(type) {
+ case *unstructured.Unstructured:
+ return sw.client.unstructuredClient.PatchStatus(ctx, obj, patch, opts...)
+ case *metav1.PartialObjectMetadata:
+ return sw.client.metadataClient.PatchStatus(ctx, obj, patch, opts...)
+ default:
+ return sw.client.typedClient.PatchStatus(ctx, obj, patch, opts...)
+ }
+}
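
Note on the vendored client above: client.New builds an uncached read/write client, defaulting Scheme to the client-go scheme and Mapper to a dynamic RESTMapper when they are left nil. A small usage sketch; the namespace and name are placeholders:

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func readConfigMap(ctx context.Context, cfg *rest.Config) (*corev1.ConfigMap, error) {
	c, err := client.New(cfg, client.Options{}) // nil Scheme/Mapper fall back to defaults
	if err != nil {
		return nil, err
	}
	var cm corev1.ConfigMap
	key := client.ObjectKey{Namespace: "demo", Name: "demo-config"}
	if err := c.Get(ctx, key, &cm); err != nil {
		return nil, err
	}
	return &cm, nil
}
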
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go
new file mode 100644
index 00000000..857a0b38
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go
@@ -0,0 +1,150 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "strings"
+ "sync"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ "k8s.io/client-go/rest"
+ "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+)
+
+// clientCache creates and caches rest clients and metadata for Kubernetes types.
+type clientCache struct {
+ // config is the rest.Config to talk to an apiserver
+ config *rest.Config
+
+ // scheme maps go structs to GroupVersionKinds
+ scheme *runtime.Scheme
+
+ // mapper maps GroupVersionKinds to Resources
+ mapper meta.RESTMapper
+
+ // codecs are used to create a REST client for a gvk
+ codecs serializer.CodecFactory
+
+ // structuredResourceByType caches structured type metadata
+ structuredResourceByType map[schema.GroupVersionKind]*resourceMeta
+ // unstructuredResourceByType caches unstructured type metadata
+ unstructuredResourceByType map[schema.GroupVersionKind]*resourceMeta
+ mu sync.RWMutex
+}
+
+// newResource maps obj to a Kubernetes Resource and constructs a client for that Resource.
+// If the object is a list, the resource represents the item's type instead.
+func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList, isUnstructured bool) (*resourceMeta, error) {
+ if strings.HasSuffix(gvk.Kind, "List") && isList {
+ // if this was a list, treat it as a request for the item's resource
+ gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
+ }
+
+ client, err := apiutil.RESTClientForGVK(gvk, isUnstructured, c.config, c.codecs)
+ if err != nil {
+ return nil, err
+ }
+ mapping, err := c.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+ if err != nil {
+ return nil, err
+ }
+ return &resourceMeta{Interface: client, mapping: mapping, gvk: gvk}, nil
+}
+
+// getResource returns the resource meta information for the given type of object.
+// If the object is a list, the resource represents the item's type instead.
+func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) {
+ gvk, err := apiutil.GVKForObject(obj, c.scheme)
+ if err != nil {
+ return nil, err
+ }
+
+ _, isUnstructured := obj.(*unstructured.Unstructured)
+ _, isUnstructuredList := obj.(*unstructured.UnstructuredList)
+ isUnstructured = isUnstructured || isUnstructuredList
+
+ // It's better to do creation work twice than to not let multiple
+ // people make requests at once
+ c.mu.RLock()
+ resourceByType := c.structuredResourceByType
+ if isUnstructured {
+ resourceByType = c.unstructuredResourceByType
+ }
+ r, known := resourceByType[gvk]
+ c.mu.RUnlock()
+
+ if known {
+ return r, nil
+ }
+
+ // Initialize a new Client
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ r, err = c.newResource(gvk, meta.IsListType(obj), isUnstructured)
+ if err != nil {
+ return nil, err
+ }
+ resourceByType[gvk] = r
+ return r, err
+}
+
+// getObjMeta returns objMeta containing both type and object metadata and state.
+func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) {
+ r, err := c.getResource(obj)
+ if err != nil {
+ return nil, err
+ }
+ m, err := meta.Accessor(obj)
+ if err != nil {
+ return nil, err
+ }
+ return &objMeta{resourceMeta: r, Object: m}, err
+}
+
+// resourceMeta caches state for a Kubernetes type.
+type resourceMeta struct {
+ // client is the rest client used to talk to the apiserver
+ rest.Interface
+ // gvk is the GroupVersionKind of the resourceMeta
+ gvk schema.GroupVersionKind
+ // mapping is the rest mapping
+ mapping *meta.RESTMapping
+}
+
+// isNamespaced returns true if the type is namespaced.
+func (r *resourceMeta) isNamespaced() bool {
+ return r.mapping.Scope.Name() != meta.RESTScopeNameRoot
+}
+
+// resource returns the resource name of the type.
+func (r *resourceMeta) resource() string {
+ return r.mapping.Resource.Resource
+}
+
+// objMeta stores type and object information about a Kubernetes type.
+type objMeta struct {
+ // resourceMeta contains type information for the object
+ *resourceMeta
+
+ // Object contains meta data for the object instance
+ metav1.Object
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go
new file mode 100644
index 00000000..9c292310
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "errors"
+ "net/url"
+
+ "k8s.io/apimachinery/pkg/conversion/queryparams"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var _ runtime.ParameterCodec = noConversionParamCodec{}
+
+// noConversionParamCodec is a no-conversion codec for serializing parameters into URL query strings.
+// It's useful in scenarios with the unstructured client and arbitrary resources.
+type noConversionParamCodec struct{}
+
+func (noConversionParamCodec) EncodeParameters(obj runtime.Object, to schema.GroupVersion) (url.Values, error) {
+ return queryparams.Convert(obj)
+}
+
+func (noConversionParamCodec) DecodeParameters(parameters url.Values, from schema.GroupVersion, into runtime.Object) error {
+ return errors.New("DecodeParameters not implemented on noConversionParamCodec")
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go
new file mode 100644
index 00000000..e0e28850
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package client contains functionality for interacting with Kubernetes API
+// servers.
+//
+// # Clients
+//
+// Clients are split into two interfaces -- Readers and Writers. Readers
+// get and list, while writers create, update, and delete.
+//
+// The New function can be used to create a new client that talks directly
+// to the API server.
+//
+// It is a common pattern in Kubernetes to read from a cache and write to the API
+// server. This pattern is covered by the DelegatingClient type, which can
+// be used to have a client whose Reader is different from the Writer.
+//
+// # Options
+//
+// Many client operations in Kubernetes support options. These options are
+// represented as variadic arguments at the end of a given method call.
+// For instance, to use a label selector on list, you can call
+//
+// err := someReader.List(context.Background(), &podList, client.MatchingLabels{"somelabel": "someval"})
+//
+// # Indexing
+//
+// Indexes may be added to caches using a FieldIndexer. This allows you to easily
+// and efficiently look up objects with certain properties. You can then make
+// use of the index by specifying a field selector on calls to List on the Reader
+// corresponding to the given Cache.
+//
+// For instance, a Secret controller might have an index on the
+// `.spec.volumes.secret.secretName` field in Pod objects, so that it could
+// easily look up all pods that reference a given secret.
+package client
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go
new file mode 100644
index 00000000..14606a57
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "context"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// NewDryRunClient wraps an existing client and enforces DryRun mode
+// on all mutating api calls.
+func NewDryRunClient(c Client) Client {
+ return &dryRunClient{client: c}
+}
+
+var _ Client = &dryRunClient{}
+
+// dryRunClient is a Client that wraps another Client in order to enforce DryRun mode.
+type dryRunClient struct {
+ client Client
+}
+
+// Scheme returns the scheme this client is using.
+func (c *dryRunClient) Scheme() *runtime.Scheme {
+ return c.client.Scheme()
+}
+
+// RESTMapper returns the rest mapper this client is using.
+func (c *dryRunClient) RESTMapper() meta.RESTMapper {
+ return c.client.RESTMapper()
+}
+
+// Create implements client.Client.
+func (c *dryRunClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
+ return c.client.Create(ctx, obj, append(opts, DryRunAll)...)
+}
+
+// Update implements client.Client.
+func (c *dryRunClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
+ return c.client.Update(ctx, obj, append(opts, DryRunAll)...)
+}
+
+// Delete implements client.Client.
+func (c *dryRunClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
+ return c.client.Delete(ctx, obj, append(opts, DryRunAll)...)
+}
+
+// DeleteAllOf implements client.Client.
+func (c *dryRunClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
+ return c.client.DeleteAllOf(ctx, obj, append(opts, DryRunAll)...)
+}
+
+// Patch implements client.Client.
+func (c *dryRunClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
+ return c.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...)
+}
+
+// Get implements client.Client.
+func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
+ return c.client.Get(ctx, key, obj, opts...)
+}
+
+// List implements client.Client.
+func (c *dryRunClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
+ return c.client.List(ctx, obj, opts...)
+}
+
+// Status implements client.StatusClient.
+func (c *dryRunClient) Status() StatusWriter {
+ return &dryRunStatusWriter{client: c.client.Status()}
+}
+
+// ensure dryRunStatusWriter implements client.StatusWriter.
+var _ StatusWriter = &dryRunStatusWriter{}
+
+// dryRunStatusWriter is client.StatusWriter that writes status subresource with dryRun mode
+// enforced.
+type dryRunStatusWriter struct {
+ client StatusWriter
+}
+
+// Update implements client.StatusWriter.
+func (sw *dryRunStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
+ return sw.client.Update(ctx, obj, append(opts, DryRunAll)...)
+}
+
+// Patch implements client.StatusWriter.
+func (sw *dryRunStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
+ return sw.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...)
+}
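
A minimal usage sketch for the dry-run wrapper above, assuming an already constructed client c; the ConfigMap name and namespace are illustrative:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// previewCreate sends the Create with dryRun=All: server-side validation and
// admission run, but nothing is persisted.
func previewCreate(ctx context.Context, c client.Client) error {
	dry := client.NewDryRunClient(c)
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "preview", Namespace: "demo"}, // illustrative names
		Data:       map[string]string{"key": "value"},
	}
	return dry.Create(ctx, cm)
}
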
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go
new file mode 100644
index 00000000..7f8f8f31
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go
@@ -0,0 +1,155 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "context"
+
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/watch"
+)
+
+// ObjectKey identifies a Kubernetes Object.
+type ObjectKey = types.NamespacedName
+
+// ObjectKeyFromObject returns the ObjectKey given a runtime.Object.
+func ObjectKeyFromObject(obj Object) ObjectKey {
+ return ObjectKey{Namespace: obj.GetNamespace(), Name: obj.GetName()}
+}
+
+// Patch is a patch that can be applied to a Kubernetes object.
+type Patch interface {
+ // Type is the PatchType of the patch.
+ Type() types.PatchType
+ // Data is the raw data representing the patch.
+ Data(obj Object) ([]byte, error)
+}
+
+// TODO(directxman12): is there a sane way to deal with get/delete options?
+
+// Reader knows how to read and list Kubernetes objects.
+type Reader interface {
+ // Get retrieves an obj for the given object key from the Kubernetes Cluster.
+ // obj must be a struct pointer so that obj can be updated with the response
+ // returned by the Server.
+ Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error
+
+ // List retrieves a list of objects for a given namespace and list options. On a
+ // successful call, Items field in the list will be populated with the
+ // result returned from the server.
+ List(ctx context.Context, list ObjectList, opts ...ListOption) error
+}
+
+// Writer knows how to create, delete, and update Kubernetes objects.
+type Writer interface {
+ // Create saves the object obj in the Kubernetes cluster.
+ Create(ctx context.Context, obj Object, opts ...CreateOption) error
+
+ // Delete deletes the given obj from Kubernetes cluster.
+ Delete(ctx context.Context, obj Object, opts ...DeleteOption) error
+
+ // Update updates the given obj in the Kubernetes cluster. obj must be a
+ // struct pointer so that obj can be updated with the content returned by the Server.
+ Update(ctx context.Context, obj Object, opts ...UpdateOption) error
+
+ // Patch patches the given obj in the Kubernetes cluster. obj must be a
+ // struct pointer so that obj can be updated with the content returned by the Server.
+ Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error
+
+ // DeleteAllOf deletes all objects of the given type matching the given options.
+ DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error
+}
+
+// StatusClient knows how to create a client which can update the status subresource
+// for Kubernetes objects.
+type StatusClient interface {
+ Status() StatusWriter
+}
+
+// StatusWriter knows how to update status subresource of a Kubernetes object.
+type StatusWriter interface {
+ // Update updates the fields corresponding to the status subresource for the
+ // given obj. obj must be a struct pointer so that obj can be updated
+ // with the content returned by the Server.
+ Update(ctx context.Context, obj Object, opts ...UpdateOption) error
+
+ // Patch patches the given object's subresource. obj must be a struct
+ // pointer so that obj can be updated with the content returned by the
+ // Server.
+ Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error
+}
+
+// Client knows how to perform CRUD operations on Kubernetes objects.
+type Client interface {
+ Reader
+ Writer
+ StatusClient
+
+ // Scheme returns the scheme this client is using.
+ Scheme() *runtime.Scheme
+ // RESTMapper returns the rest mapper this client is using.
+ RESTMapper() meta.RESTMapper
+}
+
+// WithWatch supports Watch on top of the CRUD operations supported by
+// the normal Client. Its intended use-case are CLI apps that need to wait for
+// events.
+type WithWatch interface {
+ Client
+ Watch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error)
+}
+
+// IndexerFunc knows how to take an object and turn it into a series
+// of non-namespaced keys. Namespaced objects are automatically given
+// namespaced and non-namespaced variants, so keys do not need to include the namespace.
+type IndexerFunc func(Object) []string
+
+// FieldIndexer knows how to index over a particular "field" such that it
+// can later be used by a field selector.
+type FieldIndexer interface {
+ // IndexField adds an index with the given field name on the given object type
+ // by using the given function to extract the value for that field. If you want
+ // compatibility with the Kubernetes API server, only return one key, and only use
+ // fields that the API server supports. Otherwise, you can return multiple keys,
+ // and "equality" in the field selector means that at least one key matches the value.
+ // The FieldIndexer will automatically take care of indexing over namespace
+ // and supporting efficient all-namespace queries.
+ IndexField(ctx context.Context, obj Object, field string, extractValue IndexerFunc) error
+}
+
+// IgnoreNotFound returns nil on NotFound errors.
+// All other values that are not NotFound errors or nil are returned unmodified.
+func IgnoreNotFound(err error) error {
+ if apierrors.IsNotFound(err) {
+ return nil
+ }
+ return err
+}
+
+// IgnoreAlreadyExists returns nil on AlreadyExists errors.
+// All other values that are not AlreadyExists errors or nil are returned unmodified.
+func IgnoreAlreadyExists(err error) error {
+ if apierrors.IsAlreadyExists(err) {
+ return nil
+ }
+
+ return err
+}
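
A small sketch of the IgnoreNotFound helper in a fetch function; the Secret type and the idea of treating a missing object as optional are illustrative assumptions:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// fetchOptionalSecret returns nil (and no error) when the Secret is absent,
// letting callers treat "not found" as an expected condition.
func fetchOptionalSecret(ctx context.Context, r client.Reader, key client.ObjectKey) (*corev1.Secret, error) {
	var s corev1.Secret
	if err := r.Get(ctx, key, &s); err != nil {
		return nil, client.IgnoreNotFound(err)
	}
	return &s, nil
}
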
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go
new file mode 100644
index 00000000..2854556f
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go
@@ -0,0 +1,196 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/metadata"
+)
+
+// TODO(directxman12): we could rewrite this on top of the low-level REST
+// client to avoid the extra shallow copy at the end, but I'm not sure it's
+// worth it -- the metadata client deals with falling back to loading the whole
+// object on older API servers, etc, and we'd have to reproduce that.
+
+// metadataClient is a client that reads & writes metadata-only requests to/from the API server.
+type metadataClient struct {
+ client metadata.Interface
+ restMapper meta.RESTMapper
+}
+
+func (mc *metadataClient) getResourceInterface(gvk schema.GroupVersionKind, ns string) (metadata.ResourceInterface, error) {
+ mapping, err := mc.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+ if err != nil {
+ return nil, err
+ }
+ if mapping.Scope.Name() == meta.RESTScopeNameRoot {
+ return mc.client.Resource(mapping.Resource), nil
+ }
+ return mc.client.Resource(mapping.Resource).Namespace(ns), nil
+}
+
+// Delete implements client.Client.
+func (mc *metadataClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
+ metadata, ok := obj.(*metav1.PartialObjectMetadata)
+ if !ok {
+ return fmt.Errorf("metadata client did not understand object: %T", obj)
+ }
+
+ resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), metadata.Namespace)
+ if err != nil {
+ return err
+ }
+
+ deleteOpts := DeleteOptions{}
+ deleteOpts.ApplyOptions(opts)
+
+ return resInt.Delete(ctx, metadata.Name, *deleteOpts.AsDeleteOptions())
+}
+
+// DeleteAllOf implements client.Client.
+func (mc *metadataClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
+ metadata, ok := obj.(*metav1.PartialObjectMetadata)
+ if !ok {
+ return fmt.Errorf("metadata client did not understand object: %T", obj)
+ }
+
+ deleteAllOfOpts := DeleteAllOfOptions{}
+ deleteAllOfOpts.ApplyOptions(opts)
+
+ resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), deleteAllOfOpts.ListOptions.Namespace)
+ if err != nil {
+ return err
+ }
+
+ return resInt.DeleteCollection(ctx, *deleteAllOfOpts.AsDeleteOptions(), *deleteAllOfOpts.AsListOptions())
+}
+
+// Patch implements client.Client.
+func (mc *metadataClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
+ metadata, ok := obj.(*metav1.PartialObjectMetadata)
+ if !ok {
+ return fmt.Errorf("metadata client did not understand object: %T", obj)
+ }
+
+ gvk := metadata.GroupVersionKind()
+ resInt, err := mc.getResourceInterface(gvk, metadata.Namespace)
+ if err != nil {
+ return err
+ }
+
+ data, err := patch.Data(obj)
+ if err != nil {
+ return err
+ }
+
+ patchOpts := &PatchOptions{}
+ patchOpts.ApplyOptions(opts)
+
+ res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions())
+ if err != nil {
+ return err
+ }
+ *metadata = *res
+ metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata
+ return nil
+}
+
+// Get implements client.Client.
+func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
+ metadata, ok := obj.(*metav1.PartialObjectMetadata)
+ if !ok {
+ return fmt.Errorf("metadata client did not understand object: %T", obj)
+ }
+
+ gvk := metadata.GroupVersionKind()
+
+ getOpts := GetOptions{}
+ getOpts.ApplyOptions(opts)
+
+ resInt, err := mc.getResourceInterface(gvk, key.Namespace)
+ if err != nil {
+ return err
+ }
+
+ res, err := resInt.Get(ctx, key.Name, *getOpts.AsGetOptions())
+ if err != nil {
+ return err
+ }
+ *metadata = *res
+ metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata
+ return nil
+}
+
+// List implements client.Client.
+func (mc *metadataClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
+ metadata, ok := obj.(*metav1.PartialObjectMetadataList)
+ if !ok {
+ return fmt.Errorf("metadata client did not understand object: %T", obj)
+ }
+
+ gvk := metadata.GroupVersionKind()
+ gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
+
+ listOpts := ListOptions{}
+ listOpts.ApplyOptions(opts)
+
+ resInt, err := mc.getResourceInterface(gvk, listOpts.Namespace)
+ if err != nil {
+ return err
+ }
+
+ res, err := resInt.List(ctx, *listOpts.AsListOptions())
+ if err != nil {
+ return err
+ }
+ *metadata = *res
+ metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata
+ return nil
+}
+
+func (mc *metadataClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
+ metadata, ok := obj.(*metav1.PartialObjectMetadata)
+ if !ok {
+ return fmt.Errorf("metadata client did not understand object: %T", obj)
+ }
+
+ gvk := metadata.GroupVersionKind()
+ resInt, err := mc.getResourceInterface(gvk, metadata.Namespace)
+ if err != nil {
+ return err
+ }
+
+ data, err := patch.Data(obj)
+ if err != nil {
+ return err
+ }
+
+ patchOpts := &PatchOptions{}
+ res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions(), "status")
+ if err != nil {
+ return err
+ }
+ *metadata = *res
+ metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata
+ return nil
+}
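
The metadata client is not used directly; a sketch of how it is typically reached, assuming the full client returned by client.New routes *metav1.PartialObjectMetadata requests to it (the Pod GVK below is an illustrative choice):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// getPodMetadata fetches only object metadata. The GVK must be set up front so
// the REST mapping can be resolved; as noted above, it is restored on the result.
func getPodMetadata(ctx context.Context, c client.Client, key client.ObjectKey) (*metav1.PartialObjectMetadata, error) {
	obj := &metav1.PartialObjectMetadata{}
	obj.SetGroupVersionKind(schema.GroupVersionKind{Version: "v1", Kind: "Pod"})
	if err := c.Get(ctx, key, obj); err != nil {
		return nil, err
	}
	return obj, nil
}
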
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go
new file mode 100644
index 00000000..674fe253
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go
@@ -0,0 +1,213 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "context"
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/internal/objectutil"
+)
+
+// NewNamespacedClient wraps an existing client enforcing the namespace value.
+// All functions using this client will have the same namespace declared here.
+func NewNamespacedClient(c Client, ns string) Client {
+ return &namespacedClient{
+ client: c,
+ namespace: ns,
+ }
+}
+
+var _ Client = &namespacedClient{}
+
+// namespacedClient is a Client that wraps another Client in order to enforce the specified namespace value.
+type namespacedClient struct {
+ namespace string
+ client Client
+}
+
+// Scheme returns the scheme this client is using.
+func (n *namespacedClient) Scheme() *runtime.Scheme {
+ return n.client.Scheme()
+}
+
+// RESTMapper returns the rest mapper this client is using.
+func (n *namespacedClient) RESTMapper() meta.RESTMapper {
+ return n.client.RESTMapper()
+}
+
+// Create implements client.Client.
+func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
+ isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
+ if err != nil {
+ return fmt.Errorf("error finding the scope of the object: %w", err)
+ }
+
+ objectNamespace := obj.GetNamespace()
+ if objectNamespace != n.namespace && objectNamespace != "" {
+ return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace)
+ }
+
+ if isNamespaceScoped && objectNamespace == "" {
+ obj.SetNamespace(n.namespace)
+ }
+ return n.client.Create(ctx, obj, opts...)
+}
+
+// Update implements client.Client.
+func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
+ isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
+ if err != nil {
+ return fmt.Errorf("error finding the scope of the object: %w", err)
+ }
+
+ objectNamespace := obj.GetNamespace()
+ if objectNamespace != n.namespace && objectNamespace != "" {
+ return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace)
+ }
+
+ if isNamespaceScoped && objectNamespace == "" {
+ obj.SetNamespace(n.namespace)
+ }
+ return n.client.Update(ctx, obj, opts...)
+}
+
+// Delete implements client.Client.
+func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
+ isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
+ if err != nil {
+ return fmt.Errorf("error finding the scope of the object: %w", err)
+ }
+
+ objectNamespace := obj.GetNamespace()
+ if objectNamespace != n.namespace && objectNamespace != "" {
+ return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace)
+ }
+
+ if isNamespaceScoped && objectNamespace == "" {
+ obj.SetNamespace(n.namespace)
+ }
+ return n.client.Delete(ctx, obj, opts...)
+}
+
+// DeleteAllOf implements client.Client.
+func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
+ isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
+ if err != nil {
+ return fmt.Errorf("error finding the scope of the object: %w", err)
+ }
+
+ if isNamespaceScoped {
+ opts = append(opts, InNamespace(n.namespace))
+ }
+ return n.client.DeleteAllOf(ctx, obj, opts...)
+}
+
+// Patch implements client.Client.
+func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
+ isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
+ if err != nil {
+ return fmt.Errorf("error finding the scope of the object: %w", err)
+ }
+
+ objectNamespace := obj.GetNamespace()
+ if objectNamespace != n.namespace && objectNamespace != "" {
+ return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace)
+ }
+
+ if isNamespaceScoped && objectNamespace == "" {
+ obj.SetNamespace(n.namespace)
+ }
+ return n.client.Patch(ctx, obj, patch, opts...)
+}
+
+// Get implements client.Client.
+func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
+ isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
+ if err != nil {
+ return fmt.Errorf("error finding the scope of the object: %w", err)
+ }
+ if isNamespaceScoped {
+ if key.Namespace != "" && key.Namespace != n.namespace {
+ return fmt.Errorf("namespace %s provided for the object %s does not match the namespace %s on the client", key.Namespace, obj.GetName(), n.namespace)
+ }
+ key.Namespace = n.namespace
+ }
+ return n.client.Get(ctx, key, obj, opts...)
+}
+
+// List implements client.Client.
+func (n *namespacedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
+ if n.namespace != "" {
+ opts = append(opts, InNamespace(n.namespace))
+ }
+ return n.client.List(ctx, obj, opts...)
+}
+
+// Status implements client.StatusClient.
+func (n *namespacedClient) Status() StatusWriter {
+ return &namespacedClientStatusWriter{StatusClient: n.client.Status(), namespace: n.namespace, namespacedclient: n}
+}
+
+// ensure namespacedClientStatusWriter implements client.StatusWriter.
+var _ StatusWriter = &namespacedClientStatusWriter{}
+
+type namespacedClientStatusWriter struct {
+ StatusClient StatusWriter
+ namespace string
+ namespacedclient Client
+}
+
+// Update implements client.StatusWriter.
+func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
+ isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper())
+
+ if err != nil {
+ return fmt.Errorf("error finding the scope of the object: %w", err)
+ }
+
+ objectNamespace := obj.GetNamespace()
+ if objectNamespace != nsw.namespace && objectNamespace != "" {
+ return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace)
+ }
+
+ if isNamespaceScoped && objectNamespace == "" {
+ obj.SetNamespace(nsw.namespace)
+ }
+ return nsw.StatusClient.Update(ctx, obj, opts...)
+}
+
+// Patch implements client.StatusWriter.
+func (nsw *namespacedClientStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
+ isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper())
+
+ if err != nil {
+ return fmt.Errorf("error finding the scope of the object: %w", err)
+ }
+
+ objectNamespace := obj.GetNamespace()
+ if objectNamespace != nsw.namespace && objectNamespace != "" {
+ return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace)
+ }
+
+ if isNamespaceScoped && objectNamespace == "" {
+ obj.SetNamespace(nsw.namespace)
+ }
+ return nsw.StatusClient.Patch(ctx, obj, patch, opts...)
+}
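
A short sketch of the namespace-enforcing wrapper above; the "tenant-a" namespace and the ConfigMap name are illustrative:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// createInTenant leaves the namespace empty on the object, so the wrapper
// fills in "tenant-a"; an object naming a different namespace would be rejected.
func createInTenant(ctx context.Context, c client.Client) error {
	nsClient := client.NewNamespacedClient(c, "tenant-a") // illustrative namespace
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "settings"}}
	return nsClient.Create(ctx, cm)
}
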
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go
new file mode 100644
index 00000000..31e334d6
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go
@@ -0,0 +1,77 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// Object is a Kubernetes object; it allows functions to work generically with
+// any resource that implements both Object interfaces.
+//
+// Semantically, these are objects which are both serializable (runtime.Object)
+// and identifiable (metav1.Object) -- think any object which you could write
+// as YAML or JSON, and then `kubectl create`.
+//
+// Code-wise, this means that any object which embeds both ObjectMeta (which
+// provides metav1.Object) and TypeMeta (which provides half of runtime.Object)
+// and has a `DeepCopyObject` implementation (the other half of runtime.Object)
+// will implement this by default.
+//
+// For example, nearly all the built-in types are Objects, as well as all
+// KubeBuilder-generated CRDs (unless you do something real funky to them).
+//
+// By and large, most things that implement runtime.Object also implement
+// Object -- it's very rare to have *just* a runtime.Object implementation (the
+// cases tend to be funky built-in types like Webhook payloads that don't have
+// a `metadata` field).
+//
+// Notice that XYZList types are distinct: they implement ObjectList instead.
+type Object interface {
+ metav1.Object
+ runtime.Object
+}
+
+// ObjectList is a Kubernetes object list; it allows functions to work
+// generically with any resource that implements both runtime.Object and
+// metav1.ListInterface interfaces.
+//
+// Semantically, this is any object which may be serialized (ObjectMeta), and
+// is a kubernetes list wrapper (has items, pagination fields, etc) -- think
+// the wrapper used in a response from a `kubectl list --output yaml` call.
+//
+// Code-wise, this means that any object which embeds both ListMeta (which
+// provides metav1.ListInterface) and TypeMeta (which provides half of
+// runtime.Object) and has a `DeepCopyObject` implementation (the other half of
+// runtime.Object) will implement this by default.
+//
+// For example, nearly all the built-in XYZList types are ObjectLists, as well
+// as the XYZList types for all KubeBuilder-generated CRDs (unless you do
+// something real funky to them).
+//
+// By and large, most things that are XYZList and implement runtime.Object also
+// implement ObjectList -- it's very rare to have *just* a runtime.Object
+// implementation (the cases tend to be funky built-in types like Webhook
+// payloads that don't have a `metadata` field).
+//
+// This is similar to Object, which is almost always implemented by the items
+// in the list themselves.
+type ObjectList interface {
+ metav1.ListInterface
+ runtime.Object
+}
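
As a concrete illustration of the comments above, built-in types satisfy these interfaces without extra code; the compile-time assertions below are a sketch using core/v1 types:

package example

import (
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// Pod embeds ObjectMeta and TypeMeta and has a generated DeepCopyObject, so it
// is an Object; PodList embeds ListMeta and TypeMeta, so it is an ObjectList.
var (
	_ client.Object     = &corev1.Pod{}
	_ client.ObjectList = &corev1.PodList{}
)
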
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go
new file mode 100644
index 00000000..495b8694
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go
@@ -0,0 +1,742 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/fields"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/selection"
+)
+
+// {{{ "Functional" Option Interfaces
+
+// CreateOption is some configuration that modifies options for a create request.
+type CreateOption interface {
+ // ApplyToCreate applies this configuration to the given create options.
+ ApplyToCreate(*CreateOptions)
+}
+
+// DeleteOption is some configuration that modifies options for a delete request.
+type DeleteOption interface {
+ // ApplyToDelete applies this configuration to the given delete options.
+ ApplyToDelete(*DeleteOptions)
+}
+
+// GetOption is some configuration that modifies options for a get request.
+type GetOption interface {
+ // ApplyToGet applies this configuration to the given get options.
+ ApplyToGet(*GetOptions)
+}
+
+// ListOption is some configuration that modifies options for a list request.
+type ListOption interface {
+ // ApplyToList applies this configuration to the given list options.
+ ApplyToList(*ListOptions)
+}
+
+// UpdateOption is some configuration that modifies options for an update request.
+type UpdateOption interface {
+ // ApplyToUpdate applies this configuration to the given update options.
+ ApplyToUpdate(*UpdateOptions)
+}
+
+// PatchOption is some configuration that modifies options for a patch request.
+type PatchOption interface {
+ // ApplyToPatch applies this configuration to the given patch options.
+ ApplyToPatch(*PatchOptions)
+}
+
+// DeleteAllOfOption is some configuration that modifies options for a delete request.
+type DeleteAllOfOption interface {
+ // ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
+ ApplyToDeleteAllOf(*DeleteAllOfOptions)
+}
+
+// }}}
+
+// {{{ Multi-Type Options
+
+// DryRunAll sets the "dry run" option to "all", executing all
+// validation, etc without persisting the change to storage.
+var DryRunAll = dryRunAll{}
+
+type dryRunAll struct{}
+
+// ApplyToCreate applies this configuration to the given create options.
+func (dryRunAll) ApplyToCreate(opts *CreateOptions) {
+ opts.DryRun = []string{metav1.DryRunAll}
+}
+
+// ApplyToUpdate applies this configuration to the given update options.
+func (dryRunAll) ApplyToUpdate(opts *UpdateOptions) {
+ opts.DryRun = []string{metav1.DryRunAll}
+}
+
+// ApplyToPatch applies this configuration to the given patch options.
+func (dryRunAll) ApplyToPatch(opts *PatchOptions) {
+ opts.DryRun = []string{metav1.DryRunAll}
+}
+
+// ApplyToDelete applies this configuration to the given delete options.
+func (dryRunAll) ApplyToDelete(opts *DeleteOptions) {
+ opts.DryRun = []string{metav1.DryRunAll}
+}
+func (dryRunAll) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+ opts.DryRun = []string{metav1.DryRunAll}
+}
+
+// FieldOwner sets the field manager name for the given server-side apply patch.
+type FieldOwner string
+
+// ApplyToPatch applies this configuration to the given patch options.
+func (f FieldOwner) ApplyToPatch(opts *PatchOptions) {
+ opts.FieldManager = string(f)
+}
+
+// ApplyToCreate applies this configuration to the given create options.
+func (f FieldOwner) ApplyToCreate(opts *CreateOptions) {
+ opts.FieldManager = string(f)
+}
+
+// ApplyToUpdate applies this configuration to the given update options.
+func (f FieldOwner) ApplyToUpdate(opts *UpdateOptions) {
+ opts.FieldManager = string(f)
+}
+
+// }}}
+
+// {{{ Create Options
+
+// CreateOptions contains options for create requests. It's generally a subset
+// of metav1.CreateOptions.
+type CreateOptions struct {
+ // When present, indicates that modifications should not be
+ // persisted. An invalid or unrecognized dryRun directive will
+ // result in an error response and no further processing of the
+ // request. Valid values are:
+ // - All: all dry run stages will be processed
+ DryRun []string
+
+ // FieldManager is the name of the user or component submitting
+ // this request. It must be set with server-side apply.
+ FieldManager string
+
+ // Raw represents raw CreateOptions, as passed to the API server.
+ Raw *metav1.CreateOptions
+}
+
+// AsCreateOptions returns these options as a metav1.CreateOptions.
+// This may mutate the Raw field.
+func (o *CreateOptions) AsCreateOptions() *metav1.CreateOptions {
+ if o == nil {
+ return &metav1.CreateOptions{}
+ }
+ if o.Raw == nil {
+ o.Raw = &metav1.CreateOptions{}
+ }
+
+ o.Raw.DryRun = o.DryRun
+ o.Raw.FieldManager = o.FieldManager
+ return o.Raw
+}
+
+// ApplyOptions applies the given create options on these options,
+// and then returns itself (for convenient chaining).
+func (o *CreateOptions) ApplyOptions(opts []CreateOption) *CreateOptions {
+ for _, opt := range opts {
+ opt.ApplyToCreate(o)
+ }
+ return o
+}
+
+// ApplyToCreate implements CreateOption.
+func (o *CreateOptions) ApplyToCreate(co *CreateOptions) {
+ if o.DryRun != nil {
+ co.DryRun = o.DryRun
+ }
+ if o.FieldManager != "" {
+ co.FieldManager = o.FieldManager
+ }
+ if o.Raw != nil {
+ co.Raw = o.Raw
+ }
+}
+
+var _ CreateOption = &CreateOptions{}
+
+// }}}
+
+// {{{ Delete Options
+
+// DeleteOptions contains options for delete requests. It's generally a subset
+// of metav1.DeleteOptions.
+type DeleteOptions struct {
+ // GracePeriodSeconds is the duration in seconds before the object should be
+ // deleted. Value must be non-negative integer. The value zero indicates
+ // delete immediately. If this value is nil, the default grace period for the
+ // specified type will be used.
+ GracePeriodSeconds *int64
+
+ // Preconditions must be fulfilled before a deletion is carried out. If not
+ // possible, a 409 Conflict status will be returned.
+ Preconditions *metav1.Preconditions
+
+ // PropagationPolicy determines whether and how garbage collection will be
+ // performed. Either this field or OrphanDependents may be set, but not both.
+ // The default policy is decided by the existing finalizer set in the
+ // metadata.finalizers and the resource-specific default policy.
+ // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' -
+ // allow the garbage collector to delete the dependents in the background;
+ // 'Foreground' - a cascading policy that deletes all dependents in the
+ // foreground.
+ PropagationPolicy *metav1.DeletionPropagation
+
+ // Raw represents raw DeleteOptions, as passed to the API server.
+ Raw *metav1.DeleteOptions
+
+ // When present, indicates that modifications should not be
+ // persisted. An invalid or unrecognized dryRun directive will
+ // result in an error response and no further processing of the
+ // request. Valid values are:
+ // - All: all dry run stages will be processed
+ DryRun []string
+}
+
+// AsDeleteOptions returns these options as a metav1.DeleteOptions.
+// This may mutate the Raw field.
+func (o *DeleteOptions) AsDeleteOptions() *metav1.DeleteOptions {
+ if o == nil {
+ return &metav1.DeleteOptions{}
+ }
+ if o.Raw == nil {
+ o.Raw = &metav1.DeleteOptions{}
+ }
+
+ o.Raw.GracePeriodSeconds = o.GracePeriodSeconds
+ o.Raw.Preconditions = o.Preconditions
+ o.Raw.PropagationPolicy = o.PropagationPolicy
+ o.Raw.DryRun = o.DryRun
+ return o.Raw
+}
+
+// ApplyOptions applies the given delete options on these options,
+// and then returns itself (for convenient chaining).
+func (o *DeleteOptions) ApplyOptions(opts []DeleteOption) *DeleteOptions {
+ for _, opt := range opts {
+ opt.ApplyToDelete(o)
+ }
+ return o
+}
+
+var _ DeleteOption = &DeleteOptions{}
+
+// ApplyToDelete implements DeleteOption.
+func (o *DeleteOptions) ApplyToDelete(do *DeleteOptions) {
+ if o.GracePeriodSeconds != nil {
+ do.GracePeriodSeconds = o.GracePeriodSeconds
+ }
+ if o.Preconditions != nil {
+ do.Preconditions = o.Preconditions
+ }
+ if o.PropagationPolicy != nil {
+ do.PropagationPolicy = o.PropagationPolicy
+ }
+ if o.Raw != nil {
+ do.Raw = o.Raw
+ }
+ if o.DryRun != nil {
+ do.DryRun = o.DryRun
+ }
+}
+
+// GracePeriodSeconds sets the grace period for the deletion
+// to the given number of seconds.
+type GracePeriodSeconds int64
+
+// ApplyToDelete applies this configuration to the given delete options.
+func (s GracePeriodSeconds) ApplyToDelete(opts *DeleteOptions) {
+ secs := int64(s)
+ opts.GracePeriodSeconds = &secs
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
+func (s GracePeriodSeconds) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+ s.ApplyToDelete(&opts.DeleteOptions)
+}
+
+// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
+type Preconditions metav1.Preconditions
+
+// ApplyToDelete applies this configuration to the given delete options.
+func (p Preconditions) ApplyToDelete(opts *DeleteOptions) {
+ preconds := metav1.Preconditions(p)
+ opts.Preconditions = &preconds
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
+func (p Preconditions) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+ p.ApplyToDelete(&opts.DeleteOptions)
+}
+
+// PropagationPolicy determines whether and how garbage collection will be
+// performed. Either this field or OrphanDependents may be set, but not both.
+// The default policy is decided by the existing finalizer set in the
+// metadata.finalizers and the resource-specific default policy.
+// Acceptable values are: 'Orphan' - orphan the dependents; 'Background' -
+// allow the garbage collector to delete the dependents in the background;
+// 'Foreground' - a cascading policy that deletes all dependents in the
+// foreground.
+type PropagationPolicy metav1.DeletionPropagation
+
+// ApplyToDelete applies the given delete options on these options.
+// It will propagate to the dependents of the object to let the garbage collector handle it.
+func (p PropagationPolicy) ApplyToDelete(opts *DeleteOptions) {
+ policy := metav1.DeletionPropagation(p)
+ opts.PropagationPolicy = &policy
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
+func (p PropagationPolicy) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+ p.ApplyToDelete(&opts.DeleteOptions)
+}
+
+// }}}
+
+// {{{ Get Options
+
+// GetOptions contains options for get requests. It currently only has a Raw
+// field, with support for a specific resourceVersion.
+type GetOptions struct {
+ // Raw represents raw GetOptions, as passed to the API server. Note
+ // that these may not be respected by all implementations of the interface.
+ Raw *metav1.GetOptions
+}
+
+var _ GetOption = &GetOptions{}
+
+// ApplyToGet implements GetOption for GetOptions.
+func (o *GetOptions) ApplyToGet(lo *GetOptions) {
+ if o.Raw != nil {
+ lo.Raw = o.Raw
+ }
+}
+
+// AsGetOptions returns these options as a flattened metav1.GetOptions.
+// This may mutate the Raw field.
+func (o *GetOptions) AsGetOptions() *metav1.GetOptions {
+ if o == nil || o.Raw == nil {
+ return &metav1.GetOptions{}
+ }
+ return o.Raw
+}
+
+// ApplyOptions applies the given get options on these options,
+// and then returns itself (for convenient chaining).
+func (o *GetOptions) ApplyOptions(opts []GetOption) *GetOptions {
+ for _, opt := range opts {
+ opt.ApplyToGet(o)
+ }
+ return o
+}
+
+// }}}
+
+// {{{ List Options
+
+// ListOptions contains options for limiting or filtering results.
+// It's generally a subset of metav1.ListOptions, with support for
+// pre-parsed selectors (since generally, selectors will be executed
+// against the cache).
+type ListOptions struct {
+ // LabelSelector filters results by label. Use labels.Parse() to
+ // set from raw string form.
+ LabelSelector labels.Selector
+ // FieldSelector filters results by a particular field. In order
+ // to use this with cache-based implementations, restrict usage to
+ // a single field-value pair that's been added to the indexers.
+ FieldSelector fields.Selector
+
+ // Namespace represents the namespace to list for, or empty for
+ // non-namespaced objects, or to list across all namespaces.
+ Namespace string
+
+ // Limit specifies the maximum number of results to return from the server. The server may
+ // not support this field on all resource types, but if it does and more results remain it
+ // will set the continue field on the returned list object. This field is not supported if watch
+ // is true in the Raw ListOptions.
+ Limit int64
+ // Continue is a token returned by the server that lets a client retrieve chunks of results
+ // from the server by specifying limit. The server may reject requests for continuation tokens
+ // it does not recognize and will return a 410 error if the token can no longer be used because
+ // it has expired. This field is not supported if watch is true in the Raw ListOptions.
+ Continue string
+
+ // Raw represents raw ListOptions, as passed to the API server. Note
+ // that these may not be respected by all implementations of the interface,
+ // and the LabelSelector, FieldSelector, Limit and Continue fields are ignored.
+ Raw *metav1.ListOptions
+}
+
+var _ ListOption = &ListOptions{}
+
+// ApplyToList implements ListOption for ListOptions.
+func (o *ListOptions) ApplyToList(lo *ListOptions) {
+ if o.LabelSelector != nil {
+ lo.LabelSelector = o.LabelSelector
+ }
+ if o.FieldSelector != nil {
+ lo.FieldSelector = o.FieldSelector
+ }
+ if o.Namespace != "" {
+ lo.Namespace = o.Namespace
+ }
+ if o.Raw != nil {
+ lo.Raw = o.Raw
+ }
+ if o.Limit > 0 {
+ lo.Limit = o.Limit
+ }
+ if o.Continue != "" {
+ lo.Continue = o.Continue
+ }
+}
+
+// AsListOptions returns these options as a flattened metav1.ListOptions.
+// This may mutate the Raw field.
+func (o *ListOptions) AsListOptions() *metav1.ListOptions {
+ if o == nil {
+ return &metav1.ListOptions{}
+ }
+ if o.Raw == nil {
+ o.Raw = &metav1.ListOptions{}
+ }
+ if o.LabelSelector != nil {
+ o.Raw.LabelSelector = o.LabelSelector.String()
+ }
+ if o.FieldSelector != nil {
+ o.Raw.FieldSelector = o.FieldSelector.String()
+ }
+ if !o.Raw.Watch {
+ o.Raw.Limit = o.Limit
+ o.Raw.Continue = o.Continue
+ }
+ return o.Raw
+}
+
+// ApplyOptions applies the given list options on these options,
+// and then returns itself (for convenient chaining).
+func (o *ListOptions) ApplyOptions(opts []ListOption) *ListOptions {
+ for _, opt := range opts {
+ opt.ApplyToList(o)
+ }
+ return o
+}
+
+// MatchingLabels filters the list/delete operation on the given set of labels.
+type MatchingLabels map[string]string
+
+// ApplyToList applies this configuration to the given list options.
+func (m MatchingLabels) ApplyToList(opts *ListOptions) {
+ // TODO(directxman12): can we avoid reserializing this over and over?
+ sel := labels.SelectorFromValidatedSet(map[string]string(m))
+ opts.LabelSelector = sel
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
+func (m MatchingLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+ m.ApplyToList(&opts.ListOptions)
+}
+
+// HasLabels filters the list/delete operation checking if the set of labels exists
+// without checking their values.
+type HasLabels []string
+
+// ApplyToList applies this configuration to the given list options.
+func (m HasLabels) ApplyToList(opts *ListOptions) {
+ sel := labels.NewSelector()
+ for _, label := range m {
+ r, err := labels.NewRequirement(label, selection.Exists, nil)
+ if err == nil {
+ sel = sel.Add(*r)
+ }
+ }
+ opts.LabelSelector = sel
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
+func (m HasLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+ m.ApplyToList(&opts.ListOptions)
+}
+
+// MatchingLabelsSelector filters the list/delete operation on the given label
+// selector (or index in the case of cached lists). A struct is used because
+// labels.Selector is an interface, which cannot be aliased.
+type MatchingLabelsSelector struct {
+ labels.Selector
+}
+
+// ApplyToList applies this configuration to the given list options.
+func (m MatchingLabelsSelector) ApplyToList(opts *ListOptions) {
+ opts.LabelSelector = m
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
+func (m MatchingLabelsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+ m.ApplyToList(&opts.ListOptions)
+}
+
+// MatchingFields filters the list/delete operation on the given field Set
+// (or index in the case of cached lists).
+type MatchingFields fields.Set
+
+// ApplyToList applies this configuration to the given list options.
+func (m MatchingFields) ApplyToList(opts *ListOptions) {
+ // TODO(directxman12): can we avoid re-serializing this?
+ sel := fields.Set(m).AsSelector()
+ opts.FieldSelector = sel
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
+func (m MatchingFields) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+ m.ApplyToList(&opts.ListOptions)
+}
+
+// MatchingFieldsSelector filters the list/delete operation on the given field
+// selector (or index in the case of cached lists). A struct is used because
+// fields.Selector is an interface, which cannot be aliased.
+type MatchingFieldsSelector struct {
+ fields.Selector
+}
+
+// ApplyToList applies this configuration to the given list options.
+func (m MatchingFieldsSelector) ApplyToList(opts *ListOptions) {
+ opts.FieldSelector = m
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
+func (m MatchingFieldsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+ m.ApplyToList(&opts.ListOptions)
+}
+
+// InNamespace restricts the list/delete operation to the given namespace.
+type InNamespace string
+
+// ApplyToList applies this configuration to the given list options.
+func (n InNamespace) ApplyToList(opts *ListOptions) {
+ opts.Namespace = string(n)
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
+func (n InNamespace) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+ n.ApplyToList(&opts.ListOptions)
+}
+
+// Limit specifies the maximum number of results to return from the server.
+// Limit does not implement DeleteAllOfOption interface because the server
+// does not support setting it for deletecollection operations.
+type Limit int64
+
+// ApplyToList applies this configuration to the given list options.
+func (l Limit) ApplyToList(opts *ListOptions) {
+ opts.Limit = int64(l)
+}
+
+// Continue sets a continuation token to retrieve chunks of results when using limit.
+// Continue does not implement DeleteAllOfOption interface because the server
+// does not support setting it for deletecollection operations.
+type Continue string
+
+// ApplyToList applies this configuration to the given list options.
+func (c Continue) ApplyToList(opts *ListOptions) {
+ opts.Continue = string(c)
+}
+
+// }}}
+
+// {{{ Update Options
+
+// UpdateOptions contains options for update requests. It's generally a subset
+// of metav1.UpdateOptions.
+type UpdateOptions struct {
+ // When present, indicates that modifications should not be
+ // persisted. An invalid or unrecognized dryRun directive will
+ // result in an error response and no further processing of the
+ // request. Valid values are:
+ // - All: all dry run stages will be processed
+ DryRun []string
+
+ // FieldManager is the name of the user or component submitting
+ // this request. It must be set with server-side apply.
+ FieldManager string
+
+ // Raw represents raw UpdateOptions, as passed to the API server.
+ Raw *metav1.UpdateOptions
+}
+
+// AsUpdateOptions returns these options as a metav1.UpdateOptions.
+// This may mutate the Raw field.
+func (o *UpdateOptions) AsUpdateOptions() *metav1.UpdateOptions {
+ if o == nil {
+ return &metav1.UpdateOptions{}
+ }
+ if o.Raw == nil {
+ o.Raw = &metav1.UpdateOptions{}
+ }
+
+ o.Raw.DryRun = o.DryRun
+ o.Raw.FieldManager = o.FieldManager
+ return o.Raw
+}
+
+// ApplyOptions applies the given update options on these options,
+// and then returns itself (for convenient chaining).
+func (o *UpdateOptions) ApplyOptions(opts []UpdateOption) *UpdateOptions {
+ for _, opt := range opts {
+ opt.ApplyToUpdate(o)
+ }
+ return o
+}
+
+var _ UpdateOption = &UpdateOptions{}
+
+// ApplyToUpdate implements UpdateOption.
+func (o *UpdateOptions) ApplyToUpdate(uo *UpdateOptions) {
+ if o.DryRun != nil {
+ uo.DryRun = o.DryRun
+ }
+ if o.FieldManager != "" {
+ uo.FieldManager = o.FieldManager
+ }
+ if o.Raw != nil {
+ uo.Raw = o.Raw
+ }
+}
+
+// }}}
+
+// {{{ Patch Options
+
+// PatchOptions contains options for patch requests.
+type PatchOptions struct {
+ // When present, indicates that modifications should not be
+ // persisted. An invalid or unrecognized dryRun directive will
+ // result in an error response and no further processing of the
+ // request. Valid values are:
+ // - All: all dry run stages will be processed
+ DryRun []string
+
+ // Force is going to "force" Apply requests. It means the user will
+ // re-acquire conflicting fields owned by other people. The Force
+ // flag must be unset for non-apply patch requests.
+ // +optional
+ Force *bool
+
+ // FieldManager is the name of the user or component submitting
+ // this request. It must be set with server-side apply.
+ FieldManager string
+
+ // Raw represents raw PatchOptions, as passed to the API server.
+ Raw *metav1.PatchOptions
+}
+
+// ApplyOptions applies the given patch options on these options,
+// and then returns itself (for convenient chaining).
+func (o *PatchOptions) ApplyOptions(opts []PatchOption) *PatchOptions {
+ for _, opt := range opts {
+ opt.ApplyToPatch(o)
+ }
+ return o
+}
+
+// AsPatchOptions returns these options as a metav1.PatchOptions.
+// This may mutate the Raw field.
+func (o *PatchOptions) AsPatchOptions() *metav1.PatchOptions {
+ if o == nil {
+ return &metav1.PatchOptions{}
+ }
+ if o.Raw == nil {
+ o.Raw = &metav1.PatchOptions{}
+ }
+
+ o.Raw.DryRun = o.DryRun
+ o.Raw.Force = o.Force
+ o.Raw.FieldManager = o.FieldManager
+ return o.Raw
+}
+
+var _ PatchOption = &PatchOptions{}
+
+// ApplyToPatch implements PatchOption.
+func (o *PatchOptions) ApplyToPatch(po *PatchOptions) {
+ if o.DryRun != nil {
+ po.DryRun = o.DryRun
+ }
+ if o.Force != nil {
+ po.Force = o.Force
+ }
+ if o.FieldManager != "" {
+ po.FieldManager = o.FieldManager
+ }
+ if o.Raw != nil {
+ po.Raw = o.Raw
+ }
+}
+
+// ForceOwnership indicates that in case of conflicts with server-side apply,
+// the client should acquire ownership of the conflicting field. Most
+// controllers should use this.
+var ForceOwnership = forceOwnership{}
+
+type forceOwnership struct{}
+
+func (forceOwnership) ApplyToPatch(opts *PatchOptions) {
+ definitelyTrue := true
+ opts.Force = &definitelyTrue
+}
+
+// }}}
+
+// {{{ DeleteAllOf Options
+
+// these are all just delete options and list options
+
+// DeleteAllOfOptions contains options for deletecollection (deleteallof) requests.
+// It's just list and delete options smooshed together.
+type DeleteAllOfOptions struct {
+ ListOptions
+ DeleteOptions
+}
+
+// ApplyOptions applies the given deleteallof options on these options,
+// and then returns itself (for convenient chaining).
+func (o *DeleteAllOfOptions) ApplyOptions(opts []DeleteAllOfOption) *DeleteAllOfOptions {
+ for _, opt := range opts {
+ opt.ApplyToDeleteAllOf(o)
+ }
+ return o
+}
+
+var _ DeleteAllOfOption = &DeleteAllOfOptions{}
+
+// ApplyToDeleteAllOf implements DeleteAllOfOption.
+func (o *DeleteAllOfOptions) ApplyToDeleteAllOf(do *DeleteAllOfOptions) {
+ o.ApplyToList(&do.ListOptions)
+ o.ApplyToDelete(&do.DeleteOptions)
+}
+
+// }}}
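
A sketch of how these options compose as variadic arguments on List and DeleteAllOf calls; the namespace, labels, limit, and grace period are illustrative values:

package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// cleanUpBatch shows ListOption and DeleteAllOfOption values side by side; each
// option mutates the corresponding *Options struct via its ApplyTo* method.
func cleanUpBatch(ctx context.Context, c client.Client) error {
	var deps appsv1.DeploymentList
	if err := c.List(ctx, &deps,
		client.InNamespace("demo"),             // illustrative namespace
		client.MatchingLabels{"tier": "batch"}, // illustrative labels
		client.Limit(50),
	); err != nil {
		return err
	}

	return c.DeleteAllOf(ctx, &corev1.Pod{},
		client.InNamespace("demo"),
		client.MatchingLabels{"tier": "batch"},
		client.GracePeriodSeconds(5),
		client.PropagationPolicy(metav1.DeletePropagationForeground),
	)
}
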
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go
new file mode 100644
index 00000000..11d60838
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go
@@ -0,0 +1,213 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "fmt"
+
+ jsonpatch "github.com/evanphx/json-patch/v5"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/json"
+ "k8s.io/apimachinery/pkg/util/strategicpatch"
+)
+
+var (
+ // Apply uses server-side apply to patch the given object.
+ Apply Patch = applyPatch{}
+
+ // Merge uses the raw object as a merge patch, without modifications.
+ // Use MergeFrom if you wish to compute a diff instead.
+ Merge Patch = mergePatch{}
+)
+
+type patch struct {
+ patchType types.PatchType
+ data []byte
+}
+
+// Type implements Patch.
+func (s *patch) Type() types.PatchType {
+ return s.patchType
+}
+
+// Data implements Patch.
+func (s *patch) Data(obj Object) ([]byte, error) {
+ return s.data, nil
+}
+
+// RawPatch constructs a new Patch with the given PatchType and data.
+func RawPatch(patchType types.PatchType, data []byte) Patch {
+ return &patch{patchType, data}
+}
+
+// MergeFromWithOptimisticLock can be used if clients want to make sure a patch
+// is being applied to the latest resource version of an object.
+//
+// The behavior is similar to what an Update would do, without the need to send the
+// whole object. Usually this method is useful if you might have multiple clients
+// acting on the same object and the same API version, but with different versions of the Go structs.
+//
+// For example, an "older" copy of a Widget that has fields A and B, and a "newer" copy with A, B, and C.
+// Sending an update using the older struct definition results in C being dropped, whereas using a patch does not.
+type MergeFromWithOptimisticLock struct{}
+
+// ApplyToMergeFrom applies this configuration to the given patch options.
+func (m MergeFromWithOptimisticLock) ApplyToMergeFrom(in *MergeFromOptions) {
+ in.OptimisticLock = true
+}
+
+// MergeFromOption is some configuration that modifies options for a merge-from patch data.
+type MergeFromOption interface {
+ // ApplyToMergeFrom applies this configuration to the given patch options.
+ ApplyToMergeFrom(*MergeFromOptions)
+}
+
+// MergeFromOptions contains options to generate a merge-from patch data.
+type MergeFromOptions struct {
+ // OptimisticLock, when true, includes `metadata.resourceVersion` into the final
+ // patch data. If the `resourceVersion` field doesn't match what's stored,
+ // the operation results in a conflict and clients will need to try again.
+ OptimisticLock bool
+}
+
+type mergeFromPatch struct {
+ patchType types.PatchType
+ createPatch func(originalJSON, modifiedJSON []byte, dataStruct interface{}) ([]byte, error)
+ from Object
+ opts MergeFromOptions
+}
+
+// Type implements Patch.
+func (s *mergeFromPatch) Type() types.PatchType {
+ return s.patchType
+}
+
+// Data implements Patch.
+func (s *mergeFromPatch) Data(obj Object) ([]byte, error) {
+ original := s.from
+ modified := obj
+
+ if s.opts.OptimisticLock {
+ version := original.GetResourceVersion()
+ if len(version) == 0 {
+ return nil, fmt.Errorf("cannot use OptimisticLock, object %q does not have any resource version we can use", original)
+ }
+
+ original = original.DeepCopyObject().(Object)
+ original.SetResourceVersion("")
+
+ modified = modified.DeepCopyObject().(Object)
+ modified.SetResourceVersion(version)
+ }
+
+ originalJSON, err := json.Marshal(original)
+ if err != nil {
+ return nil, err
+ }
+
+ modifiedJSON, err := json.Marshal(modified)
+ if err != nil {
+ return nil, err
+ }
+
+ data, err := s.createPatch(originalJSON, modifiedJSON, obj)
+ if err != nil {
+ return nil, err
+ }
+
+ return data, nil
+}
+
+func createMergePatch(originalJSON, modifiedJSON []byte, _ interface{}) ([]byte, error) {
+ return jsonpatch.CreateMergePatch(originalJSON, modifiedJSON)
+}
+
+func createStrategicMergePatch(originalJSON, modifiedJSON []byte, dataStruct interface{}) ([]byte, error) {
+ return strategicpatch.CreateTwoWayMergePatch(originalJSON, modifiedJSON, dataStruct)
+}
+
+// MergeFrom creates a Patch that patches using the merge-patch strategy with the given object as base.
+// The difference between MergeFrom and StrategicMergeFrom lays in the handling of modified list fields.
+// When using MergeFrom, existing lists will be completely replaced by new lists.
+// When using StrategicMergeFrom, the list field's `patchStrategy` is respected if specified in the API type,
+// e.g. the existing list is not replaced completely but rather merged with the new one using the list's `patchMergeKey`.
+// See https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/ for more details on
+// the difference between merge-patch and strategic-merge-patch.
+func MergeFrom(obj Object) Patch {
+ return &mergeFromPatch{patchType: types.MergePatchType, createPatch: createMergePatch, from: obj}
+}
+
+// MergeFromWithOptions creates a Patch that patches using the merge-patch strategy with the given object as base.
+// See MergeFrom for more details.
+func MergeFromWithOptions(obj Object, opts ...MergeFromOption) Patch {
+ options := &MergeFromOptions{}
+ for _, opt := range opts {
+ opt.ApplyToMergeFrom(options)
+ }
+ return &mergeFromPatch{patchType: types.MergePatchType, createPatch: createMergePatch, from: obj, opts: *options}
+}
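
Editorial note (not part of the vendored file): a minimal sketch of how a caller might combine MergeFromWithOptions with the MergeFromWithOptimisticLock option defined above. The function name, the ConfigMap, and the `cl`/`ctx` values are assumptions; imports would be context, k8s.io/api/core/v1, and this client package.

    func bumpConfig(ctx context.Context, cl client.Client, cm *corev1.ConfigMap) error {
        base := cm.DeepCopy() // snapshot the object before mutating it
        if cm.Data == nil {
            cm.Data = map[string]string{}
        }
        cm.Data["replicas"] = "3"
        // MergeFromWithOptimisticLock embeds metadata.resourceVersion in the patch,
        // so the API server rejects the request with a conflict if the object
        // changed after `base` was read.
        patch := client.MergeFromWithOptions(base, client.MergeFromWithOptimisticLock{})
        return cl.Patch(ctx, cm, patch)
    }
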
+
+// StrategicMergeFrom creates a Patch that patches using the strategic-merge-patch strategy with the given object as base.
+// The difference between MergeFrom and StrategicMergeFrom lies in the handling of modified list fields.
+// When using MergeFrom, existing lists will be completely replaced by new lists.
+// When using StrategicMergeFrom, the list field's `patchStrategy` is respected if specified in the API type,
+// e.g. the existing list is not replaced completely but rather merged with the new one using the list's `patchMergeKey`.
+// See https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/ for more details on
+// the difference between merge-patch and strategic-merge-patch.
+// Please note that CRDs don't support strategic-merge-patch; see
+// https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility
+func StrategicMergeFrom(obj Object, opts ...MergeFromOption) Patch {
+ options := &MergeFromOptions{}
+ for _, opt := range opts {
+ opt.ApplyToMergeFrom(options)
+ }
+ return &mergeFromPatch{patchType: types.StrategicMergePatchType, createPatch: createStrategicMergePatch, from: obj, opts: *options}
+}
+
+// mergePatch uses a raw merge strategy to patch the object.
+type mergePatch struct{}
+
+// Type implements Patch.
+func (p mergePatch) Type() types.PatchType {
+ return types.MergePatchType
+}
+
+// Data implements Patch.
+func (p mergePatch) Data(obj Object) ([]byte, error) {
+ // NB(directxman12): we might technically want to be using an actual encoder
+ // here (in case some more performant encoder is introduced) but this is
+ // correct and sufficient for our uses (it's what the JSON serializer in
+ // client-go does, more-or-less).
+ return json.Marshal(obj)
+}
+
+// applyPatch uses server-side apply to patch the object.
+type applyPatch struct{}
+
+// Type implements Patch.
+func (p applyPatch) Type() types.PatchType {
+ return types.ApplyPatchType
+}
+
+// Data implements Patch.
+func (p applyPatch) Data(obj Object) ([]byte, error) {
+ // NB(directxman12): we might technically want to be using an actual encoder
+ // here (in case some more performant encoder is introduced) but this is
+ // correct and sufficient for our uses (it's what the JSON serializer in
+ // client-go does, more-or-less).
+ return json.Marshal(obj)
+}
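
Editorial note: in upstream controller-runtime, mergePatch and applyPatch back the package-level client.Merge and client.Apply values, which are declared elsewhere in the package (not in this hunk). A hedged sketch of how they would be used through the Client interface; `cl`, `ctx`, and the fully specified `desired` object are assumptions:

    // client.Apply marshals the object itself as the patch body (server-side apply),
    // so a field owner must be named.
    err := cl.Patch(ctx, desired, client.Apply, client.FieldOwner("my-operator"))

    // client.Merge sends the same JSON as a plain merge patch instead.
    err = cl.Patch(ctx, desired, client.Merge)
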
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go
new file mode 100644
index 00000000..87173453
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go
@@ -0,0 +1,141 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "context"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+)
+
+// NewDelegatingClientInput encapsulates the input parameters to create a new delegating client.
+type NewDelegatingClientInput struct {
+ CacheReader Reader
+ Client Client
+ UncachedObjects []Object
+ CacheUnstructured bool
+}
+
+// NewDelegatingClient creates a new delegating client.
+//
+// A delegating client forms a Client by composing separate reader, writer and
+// status client interfaces. This way, you can have a Client that reads from a
+// cache and writes to the API server.
+func NewDelegatingClient(in NewDelegatingClientInput) (Client, error) {
+ uncachedGVKs := map[schema.GroupVersionKind]struct{}{}
+ for _, obj := range in.UncachedObjects {
+ gvk, err := apiutil.GVKForObject(obj, in.Client.Scheme())
+ if err != nil {
+ return nil, err
+ }
+ uncachedGVKs[gvk] = struct{}{}
+ }
+
+ return &delegatingClient{
+ scheme: in.Client.Scheme(),
+ mapper: in.Client.RESTMapper(),
+ Reader: &delegatingReader{
+ CacheReader: in.CacheReader,
+ ClientReader: in.Client,
+ scheme: in.Client.Scheme(),
+ uncachedGVKs: uncachedGVKs,
+ cacheUnstructured: in.CacheUnstructured,
+ },
+ Writer: in.Client,
+ StatusClient: in.Client,
+ }, nil
+}
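
Editorial note: the manager normally wires this up itself, so direct construction is rare. A hedged sketch of building one by hand; `cfg`, `scheme`, and the started informer cache are assumptions, and cache.Cache (sigs.k8s.io/controller-runtime/pkg/cache) satisfies client.Reader.

    func newCachedClient(cfg *rest.Config, scheme *runtime.Scheme, informerCache cache.Cache) (client.Client, error) {
        direct, err := client.New(cfg, client.Options{Scheme: scheme})
        if err != nil {
            return nil, err
        }
        return client.NewDelegatingClient(client.NewDelegatingClientInput{
            CacheReader:     informerCache,                     // Get/List served from informers
            Client:          direct,                            // writes and uncached reads hit the API server
            UncachedObjects: []client.Object{&corev1.Secret{}}, // e.g. never cache Secrets
        })
    }
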
+
+type delegatingClient struct {
+ Reader
+ Writer
+ StatusClient
+
+ scheme *runtime.Scheme
+ mapper meta.RESTMapper
+}
+
+// Scheme returns the scheme this client is using.
+func (d *delegatingClient) Scheme() *runtime.Scheme {
+ return d.scheme
+}
+
+// RESTMapper returns the rest mapper this client is using.
+func (d *delegatingClient) RESTMapper() meta.RESTMapper {
+ return d.mapper
+}
+
+// delegatingReader forms a Reader that will cause Get and List requests for
+// unstructured types to use the ClientReader while requests for any other type
+// of object will use the CacheReader. This avoids accidentally caching the
+// entire cluster in the common case of loading arbitrary unstructured objects
+// (e.g. from OwnerReferences).
+type delegatingReader struct {
+ CacheReader Reader
+ ClientReader Reader
+
+ uncachedGVKs map[schema.GroupVersionKind]struct{}
+ scheme *runtime.Scheme
+ cacheUnstructured bool
+}
+
+func (d *delegatingReader) shouldBypassCache(obj runtime.Object) (bool, error) {
+ gvk, err := apiutil.GVKForObject(obj, d.scheme)
+ if err != nil {
+ return false, err
+ }
+ // TODO: this is producing unsafe guesses that don't actually work,
+ // but it matches ~99% of the cases out there.
+ if meta.IsListType(obj) {
+ gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
+ }
+ if _, isUncached := d.uncachedGVKs[gvk]; isUncached {
+ return true, nil
+ }
+ if !d.cacheUnstructured {
+ _, isUnstructured := obj.(*unstructured.Unstructured)
+ _, isUnstructuredList := obj.(*unstructured.UnstructuredList)
+ return isUnstructured || isUnstructuredList, nil
+ }
+ return false, nil
+}
+
+// Get retrieves an obj for a given object key from the Kubernetes Cluster.
+func (d *delegatingReader) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
+ if isUncached, err := d.shouldBypassCache(obj); err != nil {
+ return err
+ } else if isUncached {
+ return d.ClientReader.Get(ctx, key, obj, opts...)
+ }
+ return d.CacheReader.Get(ctx, key, obj, opts...)
+}
+
+// List retrieves list of objects for a given namespace and list options.
+func (d *delegatingReader) List(ctx context.Context, list ObjectList, opts ...ListOption) error {
+ if isUncached, err := d.shouldBypassCache(list); err != nil {
+ return err
+ } else if isUncached {
+ return d.ClientReader.List(ctx, list, opts...)
+ }
+ return d.CacheReader.List(ctx, list, opts...)
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go
new file mode 100644
index 00000000..c4e56d9b
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go
@@ -0,0 +1,208 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "context"
+
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+var _ Reader = &typedClient{}
+var _ Writer = &typedClient{}
+var _ StatusWriter = &typedClient{}
+
+// typedClient is a client.Client that reads and writes directly from/to an API server. It lazily initializes
+// new clients at the time they are used, and caches the client.
+type typedClient struct {
+ cache *clientCache
+ paramCodec runtime.ParameterCodec
+}
+
+// Create implements client.Client.
+func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
+ o, err := c.cache.getObjMeta(obj)
+ if err != nil {
+ return err
+ }
+
+ createOpts := &CreateOptions{}
+ createOpts.ApplyOptions(opts)
+ return o.Post().
+ NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ Resource(o.resource()).
+ Body(obj).
+ VersionedParams(createOpts.AsCreateOptions(), c.paramCodec).
+ Do(ctx).
+ Into(obj)
+}
+
+// Update implements client.Client.
+func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
+ o, err := c.cache.getObjMeta(obj)
+ if err != nil {
+ return err
+ }
+
+ updateOpts := &UpdateOptions{}
+ updateOpts.ApplyOptions(opts)
+ return o.Put().
+ NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ Resource(o.resource()).
+ Name(o.GetName()).
+ Body(obj).
+ VersionedParams(updateOpts.AsUpdateOptions(), c.paramCodec).
+ Do(ctx).
+ Into(obj)
+}
+
+// Delete implements client.Client.
+func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
+ o, err := c.cache.getObjMeta(obj)
+ if err != nil {
+ return err
+ }
+
+ deleteOpts := DeleteOptions{}
+ deleteOpts.ApplyOptions(opts)
+
+ return o.Delete().
+ NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ Resource(o.resource()).
+ Name(o.GetName()).
+ Body(deleteOpts.AsDeleteOptions()).
+ Do(ctx).
+ Error()
+}
+
+// DeleteAllOf implements client.Client.
+func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
+ o, err := c.cache.getObjMeta(obj)
+ if err != nil {
+ return err
+ }
+
+ deleteAllOfOpts := DeleteAllOfOptions{}
+ deleteAllOfOpts.ApplyOptions(opts)
+
+ return o.Delete().
+ NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()).
+ Resource(o.resource()).
+ VersionedParams(deleteAllOfOpts.AsListOptions(), c.paramCodec).
+ Body(deleteAllOfOpts.AsDeleteOptions()).
+ Do(ctx).
+ Error()
+}
+
+// Patch implements client.Client.
+func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
+ o, err := c.cache.getObjMeta(obj)
+ if err != nil {
+ return err
+ }
+
+ data, err := patch.Data(obj)
+ if err != nil {
+ return err
+ }
+
+ patchOpts := &PatchOptions{}
+ return o.Patch(patch.Type()).
+ NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ Resource(o.resource()).
+ Name(o.GetName()).
+ VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec).
+ Body(data).
+ Do(ctx).
+ Into(obj)
+}
+
+// Get implements client.Client.
+func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
+ r, err := c.cache.getResource(obj)
+ if err != nil {
+ return err
+ }
+ getOpts := GetOptions{}
+ getOpts.ApplyOptions(opts)
+ return r.Get().
+ NamespaceIfScoped(key.Namespace, r.isNamespaced()).
+ Resource(r.resource()).
+ VersionedParams(getOpts.AsGetOptions(), c.paramCodec).
+ Name(key.Name).Do(ctx).Into(obj)
+}
+
+// List implements client.Client.
+func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
+ r, err := c.cache.getResource(obj)
+ if err != nil {
+ return err
+ }
+ listOpts := ListOptions{}
+ listOpts.ApplyOptions(opts)
+ return r.Get().
+ NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()).
+ Resource(r.resource()).
+ VersionedParams(listOpts.AsListOptions(), c.paramCodec).
+ Do(ctx).
+ Into(obj)
+}
+
+// UpdateStatus used by StatusWriter to write status.
+func (c *typedClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error {
+ o, err := c.cache.getObjMeta(obj)
+ if err != nil {
+ return err
+ }
+	// TODO(droot): examine the returned error and check if the error needs to be
+	// wrapped to improve the UX.
+ // It will be nice to receive an error saying the object doesn't implement
+ // status subresource and check CRD definition
+ return o.Put().
+ NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ Resource(o.resource()).
+ Name(o.GetName()).
+ SubResource("status").
+ Body(obj).
+ VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), c.paramCodec).
+ Do(ctx).
+ Into(obj)
+}
+
+// PatchStatus used by StatusWriter to write status.
+func (c *typedClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
+ o, err := c.cache.getObjMeta(obj)
+ if err != nil {
+ return err
+ }
+
+ data, err := patch.Data(obj)
+ if err != nil {
+ return err
+ }
+
+ patchOpts := &PatchOptions{}
+ return o.Patch(patch.Type()).
+ NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ Resource(o.resource()).
+ Name(o.GetName()).
+ SubResource("status").
+ Body(data).
+ VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec).
+ Do(ctx).
+ Into(obj)
+}
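
Editorial note: UpdateStatus and PatchStatus above are not called directly; they back the Client's Status() writer. A hedged sketch of that path, assuming `cl`, `ctx`, and an `obj` whose CRD has the status subresource enabled and a hypothetical Status.Message field:

    obj.Status.Message = "reconciled"
    if err := cl.Status().Update(ctx, obj); err != nil { // routes to UpdateStatus above
        return err
    }
    // The patch form routes to PatchStatus instead:
    //   base := obj.DeepCopy()   // taken before mutating obj
    //   err := cl.Status().Patch(ctx, obj, client.MergeFrom(base))
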
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go
new file mode 100644
index 00000000..3d3dbe7b
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go
@@ -0,0 +1,275 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+var _ Reader = &unstructuredClient{}
+var _ Writer = &unstructuredClient{}
+var _ StatusWriter = &unstructuredClient{}
+
+// unstructuredClient is a client.Client that reads and writes directly from/to an API server. It lazily initializes
+// new clients at the time they are used, and caches the client.
+type unstructuredClient struct {
+ cache *clientCache
+ paramCodec runtime.ParameterCodec
+}
+
+// Create implements client.Client.
+func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
+ u, ok := obj.(*unstructured.Unstructured)
+ if !ok {
+ return fmt.Errorf("unstructured client did not understand object: %T", obj)
+ }
+
+ gvk := u.GroupVersionKind()
+
+ o, err := uc.cache.getObjMeta(obj)
+ if err != nil {
+ return err
+ }
+
+ createOpts := &CreateOptions{}
+ createOpts.ApplyOptions(opts)
+ result := o.Post().
+ NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ Resource(o.resource()).
+ Body(obj).
+ VersionedParams(createOpts.AsCreateOptions(), uc.paramCodec).
+ Do(ctx).
+ Into(obj)
+
+ u.SetGroupVersionKind(gvk)
+ return result
+}
+
+// Update implements client.Client.
+func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
+ u, ok := obj.(*unstructured.Unstructured)
+ if !ok {
+ return fmt.Errorf("unstructured client did not understand object: %T", obj)
+ }
+
+ gvk := u.GroupVersionKind()
+
+ o, err := uc.cache.getObjMeta(obj)
+ if err != nil {
+ return err
+ }
+
+ updateOpts := UpdateOptions{}
+ updateOpts.ApplyOptions(opts)
+ result := o.Put().
+ NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ Resource(o.resource()).
+ Name(o.GetName()).
+ Body(obj).
+ VersionedParams(updateOpts.AsUpdateOptions(), uc.paramCodec).
+ Do(ctx).
+ Into(obj)
+
+ u.SetGroupVersionKind(gvk)
+ return result
+}
+
+// Delete implements client.Client.
+func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
+ if _, ok := obj.(*unstructured.Unstructured); !ok {
+ return fmt.Errorf("unstructured client did not understand object: %T", obj)
+ }
+
+ o, err := uc.cache.getObjMeta(obj)
+ if err != nil {
+ return err
+ }
+
+ deleteOpts := DeleteOptions{}
+ deleteOpts.ApplyOptions(opts)
+ return o.Delete().
+ NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ Resource(o.resource()).
+ Name(o.GetName()).
+ Body(deleteOpts.AsDeleteOptions()).
+ Do(ctx).
+ Error()
+}
+
+// DeleteAllOf implements client.Client.
+func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
+ if _, ok := obj.(*unstructured.Unstructured); !ok {
+ return fmt.Errorf("unstructured client did not understand object: %T", obj)
+ }
+
+ o, err := uc.cache.getObjMeta(obj)
+ if err != nil {
+ return err
+ }
+
+ deleteAllOfOpts := DeleteAllOfOptions{}
+ deleteAllOfOpts.ApplyOptions(opts)
+ return o.Delete().
+ NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()).
+ Resource(o.resource()).
+ VersionedParams(deleteAllOfOpts.AsListOptions(), uc.paramCodec).
+ Body(deleteAllOfOpts.AsDeleteOptions()).
+ Do(ctx).
+ Error()
+}
+
+// Patch implements client.Client.
+func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
+ if _, ok := obj.(*unstructured.Unstructured); !ok {
+ return fmt.Errorf("unstructured client did not understand object: %T", obj)
+ }
+
+ o, err := uc.cache.getObjMeta(obj)
+ if err != nil {
+ return err
+ }
+
+ data, err := patch.Data(obj)
+ if err != nil {
+ return err
+ }
+
+ patchOpts := &PatchOptions{}
+ return o.Patch(patch.Type()).
+ NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ Resource(o.resource()).
+ Name(o.GetName()).
+ VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), uc.paramCodec).
+ Body(data).
+ Do(ctx).
+ Into(obj)
+}
+
+// Get implements client.Client.
+func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
+ u, ok := obj.(*unstructured.Unstructured)
+ if !ok {
+ return fmt.Errorf("unstructured client did not understand object: %T", obj)
+ }
+
+ gvk := u.GroupVersionKind()
+
+ getOpts := GetOptions{}
+ getOpts.ApplyOptions(opts)
+
+ r, err := uc.cache.getResource(obj)
+ if err != nil {
+ return err
+ }
+
+ result := r.Get().
+ NamespaceIfScoped(key.Namespace, r.isNamespaced()).
+ Resource(r.resource()).
+ VersionedParams(getOpts.AsGetOptions(), uc.paramCodec).
+ Name(key.Name).
+ Do(ctx).
+ Into(obj)
+
+ u.SetGroupVersionKind(gvk)
+
+ return result
+}
+
+// List implements client.Client.
+func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
+ u, ok := obj.(*unstructured.UnstructuredList)
+ if !ok {
+ return fmt.Errorf("unstructured client did not understand object: %T", obj)
+ }
+
+ gvk := u.GroupVersionKind()
+ gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
+
+ listOpts := ListOptions{}
+ listOpts.ApplyOptions(opts)
+
+ r, err := uc.cache.getResource(obj)
+ if err != nil {
+ return err
+ }
+
+ return r.Get().
+ NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()).
+ Resource(r.resource()).
+ VersionedParams(listOpts.AsListOptions(), uc.paramCodec).
+ Do(ctx).
+ Into(obj)
+}
+
+func (uc *unstructuredClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error {
+ if _, ok := obj.(*unstructured.Unstructured); !ok {
+ return fmt.Errorf("unstructured client did not understand object: %T", obj)
+ }
+
+ o, err := uc.cache.getObjMeta(obj)
+ if err != nil {
+ return err
+ }
+
+ return o.Put().
+ NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ Resource(o.resource()).
+ Name(o.GetName()).
+ SubResource("status").
+ Body(obj).
+ VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), uc.paramCodec).
+ Do(ctx).
+ Into(obj)
+}
+
+func (uc *unstructuredClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
+ u, ok := obj.(*unstructured.Unstructured)
+ if !ok {
+ return fmt.Errorf("unstructured client did not understand object: %T", obj)
+ }
+
+ gvk := u.GroupVersionKind()
+
+ o, err := uc.cache.getObjMeta(obj)
+ if err != nil {
+ return err
+ }
+
+ data, err := patch.Data(obj)
+ if err != nil {
+ return err
+ }
+
+ patchOpts := &PatchOptions{}
+ result := o.Patch(patch.Type()).
+ NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ Resource(o.resource()).
+ Name(o.GetName()).
+ SubResource("status").
+ Body(data).
+ VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), uc.paramCodec).
+ Do(ctx).
+ Into(u)
+
+ u.SetGroupVersionKind(gvk)
+ return result
+}
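
Editorial note: a minimal sketch of reading through the unstructured path. The namespace/name values are hypothetical; imports would be apimachinery's unstructured and schema packages plus this client package.

    func getDeployment(ctx context.Context, cl client.Client) (*unstructured.Unstructured, error) {
        u := &unstructured.Unstructured{}
        u.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})
        if err := cl.Get(ctx, client.ObjectKey{Namespace: "default", Name: "web"}, u); err != nil {
            return nil, err
        }
        // The GVK set above survives the call: Get restores it after decoding,
        // as the vendored code shows.
        return u, nil
    }
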
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go
new file mode 100644
index 00000000..70490664
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go
@@ -0,0 +1,114 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "context"
+ "strings"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/client-go/dynamic"
+ "k8s.io/client-go/rest"
+)
+
+// NewWithWatch returns a new WithWatch.
+func NewWithWatch(config *rest.Config, options Options) (WithWatch, error) {
+ client, err := newClient(config, options)
+ if err != nil {
+ return nil, err
+ }
+ dynamicClient, err := dynamic.NewForConfig(config)
+ if err != nil {
+ return nil, err
+ }
+ return &watchingClient{client: client, dynamic: dynamicClient}, nil
+}
+
+type watchingClient struct {
+ *client
+ dynamic dynamic.Interface
+}
+
+func (w *watchingClient) Watch(ctx context.Context, list ObjectList, opts ...ListOption) (watch.Interface, error) {
+ switch l := list.(type) {
+ case *unstructured.UnstructuredList:
+ return w.unstructuredWatch(ctx, l, opts...)
+ case *metav1.PartialObjectMetadataList:
+ return w.metadataWatch(ctx, l, opts...)
+ default:
+ return w.typedWatch(ctx, l, opts...)
+ }
+}
+
+func (w *watchingClient) listOpts(opts ...ListOption) ListOptions {
+ listOpts := ListOptions{}
+ listOpts.ApplyOptions(opts)
+ if listOpts.Raw == nil {
+ listOpts.Raw = &metav1.ListOptions{}
+ }
+ listOpts.Raw.Watch = true
+
+ return listOpts
+}
+
+func (w *watchingClient) metadataWatch(ctx context.Context, obj *metav1.PartialObjectMetadataList, opts ...ListOption) (watch.Interface, error) {
+ gvk := obj.GroupVersionKind()
+ gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
+
+ listOpts := w.listOpts(opts...)
+
+ resInt, err := w.client.metadataClient.getResourceInterface(gvk, listOpts.Namespace)
+ if err != nil {
+ return nil, err
+ }
+
+ return resInt.Watch(ctx, *listOpts.AsListOptions())
+}
+
+func (w *watchingClient) unstructuredWatch(ctx context.Context, obj *unstructured.UnstructuredList, opts ...ListOption) (watch.Interface, error) {
+ gvk := obj.GroupVersionKind()
+ gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
+
+ r, err := w.client.unstructuredClient.cache.getResource(obj)
+ if err != nil {
+ return nil, err
+ }
+
+ listOpts := w.listOpts(opts...)
+
+ if listOpts.Namespace != "" && r.isNamespaced() {
+ return w.dynamic.Resource(r.mapping.Resource).Namespace(listOpts.Namespace).Watch(ctx, *listOpts.AsListOptions())
+ }
+ return w.dynamic.Resource(r.mapping.Resource).Watch(ctx, *listOpts.AsListOptions())
+}
+
+func (w *watchingClient) typedWatch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error) {
+ r, err := w.client.typedClient.cache.getResource(obj)
+ if err != nil {
+ return nil, err
+ }
+
+ listOpts := w.listOpts(opts...)
+
+ return r.Get().
+ NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()).
+ Resource(r.resource()).
+ VersionedParams(listOpts.AsListOptions(), w.client.typedClient.paramCodec).
+ Watch(ctx)
+}
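
Editorial note: a hedged sketch of a caller using NewWithWatch; the function name and namespace are assumptions, and corev1 is k8s.io/api/core/v1.

    func watchPods(ctx context.Context, cfg *rest.Config) error {
        wc, err := client.NewWithWatch(cfg, client.Options{})
        if err != nil {
            return err
        }
        w, err := wc.Watch(ctx, &corev1.PodList{}, client.InNamespace("default"))
        if err != nil {
            return err
        }
        defer w.Stop()
        for ev := range w.ResultChan() {
            _ = ev // ev.Type is Added/Modified/Deleted; ev.Object is the changed Pod
        }
        return nil
    }
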
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go
new file mode 100644
index 00000000..7057f3db
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go
@@ -0,0 +1,78 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package objectutil
+
+import (
+ "errors"
+ "fmt"
+
+ apimeta "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+)
+
+// FilterWithLabels returns a copy of the items in objs matching labelSel.
+func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtime.Object, error) {
+ outItems := make([]runtime.Object, 0, len(objs))
+ for _, obj := range objs {
+ meta, err := apimeta.Accessor(obj)
+ if err != nil {
+ return nil, err
+ }
+ if labelSel != nil {
+ lbls := labels.Set(meta.GetLabels())
+ if !labelSel.Matches(lbls) {
+ continue
+ }
+ }
+ outItems = append(outItems, obj.DeepCopyObject())
+ }
+ return outItems, nil
+}
+
+// IsAPINamespaced returns true if the object is namespace scoped.
+// For unstructured objects the gvk is found from the object itself.
+func IsAPINamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) {
+ gvk, err := apiutil.GVKForObject(obj, scheme)
+ if err != nil {
+ return false, err
+ }
+
+ return IsAPINamespacedWithGVK(gvk, scheme, restmapper)
+}
+
+// IsAPINamespacedWithGVK returns true if the object having the provided
+// GVK is namespace scoped.
+func IsAPINamespacedWithGVK(gk schema.GroupVersionKind, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) {
+ restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gk.Group, Kind: gk.Kind})
+ if err != nil {
+ return false, fmt.Errorf("failed to get restmapping: %w", err)
+ }
+
+ scope := restmapping.Scope.Name()
+
+ if scope == "" {
+ return false, errors.New("scope cannot be identified, empty scope returned")
+ }
+
+ if scope != apimeta.RESTScopeNameRoot {
+ return true, nil
+ }
+ return false, nil
+}
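
Editorial note: this package is internal to controller-runtime, so the call below is only illustrative of the semantics, not something downstream code can import. `items` ([]runtime.Object) is an assumption; labels is k8s.io/apimachinery/pkg/labels.

    sel := labels.SelectorFromSet(labels.Set{"app": "web"})
    matched, err := objectutil.FilterWithLabels(items, sel)
    // `matched` holds deep copies of the inputs whose labels include app=web.
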
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go
new file mode 100644
index 00000000..c82447d9
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go
@@ -0,0 +1,199 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package log
+
+import (
+ "sync"
+
+ "github.com/go-logr/logr"
+)
+
+// loggerPromise knows how to populate a concrete logr.Logger
+// with options, given an actual base logger later on down the line.
+type loggerPromise struct {
+ logger *DelegatingLogSink
+ childPromises []*loggerPromise
+ promisesLock sync.Mutex
+
+ name *string
+ tags []interface{}
+}
+
+func (p *loggerPromise) WithName(l *DelegatingLogSink, name string) *loggerPromise {
+ res := &loggerPromise{
+ logger: l,
+ name: &name,
+ promisesLock: sync.Mutex{},
+ }
+
+ p.promisesLock.Lock()
+ defer p.promisesLock.Unlock()
+ p.childPromises = append(p.childPromises, res)
+ return res
+}
+
+// WithValues provides a new Logger with the tags appended.
+func (p *loggerPromise) WithValues(l *DelegatingLogSink, tags ...interface{}) *loggerPromise {
+ res := &loggerPromise{
+ logger: l,
+ tags: tags,
+ promisesLock: sync.Mutex{},
+ }
+
+ p.promisesLock.Lock()
+ defer p.promisesLock.Unlock()
+ p.childPromises = append(p.childPromises, res)
+ return res
+}
+
+// Fulfill instantiates the Logger with the provided logger.
+func (p *loggerPromise) Fulfill(parentLogSink logr.LogSink) {
+ sink := parentLogSink
+ if p.name != nil {
+ sink = sink.WithName(*p.name)
+ }
+
+ if p.tags != nil {
+ sink = sink.WithValues(p.tags...)
+ }
+
+ p.logger.lock.Lock()
+ p.logger.logger = sink
+ if withCallDepth, ok := sink.(logr.CallDepthLogSink); ok {
+ p.logger.logger = withCallDepth.WithCallDepth(1)
+ }
+ p.logger.promise = nil
+ p.logger.lock.Unlock()
+
+ for _, childPromise := range p.childPromises {
+ childPromise.Fulfill(sink)
+ }
+}
+
+// DelegatingLogSink is a logsink that delegates to another logr.LogSink.
+// If the underlying promise is not nil, it registers calls to sub-loggers with
+// the logging factory to be populated later, and returns a new delegating
+// logger. It expects to have *some* logr.Logger set at all times (generally
+// a no-op logger before the promises are fulfilled).
+type DelegatingLogSink struct {
+ lock sync.RWMutex
+ logger logr.LogSink
+ promise *loggerPromise
+ info logr.RuntimeInfo
+}
+
+// Init implements logr.LogSink.
+func (l *DelegatingLogSink) Init(info logr.RuntimeInfo) {
+ l.lock.Lock()
+ defer l.lock.Unlock()
+ l.info = info
+}
+
+// Enabled tests whether this Logger is enabled. For example, commandline
+// flags might be used to set the logging verbosity and disable some info
+// logs.
+func (l *DelegatingLogSink) Enabled(level int) bool {
+ l.lock.RLock()
+ defer l.lock.RUnlock()
+ return l.logger.Enabled(level)
+}
+
+// Info logs a non-error message with the given key/value pairs as context.
+//
+// The msg argument should be used to add some constant description to
+// the log line. The key/value pairs can then be used to add additional
+// variable information. The key/value pairs should alternate string
+// keys and arbitrary values.
+func (l *DelegatingLogSink) Info(level int, msg string, keysAndValues ...interface{}) {
+ l.lock.RLock()
+ defer l.lock.RUnlock()
+ l.logger.Info(level, msg, keysAndValues...)
+}
+
+// Error logs an error, with the given message and key/value pairs as context.
+// It functions similarly to calling Info with the "error" named value, but may
+// have unique behavior, and should be preferred for logging errors (see the
+// package documentation for more information).
+//
+// The msg field should be used to add context to any underlying error,
+// while the err field should be used to attach the actual error that
+// triggered this log line, if present.
+func (l *DelegatingLogSink) Error(err error, msg string, keysAndValues ...interface{}) {
+ l.lock.RLock()
+ defer l.lock.RUnlock()
+ l.logger.Error(err, msg, keysAndValues...)
+}
+
+// WithName provides a new Logger with the name appended.
+func (l *DelegatingLogSink) WithName(name string) logr.LogSink {
+ l.lock.RLock()
+ defer l.lock.RUnlock()
+
+ if l.promise == nil {
+ sink := l.logger.WithName(name)
+ if withCallDepth, ok := sink.(logr.CallDepthLogSink); ok {
+ sink = withCallDepth.WithCallDepth(-1)
+ }
+ return sink
+ }
+
+ res := &DelegatingLogSink{logger: l.logger}
+ promise := l.promise.WithName(res, name)
+ res.promise = promise
+
+ return res
+}
+
+// WithValues provides a new Logger with the tags appended.
+func (l *DelegatingLogSink) WithValues(tags ...interface{}) logr.LogSink {
+ l.lock.RLock()
+ defer l.lock.RUnlock()
+
+ if l.promise == nil {
+ sink := l.logger.WithValues(tags...)
+ if withCallDepth, ok := sink.(logr.CallDepthLogSink); ok {
+ sink = withCallDepth.WithCallDepth(-1)
+ }
+ return sink
+ }
+
+ res := &DelegatingLogSink{logger: l.logger}
+ promise := l.promise.WithValues(res, tags...)
+ res.promise = promise
+
+ return res
+}
+
+// Fulfill switches the logger over to use the actual logger
+// provided, instead of the temporary initial one, if this method
+// has not been previously called.
+func (l *DelegatingLogSink) Fulfill(actual logr.LogSink) {
+ if l.promise != nil {
+ l.promise.Fulfill(actual)
+ }
+}
+
+// NewDelegatingLogSink constructs a new DelegatingLogSink which uses
+// the given logger before its promise is fulfilled.
+func NewDelegatingLogSink(initial logr.LogSink) *DelegatingLogSink {
+ l := &DelegatingLogSink{
+ logger: initial,
+ promise: &loggerPromise{promisesLock: sync.Mutex{}},
+ }
+ l.promise.logger = l
+ return l
+}
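
Editorial note: normally the promise machinery is exercised only through log.SetLogger (next file), but a hedged sketch of it in isolation may help. `crzap` stands for sigs.k8s.io/controller-runtime/pkg/log/zap, which the log package comment references; the names are assumptions.

    sink := log.NewDelegatingLogSink(log.NullLogSink{})
    root := logr.New(sink)
    setup := root.WithName("setup").WithValues("component", "demo") // recorded as promises
    sink.Fulfill(crzap.New().GetSink())                             // late-bind the real sink
    setup.Info("now routed through the zap-backed sink")            // delivered with name and values applied
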
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go
new file mode 100644
index 00000000..082dce3a
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go
@@ -0,0 +1,102 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package log contains utilities for fetching a new logger
+// when one is not already available.
+//
+// # The Log Handle
+//
+// This package contains a root logr.Logger Log. It may be used to
+// get a handle to whatever the root logging implementation is. By
+// default, no implementation exists, and the handle returns "promises"
+// to loggers. When the implementation is set using SetLogger, these
+// "promises" will be converted over to real loggers.
+//
+// # Logr
+//
+// All logging in controller-runtime is structured, using a set of interfaces
+// defined by a package called logr
+// (https://pkg.go.dev/github.com/go-logr/logr). The sub-package zap provides
+// helpers for setting up logr backed by Zap (go.uber.org/zap).
+package log
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/go-logr/logr"
+)
+
+// SetLogger sets a concrete logging implementation for all deferred Loggers.
+func SetLogger(l logr.Logger) {
+ loggerWasSetLock.Lock()
+ defer loggerWasSetLock.Unlock()
+
+ loggerWasSet = true
+ dlog.Fulfill(l.GetSink())
+}
+
+// It is safe to assume that if this wasn't set within the first 30 seconds of a binary's
+// lifetime, it will never get set. The DelegatingLogSink causes a high number of memory
+// allocations when not given an actual Logger, so we set a NullLogSink to avoid that.
+//
+// We need to keep the DelegatingLogSink because we have various inits() that get a logger from
+// here. They will always get executed before any code that imports controller-runtime
+// has a chance to run and hence to set an actual logger.
+func init() {
+ // Init is blocking, so start a new goroutine
+ go func() {
+ time.Sleep(30 * time.Second)
+ loggerWasSetLock.Lock()
+ defer loggerWasSetLock.Unlock()
+ if !loggerWasSet {
+ dlog.Fulfill(NullLogSink{})
+ }
+ }()
+}
+
+var (
+ loggerWasSetLock sync.Mutex
+ loggerWasSet bool
+)
+
+// Log is the base logger used by kubebuilder. It delegates
+// to another logr.Logger. You *must* call SetLogger to
+// get any actual logging. If SetLogger is not called within
+// the first 30 seconds of a binary's lifetime, it will get
+// set to a NullLogSink.
+var (
+ dlog = NewDelegatingLogSink(NullLogSink{})
+ Log = logr.New(dlog)
+)
+
+// FromContext returns a logger with predefined values from a context.Context.
+func FromContext(ctx context.Context, keysAndValues ...interface{}) logr.Logger {
+ log := Log
+ if ctx != nil {
+ if logger, err := logr.FromContext(ctx); err == nil {
+ log = logger
+ }
+ }
+ return log.WithValues(keysAndValues...)
+}
+
+// IntoContext takes a context and sets the logger as one of its values.
+// Use FromContext function to retrieve the logger.
+func IntoContext(ctx context.Context, log logr.Logger) context.Context {
+ return logr.NewContext(ctx, log)
+}
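
Editorial note: a minimal sketch of the intended wiring in a controller binary, assuming the zap sub-package mentioned in the package comment (sigs.k8s.io/controller-runtime/pkg/log/zap) as the concrete sink.

    func main() {
        log.SetLogger(zap.New()) // fulfill the delegating sink with a real logger
        ctx := log.IntoContext(context.Background(), log.Log.WithName("setup"))
        run(ctx)
    }

    func run(ctx context.Context) {
        logger := log.FromContext(ctx, "step", "run") // logger stored above, plus extra key/values
        logger.Info("starting")
    }
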
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go
new file mode 100644
index 00000000..f3e81074
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package log
+
+import (
+ "github.com/go-logr/logr"
+)
+
+// NB: this is the same as the null logger from logr/testing,
+// but avoids accidentally adding the testing flags to
+// all binaries.
+
+// NullLogSink is a logr.Logger that does nothing.
+type NullLogSink struct{}
+
+var _ logr.LogSink = NullLogSink{}
+
+// Init implements logr.LogSink.
+func (log NullLogSink) Init(logr.RuntimeInfo) {
+}
+
+// Info implements logr.InfoLogger.
+func (NullLogSink) Info(_ int, _ string, _ ...interface{}) {
+ // Do nothing.
+}
+
+// Enabled implements logr.InfoLogger.
+func (NullLogSink) Enabled(level int) bool {
+ return false
+}
+
+// Error implements logr.Logger.
+func (NullLogSink) Error(_ error, _ string, _ ...interface{}) {
+ // Do nothing.
+}
+
+// WithName implements logr.Logger.
+func (log NullLogSink) WithName(_ string) logr.LogSink {
+ return log
+}
+
+// WithValues implements logr.Logger.
+func (log NullLogSink) WithValues(_ ...interface{}) logr.LogSink {
+ return log
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go
new file mode 100644
index 00000000..e9522632
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package log
+
+import (
+ "sync"
+
+ "github.com/go-logr/logr"
+)
+
+// KubeAPIWarningLoggerOptions controls the behavior
+// of a rest.WarningHandler constructed using NewKubeAPIWarningLogger().
+type KubeAPIWarningLoggerOptions struct {
+ // Deduplicate indicates a given warning message should only be written once.
+ // Setting this to true in a long-running process handling many warnings can
+ // result in increased memory use.
+ Deduplicate bool
+}
+
+// KubeAPIWarningLogger is a wrapper around
+// a provided logr.Logger that implements the
+// rest.WarningHandler interface.
+type KubeAPIWarningLogger struct {
+ // logger is used to log responses with the warning header
+ logger logr.Logger
+ // opts contain options controlling warning output
+ opts KubeAPIWarningLoggerOptions
+	// writtenLock guards written
+ writtenLock sync.Mutex
+ // used to keep track of already logged messages
+ // and help in de-duplication.
+ written map[string]struct{}
+}
+
+// HandleWarningHeader handles logging for responses from the API server that
+// carry warning headers (code 299), using a logr.Logger for its output.
+func (l *KubeAPIWarningLogger) HandleWarningHeader(code int, agent string, message string) {
+ if code != 299 || len(message) == 0 {
+ return
+ }
+
+ if l.opts.Deduplicate {
+ l.writtenLock.Lock()
+ defer l.writtenLock.Unlock()
+
+ if _, alreadyLogged := l.written[message]; alreadyLogged {
+ return
+ }
+ l.written[message] = struct{}{}
+ }
+ l.logger.Info(message)
+}
+
+// NewKubeAPIWarningLogger returns an implementation of rest.WarningHandler that logs warnings
+// with code = 299 to the provided logr.Logger.
+func NewKubeAPIWarningLogger(l logr.Logger, opts KubeAPIWarningLoggerOptions) *KubeAPIWarningLogger {
+ h := &KubeAPIWarningLogger{logger: l, opts: opts}
+ if opts.Deduplicate {
+ h.written = map[string]struct{}{}
+ }
+ return h
+}
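
Editorial note: a hedged sketch of plugging this into client-go; `cfg` is an assumed *rest.Config (k8s.io/client-go/rest), whose WarningHandler field receives API deprecation warnings.

    cfg.WarningHandler = log.NewKubeAPIWarningLogger(
        log.Log.WithName("KubeAPIWarningLogger"),
        log.KubeAPIWarningLoggerOptions{Deduplicate: true}, // log each distinct warning once
    )
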