diff --git a/hack/update/filesystem.go b/hack/update/filesystem.go index 864063f5f14e..d8cc6285f545 100644 --- a/hack/update/filesystem.go +++ b/hack/update/filesystem.go @@ -17,37 +17,58 @@ limitations under the License. package update import ( + "bytes" + "fmt" "io/ioutil" "os" "path/filepath" + + "k8s.io/klog/v2" ) // fsUpdate updates local filesystem repo files according to the given schema and data. // Returns if the update actually changed anything, and any error occurred. func fsUpdate(fsRoot string, schema map[string]Item, data interface{}) (changed bool, err error) { + var mode os.FileMode = 0644 for path, item := range schema { path = filepath.Join(fsRoot, path) - blob, err := ioutil.ReadFile(path) - if err != nil { - return false, err - } - info, err := os.Stat(path) - if err != nil { - return false, err + // if the item's content is already set, give it precedence over any current file content + var content []byte + if item.Content == nil { + info, err := os.Stat(path) + if err != nil { + return false, fmt.Errorf("unable to get file content: %w", err) + } + mode = info.Mode() + content, err = ioutil.ReadFile(path) + if err != nil { + return false, fmt.Errorf("unable to read file content: %w", err) + } + item.Content = content } - mode := info.Mode() - - item.Content = blob - chg, err := item.apply(data) - if err != nil { - return false, err + if err := item.apply(data); err != nil { + return false, fmt.Errorf("unable to update file: %w", err) } - if chg { + if !bytes.Equal(content, item.Content) { + // make sure path exists + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return false, fmt.Errorf("unable to create directory: %w", err) + } + if err := ioutil.WriteFile(path, item.Content, mode); err != nil { + return false, fmt.Errorf("unable to write file: %w", err) + } changed = true } - if err := ioutil.WriteFile(path, item.Content, mode); err != nil { - return false, err - } } return changed, nil } + +// Loadf returns the file content 
read as byte slice +func Loadf(path string) []byte { + blob, err := ioutil.ReadFile(path) + if err != nil { + klog.Fatalf("Unable to load file %s: %v", path, err) + return nil + } + return blob +} diff --git a/hack/update/github.go b/hack/update/github.go index cefe99b1fb93..8371296ddf23 100644 --- a/hack/update/github.go +++ b/hack/update/github.go @@ -73,7 +73,7 @@ func ghCreatePR(ctx context.Context, owner, repo, base, branch, title string, is } // update files - changes, err := ghUpdate(ctx, owner, repo, baseTree, token, schema, data) + changes, err := ghUpdate(ctx, owner, repo, token, schema, data) if err != nil { return nil, fmt.Errorf("unable to update files: %w", err) } @@ -126,16 +126,16 @@ func ghCreatePR(ctx context.Context, owner, repo, base, branch, title string, is klog.Infof("PR branch '%s' successfully created: %s", prBranch, prRef.GetURL()) // create PR - plan, err := GetPlan(schema, data) + _, pretty, err := GetPlan(schema, data) if err != nil { - klog.Fatalf("Unable to parse schema: %v\n%s", err, plan) + klog.Fatalf("Unable to parse schema: %v\n%s", err, pretty) } modifiable := true pr, _, err := ghc.PullRequests.Create(ctx, owner, repo, &github.NewPullRequest{ Title: github.String(title), Head: github.String(*fork.Owner.Login + ":" + prBranch), Base: github.String(base), - Body: github.String(fmt.Sprintf("fixes: #%d\n\nAutomatically created PR to update repo according to the Plan:\n\n```\n%s\n```", issue, plan)), + Body: github.String(fmt.Sprintf("fixes: #%d\n\nAutomatically created PR to update repo according to the Plan:\n\n```\n%s\n```", issue, pretty)), MaintainerCanModify: &modifiable, }) if err != nil { @@ -170,40 +170,40 @@ func ghFindPR(ctx context.Context, title, owner, repo, base, token string) (url // ghUpdate updates remote GitHub owner/repo tree according to the given token, schema and data. // Returns resulting changes, and any error occurred. 
-func ghUpdate(ctx context.Context, owner, repo string, tree *github.Tree, token string, schema map[string]Item, data interface{}) (changes []*github.TreeEntry, err error) { +func ghUpdate(ctx context.Context, owner, repo string, token string, schema map[string]Item, data interface{}) (changes []*github.TreeEntry, err error) { ghc := ghClient(ctx, token) // load each schema item content and update it creating new GitHub TreeEntries - cnt := len(schema) // expected number of files to change - for _, org := range tree.Entries { - if *org.Type == "blob" { - if item, match := schema[*org.Path]; match { - blob, _, err := ghc.Git.GetBlobRaw(ctx, owner, repo, *org.SHA) - if err != nil { - return nil, fmt.Errorf("unable to get file: %w", err) - } - item.Content = blob - changed, err := item.apply(data) - if err != nil { - return nil, fmt.Errorf("unable to update file: %w", err) - } - if changed { - // add github.TreeEntry that will replace original path content with the updated one - changes = append(changes, &github.TreeEntry{ - Path: org.Path, - Mode: org.Mode, - Type: org.Type, - Content: github.String(string(item.Content)), - }) - } - if cnt--; cnt == 0 { - break - } + for path, item := range schema { + // if the item's content is already set, give it precedence over any current file content + var content string + if item.Content == nil { + file, _, _, err := ghc.Repositories.GetContents(ctx, owner, repo, path, &github.RepositoryContentGetOptions{Ref: ghBase}) + if err != nil { + return nil, fmt.Errorf("unable to get file content: %w", err) } + content, err = file.GetContent() + if err != nil { + return nil, fmt.Errorf("unable to read file content: %w", err) + } + item.Content = []byte(content) + } + if err := item.apply(data); err != nil { + return nil, fmt.Errorf("unable to update file: %w", err) + } + if content != string(item.Content) { + // add github.TreeEntry that will replace original path content with the updated one or add new if one doesn't exist already + 
// ref: https://developer.github.com/v3/git/trees/#tree-object + rcPath := path // make sure to copy path variable as its reference (not value!) is passed to changes + rcMode := "100644" + rcType := "blob" + changes = append(changes, &github.TreeEntry{ + Path: &rcPath, + Mode: &rcMode, + Type: &rcType, + Content: github.String(string(item.Content)), + }) } - } - if cnt != 0 { - return nil, fmt.Errorf("unable to find all the files (%d missing) - check the Plan: %w", cnt, err) } return changes, nil } diff --git a/hack/update/kubernetes_version/templates/v1beta2/containerd-api-port.yaml b/hack/update/kubernetes_version/templates/v1beta2/containerd-api-port.yaml new file mode 100644 index 000000000000..617e821e6b82 --- /dev/null +++ b/hack/update/kubernetes_version/templates/v1beta2/containerd-api-port.yaml @@ -0,0 +1,67 @@ +apiVersion: kubeadm.k8s.io/v1beta2 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 12345 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /run/containerd/containerd.sock + name: "mk" + kubeletExtraArgs: + node-ip: 1.1.1.1 + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +apiServer: + certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] + extraArgs: + enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" +certificatesDir: /var/lib/minikube/certs +clusterName: mk +controlPlaneEndpoint: control-plane.minikube.internal:12345 +dns: + type: CoreDNS +etcd: + local: + dataDir: /var/lib/minikube/etcd + extraArgs: + proxy-refresh-interval: "70000" +kubernetesVersion: v1.19.0 +networking: + dnsDomain: cluster.local + podSubnet: 
"10.244.0.0/16" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +authentication: + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt +cgroupDriver: systemd +clusterDomain: "cluster.local" +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +failSwapOn: false +staticPodPath: /etc/kubernetes/manifests +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +clusterCIDR: "10.244.0.0/16" +metricsBindAddress: 1.1.1.1:10249 diff --git a/hack/update/kubernetes_version/templates/v1beta2/containerd-pod-network-cidr.yaml b/hack/update/kubernetes_version/templates/v1beta2/containerd-pod-network-cidr.yaml new file mode 100644 index 000000000000..d91d3e926ec3 --- /dev/null +++ b/hack/update/kubernetes_version/templates/v1beta2/containerd-pod-network-cidr.yaml @@ -0,0 +1,67 @@ +apiVersion: kubeadm.k8s.io/v1beta2 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /run/containerd/containerd.sock + name: "mk" + kubeletExtraArgs: + node-ip: 1.1.1.1 + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +apiServer: + certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] + extraArgs: + enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" +certificatesDir: /var/lib/minikube/certs +clusterName: mk +controlPlaneEndpoint: control-plane.minikube.internal:8443 +dns: + type: CoreDNS +etcd: + local: + dataDir: 
/var/lib/minikube/etcd + extraArgs: + proxy-refresh-interval: "70000" +kubernetesVersion: v1.19.0 +networking: + dnsDomain: cluster.local + podSubnet: "192.168.32.0/20" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +authentication: + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt +cgroupDriver: systemd +clusterDomain: "cluster.local" +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +failSwapOn: false +staticPodPath: /etc/kubernetes/manifests +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +clusterCIDR: "192.168.32.0/20" +metricsBindAddress: 1.1.1.1:10249 diff --git a/hack/update/kubernetes_version/templates/v1beta2/containerd.yaml b/hack/update/kubernetes_version/templates/v1beta2/containerd.yaml new file mode 100644 index 000000000000..6c12857ab2f0 --- /dev/null +++ b/hack/update/kubernetes_version/templates/v1beta2/containerd.yaml @@ -0,0 +1,67 @@ +apiVersion: kubeadm.k8s.io/v1beta2 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /run/containerd/containerd.sock + name: "mk" + kubeletExtraArgs: + node-ip: 1.1.1.1 + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +apiServer: + certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] + extraArgs: + enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" +certificatesDir: /var/lib/minikube/certs +clusterName: mk 
+controlPlaneEndpoint: control-plane.minikube.internal:8443 +dns: + type: CoreDNS +etcd: + local: + dataDir: /var/lib/minikube/etcd + extraArgs: + proxy-refresh-interval: "70000" +kubernetesVersion: v1.19.0 +networking: + dnsDomain: cluster.local + podSubnet: "10.244.0.0/16" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +authentication: + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt +cgroupDriver: systemd +clusterDomain: "cluster.local" +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +failSwapOn: false +staticPodPath: /etc/kubernetes/manifests +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +clusterCIDR: "10.244.0.0/16" +metricsBindAddress: 1.1.1.1:10249 diff --git a/hack/update/kubernetes_version/templates/v1beta2/crio-options-gates.yaml b/hack/update/kubernetes_version/templates/v1beta2/crio-options-gates.yaml new file mode 100644 index 000000000000..1dcff3d334d3 --- /dev/null +++ b/hack/update/kubernetes_version/templates/v1beta2/crio-options-gates.yaml @@ -0,0 +1,74 @@ +apiVersion: kubeadm.k8s.io/v1beta2 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/crio/crio.sock + name: "mk" + kubeletExtraArgs: + node-ip: 1.1.1.1 + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +apiServer: + certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] + extraArgs: + enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" + fail-no-swap: "true" + feature-gates: "a=b" 
+controllerManager: + extraArgs: + feature-gates: "a=b" + kube-api-burst: "32" + leader-elect: "false" +scheduler: + extraArgs: + feature-gates: "a=b" + leader-elect: "false" + scheduler-name: "mini-scheduler" +certificatesDir: /var/lib/minikube/certs +clusterName: mk +controlPlaneEndpoint: control-plane.minikube.internal:8443 +dns: + type: CoreDNS +etcd: + local: + dataDir: /var/lib/minikube/etcd + extraArgs: + proxy-refresh-interval: "70000" +kubernetesVersion: v1.19.0 +networking: + dnsDomain: cluster.local + podSubnet: "10.244.0.0/16" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +authentication: + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt +cgroupDriver: systemd +clusterDomain: "cluster.local" +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +failSwapOn: false +staticPodPath: /etc/kubernetes/manifests +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +clusterCIDR: "10.244.0.0/16" +metricsBindAddress: 1.1.1.1:10249 +mode: "iptables" diff --git a/hack/update/kubernetes_version/templates/v1beta2/crio.yaml b/hack/update/kubernetes_version/templates/v1beta2/crio.yaml new file mode 100644 index 000000000000..751041646eff --- /dev/null +++ b/hack/update/kubernetes_version/templates/v1beta2/crio.yaml @@ -0,0 +1,67 @@ +apiVersion: kubeadm.k8s.io/v1beta2 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/crio/crio.sock + name: "mk" + kubeletExtraArgs: + node-ip: 1.1.1.1 + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +apiServer: + certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] + extraArgs: + 
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" +certificatesDir: /var/lib/minikube/certs +clusterName: mk +controlPlaneEndpoint: control-plane.minikube.internal:8443 +dns: + type: CoreDNS +etcd: + local: + dataDir: /var/lib/minikube/etcd + extraArgs: + proxy-refresh-interval: "70000" +kubernetesVersion: v1.19.0 +networking: + dnsDomain: cluster.local + podSubnet: "10.244.0.0/16" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +authentication: + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt +cgroupDriver: systemd +clusterDomain: "cluster.local" +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +failSwapOn: false +staticPodPath: /etc/kubernetes/manifests +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +clusterCIDR: "10.244.0.0/16" +metricsBindAddress: 1.1.1.1:10249 diff --git a/hack/update/kubernetes_version/templates/v1beta2/default.yaml b/hack/update/kubernetes_version/templates/v1beta2/default.yaml new file mode 100644 index 000000000000..da68c6fbcd8a --- /dev/null +++ b/hack/update/kubernetes_version/templates/v1beta2/default.yaml @@ -0,0 +1,67 @@ +apiVersion: kubeadm.k8s.io/v1beta2 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/dockershim.sock + name: "mk" + kubeletExtraArgs: + node-ip: 1.1.1.1 + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta2 +kind: 
ClusterConfiguration +apiServer: + certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] + extraArgs: + enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" +certificatesDir: /var/lib/minikube/certs +clusterName: mk +controlPlaneEndpoint: control-plane.minikube.internal:8443 +dns: + type: CoreDNS +etcd: + local: + dataDir: /var/lib/minikube/etcd + extraArgs: + proxy-refresh-interval: "70000" +kubernetesVersion: v1.19.0 +networking: + dnsDomain: cluster.local + podSubnet: "10.244.0.0/16" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +authentication: + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt +cgroupDriver: systemd +clusterDomain: "cluster.local" +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +failSwapOn: false +staticPodPath: /etc/kubernetes/manifests +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +clusterCIDR: "10.244.0.0/16" +metricsBindAddress: 1.1.1.1:10249 diff --git a/hack/update/kubernetes_version/templates/v1beta2/dns.yaml b/hack/update/kubernetes_version/templates/v1beta2/dns.yaml new file mode 100644 index 000000000000..cadb2556e0f6 --- /dev/null +++ b/hack/update/kubernetes_version/templates/v1beta2/dns.yaml @@ -0,0 +1,67 @@ +apiVersion: kubeadm.k8s.io/v1beta2 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/dockershim.sock + name: "mk" + kubeletExtraArgs: + node-ip: 
1.1.1.1 + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +apiServer: + certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] + extraArgs: + enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" +certificatesDir: /var/lib/minikube/certs +clusterName: mk +controlPlaneEndpoint: control-plane.minikube.internal:8443 +dns: + type: CoreDNS +etcd: + local: + dataDir: /var/lib/minikube/etcd + extraArgs: + proxy-refresh-interval: "70000" +kubernetesVersion: v1.19.0 +networking: + dnsDomain: 1.1.1.1 + podSubnet: "10.244.0.0/16" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +authentication: + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt +cgroupDriver: systemd +clusterDomain: "cluster.local" +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +failSwapOn: false +staticPodPath: /etc/kubernetes/manifests +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +clusterCIDR: "10.244.0.0/16" +metricsBindAddress: 1.1.1.1:10249 diff --git a/hack/update/kubernetes_version/templates/v1beta2/image-repository.yaml b/hack/update/kubernetes_version/templates/v1beta2/image-repository.yaml new file mode 100644 index 000000000000..be593e2fd07b --- /dev/null +++ b/hack/update/kubernetes_version/templates/v1beta2/image-repository.yaml @@ -0,0 +1,68 @@ +apiVersion: kubeadm.k8s.io/v1beta2 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication 
+nodeRegistration: + criSocket: /var/run/dockershim.sock + name: "mk" + kubeletExtraArgs: + node-ip: 1.1.1.1 + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +imageRepository: test/repo +apiServer: + certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] + extraArgs: + enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" +certificatesDir: /var/lib/minikube/certs +clusterName: mk +controlPlaneEndpoint: control-plane.minikube.internal:8443 +dns: + type: CoreDNS +etcd: + local: + dataDir: /var/lib/minikube/etcd + extraArgs: + proxy-refresh-interval: "70000" +kubernetesVersion: v1.19.0 +networking: + dnsDomain: cluster.local + podSubnet: "10.244.0.0/16" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +authentication: + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt +cgroupDriver: systemd +clusterDomain: "cluster.local" +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +failSwapOn: false +staticPodPath: /etc/kubernetes/manifests +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +clusterCIDR: "10.244.0.0/16" +metricsBindAddress: 1.1.1.1:10249 diff --git a/hack/update/kubernetes_version/templates/v1beta2/options.yaml b/hack/update/kubernetes_version/templates/v1beta2/options.yaml new file mode 100644 index 000000000000..f9bad9233f3e --- /dev/null +++ b/hack/update/kubernetes_version/templates/v1beta2/options.yaml @@ -0,0 +1,71 @@ +apiVersion: kubeadm.k8s.io/v1beta2 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + 
- system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/dockershim.sock + name: "mk" + kubeletExtraArgs: + node-ip: 1.1.1.1 + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +apiServer: + certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] + extraArgs: + enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" + fail-no-swap: "true" +controllerManager: + extraArgs: + kube-api-burst: "32" + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" + scheduler-name: "mini-scheduler" +certificatesDir: /var/lib/minikube/certs +clusterName: mk +controlPlaneEndpoint: control-plane.minikube.internal:8443 +dns: + type: CoreDNS +etcd: + local: + dataDir: /var/lib/minikube/etcd + extraArgs: + proxy-refresh-interval: "70000" +kubernetesVersion: v1.19.0 +networking: + dnsDomain: cluster.local + podSubnet: "10.244.0.0/16" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +authentication: + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt +cgroupDriver: systemd +clusterDomain: "cluster.local" +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +failSwapOn: false +staticPodPath: /etc/kubernetes/manifests +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +clusterCIDR: "10.244.0.0/16" +metricsBindAddress: 1.1.1.1:10249 +mode: "iptables" diff --git a/hack/update/kubernetes_version/update_kubernetes_version.go b/hack/update/kubernetes_version/update_kubernetes_version.go index 92f910b2929b..692e4f423139 100644 --- a/hack/update/kubernetes_version/update_kubernetes_version.go +++ 
b/hack/update/kubernetes_version/update_kubernetes_version.go @@ -30,6 +30,7 @@ import ( "context" "time" + "golang.org/x/mod/semver" "k8s.io/klog/v2" "k8s.io/minikube/hack/update" @@ -54,6 +55,60 @@ var ( `'latest' for .*\)`: `'latest' for {{.LatestVersion}})`, }, }, + "pkg/minikube/bootstrapper/bsutil/testdata/{{.LatestVersionMM}}/containerd-api-port.yaml": { + Content: update.Loadf("templates/v1beta2/containerd-api-port.yaml"), + Replace: map[string]string{ + `kubernetesVersion:.*`: `kubernetesVersion: {{.LatestVersionP0}}`, + }, + }, + "pkg/minikube/bootstrapper/bsutil/testdata/{{.LatestVersionMM}}/containerd-pod-network-cidr.yaml": { + Content: update.Loadf("templates/v1beta2/containerd-pod-network-cidr.yaml"), + Replace: map[string]string{ + `kubernetesVersion:.*`: `kubernetesVersion: {{.LatestVersionP0}}`, + }, + }, + "pkg/minikube/bootstrapper/bsutil/testdata/{{.LatestVersionMM}}/containerd.yaml": { + Content: update.Loadf("templates/v1beta2/containerd.yaml"), + Replace: map[string]string{ + `kubernetesVersion:.*`: `kubernetesVersion: {{.LatestVersionP0}}`, + }, + }, + "pkg/minikube/bootstrapper/bsutil/testdata/{{.LatestVersionMM}}/crio-options-gates.yaml": { + Content: update.Loadf("templates/v1beta2/crio-options-gates.yaml"), + Replace: map[string]string{ + `kubernetesVersion:.*`: `kubernetesVersion: {{.LatestVersionP0}}`, + }, + }, + "pkg/minikube/bootstrapper/bsutil/testdata/{{.LatestVersionMM}}/crio.yaml": { + Content: update.Loadf("templates/v1beta2/crio.yaml"), + Replace: map[string]string{ + `kubernetesVersion:.*`: `kubernetesVersion: {{.LatestVersionP0}}`, + }, + }, + "pkg/minikube/bootstrapper/bsutil/testdata/{{.LatestVersionMM}}/default.yaml": { + Content: update.Loadf("templates/v1beta2/default.yaml"), + Replace: map[string]string{ + `kubernetesVersion:.*`: `kubernetesVersion: {{.LatestVersionP0}}`, + }, + }, + "pkg/minikube/bootstrapper/bsutil/testdata/{{.LatestVersionMM}}/dns.yaml": { + Content: update.Loadf("templates/v1beta2/dns.yaml"), + 
Replace: map[string]string{ + `kubernetesVersion:.*`: `kubernetesVersion: {{.LatestVersionP0}}`, + }, + }, + "pkg/minikube/bootstrapper/bsutil/testdata/{{.LatestVersionMM}}/image-repository.yaml": { + Content: update.Loadf("templates/v1beta2/image-repository.yaml"), + Replace: map[string]string{ + `kubernetesVersion:.*`: `kubernetesVersion: {{.LatestVersionP0}}`, + }, + }, + "pkg/minikube/bootstrapper/bsutil/testdata/{{.LatestVersionMM}}/options.yaml": { + Content: update.Loadf("templates/v1beta2/options.yaml"), + Replace: map[string]string{ + `kubernetesVersion:.*`: `kubernetesVersion: {{.LatestVersionP0}}`, + }, + }, } // PR data @@ -64,8 +119,11 @@ var ( // Data holds greatest current stable release and greatest latest rc or beta pre-release Kubernetes versions type Data struct { - StableVersion string `json:"StableVersion"` - LatestVersion string `json:"LatestVersion"` + StableVersion string `json:"StableVersion"` + LatestVersion string `json:"LatestVersion"` + LatestVersionMM string `json:"LatestVersionMM"` // LatestVersion in <major>.<minor> format + // for testdata: if StableVersion greater than 'LatestVersionMM.0' exists, LatestVersionP0 is 'LatestVersionMM.0', otherwise LatestVersionP0 is LatestVersion. 
+ LatestVersionP0 string `json:"LatestVersionP0"` } func main() { @@ -74,12 +132,27 @@ func main() { defer cancel() // get Kubernetes versions from GitHub Releases - stable, latest, err := update.GHReleases(ctx, "kubernetes", "kubernetes") - if err != nil || stable == "" || latest == "" { + stable, latest, latestMM, latestP0, err := k8sVersions(ctx, "kubernetes", "kubernetes") + if err != nil || !semver.IsValid(stable) || !semver.IsValid(latest) || !semver.IsValid(latestMM) || !semver.IsValid(latestP0) { klog.Fatalf("Unable to get Kubernetes versions: %v", err) } - data := Data{StableVersion: stable, LatestVersion: latest} + data := Data{StableVersion: stable, LatestVersion: latest, LatestVersionMM: latestMM, LatestVersionP0: latestP0} klog.Infof("Kubernetes versions: 'stable' is %s and 'latest' is %s", data.StableVersion, data.LatestVersion) update.Apply(ctx, schema, data, prBranchPrefix, prTitle, prIssue) } + +// k8sVersions returns Kubernetes versions. +func k8sVersions(ctx context.Context, owner, repo string) (stable, latest, latestMM, latestP0 string, err error) { + // get Kubernetes versions from GitHub Releases + stable, latest, err = update.GHReleases(ctx, owner, repo) + if err != nil || !semver.IsValid(stable) || !semver.IsValid(latest) { + return "", "", "", "", err + } + latestMM = semver.MajorMinor(latest) + latestP0 = latestMM + ".0" + if semver.Compare(stable, latestP0) == -1 { + latestP0 = latest + } + return stable, latest, latestMM, latestP0, nil +} diff --git a/hack/update/update.go b/hack/update/update.go index bf1c9530d753..8cf74d01d829 100644 --- a/hack/update/update.go +++ b/hack/update/update.go @@ -84,33 +84,32 @@ type Item struct { } // apply updates Item Content by replacing all occurrences of Replace map's keys with their actual map values (with placeholders replaced with data). 
-func (i *Item) apply(data interface{}) (changed bool, err error) { - if i.Content == nil || i.Replace == nil { - return false, fmt.Errorf("unable to update content: nothing to update") +func (i *Item) apply(data interface{}) error { + if i.Content == nil { + return fmt.Errorf("unable to update content: nothing to update") } org := string(i.Content) str := org for src, dst := range i.Replace { - tmpl := template.Must(template.New("").Parse(dst)) - buf := new(bytes.Buffer) - if err := tmpl.Execute(buf, data); err != nil { - return false, err + out, err := ParseTmpl(dst, data, "") + if err != nil { + return err } re := regexp.MustCompile(src) - str = re.ReplaceAllString(str, buf.String()) + str = re.ReplaceAllString(str, out) } i.Content = []byte(str) - return str != org, nil + return nil } // Apply applies concrete update plan (schema + data) to GitHub or local filesystem repo func Apply(ctx context.Context, schema map[string]Item, data interface{}, prBranchPrefix, prTitle string, prIssue int) { - plan, err := GetPlan(schema, data) + schema, pretty, err := GetPlan(schema, data) if err != nil { - klog.Fatalf("Unable to parse schema: %v\n%s", err, plan) + klog.Fatalf("Unable to parse schema: %v\n%s", err, pretty) } - klog.Infof("The Plan:\n%s", plan) + klog.Infof("The Plan:\n%s", pretty) if target == "fs" || target == "all" { changed, err := fsUpdate(FSRoot, schema, data) @@ -125,12 +124,9 @@ func Apply(ctx context.Context, schema map[string]Item, data interface{}, prBran if target == "gh" || target == "all" { // update prTitle replacing template placeholders with actual data values - tmpl := template.Must(template.New("prTitle").Parse(prTitle)) - buf := new(bytes.Buffer) - if err := tmpl.Execute(buf, data); err != nil { + if prTitle, err = ParseTmpl(prTitle, data, "prTitle"); err != nil { klog.Fatalf("Unable to parse PR Title: %v", err) } - prTitle = buf.String() // check if PR already exists prURL, err := ghFindPR(ctx, prTitle, ghOwner, ghRepo, ghBase, ghToken) @@ 
-153,22 +149,31 @@ func Apply(ctx context.Context, schema map[string]Item, data interface{}, prBran } // GetPlan returns concrete plan replacing placeholders in schema with actual data values, returns JSON-formatted representation of the plan and any error occurred. -func GetPlan(schema map[string]Item, data interface{}) (prettyprint string, err error) { - for _, item := range schema { +func GetPlan(schema map[string]Item, data interface{}) (plan map[string]Item, prettyprint string, err error) { + plan = make(map[string]Item) + for p, item := range schema { + path, err := ParseTmpl(p, data, "") + if err != nil { + return plan, fmt.Sprintf("%+v", schema), err + } + plan[path] = item + } + + for _, item := range plan { for src, dst := range item.Replace { - tmpl := template.Must(template.New("").Parse(dst)) - buf := new(bytes.Buffer) - if err := tmpl.Execute(buf, data); err != nil { - return fmt.Sprintf("%+v", schema), err + out, err := ParseTmpl(dst, data, "") + if err != nil { + return plan, fmt.Sprintf("%+v", schema), err } - item.Replace[src] = buf.String() + item.Replace[src] = out } } - str, err := json.MarshalIndent(schema, "", " ") + str, err := json.MarshalIndent(plan, "", " ") if err != nil { - return fmt.Sprintf("%+v", schema), err + return plan, fmt.Sprintf("%+v", schema), err } - return string(str), nil + + return plan, string(str), nil } // RunWithRetryNotify runs command cmd with stdin using exponential backoff for maxTime duration @@ -210,3 +215,13 @@ func Run(cmd *exec.Cmd, stdin io.Reader) error { } return nil } + +// ParseTmpl replaces placeholders in text with actual data values +func ParseTmpl(text string, data interface{}, name string) (string, error) { + tmpl := template.Must(template.New(name).Parse(text)) + buf := new(bytes.Buffer) + if err := tmpl.Execute(buf, data); err != nil { + return "", err + } + return buf.String(), nil +}