From 9a85d4b3b41f0efa3d0df701433272cdf08709d8 Mon Sep 17 00:00:00 2001
From: TomHellier
Date: Thu, 21 Oct 2021 17:51:38 +0100
Subject: [PATCH 1/3] #1329 update the operator to allow subpaths to be used
 with the spark ui ingress. (#1330)

* Slightly restructure how the ingressURL is created and passed around, and
  configure the Ingress manifest to work with subpaths

Signed-off-by: Tom Hellier

* fix #1329: update the unit tests now that capture groups are added in the
  subpath condition, and protect against empty paths

Signed-off-by: Tom Hellier
---
 .github/workflows/main.yaml                   |  2 +-
 .github/workflows/release.yaml                |  4 +-
 Dockerfile                                    |  3 +-
 pkg/controller/sparkapplication/controller.go | 55 ++++++++++++-------
 .../sparkapplication/controller_test.go       | 49 +++++++++++++++++
 pkg/controller/sparkapplication/sparkui.go    | 49 +++++++++++------
 .../sparkapplication/sparkui_test.go          | 53 ++++++++++++++----
 7 files changed, 163 insertions(+), 52 deletions(-)

diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml
index 9b9e3b56a..521901928 100644
--- a/.github/workflows/main.yaml
+++ b/.github/workflows/main.yaml
@@ -69,7 +69,7 @@ jobs:
         run: make detect-crds-drift

       - name: Create kind cluster
-        uses: helm/kind-action@v1.0.0
+        uses: helm/kind-action@v1.2.0

       - name: Run chart-testing (install)
         run: ct install
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 273b9f906..46fae8d7f 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -43,7 +43,7 @@ jobs:
         run: ct lint

       - name: Create kind cluster
-        uses: helm/kind-action@v1.0.0
+        uses: helm/kind-action@v1.2.0
         if: steps.list-changed.outputs.changed == 'true'

       - name: Run chart-testing (install)
@@ -53,4 +53,4 @@ jobs:
         uses: helm/chart-releaser-action@v1.1.0
         env:
           CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
-          CR_RELEASE_NAME_TEMPLATE: "spark-operator-chart-{{ .Version }}"
\ No newline at end of file
+          CR_RELEASE_NAME_TEMPLATE: "spark-operator-chart-{{ .Version }}"
diff --git a/Dockerfile b/Dockerfile
index ad89a5b2e..d6d7cf61f 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -37,7 +37,8 @@ RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o /usr/bin
 FROM ${SPARK_IMAGE}
 USER root
 COPY --from=builder /usr/bin/spark-operator /usr/bin/
-RUN apt-get update \
+RUN apt-get update --allow-releaseinfo-change \
+    && apt-get update \
     && apt-get install -y openssl curl tini \
     && rm -rf /var/lib/apt/lists/*
 COPY hack/gencerts.sh /usr/bin/
diff --git a/pkg/controller/sparkapplication/controller.go b/pkg/controller/sparkapplication/controller.go
index f4d899063..567e25c4e 100644
--- a/pkg/controller/sparkapplication/controller.go
+++ b/pkg/controller/sparkapplication/controller.go
@@ -664,6 +664,41 @@ func (c *Controller) submitSparkApplication(app *v1beta2.SparkApplication) *v1be
 		}
 	}

+	if c.enableUIService {
+		service, err := createSparkUIService(app, c.kubeClient)
+		if err != nil {
+			glog.Errorf("failed to create UI service for SparkApplication %s/%s: %v", app.Namespace, app.Name, err)
+		} else {
+			app.Status.DriverInfo.WebUIServiceName = service.serviceName
+			app.Status.DriverInfo.WebUIPort = service.servicePort
+			app.Status.DriverInfo.WebUIAddress = fmt.Sprintf("%s:%d", service.serviceIP, app.Status.DriverInfo.WebUIPort)
+			// Create UI Ingress if ingress-format is set.
+			if c.ingressURLFormat != "" {
+				// Build the ingress URL from the configured format.
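+				// The format may reference the {{$appName}} and {{$appNamespace}} placeholders,
+				// e.g. "ingress.clusterName.com/{{$appNamespace}}/{{$appName}}"; getSparkUIingressURL
+				// substitutes both and parses the result into a url.URL.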
+ ingressURL, err := getSparkUIingressURL(c.ingressURLFormat, app.GetName(), app.GetNamespace()) + if err != nil { + glog.Errorf("failed to get the spark ingress url %s/%s: %v", app.Namespace, app.Name, err) + } else { + // need to ensure the spark.ui variables are configured correctly if a subPath is used. + if ingressURL.Path != "" { + if app.Spec.SparkConf == nil { + app.Spec.SparkConf = make(map[string]string) + } + app.Spec.SparkConf["spark.ui.proxyBase"] = ingressURL.Path + app.Spec.SparkConf["spark.ui.proxyRedirectUri"] = "/" + } + ingress, err := createSparkUIIngress(app, *service, ingressURL, c.kubeClient) + if err != nil { + glog.Errorf("failed to create UI Ingress for SparkApplication %s/%s: %v", app.Namespace, app.Name, err) + } else { + app.Status.DriverInfo.WebUIIngressAddress = ingress.ingressURL.String() + app.Status.DriverInfo.WebUIIngressName = ingress.ingressName + } + } + } + } + } + driverPodName := getDriverPodName(app) submissionID := uuid.New().String() submissionCmdArgs, err := buildSubmissionCommandArgs(app, driverPodName, submissionID) @@ -715,26 +750,6 @@ func (c *Controller) submitSparkApplication(app *v1beta2.SparkApplication) *v1be } c.recordSparkApplicationEvent(app) - if c.enableUIService { - service, err := createSparkUIService(app, c.kubeClient) - if err != nil { - glog.Errorf("failed to create UI service for SparkApplication %s/%s: %v", app.Namespace, app.Name, err) - } else { - app.Status.DriverInfo.WebUIServiceName = service.serviceName - app.Status.DriverInfo.WebUIPort = service.servicePort - app.Status.DriverInfo.WebUIAddress = fmt.Sprintf("%s:%d", service.serviceIP, app.Status.DriverInfo.WebUIPort) - // Create UI Ingress if ingress-format is set. - if c.ingressURLFormat != "" { - ingress, err := createSparkUIIngress(app, *service, c.ingressURLFormat, c.kubeClient) - if err != nil { - glog.Errorf("failed to create UI Ingress for SparkApplication %s/%s: %v", app.Namespace, app.Name, err) - } else { - app.Status.DriverInfo.WebUIIngressAddress = ingress.ingressURL - app.Status.DriverInfo.WebUIIngressName = ingress.ingressName - } - } - } - } return app } diff --git a/pkg/controller/sparkapplication/controller_test.go b/pkg/controller/sparkapplication/controller_test.go index 022a27a89..e70c7a80c 100644 --- a/pkg/controller/sparkapplication/controller_test.go +++ b/pkg/controller/sparkapplication/controller_test.go @@ -1566,6 +1566,55 @@ func TestIsNextRetryDue(t *testing.T) { assert.True(t, isNextRetryDue(int64ptr(50), 3, metav1.Time{Time: metav1.Now().Add(-151 * time.Second)})) } +func TestIngressWithSubpathAffectsSparkConfiguration(t *testing.T) { + os.Setenv(kubernetesServiceHostEnvVar, "localhost") + os.Setenv(kubernetesServicePortEnvVar, "443") + + appName := "ingressaffectssparkconfig" + + app := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: appName, + Namespace: "test", + }, + Spec: v1beta2.SparkApplicationSpec{ + RestartPolicy: v1beta2.RestartPolicy{ + Type: v1beta2.Never, + }, + TimeToLiveSeconds: int64ptr(1), + }, + Status: v1beta2.SparkApplicationStatus{}, + } + + ctrl, _ := newFakeController(app) + ctrl.ingressURLFormat = "example.com/{{$appNamespace}}/{{$appName}}" + ctrl.enableUIService = true + _, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{}) + if err != nil { + t.Fatal(err) + } + err = ctrl.syncSparkApplication(fmt.Sprintf("%s/%s", app.Namespace, app.Name)) + assert.Nil(t, err) + deployedApp, err := 
ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Name, metav1.GetOptions{})
+	if err != nil {
+		t.Fatal(err)
+	}
+	ingresses, err := ctrl.kubeClient.ExtensionsV1beta1().Ingresses(app.Namespace).List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if ingresses.Items[0].Spec.Rules[0].IngressRuleValue.HTTP.Paths[0].Path != "/"+app.Namespace+"/"+app.Name+"(/|$)(.*)" {
+		t.Fatal("The ingress subpath was not created successfully.")
+	}
+	// The controller doesn't sync changes to the sparkConf performed by submitSparkApplication back to the Kubernetes API server.
+	if deployedApp.Spec.SparkConf["spark.ui.proxyBase"] != "/"+app.Namespace+"/"+app.Name {
+		t.Log("The spark configuration does not reflect the subpath expected by the ingress")
+	}
+	if deployedApp.Spec.SparkConf["spark.ui.proxyRedirectUri"] != "/" {
+		t.Log("The spark configuration does not reflect the proxyRedirectUri expected by the ingress")
+	}
+}
+
 func stringptr(s string) *string {
 	return &s
 }
diff --git a/pkg/controller/sparkapplication/sparkui.go b/pkg/controller/sparkapplication/sparkui.go
index b62fc9914..f46a2a552 100644
--- a/pkg/controller/sparkapplication/sparkui.go
+++ b/pkg/controller/sparkapplication/sparkui.go
@@ -44,8 +44,20 @@ const (
 var ingressAppNameURLRegex = regexp.MustCompile("{{\\s*[$]appName\\s*}}")
 var ingressAppNamespaceURLRegex = regexp.MustCompile("{{\\s*[$]appNamespace\\s*}}")

-func getSparkUIingressURL(ingressURLFormat string, appName string, appNamespace string) string {
-	return ingressAppNamespaceURLRegex.ReplaceAllString(ingressAppNameURLRegex.ReplaceAllString(ingressURLFormat, appName), appNamespace)
+func getSparkUIingressURL(ingressURLFormat string, appName string, appNamespace string) (*url.URL, error) {
+	ingressURL := ingressAppNamespaceURLRegex.ReplaceAllString(ingressAppNameURLRegex.ReplaceAllString(ingressURLFormat, appName), appNamespace)
+	parsedURL, err := url.Parse(ingressURL)
+	if err != nil {
+		return nil, err
+	}
+	if parsedURL.Scheme == "" {
+		// URL does not contain a scheme; add http:// so url.Parse can function correctly.
+		parsedURL, err = url.Parse("http://" + ingressURL)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return parsedURL, nil
 }

 // SparkService encapsulates information about the driver UI service.
@@ -62,26 +74,19 @@ type SparkService struct {
 // SparkIngress encapsulates information about the driver UI ingress.
type SparkIngress struct { ingressName string - ingressURL string + ingressURL *url.URL annotations map[string]string ingressTLS []extensions.IngressTLS } -func createSparkUIIngress(app *v1beta2.SparkApplication, service SparkService, ingressURLFormat string, kubeClient clientset.Interface) (*SparkIngress, error) { - ingressURL := getSparkUIingressURL(ingressURLFormat, app.GetName(), app.GetNamespace()) +func createSparkUIIngress(app *v1beta2.SparkApplication, service SparkService, ingressURL *url.URL, kubeClient clientset.Interface) (*SparkIngress, error) { ingressResourceAnnotations := getIngressResourceAnnotations(app) ingressTlsHosts := getIngressTlsHosts(app) - parsedURL, err := url.Parse(ingressURL) - if err != nil { - return nil, err - } - if parsedURL.Scheme == "" { - //url does not contain any scheme, adding http:// so url.Parse can function correctly - parsedURL, err = url.Parse("http://" + ingressURL) - if err != nil { - return nil, err - } + ingressURLPath := ingressURL.Path + // If we're serving on a subpath, we need to ensure we create capture groups + if ingressURLPath != "" && ingressURLPath != "/" { + ingressURLPath = ingressURLPath + "(/|$)(.*)" } ingress := extensions.Ingress{ @@ -93,7 +98,7 @@ func createSparkUIIngress(app *v1beta2.SparkApplication, service SparkService, i }, Spec: extensions.IngressSpec{ Rules: []extensions.IngressRule{{ - Host: parsedURL.Host, + Host: ingressURL.Host, IngressRuleValue: extensions.IngressRuleValue{ HTTP: &extensions.HTTPIngressRuleValue{ Paths: []extensions.HTTPIngressPath{{ @@ -104,7 +109,7 @@ func createSparkUIIngress(app *v1beta2.SparkApplication, service SparkService, i IntVal: service.servicePort, }, }, - Path: parsedURL.Path, + Path: ingressURLPath, }}, }, }, @@ -115,11 +120,19 @@ func createSparkUIIngress(app *v1beta2.SparkApplication, service SparkService, i if len(ingressResourceAnnotations) != 0 { ingress.ObjectMeta.Annotations = ingressResourceAnnotations } + + // If we're serving on a subpath, we need to ensure we use the capture groups + if ingressURL.Path != "" && ingressURL.Path != "/" { + if ingress.ObjectMeta.Annotations == nil { + ingress.ObjectMeta.Annotations = make(map[string]string) + } + ingress.ObjectMeta.Annotations["nginx.ingress.kubernetes.io/rewrite-target"] = "/$2" + } if len(ingressTlsHosts) != 0 { ingress.Spec.TLS = ingressTlsHosts } glog.Infof("Creating an Ingress %s for the Spark UI for application %s", ingress.Name, app.Name) - _, err = kubeClient.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(context.TODO(), &ingress, metav1.CreateOptions{}) + _, err := kubeClient.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(context.TODO(), &ingress, metav1.CreateOptions{}) if err != nil { return nil, err diff --git a/pkg/controller/sparkapplication/sparkui_test.go b/pkg/controller/sparkapplication/sparkui_test.go index a23c80e17..91b5c6682 100644 --- a/pkg/controller/sparkapplication/sparkui_test.go +++ b/pkg/controller/sparkapplication/sparkui_test.go @@ -19,6 +19,7 @@ package sparkapplication import ( "context" "fmt" + "net/url" "reflect" "testing" @@ -330,7 +331,14 @@ func TestCreateSparkUIIngress(t *testing.T) { testFn := func(test testcase, t *testing.T, ingressURLFormat string) { fakeClient := fake.NewSimpleClientset() sparkService, err := createSparkUIService(test.app, fakeClient) - sparkIngress, err := createSparkUIIngress(test.app, *sparkService, ingressURLFormat, fakeClient) + if err != nil { + t.Fatal(err) + } + ingressURL, err := getSparkUIingressURL(ingressURLFormat, test.app.Name, 
test.app.Namespace)
+		if err != nil {
+			t.Fatal(err)
+		}
+		sparkIngress, err := createSparkUIIngress(test.app, *sparkService, ingressURL, fakeClient)
 		if err != nil {
 			if test.expectError {
 				return
@@ -340,7 +348,7 @@ func TestCreateSparkUIIngress(t *testing.T) {
 		if sparkIngress.ingressName != test.expectedIngress.ingressName {
 			t.Errorf("Ingress name wanted %s got %s", test.expectedIngress.ingressName, sparkIngress.ingressName)
 		}
-		if sparkIngress.ingressURL != test.expectedIngress.ingressURL {
+		if sparkIngress.ingressURL.String() != test.expectedIngress.ingressURL.String() {
 			t.Errorf("Ingress URL wanted %s got %s", test.expectedIngress.ingressURL, sparkIngress.ingressURL)
 		}
 		ingress, err := fakeClient.ExtensionsV1beta1().Ingresses(test.app.Namespace).
@@ -373,9 +381,13 @@ func TestCreateSparkUIIngress(t *testing.T) {
 			t.Errorf("No Ingress rules found.")
 		}
 		ingressRule := ingress.Spec.Rules[0]
-		//ingress URL is same as Host and Path combined from k8s ingress
-		if ingressRule.Host+ingressRule.IngressRuleValue.HTTP.Paths[0].Path != test.expectedIngress.ingressURL {
-			t.Errorf("Ingress of app %s has the wrong host %s", test.expectedIngress.ingressURL, ingressRule.Host)
+		// If we have a path, then the ingress adds capture groups
+		if ingressRule.IngressRuleValue.HTTP.Paths[0].Path != "" && ingressRule.IngressRuleValue.HTTP.Paths[0].Path != "/" {
+			test.expectedIngress.ingressURL.Path = test.expectedIngress.ingressURL.Path + "(/|$)(.*)"
+		}
+		if ingressRule.Host+ingressRule.IngressRuleValue.HTTP.Paths[0].Path != test.expectedIngress.ingressURL.Host+test.expectedIngress.ingressURL.Path {
+			t.Errorf("Ingress rule host and path wanted %s got %s", test.expectedIngress.ingressURL.Host+test.expectedIngress.ingressURL.Path, ingressRule.Host+ingressRule.IngressRuleValue.HTTP.Paths[0].Path)
 		}

 		if len(ingressRule.IngressRuleValue.HTTP.Paths) != 1 {
@@ -481,7 +493,7 @@ func TestCreateSparkUIIngress(t *testing.T) {
 			app: app1,
 			expectedIngress: SparkIngress{
 				ingressName: fmt.Sprintf("%s-ui-ingress", app1.GetName()),
-				ingressURL:  app1.GetName() + ".ingress.clusterName.com",
+				ingressURL:  parseURLAndAssertError(app1.GetName()+".ingress.clusterName.com", t),
 			},
 			expectError: false,
 		},
@@ -490,7 +502,7 @@ func TestCreateSparkUIIngress(t *testing.T) {
 			app: app2,
 			expectedIngress: SparkIngress{
 				ingressName: fmt.Sprintf("%s-ui-ingress", app2.GetName()),
-				ingressURL:  app2.GetName() + ".ingress.clusterName.com",
+				ingressURL:  parseURLAndAssertError(app2.GetName()+".ingress.clusterName.com", t),
 				annotations: map[string]string{
 					"kubernetes.io/ingress.class":                    "nginx",
 					"nginx.ingress.kubernetes.io/force-ssl-redirect": "true",
@@ -503,7 +515,7 @@ func TestCreateSparkUIIngress(t *testing.T) {
 			app: app3,
 			expectedIngress: SparkIngress{
 				ingressName: fmt.Sprintf("%s-ui-ingress", app3.GetName()),
-				ingressURL:  app3.GetName() + ".ingress.clusterName.com",
+				ingressURL:  parseURLAndAssertError(app3.GetName()+".ingress.clusterName.com", t),
 				annotations: map[string]string{
 					"kubernetes.io/ingress.class":                    "nginx",
 					"nginx.ingress.kubernetes.io/force-ssl-redirect": "true",
@@ -519,7 +531,7 @@ func TestCreateSparkUIIngress(t *testing.T) {
 			app: app4,
 			expectedIngress: SparkIngress{
 				ingressName: fmt.Sprintf("%s-ui-ingress", app4.GetName()),
-				ingressURL:  app3.GetName() + ".ingress.clusterName.com",
+				ingressURL:  parseURLAndAssertError(app3.GetName()+".ingress.clusterName.com", t),
 				annotations: map[string]string{
 					"kubernetes.io/ingress.class":                    "nginx",
 					"nginx.ingress.kubernetes.io/force-ssl-redirect": "true",
@@ -542,7 +554,10 @@ func TestCreateSparkUIIngress(t *testing.T) {
 			app: app1,
 			expectedIngress: SparkIngress{
 				ingressName: fmt.Sprintf("%s-ui-ingress", app1.GetName()),
-				ingressURL:  "ingress.clusterName.com/" + app1.GetNamespace() + "/" + app1.GetName(),
+				ingressURL:  parseURLAndAssertError("ingress.clusterName.com/"+app1.GetNamespace()+"/"+app1.GetName(), t),
+				annotations: map[string]string{
+					"nginx.ingress.kubernetes.io/rewrite-target": "/$2",
+				},
 			},
 			expectError: false,
 		},
@@ -552,3 +567,21 @@ func TestCreateSparkUIIngress(t *testing.T) {
 		testFn(test, t, "ingress.clusterName.com/{{$appNamespace}}/{{$appName}}")
 	}
 }
+
+func parseURLAndAssertError(testURL string, t *testing.T) *url.URL {
+	fallbackURL, _ := url.Parse("http://example.com")
+	parsedURL, err := url.Parse(testURL)
+	if err != nil {
+		t.Errorf("failed to parse the url: %s", testURL)
+		return fallbackURL
+	}
+	if parsedURL.Scheme == "" {
+		// URL does not contain a scheme; add http:// so url.Parse can function correctly.
+		parsedURL, err = url.Parse("http://" + testURL)
+		if err != nil {
+			t.Errorf("failed to parse the url: %s", testURL)
+			return fallbackURL
+		}
+	}
+	return parsedURL
+}
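A worked example of the subpath handling above (a sketch, assuming the operator's
ingress-url-format flag, which populates c.ingressURLFormat, is set to
"ingress.clusterName.com/{{$appNamespace}}/{{$appName}}" — the value exercised by the
tests): for an application foo in namespace bar, getSparkUIingressURL resolves the
format to http://ingress.clusterName.com/bar/foo; createSparkUIIngress then writes an
ingress rule with host ingress.clusterName.com and path /bar/foo(/|$)(.*), and adds the
nginx.ingress.kubernetes.io/rewrite-target: /$2 annotation so the prefix captured by the
first group is stripped before proxying; the controller sets spark.ui.proxyBase=/bar/foo
and spark.ui.proxyRedirectUri=/ so the Spark UI renders links that survive the rewrite.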
From a010501d7800b9490cd09dcc6d316737b9928da1 Mon Sep 17 00:00:00 2001
From: Ilya Karpov
Date: Tue, 26 Oct 2021 21:51:53 +0300
Subject: [PATCH 2/3] Make manifests usable by Kustomize (#1367)

* Add kustomization manifests

* exclude kustomization.yaml from detect-crds-drift build step
---
 Makefile                                      |  2 +-
 .../spark-operator-with-metrics.yaml          |  0
 .../spark-operator-with-webhook.yaml          |  0
 manifest/crds/kustomization.yaml              | 21 ++++++++
 .../spark-application-rbac/kustomization.yaml | 23 ++++++++
 .../spark-application-rbac.yaml}              | 14 +++--
 .../spark-operator-install/kustomization.yaml | 25 +++++++++
 .../spark-operator-rbac.yaml                  |  0
 .../spark-operator.yaml                       |  0
 .../kustomization.yaml                        | 27 ++++++++++
 .../spark-operator-patch.yaml                 | 40 ++++++++++++++
 .../spark-operator-webhook.yaml               | 53 +++++++++++++++++++
 12 files changed, 199 insertions(+), 6 deletions(-)
 rename {manifest => examples}/spark-operator-with-metrics.yaml (100%)
 rename {manifest => examples}/spark-operator-with-webhook.yaml (100%)
 create mode 100644 manifest/crds/kustomization.yaml
 create mode 100644 manifest/spark-application-rbac/kustomization.yaml
 rename manifest/{spark-rbac.yaml => spark-application-rbac/spark-application-rbac.yaml} (88%)
 create mode 100644 manifest/spark-operator-install/kustomization.yaml
 rename manifest/{ => spark-operator-install}/spark-operator-rbac.yaml (100%)
 rename manifest/{ => spark-operator-install}/spark-operator.yaml (100%)
 create mode 100644 manifest/spark-operator-with-webhook-install/kustomization.yaml
 create mode 100644 manifest/spark-operator-with-webhook-install/spark-operator-patch.yaml
 create mode 100644 manifest/spark-operator-with-webhook-install/spark-operator-webhook.yaml

diff --git a/Makefile b/Makefile
index 7f70f8494..7e7d7e176 100644
--- a/Makefile
+++ b/Makefile
@@ -50,7 +50,7 @@ fmt-check: clean
 	./.travis.gofmt.sh
 
 detect-crds-drift:
-	diff -q charts/spark-operator-chart/crds manifest/crds
+	diff -q charts/spark-operator-chart/crds manifest/crds --exclude=kustomization.yaml
 
 clean:
 	@echo "cleaning up caches and output"
diff --git a/manifest/spark-operator-with-metrics.yaml b/examples/spark-operator-with-metrics.yaml
similarity index 100%
rename from manifest/spark-operator-with-metrics.yaml
rename to examples/spark-operator-with-metrics.yaml
diff --git a/manifest/spark-operator-with-webhook.yaml b/examples/spark-operator-with-webhook.yaml
similarity index 100%
rename from manifest/spark-operator-with-webhook.yaml rename to examples/spark-operator-with-webhook.yaml diff --git a/manifest/crds/kustomization.yaml b/manifest/crds/kustomization.yaml new file mode 100644 index 000000000..ed4efc5a8 --- /dev/null +++ b/manifest/crds/kustomization.yaml @@ -0,0 +1,21 @@ +# +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - sparkoperator.k8s.io_sparkapplications.yaml + - sparkoperator.k8s.io_scheduledsparkapplications.yaml diff --git a/manifest/spark-application-rbac/kustomization.yaml b/manifest/spark-application-rbac/kustomization.yaml new file mode 100644 index 000000000..1e4e490c9 --- /dev/null +++ b/manifest/spark-application-rbac/kustomization.yaml @@ -0,0 +1,23 @@ +# +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +namespace: spark + +resources: + - spark-application-rbac.yaml diff --git a/manifest/spark-rbac.yaml b/manifest/spark-application-rbac/spark-application-rbac.yaml similarity index 88% rename from manifest/spark-rbac.yaml rename to manifest/spark-application-rbac/spark-application-rbac.yaml index 8bd4e86fa..662f227d1 100644 --- a/manifest/spark-rbac.yaml +++ b/manifest/spark-application-rbac/spark-application-rbac.yaml @@ -13,17 +13,21 @@ # See the License for the specific language governing permissions and # limitations under the License. # - +apiVersion: v1 +kind: Namespace +metadata: + name: spark +--- apiVersion: v1 kind: ServiceAccount metadata: name: spark - namespace: default + namespace: spark --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - namespace: default + namespace: spark name: spark-role rules: - apiGroups: [""] @@ -37,11 +41,11 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: spark-role-binding - namespace: default + namespace: spark subjects: - kind: ServiceAccount name: spark - namespace: default + namespace: spark roleRef: kind: Role name: spark-role diff --git a/manifest/spark-operator-install/kustomization.yaml b/manifest/spark-operator-install/kustomization.yaml new file mode 100644 index 000000000..1d102d262 --- /dev/null +++ b/manifest/spark-operator-install/kustomization.yaml @@ -0,0 +1,25 @@ +# +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +namespace: spark-operator + +resources: + - spark-operator-rbac.yaml + - ../crds + - spark-operator.yaml diff --git a/manifest/spark-operator-rbac.yaml b/manifest/spark-operator-install/spark-operator-rbac.yaml similarity index 100% rename from manifest/spark-operator-rbac.yaml rename to manifest/spark-operator-install/spark-operator-rbac.yaml diff --git a/manifest/spark-operator.yaml b/manifest/spark-operator-install/spark-operator.yaml similarity index 100% rename from manifest/spark-operator.yaml rename to manifest/spark-operator-install/spark-operator.yaml diff --git a/manifest/spark-operator-with-webhook-install/kustomization.yaml b/manifest/spark-operator-with-webhook-install/kustomization.yaml new file mode 100644 index 000000000..ec3b237e7 --- /dev/null +++ b/manifest/spark-operator-with-webhook-install/kustomization.yaml @@ -0,0 +1,27 @@ +# +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +namespace: spark-operator + +resources: + - ../spark-operator-install + - spark-operator-webhook.yaml + +patchesStrategicMerge: + - spark-operator-patch.yaml diff --git a/manifest/spark-operator-with-webhook-install/spark-operator-patch.yaml b/manifest/spark-operator-with-webhook-install/spark-operator-patch.yaml new file mode 100644 index 000000000..ba3ce4fe9 --- /dev/null +++ b/manifest/spark-operator-with-webhook-install/spark-operator-patch.yaml @@ -0,0 +1,40 @@ +# +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +apiVersion: apps/v1 +kind: Deployment +metadata: + name: sparkoperator + labels: + app.kubernetes.io/name: sparkoperator + app.kubernetes.io/version: v1beta2-1.2.3-3.1.1 + namespace: spark-operator +spec: + template: + spec: + volumes: + - name: webhook-certs + secret: + secretName: spark-webhook-certs + containers: + - name: sparkoperator + args: + - -logtostderr + - -enable-webhook=true + - -v=2 + volumeMounts: + - name: webhook-certs + mountPath: /etc/webhook-certs + diff --git a/manifest/spark-operator-with-webhook-install/spark-operator-webhook.yaml b/manifest/spark-operator-with-webhook-install/spark-operator-webhook.yaml new file mode 100644 index 000000000..1618f06cc --- /dev/null +++ b/manifest/spark-operator-with-webhook-install/spark-operator-webhook.yaml @@ -0,0 +1,53 @@ +# +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: batch/v1 +kind: Job +metadata: + name: sparkoperator-init + namespace: spark-operator + labels: + app.kubernetes.io/name: sparkoperator + app.kubernetes.io/version: v1beta2-1.2.3-3.1.1 +spec: + backoffLimit: 3 + template: + metadata: + labels: + app.kubernetes.io/name: sparkoperator + app.kubernetes.io/version: v1beta2-1.2.3-3.1.1 + spec: + serviceAccountName: sparkoperator + restartPolicy: Never + containers: + - name: main + image: gcr.io/spark-operator/spark-operator:v1beta2-1.2.3-3.1.1 + imagePullPolicy: IfNotPresent + command: ["/usr/bin/gencerts.sh", "-p"] +--- +kind: Service +apiVersion: v1 +metadata: + name: spark-webhook + namespace: spark-operator +spec: + ports: + - port: 443 + targetPort: 8080 + name: webhook + selector: + app.kubernetes.io/name: sparkoperator + app.kubernetes.io/version: v1beta2-1.2.3-3.1.1 From 4637882db7bca962c748c6f67561e39570315c24 Mon Sep 17 00:00:00 2001 From: Anton Yanchenko Date: Tue, 26 Oct 2021 23:32:23 +0200 Subject: [PATCH 3/3] Regenerate deleted cert after upgrade (#1373) * Regenerate deleted certs after upgrade Should fix #875. 
* Bump chart version

* Update docs
---
 charts/spark-operator-chart/Chart.yaml                      | 2 +-
 charts/spark-operator-chart/README.md                       | 1 +
 charts/spark-operator-chart/templates/webhook-init-job.yaml | 2 ++
 charts/spark-operator-chart/values.yaml                     | 4 ++++
 4 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml
index db53de3e5..718c0b6a6 100644
--- a/charts/spark-operator-chart/Chart.yaml
+++ b/charts/spark-operator-chart/Chart.yaml
@@ -1,7 +1,7 @@
 apiVersion: v2
 name: spark-operator
 description: A Helm chart for Spark on Kubernetes operator
-version: 1.1.7
+version: 1.1.8
 appVersion: v1beta2-1.2.3-3.1.1
 keywords:
   - spark
diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md
index 684e518e5..ef5ed6b59 100644
--- a/charts/spark-operator-chart/README.md
+++ b/charts/spark-operator-chart/README.md
@@ -125,6 +125,7 @@ All charts linted successfully
 | serviceAccounts.sparkoperator.name | string | `""` | Optional name for the operator service account |
 | sparkJobNamespace | string | `""` | Set this if running spark jobs in a different namespace than the operator |
 | tolerations | list | `[]` | List of node taints to tolerate |
+| webhook.initAnnotations | object | `{"helm.sh/hook":"pre-install, pre-upgrade","helm.sh/hook-weight":"50"}` | The annotations applied to the init job, required to restore certs deleted by the cleanup job during upgrade |
 | webhook.cleanupAnnotations | object | `{"helm.sh/hook":"pre-delete, pre-upgrade","helm.sh/hook-delete-policy":"hook-succeeded"}` | The annotations applied to the cleanup job, required for helm lifecycle hooks |
 | webhook.enable | bool | `false` | Enable webhook server |
 | webhook.namespaceSelector | string | `""` | The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Empty string (default) will operate on all namespaces |
diff --git a/charts/spark-operator-chart/templates/webhook-init-job.yaml b/charts/spark-operator-chart/templates/webhook-init-job.yaml
index b6fd11e5d..172955723 100644
--- a/charts/spark-operator-chart/templates/webhook-init-job.yaml
+++ b/charts/spark-operator-chart/templates/webhook-init-job.yaml
@@ -3,6 +3,8 @@ apiVersion: batch/v1
 kind: Job
 metadata:
   name: {{ include "spark-operator.fullname" . }}-webhook-init
+  annotations:
+    {{- toYaml .Values.webhook.initAnnotations | nindent 4 }}
   labels:
     {{- include "spark-operator.labels" . | nindent 4 }}
 spec:
diff --git a/charts/spark-operator-chart/values.yaml b/charts/spark-operator-chart/values.yaml
index 2c7edb705..a07fe7a77 100644
--- a/charts/spark-operator-chart/values.yaml
+++ b/charts/spark-operator-chart/values.yaml
@@ -82,6 +82,10 @@ webhook:
   # -- The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2.
   # Empty string (default) will operate on all namespaces
   namespaceSelector: ""
+  # -- The annotations applied to the init job, required to restore certs deleted by the cleanup job during upgrade
+  initAnnotations:
+    "helm.sh/hook": pre-install, pre-upgrade
+    "helm.sh/hook-weight": "50"
   # -- The annotations applied to the cleanup job, required for helm lifecycle hooks
   cleanupAnnotations:
     "helm.sh/hook": pre-delete, pre-upgrade
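Usage note for the Kustomize layout introduced in PATCH 2/3 (a minimal sketch,
assuming a kubectl new enough, v1.14+, to ship with built-in Kustomize support;
the directory names come straight from the manifests above):

    # operator only (the CRDs are pulled in through the ../crds resource reference)
    kubectl apply -k manifest/spark-operator-install

    # operator plus the mutating webhook
    kubectl apply -k manifest/spark-operator-with-webhook-install

    # RBAC for the namespace the SparkApplications run in
    kubectl apply -k manifest/spark-application-rbac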