diff --git a/ADOPTERS.md b/ADOPTERS.md index 0e9528d3..ab2c2be1 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -10,4 +10,4 @@ This is a list of production adopters of Druid Operator: Open Source Solutions based on Druid Operator: | Company | Industry | | :--- |:----------------------------------| -|[AWS](https://github.com/aws-solutions/scalable-analytics-using-apache-druid-on-aws) +|[AWS](https://github.com/aws-solutions/scalable-analytics-using-apache-druid-on-aws)| diff --git a/README.md b/README.md index ca8293da..565c236a 100644 --- a/README.md +++ b/README.md @@ -12,12 +12,11 @@ -Druid Operator provisions and manages [Apache Druid](https://druid.apache.org/) cluster on kubernetes. -Druid Operator is designed to provision and manage [Apache Druid](https://druid.apache.org/) in distributed mode only. -It is built in Golang using [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder). -Druid Operator is available on [operatorhub.io](https://operatorhub.io/operator/druid-operator) -Refer to [Documentation](./docs/README.md) for getting started. -Feel free to join Kubernetes slack and join [druid-operator](https://kubernetes.slack.com/archives/C04F4M6HT2L) +- Druid Operator provisions and manages [Apache Druid](https://druid.apache.org/) cluster on kubernetes. +- Druid Operator is designed to provision and manage [Apache Druid](https://druid.apache.org/) in distributed mode only. +- It is built in Golang using [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder). +- Refer to [Documentation](./docs/README.md) for getting started. +- Feel free to join Kubernetes slack and join [druid-operator](https://kubernetes.slack.com/archives/C04F4M6HT2L) ### Newsletter - Monthly updates on running druid on kubernetes. - [Apache Druid on Kubernetes](https://druidonk8s.substack.com/) @@ -47,6 +46,16 @@ Feel free to join Kubernetes slack and join [druid-operator](https://kubernetes. 
- The operator has moved from HPA apiVersion autoscaling/v2beta1 to autoscaling/v2 API users will need to update there HPA Specs according v2 api in order to work with the latest druid-operator release. - druid-operator has moved Ingress apiVersion networking/v1beta1 to networking/v1. Users will need to update there Ingress Spec in the druid CR according networking/v1 syntax. In case users are using schema validated CRD, the CRD will also be needed to be updated. - The v1.0.0 release for druid-operator is compatible with k8s version 1.25. HPA API is kept to version v2beta2. +- Release v1.2.2 had a bug for namespace-scoped operator deployments; this is fixed in v1.2.3. + +### Kubernetes version compatibility + +| druid-operator | 0.0.9 | v1.0.0 | v1.1.0 | v1.2.2 | v1.2.3 | +| :------------- | :-------------: | :-----: | :---: | :---: | :---: | +| kubernetes <= 1.20 | :x:| :x: | :x: | :x: | :x: | +| kubernetes == 1.21 | :white_check_mark:| :x: | :x: | :x: | :x: | +| kubernetes >= 1.22 and <= 1.25 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +| kubernetes > 1.25 and <= 1.29.1 | :x: | :x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | ### Contributors diff --git a/chart/Chart.yaml b/chart/Chart.yaml index 6419cedb..faf26871 100644 --- a/chart/Chart.yaml +++ b/chart/Chart.yaml @@ -15,11 +15,11 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.3.5 +version: 0.3.6 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. 
-appVersion: v1.2.2 +appVersion: v1.2.3 # icon icon: "https://www.apache.org/logos/res/druid/druid-1.png" diff --git a/controllers/druid/handler.go b/controllers/druid/handler.go index 546e7747..114c4542 100644 --- a/controllers/druid/handler.go +++ b/controllers/druid/handler.go @@ -186,7 +186,7 @@ func deployDruidCluster(ctx context.Context, sdk client.Client, m *v1alpha1.Drui } // Default is set to true - execCheckCrashStatus(ctx, sdk, &nodeSpec, m, emitEvents) + execCheckCrashStatus(ctx, sdk, &nodeSpec, m, nodeSpecUniqueStr, emitEvents) // Ignore isObjFullyDeployed() for the first iteration ie cluster creation // will force cluster creation in parallel, post first iteration rolling updates @@ -201,7 +201,7 @@ func deployDruidCluster(ctx context.Context, sdk client.Client, m *v1alpha1.Drui } // Default is set to true - execCheckCrashStatus(ctx, sdk, &nodeSpec, m, emitEvents) + execCheckCrashStatus(ctx, sdk, &nodeSpec, m, nodeSpecUniqueStr, emitEvents) } // Create Ingress Spec @@ -551,19 +551,19 @@ func setPVCLabels(ctx context.Context, sdk client.Client, drd *v1alpha1.Druid, e return nil } -func execCheckCrashStatus(ctx context.Context, sdk client.Client, nodeSpec *v1alpha1.DruidNodeSpec, m *v1alpha1.Druid, event EventEmitter) { +func execCheckCrashStatus(ctx context.Context, sdk client.Client, nodeSpec *v1alpha1.DruidNodeSpec, m *v1alpha1.Druid, nodeSpecUniqueStr string, event EventEmitter) { if m.Spec.ForceDeleteStsPodOnError == false { return } else { if nodeSpec.PodManagementPolicy == "OrderedReady" { - checkCrashStatus(ctx, sdk, m, event) + checkCrashStatus(ctx, sdk, nodeSpec, m, nodeSpecUniqueStr, event) } } } -func checkCrashStatus(ctx context.Context, sdk client.Client, drd *v1alpha1.Druid, emitEvents EventEmitter) error { +func checkCrashStatus(ctx context.Context, sdk client.Client, nodeSpec *v1alpha1.DruidNodeSpec, drd *v1alpha1.Druid, nodeSpecUniqueStr string, emitEvents EventEmitter) error { - podList, err := readers.List(ctx, sdk, drd, 
makeLabelsForDruid(drd.Name), emitEvents, func() objectList { return &v1.PodList{} }, func(listObj runtime.Object) []object { + podList, err := readers.List(ctx, sdk, drd, makeLabelsForNodeSpec(nodeSpec, drd, drd.Name, nodeSpecUniqueStr), emitEvents, func() objectList { return &v1.PodList{} }, func(listObj runtime.Object) []object { items := listObj.(*v1.PodList).Items result := make([]object, len(items)) for i := 0; i < len(items); i++ { diff --git a/e2e/e2e.sh b/e2e/e2e.sh index 1413e89e..9bc7d1cf 100755 --- a/e2e/e2e.sh +++ b/e2e/e2e.sh @@ -2,7 +2,7 @@ set -o errexit set -x # Get Kind -go install sigs.k8s.io/kind@v0.17.0 +go install sigs.k8s.io/kind@v0.21.0 # minio statefulset name MINIO_STS_NAME=myminio-ss-0 # druid namespace @@ -45,7 +45,7 @@ kubectl apply -f e2e/configs/druid-cr.yaml -n ${NAMESPACE} sleep 10 for d in $(kubectl get pods -n ${NAMESPACE} -l app=druid -l druid_cr=tiny-cluster -o name) do - kubectl wait -n ${NAMESPACE} "$d" --for=condition=Ready --timeout=5m + kubectl wait -n ${NAMESPACE} "$d" --for=condition=Ready --timeout=15m done # wait for druid pods for s in $(kubectl get sts -n ${NAMESPACE} -l app=${NAMESPACE} -l druid_cr=tiny-cluster -o name) diff --git a/main.go b/main.go index e0861689..7b375da1 100644 --- a/main.go +++ b/main.go @@ -118,8 +118,9 @@ func main() { func watchNamespaceCache() cache.NewCacheFunc { var managerWatchCache cache.NewCacheFunc - if watchNamespace != "" { - ns := strings.Split(watchNamespace, ",") + ns := strings.Split(watchNamespace, ",") + + if len(ns) > 1 { for i := range ns { ns[i] = strings.TrimSpace(ns[i]) }