6 changes: 3 additions & 3 deletions charts/postgres-operator/crds/operatorconfigurations.yaml
@@ -278,12 +278,12 @@ spec:
pdb_name_format:
type: string
default: "postgres-{cluster}-pdb"
pod_antiaffinity_topology_key:
type: string
default: "kubernetes.io/hostname"
pod_antiaffinity_preferred_during_scheduling:
type: boolean
default: false
pod_antiaffinity_topology_key:
type: string
default: "kubernetes.io/hostname"
pod_environment_configmap:
type: string
pod_environment_secret:
4 changes: 2 additions & 2 deletions charts/postgres-operator/values.yaml
@@ -165,10 +165,10 @@ configKubernetes:

# defines the template for PDB (Pod Disruption Budget) names
pdb_name_format: "postgres-{cluster}-pdb"
# switches pod anti affinity type to `preferredDuringSchedulingIgnoredDuringExecution`
pod_antiaffinity_preferred_during_scheduling: false
# override topology key for pod anti affinity
pod_antiaffinity_topology_key: "kubernetes.io/hostname"
# switches pod anti affinity type to `preferredDuringSchedulingIgnoredDuringExecution`
# pod_antiaffinity_preferred_during_scheduling: true
# namespaced name of the ConfigMap with environment variables to populate on every pod
# pod_environment_configmap: "default/my-custom-config"
# name of the Secret (in cluster namespace) with environment variables to populate on every pod
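Note: the flag above only switches which field of the generated podAntiAffinity is populated; the label selector and topology key stay the same. A rough sketch of the two shapes rendered into the Postgres pod template, assuming the default topology key and a placeholder cluster-name label (the real selector comes from the operator's cluster label set):

# pod_antiaffinity_preferred_during_scheduling: false (default) -> hard requirement
affinity:
  podAntiAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchLabels:
            cluster-name: acid-minimal-cluster   # placeholder label and value
        topologyKey: kubernetes.io/hostname

# pod_antiaffinity_preferred_during_scheduling: true -> soft preference
affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 1
        podAffinityTerm:
          labelSelector:
            matchLabels:
              cluster-name: acid-minimal-cluster   # placeholder label and value
          topologyKey: kubernetes.io/hostname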
1 change: 1 addition & 0 deletions manifests/configmap.yaml
@@ -109,6 +109,7 @@ data:
# password_rotation_interval: "90"
# password_rotation_user_retention: "180"
pdb_name_format: "postgres-{cluster}-pdb"
# pod_antiaffinity_preferred_during_scheduling: "false"
# pod_antiaffinity_topology_key: "kubernetes.io/hostname"
pod_deletion_wait_timeout: 10m
# pod_environment_configmap: "default/my-custom-config"
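Usage sketch for the ConfigMap-based configuration: as with the other booleans in this manifest, the value is passed as a string. The ConfigMap name and the enable_pod_antiaffinity key mirror the standard manifests and are assumptions, not part of this change:

apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-operator   # assumed name of the operator ConfigMap
data:
  enable_pod_antiaffinity: "true"
  pod_antiaffinity_preferred_during_scheduling: "true"
  pod_antiaffinity_topology_key: "kubernetes.io/hostname"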
3 changes: 3 additions & 0 deletions manifests/operatorconfiguration.crd.yaml
@@ -276,6 +276,9 @@ spec:
pdb_name_format:
type: string
default: "postgres-{cluster}-pdb"
pod_antiaffinity_preferred_during_scheduling:
type: boolean
default: false
pod_antiaffinity_topology_key:
type: string
default: "kubernetes.io/hostname"
2 changes: 2 additions & 0 deletions manifests/postgresql-operator-default-configuration.yaml
@@ -84,6 +84,7 @@ configuration:
# node_readiness_label_merge: "OR"
oauth_token_secret_name: postgresql-operator
pdb_name_format: "postgres-{cluster}-pdb"
pod_antiaffinity_preferred_during_scheduling: false
pod_antiaffinity_topology_key: "kubernetes.io/hostname"
# pod_environment_configmap: "default/my-custom-config"
# pod_environment_secret: "my-custom-secret"
@@ -95,6 +96,7 @@ configuration:
# pod_service_account_role_binding_definition: ""
pod_terminate_grace_period: 5m
secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
share_pgsocket_with_sidecars: false
spilo_allow_privilege_escalation: true
# spilo_runasuser: 101
# spilo_runasgroup: 103
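For the CRD-based setup, the same keys live in the kubernetes section of the OperatorConfiguration resource; a hedged example that also overrides the topology key (the resource name and the zone key are illustrative only):

apiVersion: "acid.zalan.do/v1"
kind: OperatorConfiguration
metadata:
  name: postgresql-operator-default-configuration
configuration:
  kubernetes:
    enable_pod_antiaffinity: true
    pod_antiaffinity_preferred_during_scheduling: true
    pod_antiaffinity_topology_key: "topology.kubernetes.io/zone"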
6 changes: 3 additions & 3 deletions pkg/apis/acid.zalan.do/v1/crds.go
@@ -1372,12 +1372,12 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
"pdb_name_format": {
Type: "string",
},
"pod_antiaffinity_topology_key": {
Type: "string",
},
"pod_antiaffinity_preferred_during_scheduling": {
Type: "boolean",
},
"pod_antiaffinity_topology_key": {
Type: "string",
},
"pod_environment_configmap": {
Type: "string",
},
4 changes: 2 additions & 2 deletions pkg/apis/acid.zalan.do/v1/operator_configuration_type.go
@@ -97,10 +97,10 @@ type KubernetesMetaConfiguration struct {
PodPriorityClassName string `json:"pod_priority_class_name,omitempty"`
MasterPodMoveTimeout Duration `json:"master_pod_move_timeout,omitempty"`
EnablePodAntiAffinity bool `json:"enable_pod_antiaffinity,omitempty"`
PodAntiAffinityTopologyKey string `json:"pod_antiaffinity_topology_key,omitempty"`
PodAntiAffinityPreferredDuringScheduling bool `json:"pod_antiaffinity_preferred_during_scheduling,omitempty"`
PodAntiAffinityTopologyKey string `json:"pod_antiaffinity_topology_key,omitempty"`
PodManagementPolicy string `json:"pod_management_policy,omitempty"`
EnableReadinessProbe bool `json:"enable_readiness_probe,omitempty"`
EnableReadinessProbe bool `json:"enable_readiness_probe,omitempty"`
EnableCrossNamespaceSecret bool `json:"enable_cross_namespace_secret,omitempty"`
}

3 changes: 2 additions & 1 deletion pkg/cluster/connection_pooler.go
@@ -354,11 +354,12 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
nodeAffinity := c.nodeAffinity(c.OpConfig.NodeReadinessLabel, spec.NodeAffinity)
if c.OpConfig.EnablePodAntiAffinity {
labelsSet := labels.Set(c.connectionPoolerLabels(role, false).MatchLabels)
podTemplate.Spec.Affinity = generatePodAffinity(
podTemplate.Spec.Affinity = podAffinity(
labelsSet,
c.OpConfig.PodAntiAffinityTopologyKey,
nodeAffinity,
c.OpConfig.PodAntiAffinityPreferredDuringScheduling,
true,
)
} else if nodeAffinity != nil {
podTemplate.Spec.Affinity = nodeAffinity
75 changes: 50 additions & 25 deletions pkg/cluster/k8sres.go
@@ -495,8 +495,14 @@ func (c *Cluster) nodeAffinity(nodeReadinessLabel map[string]string, nodeAffinit
}
}

func generatePodAffinity(labels labels.Set, topologyKey string, nodeAffinity *v1.Affinity, preferredDuringScheduling bool) *v1.Affinity {
// generate pod anti-affinity to avoid multiple pods of the same Postgres cluster in the same topology, e.g. node
func podAffinity(
labels labels.Set,
topologyKey string,
nodeAffinity *v1.Affinity,
preferredDuringScheduling bool,
anti bool) *v1.Affinity {

var podAffinity v1.Affinity

podAffinityTerm := v1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{
@@ -505,24 +511,47 @@ func generatePodAffinity(labels labels.Set, topologyKey string, nodeAffinity *v1
TopologyKey: topologyKey,
}

podAffinity := v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{},
if anti {
podAffinity.PodAntiAffinity = generatePodAntiAffinity(podAffinityTerm, preferredDuringScheduling)
} else {
podAffinity.PodAffinity = generatePodAffinity(podAffinityTerm, preferredDuringScheduling)
}

if nodeAffinity != nil && nodeAffinity.NodeAffinity != nil {
podAffinity.NodeAffinity = nodeAffinity.NodeAffinity
}

return &podAffinity
}

func generatePodAffinity(podAffinityTerm v1.PodAffinityTerm, preferredDuringScheduling bool) *v1.PodAffinity {
podAffinity := &v1.PodAffinity{}

if preferredDuringScheduling {
podAffinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = []v1.WeightedPodAffinityTerm{{
podAffinity.PreferredDuringSchedulingIgnoredDuringExecution = []v1.WeightedPodAffinityTerm{{
Weight: 1,
PodAffinityTerm: podAffinityTerm,
}}
} else {
podAffinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = []v1.PodAffinityTerm{podAffinityTerm}
podAffinity.RequiredDuringSchedulingIgnoredDuringExecution = []v1.PodAffinityTerm{podAffinityTerm}
}

if nodeAffinity != nil && nodeAffinity.NodeAffinity != nil {
podAffinity.NodeAffinity = nodeAffinity.NodeAffinity
return podAffinity
}

func generatePodAntiAffinity(podAffinityTerm v1.PodAffinityTerm, preferredDuringScheduling bool) *v1.PodAntiAffinity {
podAntiAffinity := &v1.PodAntiAffinity{}

if preferredDuringScheduling {
podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = []v1.WeightedPodAffinityTerm{{
Weight: 1,
PodAffinityTerm: podAffinityTerm,
}}
} else {
podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = []v1.PodAffinityTerm{podAffinityTerm}
}

return &podAffinity
return podAntiAffinity
}

func tolerations(tolerationsSpec *[]v1.Toleration, podToleration map[string]string) []v1.Toleration {
@@ -778,11 +807,12 @@ func (c *Cluster) generatePodTemplate(
}

if podAntiAffinity {
podSpec.Affinity = generatePodAffinity(
podSpec.Affinity = podAffinity(
labels,
podAntiAffinityTopologyKey,
nodeAffinity,
podAntiAffinityPreferredDuringScheduling,
true,
)
} else if nodeAffinity != nil {
podSpec.Affinity = nodeAffinity
@@ -2100,20 +2130,15 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1.CronJob, error) {
c.OpConfig.ClusterNameLabel: c.Name,
"application": "spilo-logical-backup",
}
podAffinityTerm := v1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{
MatchLabels: labels,
},
TopologyKey: "kubernetes.io/hostname",
}
podAffinity := v1.Affinity{
PodAffinity: &v1.PodAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{{
Weight: 1,
PodAffinityTerm: podAffinityTerm,
},
},
}}

nodeAffinity := c.nodeAffinity(c.OpConfig.NodeReadinessLabel, nil)
podAffinity := podAffinity(
labels,
"kubernetes.io/hostname",
nodeAffinity,
true,
false,
)

annotations := c.generatePodAnnotations(&c.Spec)

@@ -2147,7 +2172,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1.CronJob, error) {
}

// overwrite specific params of logical backups pods
podTemplate.Spec.Affinity = &podAffinity
podTemplate.Spec.Affinity = podAffinity
podTemplate.Spec.RestartPolicy = "Never" // affects containers within a pod

// configure a batch job
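Net effect for logical backups: the CronJob's pod template now gets its affinity from the shared podAffinity helper with preferredDuringScheduling=true and anti=false instead of the inline PodAffinity block, i.e. a soft pod affinity over the backup job labels plus the cluster's node affinity. Roughly (the cluster-name value is a placeholder):

affinity:
  podAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 1
        podAffinityTerm:
          labelSelector:
            matchLabels:
              cluster-name: acid-minimal-cluster   # c.OpConfig.ClusterNameLabel: c.Name
              application: spilo-logical-backup
          topologyKey: kubernetes.io/hostname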