Skip to content

Commit

Permalink
Remove emptyDir volume support
Browse files Browse the repository at this point in the history
The operator was using emptyDir storage if the user didn't specify a
StorageClass to be used. Given that the `emptyDir` storage is ephemeral and
meant to be used only when bootstrapping a development environment, we decided
that this wasn't a safe default.

We now require the user to specify a size requirement and, if the StorageClass
is empty, we let k8s use the default storage class of the cluster.

Co-authored-by: Leonardo Cecchi <leonardo.cecchi@2ndquadrant.it>
Co-authored-by: Marco Nenciarini <marco.nenciarini@2ndquadrant.it>
  • Loading branch information
leonardoce and mnencia committed Sep 16, 2020
1 parent 3ad650b commit 1f81217
Show file tree
Hide file tree
Showing 20 changed files with 86 additions and 221 deletions.
6 changes: 6 additions & 0 deletions NEWS
Original file line number Diff line number Diff line change
@@ -1,6 +1,12 @@
Cloud Native PostgreSQL - History of user-visible changes
Copyright (C) 2019-2020 2ndQuadrant Italia SRL. Exclusively licensed to 2ndQuadrant Limited.

Version 0.3.0 - TODO

- Remove EmptyDir storage support
- Remove "unusable" annotation support, now PVC can just be removed followed
by a deletion of the corresponding Pod

Version 0.2.0 - 11 Aug 2020

- PostgreSQL 10 and 11 are now supported, in addition to PostgreSQL 12
Expand Down
17 changes: 14 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,16 +5,27 @@
workloads on Kubernetes, particularly optimised for Private Cloud environments
with Local Persistent Volumes (PV).

## Quickstart for local testing (TODO)
## Quickstart for local testing of a git branch

Temporary information on how to test PG Operator using private images on Quay.io:
If you want to deploy a cluster using the operator from your current git branch,
you can use the following commands:

```bash
kind create cluster --name pg
kubectl create namespace postgresql-operator-system
kubectl create secret docker-registry \
-n postgresql-operator-system \
postgresql-operator-pull-secret \
--docker-server=internal.2ndq.io \
--docker-username=$GITLAB_TOKEN_USERNAME \
--docker-password=$GITLAB_TOKEN_PASSWORD
make deploy CONTROLLER_IMG=internal.2ndq.io/k8s/cloud-native-postgresql:$(git symbolic-ref --short HEAD | tr / _)
kubectl apply -f docs/src/samples/cluster-emptydir.yaml
kubectl apply -f docs/src/samples/cluster-example.yaml
```

Replace `$GITLAB_TOKEN_USERNAME` and `$GITLAB_TOKEN_PASSWORD` with credentials
that have permission to pull from the GitLab Docker registry.

# How to upgrade the list of licenses

To generate or update the `licenses` folder run the following command:
Expand Down
13 changes: 4 additions & 9 deletions api/v1alpha1/cluster_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ type ClusterSpec struct {

// Configuration of the storage of the instances
// +optional
StorageConfiguration *StorageConfiguration `json:"storage,omitempty"`
StorageConfiguration StorageConfiguration `json:"storage,omitempty"`

// The time in seconds that is allowed for a PostgreSQL instance to
// successfully start up (default 30)
Expand Down Expand Up @@ -191,12 +191,13 @@ type ApplicationConfiguration struct {
type StorageConfiguration struct {
// StorageClass to use for database data (PGDATA). Applied after
// evaluating the PVC template, if available.
// If not specified, generated PVCs will be satisfied by the
// default storage class
// +optional
StorageClass *string `json:"storageClass"`

// Size of the storage. Required if not already specified in the PVC template.
// +optional
Size *resource.Quantity `json:"size"`
Size resource.Quantity `json:"size"`

// Template to be used to generate the Persistent Volume Claim
// +optional
Expand Down Expand Up @@ -387,12 +388,6 @@ func (cluster *Cluster) GetServiceReadWriteName() string {
return fmt.Sprintf("%v%v", cluster.Name, ServiceReadWriteSuffix)
}

// IsUsingPersistentStorage check if this cluster will use persistent storage
// or not
func (cluster *Cluster) IsUsingPersistentStorage() bool {
return cluster.Spec.StorageConfiguration != nil
}

// GetMaxStartDelay get the amount of time of startDelay config option
func (cluster *Cluster) GetMaxStartDelay() int32 {
if cluster.Spec.MaxStartDelay > 0 {
Expand Down
19 changes: 0 additions & 19 deletions api/v1alpha1/cluster_types_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -49,25 +49,6 @@ var _ = Describe("PostgreSQL services name", func() {
})
})

var _ = Describe("Detect persistent storage", func() {
It("by defaults work with emptyDir storage", func() {
var cluster = Cluster{}
Expect(cluster.IsUsingPersistentStorage()).To(BeFalse())
})

It("consider the presence of storage configuration", func() {
var storageClassName = "default-storage-class"
var cluster = Cluster{
Spec: ClusterSpec{
StorageConfiguration: &StorageConfiguration{
StorageClass: &storageClassName,
},
},
}
Expect(cluster.IsUsingPersistentStorage()).To(BeTrue())
})
})

var _ = Describe("Primary update strategy", func() {
It("defaults to switchover", func() {
emptyCluster := Cluster{}
Expand Down
12 changes: 2 additions & 10 deletions api/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

5 changes: 4 additions & 1 deletion config/crd/bases/postgresql.k8s.2ndq.io_clusters.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -388,8 +388,11 @@ spec:
x-kubernetes-int-or-string: true
storageClass:
description: StorageClass to use for database data (PGDATA). Applied
after evaluating the PVC template, if available.
after evaluating the PVC template, if available. If not specified,
generated PVCs will be satisfied by the default storage class
type: string
required:
- size
type: object
required:
- applicationConfiguration
Expand Down
28 changes: 12 additions & 16 deletions controllers/cluster_create.go
Original file line number Diff line number Diff line change
Expand Up @@ -347,14 +347,12 @@ func (r *ClusterReconciler) createPrimaryInstance(
return err
}

if cluster.IsUsingPersistentStorage() {
pvcSpec := specs.CreatePVC(*cluster.Spec.StorageConfiguration, cluster.Name, cluster.Namespace, nodeSerial)
utils.SetAsOwnedBy(&pvcSpec.ObjectMeta, cluster.ObjectMeta, cluster.TypeMeta)
specs.SetOperatorVersion(&pvcSpec.ObjectMeta, versions.Version)
if err = r.Create(ctx, pvcSpec); err != nil && !apierrs.IsAlreadyExists(err) {
log.Error(err, "Unable to create a PVC for this node", "nodeSerial", nodeSerial)
return err
}
pvcSpec := specs.CreatePVC(cluster.Spec.StorageConfiguration, cluster.Name, cluster.Namespace, nodeSerial)
utils.SetAsOwnedBy(&pvcSpec.ObjectMeta, cluster.ObjectMeta, cluster.TypeMeta)
specs.SetOperatorVersion(&pvcSpec.ObjectMeta, versions.Version)
if err = r.Create(ctx, pvcSpec); err != nil && !apierrs.IsAlreadyExists(err) {
log.Error(err, "Unable to create a PVC for this node", "nodeSerial", nodeSerial)
return err
}

return nil
Expand Down Expand Up @@ -395,14 +393,12 @@ func (r *ClusterReconciler) joinReplicaInstance(
return err
}

if cluster.IsUsingPersistentStorage() {
pvcSpec := specs.CreatePVC(*cluster.Spec.StorageConfiguration, cluster.Name, cluster.Namespace, nodeSerial)
utils.SetAsOwnedBy(&pvcSpec.ObjectMeta, cluster.ObjectMeta, cluster.TypeMeta)
specs.SetOperatorVersion(&pvcSpec.ObjectMeta, versions.Version)
if err = r.Create(ctx, pvcSpec); err != nil && !apierrs.IsAlreadyExists(err) {
log.Error(err, "Unable to create a PVC for this node", "nodeSerial", nodeSerial)
return err
}
pvcSpec := specs.CreatePVC(cluster.Spec.StorageConfiguration, cluster.Name, cluster.Namespace, nodeSerial)
utils.SetAsOwnedBy(&pvcSpec.ObjectMeta, cluster.ObjectMeta, cluster.TypeMeta)
specs.SetOperatorVersion(&pvcSpec.ObjectMeta, versions.Version)
if err = r.Create(ctx, pvcSpec); err != nil && !apierrs.IsAlreadyExists(err) {
log.Error(err, "Unable to create a PVC for this node", "nodeSerial", nodeSerial)
return err
}

return nil
Expand Down
4 changes: 0 additions & 4 deletions controllers/cluster_scale.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,10 +41,6 @@ func (r *ClusterReconciler) scaleDownCluster(
return err
}

if !cluster.IsUsingPersistentStorage() {
return nil
}

// Let's drop the PVC too
pvc := v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Expand Down
4 changes: 1 addition & 3 deletions controllers/cluster_status.go
Original file line number Diff line number Diff line change
Expand Up @@ -64,9 +64,7 @@ func (r *ClusterReconciler) updateResourceStatus(
filteredPods := utils.FilterActivePods(childPods.Items)

// Fill the list of dangling PVCs
if cluster.IsUsingPersistentStorage() {
cluster.Status.DanglingPVC = specs.DetectDanglingPVCs(filteredPods, childPVCs.Items)
}
cluster.Status.DanglingPVC = specs.DetectDanglingPVCs(filteredPods, childPVCs.Items)

// Count pods
cluster.Status.Instances = int32(len(filteredPods))
Expand Down
6 changes: 3 additions & 3 deletions docs/src/expose_pg_services.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ NGINX Ingress Controller.

If you followed the [QuickStart](/quickstart), you should have by now
a database that can be accessed inside the cluster via the
`cluster-emptydir-rw` (primary) and `cluster-emptydir-r` (read-only)
`cluster-example-rw` (primary) and `cluster-example-r` (read-only)
services in the `default` namespace. Both services use port `5432`.

Let's assume that you want to make the primary instance accessible from external
Expand Down Expand Up @@ -49,7 +49,7 @@ metadata:
name: tcp-services
namespace: ingress-nginx
data:
5432: default/cluster-emptydir-rw:5432
5432: default/cluster-example-rw:5432
```
Then, if you've installed NGINX Ingress Controller as suggested in their
Expand Down Expand Up @@ -111,7 +111,7 @@ connections on port 5432 of the Ingress:

```sh
kubectl patch configmap tcp-services -n kube-system \
--patch '{"data":{"5432":"default/cluster-emptydir-rw:5432"}}'
--patch '{"data":{"5432":"default/cluster-example-rw:5432"}}'
```

You can then patch the deployment to allow access on port 5432.
Expand Down
17 changes: 11 additions & 6 deletions docs/src/quickstart.md
Original file line number Diff line number Diff line change
Expand Up @@ -89,15 +89,16 @@ kubectl get deploy -n postgresql-operator-system postgresql-operator-controller-
As with any other deployment in Kubernetes, in order to deploy a PostgreSQL cluster
you need to apply a configuration file that defines your desired `Cluster`.

The [`cluster-emptydir.yaml`](samples/cluster-emptydir.yaml) sample file
defines a simple `Cluster` with an `emptyDir` local volume:
The [`cluster-example.yaml`](samples/cluster-example.yaml) sample file
defines a simple `Cluster` using the default storage class to allocate
disk space:

```yaml
# Example of PostgreSQL cluster using emptyDir volumes
# Example of PostgreSQL cluster
apiVersion: postgresql.k8s.2ndq.io/v1alpha1
kind: Cluster
metadata:
name: cluster-emptydir
name: cluster-example
spec:
instances: 3

Expand Down Expand Up @@ -137,9 +138,13 @@ spec:
# Require md5 authentication elsewhere
- host all all all md5
- host replication all all md5

# Require 1Gi of space
storage:
size: 1Gi
```
This will create a `Cluster` called `cluster-emptydir` with a PostgreSQL
This will create a `Cluster` called `cluster-example` with a PostgreSQL
primary, two replicas, and a database called `app` owned by the `app` PostgreSQL user.

!!! Note "There's more"
Expand All @@ -149,7 +154,7 @@ primary, two replicas, and a database called `app` owned by the `app` PostgreSQL
In order to create the 3-node PostgreSQL cluster, you need to run the following command:

```sh
kubectl apply -f cluster-emptydir.yaml
kubectl apply -f cluster-example.yaml
```

You can check that the pods are being created with the `get pods` command:
Expand Down
10 changes: 2 additions & 8 deletions docs/src/rolling_update.md
Original file line number Diff line number Diff line change
Expand Up @@ -25,14 +25,8 @@ Every version of the operator comes with a default PostgreSQL image version.
If a cluster doesn't have `imageName` specified, the operator will upgrade
it to match its default.

If you are using persistent storage, which is a requirement for
a production environment, the upgrade keeps the Cloud Native PostgreSQL
identity and do not reclone the data.

If you are using emptyDir based storage, which is meaningful only for
a development environment, the operator cannot preserve the node
data directory and must create new nodes and part those with
the old version.
The upgrade keeps the Cloud Native PostgreSQL identity and does not
reclone the data.

During the rolling update procedure, the services endpoints move to reflect
the status of the cluster, so the applications ignore the node that
Expand Down
4 changes: 2 additions & 2 deletions docs/src/samples.md
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
In this section you can find some examples of configuration files to setup your PostgreSQL `Cluster`.

* [`cluster-emptydir.yaml`](samples/cluster-emptydir.yaml):
basic example of `Cluster` that uses `emptyDir` local storage. For demonstration and experimentation purposes
* [`cluster-example.yaml`](samples/cluster-example.yaml):
basic example of `Cluster` that uses the default storage class. For demonstration and experimentation purposes
on a personal Kubernetes cluster with Minikube or Kind as described in the ["Quickstart"](quickstart.md).
* [`cluster-storage-class.yaml`](samples/cluster-storage-class.yaml):
basic example of `Cluster` that uses a specified storage class.
Expand Down
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
# Example of PostgreSQL cluster using emptyDir volumes
# Example of PostgreSQL cluster
apiVersion: postgresql.k8s.2ndq.io/v1alpha1
kind: Cluster
metadata:
name: cluster-emptydir
name: cluster-example
spec:
instances: 3

Expand Down Expand Up @@ -42,3 +42,7 @@ spec:
# Require md5 authentication elsewhere
- host all all all md5
- host replication all all md5

# Require 1Gi of space
storage:
size: 1Gi
2 changes: 1 addition & 1 deletion docs/src/samples/cluster-expose-service.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ metadata:
name: tcp-services
namespace: ingress-nginx
data:
5432: default/cluster-emptydir-lead-primary:5432
5432: default/cluster-example-lead-primary:5432

---
apiVersion: v1
Expand Down
27 changes: 6 additions & 21 deletions pkg/specs/pods.go
Original file line number Diff line number Diff line change
Expand Up @@ -167,8 +167,12 @@ func createImagePullSecrets(cluster v1alpha1.Cluster) []corev1.LocalObjectRefere
func createPostgresVolumes(cluster v1alpha1.Cluster, podName string) []corev1.Volume {
return []corev1.Volume{
{
Name: "pgdata",
VolumeSource: createVolumeSource(cluster, podName),
Name: "pgdata",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: podName,
},
},
},
{
Name: "config",
Expand Down Expand Up @@ -205,25 +209,6 @@ func createPostgresVolumes(cluster v1alpha1.Cluster, podName string) []corev1.Vo
}
}

// createVolumeSource create the VolumeSource environment that is used
// when starting a container
func createVolumeSource(cluster v1alpha1.Cluster, podName string) corev1.VolumeSource {
var pgDataVolumeSource corev1.VolumeSource
if cluster.IsUsingPersistentStorage() {
pgDataVolumeSource = corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: podName,
},
}
} else {
pgDataVolumeSource = corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
}
}

return pgDataVolumeSource
}

// createPostgresContainers create the PostgreSQL containers that are
// used for every instance
func createPostgresContainers(
Expand Down
12 changes: 5 additions & 7 deletions pkg/specs/pvcs.go
Original file line number Diff line number Diff line change
Expand Up @@ -45,13 +45,11 @@ func CreatePVC(
result.Spec.StorageClassName = storageConfiguration.StorageClass
}

// If the customer specified a storage requirement, let's use it
if storageConfiguration.Size != nil {
result.Spec.Resources = corev1.ResourceRequirements{
Requests: corev1.ResourceList{
"storage": *storageConfiguration.Size,
},
}
// Insert the storage requirement
result.Spec.Resources = corev1.ResourceRequirements{
Requests: corev1.ResourceList{
"storage": storageConfiguration.Size,
},
}

if len(result.Spec.AccessModes) == 0 {
Expand Down
Loading

0 comments on commit 1f81217

Please sign in to comment.