adding topology support for zfspv (#7)
This PR adds support for the CSI driver to pick a node that matches the topology specified in the StorageClass. An admin can set `allowedTopologies` in the StorageClass to list the nodes where the ZFS pools have been set up:

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: openebs-zfspv
allowVolumeExpansion: true
parameters:
  blocksize: "4k"
  compression: "on"
  dedup: "on"
  thinprovision: "yes"
  poolname: "zfspv-pool"
provisioner: zfs-localpv
volumeBindingMode: WaitForFirstConsumer
allowedTopologies:
- matchLabelExpressions:
  - key: kubernetes.io/hostname
    values:
      - gke-zfspv-pawan-default-pool-c8929518-cgd4
      - gke-zfspv-pawan-default-pool-c8929518-dxzc
```

Note: when more than one node satisfies the requested topology, this PR simply picks the first node in the preferred list. Because the StorageClass uses `volumeBindingMode: WaitForFirstConsumer`, provisioning is delayed until a pod is scheduled, so the chosen node also satisfies the pod's own scheduling constraints.

Signed-off-by: Pawan <pawan@mayadata.io>
pawanpraka1 authored and kmova committed Nov 1, 2019
1 parent 0218dac commit d0e97cd
Showing 11 changed files with 88 additions and 48 deletions.
cmd/controller/controller.go (12 additions, 5 deletions)
```diff
@@ -85,7 +85,17 @@ func (c *ZVController) syncZV(zv *apis.ZFSVolume) error {
 			zvol.RemoveZvolFinalizer(zv)
 		}
 	} else {
-		err = zvol.SetZvolProp(zv)
+		// if finalizer is not set then it means we are creating
+		// the volume. And if it is set then volume has already been
+		// created and this event is for property change only.
+		if zv.Finalizers != nil {
+			err = zvol.SetZvolProp(zv)
+		} else {
+			err = zvol.CreateZvol(zv)
+			if err == nil {
+				err = zvol.UpdateZvolInfo(zv)
+			}
+		}
 	}
 	return err
 }
@@ -101,11 +111,8 @@ func (c *ZVController) addZV(obj interface{}) {
 	if zvol.NodeID != zv.Spec.OwnerNodeID {
 		return
 	}
-	// TODO(pawan) scheduler will schedule the volume
-	// on a node and populate the OwnerNodeID accordingly.
-	// We need to create the zfs volume in that case.
 	logrus.Infof("Got add event for ZV %s/%s", zv.Spec.PoolName, zv.Name)
-	//c.enqueueZV(zv)
+	c.enqueueZV(zv)
 }
 
 // updateZV is the update event handler for CstorVolumeClaim
```
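For readers skimming the diff, the reconcile flow that `syncZV` now implements can be restated compactly. The sketch below is illustrative only: it uses the repo's `zvol` helpers and `apis` types as named in the diff (imports elided), and it assumes, as the diff's comment implies, that `UpdateZvolInfo` attaches the ZFS finalizer to the CR so that every later event for the same CR is treated as a property change.

```go
// Sketch, not the committed code: the create-vs-update decision in syncZV.
func syncSketch(zv *apis.ZFSVolume) error {
	if zv.DeletionTimestamp != nil {
		// volume is being deleted: destroy the zvol, then drop the finalizer
		if err := zvol.DestroyZvol(zv); err != nil {
			return err
		}
		zvol.RemoveZvolFinalizer(zv)
		return nil
	}
	if zv.Finalizers != nil {
		// finalizer present: the zvol already exists, so this event
		// can only be a property change
		return zvol.SetZvolProp(zv)
	}
	// no finalizer yet: first event for this CR, so create the zvol and
	// record its info (assumed to add the finalizer as a side effect)
	if err := zvol.CreateZvol(zv); err != nil {
		return err
	}
	return zvol.UpdateZvolInfo(zv)
}
```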
deploy/sample/fio.yaml (7 additions, 10 deletions)
```diff
@@ -12,8 +12,14 @@ parameters:
   #keyformat: "raw"
   #keylocation: "file:///home/pawan/key"
   poolname: "zfspv-pool"
-provisioner: openebs.io/zfs
+provisioner: zfs-localpv
 volumeBindingMode: WaitForFirstConsumer
+allowedTopologies:
+- matchLabelExpressions:
+  - key: kubernetes.io/hostname
+    values:
+      - gke-zfspv-pawan-default-pool-c8929518-cgd4
+      - gke-zfspv-pawan-default-pool-c8929518-dxzc
 ---
 kind: PersistentVolumeClaim
 apiVersion: v1
@@ -32,15 +38,6 @@ kind: Pod
 metadata:
   name: fio
 spec:
-  affinity:
-    nodeAffinity:
-      requiredDuringSchedulingIgnoredDuringExecution:
-        nodeSelectorTerms:
-        - matchExpressions:
-          - key: kubernetes.io/hostname
-            operator: In
-            values:
-            - gke-pawan-zfspv-default-pool-1813a371-6nhl
   restartPolicy: Never
   containers:
   - name: perfrunner
```
deploy/sample/percona.yaml (9 additions, 11 deletions)
```diff
@@ -9,7 +9,14 @@ parameters:
   dedup: "on"
   thinprovision: "yes"
   poolname: "zfspv-pool"
-provisioner: openebs.io/zfs
+provisioner: zfs-localpv
+volumeBindingMode: WaitForFirstConsumer
+allowedTopologies:
+- matchLabelExpressions:
+  - key: kubernetes.io/hostname
+    values:
+      - gke-zfspv-pawan-default-pool-c8929518-cgd4
+      - gke-zfspv-pawan-default-pool-c8929518-dxzc
 ---
 kind: PersistentVolumeClaim
 apiVersion: v1
@@ -57,7 +64,7 @@ data:
     mysql -uroot -pk8sDem0 -e "INSERT INTO Hardware (id, name, owner, description) values (1, "dellserver", "basavaraj", "controller");" $DB_NAME
     mysql -uroot -pk8sDem0 -e "DROP DATABASE $DB_NAME;"
 ---
-apiVersion: apps/v1beta1
+apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: percona
@@ -73,15 +80,6 @@ spec:
       labels:
         name: percona
     spec:
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-            - matchExpressions:
-              - key: kubernetes.io/hostname
-                operator: In
-                values:
-                - gke-pawan-zfspv-default-pool-26f2b9a9-5fqd
       containers:
       - resources:
           name: percona
```
deploy/zfs-operator.yaml (13 additions, 7 deletions)
```diff
@@ -86,7 +86,7 @@ roleRef:
 
 ---
 kind: StatefulSet
-apiVersion: apps/v1beta1
+apiVersion: apps/v1
 metadata:
   name: openebs-zfs-controller
   namespace: kube-system
@@ -107,21 +107,21 @@ spec:
       serviceAccount: openebs-zfs-controller-sa
       containers:
         - name: csi-provisioner
-          image: quay.io/k8scsi/csi-provisioner:v1.0.1
+          image: quay.io/k8scsi/csi-provisioner:v1.4.0
           imagePullPolicy: IfNotPresent
           args:
             - "--provisioner=openebs.io/zfs"
             - "--csi-address=$(ADDRESS)"
             - "--v=5"
+            - "--feature-gates=Topology=true"
+            - "--strict-topology"
           env:
             - name: ADDRESS
               value: /var/lib/csi/sockets/pluginproxy/csi.sock
           volumeMounts:
             - name: socket-dir
               mountPath: /var/lib/csi/sockets/pluginproxy/
         - name: csi-attacher
-          image: quay.io/k8scsi/csi-attacher:v1.0.1
+          image: quay.io/k8scsi/csi-attacher:v2.0.0
           imagePullPolicy: IfNotPresent
           args:
             - "--v=5"
@@ -184,7 +184,7 @@ rules:
     verbs: ["get", "list", "watch"]
   - apiGroups: ["storage.k8s.io"]
     resources: ["volumeattachments", "csinodes"]
-    verbs: ["get", "list", "watch", "update"]
+    verbs: ["get", "list", "watch", "update", "patch"]
 
 ---
 kind: ClusterRoleBinding
@@ -324,7 +324,7 @@ roleRef:
 ---
 
 kind: DaemonSet
-apiVersion: apps/v1beta2
+apiVersion: apps/v1
 metadata:
   name: openebs-zfs-node
   namespace: kube-system
@@ -343,7 +343,7 @@ spec:
       hostNetwork: true
       containers:
         - name: csi-node-driver-registrar
-          image: quay.io/k8scsi/csi-node-driver-registrar:v1.0.1
+          image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
           args:
             - "--v=5"
             - "--csi-address=$(ADDRESS)"
@@ -427,21 +427,27 @@ spec:
         - name: zfs-bin
          hostPath:
            path: /sbin/zfs
+           type: File
        - name: libzpool
          hostPath:
            path: /lib/libzpool.so.2.0.0
+           type: File
        - name: libzfscore
          hostPath:
            path: /lib/libzfs_core.so.1.0.0
+           type: File
        - name: libzfs
          hostPath:
            path: /lib/libzfs.so.2.0.0
+           type: File
        - name: libuutil
          hostPath:
            path: /lib/libuutil.so.1.0.1
+           type: File
        - name: libnvpair
          hostPath:
            path: /lib/libnvpair.so.1.0.1
+           type: File
        - name: registration-dir
          hostPath:
            path: /var/lib/kubelet/plugins_registry/
```
pkg/builder/build.go (6 additions, 0 deletions)
```diff
@@ -136,6 +136,12 @@ func (b *Builder) WithThinProv(thinprov string) *Builder {
 	return b
 }
 
+// WithOwnerNode sets owner node for the ZFSVolume where the volume should be provisioned
+func (b *Builder) WithOwnerNode(host string) *Builder {
+	b.volume.Object.Spec.OwnerNodeID = host
+	return b
+}
+
 // WithBlockSize sets blocksize of ZFSVolume
 func (b *Builder) WithBlockSize(blockSize string) *Builder {
 	bs := "4k"
```
pkg/driver/agent.go (4 additions, 0 deletions)
```diff
@@ -181,8 +181,12 @@ func (ns *node) NodeGetInfo(
 	req *csi.NodeGetInfoRequest,
 ) (*csi.NodeGetInfoResponse, error) {
 
+	topology := map[string]string{zvol.ZFSTopologyKey: ns.driver.config.NodeID}
 	return &csi.NodeGetInfoResponse{
 		NodeId: ns.driver.config.NodeID,
+		AccessibleTopology: &csi.Topology{
+			Segments: topology,
+		},
 	}, nil
 }
```
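To make the effect concrete, here is a minimal illustration of what the node plugin reports after this change (the node name is illustrative, and `kubernetes.io/hostname` is the value of `zvol.ZFSTopologyKey` from the diff). Kubelet records this topology in the node's CSINode object, which is how the scheduler and the external-provisioner learn where the driver can place volumes.

```go
package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

func main() {
	// What NodeGetInfo returns after this change, for a node whose
	// NodeID is its hostname (the value below is illustrative only).
	node := "gke-zfspv-pawan-default-pool-c8929518-cgd4"
	resp := &csi.NodeGetInfoResponse{
		NodeId: node,
		AccessibleTopology: &csi.Topology{
			Segments: map[string]string{"kubernetes.io/hostname": node},
		},
	}
	fmt.Printf("node %s topology %v\n", resp.NodeId, resp.AccessibleTopology.Segments)
}
```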
pkg/driver/controller.go (7 additions, 0 deletions)
```diff
@@ -79,6 +79,9 @@ func (cs *controller) CreateVolume(
 	pool := req.GetParameters()["poolname"]
 	tp := req.GetParameters()["thinprovision"]
 
+	// setting first in preferred list as the ownernode of this volume
+	OwnerNode := req.AccessibilityRequirements.Preferred[0].Segments[zvol.ZFSTopologyKey]
+
 	volObj, err := builder.NewBuilder().
 		WithName(volName).
 		WithCapacity(strconv.FormatInt(int64(size), 10)).
@@ -89,6 +92,7 @@ func (cs *controller) CreateVolume(
 		WithKeyFormat(kf).
 		WithKeyLocation(kl).
 		WithThinProv(tp).
+		WithOwnerNode(OwnerNode).
 		WithCompression(compression).Build()
 
 	if err != nil {
@@ -100,9 +104,12 @@ func (cs *controller) CreateVolume(
 		return nil, status.Error(codes.Internal, err.Error())
 	}
 
+	topology := map[string]string{zvol.ZFSTopologyKey: OwnerNode}
+
 	return csipayload.NewCreateVolumeResponseBuilder().
 		WithName(volName).
 		WithCapacity(size).
+		WithTopology(topology).
 		Build(), nil
 }
```
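One thing to watch: the new code indexes `Preferred[0]` directly, which holds as long as the CO always sends accessibility requirements (it does when the Topology feature gate is enabled and the StorageClass uses `WaitForFirstConsumer`). A defensive variant, sketched below with hypothetical names, would guard against a missing or empty list:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// pickOwnerNode is a hypothetical guarded version of the selection in
// CreateVolume: take the first preferred topology segment, but fail
// cleanly when no accessibility requirements were sent.
func pickOwnerNode(req *csi.TopologyRequirement, key string) (string, error) {
	if req == nil || len(req.Preferred) == 0 {
		return "", errors.New("no preferred topology in the request")
	}
	if node, ok := req.Preferred[0].Segments[key]; ok {
		return node, nil
	}
	return "", fmt.Errorf("topology key %q not found", key)
}

func main() {
	req := &csi.TopologyRequirement{
		Preferred: []*csi.Topology{
			{Segments: map[string]string{"kubernetes.io/hostname": "gke-zfspv-pawan-default-pool-c8929518-cgd4"}},
		},
	}
	node, err := pickOwnerNode(req, "kubernetes.io/hostname")
	fmt.Println(node, err)
}
```

With `--strict-topology` set on the provisioner, the preferred list contains exactly the node chosen by the scheduler, so taking the first entry is equivalent to honoring the pod's placement.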
pkg/response/create.go (8 additions, 0 deletions)
```diff
@@ -57,6 +57,14 @@ func (b *CreateVolumeResponseBuilder) WithContext(ctx map[string]string) *Create
 	return b
 }
 
+// WithTopology sets the topology for the
+// CreateVolumeResponse instance
+func (b *CreateVolumeResponseBuilder) WithTopology(topology map[string]string) *CreateVolumeResponseBuilder {
+	b.response.Volume.AccessibleTopology = make([]*csi.Topology, 1)
+	b.response.Volume.AccessibleTopology[0] = &csi.Topology{Segments: topology}
+	return b
+}
+
 // Build returns the constructed instance
 // of csi CreateVolumeResponse
 func (b *CreateVolumeResponseBuilder) Build() *csi.CreateVolumeResponse {
```
pkg/zfs/mount.go (3 additions, 8 deletions)
```diff
@@ -109,14 +109,9 @@ func CreateAndMountZvol(vol *apis.ZFSVolume, mount *apis.MountInfo) error {
 		return status.Error(codes.Internal, "volume is owned by different node")
 	}
 
-	devicePath, err := createZvol(vol)
+	devicePath, err := GetDevicePath(vol)
 	if err != nil {
-		return status.Error(codes.Internal, err.Error())
-	}
-
-	err = UpdateZvolInfo(vol)
-	if err != nil {
-		return status.Error(codes.Internal, err.Error())
+		return status.Error(codes.Internal, "not able to get the device path")
 	}
 
 	/*
@@ -138,7 +133,7 @@ func CreateAndMountZvol(vol *apis.ZFSVolume, mount *apis.MountInfo) error {
 	}
 	err = FormatAndMountZvol(devicePath, mount)
 	if err != nil {
-		return status.Error(codes.Internal, err.Error())
+		return status.Error(codes.Internal, "not able to mount the volume")
 	}
 
 	return err
```
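This completes the move of zvol creation out of the node-publish path: the ZV controller (see cmd/controller/controller.go above), which runs on the volume's owner node, now creates the zvol when the ZFSVolume CR appears, so the mount path only has to resolve the device that creation left behind.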
pkg/zfs/volume.go (3 additions, 2 deletions)
```diff
@@ -31,9 +31,10 @@ const (
 	OpenEBSNamespaceKey string = "OPENEBS_NAMESPACE"
 	// ZFSFinalizer for the ZfsVolume CR
 	ZFSFinalizer string = "zfs.openebs.io/finalizer"
-	// ZFSNodeKey will be used to insert Label
-	// in ZfsVolume CR
+	// ZFSNodeKey will be used to insert Label in ZfsVolume CR
 	ZFSNodeKey string = "kubernetes.io/nodename"
+	// ZFSTopologyKey is supported topology key for the zfs driver
+	ZFSTopologyKey string = "kubernetes.io/hostname"
 )
 
 var (
```
pkg/zfs/zfs_util.go (16 additions, 5 deletions)
```diff
@@ -119,9 +119,9 @@ func buildVolumeDestroyArgs(vol *apis.ZFSVolume) []string {
 	return ZFSVolCmd
 }
 
-// createZvol creates the zvol and returns the corresponding diskPath
+// CreateZvol creates the zvol and returns the corresponding diskPath
 // of the volume which gets created on the node
-func createZvol(vol *apis.ZFSVolume) (string, error) {
+func CreateZvol(vol *apis.ZFSVolume) error {
 	zvol := vol.Spec.PoolName + "/" + vol.Name
 	devicePath := ZFS_DEVPATH + zvol
 
@@ -135,16 +135,16 @@ func createZvol(vol *apis.ZFSVolume) (string, error) {
 			logrus.Errorf(
 				"zfs: could not create zvol %v cmd %v error: %s", zvol, args, string(out),
 			)
-			return "", err
+			return err
 		}
 		logrus.Infof("created zvol %s", zvol)
 	} else if err == nil {
 		logrus.Infof("using existing zvol %v", zvol)
 	} else {
-		return "", err
+		return err
 	}
 
-	return devicePath, nil
+	return nil
 }
 
 // SetZvolProp sets the zvol property
@@ -191,3 +191,14 @@ func DestroyZvol(vol *apis.ZFSVolume) error {
 
 	return nil
 }
+
+// GetDevicePath returns device path for zvol if it exists
+func GetDevicePath(vol *apis.ZFSVolume) (string, error) {
+	zvol := vol.Spec.PoolName + "/" + vol.Name
+	devicePath := ZFS_DEVPATH + zvol
+
+	if _, err := os.Stat(devicePath); os.IsNotExist(err) {
+		return "", err
+	}
+	return devicePath, nil
+}
```
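A reviewer-style caveat on the new helper: only `os.IsNotExist` is treated as a failure, so any other `Stat` error (a permission problem, for instance) falls through and the path is returned as if the device existed. A stricter variant is sketched below; the function name is hypothetical and the `/dev/zvol/` prefix is assumed to match the driver's `ZFS_DEVPATH` constant.

```go
package main

import (
	"fmt"
	"os"
)

// getDevicePathStrict is a hypothetical variant of GetDevicePath that
// propagates every Stat error rather than only "does not exist".
func getDevicePathStrict(poolName, volName string) (string, error) {
	devicePath := "/dev/zvol/" + poolName + "/" + volName // assumed ZFS_DEVPATH prefix
	if _, err := os.Stat(devicePath); err != nil {
		return "", err
	}
	return devicePath, nil
}

func main() {
	path, err := getDevicePathStrict("zfspv-pool", "pvc-example")
	fmt.Println(path, err)
}
```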
