Skip to content

Commit

Permalink
Modify RBD plugin to use a single ID and move the id and key into the…
Browse files Browse the repository at this point in the history
… secret

RBD plugin needs only a single ID to manage images and operations against a
pool, mentioned in the storage class. The current scheme of 2 IDs is hence not
needed and is removed in this commit.

Further, unlike CephFS plugin, the RBD plugin splits the user id and the key
into the storage class and the secret respectively. Also the parameter name
for the key in the secret is noted in the storage class, making it a variant and
hampering usability/comprehension. This is also fixed by moving the id and the key
to the secret and not retaining the same in the storage class, like CephFS.

Fixes #270

Testing done:
- Basic PVC creation and mounting

Signed-off-by: ShyamsundarR <srangana@redhat.com>
  • Loading branch information
ShyamsundarR authored and mergify[bot] committed Jun 24, 2019
1 parent 22ff5c0 commit c5762b6
Show file tree
Hide file tree
Showing 25 changed files with 284 additions and 402 deletions.
5 changes: 2 additions & 3 deletions docs/deploy-rbd.md
Original file line number Diff line number Diff line change
Expand Up @@ -61,9 +61,8 @@ provisioning.

**Required secrets:**

Admin credentials are required for provisioning new RBD images `ADMIN_NAME`:
`ADMIN_PASSWORD` - note that the key of the key-value pair is the name of the
client with admin privileges, and the value is its password
User credentials, with required access to the pool being used in the storage class,
are required for provisioning new RBD images.

## Deployment with Kubernetes

Expand Down
13 changes: 6 additions & 7 deletions e2e/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -193,7 +193,6 @@ func createCephfsStorageClass(c kubernetes.Interface, f *framework.Framework) {
func createRBDStorageClass(c kubernetes.Interface, f *framework.Framework) {
scPath := fmt.Sprintf("%s/%s", rbdExamplePath, "storageclass.yaml")
sc := getStorageClass(scPath)
delete(sc.Parameters, "userid")
sc.Parameters["pool"] = "replicapool"
opt := metav1.ListOptions{
LabelSelector: "app=rook-ceph-tools",
Expand Down Expand Up @@ -283,10 +282,10 @@ func createCephfsSecret(c kubernetes.Interface, f *framework.Framework) {
LabelSelector: "app=rook-ceph-tools",
}
adminKey := execCommandInPod(f, "ceph auth get-key client.admin", rookNS, &opt)
sc.Data["adminID"] = []byte("admin")
sc.Data["adminKey"] = []byte(adminKey)
delete(sc.Data, "userID")
delete(sc.Data, "userKey")
sc.StringData["adminID"] = "admin"
sc.StringData["adminKey"] = adminKey
delete(sc.StringData, "userID")
delete(sc.StringData, "userKey")
_, err := c.CoreV1().Secrets("default").Create(&sc)
Expect(err).Should(BeNil())
}
Expand All @@ -298,8 +297,8 @@ func createRBDSecret(c kubernetes.Interface, f *framework.Framework) {
LabelSelector: "app=rook-ceph-tools",
}
adminKey := execCommandInPod(f, "ceph auth get-key client.admin", rookNS, &opt)
sc.Data["admin"] = []byte(adminKey)
delete(sc.Data, "kubernetes")
sc.StringData["userID"] = "admin"
sc.StringData["userKey"] = adminKey
_, err := c.CoreV1().Secrets("default").Create(&sc)
Expect(err).Should(BeNil())
}
Expand Down
10 changes: 5 additions & 5 deletions examples/cephfs/secret.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,11 @@ kind: Secret
metadata:
name: csi-cephfs-secret
namespace: default
data:
stringData:
# Required for statically provisioned volumes
userID: BASE64-ENCODED-VALUE
userKey: BASE64-ENCODED-VALUE
userID: <plaintext ID>
userKey: <Ceph auth key corresponding to ID above>

# Required for dynamically provisioned volumes
adminID: BASE64-ENCODED-VALUE
adminKey: BASE64-ENCODED-VALUE
adminID: <plaintext ID>
adminKey: <Ceph auth key corresponding to ID above>
11 changes: 6 additions & 5 deletions examples/rbd/secret.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,9 @@ kind: Secret
metadata:
name: csi-rbd-secret
namespace: default
data:
# Key value corresponds to a user name defined in ceph cluster
admin: BASE64-ENCODED-PASSWORD
# Key value corresponds to a user name defined in ceph cluster
kubernetes: BASE64-ENCODED-PASSWORD
stringData:
# Key values correspond to a user name and its key, as defined in the
# ceph cluster. User ID should have required access to the 'pool'
# specified in the storage class
userID: <plaintext ID>
userKey: <Ceph auth key corresponding to ID above>
7 changes: 2 additions & 5 deletions examples/rbd/storageclass.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -24,16 +24,13 @@ parameters:
# CSI RBD currently supports only `layering` feature.
imageFeatures: layering

# The secrets have to contain Ceph admin credentials.
# The secrets have to contain Ceph credentials with required access
# to the 'pool'.
csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
csi.storage.k8s.io/provisioner-secret-namespace: default
csi.storage.k8s.io/node-publish-secret-name: csi-rbd-secret
csi.storage.k8s.io/node-publish-secret-namespace: default

# Ceph users for operating RBD
adminid: admin
userid: kubernetes

# uncomment the following to use rbd-nbd as mounter on supported nodes
# mounter: rbd-nbd
reclaimPolicy: Delete
18 changes: 9 additions & 9 deletions pkg/cephfs/cephfs_util.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,15 +33,15 @@ type CephFilesystemDetails struct {
MDSMap MDSMap `json:"mdsmap"`
}

func getFscID(monitors, id, key, fsName string) (int64, error) {
func getFscID(monitors string, cr *util.Credentials, fsName string) (int64, error) {
// ceph fs get myfs --format=json
// {"mdsmap":{...},"id":2}
var fsDetails CephFilesystemDetails
err := execCommandJSON(&fsDetails,
"ceph",
"-m", monitors,
"--id", id,
"--key="+key,
"--id", cr.ID,
"--key="+cr.Key,
"-c", util.CephConfigPath,
"fs", "get", fsName, "--format=json",
)
Expand All @@ -61,15 +61,15 @@ type CephFilesystem struct {
DataPoolIDs []int `json:"data_pool_ids"`
}

func getMetadataPool(monitors, id, key, fsName string) (string, error) {
func getMetadataPool(monitors string, cr *util.Credentials, fsName string) (string, error) {
// ./tbox ceph fs ls --format=json
// [{"name":"myfs","metadata_pool":"myfs-metadata","metadata_pool_id":4,...},...]
var filesystems []CephFilesystem
err := execCommandJSON(&filesystems,
"ceph",
"-m", monitors,
"--id", id,
"--key="+key,
"--id", cr.ID,
"--key="+cr.Key,
"-c", util.CephConfigPath,
"fs", "ls", "--format=json",
)
Expand All @@ -91,15 +91,15 @@ type CephFilesystemDump struct {
Filesystems []CephFilesystemDetails `json:"filesystems"`
}

func getFsName(monitors, id, key string, fscID int64) (string, error) {
func getFsName(monitors string, cr *util.Credentials, fscID int64) (string, error) {
// ./tbox ceph fs dump --format=json
// JSON: {...,"filesystems":[{"mdsmap":{},"id":<n>},...],...}
var fsDump CephFilesystemDump
err := execCommandJSON(&fsDump,
"ceph",
"-m", monitors,
"--id", id,
"--key="+key,
"--id", cr.ID,
"--key="+cr.Key,
"-c", util.CephConfigPath,
"fs", "dump", "--format=json",
)
Expand Down
24 changes: 12 additions & 12 deletions pkg/cephfs/cephuser.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,10 +39,10 @@ type cephEntity struct {
Caps cephEntityCaps `json:"caps"`
}

func (ent *cephEntity) toCredentials() *credentials {
return &credentials{
id: ent.Entity[len(cephEntityClientPrefix):],
key: ent.Key,
func (ent *cephEntity) toCredentials() *util.Credentials {
return &util.Credentials{
ID: ent.Entity[len(cephEntityClientPrefix):],
Key: ent.Key,
}
}

Expand All @@ -63,30 +63,30 @@ func getSingleCephEntity(args ...string) (*cephEntity, error) {
return &ents[0], nil
}

func genUserIDs(adminCr *credentials, volID volumeID) (adminID, userID string) {
return cephEntityClientPrefix + adminCr.id, cephEntityClientPrefix + getCephUserName(volID)
func genUserIDs(adminCr *util.Credentials, volID volumeID) (adminID, userID string) {
return cephEntityClientPrefix + adminCr.ID, cephEntityClientPrefix + getCephUserName(volID)
}

func getCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) (*cephEntity, error) {
func getCephUser(volOptions *volumeOptions, adminCr *util.Credentials, volID volumeID) (*cephEntity, error) {
adminID, userID := genUserIDs(adminCr, volID)

return getSingleCephEntity(
"-m", volOptions.Monitors,
"-n", adminID,
"--key="+adminCr.key,
"--key="+adminCr.Key,
"-c", util.CephConfigPath,
"-f", "json",
"auth", "get", userID,
)
}

func createCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) (*cephEntity, error) {
func createCephUser(volOptions *volumeOptions, adminCr *util.Credentials, volID volumeID) (*cephEntity, error) {
adminID, userID := genUserIDs(adminCr, volID)

return getSingleCephEntity(
"-m", volOptions.Monitors,
"-n", adminID,
"--key="+adminCr.key,
"--key="+adminCr.Key,
"-c", util.CephConfigPath,
"-f", "json",
"auth", "get-or-create", userID,
Expand All @@ -97,14 +97,14 @@ func createCephUser(volOptions *volumeOptions, adminCr *credentials, volID volum
)
}

func deleteCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) error {
func deleteCephUser(volOptions *volumeOptions, adminCr *util.Credentials, volID volumeID) error {
adminID, userID := genUserIDs(adminCr, volID)

// TODO: Need to return success if userID is not found
return execCommandErr("ceph",
"-m", volOptions.Monitors,
"-n", adminID,
"--key="+adminCr.key,
"--key="+adminCr.Key,
"-c", util.CephConfigPath,
"auth", "rm", userID,
)
Expand Down
8 changes: 4 additions & 4 deletions pkg/cephfs/controllerserver.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ var (
// createBackingVolume creates the backing subvolume and user/key for the given volOptions and vID,
// and on any error cleans up any created entities
func (cs *ControllerServer) createBackingVolume(volOptions *volumeOptions, vID *volumeIdentifier, secret map[string]string) error {
cr, err := getAdminCredentials(secret)
cr, err := util.GetAdminCredentials(secret)
if err != nil {
return status.Error(codes.InvalidArgument, err.Error())
}
Expand Down Expand Up @@ -168,14 +168,14 @@ func (cs *ControllerServer) deleteVolumeDeprecated(req *csi.DeleteVolumeRequest)

// mons may have changed since create volume,
// retrieve the latest mons and override old mons
if mon, secretsErr := getMonValFromSecret(secrets); secretsErr == nil && len(mon) > 0 {
if mon, secretsErr := util.GetMonValFromSecret(secrets); secretsErr == nil && len(mon) > 0 {
klog.Infof("overriding monitors [%q] with [%q] for volume %s", ce.VolOptions.Monitors, mon, volID)
ce.VolOptions.Monitors = mon
}

// Deleting a volume requires admin credentials

cr, err := getAdminCredentials(secrets)
cr, err := util.GetAdminCredentials(secrets)
if err != nil {
klog.Errorf("failed to retrieve admin credentials: %v", err)
return nil, status.Error(codes.InvalidArgument, err.Error())
Expand Down Expand Up @@ -232,7 +232,7 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
}

// Deleting a volume requires admin credentials
cr, err := getAdminCredentials(secrets)
cr, err := util.GetAdminCredentials(secrets)
if err != nil {
klog.Errorf("failed to retrieve admin credentials: %v", err)
return nil, status.Error(codes.InvalidArgument, err.Error())
Expand Down
12 changes: 6 additions & 6 deletions pkg/cephfs/fsjournal.go
Original file line number Diff line number Diff line change
Expand Up @@ -49,12 +49,12 @@ func checkVolExists(volOptions *volumeOptions, secret map[string]string) (*volum
vid volumeIdentifier
)

cr, err := getAdminCredentials(secret)
cr, err := util.GetAdminCredentials(secret)
if err != nil {
return nil, err
}

imageUUID, err := volJournal.CheckReservation(volOptions.Monitors, cr.id, cr.key,
imageUUID, err := volJournal.CheckReservation(volOptions.Monitors, cr,
volOptions.MetadataPool, volOptions.RequestName, "")
if err != nil {
return nil, err
Expand Down Expand Up @@ -86,12 +86,12 @@ func checkVolExists(volOptions *volumeOptions, secret map[string]string) (*volum

// undoVolReservation is a helper routine to undo a name reservation for a CSI VolumeName
func undoVolReservation(volOptions *volumeOptions, vid volumeIdentifier, secret map[string]string) error {
cr, err := getAdminCredentials(secret)
cr, err := util.GetAdminCredentials(secret)
if err != nil {
return err
}

err = volJournal.UndoReservation(volOptions.Monitors, cr.id, cr.key, volOptions.MetadataPool,
err = volJournal.UndoReservation(volOptions.Monitors, cr, volOptions.MetadataPool,
vid.FsSubvolName, volOptions.RequestName)

return err
Expand All @@ -105,12 +105,12 @@ func reserveVol(volOptions *volumeOptions, secret map[string]string) (*volumeIde
vid volumeIdentifier
)

cr, err := getAdminCredentials(secret)
cr, err := util.GetAdminCredentials(secret)
if err != nil {
return nil, err
}

imageUUID, err := volJournal.ReserveName(volOptions.Monitors, cr.id, cr.key,
imageUUID, err := volJournal.ReserveName(volOptions.Monitors, cr,
volOptions.MetadataPool, volOptions.RequestName, "")
if err != nil {
return nil, err
Expand Down
6 changes: 3 additions & 3 deletions pkg/cephfs/mountcache.go
Original file line number Diff line number Diff line change
Expand Up @@ -90,13 +90,13 @@ func mountOneCacheEntry(volOptions *volumeOptions, vid *volumeIdentifier, me *vo

var (
err error
cr *credentials
cr *util.Credentials
)
volID := vid.VolumeID

if volOptions.ProvisionVolume {
volOptions.RootPath = getVolumeRootPathCeph(volumeID(vid.FsSubvolName))
cr, err = getAdminCredentials(decodeCredentials(me.Secrets))
cr, err = util.GetAdminCredentials(decodeCredentials(me.Secrets))
if err != nil {
return err
}
Expand All @@ -107,7 +107,7 @@ func mountOneCacheEntry(volOptions *volumeOptions, vid *volumeIdentifier, me *vo
}
cr = entity.toCredentials()
} else {
cr, err = getUserCredentials(decodeCredentials(me.Secrets))
cr, err = util.GetUserCredentials(decodeCredentials(me.Secrets))
if err != nil {
return err
}
Expand Down
9 changes: 5 additions & 4 deletions pkg/cephfs/nodeserver.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ import (
"os"

csicommon "github.com/ceph/ceph-csi/pkg/csi-common"
"github.com/ceph/ceph-csi/pkg/util"

"github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc/codes"
Expand All @@ -40,9 +41,9 @@ var (
mtxNodeVolumeID = keymutex.NewHashed(0)
)

func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi.NodeStageVolumeRequest) (*credentials, error) {
func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi.NodeStageVolumeRequest) (*util.Credentials, error) {
var (
cr *credentials
cr *util.Credentials
secrets = req.GetSecrets()
)

Expand All @@ -51,7 +52,7 @@ func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi

// First, get admin credentials - those are needed for retrieving the user credentials

adminCr, err := getAdminCredentials(secrets)
adminCr, err := util.GetAdminCredentials(secrets)
if err != nil {
return nil, fmt.Errorf("failed to get admin credentials from node stage secrets: %v", err)
}
Expand All @@ -67,7 +68,7 @@ func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi
} else {
// The volume is pre-made, credentials are in node stage secrets

userCr, err := getUserCredentials(req.GetSecrets())
userCr, err := util.GetUserCredentials(req.GetSecrets())
if err != nil {
return nil, fmt.Errorf("failed to get user credentials from node stage secrets: %v", err)
}
Expand Down
Loading

0 comments on commit c5762b6

Please sign in to comment.