Skip to content

Commit

Permalink
storage policy quota - 1st set
Browse files Browse the repository at this point in the history
  • Loading branch information
kavyashree-r committed Sep 4, 2024
1 parent a87dcd7 commit 0440d5f
Show file tree
Hide file tree
Showing 3 changed files with 240 additions and 21 deletions.
67 changes: 64 additions & 3 deletions tests/e2e/csi_static_provisioning_basic.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ import (
admissionapi "k8s.io/pod-security-admission/api"

apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
Expand Down Expand Up @@ -78,6 +79,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() {
ctx context.Context
nonSharedDatastoreURL string
fullSyncWaitTime int
isStorageQuotaFSSEnabled bool
)

ginkgo.BeforeEach(func() {
Expand Down Expand Up @@ -140,6 +142,8 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() {
} else {
fullSyncWaitTime = defaultFullSyncWaitTime
}
vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort
isStorageQuotaFSSEnabled = isFssEnabled(ctx, vcAddress, "STORAGE_QUOTA_M2")
})

ginkgo.AfterEach(func() {
Expand Down Expand Up @@ -868,8 +872,9 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() {
// 11. Verify CRD deleted automatically.
ginkgo.It("[csi-supervisor] [stretched-svc] Verify static provisioning workflow on SVC import "+
"FCD", ginkgo.Label(p0, block, wcp), func() {

var err error
var totalquota_used_before, storagepolicyquota_pvc_before, storagepolicy_usage_pvc_before, totalquota_used_after *resource.Quantity
var storagepolicyquota_pvc_after, storagepolicy_usage_pvc_after *resource.Quantity
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

Expand All @@ -879,7 +884,20 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() {
framework.Logf("pvc name :%s", pvcName)
namespace = getNamespaceToRunTests(f)

restConfig, _, profileID := staticProvisioningPreSetUpUtil(ctx)
restConfig, storageclass, profileID := staticProvisioningPreSetUpUtil(ctx)

if isStorageQuotaFSSEnabled {
totalquota_used_before, _ = getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storageclass.Name, namespace)
framework.Logf("totalUsedQuota_Before :%v", totalquota_used_before)

storagepolicyquota_pvc_before, _ = getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig,
storagePolicyName, namespace, volExtensionName)
framework.Logf("volume ********** storagepolicyquota_pvc_before :%v ", storagepolicyquota_pvc_before)

storagepolicy_usage_pvc_before, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig,
storagePolicyName, namespace, pvcUsage)
framework.Logf("volume ********** storagepolicy_usage_pvc_before :%v", storagepolicy_usage_pvc_before)
}

ginkgo.By("Creating FCD Disk")
fcdID, err := e2eVSphere.createFCDwithValidProfileID(ctx,
Expand All @@ -904,6 +922,27 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() {
pv := getPvFromClaim(client, namespace, pvcName)
verifyBidirectionalReferenceOfPVandPVC(ctx, client, pvc, pv, fcdID)

if isStorageQuotaFSSEnabled {
totalquota_used_after, _ = getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storageclass.Name, namespace)
framework.Logf("totalquota_used_after :%v", totalquota_used_after)

storagepolicyquota_pvc_after, _ = getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig,
storagePolicyName, namespace, volExtensionName)
framework.Logf("********** storagepolicyquota_pvc_after :%v", storagepolicyquota_pvc_after)

storagepolicy_usage_pvc_after, _ = getStoragePolicyUsageForSpecificResourceType(ctx, restConfig,
storagePolicyName, namespace, pvcUsage)
framework.Logf("********** pvc_Usage_Quota_After :%v", storagepolicy_usage_pvc_after)

quotavalidationStatus := validate_totalStoragequota(ctx, diskSizeInMb, totalquota_used_before, totalquota_used_after)
gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse())
quotavalidationStatus = validate_totalStoragequota(ctx, diskSizeInMb, storagepolicyquota_pvc_before, storagepolicyquota_pvc_after)
gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse())
quotavalidationStatus = validate_totalStoragequota(ctx, diskSizeInMb, storagepolicy_usage_pvc_before, storagepolicy_usage_pvc_after)
gomega.Expect(quotavalidationStatus).NotTo(gomega.BeFalse())

}

ginkgo.By("Creating pod")
pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvc}, false, "")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expand Down Expand Up @@ -933,6 +972,28 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() {

defer func() {
testCleanUpUtil(ctx, restConfig, cnsRegisterVolume, namespace, pvc.Name, pv.Name)

if isStorageQuotaFSSEnabled {
totalquota_used_after_Cleanup, totalReservedQuota_after_Cleanup := getTotalQuotaConsumedByStoragePolicy(ctx, restConfig, storageclass.Name, namespace)
framework.Logf("totalquota_used_after :%v, totalReservedQuota_after: %v totalQuota", totalquota_used_after_Cleanup, totalReservedQuota_after_Cleanup)

storagepolicyquota_pvc_after_cleanup, pvc_reservedQuota_after_cleanup := getStoragePolicyQuotaForSpecificResourceType(ctx, restConfig,
storagePolicyName, namespace, volExtensionName)
framework.Logf("volume ********** storagepolicyquota_pvc_after_cleanup :%v, pvc_reservedQuota_after__cleanup: %v PolicyQuota", storagepolicyquota_pvc_after_cleanup, pvc_reservedQuota_after_cleanup)

pvc_Usage_Quota_After_cleanup, pvc_reserved_Quota_After_cleanup := getStoragePolicyUsageForSpecificResourceType(ctx, restConfig,
storagePolicyName, namespace, pvcUsage)
framework.Logf("volume ********** pvc_Usage_Quota_After :%v, pvc_reserved_Quota_After: %v ", pvc_Usage_Quota_After_cleanup, pvc_reserved_Quota_After_cleanup)

quotavalidationStatus_afterCleanup := validate_totalStoragequota_afterCleanUp(ctx, diskSizeInMb, totalquota_used_after, totalquota_used_after_Cleanup)
gomega.Expect(quotavalidationStatus_afterCleanup).NotTo(gomega.BeFalse())
quotavalidationStatus_afterCleanup = validate_totalStoragequota_afterCleanUp(ctx, diskSizeInMb, storagepolicyquota_pvc_after, storagepolicyquota_pvc_after_cleanup)
gomega.Expect(quotavalidationStatus_afterCleanup).NotTo(gomega.BeFalse())
quotavalidationStatus_afterCleanup = validate_totalStoragequota_afterCleanUp(ctx, diskSizeInMb, storagepolicy_usage_pvc_after, pvc_Usage_Quota_After_cleanup)
gomega.Expect(quotavalidationStatus_afterCleanup).NotTo(gomega.BeFalse())
reservedQuota := validate_reservedQuota_afterCleanUp(ctx, totalReservedQuota_after_Cleanup, pvc_reservedQuota_after_cleanup, pvc_reserved_Quota_After_cleanup)
gomega.Expect(reservedQuota).NotTo(gomega.BeFalse())
}
}()

})
Expand Down Expand Up @@ -2272,7 +2333,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() {
})

/*
VMDK is deleted from datastore but CNS volume is still present
VMDK is deleted from datastore but CNS volume is still present
STEPS:
1.Create FCD disk.
2.Creating Static PV with FCD ID and PVC from it.
Expand Down
6 changes: 6 additions & 0 deletions tests/e2e/e2e_common.go
Original file line number Diff line number Diff line change
Expand Up @@ -253,6 +253,12 @@ const (
podVMOnStretchedSupervisor = "stretched-svc"
stretchedSVCTopologyLevels = 1
envZonalStoragePolicyName2 = "ZONAL2_STORAGECLASS"
volExtensionName = "volume.cns.vsphere.vmware.com"
snapshotExtensionName = "snapshot.cns.vsphere.vmware.com"
vmServiceExtensionName = "vmservice.cns.vsphere.vmware.com"
pvcUsage = "-pvc-usage"
snapshotUsage = "-snapshot-usage"
vmUsage = "-vm-usage"
)

/*
Expand Down
188 changes: 170 additions & 18 deletions tests/e2e/util.go
Original file line number Diff line number Diff line change
Expand Up @@ -1446,25 +1446,21 @@ func invokeVCenterServiceControl(ctx context.Context, command, service, host str
}

/*
Note: As per PR #2935677, even if cns_new_sync is enabled, volume expansion
will not work if sps-service is down.
isFssEnabled invokes the given command to check if vCenter has a particular FSS enabled or not.
*/
// func isFssEnabled(host, fss string) bool {
// sshCmd := fmt.Sprintf("python /usr/sbin/feature-state-wrapper.py %s", fss)
// framework.Logf("Checking if fss is enabled on vCenter host %v", host)
// result, err := fssh.SSH(ctx, sshCmd, host, framework.TestContext.Provider)
// fssh.LogResult(result)
// if err == nil && result.Code == 0 {
// return strings.TrimSpace(result.Stdout) == "enabled"
// } else {
// ginkgo.By(fmt.Sprintf("couldn't execute command: %s on vCenter host: %v", sshCmd, err))
// gomega.Expect(err).NotTo(gomega.HaveOccurred())
// }
// return false
// }
// isFssEnabled runs the feature-state-wrapper script on the given vCenter
// host over SSH and reports whether the named FSS (feature state switch) is
// enabled. A command failure is surfaced as a gomega assertion failure.
func isFssEnabled(ctx context.Context, host, fss string) bool {
	sshCmd := fmt.Sprintf("python /usr/sbin/feature-state-wrapper.py %s", fss)
	framework.Logf("Checking if fss is enabled on vCenter host %v", host)
	result, err := fssh.SSH(ctx, sshCmd, host, framework.TestContext.Provider)
	fssh.LogResult(result)
	// Error path first: log the failure and fail the spec if SSH itself errored.
	// (A non-zero exit code with a nil error falls through and returns false.)
	if err != nil || result.Code != 0 {
		ginkgo.By(fmt.Sprintf("couldn't execute command: %s on vCenter host: %v", sshCmd, err))
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		return false
	}
	return strings.TrimSpace(result.Stdout) == "enabled"
}

// waitVCenterServiceToBeInState invokes the status check for the given service and waits
// via service-control on the given vCenter host over SSH.
Expand Down Expand Up @@ -7026,3 +7022,159 @@ func removeStoragePolicyQuota(ctx context.Context, restClientConfig *rest.Config
framework.Logf("Quota after removing: %s", spq.Spec.Limit)

}

// getStoragePolicyQuotaForSpecificResourceType reads the StoragePolicyQuota CR
// for the given storage class in the given namespace and returns the used and
// reserved quota recorded for one resource type (extensionType is one of
// volExtensionName, snapshotExtensionName or vmServiceExtensionName).
// Both return values are nil when no matching entry exists in the CR status.
func getStoragePolicyQuotaForSpecificResourceType(ctx context.Context, restClientConfig *rest.Config,
	scName string, namespace string, extensionType string) (*resource.Quantity, *resource.Quantity) {
	var usedQuota, reservedQuota *resource.Quantity
	cnsOperatorClient, err := k8s.NewClientForGroup(ctx, restClientConfig, cnsoperatorv1alpha1.GroupName)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

	// The CR is named "<storage-class-name><storagePolicyQuota suffix>".
	spq := &storagepolicyv1alpha2.StoragePolicyQuota{}
	err = cnsOperatorClient.Get(ctx,
		pkgtypes.NamespacedName{Name: scName + storagePolicyQuota, Namespace: namespace}, spq)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

	// Distinct loop variable names (scStatus / typeStatus) — the original code
	// shadowed the outer "item" in the inner loop, which was error-prone.
	for _, scStatus := range spq.Status.SCLevelQuotaStatuses {
		if scStatus.StorageClassName != scName {
			continue
		}
		for _, typeStatus := range spq.Status.ResourceTypeLevelQuotaStatuses {
			if typeStatus.ResourceExtensionName == extensionType {
				usedQuota = typeStatus.ResourceTypeSCLevelQuotaStatuses[0].SCLevelQuotaUsage.Used
				reservedQuota = typeStatus.ResourceTypeSCLevelQuotaStatuses[0].SCLevelQuotaUsage.Reserved
				ginkgo.By(fmt.Sprintf("usedQuota %v, reservedQuota %v", usedQuota, reservedQuota))
				break
			}
		}
	}

	return usedQuota, reservedQuota
}

// getTotalQuotaConsumedByStoragePolicy fetches the StoragePolicyQuota CR for
// the given storage class in the given namespace and returns the storage-class
// level used and reserved quota. Both return values are nil when the CR status
// has no entry for scName.
func getTotalQuotaConsumedByStoragePolicy(ctx context.Context, restClientConfig *rest.Config,
	scName string, namespace string) (*resource.Quantity, *resource.Quantity) {
	cnsOperatorClient, err := k8s.NewClientForGroup(ctx, restClientConfig, cnsoperatorv1alpha1.GroupName)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

	// The CR is named "<storage-class-name><storagePolicyQuota suffix>".
	quotaCR := &storagepolicyv1alpha2.StoragePolicyQuota{}
	err = cnsOperatorClient.Get(ctx,
		pkgtypes.NamespacedName{Name: scName + storagePolicyQuota, Namespace: namespace}, quotaCR)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

	var usedQuota, reservedQuota *resource.Quantity
	for _, scStatus := range quotaCR.Status.SCLevelQuotaStatuses {
		if scStatus.StorageClassName == scName {
			usedQuota = scStatus.SCLevelQuotaUsage.Used
			reservedQuota = scStatus.SCLevelQuotaUsage.Reserved
			ginkgo.By(fmt.Sprintf("usedQuota %v, reservedQuota %v", usedQuota, reservedQuota))
		}
	}
	return usedQuota, reservedQuota
}

// getStoragePolicyUsageForSpecificResourceType reads the StoragePolicyUsage CR
// for one resource type and returns its used and reserved quota.
// resourceUsage selects the CR name suffix and must be one of pvcUsage,
// vmUsage or snapshotUsage.
func getStoragePolicyUsageForSpecificResourceType(ctx context.Context, restClientConfig *rest.Config,
	scName string, namespace string, resourceUsage string) (*resource.Quantity, *resource.Quantity) {

	cnsOperatorClient, err := k8s.NewClientForGroup(ctx, restClientConfig, cnsoperatorv1alpha1.GroupName)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

	// This fetches a StoragePolicyUsage CR (not StoragePolicyQuota); the local
	// is named accordingly. The CR is named "<storage-class-name><resourceUsage>".
	spu := &storagepolicyv1alpha2.StoragePolicyUsage{}
	err = cnsOperatorClient.Get(ctx,
		pkgtypes.NamespacedName{Name: scName + resourceUsage, Namespace: namespace}, spu)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

	return spu.Status.ResourceTypeLevelQuotaUsage.Used, spu.Status.ResourceTypeLevelQuotaUsage.Reserved
}

// validate_totalStoragequota verifies that the used storage quota grew by
// exactly diskSize (in MiB) after provisioning, i.e.
// quotaAfter == quotaBefore + diskSize.
//
// The comparison is done with resource.Quantity arithmetic rather than by
// parsing canonicalized strings: the original string-based approach compared
// raw numbers across units when the two quantities canonicalized to different
// suffixes (e.g. "Mi" before vs "Gi" after) and truncated diskSize on the
// MiB->GiB integer division.
func validate_totalStoragequota(ctx context.Context, diskSize int64, totalUsedQuotaBefore *resource.Quantity, totalUsedQuotaAfter *resource.Quantity) bool {
	// The quota getters return nil when no entry matched; treat that as invalid
	// instead of dereferencing nil.
	if totalUsedQuotaBefore == nil || totalUsedQuotaAfter == nil {
		ginkgo.By("validate_totalStoragequota: nil quota quantity supplied")
		return false
	}

	// expected = before + diskSize, with diskSize converted from MiB to bytes.
	expected := totalUsedQuotaBefore.DeepCopy()
	expected.Add(*resource.NewQuantity(diskSize*1024*1024, resource.BinarySI))

	ginkgo.By(fmt.Sprintf("quotaBefore: %s, diskSize: %dMi, expected quotaAfter: %s, actual quotaAfter: %s",
		totalUsedQuotaBefore.String(), diskSize, expected.String(), totalUsedQuotaAfter.String()))

	validTotalQuota := expected.Cmp(*totalUsedQuotaAfter) == 0
	ginkgo.By(fmt.Sprintf("validTotalQuota on storagePolicy: %v", validTotalQuota))
	return validTotalQuota
}

// validate_totalStoragequota_afterCleanUp verifies that the used storage quota
// shrank by exactly diskSize (in MiB) after clean-up, i.e.
// quotaAfterCleanup == quotaBefore - diskSize.
//
// Uses resource.Quantity arithmetic instead of parsing canonicalized strings:
// the string-based approach compared raw numbers across units when the two
// quantities canonicalized to different suffixes, truncated diskSize on the
// MiB->GiB integer division, and its log message claimed an addition while the
// code subtracted.
func validate_totalStoragequota_afterCleanUp(ctx context.Context, diskSize int64, totalUsedQuotaBefore *resource.Quantity, totalUsedQuotaAfterCleanup *resource.Quantity) bool {
	// The quota getters return nil when no entry matched; treat that as invalid
	// instead of dereferencing nil.
	if totalUsedQuotaBefore == nil || totalUsedQuotaAfterCleanup == nil {
		ginkgo.By("validate_totalStoragequota_afterCleanUp: nil quota quantity supplied")
		return false
	}

	// expected = before - diskSize, with diskSize converted from MiB to bytes.
	expected := totalUsedQuotaBefore.DeepCopy()
	expected.Sub(*resource.NewQuantity(diskSize*1024*1024, resource.BinarySI))

	ginkgo.By(fmt.Sprintf("quotaBefore: %s, diskSize: %dMi, expected quotaAfterCleanup: %s, actual quotaAfterCleanup: %s",
		totalUsedQuotaBefore.String(), diskSize, expected.String(), totalUsedQuotaAfterCleanup.String()))

	validTotalQuota := expected.Cmp(*totalUsedQuotaAfterCleanup) == 0
	ginkgo.By(fmt.Sprintf("validTotalQuota on storagePolicy: %v", validTotalQuota))
	return validTotalQuota
}

// validate_reservedQuota_afterCleanUp checks that, after a volume reaches
// bound state or after test clean-up, the reserved quota reported by the total
// StorageQuota CR, the StoragePolicyQuota CR and the StoragePolicyUsage CR has
// all returned to "0".
func validate_reservedQuota_afterCleanUp(ctx context.Context, total_reservedQuota *resource.Quantity, policy_reservedQuota *resource.Quantity, storagepolicyUsage_reserved_Quota *resource.Quantity) bool {
	totalStr := total_reservedQuota.String()
	policyStr := policy_reservedQuota.String()
	usageStr := storagepolicyUsage_reserved_Quota.String()

	ginkgo.By(fmt.Sprintf("reservedQuota on total storageQuota CR: %v,storagePolicyQuota CR: %v, storagePolicyUsage CR: %v ",
		totalStr, policyStr, usageStr))

	allZero := totalStr == "0" && policyStr == "0" && usageStr == "0"
	return allZero
}

0 comments on commit 0440d5f

Please sign in to comment.