From 07ef8820d852db2275545ba75ea4cb046f06dcf5 Mon Sep 17 00:00:00 2001
From: Yury Kulazhenkov
Date: Tue, 8 Oct 2024 20:29:15 +0300
Subject: [PATCH] Add support for /32 and /128 allocations for CIDRPool

---
 README.md                                 |   2 +-
 api/v1alpha1/cidrpool_test.go             |   8 +-
 api/v1alpha1/cidrpool_validate.go         |  10 +-
 .../controllers/cidrpool/cidrpool.go      |   8 +-
 .../controllers/cidrpool/cidrpool_test.go | 196 ++++++++++++++++++
 5 files changed, 207 insertions(+), 17 deletions(-)

diff --git a/README.md b/README.md
index 10b7c2f..cb5fc7b 100644
--- a/README.md
+++ b/README.md
@@ -394,7 +394,7 @@ spec:
 
 > __Notes:__
 >
 > * pool name is composed of alphanumeric letters separated by dots(`.`) underscores(`_`) or hyphens(`-`).
-> * `perNodeBlockSize` minimum size is 2.
+> * `perNodeBlockSize` minimum size is 1.
 > * `subnet` must be large enough to accommodate at least one `perNodeBlockSize` block of IPs.
 
diff --git a/api/v1alpha1/cidrpool_test.go b/api/v1alpha1/cidrpool_test.go
index 7a7284f..0c6d5d0 100644
--- a/api/v1alpha1/cidrpool_test.go
+++ b/api/v1alpha1/cidrpool_test.go
@@ -185,8 +185,8 @@ var _ = Describe("CIDRPool", func() {
 		},
 		Entry("empty", "", int32(30), false),
 		Entry("invalid value", "aaaa", int32(30), false),
-		Entry("/32", "192.168.1.1/32", int32(32), false),
-		Entry("/128", "2001:db8:3333:4444::0/128", int32(128), false),
+		Entry("/32", "192.168.1.1/32", int32(32), true),
+		Entry("/128", "2001:db8:3333:4444::0/128", int32(128), true),
 		Entry("valid ipv4", "192.168.1.0/24", int32(30), true),
 		Entry("valid ipv6", "2001:db8:3333:4444::0/64", int32(120), true),
 	)
@@ -203,8 +203,8 @@ var _ = Describe("CIDRPool", func() {
 		Entry("not set", "192.168.0.0/16", int32(0), false),
 		Entry("negative", "192.168.0.0/16", int32(-10), false),
 		Entry("larger than CIDR", "192.168.0.0/16", int32(8), false),
-		Entry("smaller than 31 for IPv4 pool", "192.168.0.0/16", int32(32), false),
-		Entry("smaller than 127 for IPv6 pool", "2001:db8:3333:4444::0/64", int32(128), false),
+		Entry("32 for IPv4 pool", "192.168.0.0/16", int32(32), true),
+		Entry("128 for IPv6 pool", "2001:db8:3333:4444::0/64", int32(128), true),
 		Entry("match CIDR prefix size - ipv4", "192.168.0.0/16", int32(16), true),
 		Entry("match CIDR prefix size - ipv6", "2001:db8:3333:4444::0/64", int32(64), true),
 	)
diff --git a/api/v1alpha1/cidrpool_validate.go b/api/v1alpha1/cidrpool_validate.go
index aa4a895..87ae6f4 100644
--- a/api/v1alpha1/cidrpool_validate.go
+++ b/api/v1alpha1/cidrpool_validate.go
@@ -60,12 +60,6 @@ func (r *CIDRPool) validateCIDR() field.ErrorList {
 		return field.ErrorList{field.Invalid(field.NewPath("spec", "cidr"), r.Spec.CIDR, "network prefix has host bits set")}
 	}
 
-	setBits, bitsTotal := network.Mask.Size()
-	if setBits == bitsTotal {
-		return field.ErrorList{field.Invalid(
-			field.NewPath("spec", "cidr"), r.Spec.CIDR, "single IP prefixes are not supported")}
-	}
-
 	if r.Spec.GatewayIndex != nil && *r.Spec.GatewayIndex < 0 {
 		return field.ErrorList{field.Invalid(
 			field.NewPath("spec", "gatewayIndex"), r.Spec.GatewayIndex, "must not be negative")}
@@ -75,9 +69,9 @@ func (r *CIDRPool) validateCIDR() field.ErrorList {
 		return field.ErrorList{field.Invalid(
 			field.NewPath("spec", "perNodeNetworkPrefix"), r.Spec.PerNodeNetworkPrefix, "must not be negative")}
 	}
-
+	setBits, bitsTotal := network.Mask.Size()
 	if r.Spec.PerNodeNetworkPrefix == 0 ||
-		r.Spec.PerNodeNetworkPrefix >= int32(bitsTotal) ||
+		r.Spec.PerNodeNetworkPrefix > int32(bitsTotal) ||
 		r.Spec.PerNodeNetworkPrefix < int32(setBits) {
 		return field.ErrorList{field.Invalid(
 			field.NewPath("spec", "perNodeNetworkPrefix"),
diff --git a/pkg/ipam-node/controllers/cidrpool/cidrpool.go b/pkg/ipam-node/controllers/cidrpool/cidrpool.go
index cb5c92f..3c5cde9 100644
--- a/pkg/ipam-node/controllers/cidrpool/cidrpool.go
+++ b/pkg/ipam-node/controllers/cidrpool/cidrpool.go
@@ -72,7 +72,7 @@ func (r *CIDRPoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
 		}
 		_, nodeSubnet, _ := net.ParseCIDR(alloc.Prefix)
 		startIP := ip.NextIP(nodeSubnet.IP)
-		if ip.IsPointToPointSubnet(nodeSubnet) {
+		if ip.IsPointToPointSubnet(nodeSubnet) || ip.IsSingleIPSubnet(nodeSubnet) {
 			startIP = nodeSubnet.IP
 		}
 		endIP := ip.LastIP(nodeSubnet)
@@ -80,7 +80,7 @@ func (r *CIDRPoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
 		for _, r := range cidrPool.Spec.Routes {
 			routes = append(routes, pool.Route{Dst: r.Dst})
 		}
-		pool := &pool.Pool{
+		p := &pool.Pool{
 			Name:       cidrPool.Name,
 			Subnet:     alloc.Prefix,
 			Gateway:    alloc.Gateway,
@@ -89,9 +89,9 @@ func (r *CIDRPoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
 			Exclusions: buildExclusions(cidrPool.Spec.Exclusions, nodeSubnet, startIP, endIP),
 			Routes:     routes,
 		}
-		pool.DefaultGateway = cidrPool.Spec.DefaultGateway
+		p.DefaultGateway = cidrPool.Spec.DefaultGateway
 		reqLog.Info("CIDRPool config updated", "name", cidrPool.Name)
-		r.PoolManager.UpdatePool(poolKey, pool)
+		r.PoolManager.UpdatePool(poolKey, p)
 		found = true
 		break
 	}
diff --git a/pkg/ipam-node/controllers/cidrpool/cidrpool_test.go b/pkg/ipam-node/controllers/cidrpool/cidrpool_test.go
index 6da5667..92d9e09 100644
--- a/pkg/ipam-node/controllers/cidrpool/cidrpool_test.go
+++ b/pkg/ipam-node/controllers/cidrpool/cidrpool_test.go
@@ -14,13 +14,33 @@
 package controllers
 
 import (
+	"context"
 	"net"
+	"time"
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"k8s.io/utils/ptr"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/cache"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
 
 	ipamv1alpha1 "github.com/Mellanox/nvidia-k8s-ipam/api/v1alpha1"
+	"github.com/Mellanox/nvidia-k8s-ipam/pkg/common"
 	"github.com/Mellanox/nvidia-k8s-ipam/pkg/pool"
+	poolPkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool"
+)
+
+const (
+	testNamespace = "test-ns"
+	testNodeName  = "test-node"
 )
 
 var _ = Describe("CIDRPool", func() {
@@ -67,4 +87,180 @@ var _ = Describe("CIDRPool", func() {
 			},
 		),
 	)
+	Context("Controller tests", Ordered, func() {
+		var (
+			err         error
+			cfg         *rest.Config
+			k8sClient   client.Client
+			testEnv     *envtest.Environment
+			cancelFunc  context.CancelFunc
+			ctx         context.Context
+			poolManager poolPkg.Manager
+		)
+
+		BeforeAll(func() {
+			poolManager = poolPkg.NewManager()
+			By("bootstrapping test environment")
+			testEnv = &envtest.Environment{
+				CRDDirectoryPaths: []string{"../../../../deploy/crds"},
+				CRDInstallOptions: envtest.CRDInstallOptions{
+					ErrorIfPathMissing: true,
+				},
+			}
+			ctx, cancelFunc = context.WithCancel(context.Background())
+			Expect(ipamv1alpha1.AddToScheme(scheme.Scheme)).NotTo(HaveOccurred())
+
+			cfg, err = testEnv.Start()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(cfg).NotTo(BeNil())
+
+			k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
+			Expect(err).NotTo(HaveOccurred())
+			Expect(k8sClient).NotTo(BeNil())
+
+			Expect(k8sClient.Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}})).To(BeNil())
+
+			mgr, err := ctrl.NewManager(cfg, ctrl.Options{
+				Scheme:  scheme.Scheme,
+				Metrics: metricsserver.Options{BindAddress: "0"},
+				Cache: cache.Options{
+					DefaultNamespaces: map[string]cache.Config{testNamespace: {}},
+					ByObject: map[client.Object]cache.ByObject{
+						&corev1.Pod{}: {Namespaces: map[string]cache.Config{cache.AllNamespaces: {}}}},
+				},
+			})
+			Expect(err).NotTo(HaveOccurred())
+			Expect((&CIDRPoolReconciler{
+				PoolManager: poolManager,
+				Client:      mgr.GetClient(),
+				Scheme:      mgr.GetScheme(),
+				NodeName:    testNodeName,
+			}).SetupWithManager(mgr)).NotTo(HaveOccurred())
+
+			go func() {
+				defer GinkgoRecover()
+				Expect(mgr.Start(ctx)).NotTo(HaveOccurred())
+			}()
+		})
+		AfterAll(func() {
+			cancelFunc()
+			By("tearing down the test environment")
+			err := testEnv.Stop()
+			Expect(err).NotTo(HaveOccurred())
+		})
+		AfterEach(func() {
+			cidrPoolList := &ipamv1alpha1.CIDRPoolList{}
+			Expect(k8sClient.List(ctx, cidrPoolList)).NotTo(HaveOccurred())
+			for _, p := range cidrPoolList.Items {
+				Expect(k8sClient.Delete(ctx, &p)).NotTo(HaveOccurred())
+			}
+			Eventually(func(g Gomega) {
+				g.Expect(k8sClient.List(ctx, cidrPoolList)).NotTo(HaveOccurred())
+				g.Expect(cidrPoolList.Items).To(BeEmpty())
+				g.Expect(poolManager.GetPools()).To(BeEmpty())
+			}).WithTimeout(time.Minute).WithPolling(time.Second).Should(Succeed())
+		})
+		It("Valid pool config", func() {
+			p := &ipamv1alpha1.CIDRPool{
+				ObjectMeta: metav1.ObjectMeta{Name: "test-pool", Namespace: testNamespace},
+				Spec: ipamv1alpha1.CIDRPoolSpec{
+					CIDR:                 "10.10.0.0/16",
+					GatewayIndex:         ptr.To[int32](1),
+					PerNodeNetworkPrefix: 24,
+					Exclusions: []ipamv1alpha1.ExcludeRange{
+						{StartIP: "10.10.33.10", EndIP: "10.10.33.20"},
+						{StartIP: "10.10.10.10", EndIP: "10.10.10.20"},
+					},
+					DefaultGateway: true,
+					Routes:         []ipamv1alpha1.Route{{Dst: "5.5.5.5/32"}},
+				},
+			}
+			Expect(k8sClient.Create(ctx, p)).NotTo(HaveOccurred())
+			p.Status.Allocations = []ipamv1alpha1.CIDRPoolAllocation{{
+				NodeName: testNodeName,
+				Gateway:  "10.10.10.1",
+				Prefix:   "10.10.10.0/24",
+			}}
+			Expect(k8sClient.Status().Update(ctx, p)).NotTo(HaveOccurred())
+			Eventually(func(g Gomega) {
+				g.Expect(poolManager.GetPoolByKey(
+					common.GetPoolKey("test-pool", common.PoolTypeCIDRPool))).To(
+					Equal(&pool.Pool{
+						Name:    "test-pool",
+						Subnet:  "10.10.10.0/24",
+						StartIP: "10.10.10.1",
+						EndIP:   "10.10.10.254",
+						Gateway: "10.10.10.1",
+						Exclusions: []pool.ExclusionRange{{
+							StartIP: "10.10.10.10",
+							EndIP:   "10.10.10.20",
+						}},
+						Routes:         []pool.Route{{Dst: "5.5.5.5/32"}},
+						DefaultGateway: true,
+					}))
+			}).WithTimeout(time.Second * 15).WithPolling(time.Second).Should(Succeed())
+		})
+		It("Valid pool config with /32 perNodeNetworkPrefix", func() {
+			p := &ipamv1alpha1.CIDRPool{
+				ObjectMeta: metav1.ObjectMeta{Name: "test-pool", Namespace: testNamespace},
+				Spec: ipamv1alpha1.CIDRPoolSpec{
+					CIDR:                 "10.10.10.0/24",
+					PerNodeNetworkPrefix: 32,
+				},
+			}
+			Expect(k8sClient.Create(ctx, p)).NotTo(HaveOccurred())
+			p.Status.Allocations = []ipamv1alpha1.CIDRPoolAllocation{{
+				NodeName: testNodeName,
+				Prefix:   "10.10.10.12/32",
+			}}
+			Expect(k8sClient.Status().Update(ctx, p)).NotTo(HaveOccurred())
+			Eventually(func(g Gomega) {
+				g.Expect(poolManager.GetPoolByKey(
+					common.GetPoolKey("test-pool", common.PoolTypeCIDRPool))).To(
+					Equal(&pool.Pool{
+						Name:       "test-pool",
+						Subnet:     "10.10.10.12/32",
+						StartIP:    "10.10.10.12",
+						EndIP:      "10.10.10.12",
+						Exclusions: []pool.ExclusionRange{},
+						Routes:     []pool.Route{},
+					}))
+			}).WithTimeout(time.Second * 15).WithPolling(time.Second).Should(Succeed())
+		})
+		It("Update valid config with an invalid one", func() {
+			p := &ipamv1alpha1.CIDRPool{
+				ObjectMeta: metav1.ObjectMeta{Name: "test-pool", Namespace: testNamespace},
+				Spec: ipamv1alpha1.CIDRPoolSpec{
+					CIDR:                 "10.10.0.0/16",
+					GatewayIndex:         ptr.To[int32](1),
+					PerNodeNetworkPrefix: 24,
+				},
+			}
+			Expect(k8sClient.Create(ctx, p)).NotTo(HaveOccurred())
+			p.Status.Allocations = []ipamv1alpha1.CIDRPoolAllocation{{
+				NodeName: testNodeName,
+				Gateway:  "10.10.10.1",
+				Prefix:   "10.10.10.0/24",
+			}}
+			Expect(k8sClient.Status().Update(ctx, p)).NotTo(HaveOccurred())
+			Eventually(func(g Gomega) {
+				g.Expect(poolManager.GetPoolByKey(
+					common.GetPoolKey("test-pool", common.PoolTypeCIDRPool))).To(
+					Equal(&pool.Pool{
+						Name:       "test-pool",
+						Subnet:     "10.10.10.0/24",
+						StartIP:    "10.10.10.1",
+						EndIP:      "10.10.10.254",
+						Gateway:    "10.10.10.1",
+						Exclusions: []pool.ExclusionRange{},
+						Routes:     []pool.Route{},
+					}))
+			}).WithTimeout(time.Second * 15).WithPolling(time.Second).Should(Succeed())
+			p.Spec.GatewayIndex = ptr.To[int32](10)
+			Expect(k8sClient.Update(ctx, p)).NotTo(HaveOccurred())
+			Eventually(func(g Gomega) {
+				g.Expect(poolManager.GetPoolByKey(common.GetPoolKey("test-pool", common.PoolTypeCIDRPool))).To(BeNil())
+			}).WithTimeout(time.Second * 15).WithPolling(time.Second).Should(Succeed())
+		})
+	})
 })
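
Not part of the patch: a minimal standalone sketch of the start/end
derivation that the Reconcile() change above enables. It assumes the ip
helpers used in cidrpool.go live at github.com/Mellanox/nvidia-k8s-ipam/pkg/ip
and that IsSingleIPSubnet exists there once this patch is applied; the
prefixes are made-up examples.

package main

import (
	"fmt"
	"net"

	// Assumed import path for the helpers referenced in cidrpool.go.
	"github.com/Mellanox/nvidia-k8s-ipam/pkg/ip"
)

func main() {
	// For a regular subnet the first usable IP is the one after the network
	// address; /31 (point-to-point) and, with this patch, /32 (single IP)
	// prefixes use the network address itself, so a one-address allocation
	// yields start == end. The same holds for /127 and /128 in IPv6.
	for _, prefix := range []string{"10.10.10.0/24", "10.10.10.0/31", "10.10.10.12/32"} {
		_, nodeSubnet, _ := net.ParseCIDR(prefix)
		startIP := ip.NextIP(nodeSubnet.IP)
		if ip.IsPointToPointSubnet(nodeSubnet) || ip.IsSingleIPSubnet(nodeSubnet) {
			startIP = nodeSubnet.IP
		}
		fmt.Printf("%s: start=%s end=%s\n", prefix, startIP, ip.LastIP(nodeSubnet))
	}
}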