diff --git a/cmd/antrea-controller/controller.go b/cmd/antrea-controller/controller.go
index 1bc6f750d48..2f67a366c8a 100644
--- a/cmd/antrea-controller/controller.go
+++ b/cmd/antrea-controller/controller.go
@@ -117,6 +117,7 @@ func run(o *Options) error {
 	cgv1a2Informer := crdInformerFactory.Crd().V1alpha2().ClusterGroups()
 	cgInformer := crdInformerFactory.Crd().V1alpha3().ClusterGroups()
 	egressInformer := crdInformerFactory.Crd().V1alpha2().Egresses()
+	externalIPPoolInformer := crdInformerFactory.Crd().V1alpha2().ExternalIPPools()
 
 	clusterIdentityAllocator := clusteridentity.NewClusterIdentityAllocator(
 		env.GetAntreaNamespace(),
@@ -226,7 +227,7 @@ func run(o *Options) error {
 
 	var egressController *egress.EgressController
 	if features.DefaultFeatureGate.Enabled(features.Egress) {
-		egressController = egress.NewEgressController(groupEntityIndex, egressInformer, egressGroupStore)
+		egressController = egress.NewEgressController(crdClient, groupEntityIndex, egressInformer, externalIPPoolInformer, egressGroupStore)
 	}
 
 	var traceflowController *traceflow.Controller
diff --git a/pkg/controller/egress/controller.go b/pkg/controller/egress/controller.go
index 77a2746769e..80d533100c1 100644
--- a/pkg/controller/egress/controller.go
+++ b/pkg/controller/egress/controller.go
@@ -15,9 +15,16 @@
 package egress
 
 import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
 	"reflect"
+	"sync"
 	"time"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/tools/cache"
@@ -27,7 +34,10 @@ import (
 	"antrea.io/antrea/pkg/apis/controlplane"
 	egressv1alpha2 "antrea.io/antrea/pkg/apis/crd/v1alpha2"
 	"antrea.io/antrea/pkg/apiserver/storage"
+	clientset "antrea.io/antrea/pkg/client/clientset/versioned"
 	egressinformers "antrea.io/antrea/pkg/client/informers/externalversions/crd/v1alpha2"
+	egresslisters "antrea.io/antrea/pkg/client/listers/crd/v1alpha2"
+	"antrea.io/antrea/pkg/controller/egress/ipallocator"
 	"antrea.io/antrea/pkg/controller/grouping"
 	antreatypes "antrea.io/antrea/pkg/controller/types"
 )
@@ -43,11 +53,37 @@ const (
 	defaultWorkers = 4
 	// egressGroupType is the type used when registering EgressGroups to the grouping interface.
 	egressGroupType grouping.GroupType = "egressGroup"
+
+	externalIPPoolIndex = "externalIPPool"
+)
+
+var (
+	externalIPPoolNotFound = errors.New("ExternalIPPool not found")
 )
 
+// ipAllocation contains the IP and the IP Pool which allocates it.
+type ipAllocation struct {
+	ip     net.IP
+	ipPool string
+}
+
 // EgressController is responsible for synchronizing the EgressGroups selected by Egresses.
 type EgressController struct {
+	crdClient clientset.Interface
+
+	externalIPPoolLister       egresslisters.ExternalIPPoolLister
+	externalIPPoolListerSynced cache.InformerSynced
+
+	// ipAllocatorMap is a map from ExternalIPPool name to MultiIPAllocator.
+	ipAllocatorMap   map[string]ipallocator.MultiIPAllocator
+	ipAllocatorMutex sync.RWMutex
+
+	// ipAllocationMap is a map from Egress name to ipAllocation, which is used to check whether the Egress's IP has
+	// changed and to release the IP after the Egress is removed.
+	ipAllocationMap   map[string]*ipAllocation
+	ipAllocationMutex sync.RWMutex
+
 	egressInformer egressinformers.EgressInformer
+	egressLister   egresslisters.EgressLister
+	egressIndexer  cache.Indexer
 	// egressListerSynced is a function which returns true if the Egresses shared informer has been synced at least once.
 	egressListerSynced cache.InformerSynced
 	// egressGroupStore is the storage where the EgressGroups are stored.
@@ -61,16 +97,24 @@ type EgressController struct {
 }
 
 // NewEgressController returns a new *EgressController.
-func NewEgressController(groupingInterface grouping.Interface,
+func NewEgressController(crdClient clientset.Interface, groupingInterface grouping.Interface,
 	egressInformer egressinformers.EgressInformer,
+	externalIPPoolInformer egressinformers.ExternalIPPoolInformer,
 	egressGroupStore storage.Interface) *EgressController {
 	c := &EgressController{
-		egressInformer:          egressInformer,
-		egressListerSynced:      egressInformer.Informer().HasSynced,
-		egressGroupStore:        egressGroupStore,
-		queue:                   workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "egress"),
-		groupingInterface:       groupingInterface,
-		groupingInterfaceSynced: groupingInterface.HasSynced,
+		crdClient:                  crdClient,
+		egressInformer:             egressInformer,
+		egressLister:               egressInformer.Lister(),
+		egressListerSynced:         egressInformer.Informer().HasSynced,
+		egressIndexer:              egressInformer.Informer().GetIndexer(),
+		externalIPPoolLister:       externalIPPoolInformer.Lister(),
+		externalIPPoolListerSynced: externalIPPoolInformer.Informer().HasSynced,
+		egressGroupStore:           egressGroupStore,
+		queue:                      workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "egress"),
+		groupingInterface:          groupingInterface,
+		groupingInterfaceSynced:    groupingInterface.HasSynced,
+		ipAllocatorMap:             map[string]ipallocator.MultiIPAllocator{},
+		ipAllocationMap:            map[string]*ipAllocation{},
 	}
 	// Add handlers for Group events and Egress events.
 	c.groupingInterface.AddEventHandler(egressGroupType, c.enqueueEgressGroup)
@@ -82,6 +126,25 @@ func NewEgressController(groupingInterface grouping.Interface,
 		},
 		resyncPeriod,
 	)
+	// externalIPPoolIndex will be used to get all Egresses associated with a given ExternalIPPool.
+	egressInformer.Informer().AddIndexers(cache.Indexers{externalIPPoolIndex: func(obj interface{}) ([]string, error) {
+		egress, ok := obj.(*egressv1alpha2.Egress)
+		if !ok {
+			return nil, fmt.Errorf("obj is not Egress: %+v", obj)
+		}
+		if egress.Spec.ExternalIPPool == "" {
+			return nil, nil
+		}
+		return []string{egress.Spec.ExternalIPPool}, nil
+	}})
+	externalIPPoolInformer.Informer().AddEventHandlerWithResyncPeriod(
+		cache.ResourceEventHandlerFuncs{
+			AddFunc:    c.addExternalIPPool,
+			UpdateFunc: c.updateExternalIPPool,
+			DeleteFunc: c.deleteExternalIPPool,
+		},
+		resyncPeriod,
+	)
 	return c
 }
 
@@ -92,17 +155,108 @@ func (c *EgressController) Run(stopCh <-chan struct{}) {
 	klog.Infof("Starting %s", controllerName)
 	defer klog.Infof("Shutting down %s", controllerName)
 
-	cacheSyncs := []cache.InformerSynced{c.egressListerSynced, c.groupingInterfaceSynced}
+	cacheSyncs := []cache.InformerSynced{c.egressListerSynced, c.externalIPPoolListerSynced, c.groupingInterfaceSynced}
 	if !cache.WaitForNamedCacheSync(controllerName, stopCh, cacheSyncs...) {
 		return
 	}
 
+	// Initialize the ipAllocatorMap and ipAllocationMap with the existing ExternalIPPools and Egresses.
+	ipPools, _ := c.externalIPPoolLister.List(labels.Everything())
+	for _, ipPool := range ipPools {
+		c.createOrUpdateIPAllocator(ipPool)
+	}
+	egresses, _ := c.egressLister.List(labels.Everything())
+	for _, egress := range egresses {
+		c.updateIPAllocation(egress)
+	}
+
 	for i := 0; i < defaultWorkers; i++ {
 		go wait.Until(c.egressGroupWorker, time.Second, stopCh)
 	}
 	<-stopCh
 }
 
+// updateIPAllocation sets the EgressIP of an Egress as allocated in the specified ExternalIPPool and records the
+// allocation in ipAllocationMap.
+func (c *EgressController) updateIPAllocation(egress *egressv1alpha2.Egress) {
+	// Ignore the Egress if it is not associated with an ExternalIPPool or doesn't have an EgressIP assigned.
+	if egress.Spec.ExternalIPPool == "" || egress.Spec.EgressIP == "" {
+		return
+	}
+	ipAllocator, exists := c.getIPAllocator(egress.Spec.ExternalIPPool)
+	if !exists {
+		klog.ErrorS(externalIPPoolNotFound, "Failed to allocate EgressIP", "egress", egress)
+		return
+	}
+	ip := net.ParseIP(egress.Spec.EgressIP)
+	err := ipAllocator.AllocateIP(ip)
+	if err != nil {
+		klog.ErrorS(err, "Failed to allocate EgressIP", "egress", egress)
+		return
+	}
+	// Record the valid IP allocation.
+	c.setIPAllocation(egress.Name, ip, egress.Spec.ExternalIPPool)
+}
+
+// createOrUpdateIPAllocator creates or updates the IP allocator based on the provided ExternalIPPool.
+// Currently it's assumed that only new ranges will be added and existing ranges should not be deleted.
+// TODO: Use a validation webhook to ensure it.
+func (c *EgressController) createOrUpdateIPAllocator(ipPool *egressv1alpha2.ExternalIPPool) bool {
+	changed := false
+	c.ipAllocatorMutex.Lock()
+	defer c.ipAllocatorMutex.Unlock()
+
+	existingIPRanges := sets.NewString()
+	multiIPAllocator, exists := c.ipAllocatorMap[ipPool.Name]
+	if !exists {
+		multiIPAllocator = ipallocator.MultiIPAllocator{}
+		changed = true
+	} else {
+		existingIPRanges.Insert(multiIPAllocator.Names()...)
+	}
+
+	for _, ipRange := range ipPool.Spec.IPRanges {
+		ipRangeStr := ipRange.CIDR
+		if ipRangeStr == "" {
+			ipRangeStr = fmt.Sprintf("%s-%s", ipRange.Start, ipRange.End)
+		}
+		// The ipRange is already in the allocator.
+		if existingIPRanges.Has(ipRangeStr) {
+			continue
+		}
+		var ipAllocator *ipallocator.SingleIPAllocator
+		var err error
+		if ipRange.CIDR != "" {
+			ipAllocator, err = ipallocator.NewCIDRAllocator(ipRange.CIDR)
+		} else {
+			ipAllocator, err = ipallocator.NewIPRangeAllocator(ipRange.Start, ipRange.End)
+		}
+		if err != nil {
+			klog.ErrorS(err, "Failed to create IPAllocator", "ipRange", ipRange)
+			continue
+		}
+		multiIPAllocator = append(multiIPAllocator, ipAllocator)
+		changed = true
+	}
+	c.ipAllocatorMap[ipPool.Name] = multiIPAllocator
+	return changed
+}
+
+// deleteIPAllocator deletes the IP allocator of the given IP pool.
+func (c *EgressController) deleteIPAllocator(ipPoolName string) {
+	c.ipAllocatorMutex.Lock()
+	defer c.ipAllocatorMutex.Unlock()
+	delete(c.ipAllocatorMap, ipPoolName)
+}
+
+// getIPAllocator gets the IP allocator of the given IP pool.
+func (c *EgressController) getIPAllocator(ipPoolName string) (ipallocator.MultiIPAllocator, bool) {
+	c.ipAllocatorMutex.RLock()
+	defer c.ipAllocatorMutex.RUnlock()
+	ipAllocator, exists := c.ipAllocatorMap[ipPoolName]
+	return ipAllocator, exists
+}
+
 func (c *EgressController) egressGroupWorker() {
 	for c.processNextEgressGroupWorkItem() {
 	}
 }
@@ -115,7 +269,7 @@ func (c *EgressController) processNextEgressGroupWorkItem() bool {
 	}
 	defer c.queue.Done(key)
 
-	err := c.syncEgressGroup(key.(string))
+	err := c.syncEgress(key.(string))
 	if err != nil {
 		// Put the item back on the workqueue to handle any transient errors.
 		c.queue.AddRateLimited(key)
@@ -128,13 +282,117 @@ func (c *EgressController) processNextEgressGroupWorkItem() bool {
 	return true
 }
 
-func (c *EgressController) syncEgressGroup(key string) error {
+func (c *EgressController) getIPAllocation(egressName string) (net.IP, string, bool) {
+	c.ipAllocationMutex.RLock()
+	defer c.ipAllocationMutex.RUnlock()
+	allocation, exists := c.ipAllocationMap[egressName]
+	if !exists {
+		return nil, "", false
+	}
+	return allocation.ip, allocation.ipPool, true
+}
+
+func (c *EgressController) deleteIPAllocation(egressName string) {
+	c.ipAllocationMutex.Lock()
+	defer c.ipAllocationMutex.Unlock()
+	delete(c.ipAllocationMap, egressName)
+}
+
+func (c *EgressController) setIPAllocation(egressName string, ip net.IP, poolName string) {
+	c.ipAllocationMutex.Lock()
+	defer c.ipAllocationMutex.Unlock()
+	c.ipAllocationMap[egressName] = &ipAllocation{
+		ip:     ip,
+		ipPool: poolName,
+	}
+}
+
+// syncEgressIP is responsible for releasing a stale EgressIP and allocating a new EgressIP for an Egress if applicable.
+func (c *EgressController) syncEgressIP(egress *egressv1alpha2.Egress) (net.IP, error) {
+	prevIP, prevIPPool, exists := c.getIPAllocation(egress.Name)
+	if exists {
+		// The Egress IP doesn't change, do nothing.
+		if prevIP.String() == egress.Spec.EgressIP && prevIPPool == egress.Spec.ExternalIPPool {
+			return prevIP, nil
+		}
+		// Either the EgressIP or the ExternalIPPool changes, release the previous one first.
+		c.releaseEgressIP(egress.Name, prevIP, prevIPPool)
+	}
+
+	// Skip allocating an EgressIP if ExternalIPPool is not specified, and return whatever the user specified.
+	if egress.Spec.ExternalIPPool == "" {
+		return net.ParseIP(egress.Spec.EgressIP), nil
+	}
+
+	ipAllocator, exists := c.getIPAllocator(egress.Spec.ExternalIPPool)
+	if !exists {
+		return nil, fmt.Errorf("ExternalIPPool %s was not found", egress.Spec.ExternalIPPool)
+	}
+
+	var ip net.IP
+	// The user specifies the Egress IP, try to allocate it. If it fails, e.g. because the IP is out of the pool's
+	// range or is already allocated, return the error so the Egress will be retried.
+	if egress.Spec.EgressIP != "" {
+		ip = net.ParseIP(egress.Spec.EgressIP)
+		if err := ipAllocator.AllocateIP(ip); err != nil {
+			return nil, fmt.Errorf("error when allocating IP %v for Egress %s from ExternalIPPool %s: %v", ip, egress.Name, egress.Spec.ExternalIPPool, err)
+		}
+	} else {
+		var err error
+		// The user doesn't specify the Egress IP, allocate one.
+		if ip, err = ipAllocator.AllocateNext(); err != nil {
+			return nil, err
+		}
+		toUpdate := egress.DeepCopy()
+		toUpdate.Spec.EgressIP = ip.String()
+		if _, err = c.crdClient.CrdV1alpha2().Egresses().Update(context.TODO(), toUpdate, metav1.UpdateOptions{}); err != nil {
+			ipAllocator.Release(ip)
+			return nil, fmt.Errorf("error when updating Egress %s: %v", egress.Name, err)
+		}
+	}
+	c.setIPAllocation(egress.Name, ip, egress.Spec.ExternalIPPool)
+	klog.InfoS("Allocated EgressIP", "egress", egress.Name, "ip", ip, "pool", egress.Spec.ExternalIPPool)
+	return ip, nil
+}
+
+func (c *EgressController) releaseEgressIP(egressName string, egressIP net.IP, poolName string) {
+	allocator, exists := c.getIPAllocator(poolName)
+	if !exists {
+		klog.ErrorS(externalIPPoolNotFound, "Failed to release EgressIP", "egress", egressName, "ip", egressIP, "pool", poolName)
+	} else if err := allocator.Release(egressIP); err != nil {
+		klog.ErrorS(err, "Failed to release EgressIP", "egress", egressName, "ip", egressIP, "pool", poolName)
+	}
+	c.deleteIPAllocation(egressName)
+	klog.InfoS("Released EgressIP", "egress", egressName, "ip", egressIP, "pool", poolName)
+}
+
+func (c *EgressController) syncEgress(key string) error {
 	startTime := time.Now()
 	defer func() {
 		d := time.Since(startTime)
-		klog.V(2).Infof("Finished syncing EgressGroup %s. (%v)", key, d)
+		klog.V(2).Infof("Finished syncing Egress %s. (%v)", key, d)
 	}()
 
+	egress, err := c.egressLister.Get(key)
+	if err != nil {
+		// The Egress has been deleted, release its EgressIP if there was one.
+		prevIP, prevIPPool, exists := c.getIPAllocation(key)
+		if !exists {
+			return nil
+		}
+		c.releaseEgressIP(key, prevIP, prevIPPool)
+		return nil
+	}
+
+	// Sync the Egress IP if an ExternalIPPool is specified. Otherwise the Egress IP is supposed to be set and
+	// configured on network interfaces manually.
+	if egress.Spec.ExternalIPPool != "" {
+		if _, err := c.syncEgressIP(egress); err != nil {
+			return err
+		}
+	}
+
 	egressGroupObj, found, _ := c.egressGroupStore.Get(key)
 	if !found {
 		klog.V(2).Infof("EgressGroup %s not found", key)
@@ -209,15 +467,12 @@ func (c *EgressController) updateEgress(old, cur interface{}) {
 	oldEgress := old.(*egressv1alpha2.Egress)
 	curEgress := cur.(*egressv1alpha2.Egress)
 	klog.Infof("Processing Egress %s UPDATE event with selector (%s)", curEgress.Name, curEgress.Spec.AppliedTo)
-	// Do nothing if AppliedTo doesn't change.
 	// TODO: Define custom Equal function to be more efficient.
-	if reflect.DeepEqual(oldEgress.Spec.AppliedTo, curEgress.Spec.AppliedTo) {
-		return
+	if !reflect.DeepEqual(oldEgress.Spec.AppliedTo, curEgress.Spec.AppliedTo) {
+		// Update the group's selector in the grouping interface.
+		groupSelector := antreatypes.NewGroupSelector("", curEgress.Spec.AppliedTo.PodSelector, curEgress.Spec.AppliedTo.NamespaceSelector, nil)
+		c.groupingInterface.AddGroup(egressGroupType, curEgress.Name, groupSelector)
 	}
-
-	// Update the group's selector in the grouping interface.
-	groupSelector := antreatypes.NewGroupSelector("", curEgress.Spec.AppliedTo.PodSelector, curEgress.Spec.AppliedTo.NamespaceSelector, nil)
-	c.groupingInterface.AddGroup(egressGroupType, curEgress.Name, groupSelector)
 	c.queue.Add(curEgress.Name)
 }
 
@@ -228,4 +483,37 @@ func (c *EgressController) deleteEgress(obj interface{}) {
 	c.egressGroupStore.Delete(egress.Name)
 	// Unregister the group from the grouping interface.
 	c.groupingInterface.DeleteGroup(egressGroupType, egress.Name)
+	c.queue.Add(egress.Name)
+}
+
+// addExternalIPPool processes ExternalIPPool ADD events and creates the corresponding IP allocator.
+func (c *EgressController) addExternalIPPool(obj interface{}) {
+	pool := obj.(*egressv1alpha2.ExternalIPPool)
+	klog.InfoS("Processing ExternalIPPool ADD event", "pool", pool.Name, "ipRanges", pool.Spec.IPRanges)
+	c.createOrUpdateIPAllocator(pool)
+	c.enqueueEgresses(pool.Name)
+}
+
+// enqueueEgresses enqueues all Egresses that are associated with the provided ExternalIPPool.
+func (c *EgressController) enqueueEgresses(poolName string) {
+	objects, _ := c.egressIndexer.ByIndex(externalIPPoolIndex, poolName)
+	for _, object := range objects {
+		egress := object.(*egressv1alpha2.Egress)
+		c.queue.Add(egress.Name)
+	}
+}
+
+// updateExternalIPPool processes ExternalIPPool UPDATE events and updates the corresponding IP allocator.
+func (c *EgressController) updateExternalIPPool(_, cur interface{}) {
+	pool := cur.(*egressv1alpha2.ExternalIPPool)
+	klog.InfoS("Processing ExternalIPPool UPDATE event", "pool", pool.Name, "ipRanges", pool.Spec.IPRanges)
+	if c.createOrUpdateIPAllocator(pool) {
+		c.enqueueEgresses(pool.Name)
+	}
+}
+
+// deleteExternalIPPool processes ExternalIPPool DELETE events and deletes the corresponding IP allocator.
+func (c *EgressController) deleteExternalIPPool(obj interface{}) {
+	pool := obj.(*egressv1alpha2.ExternalIPPool)
+	klog.InfoS("Processing ExternalIPPool DELETE event", "pool", pool.Name, "ipRanges", pool.Spec.IPRanges)
+	c.deleteIPAllocator(pool.Name)
 }
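[Reviewer note, not part of the patch] The externalIPPoolIndex registered above is a standard client-go indexer: when an ExternalIPPool event arrives, enqueueEgresses uses ByIndex to find every Egress that references that pool. A minimal, self-contained sketch of the pattern, with a stripped-down stand-in type and made-up names for illustration:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

// egress is a stripped-down stand-in for the Egress CRD, only to show how
// the externalIPPoolIndex behaves.
type egress struct {
	name           string
	externalIPPool string
}

func main() {
	indexer := cache.NewIndexer(
		// Key function: identify each object by its name.
		func(obj interface{}) (string, error) { return obj.(*egress).name, nil },
		cache.Indexers{"externalIPPool": func(obj interface{}) ([]string, error) {
			e := obj.(*egress)
			if e.externalIPPool == "" {
				// Egresses with a statically configured EgressIP are not indexed.
				return nil, nil
			}
			return []string{e.externalIPPool}, nil
		}},
	)
	indexer.Add(&egress{name: "egressA", externalIPPool: "pool1"})
	indexer.Add(&egress{name: "egressB", externalIPPool: "pool1"})
	indexer.Add(&egress{name: "egressC"})

	// enqueueEgresses uses ByIndex like this to find every Egress that
	// references a given ExternalIPPool when the pool is added or updated.
	objs, _ := indexer.ByIndex("externalIPPool", "pool1")
	for _, obj := range objs {
		fmt.Println(obj.(*egress).name) // egressA and egressB, order not guaranteed
	}
}
```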
diff --git a/pkg/controller/egress/controller_test.go b/pkg/controller/egress/controller_test.go
index 9fe2a3c5615..9b4477128e4 100644
--- a/pkg/controller/egress/controller_test.go
+++ b/pkg/controller/egress/controller_test.go
@@ -17,6 +17,8 @@ package egress
 import (
 	"context"
 	"fmt"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"net"
 	"testing"
 	"time"
@@ -55,8 +57,26 @@ var (
 	// Fake Namespaces
 	nsDefault = newNamespace("default", map[string]string{"company": "default"})
 	nsOther   = newNamespace("other", map[string]string{"company": "other"})
+	// Fake ExternalIPPools
+	eipFoo1 = newExternalIPPool("pool1", "1.1.1.0/24", "", "")
+	eipFoo2 = newExternalIPPool("pool2", "", "2.2.2.10", "2.2.2.20")
 )
 
+func newExternalIPPool(name, cidr, start, end string) *v1alpha2.ExternalIPPool {
+	pool := &v1alpha2.ExternalIPPool{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+	}
+	if len(cidr) > 0 {
+		pool.Spec.IPRanges = append(pool.Spec.IPRanges, corev1a2.IPRange{CIDR: cidr})
+	}
+	if len(start) > 0 && len(end) > 0 {
+		pool.Spec.IPRanges = append(pool.Spec.IPRanges, corev1a2.IPRange{Start: start, End: end})
+	}
+	return pool
+}
+
 func newNamespace(name string, labels map[string]string) *v1.Namespace {
 	return &v1.Namespace{
 		ObjectMeta: metav1.ObjectMeta{
@@ -94,19 +114,20 @@ type egressController struct {
 }
 
 // objects is an initial set of K8s objects that is exposed through the client.
-func newController(objects ...runtime.Object) *egressController {
+func newController(objects, crdObjects []runtime.Object) *egressController {
 	client := fake.NewSimpleClientset(objects...)
-	crdClient := fakeversioned.NewSimpleClientset()
+	crdClient := fakeversioned.NewSimpleClientset(crdObjects...)
 	informerFactory := informers.NewSharedInformerFactory(client, resyncPeriod)
 	crdInformerFactory := crdinformers.NewSharedInformerFactory(crdClient, resyncPeriod)
 	egressGroupStore := store.NewEgressGroupStore()
 	egressInformer := crdInformerFactory.Crd().V1alpha2().Egresses()
+	externalIPPoolInformer := crdInformerFactory.Crd().V1alpha2().ExternalIPPools()
 	groupEntityIndex := grouping.NewGroupEntityIndex()
 	groupingController := grouping.NewGroupEntityController(groupEntityIndex, informerFactory.Core().V1().Pods(), informerFactory.Core().V1().Namespaces(), crdInformerFactory.Crd().V1alpha2().ExternalEntities())
-	controller := NewEgressController(groupEntityIndex, egressInformer, egressGroupStore)
+	controller := NewEgressController(crdClient, groupEntityIndex, egressInformer, externalIPPoolInformer, egressGroupStore)
 	return &egressController{
 		controller,
 		client,
@@ -121,6 +142,7 @@ func TestAddEgress(t *testing.T) {
 	tests := []struct {
 		name                 string
 		inputEgress          *v1alpha2.Egress
+		expectedEgressIP     string
 		expectedEgressGroups map[string]*controlplane.EgressGroup
 	}{
 		{
@@ -139,6 +161,7 @@ func TestAddEgress(t *testing.T) {
 					EgressIP: "1.1.1.1",
 				},
 			},
+			expectedEgressIP: "1.1.1.1",
 			expectedEgressGroups: map[string]*controlplane.EgressGroup{
 				node1: {
 					ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
@@ -168,6 +191,7 @@ func TestAddEgress(t *testing.T) {
 					EgressIP: "1.1.1.1",
 				},
 			},
+			expectedEgressIP: "1.1.1.1",
 			expectedEgressGroups: map[string]*controlplane.EgressGroup{
 				node1: {
 					ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
@@ -198,6 +222,39 @@ func TestAddEgress(t *testing.T) {
 					EgressIP: "1.1.1.1",
 				},
 			},
+			expectedEgressIP: "1.1.1.1",
+			expectedEgressGroups: map[string]*controlplane.EgressGroup{
+				node1: {
+					ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
+					GroupMembers: []controlplane.GroupMember{
+						{Pod: &controlplane.PodReference{Name: podFoo1.Name, Namespace: podFoo1.Namespace}},
+						{Pod: &controlplane.PodReference{Name: podFoo1InOtherNamespace.Name, Namespace: podFoo1InOtherNamespace.Namespace}},
+					},
+				},
+				node2: {
+					ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
+					GroupMembers: []controlplane.GroupMember{
+						{Pod: &controlplane.PodReference{Name: podFoo2.Name, Namespace: podFoo2.Namespace}},
+					},
+				},
+				node3: nil,
+			},
+		},
+		{
+			name: "Egress with podSelector and empty EgressIP",
+			inputEgress: &v1alpha2.Egress{
+				ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
+				Spec: v1alpha2.EgressSpec{
+					AppliedTo: corev1a2.AppliedTo{
+						PodSelector: &metav1.LabelSelector{
+							MatchLabels: map[string]string{"app": "foo"},
+						},
+					},
+					EgressIP:       "",
+					ExternalIPPool: eipFoo1.Name,
+				},
+			},
+			expectedEgressIP: "1.1.1.1",
 			expectedEgressGroups: map[string]*controlplane.EgressGroup{
 				node1: {
 					ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
@@ -223,7 +280,9 @@ func TestAddEgress(t *testing.T) {
 			var fakeObjects []runtime.Object
 			fakeObjects = append(fakeObjects, nsDefault, nsOther)
 			fakeObjects = append(fakeObjects, podFoo1, podFoo2, podBar1, podFoo1InOtherNamespace, podUnscheduled, podNonIP)
-			controller := newController(fakeObjects...)
+			var fakeCRDObjects []runtime.Object
+			fakeCRDObjects = append(fakeCRDObjects, eipFoo1)
+			controller := newController(fakeObjects, fakeCRDObjects)
 			controller.informerFactory.Start(stopCh)
 			controller.crdInformerFactory.Start(stopCh)
 			go controller.groupingController.Run(stopCh)
@@ -233,7 +292,7 @@
 			for nodeName, expectedEgressGroup := range tt.expectedEgressGroups {
 				watcher, err := controller.egressGroupStore.Watch(context.TODO(), "", nil, fields.ParseSelectorOrDie(fmt.Sprintf("nodeName=%s", nodeName)))
-				assert.NoError(t, err)
+				require.NoError(t, err)
 				gotEgressGroup := func() *controlplane.EgressGroup {
 					for {
 						select {
@@ -257,6 +316,10 @@
 					assert.ElementsMatch(t, expectedEgressGroup.GroupMembers, gotEgressGroup.GroupMembers)
 				}
 			}
+
+			gotEgress, err := controller.crdClient.CrdV1alpha2().Egresses().Get(context.TODO(), tt.inputEgress.Name, metav1.GetOptions{})
+			require.NoError(t, err)
+			assert.Equal(t, tt.expectedEgressIP, gotEgress.Spec.EgressIP)
 		})
 	}
 }
@@ -264,7 +327,7 @@
 func TestUpdateEgress(t *testing.T) {
 	stopCh := make(chan struct{})
 	defer close(stopCh)
-	controller := newController(nsDefault, podFoo1)
+	controller := newController([]runtime.Object{nsDefault, podFoo1}, []runtime.Object{eipFoo1, eipFoo2})
 	controller.informerFactory.Start(stopCh)
 	controller.crdInformerFactory.Start(stopCh)
 	go controller.groupingController.Run(stopCh)
@@ -278,7 +341,8 @@ func TestUpdateEgress(t *testing.T) {
 				MatchLabels: map[string]string{"app": "foo"},
 			},
 		},
-		EgressIP: "1.1.1.1",
+		EgressIP:       "",
+		ExternalIPPool: eipFoo1.Name,
 	},
 }
 	controller.crdClient.CrdV1alpha2().Egresses().Create(context.TODO(), egress, metav1.CreateOptions{})
@@ -301,6 +365,15 @@ func TestUpdateEgress(t *testing.T) {
 		}
 	}
 
+	gotEgressIP := func() string {
+		var err error
+		egress, err = controller.crdClient.CrdV1alpha2().Egresses().Get(context.TODO(), egress.Name, metav1.GetOptions{})
+		if err != nil {
+			return ""
+		}
+		return egress.Spec.EgressIP
+	}
+
 	assert.Equal(t, &watch.Event{
 		Type: watch.Added,
 		Object: &controlplane.EgressGroup{
@@ -310,6 +383,8 @@ func TestUpdateEgress(t *testing.T) {
 			},
 		},
 	}, getEvent())
+	assert.Equal(t, "1.1.1.1", gotEgressIP())
+	checkExternalIPPoolUsed(t, controller, eipFoo1.Name, 1)
 
 	// Add a Pod matching the Egress's selector and running on this Node.
 	controller.client.CoreV1().Pods(podFoo1InOtherNamespace.Namespace).Create(context.TODO(), podFoo1InOtherNamespace, metav1.CreateOptions{})
@@ -335,12 +410,14 @@ func TestUpdateEgress(t *testing.T) {
 		},
 	}, getEvent())
 
-	// Updating the Egress's spec to make it match no Pods on this Node.
+	// Updating the Egress's spec to make it match no Pods on this Node and use a new ExternalIPPool.
 	egress.Spec.AppliedTo = corev1a2.AppliedTo{
 		PodSelector: &metav1.LabelSelector{
 			MatchLabels: map[string]string{"app": "non-existing-app"},
 		},
 	}
+	egress.Spec.ExternalIPPool = eipFoo2.Name
+	egress.Spec.EgressIP = ""
 	controller.crdClient.CrdV1alpha2().Egresses().Update(context.TODO(), egress, metav1.UpdateOptions{})
 	assert.Equal(t, &watch.Event{
 		Type: watch.Deleted,
@@ -348,4 +425,226 @@ func TestUpdateEgress(t *testing.T) {
 			ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
 		},
 	}, getEvent())
+	assert.Equal(t, "2.2.2.10", gotEgressIP())
+	checkExternalIPPoolUsed(t, controller, eipFoo1.Name, 0)
+	checkExternalIPPoolUsed(t, controller, eipFoo2.Name, 1)
+
+	// Updating the Egress's spec to make it use a non-existing ExternalIPPool. The previous EgressIP should be released.
+	newEIP := newExternalIPPool("newPool", "3.3.3.0/24", "", "")
+	egress.Spec.ExternalIPPool = newEIP.Name
+	egress.Spec.EgressIP = ""
+	controller.crdClient.CrdV1alpha2().Egresses().Update(context.TODO(), egress, metav1.UpdateOptions{})
+	err = wait.PollImmediate(50*time.Millisecond, 1*time.Second, func() (found bool, err error) {
+		_, _, exists := controller.getIPAllocation(egress.Name)
+		return !exists, nil
+	})
+	assert.NoError(t, err, "IP allocation was not deleted after the Egress updated its ExternalIPPool")
+	checkExternalIPPoolUsed(t, controller, eipFoo2.Name, 0)
+
+	// Create the ExternalIPPool. An EgressIP should be allocated.
+	controller.crdClient.CrdV1alpha2().ExternalIPPools().Create(context.TODO(), newEIP, metav1.CreateOptions{})
+	err = wait.PollImmediate(50*time.Millisecond, 1*time.Second, func() (found bool, err error) {
+		_, _, exists := controller.getIPAllocation(egress.Name)
+		return exists, nil
+	})
+	assert.NoError(t, err, "IP was not allocated after the ExternalIPPool was created")
+	checkExternalIPPoolUsed(t, controller, newEIP.Name, 1)
+
+	// Delete the Egress. The EgressIP should be released.
+	controller.crdClient.CrdV1alpha2().Egresses().Delete(context.TODO(), egress.Name, metav1.DeleteOptions{})
+	err = wait.PollImmediate(50*time.Millisecond, 1*time.Second, func() (found bool, err error) {
+		_, _, exists := controller.getIPAllocation(egress.Name)
+		return !exists, nil
+	})
+	assert.NoError(t, err, "IP allocation was not deleted after the Egress was deleted")
+	checkExternalIPPoolUsed(t, controller, newEIP.Name, 0)
+}
+
+func TestSyncEgressIP(t *testing.T) {
+	tests := []struct {
+		name                       string
+		existingEgresses           []*v1alpha2.Egress
+		existingExternalIPPool     *v1alpha2.ExternalIPPool
+		inputEgress                *v1alpha2.Egress
+		expectedEgressIP           string
+		expectedExternalIPPoolUsed int
+		expectErr                  bool
+	}{
+		{
+			name:                   "Egress with empty EgressIP and existing ExternalIPPool",
+			existingExternalIPPool: newExternalIPPool("ipPoolA", "1.1.1.0/24", "", ""),
+			inputEgress: &v1alpha2.Egress{
+				ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
+				Spec: v1alpha2.EgressSpec{
+					EgressIP:       "",
+					ExternalIPPool: "ipPoolA",
+				},
+			},
+			expectedEgressIP:           "1.1.1.1",
+			expectedExternalIPPoolUsed: 1,
+			expectErr:                  false,
+		},
+		{
+			name:                   "Egress with empty EgressIP and non-existing ExternalIPPool",
+			existingExternalIPPool: newExternalIPPool("ipPoolA", "1.1.1.0/24", "", ""),
+			inputEgress: &v1alpha2.Egress{
+				ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
+				Spec: v1alpha2.EgressSpec{
+					EgressIP:       "",
+					ExternalIPPool: "ipPoolB",
+				},
+			},
+			expectedEgressIP:           "",
+			expectedExternalIPPoolUsed: 0,
+			expectErr:                  true,
+		},
+		{
+			name:                   "Egress with non-empty EgressIP and proper ExternalIPPool",
+			existingExternalIPPool: newExternalIPPool("ipPoolA", "1.1.1.0/24", "", ""),
+			inputEgress: &v1alpha2.Egress{
+				ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
+				Spec: v1alpha2.EgressSpec{
+					EgressIP:       "1.1.1.2",
+					ExternalIPPool: "ipPoolA",
+				},
+			},
+			expectedEgressIP:           "1.1.1.2",
+			expectedExternalIPPoolUsed: 1,
+			expectErr:                  false,
+		},
+		{
+			name:                   "Egress with non-empty EgressIP and improper ExternalIPPool",
+			existingExternalIPPool: newExternalIPPool("ipPoolA", "1.1.1.0/24", "", ""),
+			inputEgress: &v1alpha2.Egress{
+				ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
+				Spec: v1alpha2.EgressSpec{
+					EgressIP:       "1.1.2.2",
+					ExternalIPPool: "ipPoolA",
+				},
+			},
+			expectedEgressIP:           "",
+			expectedExternalIPPoolUsed: 0,
+			expectErr:                  true,
+		},
+		{
+			name: "Egress with updated EgressIP",
+			existingEgresses: []*v1alpha2.Egress{
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
+					Spec: v1alpha2.EgressSpec{
+						EgressIP:       "1.1.1.2",
+						ExternalIPPool: "ipPoolA",
+					},
+				},
+			},
+			existingExternalIPPool: newExternalIPPool("ipPoolA", "1.1.1.0/24", "", ""),
+			inputEgress: &v1alpha2.Egress{
+				ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
+				Spec: v1alpha2.EgressSpec{
+					EgressIP:       "1.1.1.3",
+					ExternalIPPool: "ipPoolA",
+				},
+			},
+			expectedEgressIP:           "1.1.1.3",
+			expectedExternalIPPoolUsed: 1,
+			expectErr:                  false,
+		},
+		{
+			name: "Egress with unchanged EgressIP",
+			existingEgresses: []*v1alpha2.Egress{
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
+					Spec: v1alpha2.EgressSpec{
+						EgressIP:       "1.1.1.2",
+						ExternalIPPool: "ipPoolA",
+					},
+				},
+			},
+			existingExternalIPPool: newExternalIPPool("ipPoolA", "1.1.1.0/24", "", ""),
+			inputEgress: &v1alpha2.Egress{
+				ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
+				Spec: v1alpha2.EgressSpec{
+					EgressIP:       "1.1.1.2",
+					ExternalIPPool: "ipPoolA",
+				},
+			},
+			expectedEgressIP:           "1.1.1.2",
+			expectedExternalIPPoolUsed: 1,
+			expectErr:                  false,
+		},
+		{
+			name: "Egress with conflicted EgressIP",
+			existingEgresses: []*v1alpha2.Egress{
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
+					Spec: v1alpha2.EgressSpec{
+						EgressIP:       "1.1.1.2",
+						ExternalIPPool: "ipPoolA",
+					},
+				},
+			},
+			existingExternalIPPool: newExternalIPPool("ipPoolA", "1.1.1.0/24", "", ""),
+			inputEgress: &v1alpha2.Egress{
+				ObjectMeta: metav1.ObjectMeta{Name: "egressB", UID: "uidB"},
+				Spec: v1alpha2.EgressSpec{
+					EgressIP:       "1.1.1.2",
+					ExternalIPPool: "ipPoolA",
+				},
+			},
+			expectedEgressIP:           "",
+			expectedExternalIPPoolUsed: 1,
+			expectErr:                  true,
+		},
+		{
+			name: "Egress with empty ExternalIPPool",
+			existingEgresses: []*v1alpha2.Egress{
+				{
+					ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
+					Spec: v1alpha2.EgressSpec{
+						EgressIP:       "1.1.1.2",
+						ExternalIPPool: "ipPoolA",
+					},
+				},
+			},
+			existingExternalIPPool: newExternalIPPool("ipPoolA", "1.1.1.0/24", "", ""),
+			inputEgress: &v1alpha2.Egress{
+				ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
+				Spec: v1alpha2.EgressSpec{
+					EgressIP: "10.10.10.10",
+				},
+			},
+			expectedEgressIP:           "10.10.10.10",
+			expectedExternalIPPoolUsed: 0,
+			expectErr:                  false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			stopCh := make(chan struct{})
+			defer close(stopCh)
+			var fakeObjects []runtime.Object
+			fakeObjects = append(fakeObjects, tt.inputEgress)
+			controller := newController(nil, fakeObjects)
+			controller.informerFactory.Start(stopCh)
+			controller.crdInformerFactory.Start(stopCh)
+			controller.createOrUpdateIPAllocator(tt.existingExternalIPPool)
+			for _, egress := range tt.existingEgresses {
+				controller.updateIPAllocation(egress)
+			}
+			gotEgressIP, err := controller.syncEgressIP(tt.inputEgress)
+			if tt.expectErr {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+			}
+			assert.Equal(t, net.ParseIP(tt.expectedEgressIP), gotEgressIP)
+			checkExternalIPPoolUsed(t, controller, tt.existingExternalIPPool.Name, tt.expectedExternalIPPoolUsed)
+		})
+	}
+}
+
+func checkExternalIPPoolUsed(t *testing.T, controller *egressController, poolName string, used int) {
+	ipAllocator, exists := controller.getIPAllocator(poolName)
+	require.True(t, exists)
+	assert.Equal(t, used, ipAllocator.Used())
+}
diff --git a/pkg/controller/egress/ipallocator/allocator.go b/pkg/controller/egress/ipallocator/allocator.go
new file mode 100644
index 00000000000..511e205389e
--- /dev/null
+++ b/pkg/controller/egress/ipallocator/allocator.go
@@ -0,0 +1,224 @@
+// Copyright 2021 Antrea Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ipallocator
+
+import (
+	"fmt"
+	"math/big"
+	"net"
+	"sync"
+
+	utilnet "k8s.io/utils/net"
+)
+
+// IPAllocator is the interface implemented by both SingleIPAllocator and MultiIPAllocator.
+type IPAllocator interface {
+	// AllocateIP marks the given IP as allocated.
+	AllocateIP(ip net.IP) error
+	// AllocateNext allocates the next available IP.
+	AllocateNext() (net.IP, error)
+	// Release releases the given IP so that it can be allocated again.
+	Release(ip net.IP) error
+	// Used returns the number of allocated IPs.
+	Used() int
+}
+
+// SingleIPAllocator is responsible for allocating IPs from a contiguous IP range.
+type SingleIPAllocator struct {
+	// The string format of the IP range. e.g. 10.10.10.0/24, or 10.10.10.10-10.10.10.20.
+	ipRangeStr string
+
+	mutex sync.RWMutex
+	// base is a cached version of the start IP in the CIDR range as a *big.Int.
+	base *big.Int
+	// max is the number of usable addresses in the range.
+	max int
+	// allocated is a bit array of the allocated items in the range.
+	allocated *big.Int
+	// count is the number of currently allocated elements in the range.
+	count int
+}
+
+// NewCIDRAllocator creates an IPAllocator based on the provided CIDR.
+func NewCIDRAllocator(cidr string) (*SingleIPAllocator, error) {
+	ip, ipNet, err := net.ParseCIDR(cidr)
+	if err != nil {
+		return nil, err
+	}
+	base := utilnet.BigForIP(ip)
+	// Start from "x.x.x.1".
+	base.Add(base, big.NewInt(1))
+	max := utilnet.RangeSize(ipNet) - 1
+	if ip.To4() != nil {
+		// Don't use the IPv4 network's broadcast address.
+		max--
+	}
+	if max < 0 {
+		return nil, fmt.Errorf("no available IP in %s", cidr)
+	}
+	// In case a big range occupies too much memory, allow at most 65536 IPs for each IP range.
+	if max > 65536 {
+		max = 65536
+	}
+
+	allocator := &SingleIPAllocator{
+		ipRangeStr: cidr,
+		base:       base,
+		max:        int(max),
+		allocated:  big.NewInt(0),
+		count:      0,
+	}
+	return allocator, nil
+}
+
+// NewIPRangeAllocator creates an IPAllocator based on the provided start IP and end IP.
+// The start IP and end IP are inclusive.
+func NewIPRangeAllocator(startIP, endIP string) (*SingleIPAllocator, error) {
+	ipRangeStr := fmt.Sprintf("%s-%s", startIP, endIP)
+	ip := net.ParseIP(startIP)
+	if ip == nil {
+		return nil, fmt.Errorf("invalid start IP %s", startIP)
+	}
+	base := utilnet.BigForIP(ip)
+	ip = net.ParseIP(endIP)
+	if ip == nil {
+		return nil, fmt.Errorf("invalid end IP %s", endIP)
+	}
+	offset := big.NewInt(0).Sub(utilnet.BigForIP(ip), base).Int64()
+	if offset < 0 {
+		return nil, fmt.Errorf("invalid IP range %s", ipRangeStr)
+	}
+	max := offset + 1
+	// In case a big range occupies too much memory, allow at most 65536 IPs for each IP range.
+	if max > 65536 {
+		max = 65536
+	}
+
+	allocator := &SingleIPAllocator{
+		ipRangeStr: ipRangeStr,
+		base:       base,
+		max:        int(max),
+		allocated:  big.NewInt(0),
+		count:      0,
+	}
+	return allocator, nil
+}
+
+// Name returns the string format of the IP range the allocator manages.
+func (a *SingleIPAllocator) Name() string {
+	return a.ipRangeStr
+}
+
+// AllocateIP allocates the specified IP. It returns an error if the IP is not in the range or is already allocated.
+func (a *SingleIPAllocator) AllocateIP(ip net.IP) error {
+	offset := int(big.NewInt(0).Sub(utilnet.BigForIP(ip), a.base).Int64())
+	if offset < 0 || offset >= a.max {
+		return fmt.Errorf("IP %v is not in the IP range", ip)
+	}
+
+	a.mutex.Lock()
+	defer a.mutex.Unlock()
+	if a.allocated.Bit(offset) == 1 {
+		return fmt.Errorf("IP %v is already allocated", ip)
+	}
+	a.allocated.SetBit(a.allocated, offset, 1)
+	a.count++
+	return nil
+}
+
+// AllocateNext allocates an IP from the IP range. It returns an error if no IP is available.
+func (a *SingleIPAllocator) AllocateNext() (net.IP, error) {
+	a.mutex.Lock()
+	defer a.mutex.Unlock()
+	if a.count >= a.max {
+		return nil, fmt.Errorf("no available IP")
+	}
+	for i := 0; i < a.max; i++ {
+		if a.allocated.Bit(i) == 0 {
+			a.allocated.SetBit(a.allocated, i, 1)
+			a.count++
+			ip := utilnet.AddIPOffset(a.base, i)
+			return ip, nil
+		}
+	}
+	return nil, fmt.Errorf("no available IP")
+}
+
+// Release releases the provided IP. It returns an error if the IP is not in the range or is not allocated.
+func (a *SingleIPAllocator) Release(ip net.IP) error {
+	offset := int(big.NewInt(0).Sub(utilnet.BigForIP(ip), a.base).Int64())
+	if offset < 0 || offset >= a.max {
+		return fmt.Errorf("IP %v is not in the IP range", ip)
+	}
+
+	a.mutex.Lock()
+	defer a.mutex.Unlock()
+	if a.allocated.Bit(offset) == 0 {
+		return fmt.Errorf("IP %v is not allocated", ip)
+	}
+	a.allocated.SetBit(a.allocated, offset, 0)
+	a.count--
+	return nil
+}
+
+// Used returns the number of the allocated IPs.
+func (a *SingleIPAllocator) Used() int {
+	a.mutex.RLock()
+	defer a.mutex.RUnlock()
+	return a.count
+}
+
+// MultiIPAllocator is responsible for allocating IPs from multiple contiguous IP ranges.
+type MultiIPAllocator []*SingleIPAllocator
+
+// Names returns the string formats of all the IP ranges the allocator manages.
+func (ma MultiIPAllocator) Names() []string {
+	names := make([]string, 0, len(ma))
+	for _, a := range ma {
+		names = append(names, a.Name())
+	}
+	return names
+}
+
+// AllocateIP allocates the specified IP from the first IP range that accepts it.
+func (ma MultiIPAllocator) AllocateIP(ip net.IP) error {
+	for _, a := range ma {
+		if err := a.AllocateIP(ip); err == nil {
+			return nil
+		}
+	}
+	return fmt.Errorf("cannot allocate IP %v in any range", ip)
+}
+
+// AllocateNext allocates an IP from the first IP range that has one available.
+func (ma MultiIPAllocator) AllocateNext() (net.IP, error) {
+	for _, a := range ma {
+		if ip, err := a.AllocateNext(); err == nil {
+			return ip, nil
+		}
+	}
+	return nil, fmt.Errorf("cannot allocate IP in any range")
+}
+
+// Release releases the provided IP from the IP range that allocated it.
+func (ma MultiIPAllocator) Release(ip net.IP) error {
+	for _, a := range ma {
+		if err := a.Release(ip); err == nil {
+			return nil
+		}
+	}
+	return fmt.Errorf("cannot release IP %v in any range", ip)
+}
+
+// Used returns the total number of allocated IPs in all the IP ranges.
+func (ma MultiIPAllocator) Used() int {
+	used := 0
+	for _, a := range ma {
+		used += a.Used()
+	}
+	return used
+}
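[Reviewer note, not part of the patch] Before the unit tests, a quick sketch of how the ipallocator API above composes, mirroring what the controller does: one SingleIPAllocator per IP range, grouped into a MultiIPAllocator per ExternalIPPool. The ranges and addresses below are illustrative:

```go
package main

import (
	"fmt"
	"net"

	"antrea.io/antrea/pkg/controller/egress/ipallocator"
)

func main() {
	// Build one allocator per IP range, as createOrUpdateIPAllocator does.
	rangeAlloc, err := ipallocator.NewIPRangeAllocator("1.1.1.10", "1.1.1.12")
	if err != nil {
		panic(err)
	}
	cidrAlloc, err := ipallocator.NewCIDRAllocator("10.10.10.0/30")
	if err != nil {
		panic(err)
	}
	// A MultiIPAllocator spans all the ranges of one ExternalIPPool.
	pool := ipallocator.MultiIPAllocator{rangeAlloc, cidrAlloc}

	// AllocateNext hands out the first free IP, trying the ranges in order.
	ip, _ := pool.AllocateNext()
	fmt.Println(ip, pool.Used()) // 1.1.1.10 1

	// AllocateIP claims a specific IP, e.g. a user-specified EgressIP.
	if err := pool.AllocateIP(net.ParseIP("10.10.10.1")); err != nil {
		panic(err)
	}
	fmt.Println(pool.Used()) // 2

	// Release returns an IP to the pool, as releaseEgressIP does when an
	// Egress is deleted or moves to another pool.
	if err := pool.Release(ip); err != nil {
		panic(err)
	}
	fmt.Println(pool.Used()) // 1
}
```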
diff --git a/pkg/controller/egress/ipallocator/allocator_test.go b/pkg/controller/egress/ipallocator/allocator_test.go
new file mode 100644
index 00000000000..5f75f01fcf2
--- /dev/null
+++ b/pkg/controller/egress/ipallocator/allocator_test.go
@@ -0,0 +1,181 @@
+// Copyright 2021 Antrea Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ipallocator
+
+import (
+	"net"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func newCIDRAllocator(cidr string) *SingleIPAllocator {
+	allocator, _ := NewCIDRAllocator(cidr)
+	return allocator
+}
+
+func newIPRangeAllocator(start, end string) *SingleIPAllocator {
+	allocator, _ := NewIPRangeAllocator(start, end)
+	return allocator
+}
+
+func TestAllocateNext(t *testing.T) {
+	tests := []struct {
+		name        string
+		ipAllocator IPAllocator
+		wantNum     int
+		wantFirst   net.IP
+		wantLast    net.IP
+	}{
+		{
+			name:        "IPv4-CIDR-prefix-24",
+			ipAllocator: newCIDRAllocator("10.10.10.0/24"),
+			wantNum:     254,
+			wantFirst:   net.ParseIP("10.10.10.1"),
+			wantLast:    net.ParseIP("10.10.10.254"),
+		},
+		{
+			name:        "IPv4-CIDR-prefix-30",
+			ipAllocator: newCIDRAllocator("10.10.10.128/30"),
+			wantNum:     2,
+			wantFirst:   net.ParseIP("10.10.10.129"),
+			wantLast:    net.ParseIP("10.10.10.130"),
+		},
+		{
+			name:        "IPv4-range",
+			ipAllocator: newIPRangeAllocator("1.1.1.10", "1.1.1.20"),
+			wantNum:     11,
+			wantFirst:   net.ParseIP("1.1.1.10"),
+			wantLast:    net.ParseIP("1.1.1.20"),
+		},
+		{
+			name:        "IPv4-multiple",
+			ipAllocator: MultiIPAllocator{newIPRangeAllocator("1.1.1.10", "1.1.1.20"), newCIDRAllocator("10.10.10.128/30")},
+			wantNum:     13,
+			wantFirst:   net.ParseIP("1.1.1.10"),
+			wantLast:    net.ParseIP("10.10.10.130"),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			gotFirst, err := tt.ipAllocator.AllocateNext()
+			require.NoError(t, err)
+			assert.Equal(t, tt.wantFirst, gotFirst)
+			for i := 0; i < tt.wantNum-2; i++ {
+				_, err := tt.ipAllocator.AllocateNext()
+				require.NoError(t, err)
+			}
+			gotLast, err := tt.ipAllocator.AllocateNext()
+			require.NoError(t, err)
+			assert.Equal(t, tt.wantLast, gotLast)
+
+			_, err = tt.ipAllocator.AllocateNext()
+			require.Error(t, err)
+		})
+	}
+}
+
+func TestAllocateIP(t *testing.T) {
+	tests := []struct {
+		name         string
+		ipAllocator  IPAllocator
+		allocatedIP1 net.IP
+		allocatedIP2 net.IP
+		wantErr1     bool
+		wantErr2     bool
+	}{
+		{
+			name:         "IPv4-duplicate",
+			ipAllocator:  newCIDRAllocator("10.10.10.0/24"),
+			allocatedIP1: net.ParseIP("10.10.10.1"),
+			allocatedIP2: net.ParseIP("10.10.10.1"),
+			wantErr1:     false,
+			wantErr2:     true,
+		},
+		{
+			name:         "IPv4-no-duplicate",
+			ipAllocator:  MultiIPAllocator{newIPRangeAllocator("1.1.1.10", "1.1.1.20"), newCIDRAllocator("10.10.10.128/30")},
+			allocatedIP1: net.ParseIP("1.1.1.10"),
+			allocatedIP2: net.ParseIP("10.10.10.129"),
+			wantErr1:     false,
+			wantErr2:     false,
+		},
+		{
+			name:         "IPv4-out-of-scope",
+			ipAllocator:  MultiIPAllocator{newIPRangeAllocator("1.1.1.10", "1.1.1.20"), newCIDRAllocator("10.10.10.128/30")},
+			allocatedIP1: net.ParseIP("1.1.1.21"),
+			allocatedIP2: net.ParseIP("10.10.10.127"),
+			wantErr1:     true,
+			wantErr2:     true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := tt.ipAllocator.AllocateIP(tt.allocatedIP1)
+			if tt.wantErr1 {
+				require.Error(t, err)
+			} else {
+				require.NoError(t, err)
+			}
+			err = tt.ipAllocator.AllocateIP(tt.allocatedIP2)
+			if tt.wantErr2 {
+				require.Error(t, err)
+			} else {
+				require.NoError(t, err)
+			}
+		})
+	}
+}
+
+func TestAllocateRelease(t *testing.T) {
+	tests := []struct {
+		name        string
+		ipAllocator IPAllocator
+	}{
+		{
+			name:        "IPv4-single",
+			ipAllocator: newCIDRAllocator("10.10.10.0/24"),
+		},
+		{
+			name: "IPv4-multiple",
+			ipAllocator: MultiIPAllocator{newIPRangeAllocator("1.1.1.10", "1.1.1.20"), newCIDRAllocator("10.10.10.128/30")},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got1, err := tt.ipAllocator.AllocateNext()
+			require.NoError(t, err)
+			assert.Equal(t, 1, tt.ipAllocator.Used())
+
+			err = tt.ipAllocator.Release(got1)
+			require.NoError(t, err)
+			assert.Equal(t, 0, tt.ipAllocator.Used())
+
+			err = tt.ipAllocator.Release(got1)
+			require.Error(t, err)
+
+			got2, err := tt.ipAllocator.AllocateNext()
+			require.NoError(t, err)
+			assert.Equal(t, got1, got2)
+		})
+	}
+}
+
+func TestName(t *testing.T) {
+	ma := MultiIPAllocator{newIPRangeAllocator("1.1.1.10", "1.1.1.20"), newCIDRAllocator("10.10.10.128/30")}
+	assert.Equal(t, []string{"1.1.1.10-1.1.1.20", "10.10.10.128/30"}, ma.Names())
+}
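[Reviewer note, not part of the patch] One subtlety in NewCIDRAllocator worth keeping in mind when sizing pools: allocation starts at the address after the network address and, for IPv4, the broadcast address is excluded, so a /24 yields 254 allocatable IPs; any single range is also capped at 65536 entries. A quick check under those rules (illustrative CIDR):

```go
package main

import (
	"fmt"

	"antrea.io/antrea/pkg/controller/egress/ipallocator"
)

func main() {
	// 10.0.0.0/24: 10.0.0.0 (network) and 10.0.0.255 (broadcast) are excluded,
	// leaving 10.0.0.1 through 10.0.0.254.
	a, err := ipallocator.NewCIDRAllocator("10.0.0.0/24")
	if err != nil {
		panic(err)
	}
	// Exhaust the range to count the usable addresses.
	for {
		if _, err := a.AllocateNext(); err != nil {
			break
		}
	}
	fmt.Println(a.Used()) // 254
}
```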