diff --git a/config/rbac/aggregation_role.yaml b/config/rbac/aggregation_role.yaml new file mode 100644 index 00000000000..07102e208b6 --- /dev/null +++ b/config/rbac/aggregation_role.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: aggregated-manager-role +aggregationRule: + clusterRoleSelectors: + - matchLabels: + cluster.x-k8s.io/aggregate-to-capz-manager: "true" +rules: [] \ No newline at end of file diff --git a/config/rbac/capz_manager_role_patch.yaml b/config/rbac/capz_manager_role_patch.yaml new file mode 100644 index 00000000000..1704966d935 --- /dev/null +++ b/config/rbac/capz_manager_role_patch.yaml @@ -0,0 +1,6 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: manager-role + labels: + cluster.x-k8s.io/aggregate-to-capz-manager: "true" diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index e82521ffdcc..c9cfa1c0782 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -2,7 +2,11 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - role.yaml +- aggregation_role.yaml - role_binding.yaml - service_account.yaml - leader_election_role.yaml - leader_election_role_binding.yaml + +patches: +- path: capz_manager_role_patch.yaml \ No newline at end of file diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml index 5a95f66d6f8..c2d7565e283 100644 --- a/config/rbac/role_binding.yaml +++ b/config/rbac/role_binding.yaml @@ -5,7 +5,7 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: manager-role + name: aggregated-manager-role subjects: - kind: ServiceAccount name: manager diff --git a/exp/controllers/azuremachinepool_controller.go b/exp/controllers/azuremachinepool_controller.go index b4568551c27..49b621273a4 100644 --- a/exp/controllers/azuremachinepool_controller.go +++ b/exp/controllers/azuremachinepool_controller.go @@ -18,12 +18,15 @@ 
package controllers import ( "context" + "reflect" "time" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" @@ -60,6 +63,7 @@ type ( Timeouts reconciler.Timeouts WatchFilterValue string createAzureMachinePoolService azureMachinePoolServiceCreator + BootstrapConfigGVK schema.GroupVersionKind } // annotationReaderWriter provides an interface to read and write annotations. @@ -72,12 +76,20 @@ type ( type azureMachinePoolServiceCreator func(machinePoolScope *scope.MachinePoolScope) (*azureMachinePoolService, error) // NewAzureMachinePoolReconciler returns a new AzureMachinePoolReconciler instance. -func NewAzureMachinePoolReconciler(client client.Client, recorder record.EventRecorder, timeouts reconciler.Timeouts, watchFilterValue string) *AzureMachinePoolReconciler { +func NewAzureMachinePoolReconciler(client client.Client, recorder record.EventRecorder, timeouts reconciler.Timeouts, watchFilterValue, bootstrapConfigGVK string) *AzureMachinePoolReconciler { + gvk := schema.FromAPIVersionAndKind(kubeadmv1.GroupVersion.String(), reflect.TypeOf((*kubeadmv1.KubeadmConfig)(nil)).Elem().Name()) + userGVK, _ := schema.ParseKindArg(bootstrapConfigGVK) + + if userGVK != nil { + gvk = *userGVK + } + ampr := &AzureMachinePoolReconciler{ - Client: client, - Recorder: recorder, - Timeouts: timeouts, - WatchFilterValue: watchFilterValue, + Client: client, + Recorder: recorder, + Timeouts: timeouts, + WatchFilterValue: watchFilterValue, + BootstrapConfigGVK: gvk, } ampr.createAzureMachinePoolService = newAzureMachinePoolService @@ -108,6 +120,8 @@ func (ampr *AzureMachinePoolReconciler) SetupWithManager(ctx context.Context, mg return errors.Wrapf(err, "failed to create 
AzureManagedCluster to AzureMachinePools mapper") } + config := &metav1.PartialObjectMetadata{} + config.SetGroupVersionKind(ampr.BootstrapConfigGVK) c, err := ctrl.NewControllerManagedBy(mgr). WithOptions(options.Options). For(&infrav1exp.AzureMachinePool{}). @@ -127,10 +141,10 @@ func (ampr *AzureMachinePoolReconciler) SetupWithManager(ctx context.Context, mg &infrav1.AzureManagedControlPlane{}, handler.EnqueueRequestsFromMapFunc(azureManagedControlPlaneMapper), ). - // watch for changes in KubeadmConfig to sync bootstrap token + // watch for changes in KubeadmConfig (or any BootstrapConfig) to sync bootstrap token Watches( - &kubeadmv1.KubeadmConfig{}, - handler.EnqueueRequestsFromMapFunc(KubeadmConfigToInfrastructureMapFunc(ctx, ampr.Client, log)), + config, + handler.EnqueueRequestsFromMapFunc(BootstrapperConfigToInfrastructureMapFunc(ctx, ampr.Client, log)), builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), ). Build(r) diff --git a/exp/controllers/azuremachinepool_controller_test.go b/exp/controllers/azuremachinepool_controller_test.go index ba810458dcf..d64c9cebb6f 100644 --- a/exp/controllers/azuremachinepool_controller_test.go +++ b/exp/controllers/azuremachinepool_controller_test.go @@ -44,7 +44,7 @@ var _ = Describe("AzureMachinePoolReconciler", func() { Context("Reconcile an AzureMachinePool", func() { It("should not error with minimal set up", func() { reconciler := NewAzureMachinePoolReconciler(testEnv, testEnv.GetEventRecorderFor("azuremachinepool-reconciler"), - reconciler.Timeouts{}, "") + reconciler.Timeouts{}, "", "") By("Calling reconcile") instance := &infrav1exp.AzureMachinePool{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} result, err := reconciler.Reconcile(context.Background(), ctrl.Request{ @@ -79,7 +79,7 @@ func TestAzureMachinePoolReconcilePaused(t *testing.T) { recorder := record.NewFakeRecorder(1) - reconciler := NewAzureMachinePoolReconciler(c, recorder, reconciler.Timeouts{}, "") + reconciler := 
NewAzureMachinePoolReconciler(c, recorder, reconciler.Timeouts{}, "", "") name := test.RandomName("paused", 10) namespace := "default" diff --git a/exp/controllers/helpers.go b/exp/controllers/helpers.go index dfa908b0b7d..91191fc5a60 100644 --- a/exp/controllers/helpers.go +++ b/exp/controllers/helpers.go @@ -32,7 +32,6 @@ import ( infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - kubeadmv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/util" ctrl "sigs.k8s.io/controller-runtime" @@ -372,28 +371,22 @@ func MachinePoolMachineHasStateOrVersionChange(logger logr.Logger) predicate.Fun } } -// KubeadmConfigToInfrastructureMapFunc returns a handler.ToRequestsFunc that watches for KubeadmConfig events and returns. -func KubeadmConfigToInfrastructureMapFunc(ctx context.Context, c client.Client, log logr.Logger) handler.MapFunc { +// BootstrapperConfigToInfrastructureMapFunc returns a handler.MapFunc that maps bootstrap config events to reconcile requests for the corresponding AzureMachinePool.
+func BootstrapperConfigToInfrastructureMapFunc(ctx context.Context, c client.Client, log logr.Logger) handler.MapFunc { return func(ctx context.Context, o client.Object) []reconcile.Request { ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultMappingTimeout) defer cancel() - kc, ok := o.(*kubeadmv1.KubeadmConfig) - if !ok { - log.V(4).Info("attempt to map incorrect type", "type", fmt.Sprintf("%T", o)) - return nil - } - mpKey := client.ObjectKey{ - Namespace: kc.Namespace, - Name: kc.Name, + Namespace: o.GetNamespace(), + Name: o.GetName(), } // fetch MachinePool to get reference mp := &expv1.MachinePool{} if err := c.Get(ctx, mpKey, mp); err != nil { if !apierrors.IsNotFound(err) { - log.Error(err, "failed to fetch MachinePool for KubeadmConfig") + log.Error(err, "failed to fetch MachinePool to validate Bootstrap.ConfigRef") } return []reconcile.Request{} } @@ -404,8 +397,8 @@ func KubeadmConfigToInfrastructureMapFunc(ctx context.Context, c client.Client, return []reconcile.Request{} } sameKind := ref.Kind != o.GetObjectKind().GroupVersionKind().Kind - sameName := ref.Name == kc.Name - sameNamespace := ref.Namespace == kc.Namespace + sameName := ref.Name == o.GetName() + sameNamespace := ref.Namespace == o.GetNamespace() if !sameKind || !sameName || !sameNamespace { log.V(4).Info("Bootstrap.ConfigRef does not match", "sameKind", sameKind, @@ -417,10 +410,7 @@ func KubeadmConfigToInfrastructureMapFunc(ctx context.Context, c client.Client, return []reconcile.Request{} } - key := client.ObjectKey{ - Namespace: kc.Namespace, - Name: kc.Name, - } + key := client.ObjectKeyFromObject(o) log.V(4).Info("adding KubeadmConfig to watch", "key", key) return []reconcile.Request{ diff --git a/exp/controllers/suite_test.go b/exp/controllers/suite_test.go index 6b43bc2ad87..5a2b710485d 100644 --- a/exp/controllers/suite_test.go +++ b/exp/controllers/suite_test.go @@ -54,7 +54,7 @@ var _ = BeforeSuite(func() { ctx = log.IntoContext(ctx, logr.New(testEnv.Log)) 
Expect(NewAzureMachinePoolReconciler(testEnv, testEnv.GetEventRecorderFor("azuremachinepool-reconciler"), - reconciler.Timeouts{}, "").SetupWithManager(ctx, testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed()) + reconciler.Timeouts{}, "", "").SetupWithManager(ctx, testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed()) Expect(NewAzureMachinePoolMachineController(testEnv, testEnv.GetEventRecorderFor("azuremachinepoolmachine-reconciler"), reconciler.Timeouts{}, "").SetupWithManager(ctx, testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed()) diff --git a/main.go b/main.go index 4d22db923ec..70ff1fb14a8 100644 --- a/main.go +++ b/main.go @@ -105,6 +105,7 @@ var ( azureMachineConcurrency int azureMachinePoolConcurrency int azureMachinePoolMachineConcurrency int + azureBootstrapConfigGVK string debouncingTimer time.Duration syncPeriod time.Duration healthAddr string @@ -253,6 +254,12 @@ func InitFlags(fs *pflag.FlagSet) { "Enable tracing to the opentelemetry-collector service in the same namespace.", ) + fs.StringVar(&azureBootstrapConfigGVK, + "bootstrap-config-gvk", + "", + "Provide fully qualified GVK string to override default kubeadm config watch source, in the form of Kind.version.group (default: KubeadmConfig.v1beta1.bootstrap.cluster.x-k8s.io)", + ) + flags.AddDiagnosticsOptions(fs, &diagnosticsOptions) feature.MutableGates.AddFlag(fs) @@ -426,6 +433,7 @@ func registerControllers(ctx context.Context, mgr manager.Manager) { mgr.GetEventRecorderFor("azuremachinepool-reconciler"), timeouts, watchFilterValue, + azureBootstrapConfigGVK, ).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureMachinePoolConcurrency}, Cache: mpCache}); err != nil { setupLog.Error(err, "unable to create controller", "controller", "AzureMachinePool") os.Exit(1)