Decouple MachinePool reconciler from Kubeadm
Signed-off-by: Danil-Grigorev <danil.grigorev@suse.com>
Danil-Grigorev committed Jun 20, 2024
1 parent a52056d commit befc216
Showing 9 changed files with 62 additions and 30 deletions.
10 changes: 10 additions & 0 deletions config/rbac/aggregation_role.yaml
@@ -0,0 +1,10 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: aggregated-manager-role
aggregationRule:
clusterRoleSelectors:
- matchLabels:
cluster.x-k8s.io/aggregate-to-capz-manager: "true"
rules: []
6 changes: 6 additions & 0 deletions config/rbac/capz_manager_role_patch.yaml
@@ -0,0 +1,6 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: manager-role
labels:
cluster.x-k8s.io/aggregate-to-capz-manager: "true"
4 changes: 4 additions & 0 deletions config/rbac/kustomization.yaml
@@ -2,7 +2,11 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- role.yaml
- aggregation_role.yaml
- role_binding.yaml
- service_account.yaml
- leader_election_role.yaml
- leader_election_role_binding.yaml

patches:
- path: capz_manager_role_patch.yaml
2 changes: 1 addition & 1 deletion config/rbac/role_binding.yaml
@@ -5,7 +5,7 @@ metadata:
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: manager-role
name: aggregated-manager-role
subjects:
- kind: ServiceAccount
name: manager
30 changes: 22 additions & 8 deletions exp/controllers/azuremachinepool_controller.go
@@ -18,12 +18,15 @@ package controllers

import (
"context"
"reflect"
"time"

"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/tools/record"
infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
@@ -60,6 +63,7 @@ type (
Timeouts reconciler.Timeouts
WatchFilterValue string
createAzureMachinePoolService azureMachinePoolServiceCreator
BootstrapConfigGVK schema.GroupVersionKind
}

// annotationReaderWriter provides an interface to read and write annotations.
@@ -72,12 +76,20 @@ type (
type azureMachinePoolServiceCreator func(machinePoolScope *scope.MachinePoolScope) (*azureMachinePoolService, error)

// NewAzureMachinePoolReconciler returns a new AzureMachinePoolReconciler instance.
func NewAzureMachinePoolReconciler(client client.Client, recorder record.EventRecorder, timeouts reconciler.Timeouts, watchFilterValue string) *AzureMachinePoolReconciler {
func NewAzureMachinePoolReconciler(client client.Client, recorder record.EventRecorder, timeouts reconciler.Timeouts, watchFilterValue, bootstrapConfigGVK string) *AzureMachinePoolReconciler {
gvk := schema.FromAPIVersionAndKind(kubeadmv1.GroupVersion.String(), reflect.TypeOf((*kubeadmv1.KubeadmConfig)(nil)).Elem().Name())
userGVK, _ := schema.ParseKindArg(bootstrapConfigGVK)

if userGVK != nil {
gvk = *userGVK

}

ampr := &AzureMachinePoolReconciler{
Client: client,
Recorder: recorder,
Timeouts: timeouts,
WatchFilterValue: watchFilterValue,
Client: client,
Recorder: recorder,
Timeouts: timeouts,
WatchFilterValue: watchFilterValue,
BootstrapConfigGVK: gvk,
}

ampr.createAzureMachinePoolService = newAzureMachinePoolService
@@ -108,6 +120,8 @@ func (ampr *AzureMachinePoolReconciler) SetupWithManager(ctx context.Context, mg
return errors.Wrapf(err, "failed to create AzureManagedCluster to AzureMachinePools mapper")
}

config := &metav1.PartialObjectMetadata{}
config.SetGroupVersionKind(ampr.BootstrapConfigGVK)
c, err := ctrl.NewControllerManagedBy(mgr).
WithOptions(options.Options).
For(&infrav1exp.AzureMachinePool{}).
@@ -127,10 +141,10 @@ func (ampr *AzureMachinePoolReconciler) SetupWithManager(ctx context.Context, mg
&infrav1.AzureManagedControlPlane{},
handler.EnqueueRequestsFromMapFunc(azureManagedControlPlaneMapper),
).
// watch for changes in KubeadmConfig to sync bootstrap token
// watch for changes in KubeadmConfig (or any BootstrapConfig) to sync bootstrap token
Watches(
&kubeadmv1.KubeadmConfig{},
handler.EnqueueRequestsFromMapFunc(KubeadmConfigToInfrastructureMapFunc(ctx, ampr.Client, log)),
config,
handler.EnqueueRequestsFromMapFunc(BootstrapperConfigToInfrastructureMapFunc(ctx, ampr.Client, log)),
builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}),
).
Build(r)
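Instead of importing the kubeadm bootstrap API and watching the concrete `*kubeadmv1.KubeadmConfig` type, the controller now watches a `metav1.PartialObjectMetadata` stamped with the configured GVK, so only object metadata is decoded and cached. A minimal sketch of that controller-runtime pattern, with illustrative names that are not part of the CAPZ code:

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// setupBootstrapConfigWatch wires a metadata-only watch for an arbitrary
// bootstrap config GVK. Only object metadata is decoded, so the bootstrap
// provider's Go types never have to be compiled into the binary.
func setupBootstrapConfigWatch(mgr ctrl.Manager, forObj client.Object, gvk schema.GroupVersionKind,
	mapFn handler.MapFunc, r reconcile.Reconciler) error {
	// A placeholder object carrying only TypeMeta + ObjectMeta for the chosen GVK.
	config := &metav1.PartialObjectMetadata{}
	config.SetGroupVersionKind(gvk)

	return ctrl.NewControllerManagedBy(mgr).
		For(forObj).
		Watches(
			config,
			handler.EnqueueRequestsFromMapFunc(mapFn),
			// Re-enqueue only when the config actually changes.
			builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}),
		).
		Complete(r)
}
```

With only metadata cached for that GVK, swapping bootstrap providers becomes a runtime flag change rather than a compile-time dependency on their API types.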
4 changes: 2 additions & 2 deletions exp/controllers/azuremachinepool_controller_test.go
@@ -44,7 +44,7 @@ var _ = Describe("AzureMachinePoolReconciler", func() {
Context("Reconcile an AzureMachinePool", func() {
It("should not error with minimal set up", func() {
reconciler := NewAzureMachinePoolReconciler(testEnv, testEnv.GetEventRecorderFor("azuremachinepool-reconciler"),
reconciler.Timeouts{}, "")
reconciler.Timeouts{}, "", "")
By("Calling reconcile")
instance := &infrav1exp.AzureMachinePool{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}}
result, err := reconciler.Reconcile(context.Background(), ctrl.Request{
@@ -79,7 +79,7 @@ func TestAzureMachinePoolReconcilePaused(t *testing.T) {

recorder := record.NewFakeRecorder(1)

reconciler := NewAzureMachinePoolReconciler(c, recorder, reconciler.Timeouts{}, "")
reconciler := NewAzureMachinePoolReconciler(c, recorder, reconciler.Timeouts{}, "", "")
name := test.RandomName("paused", 10)
namespace := "default"

26 changes: 8 additions & 18 deletions exp/controllers/helpers.go
@@ -32,7 +32,6 @@ import (
infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
"sigs.k8s.io/cluster-api-provider-azure/util/reconciler"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
kubeadmv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
ctrl "sigs.k8s.io/controller-runtime"
@@ -372,28 +371,22 @@ func MachinePoolMachineHasStateOrVersionChange(logger logr.Logger) predicate.Fun
}
}

// KubeadmConfigToInfrastructureMapFunc returns a handler.ToRequestsFunc that watches for KubeadmConfig events and returns.
func KubeadmConfigToInfrastructureMapFunc(ctx context.Context, c client.Client, log logr.Logger) handler.MapFunc {
// BootstrapperConfigToInfrastructureMapFunc returns a handler.ToRequestsFunc that watches for <Bootstrap>Config events and returns.
func BootstrapperConfigToInfrastructureMapFunc(ctx context.Context, c client.Client, log logr.Logger) handler.MapFunc {
return func(ctx context.Context, o client.Object) []reconcile.Request {
ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultMappingTimeout)
defer cancel()

kc, ok := o.(*kubeadmv1.KubeadmConfig)
if !ok {
log.V(4).Info("attempt to map incorrect type", "type", fmt.Sprintf("%T", o))
return nil
}

mpKey := client.ObjectKey{
Namespace: kc.Namespace,
Name: kc.Name,
Namespace: o.GetNamespace(),
Name: o.GetName(),

}

// fetch MachinePool to get reference
mp := &expv1.MachinePool{}
if err := c.Get(ctx, mpKey, mp); err != nil {
if !apierrors.IsNotFound(err) {
log.Error(err, "failed to fetch MachinePool for KubeadmConfig")
log.Error(err, "failed to fetch MachinePool to validate Bootstrap.ConfigRef")

}
return []reconcile.Request{}
}
@@ -404,8 +397,8 @@ func KubeadmConfigToInfrastructureMapFunc(ctx context.Context, c client.Client,
return []reconcile.Request{}
}
sameKind := ref.Kind != o.GetObjectKind().GroupVersionKind().Kind
sameName := ref.Name == kc.Name
sameNamespace := ref.Namespace == kc.Namespace
sameName := ref.Name == o.GetName()
sameNamespace := ref.Namespace == o.GetNamespace()

if !sameKind || !sameName || !sameNamespace {
log.V(4).Info("Bootstrap.ConfigRef does not match",
"sameKind", sameKind,
@@ -417,10 +410,7 @@ func KubeadmConfigToInfrastructureMapFunc(ctx context.Context, c client.Client,
return []reconcile.Request{}
}

key := client.ObjectKey{
Namespace: kc.Namespace,
Name: kc.Name,
}
key := client.ObjectKeyFromObject(o)

log.V(4).Info("adding KubeadmConfig to watch", "key", key)

return []reconcile.Request{
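The rewritten mapper no longer type-asserts to `*kubeadmv1.KubeadmConfig`; it uses only what `client.Object` exposes, which is exactly what metadata-only events carry. A trimmed-down sketch of that shape, assuming the same-name lookup and `Bootstrap.ConfigRef` check shown in the diff; names are illustrative and CAPZ's logging is omitted:

```go
package example

import (
	"context"

	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// bootstrapConfigToMachinePool maps a bootstrap config event to a reconcile
// request. It reads only metadata from the event object, so it works with
// PartialObjectMetadata for any configured bootstrap config GVK.
func bootstrapConfigToMachinePool(c client.Client) handler.MapFunc {
	return func(ctx context.Context, o client.Object) []reconcile.Request {
		// Look up the MachinePool that shares the config's namespace/name.
		mp := &expv1.MachinePool{}
		if err := c.Get(ctx, client.ObjectKeyFromObject(o), mp); err != nil {
			return nil // not found or transient error: nothing to enqueue
		}

		// Only enqueue if this object really is the pool's bootstrap config.
		ref := mp.Spec.Template.Spec.Bootstrap.ConfigRef
		if ref == nil ||
			ref.Kind != o.GetObjectKind().GroupVersionKind().Kind ||
			ref.Name != o.GetName() ||
			ref.Namespace != o.GetNamespace() {
			return nil
		}

		return []reconcile.Request{{NamespacedName: client.ObjectKeyFromObject(o)}}
	}
}
```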
2 changes: 1 addition & 1 deletion exp/controllers/suite_test.go
@@ -54,7 +54,7 @@ var _ = BeforeSuite(func() {
ctx = log.IntoContext(ctx, logr.New(testEnv.Log))

Expect(NewAzureMachinePoolReconciler(testEnv, testEnv.GetEventRecorderFor("azuremachinepool-reconciler"),
reconciler.Timeouts{}, "").SetupWithManager(ctx, testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed())
reconciler.Timeouts{}, "", "").SetupWithManager(ctx, testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed())

Expect(NewAzureMachinePoolMachineController(testEnv, testEnv.GetEventRecorderFor("azuremachinepoolmachine-reconciler"),
reconciler.Timeouts{}, "").SetupWithManager(ctx, testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed())
8 changes: 8 additions & 0 deletions main.go
@@ -105,6 +105,7 @@ var (
azureMachineConcurrency int
azureMachinePoolConcurrency int
azureMachinePoolMachineConcurrency int
azureBootrapConfigGVK string
debouncingTimer time.Duration
syncPeriod time.Duration
healthAddr string
@@ -253,6 +254,12 @@ func InitFlags(fs *pflag.FlagSet) {
"Enable tracing to the opentelemetry-collector service in the same namespace.",
)

fs.StringVar(&azureBootrapConfigGVK,
"bootstrap-config-gvk",
"",
"Provide fully qualified GVK string to override default kubeadm config watch source, in the form of Kind.version.group (default: KubeadmConfig.v1beta1.bootstrap.cluster.x-k8s.io)",
)

flags.AddDiagnosticsOptions(fs, &diagnosticsOptions)

feature.MutableGates.AddFlag(fs)
@@ -426,6 +433,7 @@ func registerControllers(ctx context.Context, mgr manager.Manager) {
mgr.GetEventRecorderFor("azuremachinepool-reconciler"),
timeouts,
watchFilterValue,
azureBootrapConfigGVK,
).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureMachinePoolConcurrency}, Cache: mpCache}); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "AzureMachinePool")
os.Exit(1)
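The new flag value is parsed with `schema.ParseKindArg`, which expects the `Kind.version.group` form and returns a nil `*GroupVersionKind` for anything with fewer than two dots; in that case the reconciler keeps the kubeadm default shown above. A small standalone check of the format; the `RKE2Config` value is only an illustration of an alternative bootstrap provider's config kind, not something this commit wires up:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Default watch target when --bootstrap-config-gvk is left empty.
	def, _ := schema.ParseKindArg("KubeadmConfig.v1beta1.bootstrap.cluster.x-k8s.io")
	fmt.Printf("%s/%s, Kind=%s\n", def.Group, def.Version, def.Kind)

	// Illustrative override for another bootstrap provider's config kind
	// (check that provider's actual group and version before using it).
	alt, _ := schema.ParseKindArg("RKE2Config.v1beta1.bootstrap.cluster.x-k8s.io")
	fmt.Printf("%s/%s, Kind=%s\n", alt.Group, alt.Version, alt.Kind)

	// Fewer than two dots: ParseKindArg returns a nil *GroupVersionKind,
	// which is why the reconciler falls back to the kubeadm default.
	short, _ := schema.ParseKindArg("KubeadmConfig")
	fmt.Println(short == nil) // true
}
```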
