Decouple MachinePool reconciler from Kubeadm
Signed-off-by: Danil-Grigorev <danil.grigorev@suse.com>
Danil-Grigorev committed Jun 20, 2024
1 parent a52056d commit 1483d29
Showing 5 changed files with 41 additions and 29 deletions.
30 changes: 22 additions & 8 deletions exp/controllers/azuremachinepool_controller.go
@@ -18,12 +18,15 @@ package controllers

import (
"context"
"reflect"
"time"

"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/tools/record"
infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
@@ -60,6 +63,7 @@ type (
Timeouts reconciler.Timeouts
WatchFilterValue string
createAzureMachinePoolService azureMachinePoolServiceCreator
BootstrapConfigGVK schema.GroupVersionKind
}

// annotationReaderWriter provides an interface to read and write annotations.
@@ -72,12 +76,20 @@ type (
type azureMachinePoolServiceCreator func(machinePoolScope *scope.MachinePoolScope) (*azureMachinePoolService, error)

// NewAzureMachinePoolReconciler returns a new AzureMachinePoolReconciler instance.
func NewAzureMachinePoolReconciler(client client.Client, recorder record.EventRecorder, timeouts reconciler.Timeouts, watchFilterValue string) *AzureMachinePoolReconciler {
func NewAzureMachinePoolReconciler(client client.Client, recorder record.EventRecorder, timeouts reconciler.Timeouts, watchFilterValue, bootstrapConfigGVK string) *AzureMachinePoolReconciler {
gvk := schema.FromAPIVersionAndKind(kubeadmv1.GroupVersion.String(), reflect.TypeFor[kubeadmv1.KubeadmConfig]().Name())

(CI annotation on line 80 in exp/controllers/azuremachinepool_controller.go, GitHub Actions / coverage: undefined: reflect.TypeFor (typecheck))
userGVK, _ := schema.ParseKindArg(bootstrapConfigGVK)

if userGVK != nil {
gvk = *userGVK
}

ampr := &AzureMachinePoolReconciler{
Client: client,
Recorder: recorder,
Timeouts: timeouts,
WatchFilterValue: watchFilterValue,
Client: client,
Recorder: recorder,
Timeouts: timeouts,
WatchFilterValue: watchFilterValue,
BootstrapConfigGVK: gvk,
}

ampr.createAzureMachinePoolService = newAzureMachinePoolService
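
For illustration, a minimal sketch of the GVK selection logic added to NewAzureMachinePoolReconciler above, pulled out into a standalone function. The names here are illustrative rather than part of the commit, "RKE2Config" stands in for a hypothetical alternative bootstrap provider, and reflect.TypeOf is used as an equivalent spelling of reflect.TypeFor that does not require Go 1.22, which is what the CI typecheck annotation above is complaining about:

package main

import (
	"fmt"
	"reflect"

	"k8s.io/apimachinery/pkg/runtime/schema"
	kubeadmv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
)

// bootstrapGVK defaults to the kubeadm KubeadmConfig GVK and lets a non-empty
// "Kind.version.group" string override it, mirroring the constructor above.
func bootstrapGVK(override string) schema.GroupVersionKind {
	gvk := schema.FromAPIVersionAndKind(
		kubeadmv1.GroupVersion.String(),                  // "bootstrap.cluster.x-k8s.io/v1beta1"
		reflect.TypeOf(kubeadmv1.KubeadmConfig{}).Name(), // "KubeadmConfig"
	)
	// ParseKindArg returns a nil *GroupVersionKind for strings with fewer than
	// three dot-separated segments (including the empty string), so the kubeadm
	// default is kept when no override is given.
	if userGVK, _ := schema.ParseKindArg(override); userGVK != nil {
		gvk = *userGVK
	}
	return gvk
}

func main() {
	fmt.Println(bootstrapGVK(""))                                              // bootstrap.cluster.x-k8s.io/v1beta1, Kind=KubeadmConfig
	fmt.Println(bootstrapGVK("RKE2Config.v1beta1.bootstrap.cluster.x-k8s.io")) // bootstrap.cluster.x-k8s.io/v1beta1, Kind=RKE2Config
}
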
@@ -108,6 +120,8 @@ func (ampr *AzureMachinePoolReconciler) SetupWithManager(ctx context.Context, mg
return errors.Wrapf(err, "failed to create AzureManagedCluster to AzureMachinePools mapper")
}

config := &metav1.PartialObjectMetadata{}
config.SetGroupVersionKind(ampr.BootstrapConfigGVK)
c, err := ctrl.NewControllerManagedBy(mgr).
WithOptions(options.Options).
For(&infrav1exp.AzureMachinePool{}).
@@ -127,10 +141,10 @@ func (ampr *AzureMachinePoolReconciler) SetupWithManager(ctx context.Context, mg
&infrav1.AzureManagedControlPlane{},
handler.EnqueueRequestsFromMapFunc(azureManagedControlPlaneMapper),
).
// watch for changes in KubeadmConfig to sync bootstrap token
// watch for changes in KubeadmConfig (or any BootstrapConfig) to sync bootstrap token
Watches(
&kubeadmv1.KubeadmConfig{},
handler.EnqueueRequestsFromMapFunc(KubeadmConfigToInfrastructureMapFunc(ctx, ampr.Client, log)),
config,
handler.EnqueueRequestsFromMapFunc(BootstrapperConfigToInfrastructureMapFunc(ctx, ampr.Client, log)),
builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}),
).
Build(r)
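
A side note on why this works without the kubeadm types (general controller-runtime behavior, not stated in the diff itself): when the object passed to Watches is a metav1.PartialObjectMetadata with its GroupVersionKind set, controller-runtime backs the watch with a metadata-only informer, so the concrete bootstrap config type never has to be registered in the manager's scheme; the only runtime requirement is that a CRD serving that GVK is installed. A minimal sketch of the pattern, with an illustrative helper name:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// watchTarget builds the metadata-only stand-in object for whichever bootstrap
// config GVK the reconciler was configured with; the cache then stores only
// metadata (name, namespace, labels, annotations) for objects of that kind.
func watchTarget(gvk schema.GroupVersionKind) client.Object {
	config := &metav1.PartialObjectMetadata{}
	config.SetGroupVersionKind(gvk)
	return config
}
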
4 changes: 2 additions & 2 deletions exp/controllers/azuremachinepool_controller_test.go
@@ -44,7 +44,7 @@ var _ = Describe("AzureMachinePoolReconciler", func() {
Context("Reconcile an AzureMachinePool", func() {
It("should not error with minimal set up", func() {
reconciler := NewAzureMachinePoolReconciler(testEnv, testEnv.GetEventRecorderFor("azuremachinepool-reconciler"),
reconciler.Timeouts{}, "")
reconciler.Timeouts{}, "", "")
By("Calling reconcile")
instance := &infrav1exp.AzureMachinePool{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}}
result, err := reconciler.Reconcile(context.Background(), ctrl.Request{
@@ -79,7 +79,7 @@ func TestAzureMachinePoolReconcilePaused(t *testing.T) {

recorder := record.NewFakeRecorder(1)

reconciler := NewAzureMachinePoolReconciler(c, recorder, reconciler.Timeouts{}, "")
reconciler := NewAzureMachinePoolReconciler(c, recorder, reconciler.Timeouts{}, "", "")
name := test.RandomName("paused", 10)
namespace := "default"

26 changes: 8 additions & 18 deletions exp/controllers/helpers.go
@@ -32,7 +32,6 @@ import (
infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
"sigs.k8s.io/cluster-api-provider-azure/util/reconciler"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
kubeadmv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
ctrl "sigs.k8s.io/controller-runtime"
@@ -372,28 +371,22 @@ func MachinePoolMachineHasStateOrVersionChange(logger logr.Logger) predicate.Fun
}
}

// KubeadmConfigToInfrastructureMapFunc returns a handler.ToRequestsFunc that watches for KubeadmConfig events and returns.
func KubeadmConfigToInfrastructureMapFunc(ctx context.Context, c client.Client, log logr.Logger) handler.MapFunc {
// BootstrapperConfigToInfrastructureMapFunc returns a handler.ToRequestsFunc that watches for <Bootstrap>Config events and returns.
func BootstrapperConfigToInfrastructureMapFunc(ctx context.Context, c client.Client, log logr.Logger) handler.MapFunc {
return func(ctx context.Context, o client.Object) []reconcile.Request {
ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultMappingTimeout)
defer cancel()

kc, ok := o.(*kubeadmv1.KubeadmConfig)
if !ok {
log.V(4).Info("attempt to map incorrect type", "type", fmt.Sprintf("%T", o))
return nil
}

mpKey := client.ObjectKey{
Namespace: kc.Namespace,
Name: kc.Name,
Namespace: o.GetNamespace(),
Name: o.GetName(),
}

// fetch MachinePool to get reference
mp := &expv1.MachinePool{}
if err := c.Get(ctx, mpKey, mp); err != nil {
if !apierrors.IsNotFound(err) {
log.Error(err, "failed to fetch MachinePool for KubeadmConfig")
log.Error(err, "failed to fetch MachinePool to validate Bootstrap.ConfigRef")
}
return []reconcile.Request{}
}
@@ -404,8 +397,8 @@ func KubeadmConfigToInfrastructureMapFunc(ctx context.Context, c client.Client,
return []reconcile.Request{}
}
sameKind := ref.Kind != o.GetObjectKind().GroupVersionKind().Kind
sameName := ref.Name == kc.Name
sameNamespace := ref.Namespace == kc.Namespace
sameName := ref.Name == o.GetName()
sameNamespace := ref.Namespace == o.GetNamespace()
if !sameKind || !sameName || !sameNamespace {
log.V(4).Info("Bootstrap.ConfigRef does not match",
"sameKind", sameKind,
@@ -417,10 +410,7 @@ func KubeadmConfigToInfrastructureMapFunc(ctx context.Context, c client.Client,
return []reconcile.Request{}
}

key := client.ObjectKey{
Namespace: kc.Namespace,
Name: kc.Name,
}
key := client.ObjectKeyFromObject(o)
log.V(4).Info("adding KubeadmConfig to watch", "key", key)

return []reconcile.Request{
2 changes: 1 addition & 1 deletion exp/controllers/suite_test.go
@@ -54,7 +54,7 @@ var _ = BeforeSuite(func() {
ctx = log.IntoContext(ctx, logr.New(testEnv.Log))

Expect(NewAzureMachinePoolReconciler(testEnv, testEnv.GetEventRecorderFor("azuremachinepool-reconciler"),
reconciler.Timeouts{}, "").SetupWithManager(ctx, testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed())
reconciler.Timeouts{}, "", "").SetupWithManager(ctx, testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed())

Expect(NewAzureMachinePoolMachineController(testEnv, testEnv.GetEventRecorderFor("azuremachinepoolmachine-reconciler"),
reconciler.Timeouts{}, "").SetupWithManager(ctx, testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed())
8 changes: 8 additions & 0 deletions main.go
@@ -105,6 +105,7 @@ var (
azureMachineConcurrency int
azureMachinePoolConcurrency int
azureMachinePoolMachineConcurrency int
azureBootrapConfigGVK string
debouncingTimer time.Duration
syncPeriod time.Duration
healthAddr string
@@ -253,6 +254,12 @@ func InitFlags(fs *pflag.FlagSet) {
"Enable tracing to the opentelemetry-collector service in the same namespace.",
)

fs.StringVar(&azureBootrapConfigGVK,
"bootstrap-config-gvk",
"",
"Provide fully qualified GVK string to override default kubeadm config watch source, in the form of Kind.version.group (default: KubeadmConfig.v1beta1.bootstrap.cluster.x-k8s.io)",
)

flags.AddDiagnosticsOptions(fs, &diagnosticsOptions)

feature.MutableGates.AddFlag(fs)
@@ -426,6 +433,7 @@ func registerControllers(ctx context.Context, mgr manager.Manager) {
mgr.GetEventRecorderFor("azuremachinepool-reconciler"),
timeouts,
watchFilterValue,
azureBootrapConfigGVK,
).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureMachinePoolConcurrency}, Cache: mpCache}); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "AzureMachinePool")
os.Exit(1)
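
Usage illustration (values are hypothetical, not taken from this commit): leaving --bootstrap-config-gvk unset keeps the existing behavior of watching KubeadmConfig objects, while a cluster that uses a different bootstrap provider could start the manager with something like --bootstrap-config-gvk=RKE2Config.v1beta1.bootstrap.cluster.x-k8s.io, matching the Kind.version.group form that schema.ParseKindArg expects.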
