Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 10 additions & 9 deletions cmd/cluster-capi-operator/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -242,12 +242,13 @@ func main() {
}
}

func getClusterOperatorStatusClient(mgr manager.Manager, controller string, managedNamespace string) operatorstatus.ClusterOperatorStatusClient {
// getClusterOperatorStatusClient builds the operatorstatus.ClusterOperatorStatusClient
// shared by all controllers: it wires the manager's client, an event recorder named
// after the given controller, the operator release version, the detected platform,
// and the namespace this operator manages.
func getClusterOperatorStatusClient(mgr manager.Manager, controller string, platform configv1.PlatformType, managedNamespace string) operatorstatus.ClusterOperatorStatusClient {
	return operatorstatus.ClusterOperatorStatusClient{
		Client:           mgr.GetClient(),
		Recorder:         mgr.GetEventRecorderFor(controller),
		ReleaseVersion:   util.GetReleaseVersion(),
		ManagedNamespace: managedNamespace,
		Platform:         platform,
	}
}

Expand Down Expand Up @@ -293,12 +294,12 @@ func setupPlatformReconcilers(mgr manager.Manager, infra *configv1.Infrastructur
}

// The ClusterOperator Controller must run under all circumstances as it manages the ClusterOperator object for this operator.
setupClusterOperatorController(mgr, managedNamespace, isUnsupportedPlatform)
setupClusterOperatorController(mgr, platform, managedNamespace, isUnsupportedPlatform)
}

func setupReconcilers(mgr manager.Manager, infra *configv1.Infrastructure, platform configv1.PlatformType, infraClusterObject client.Object, containerImages map[string]string, applyClient *kubernetes.Clientset, apiextensionsClient *apiextensionsclient.Clientset, managedNamespace string) {
if err := (&corecluster.CoreClusterController{
ClusterOperatorStatusClient: getClusterOperatorStatusClient(mgr, "cluster-capi-operator-cluster-resource-controller", managedNamespace),
ClusterOperatorStatusClient: getClusterOperatorStatusClient(mgr, "cluster-capi-operator-cluster-resource-controller", platform, managedNamespace),
Cluster: &clusterv1.Cluster{},
Platform: platform,
Infra: infra,
Expand All @@ -308,15 +309,15 @@ func setupReconcilers(mgr manager.Manager, infra *configv1.Infrastructure, platf
}

if err := (&secretsync.UserDataSecretController{
ClusterOperatorStatusClient: getClusterOperatorStatusClient(mgr, "cluster-capi-operator-user-data-secret-controller", managedNamespace),
ClusterOperatorStatusClient: getClusterOperatorStatusClient(mgr, "cluster-capi-operator-user-data-secret-controller", platform, managedNamespace),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
klog.Error(err, "unable to create user-data-secret controller", "controller", "UserDataSecret")
os.Exit(1)
}

if err := (&kubeconfig.KubeconfigReconciler{
ClusterOperatorStatusClient: getClusterOperatorStatusClient(mgr, "cluster-capi-operator-kubeconfig-controller", managedNamespace),
ClusterOperatorStatusClient: getClusterOperatorStatusClient(mgr, "cluster-capi-operator-kubeconfig-controller", platform, managedNamespace),
Scheme: mgr.GetScheme(),
RestCfg: mgr.GetConfig(),
}).SetupWithManager(mgr); err != nil {
Expand All @@ -325,7 +326,7 @@ func setupReconcilers(mgr manager.Manager, infra *configv1.Infrastructure, platf
}

if err := (&capiinstaller.CapiInstallerController{
ClusterOperatorStatusClient: getClusterOperatorStatusClient(mgr, "cluster-capi-operator-capi-installer-controller", managedNamespace),
ClusterOperatorStatusClient: getClusterOperatorStatusClient(mgr, "cluster-capi-operator-capi-installer-controller", platform, managedNamespace),
Scheme: mgr.GetScheme(),
Images: containerImages,
RestCfg: mgr.GetConfig(),
Expand All @@ -338,7 +339,7 @@ func setupReconcilers(mgr manager.Manager, infra *configv1.Infrastructure, platf
}

if err := (&infracluster.InfraClusterController{
ClusterOperatorStatusClient: getClusterOperatorStatusClient(mgr, "cluster-capi-operator-infracluster-controller", managedNamespace),
ClusterOperatorStatusClient: getClusterOperatorStatusClient(mgr, "cluster-capi-operator-infracluster-controller", platform, managedNamespace),
Scheme: mgr.GetScheme(),
Images: containerImages,
RestCfg: mgr.GetConfig(),
Expand Down Expand Up @@ -382,10 +383,10 @@ func getAzureCloudEnvironment(ps *configv1.PlatformStatus) configv1.AzureCloudEn
return ps.Azure.CloudName
}

func setupClusterOperatorController(mgr manager.Manager, ns string, isUnsupportedPlatform bool) {
func setupClusterOperatorController(mgr manager.Manager, platform configv1.PlatformType, ns string, isUnsupportedPlatform bool) {
// ClusterOperator watches and keeps the cluster-api ClusterObject up to date.
if err := (&clusteroperator.ClusterOperatorController{
ClusterOperatorStatusClient: getClusterOperatorStatusClient(mgr, "cluster-capi-operator-clusteroperator-controller", ns),
ClusterOperatorStatusClient: getClusterOperatorStatusClient(mgr, "cluster-capi-operator-clusteroperator-controller", platform, ns),
Scheme: mgr.GetScheme(),
IsUnsupportedPlatform: isUnsupportedPlatform,
}).SetupWithManager(mgr); err != nil {
Expand Down
107 changes: 107 additions & 0 deletions manifests/0000_30_cluster-api_12_clusteroperator.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -31,3 +31,110 @@ status:
name: cluster-capi-operator
namespace: openshift-cluster-api
resource: deployments
- group: "cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: clusters
- group: "cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: machinesets
- group: "cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: machines
- group: "cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: machinedeployments

- group: "infrastructure.cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: awsclusters
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If you're actively managing these entries in your operator (which it looks like you are), I don't think you need to go through the work of manually listing them here in your ClusterOperator manifest. You need enough in the manifest that a must-gather while your operator isn't running collects enough to figure out why the operator isn't running. And it seems unlikely that folks would need to get all the way out to cloud-specific types to figure out why the operator wasn't running?

But also, 🤷, if you don't mind managing this list by hand or you find some way to automate it, having the cloud-specific types in this manifest doesn't hurt.

- group: "infrastructure.cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: awsmachines
- group: "infrastructure.cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: awsmachinetemplates

- group: "infrastructure.cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: azureclusters
- group: "infrastructure.cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: azuremachines
- group: "infrastructure.cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: azuremachinetemplates

- group: "infrastructure.cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: gcpclusters
- group: "infrastructure.cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: gcpmachines
- group: "infrastructure.cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: gcpmachinetemplates

- group: "infrastructure.cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: ibmpowerclusters
- group: "infrastructure.cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: ibmpowermachines
- group: "infrastructure.cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: ibmpowermachinetemplates

- group: "infrastructure.cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: metal3clusters
- group: "infrastructure.cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: metal3machines
- group: "infrastructure.cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: metal3machinetemplates

- group: "infrastructure.cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: openstackclusters
- group: "infrastructure.cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: openstackmachines
- group: "infrastructure.cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: openstackmachinetemplates

- group: "infrastructure.cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: vsphereclusters
- group: "infrastructure.cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: vspheremachines
- group: "infrastructure.cluster.x-k8s.io"
name: ""
namespace: openshift-cluster-api
resource: vspheremachinetemplates
45 changes: 40 additions & 5 deletions pkg/operatorstatus/operator_status.go
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,7 @@ type ClusterOperatorStatusClient struct {
Recorder record.EventRecorder
ManagedNamespace string
ReleaseVersion string
Platform configv1.PlatformType
}

// SetStatusAvailable sets the Available condition to True, with the given reason
Expand Down Expand Up @@ -171,16 +172,50 @@ func (r *ClusterOperatorStatusClient) SyncStatus(ctx context.Context, co *config
return nil
}

// platformToInfraPrefix maps a platform to the type-name prefix used by its
// Cluster API infrastructure provider. Most providers name their types after
// the platform itself (e.g. AWSCluster); bare metal is the exception, where
// the provider types are prefixed Metal3.
func platformToInfraPrefix(platform configv1.PlatformType) string {
	if platform == configv1.BareMetalPlatformType {
		return "Metal3"
	}

	return string(platform)
}

func (r *ClusterOperatorStatusClient) relatedObjects() []configv1.ObjectReference {
// TBD: Add an actual set of object references from getResources method
return []configv1.ObjectReference{
references := []configv1.ObjectReference{
{Resource: "namespaces", Name: controllers.DefaultManagedNamespace},
{Group: configv1.GroupName, Resource: "clusteroperators", Name: controllers.ClusterOperatorName},
{Resource: "namespaces", Name: r.ManagedNamespace},
{Group: "", Resource: "serviceaccounts", Name: "cluster-capi-operator"},
{Group: "", Resource: "configmaps", Name: "cluster-capi-operator-images"},
{Group: "apps", Resource: "deployments", Name: "cluster-capi-operator"},
{Group: "", Resource: "serviceaccounts", Name: "cluster-capi-operator", Namespace: controllers.DefaultManagedNamespace},
{Group: "", Resource: "configmaps", Name: "cluster-capi-operator-images", Namespace: controllers.DefaultManagedNamespace},
{Group: "apps", Resource: "deployments", Name: "cluster-capi-operator", Namespace: controllers.DefaultManagedNamespace},
{Group: "cluster.x-k8s.io", Resource: "clusters", Namespace: r.ManagedNamespace},
{Group: "cluster.x-k8s.io", Resource: "machines", Namespace: r.ManagedNamespace},
{Group: "cluster.x-k8s.io", Resource: "machinesets", Namespace: r.ManagedNamespace},
}

platformPrefix := platformToInfraPrefix(r.Platform)

for groupVersionKind, t := range r.Scheme().AllKnownTypes() {
if strings.HasSuffix(groupVersionKind.Group, "cluster.x-k8s.io") {
// Ignore lists
if _, found := t.FieldByName("ObjectMeta"); !found {
continue
}

if strings.HasPrefix(t.Name(), platformPrefix) {
ref := configv1.ObjectReference{
Group: groupVersionKind.Group,
Resource: strings.ToLower(t.Name()),
Namespace: r.ManagedNamespace,
}

references = append(references, ref)
}
}
}

return references
}
func (r *ClusterOperatorStatusClient) operandVersions() []configv1.OperandVersion {
return []configv1.OperandVersion{{Name: controllers.OperatorVersionKey, Version: r.ReleaseVersion}}
Expand Down