diff --git a/api/falcon/v1alpha1/falconnodesensor_funcs.go b/api/falcon/v1alpha1/falconnodesensor_funcs.go
deleted file mode 100644
index 9f2fbba9..00000000
--- a/api/falcon/v1alpha1/falconnodesensor_funcs.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package v1alpha1
-
-// TargetNs returns a namespace to which the node sensor should be installed to
-func (n *FalconNodeSensor) TargetNs() string {
-	return "falcon-system"
-}
diff --git a/api/falcon/v1alpha1/falconnodesensor_types.go b/api/falcon/v1alpha1/falconnodesensor_types.go
index 15790fe1..9ce14b93 100644
--- a/api/falcon/v1alpha1/falconnodesensor_types.go
+++ b/api/falcon/v1alpha1/falconnodesensor_types.go
@@ -15,6 +15,13 @@ type FalconNodeSensorSpec struct {
 	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
 	// Important: Run "make" to regenerate code after modifying this file
 
+	// Namespace where the Falcon Sensor should be installed.
+	// For best security practices, this should be a dedicated namespace that is not used for any other purpose.
+	// It also should not be the same namespace where the Falcon Operator, or other Falcon resources are deployed.
+	// +kubebuilder:default:=falcon-system
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,order=1,xDescriptors={"urn:alm:descriptor:io.kubernetes:Namespace"}
+	InstallNamespace string `json:"installNamespace,omitempty"`
+
 	// Various configuration for DaemonSet Deployment
 	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="DaemonSet Configuration",order=3
 	Node FalconNodeSensorConfig `json:"node,omitempty"`
diff --git a/config/crd/bases/falcon.crowdstrike.com_falconnodesensors.yaml b/config/crd/bases/falcon.crowdstrike.com_falconnodesensors.yaml
index 66229d26..63ba6261 100644
--- a/config/crd/bases/falcon.crowdstrike.com_falconnodesensors.yaml
+++ b/config/crd/bases/falcon.crowdstrike.com_falconnodesensors.yaml
@@ -127,6 +127,14 @@ spec:
                 - client_secret
                 - cloud_region
                 type: object
+              installNamespace:
+                default: falcon-system
+                description: Namespace where the Falcon Sensor should be installed.
+                  For best security practices, this should be a dedicated namespace
+                  that is not used for any other purpose. It also should not be the
+                  same namespace where the Falcon Operator, or other Falcon resources
+                  are deployed.
+                type: string
               node:
                 description: Various configuration for DaemonSet Deployment
                 properties:
diff --git a/deploy/falcon-operator.yaml b/deploy/falcon-operator.yaml
index f581caf2..2319b0bc 100644
--- a/deploy/falcon-operator.yaml
+++ b/deploy/falcon-operator.yaml
@@ -2743,6 +2743,14 @@ spec:
                 - client_secret
                 - cloud_region
                 type: object
+              installNamespace:
+                default: falcon-system
+                description: Namespace where the Falcon Sensor should be installed.
+                  For best security practices, this should be a dedicated namespace
+                  that is not used for any other purpose. It also should not be the
+                  same namespace where the Falcon Operator, or other Falcon resources
+                  are deployed.
+                type: string
               node:
                 description: Various configuration for DaemonSet Deployment
                 properties:
diff --git a/docs/deployment/openshift/resources/node/README.md b/docs/deployment/openshift/resources/node/README.md
index d8f751a1..96277f72 100644
--- a/docs/deployment/openshift/resources/node/README.md
+++ b/docs/deployment/openshift/resources/node/README.md
@@ -57,6 +57,7 @@ spec:
 #### Node Configuration Settings
 | Spec                                 | Description                                                                                                                                 |
 | :----------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------ |
+| installNamespace                     | (optional) Override the default namespace of falcon-system                                                                                   |
 | node.tolerations                     | (optional) See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ for examples on configuring tolerations         |
 | node.nodeAffinity                    | (optional) See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ for examples on configuring nodeAffinity             |
 | node.image                           | (optional) Location of the Falcon Sensor Image. Specify only when you mirror the original image to your own image repository                 |
diff --git a/docs/resources/node/README.md b/docs/resources/node/README.md
index bf24af1c..aa3da3ac 100644
--- a/docs/resources/node/README.md
+++ b/docs/resources/node/README.md
@@ -57,6 +57,7 @@ spec:
 #### Node Configuration Settings
 | Spec                                 | Description                                                                                                                                 |
 | :----------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------ |
+| installNamespace                     | (optional) Override the default namespace of falcon-system                                                                                   |
 | node.tolerations                     | (optional) See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ for examples on configuring tolerations         |
 | node.nodeAffinity                    | (optional) See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ for examples on configuring nodeAffinity             |
 | node.image                           | (optional) Location of the Falcon Sensor Image. Specify only when you mirror the original image to your own image repository                 |
diff --git a/docs/src/resources/node.md.tmpl b/docs/src/resources/node.md.tmpl
index b9cb4985..1eeca64e 100644
--- a/docs/src/resources/node.md.tmpl
+++ b/docs/src/resources/node.md.tmpl
@@ -57,6 +57,7 @@ spec:
 #### Node Configuration Settings
 | Spec                                 | Description                                                                                                                                 |
 | :----------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------ |
+| installNamespace                     | (optional) Override the default namespace of falcon-system                                                                                   |
 | node.tolerations                     | (optional) See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ for examples on configuring tolerations         |
 | node.nodeAffinity                    | (optional) See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ for examples on configuring nodeAffinity             |
 | node.image                           | (optional) Location of the Falcon Sensor Image. Specify only when you mirror the original image to your own image repository                 |
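For reference, the `installNamespace` option documented above maps to the new `InstallNamespace` field on `FalconNodeSensorSpec`. Below is a minimal sketch of setting it through the Go API; the CR name and the `falcon-sensors` namespace are illustrative values only, and when the field is left empty the kubebuilder default of `falcon-system` applies.

```go
package main

import (
	"fmt"

	falconv1alpha1 "github.com/crowdstrike/falcon-operator/api/falcon/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Build a FalconNodeSensor that overrides the default install namespace.
	// "falcon-sensors" is a hypothetical dedicated namespace for the sensor.
	sensor := &falconv1alpha1.FalconNodeSensor{
		ObjectMeta: metav1.ObjectMeta{Name: "falcon-node-sensor"},
		Spec: falconv1alpha1.FalconNodeSensorSpec{
			InstallNamespace: "falcon-sensors",
		},
	}
	fmt.Println(sensor.Spec.InstallNamespace)
}
```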
diff --git a/internal/controller/admission/falconadmission_controller.go b/internal/controller/admission/falconadmission_controller.go
index a8d39800..d5011aea 100644
--- a/internal/controller/admission/falconadmission_controller.go
+++ b/internal/controller/admission/falconadmission_controller.go
@@ -105,6 +105,25 @@ func (r *FalconAdmissionReconciler) Reconcile(ctx context.Context, req ctrl.Requ
 		return ctrl.Result{}, err
 	}
 
+	validate, err := k8sutils.CheckRunningPodLabels(r.Client, ctx, falconAdmission.Spec.InstallNamespace, common.CRLabels("deployment", falconAdmission.Name, common.FalconAdmissionController))
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+	if !validate {
+		err = k8sutils.ConditionsUpdate(r.Client, ctx, req, log, falconAdmission, &falconAdmission.Status, metav1.Condition{
+			Status:             metav1.ConditionFalse,
+			Reason:             falconv1alpha1.ReasonReqNotMet,
+			Type:               falconv1alpha1.ConditionFailed,
+			Message:            "FalconAdmission must not be installed in a namespace with other workloads running. Please change the namespace in the CR configuration.",
+			ObservedGeneration: falconAdmission.GetGeneration(),
+		})
+		if err != nil {
+			return ctrl.Result{}, err
+		}
+		log.Error(nil, "FalconAdmission is attempting to install in a namespace with existing pods. Please update the CR configuration to a namespace that does not have workloads already running.")
+		return ctrl.Result{}, err
+	}
+
 	// Let's just set the status as Unknown when no status is available
 	if falconAdmission.Status.Conditions == nil || len(falconAdmission.Status.Conditions) == 0 {
 		err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
diff --git a/internal/controller/assets/daemonset.go b/internal/controller/assets/daemonset.go
index 94a4aec7..e371016a 100644
--- a/internal/controller/assets/daemonset.go
+++ b/internal/controller/assets/daemonset.go
@@ -194,7 +194,7 @@ func Daemonset(dsName, image, serviceAccount string, node *falconv1alpha1.Falcon
 	return &appsv1.DaemonSet{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      dsName,
-			Namespace: node.TargetNs(),
+			Namespace: node.Spec.InstallNamespace,
 			Labels:    common.CRLabels("daemonset", dsName, common.FalconKernelSensor),
 		},
 		Spec: appsv1.DaemonSetSpec{
@@ -286,7 +286,7 @@ func RemoveNodeDirDaemonset(dsName, image, serviceAccount string, node *falconv1
 	return &appsv1.DaemonSet{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      dsName,
-			Namespace: node.TargetNs(),
+			Namespace: node.Spec.InstallNamespace,
 			Labels:    common.CRLabels("cleanup", dsName, common.FalconKernelSensor),
 		},
 		Spec: appsv1.DaemonSetSpec{
diff --git a/internal/controller/assets/daemonset_test.go b/internal/controller/assets/daemonset_test.go
index 74c00574..58169103 100644
--- a/internal/controller/assets/daemonset_test.go
+++ b/internal/controller/assets/daemonset_test.go
@@ -202,7 +202,7 @@ func TestDaemonset(t *testing.T) {
 	want := &appsv1.DaemonSet{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      dsName,
-			Namespace: falconNode.Namespace,
+			Namespace: falconNode.Spec.InstallNamespace,
 			Labels:    common.CRLabels("daemonset", dsName, common.FalconKernelSensor),
 		},
 		Spec: appsv1.DaemonSetSpec{
@@ -310,7 +310,7 @@ func TestRemoveNodeDirDaemonset(t *testing.T) {
 	want := &appsv1.DaemonSet{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      dsName,
-			Namespace: falconNode.Namespace,
+			Namespace: falconNode.Spec.InstallNamespace,
 			Labels:    common.CRLabels("cleanup", dsName, common.FalconKernelSensor),
 		},
 		Spec: appsv1.DaemonSetSpec{
diff --git a/internal/controller/common/utils.go b/internal/controller/common/utils.go
index 98ccf09e..ccf660a1 100644
--- a/internal/controller/common/utils.go
+++ b/internal/controller/common/utils.go
@@ -189,6 +189,30 @@ func ConditionsUpdate(r client.Client, ctx context.Context, req ctrl.Request, lo
 	return nil
 }
 
+func CheckRunningPodLabels(r client.Client, ctx context.Context, namespace string, matchingLabels client.MatchingLabels) (bool, error) {
+	podList := &corev1.PodList{}
+
+	listOpts := []client.ListOption{
+		client.InNamespace(namespace),
+	}
+
+	if err := r.List(ctx, podList, listOpts...); err != nil {
+		return false, fmt.Errorf("unable to list pods: %v", err)
+	}
+
+	for _, pod := range podList.Items {
+		if pod.ObjectMeta.Labels != nil {
+			for k, v := range matchingLabels {
+				if pod.ObjectMeta.Labels[k] != v {
+					return false, nil
+				}
+			}
+		}
+	}
+
+	return true, nil
+}
+
 func GetReadyPod(r client.Client, ctx context.Context, namespace string, matchingLabels client.MatchingLabels) (*corev1.Pod, error) {
 	podList := &corev1.PodList{}
 	listOpts := []client.ListOption{
diff --git a/internal/controller/common/utils_test.go b/internal/controller/common/utils_test.go
index 4a7b444e..5cf901ef 100644
--- a/internal/controller/common/utils_test.go
+++ b/internal/controller/common/utils_test.go
@@ -25,6 +25,51 @@ func getFakeClient(initObjs ...client.Object) (client.WithWatch, error) {
 	return fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build(), nil
 }
 
+func TestCheckRunningPodLabels(t *testing.T) {
+	ctx := context.Background()
+
+	fakeClient, err := getFakeClient()
+	if err != nil {
+		t.Fatalf("TestCheckRunningPodLabels getFakeClient() error = %v", err)
+	}
+
+	err = fakeClient.Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace"}})
+	if err != nil {
+		t.Fatalf("TestCheckRunningPodLabels Create() error = %v", err)
+	}
+
+	testLabel := map[string]string{"crowdstrike.com/provider": "crowdstrike", "testLabel": "testPod"}
+	err = fakeClient.Create(ctx, &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-pod", Namespace: "test-namespace", Labels: testLabel}})
+	if err != nil {
+		t.Fatalf("TestCheckRunningPodLabels Create() error = %v", err)
+	}
+
+	matchingLabels := client.MatchingLabels{"testLabel": "testPod"}
+
+	got, err := CheckRunningPodLabels(fakeClient, ctx, "test-namespace", matchingLabels)
+	if err != nil {
+		t.Errorf("CheckRunningPodLabels() error = %v", err)
+	}
+
+	want := true
+	if diff := cmp.Diff(want, got); diff != "" {
+		t.Errorf("CheckRunningPodLabels() mismatch (-want +got):\n%s", diff)
+	}
+
+	// Test with non-matching labels
+	matchingLabels = client.MatchingLabels{"testLabel": "nonMatchingValue"}
+
+	got, err = CheckRunningPodLabels(fakeClient, ctx, "test-namespace", matchingLabels)
+	if err != nil {
+		t.Errorf("CheckRunningPodLabels() error = %v", err)
+	}
+
+	want = false
+	if diff := cmp.Diff(want, got); diff != "" {
+		t.Errorf("CheckRunningPodLabels() mismatch (-want +got):\n%s", diff)
+	}
+}
+
 func TestGetReadyPod(t *testing.T) {
 	ctx := context.Background()
 
diff --git a/internal/controller/falcon_node/falconnodesensor_controller.go b/internal/controller/falcon_node/falconnodesensor_controller.go
index f8b55fb1..b197ad3c 100644
--- a/internal/controller/falcon_node/falconnodesensor_controller.go
+++ b/internal/controller/falcon_node/falconnodesensor_controller.go
@@ -6,6 +6,7 @@ import (
 
 	falconv1alpha1 "github.com/crowdstrike/falcon-operator/api/falcon/v1alpha1"
 	"github.com/crowdstrike/falcon-operator/internal/controller/assets"
+	k8sutils "github.com/crowdstrike/falcon-operator/internal/controller/common"
 	"github.com/crowdstrike/falcon-operator/pkg/common"
 	"github.com/crowdstrike/falcon-operator/pkg/k8s_utils"
 	"github.com/crowdstrike/falcon-operator/pkg/node"
@@ -88,6 +89,23 @@ func (r *FalconNodeSensorReconciler) Reconcile(ctx context.Context, req ctrl.Req
 		return ctrl.Result{}, err
 	}
 
+	validate, err := k8sutils.CheckRunningPodLabels(r.Client, ctx, nodesensor.Spec.InstallNamespace, common.CRLabels("daemonset", nodesensor.Name, common.FalconKernelSensor))
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+	if !validate {
+		err = r.conditionsUpdate(falconv1alpha1.ConditionFailed,
+			metav1.ConditionFalse,
+			falconv1alpha1.ReasonReqNotMet,
+			"FalconNodeSensor must not be installed in a namespace with other workloads running. Please change the namespace in the CR configuration.",
+			ctx, nodesensor, logger)
+		if err != nil {
+			return ctrl.Result{}, err
+		}
+		logger.Error(nil, "FalconNodeSensor is attempting to install in a namespace with existing pods. Please update the CR configuration to a namespace that does not have workloads already running.")
+		return ctrl.Result{}, err
+	}
+
 	dsCondition := meta.FindStatusCondition(nodesensor.Status.Conditions, falconv1alpha1.ConditionSuccess)
 	if dsCondition == nil {
 		err = r.conditionsUpdate(falconv1alpha1.ConditionPending,
@@ -206,7 +224,7 @@ func (r *FalconNodeSensorReconciler) Reconcile(ctx context.Context, req ctrl.Req
 
 	// Check if the daemonset already exists, if not create a new one
 	daemonset := &appsv1.DaemonSet{}
-	err = r.Get(ctx, types.NamespacedName{Name: nodesensor.Name, Namespace: nodesensor.TargetNs()}, daemonset)
+	err = r.Get(ctx, types.NamespacedName{Name: nodesensor.Name, Namespace: nodesensor.Spec.InstallNamespace}, daemonset)
 	if err != nil && errors.IsNotFound(err) {
 		ds := assets.Daemonset(nodesensor.Name, image, serviceAccount, nodesensor)
 
@@ -380,7 +398,7 @@ func (r *FalconNodeSensorReconciler) Reconcile(ctx context.Context, req ctrl.Req
 // handleNamespace creates and updates the namespace
 func (r *FalconNodeSensorReconciler) handleNamespace(ctx context.Context, nodesensor *falconv1alpha1.FalconNodeSensor, logger logr.Logger) (bool, error) {
 	ns := corev1.Namespace{}
-	err := r.Client.Get(ctx, types.NamespacedName{Name: nodesensor.TargetNs()}, &ns)
+	err := r.Client.Get(ctx, types.NamespacedName{Name: nodesensor.Spec.InstallNamespace}, &ns)
 	if err == nil || (err != nil && !errors.IsNotFound(err)) {
 		return false, err
 	}
@@ -391,7 +409,13 @@ func (r *FalconNodeSensorReconciler) handleNamespace(ctx context.Context, nodese
 			Kind:       "Namespace",
 		},
 		ObjectMeta: metav1.ObjectMeta{
-			Name: nodesensor.TargetNs(),
+			Name: nodesensor.Spec.InstallNamespace,
+			Labels: map[string]string{
+				"pod-security.kubernetes.io/enforce":             "privileged",
+				"pod-security.kubernetes.io/warn":                "privileged",
+				"pod-security.kubernetes.io/audit":               "privileged",
+				"security.openshift.io/scc.podSecurityLabelSync": "false",
+			},
 		},
 	}
 	err = ctrl.SetControllerReference(nodesensor, &ns, r.Scheme)
@@ -400,7 +424,7 @@ func (r *FalconNodeSensorReconciler) handleNamespace(ctx context.Context, nodese
 	}
 	err = r.Client.Create(ctx, &ns)
 	if err != nil && !errors.IsAlreadyExists(err) {
-		logger.Error(err, "Failed to create new namespace", "Namespace.Name", nodesensor.TargetNs())
+		logger.Error(err, "Failed to create new namespace", "Namespace.Name", nodesensor.Spec.InstallNamespace)
 		return false, err
 	}
 	return true, nil
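As a quick reference for the handleNamespace change above: the namespace created for the sensor now carries Pod Security Admission labels pinned to privileged, with OpenShift's pod security label sync disabled. The sketch below only restates that label set as a standalone helper; the function name and package are hypothetical, not part of the patch.

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// sensorNamespace sketches the Namespace object the node sensor controller
// creates for spec.installNamespace: Pod Security Admission is set to
// privileged and OpenShift's SCC-driven label sync is turned off.
func sensorNamespace(installNamespace string) *corev1.Namespace {
	return &corev1.Namespace{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "Namespace",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: installNamespace,
			Labels: map[string]string{
				"pod-security.kubernetes.io/enforce":             "privileged",
				"pod-security.kubernetes.io/warn":                "privileged",
				"pod-security.kubernetes.io/audit":               "privileged",
				"security.openshift.io/scc.podSecurityLabelSync": "false",
			},
		},
	}
}
```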
@@ -430,7 +454,7 @@ func (r *FalconNodeSensorReconciler) handlePriorityClass(ctx context.Context, no
 
 	pc := assets.PriorityClass(pcName, nodesensor.Spec.Node.PriorityClass.Value)
 
-	err := r.Get(ctx, types.NamespacedName{Name: pcName, Namespace: nodesensor.TargetNs()}, existingPC)
+	err := r.Get(ctx, types.NamespacedName{Name: pcName, Namespace: nodesensor.Spec.InstallNamespace}, existingPC)
 	if err != nil && errors.IsNotFound(err) {
 		err = ctrl.SetControllerReference(nodesensor, pc, r.Scheme)
 		if err != nil {
@@ -484,14 +508,14 @@ func (r *FalconNodeSensorReconciler) handleConfigMaps(ctx context.Context, confi
 	var updated bool
 	cmName := nodesensor.Name + "-config"
 	confCm := &corev1.ConfigMap{}
-	configmap := assets.SensorConfigMap(cmName, nodesensor.TargetNs(), common.FalconKernelSensor, config.SensorEnvVars())
+	configmap := assets.SensorConfigMap(cmName, nodesensor.Spec.InstallNamespace, common.FalconKernelSensor, config.SensorEnvVars())
 
-	err := r.Get(ctx, types.NamespacedName{Name: cmName, Namespace: nodesensor.TargetNs()}, confCm)
+	err := r.Get(ctx, types.NamespacedName{Name: cmName, Namespace: nodesensor.Spec.InstallNamespace}, confCm)
 	if err != nil && errors.IsNotFound(err) {
 		// does not exist, create
 		err = controllerutil.SetControllerReference(nodesensor, configmap, r.Scheme)
 		if err != nil {
-			logger.Error(err, "Failed to format new Configmap", "Configmap.Namespace", nodesensor.TargetNs(), "Configmap.Name", cmName)
+			logger.Error(err, "Failed to format new Configmap", "Configmap.Namespace", nodesensor.Spec.InstallNamespace, "Configmap.Name", cmName)
 			return nil, updated, err
 		}
 
@@ -500,11 +524,11 @@ func (r *FalconNodeSensorReconciler) handleConfigMaps(ctx context.Context, confi
 			// We have got NotFound error during the Get(), but then we have got AlreadyExists error from Create(). Client cache is invalid.
 			err = r.Update(ctx, configmap)
 			if err != nil {
-				logger.Error(err, "Failed to update Configmap", "Configmap.Namespace", nodesensor.TargetNs(), "Configmap.Name", cmName)
+				logger.Error(err, "Failed to update Configmap", "Configmap.Namespace", nodesensor.Spec.InstallNamespace, "Configmap.Name", cmName)
 			}
 			return configmap, updated, nil
 		} else {
-			logger.Error(err, "Failed to create new Configmap", "Configmap.Namespace", nodesensor.TargetNs(), "Configmap.Name", cmName)
+			logger.Error(err, "Failed to create new Configmap", "Configmap.Namespace", nodesensor.Spec.InstallNamespace, "Configmap.Name", cmName)
 			return nil, updated, err
 		}
 
@@ -520,7 +544,7 @@ func (r *FalconNodeSensorReconciler) handleConfigMaps(ctx context.Context, confi
 	if !reflect.DeepEqual(confCm.Data, configmap.Data) {
 		err = r.Update(ctx, configmap)
 		if err != nil {
-			logger.Error(err, "Failed to update Configmap", "Configmap.Namespace", nodesensor.TargetNs(), "Configmap.Name", cmName)
+			logger.Error(err, "Failed to update Configmap", "Configmap.Namespace", nodesensor.Spec.InstallNamespace, "Configmap.Name", cmName)
 			return nil, updated, err
 		}
 
@@ -536,7 +560,7 @@ func (r *FalconNodeSensorReconciler) handleCrowdStrikeSecrets(ctx context.Contex
 		return nil
 	}
 	secret := corev1.Secret{}
-	err := r.Client.Get(ctx, types.NamespacedName{Name: common.FalconPullSecretName, Namespace: nodesensor.TargetNs()}, &secret)
+	err := r.Client.Get(ctx, types.NamespacedName{Name: common.FalconPullSecretName, Namespace: nodesensor.Spec.InstallNamespace}, &secret)
 	if err == nil || !errors.IsNotFound(err) {
 		return err
 	}
@@ -547,7 +571,7 @@ func (r *FalconNodeSensorReconciler) handleCrowdStrikeSecrets(ctx context.Contex
 	}
 
 	secretData := map[string][]byte{corev1.DockerConfigJsonKey: common.CleanDecodedBase64(pulltoken)}
-	secret = *assets.Secret(common.FalconPullSecretName, nodesensor.TargetNs(), common.FalconKernelSensor, secretData, corev1.SecretTypeDockerConfigJson)
+	secret = *assets.Secret(common.FalconPullSecretName, nodesensor.Spec.InstallNamespace, common.FalconKernelSensor, secretData, corev1.SecretTypeDockerConfigJson)
 	err = ctrl.SetControllerReference(nodesensor, &secret, r.Scheme)
 	if err != nil {
 		logger.Error(err, "Unable to assign Controller Reference to the Pull Secret")
@@ -555,11 +579,11 @@ func (r *FalconNodeSensorReconciler) handleCrowdStrikeSecrets(ctx context.Contex
 	err = r.Client.Create(ctx, &secret)
 	if err != nil {
 		if !errors.IsAlreadyExists(err) {
-			logger.Error(err, "Failed to create new Pull Secret", "Secret.Namespace", nodesensor.TargetNs(), "Secret.Name", common.FalconPullSecretName)
+			logger.Error(err, "Failed to create new Pull Secret", "Secret.Namespace", nodesensor.Spec.InstallNamespace, "Secret.Name", common.FalconPullSecretName)
 			return err
 		}
 	} else {
-		logger.Info("Created a new Pull Secret", "Secret.Namespace", nodesensor.TargetNs(), "Secret.Name", common.FalconPullSecretName)
+		logger.Info("Created a new Pull Secret", "Secret.Namespace", nodesensor.Spec.InstallNamespace, "Secret.Name", common.FalconPullSecretName)
 	}
 	return nil
 }
@@ -770,7 +794,7 @@ func (r *FalconNodeSensorReconciler) handleClusterRoleBinding(ctx context.Contex
 			{
 				Kind:      "ServiceAccount",
 				Name:      common.NodeServiceAccountName,
-				Namespace: nodesensor.TargetNs(),
+				Namespace: nodesensor.Spec.InstallNamespace,
 			},
 		},
 	}
@@ -791,7 +815,7 @@ func (r *FalconNodeSensorReconciler) handleClusterRoleBinding(ctx context.Contex
 // handleServiceAccount creates and updates the service account and grants necessary permissions to it
 func (r *FalconNodeSensorReconciler) handleServiceAccount(ctx context.Context, nodesensor *falconv1alpha1.FalconNodeSensor, logger logr.Logger) (bool, error) {
 	sa := corev1.ServiceAccount{}
-	err := r.Client.Get(ctx, types.NamespacedName{Name: common.NodeServiceAccountName, Namespace: nodesensor.TargetNs()}, &sa)
+	err := r.Client.Get(ctx, types.NamespacedName{Name: common.NodeServiceAccountName, Namespace: nodesensor.Spec.InstallNamespace}, &sa)
 	if err == nil || (err != nil && !errors.IsNotFound(err)) {
 		return false, err
 	}
@@ -801,7 +825,7 @@ func (r *FalconNodeSensorReconciler) handleServiceAccount(ctx context.Context, n
 			Kind:       "ServiceAccount",
 		},
 		ObjectMeta: metav1.ObjectMeta{
-			Namespace: nodesensor.TargetNs(),
+			Namespace: nodesensor.Spec.InstallNamespace,
 			Name:      common.NodeServiceAccountName,
 			Labels:    common.CRLabels("serviceaccount", common.NodeServiceAccountName, common.FalconKernelSensor),
 		},
@@ -813,7 +837,7 @@ func (r *FalconNodeSensorReconciler) handleServiceAccount(ctx context.Context, n
 	logger.Info("Creating FalconNodeSensor ServiceAccount")
 	err = r.Client.Create(ctx, &sa)
 	if err != nil && !errors.IsAlreadyExists(err) {
-		logger.Error(err, "Failed to create new ServiceAccount", "Namespace.Name", nodesensor.TargetNs())
+		logger.Error(err, "Failed to create new ServiceAccount", "Namespace.Name", nodesensor.Spec.InstallNamespace)
 		return false, err
 	}
 	return true, nil
@@ -824,7 +848,7 @@ func (r *FalconNodeSensorReconciler) handleSAAnnotations(ctx context.Context, no
 	sa := corev1.ServiceAccount{}
 	saAnnotations := nodesensor.Spec.Node.ServiceAccount.Annotations
 
-	err := r.Get(ctx, types.NamespacedName{Name: common.NodeServiceAccountName, Namespace: nodesensor.TargetNs()}, &sa)
+	err := r.Get(ctx, types.NamespacedName{Name: common.NodeServiceAccountName, Namespace: nodesensor.Spec.InstallNamespace}, &sa)
 	if err != nil && errors.IsNotFound(err) {
 		logger.Error(err, "Could not get FalconNodeSensor ServiceAccount")
 		return err
@@ -842,7 +866,7 @@ func (r *FalconNodeSensorReconciler) handleSAAnnotations(ctx context.Context, no
 
 	err = r.Update(ctx, &sa)
 	if err != nil {
-		logger.Error(err, "Failed to update ServiceAccount Annotations", "ServiceAccount.Namespace", nodesensor.TargetNs(), "Annotations", saAnnotations)
+		logger.Error(err, "Failed to update ServiceAccount Annotations", "ServiceAccount.Namespace", nodesensor.Spec.InstallNamespace, "Annotations", saAnnotations)
 		return err
 	}
 	logger.Info("Updating FalconNodeSensor ServiceAccount Annotations", "Annotations", saAnnotations)
@@ -884,7 +908,7 @@ func (r *FalconNodeSensorReconciler) finalizeDaemonset(ctx context.Context, imag
 	// Get a list of DS and return the DS within the correct NS
 	if err := r.List(ctx, dsList, &client.ListOptions{
 		LabelSelector: labels.SelectorFromSet(labels.Set{common.FalconComponentKey: common.FalconKernelSensor}),
-		Namespace:     nodesensor.TargetNs(),
+		Namespace:     nodesensor.Spec.InstallNamespace,
 	}); err != nil {
 		return err
 	}
@@ -893,7 +917,7 @@ func (r *FalconNodeSensorReconciler) finalizeDaemonset(ctx context.Context, imag
 
 	if err := r.Delete(ctx, &appsv1.DaemonSet{
 		ObjectMeta: metav1.ObjectMeta{
-			Name: nodesensor.Name, Namespace: nodesensor.TargetNs(),
+			Name: nodesensor.Name, Namespace: nodesensor.Spec.InstallNamespace,
 		},
 	}); err != nil && !errors.IsNotFound(err) {
 		logger.Error(err, "Failed to cleanup Falcon sensor DaemonSet pods")
@@ -901,7 +925,7 @@ func (r *FalconNodeSensorReconciler) finalizeDaemonset(ctx context.Context, imag
 	}
 
 	// Check if the cleanup DS is created. If not, create it.
-	err := r.Get(ctx, types.NamespacedName{Name: dsCleanupName, Namespace: nodesensor.TargetNs()}, daemonset)
+	err := r.Get(ctx, types.NamespacedName{Name: dsCleanupName, Namespace: nodesensor.Spec.InstallNamespace}, daemonset)
 	if err != nil && errors.IsNotFound(err) {
 		// Define a new DS for cleanup
 		ds := assets.RemoveNodeDirDaemonset(dsCleanupName, image, serviceAccount, nodesensor)
@@ -918,7 +942,7 @@ func (r *FalconNodeSensorReconciler) finalizeDaemonset(ctx context.Context, imag
 		// List all pods with the "cleanup" label in the appropriate NS
 		if err := r.List(ctx, &pods, &client.ListOptions{
 			LabelSelector: labels.SelectorFromSet(labels.Set{common.FalconInstanceNameKey: "cleanup"}),
-			Namespace:     nodesensor.TargetNs(),
+			Namespace:     nodesensor.Spec.InstallNamespace,
 		}); err != nil {
 			return err
 		}
@@ -946,7 +970,7 @@ func (r *FalconNodeSensorReconciler) finalizeDaemonset(ctx context.Context, imag
 			logger.Info("Waiting for cleanup pods to complete. Retrying....", "Number of pods still processing task", completedCount)
 		}
 
-		err = r.Get(ctx, types.NamespacedName{Name: dsCleanupName, Namespace: nodesensor.TargetNs()}, daemonset)
+		err = r.Get(ctx, types.NamespacedName{Name: dsCleanupName, Namespace: nodesensor.Spec.InstallNamespace}, daemonset)
 		if err != nil && errors.IsNotFound(err) {
 			logger.Info("Clean-up daemonset has been removed")
 			break
@@ -957,7 +981,7 @@ func (r *FalconNodeSensorReconciler) finalizeDaemonset(ctx context.Context, imag
 
 	if err := r.Delete(ctx, &appsv1.DaemonSet{
 		ObjectMeta: metav1.ObjectMeta{
-			Name: dsCleanupName, Namespace: nodesensor.TargetNs(),
+			Name: dsCleanupName, Namespace: nodesensor.Spec.InstallNamespace,
 		},
 	}); err != nil && !errors.IsNotFound(err) {
 		logger.Error(err, "Failed to cleanup Falcon sensor DaemonSet pods")
diff --git a/pkg/common/constants.go b/pkg/common/constants.go
index b8e29882..d25bcd6e 100644
--- a/pkg/common/constants.go
+++ b/pkg/common/constants.go
@@ -43,5 +43,5 @@ const (
 	FalconPullSecretName        = "crowdstrike-falcon-pull-secret"
 	NodeServiceAccountName      = "falcon-operator-node-sensor"
 	AdmissionServiceAccountName = "falcon-operator-admission-controller"
-	NodeClusterRoleBindingName  = "crowdstrike-falcon-node-sensor"
+	NodeClusterRoleBindingName  = "falcon-operator-node-sensor-rolebinding"
 )
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index 10d0db4b..264eb922 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -373,7 +373,7 @@ var _ = Describe("falcon", Ordered, func() {
 				)
 				status, err := utils.Run(cmd)
 				fmt.Println(string(status))
-				ExpectWithOffset(2, err).NotTo(HaveOccurred())
+				ExpectWithOffset(1, err).NotTo(HaveOccurred())
 				if !strings.Contains(string(status), "Success") {
 					return fmt.Errorf("status condition with type Success should be set")
 				}
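Taken together, the controller changes gate installation on `CheckRunningPodLabels`: any pod in the target namespace whose labels do not match the CR's own labels marks the namespace as shared, and the reconcile is refused with a failed condition. A hedged sketch of that call pattern outside the real reconcilers follows; the `nsGuard` type and function name are hypothetical, while the helper, `common.CRLabels`, and `common.FalconKernelSensor` are the ones used in this patch.

```go
package example

import (
	"context"
	"fmt"

	falconv1alpha1 "github.com/crowdstrike/falcon-operator/api/falcon/v1alpha1"
	k8sutils "github.com/crowdstrike/falcon-operator/internal/controller/common"
	"github.com/crowdstrike/falcon-operator/pkg/common"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// nsGuard is a stand-in for the reconcilers in this patch; only a client is
// needed to perform the namespace check.
type nsGuard struct {
	client.Client
}

// ensureDedicatedNamespace returns an error when spec.installNamespace already
// runs pods that do not carry the node sensor's CR labels.
func (g *nsGuard) ensureDedicatedNamespace(ctx context.Context, sensor *falconv1alpha1.FalconNodeSensor) error {
	ok, err := k8sutils.CheckRunningPodLabels(g.Client, ctx, sensor.Spec.InstallNamespace,
		common.CRLabels("daemonset", sensor.Name, common.FalconKernelSensor))
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("namespace %q already has unrelated workloads; choose a dedicated installNamespace", sensor.Spec.InstallNamespace)
	}
	return nil
}
```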
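One behavioral detail of `CheckRunningPodLabels` that may be worth pinning down in a test: because of the nil-labels guard, a pod that carries no labels at all is skipped and therefore does not block installation. The sketch below is an additional test written in the same style as `TestCheckRunningPodLabels`; the test name and object names are illustrative, and it assumes placement alongside `utils_test.go`.

```go
package common

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// TestCheckRunningPodLabelsIgnoresUnlabeledPods documents that a pod with no
// labels is skipped by the nil-labels guard and does not cause the check to fail.
func TestCheckRunningPodLabelsIgnoresUnlabeledPods(t *testing.T) {
	ctx := context.Background()
	fakeClient := fake.NewClientBuilder().Build()

	if err := fakeClient.Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace"}}); err != nil {
		t.Fatalf("Create() error = %v", err)
	}
	// Deliberately no Labels on this pod.
	if err := fakeClient.Create(ctx, &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "unlabeled-pod", Namespace: "test-namespace"}}); err != nil {
		t.Fatalf("Create() error = %v", err)
	}

	got, err := CheckRunningPodLabels(fakeClient, ctx, "test-namespace", client.MatchingLabels{"testLabel": "testPod"})
	if err != nil {
		t.Errorf("CheckRunningPodLabels() error = %v", err)
	}
	if !got {
		t.Errorf("CheckRunningPodLabels() = %v, want true when the only pod has no labels", got)
	}
}
```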