From 1e7e1fe34338f7da2db30cc01ee58fa4a099d9d0 Mon Sep 17 00:00:00 2001 From: Daniel Pacak Date: Sat, 16 Oct 2021 22:33:31 +0200 Subject: [PATCH] feat(conftest): Associate Rego policies with K8s resources Signed-off-by: Daniel Pacak --- .../configauditreport/conftest/suite_test.go | 7 +- .../conftest/testdata/run_as_root.rego | 38 ++-- .../testdata/service_with_external_ip.rego | 10 + itest/starboard/suite_test.go | 3 +- pkg/configauditreport/builder.go | 2 +- pkg/configauditreport/builder_test.go | 10 +- pkg/configauditreport/plugin.go | 18 +- pkg/configauditreport/scanner.go | 19 +- pkg/operator/controller/configauditreport.go | 42 ++-- pkg/operator/controller/plugins_config.go | 193 +++++++++++------- pkg/plugin/conftest/plugin.go | 166 ++++++++++----- pkg/plugin/conftest/plugin_test.go | 89 ++++---- pkg/plugin/polaris/plugin.go | 28 +-- pkg/plugin/polaris/plugin_test.go | 18 +- 14 files changed, 412 insertions(+), 231 deletions(-) create mode 100644 itest/starboard-operator/configauditreport/conftest/testdata/service_with_external_ip.rego diff --git a/itest/starboard-operator/configauditreport/conftest/suite_test.go b/itest/starboard-operator/configauditreport/conftest/suite_test.go index 9e2908482..076ad4d61 100644 --- a/itest/starboard-operator/configauditreport/conftest/suite_test.go +++ b/itest/starboard-operator/configauditreport/conftest/suite_test.go @@ -50,6 +50,8 @@ var ( //go:embed testdata/run_as_root.rego runAsRootPolicy string + //go:embed testdata/service_with_external_ip.rego + serviceWithExternalIPPolicy string ) func TestIntegrationOperatorWithConftest(t *testing.T) { @@ -111,7 +113,10 @@ var _ = BeforeSuite(func() { Data: map[string]string{ "conftest.imageRef": "docker.io/openpolicyagent/conftest:v0.25.0", - "conftest.policy.runs_as_root.rego": runAsRootPolicy, + "conftest.policy.runs_as_root.rego": runAsRootPolicy, + "conftest.policy.runs_as_root.kinds": "Workload", + "conftest.policy.service_with_external_ip.rego": serviceWithExternalIPPolicy, + "conftest.policy.service_with_external_ip.kinds": "Service", }, } err = kubeClient.Create(context.Background(), conftestCM) diff --git a/itest/starboard-operator/configauditreport/conftest/testdata/run_as_root.rego b/itest/starboard-operator/configauditreport/conftest/testdata/run_as_root.rego index 1f19978b0..ddb5cf5c1 100644 --- a/itest/starboard-operator/configauditreport/conftest/testdata/run_as_root.rego +++ b/itest/starboard-operator/configauditreport/conftest/testdata/run_as_root.rego @@ -1,31 +1,31 @@ -package main +package kubernetes.configaudit.run_as_root deny[res] { - input.kind == "ReplicaSet" - not input.spec.template.spec.securityContext.runAsNonRoot + input.kind == "ReplicaSet" + not input.spec.template.spec.securityContext.runAsNonRoot - res := { - "msg": "Containers must not run as root", - "title": "Run as root" - } + res := { + "msg": "Containers must not run as root", + "title": "Run as root", + } } deny[res] { - input.kind == "Pod" - not input.spec.securityContext.runAsNonRoot + input.kind == "Pod" + not input.spec.securityContext.runAsNonRoot - res := { - "msg": "Containers must not run as root", - "title": "Run as root" - } + res := { + "msg": "Containers must not run as root", + "title": "Run as root", + } } deny[res] { - input.kind == "CronJob" - not input.spec.jobTemplate.spec.template.spec.securityContext.runAsNonRoot + input.kind == "CronJob" + not input.spec.jobTemplate.spec.template.spec.securityContext.runAsNonRoot - res := { - "msg": "Containers must not run as root", - "title": "Run as root" 
- } + res := { + "msg": "Containers must not run as root", + "title": "Run as root", + } } diff --git a/itest/starboard-operator/configauditreport/conftest/testdata/service_with_external_ip.rego b/itest/starboard-operator/configauditreport/conftest/testdata/service_with_external_ip.rego new file mode 100644 index 000000000..be0ada5aa --- /dev/null +++ b/itest/starboard-operator/configauditreport/conftest/testdata/service_with_external_ip.rego @@ -0,0 +1,10 @@ +package kubernetes.configaudit.service_with_external_ip + +deny[res] { + input.kind == "Service" + count(input.spec.externalIPs) > 0 + res := { + "msg": "Service with external IP", + "title": "Service with external IP", + } +} diff --git a/itest/starboard/suite_test.go b/itest/starboard/suite_test.go index 805b28398..2ec409a0b 100644 --- a/itest/starboard/suite_test.go +++ b/itest/starboard/suite_test.go @@ -44,7 +44,8 @@ var ( Namespace: "starboard", }, Data: map[string]string{ - "conftest.imageRef": "docker.io/openpolicyagent/conftest:v0.25.0", + "conftest.imageRef": "docker.io/openpolicyagent/conftest:v0.25.0", + "conftest.policy.runs_as_root.kinds": "Workload", "conftest.policy.runs_as_root.rego": ` package main diff --git a/pkg/configauditreport/builder.go b/pkg/configauditreport/builder.go index bc91ee10d..a6cda7cde 100644 --- a/pkg/configauditreport/builder.go +++ b/pkg/configauditreport/builder.go @@ -75,7 +75,7 @@ func (s *ScanJobBuilder) Get() (*batchv1.Job, []*corev1.Secret, error) { jobSpec.Tolerations = append(jobSpec.Tolerations, s.tolerations...) - pluginConfigHash, err := s.plugin.GetConfigHash(s.pluginContext) + pluginConfigHash, err := s.plugin.ConfigHash(s.pluginContext, kube.Kind(s.object.GetObjectKind().GroupVersionKind().Kind)) if err != nil { return nil, nil, err } diff --git a/pkg/configauditreport/builder_test.go b/pkg/configauditreport/builder_test.go index aabc8b892..194b0bea0 100644 --- a/pkg/configauditreport/builder_test.go +++ b/pkg/configauditreport/builder_test.go @@ -119,12 +119,12 @@ type testPlugin struct { configHash string } -func (p *testPlugin) SupportsKind(_ kube.Kind) bool { - return true +func (p *testPlugin) SupportedKinds() []kube.Kind { + return []kube.Kind{} } -func (p *testPlugin) IsReady(_ starboard.PluginContext) (bool, error) { - return true, nil +func (p *testPlugin) IsApplicable(_ starboard.PluginContext, _ client.Object) (bool, string, error) { + return true, "", nil } func (p *testPlugin) Init(_ starboard.PluginContext) error { @@ -143,7 +143,7 @@ func (p *testPlugin) GetContainerName() string { return "" } -func (p *testPlugin) GetConfigHash(_ starboard.PluginContext) (string, error) { +func (p *testPlugin) ConfigHash(_ starboard.PluginContext, _ kube.Kind) (string, error) { return p.configHash, nil } diff --git a/pkg/configauditreport/plugin.go b/pkg/configauditreport/plugin.go index b62487c5f..052cfbd9a 100644 --- a/pkg/configauditreport/plugin.go +++ b/pkg/configauditreport/plugin.go @@ -32,15 +32,15 @@ type Plugin interface { // to read logs from. GetContainerName() string - // GetConfigHash returns hash of the plugin's configuration settings. The computed hash - // is used to invalidate v1alpha1.ConfigAuditReport object whenever configuration changes. - GetConfigHash(ctx starboard.PluginContext) (string, error) + // ConfigHash returns hash of the plugin's configuration settings. The computed hash + // is used to invalidate v1alpha1.ConfigAuditReport and v1alpha1.ClusterConfigAuditReport + // objects whenever configuration applicable to the specified resource kind changes. 
+ ConfigHash(ctx starboard.PluginContext, kind kube.Kind) (string, error) - // SupportsKind returns true if the given resource kind is supported by - // this plugin, false otherwise. - SupportsKind(kind kube.Kind) bool + // SupportedKinds returns kinds supported by this plugin. + SupportedKinds() []kube.Kind - // IsReady returns true if the plugin is ready for reconciliation, false - // otherwise. - IsReady(ctx starboard.PluginContext) (bool, error) + // IsApplicable return true if the given object can be scanned by this + // plugin, false otherwise. + IsApplicable(ctx starboard.PluginContext, obj client.Object) (bool, string, error) } diff --git a/pkg/configauditreport/scanner.go b/pkg/configauditreport/scanner.go index 13eee3cbe..82ed50200 100644 --- a/pkg/configauditreport/scanner.go +++ b/pkg/configauditreport/scanner.go @@ -46,7 +46,7 @@ func NewScanner( } func (s *Scanner) Scan(ctx context.Context, partial kube.Object) (*ReportBuilder, error) { - if !s.plugin.SupportsKind(partial.Kind) { + if !s.supportsKind(partial.Kind) { return nil, fmt.Errorf("kind %s is not supported by %s plugin", partial.Kind, s.pluginContext.GetName()) } obj, err := s.objectResolver.GetObjectFromPartialObject(ctx, partial) @@ -54,6 +54,14 @@ func (s *Scanner) Scan(ctx context.Context, partial kube.Object) (*ReportBuilder return nil, err } + applicable, reason, err := s.plugin.IsApplicable(s.pluginContext, obj) + if err != nil { + return nil, err + } + if !applicable { + return nil, fmt.Errorf("not applicable: %s", reason) + } + owner, err := s.objectResolver.ReportOwner(ctx, obj) if err != nil { return nil, err @@ -127,3 +135,12 @@ func (s *Scanner) Scan(ctx context.Context, partial kube.Object) (*ReportBuilder PluginConfigHash(pluginConfigHash). Data(result), nil } + +func (s *Scanner) supportsKind(kind kube.Kind) bool { + for _, k := range s.plugin.SupportedKinds() { + if k == kind { + return true + } + } + return false +} diff --git a/pkg/operator/controller/configauditreport.go b/pkg/operator/controller/configauditreport.go index 1854b6141..e29bedb04 100644 --- a/pkg/operator/controller/configauditreport.go +++ b/pkg/operator/controller/configauditreport.go @@ -75,7 +75,7 @@ func (r *ConfigAuditReportReconciler) SetupWithManager(mgr ctrl.Manager) error { } for _, resource := range resources { - if !r.Plugin.SupportsKind(resource.kind) { + if !r.supportsKind(resource.kind) { r.Logger.Info("Skipping unsupported kind", "pluginName", r.PluginContext.GetName(), "kind", resource.kind) continue } @@ -94,7 +94,7 @@ func (r *ConfigAuditReportReconciler) SetupWithManager(mgr ctrl.Manager) error { } for _, resource := range clusterResources { - if !r.Plugin.SupportsKind(resource.kind) { + if !r.supportsKind(resource.kind) { r.Logger.Info("Skipping unsupported kind", "pluginName", r.PluginContext.GetName(), "kind", resource.kind) continue } @@ -120,23 +120,19 @@ func (r *ConfigAuditReportReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r.reconcileJobs()) } +func (r *ConfigAuditReportReconciler) supportsKind(kind kube.Kind) bool { + for _, k := range r.Plugin.SupportedKinds() { + if k == kind { + return true + } + } + return false +} + func (r *ConfigAuditReportReconciler) reconcileResource(resourceKind kube.Kind) reconcile.Func { return func(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { log := r.Logger.WithValues("kind", resourceKind, "name", req.NamespacedName) - ready, err := r.Plugin.IsReady(r.PluginContext) - if err != nil { - return ctrl.Result{}, fmt.Errorf("checking whether 
plugin is ready: %w", err) - } - if !ready { - log.V(1).Info("Pushing back reconcile key", - "reason", "plugin not ready", - "pluginName", r.PluginContext.GetName(), - "retryAfter", r.ScanJobRetryAfter) - // TODO Introduce more generic param to retry processing a given key. - return ctrl.Result{RequeueAfter: r.Config.ScanJobRetryAfter}, nil - } - resourcePartial := kube.GetPartialObjectFromKindAndNamespacedName(resourceKind, req.NamespacedName) log.V(1).Info("Getting resource from cache") @@ -169,12 +165,26 @@ func (r *ConfigAuditReportReconciler) reconcileResource(resourceKind kube.Kind) } } + // Skip processing if plugin is not applicable to this object + applicable, reason, err := r.Plugin.IsApplicable(r.PluginContext, resource) + if err != nil { + return ctrl.Result{}, fmt.Errorf("checking whether plugin is applicable: %w", err) + } + if !applicable { + log.V(1).Info("Pushing back reconcile key", + "reason", reason, + "pluginName", r.PluginContext.GetName(), + "retryAfter", r.ScanJobRetryAfter) + // TODO Introduce more generic param to retry processing a given key. + return ctrl.Result{RequeueAfter: r.Config.ScanJobRetryAfter}, nil + } + resourceSpecHash, err := kube.ComputeSpecHash(resource) if err != nil { return ctrl.Result{}, fmt.Errorf("computing spec hash: %w", err) } - pluginConfigHash, err := r.Plugin.GetConfigHash(r.PluginContext) + pluginConfigHash, err := r.Plugin.ConfigHash(r.PluginContext, kube.Kind(resource.GetObjectKind().GroupVersionKind().Kind)) if err != nil { return ctrl.Result{}, fmt.Errorf("computing plugin config hash: %w", err) } diff --git a/pkg/operator/controller/plugins_config.go b/pkg/operator/controller/plugins_config.go index 6203b8903..c9622ea21 100644 --- a/pkg/operator/controller/plugins_config.go +++ b/pkg/operator/controller/plugins_config.go @@ -7,6 +7,7 @@ import ( "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1" "github.com/aquasecurity/starboard/pkg/configauditreport" "github.com/aquasecurity/starboard/pkg/ext" + "github.com/aquasecurity/starboard/pkg/kube" "github.com/aquasecurity/starboard/pkg/operator/etc" "github.com/aquasecurity/starboard/pkg/operator/predicate" "github.com/aquasecurity/starboard/pkg/starboard" @@ -17,6 +18,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" ) type PluginsConfigReconciler struct { @@ -28,94 +30,147 @@ type PluginsConfigReconciler struct { } func (r *PluginsConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&corev1.ConfigMap{}, builder.WithPredicates( - predicate.Not(predicate.IsBeingTerminated), - predicate.HasName(starboard.GetPluginConfigMapName(r.PluginContext.GetName())), - predicate.InNamespace(r.Config.Namespace))). - Complete(r) + opts := builder.WithPredicates( + predicate.Not(predicate.IsBeingTerminated), + predicate.HasName(starboard.GetPluginConfigMapName(r.PluginContext.GetName())), + predicate.InNamespace(r.Config.Namespace)) + + for _, kind := range r.Plugin.SupportedKinds() { + if kube.IsClusterScopedKind(string(kind)) { + err := ctrl.NewControllerManagedBy(mgr). + For(&corev1.ConfigMap{}, opts). + Complete(r.reconcileClusterConfig(kind)) + if err != nil { + return err + } + } else { + err := ctrl.NewControllerManagedBy(mgr). + For(&corev1.ConfigMap{}, opts). 
+ Complete(r.reconcileConfig(kind)) + if err != nil { + return err + } + } + } + return nil } -func (r *PluginsConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - log := r.Logger.WithValues("configMap", req.NamespacedName) +func (r *PluginsConfigReconciler) reconcileConfig(kind kube.Kind) reconcile.Func { + return func(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Logger.WithValues("configMap", req.NamespacedName) - cm := &corev1.ConfigMap{} + cm := &corev1.ConfigMap{} - err := r.Client.Get(ctx, req.NamespacedName, cm) - if err != nil { - if errors.IsNotFound(err) { - log.V(1).Info("Ignoring cached ConfigMap that must have been deleted") - return ctrl.Result{}, nil + err := r.Client.Get(ctx, req.NamespacedName, cm) + if err != nil { + if errors.IsNotFound(err) { + log.V(1).Info("Ignoring cached ConfigMap that must have been deleted") + return ctrl.Result{}, nil + } + return ctrl.Result{}, fmt.Errorf("getting ConfigMap from cache: %w", err) } - return ctrl.Result{}, fmt.Errorf("getting ConfigMap from cache: %w", err) - } - configHash, err := r.Plugin.GetConfigHash(r.PluginContext) - if err != nil { - return ctrl.Result{}, fmt.Errorf("getting config hash: %w", err) - } + configHash, err := r.Plugin.ConfigHash(r.PluginContext, kind) + if err != nil { + return ctrl.Result{}, fmt.Errorf("getting config hash: %w", err) + } - labelSelector, err := labels.Parse(fmt.Sprintf("%s != %s", starboard.LabelPluginConfigHash, configHash)) - if err != nil { - return ctrl.Result{}, fmt.Errorf("parsing label selector: %w", err) - } + labelSelector, err := labels.Parse(fmt.Sprintf("%s!=%s,%s=%s", + starboard.LabelPluginConfigHash, configHash, + starboard.LabelResourceKind, kind)) + if err != nil { + return ctrl.Result{}, fmt.Errorf("parsing label selector: %w", err) + } - var reportList v1alpha1.ConfigAuditReportList - err = r.Client.List(ctx, &reportList, - client.Limit(r.Config.BatchDeleteLimit+1), - client.MatchingLabelsSelector{Selector: labelSelector}) - if err != nil { - return ctrl.Result{}, fmt.Errorf("listing reports: %w", err) + var reportList v1alpha1.ConfigAuditReportList + err = r.Client.List(ctx, &reportList, + client.Limit(r.Config.BatchDeleteLimit+1), + client.MatchingLabelsSelector{Selector: labelSelector}) + if err != nil { + return ctrl.Result{}, fmt.Errorf("listing reports: %w", err) + } + + log.V(1).Info("Listing ConfigAuditReports", + "reportsCount", len(reportList.Items), + "batchDeleteLimit", r.Config.BatchDeleteLimit, + "labelSelector", labelSelector.String()) + + for i := 0; i < ext.MinInt(r.Config.BatchDeleteLimit, len(reportList.Items)); i++ { + report := reportList.Items[i] + log.V(1).Info("Deleting ConfigAuditReport", "report", report.Namespace+"/"+report.Name) + err := r.Client.Delete(ctx, &report) + if err != nil { + if !errors.IsNotFound(err) { + return ctrl.Result{}, fmt.Errorf("deleting ConfigAuditReport: %w", err) + } + } + } + if len(reportList.Items)-r.Config.BatchDeleteLimit > 0 { + log.V(1).Info("Requeuing reconciliation key", "requeueAfter", r.Config.BatchDeleteDelay) + return ctrl.Result{RequeueAfter: r.Config.BatchDeleteDelay}, nil + } + + log.V(1).Info("Finished reconciling key", "labelSelector", labelSelector) + return ctrl.Result{}, nil } +} - log.V(1).Info("Listing ConfigAuditReports", - "reportsCount", len(reportList.Items), - "batchDeleteLimit", r.Config.BatchDeleteLimit, - "labelSelector", labelSelector.String()) +func (r *PluginsConfigReconciler) reconcileClusterConfig(kind kube.Kind) 
reconcile.Func { + return func(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Logger.WithValues("configMap", req.NamespacedName) - for i := 0; i < ext.MinInt(r.Config.BatchDeleteLimit, len(reportList.Items)); i++ { - report := reportList.Items[i] - log.V(1).Info("Deleting ConfigAuditReport", "report", report.Namespace+"/"+report.Name) - err := r.Client.Delete(ctx, &report) + cm := &corev1.ConfigMap{} + + err := r.Client.Get(ctx, req.NamespacedName, cm) if err != nil { - if !errors.IsNotFound(err) { - return ctrl.Result{}, fmt.Errorf("deleting ConfigAuditReport: %w", err) + if errors.IsNotFound(err) { + log.V(1).Info("Ignoring cached ConfigMap that must have been deleted") + return ctrl.Result{}, nil } + return ctrl.Result{}, fmt.Errorf("getting ConfigMap from cache: %w", err) } - } - if len(reportList.Items)-r.Config.BatchDeleteLimit > 0 { - log.V(1).Info("Requeuing reconciliation key", "requeueAfter", r.Config.BatchDeleteDelay) - return ctrl.Result{RequeueAfter: r.Config.BatchDeleteDelay}, nil - } - var clusterReportList v1alpha1.ClusterConfigAuditReportList - err = r.Client.List(ctx, &clusterReportList, - client.Limit(r.Config.BatchDeleteLimit+1), - client.MatchingLabelsSelector{Selector: labelSelector}) - if err != nil { - return ctrl.Result{}, fmt.Errorf("listing reports: %w", err) - } + configHash, err := r.Plugin.ConfigHash(r.PluginContext, kind) + if err != nil { + return ctrl.Result{}, fmt.Errorf("getting config hash: %w", err) + } - log.V(1).Info("Listing ClusterConfigAuditReports", - "reportsCount", len(clusterReportList.Items), - "batchDeleteLimit", r.Config.BatchDeleteLimit, - "labelSelector", labelSelector) + labelSelector, err := labels.Parse(fmt.Sprintf("%s!=%s,%s=%s", + starboard.LabelPluginConfigHash, configHash, + starboard.LabelResourceKind, kind)) + if err != nil { + return ctrl.Result{}, fmt.Errorf("parsing label selector: %w", err) + } - for i := 0; i < ext.MinInt(r.Config.BatchDeleteLimit, len(clusterReportList.Items)); i++ { - report := clusterReportList.Items[i] - log.V(1).Info("Deleting ClusterConfigAuditReport", "report", report.Name) - err := r.Client.Delete(ctx, &report) + var clusterReportList v1alpha1.ClusterConfigAuditReportList + err = r.Client.List(ctx, &clusterReportList, + client.Limit(r.Config.BatchDeleteLimit+1), + client.MatchingLabelsSelector{Selector: labelSelector}) if err != nil { - if !errors.IsNotFound(err) { - return ctrl.Result{}, fmt.Errorf("deleting ClusterConfigAuditReport: %w", err) + return ctrl.Result{}, fmt.Errorf("listing reports: %w", err) + } + + log.V(1).Info("Listing ClusterConfigAuditReports", + "reportsCount", len(clusterReportList.Items), + "batchDeleteLimit", r.Config.BatchDeleteLimit, + "labelSelector", labelSelector) + + for i := 0; i < ext.MinInt(r.Config.BatchDeleteLimit, len(clusterReportList.Items)); i++ { + report := clusterReportList.Items[i] + log.V(1).Info("Deleting ClusterConfigAuditReport", "report", report.Name) + err := r.Client.Delete(ctx, &report) + if err != nil { + if !errors.IsNotFound(err) { + return ctrl.Result{}, fmt.Errorf("deleting ClusterConfigAuditReport: %w", err) + } } } - } - if len(clusterReportList.Items)-r.Config.BatchDeleteLimit > 0 { - log.V(1).Info("Requeuing reconciliation key", "requeueAfter", r.Config.BatchDeleteDelay) - return ctrl.Result{RequeueAfter: r.Config.BatchDeleteDelay}, nil - } + if len(clusterReportList.Items)-r.Config.BatchDeleteLimit > 0 { + log.V(1).Info("Requeuing reconciliation key", "requeueAfter", r.Config.BatchDeleteDelay) + return 
ctrl.Result{RequeueAfter: r.Config.BatchDeleteDelay}, nil + } - log.V(1).Info("Finished reconciling key", "labelSelector", labelSelector) - return ctrl.Result{}, nil + log.V(1).Info("Finished reconciling key", "labelSelector", labelSelector) + return ctrl.Result{}, nil + } } diff --git a/pkg/plugin/conftest/plugin.go b/pkg/plugin/conftest/plugin.go index e107f648d..b03fe79de 100644 --- a/pkg/plugin/conftest/plugin.go +++ b/pkg/plugin/conftest/plugin.go @@ -2,6 +2,7 @@ package conftest import ( "encoding/json" + "errors" "fmt" "io" "strings" @@ -37,6 +38,14 @@ const ( keyResourcesLimitsCPU = "conftest.resources.limits.cpu" keyResourcesLimitsMemory = "conftest.resources.limits.memory" keyPrefixPolicy = "conftest.policy." + keyPrefixLibrary = "conftest.library." + keySuffixKinds = ".kinds" + keySuffixRego = ".rego" +) + +const ( + kindAny = "*" + kindWorkload = "Workload" ) // Config defines configuration params for this plugin. @@ -49,22 +58,56 @@ func (c Config) GetImageRef() (string, error) { return c.GetRequiredData(keyImageRef) } -// GetPolicies returns Config keys prefixed with `conftest.policy.` that define -// Rego policies. -func (c Config) GetPolicies() map[string]string { - policies := make(map[string]string) - +func (c Config) GetLibraries() map[string]string { + libs := make(map[string]string) for key, value := range c.Data { - if !strings.HasPrefix(key, keyPrefixPolicy) { + if !strings.HasPrefix(key, keyPrefixLibrary) { continue } - if !strings.HasSuffix(key, ".rego") { + if !strings.HasSuffix(key, keySuffixRego) { continue } - policies[key] = value + libs[key] = value } + return libs +} - return policies +func (c Config) GetPoliciesByKind(kind string) (map[string]string, error) { + policies := make(map[string]string) + for key, value := range c.Data { + if !strings.HasSuffix(key, keySuffixKinds) { + continue + } + for _, k := range strings.Split(value, ",") { + if k == kindWorkload && !c.IsWorkload(kind) { + continue + } + if k != kindAny && k != kindWorkload && k != kind { + continue + } + + policyKey := strings.TrimSuffix(key, keySuffixKinds) + keySuffixRego + var ok bool + + policies[policyKey], ok = c.Data[policyKey] + if !ok { + return nil, fmt.Errorf("policy not found: %s", policyKey) + } + } + } + return policies, nil +} + +// TODO move to kube package? +func (c Config) IsWorkload(kind string) bool { + return kind == "Pod" || + kind == "Deployment" || + kind == "ReplicaSet" || + kind == "ReplicationController" || + kind == "StatefulSet" || + kind == "DaemonSet" || + kind == "Job" || + kind == "CronJob" } // GetResourceRequirements constructs ResourceRequirements from the Config. 
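The GetPoliciesByKind lookup added above is the heart of this change: every conftest.policy.<name>.kinds entry carries a comma-separated list of kinds, where "*" matches any kind, "Workload" expands to the built-in workload kinds, and any other value is compared literally against the object's kind. Below is a minimal, self-contained sketch of that resolution, with the key constants and the IsWorkload check inlined as stand-ins for the ones defined in this file:

package main

import (
	"fmt"
	"strings"
)

// Stand-ins for the key constants declared in pkg/plugin/conftest/plugin.go.
const (
	keySuffixKinds = ".kinds"
	keySuffixRego  = ".rego"
)

// workloadKinds mirrors Config.IsWorkload from the patch.
var workloadKinds = map[string]bool{
	"Pod": true, "Deployment": true, "ReplicaSet": true,
	"ReplicationController": true, "StatefulSet": true,
	"DaemonSet": true, "Job": true, "CronJob": true,
}

// policiesByKind resolves every "conftest.policy.<name>.kinds" entry that
// matches the given kind to its "conftest.policy.<name>.rego" module,
// following the same rules as Config.GetPoliciesByKind above.
func policiesByKind(data map[string]string, kind string) (map[string]string, error) {
	policies := make(map[string]string)
	for key, value := range data {
		if !strings.HasSuffix(key, keySuffixKinds) {
			continue
		}
		for _, k := range strings.Split(value, ",") {
			if k == "Workload" && !workloadKinds[kind] {
				continue
			}
			if k != "*" && k != "Workload" && k != kind {
				continue
			}
			regoKey := strings.TrimSuffix(key, keySuffixKinds) + keySuffixRego
			rego, ok := data[regoKey]
			if !ok {
				return nil, fmt.Errorf("policy not found: %s", regoKey)
			}
			policies[regoKey] = rego
		}
	}
	return policies, nil
}

func main() {
	data := map[string]string{
		"conftest.policy.runs_as_root.rego":              "package main ...",
		"conftest.policy.runs_as_root.kinds":             "Workload",
		"conftest.policy.service_with_external_ip.rego":  "package main ...",
		"conftest.policy.service_with_external_ip.kinds": "Service",
	}
	// A Service matches only the external-IP policy; a Pod matches only
	// the run-as-root policy via the "Workload" alias.
	fmt.Println(policiesByKind(data, "Service"))
	fmt.Println(policiesByKind(data, "Pod"))
}

Resolving from the .kinds key back to the .rego key, rather than the other way around, is what lets a policy without a kinds entry be ignored entirely while a kinds entry pointing at a missing module fails loudly.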
@@ -124,37 +167,47 @@ func NewPlugin(idGenerator ext.IDGenerator, clock ext.Clock) configauditreport.P } var ( - supportedKinds = map[kube.Kind]bool{ - kube.KindPod: true, - kube.KindDeployment: true, - kube.KindReplicaSet: true, - kube.KindReplicationController: true, - kube.KindStatefulSet: true, - kube.KindDaemonSet: true, - kube.KindCronJob: true, - kube.KindJob: true, - kube.KindService: true, - kube.KindConfigMap: true, - kube.KindRole: true, - kube.KindRoleBinding: true, - - kube.KindClusterRole: true, - kube.KindClusterRoleBindings: true, - kube.KindCustomResourceDefinition: true, + supportedKinds = []kube.Kind{ + kube.KindPod, + kube.KindDeployment, + kube.KindReplicaSet, + kube.KindReplicationController, + kube.KindStatefulSet, + kube.KindDaemonSet, + kube.KindCronJob, + kube.KindJob, + kube.KindService, + kube.KindConfigMap, + kube.KindRole, + kube.KindRoleBinding, + + kube.KindClusterRole, + kube.KindClusterRoleBindings, + kube.KindCustomResourceDefinition, } ) -func (p *plugin) SupportsKind(kind kube.Kind) bool { - return supportedKinds[kind] +func (p *plugin) SupportedKinds() []kube.Kind { + return supportedKinds } -// IsReady returns true if there is at least one policy, false otherwise. -func (p *plugin) IsReady(ctx starboard.PluginContext) (bool, error) { +// IsApplicable returns true if there is at least one policy applicable to the specified object kind, false otherwise. +func (p *plugin) IsApplicable(ctx starboard.PluginContext, obj client.Object) (bool, string, error) { config, err := p.newConfigFrom(ctx) if err != nil { - return false, err + return false, "", err + } + if obj.GetObjectKind().GroupVersionKind().Kind == "" { + return false, "", errors.New("object kind must not be nil") + } + policies, err := config.GetPoliciesByKind(obj.GetObjectKind().GroupVersionKind().Kind) + if err != nil { + return false, "", err } - return len(config.GetPolicies()) > 0, nil + if len(policies) == 0 { + return false, fmt.Sprintf("no Rego policies found for kind %s", obj.GetObjectKind().GroupVersionKind().Kind), nil + } + return true, "", nil } func (p *plugin) Init(ctx starboard.PluginContext) error { @@ -169,19 +222,16 @@ func (p *plugin) Init(ctx starboard.PluginContext) error { }) } -func (p *plugin) GetConfigHash(ctx starboard.PluginContext) (string, error) { - cm, err := ctx.GetConfig() +func (p *plugin) ConfigHash(ctx starboard.PluginContext, kind kube.Kind) (string, error) { + config, err := p.newConfigFrom(ctx) if err != nil { - return "", fmt.Errorf("getting config: %w", err) + return "", err } - data := make(map[string]string) - for key, value := range cm.Data { - if strings.HasPrefix(key, "conftest.resources.") { - continue - } - data[key] = value + modules, err := p.modulesByKind(config, string(kind)) + if err != nil { + return "", err } - return kube.ComputeHash(data), nil + return kube.ComputeHash(modules), nil } func (p *plugin) GetScanJobSpec(ctx starboard.PluginContext, obj client.Object) (corev1.PodSpec, []*corev1.Secret, error) { @@ -194,7 +244,10 @@ func (p *plugin) GetScanJobSpec(ctx starboard.PluginContext, obj client.Object) return corev1.PodSpec{}, nil, fmt.Errorf("getting image ref: %w", err) } - policies := config.GetPolicies() + modules, err := p.modulesByKind(config, obj.GetObjectKind().GroupVersionKind().Kind) + if err != nil { + return corev1.PodSpec{}, nil, err + } var volumeMounts []corev1.VolumeMount var volumeItems []corev1.KeyToPath @@ -202,25 +255,25 @@ func (p *plugin) GetScanJobSpec(ctx starboard.PluginContext, obj client.Object) secretName := 
configauditreport.GetScanJobName(obj) + "-volume" secretData := make(map[string]string) - for policy, script := range policies { - policyName := strings.TrimPrefix(policy, keyPrefixPolicy) + for module, script := range modules { + moduleName := strings.TrimPrefix(module, keyPrefixPolicy) + moduleName = strings.TrimPrefix(moduleName, keyPrefixLibrary) // Copy policies so even if the starboard-conftest-config ConfigMap has changed // before the scan Job is run, it won't fail with references to non-existent config key error. - secretData[policy] = script + secretData[module] = script volumeItems = append(volumeItems, corev1.KeyToPath{ - Key: policy, - Path: policyName, + Key: module, + Path: moduleName, }) volumeMounts = append(volumeMounts, corev1.VolumeMount{ Name: secretName, - MountPath: "/project/policy/" + policyName, - SubPath: policyName, + MountPath: "/project/policy/" + moduleName, + SubPath: moduleName, ReadOnly: true, }) - } workloadAsYAML, err := yaml.Marshal(obj) @@ -297,6 +350,17 @@ func (p *plugin) GetScanJobSpec(ctx starboard.PluginContext, obj client.Object) }}, nil } +func (p *plugin) modulesByKind(config Config, kind string) (map[string]string, error) { + modules, err := config.GetPoliciesByKind(kind) + if err != nil { + return nil, err + } + for key, value := range config.GetLibraries() { + modules[key] = value + } + return modules, nil +} + func (p *plugin) GetContainerName() string { return containerName } diff --git a/pkg/plugin/conftest/plugin_test.go b/pkg/plugin/conftest/plugin_test.go index e0b56f120..0c862c347 100644 --- a/pkg/plugin/conftest/plugin_test.go +++ b/pkg/plugin/conftest/plugin_test.go @@ -20,6 +20,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) @@ -28,7 +29,7 @@ var ( fixedClock = ext.NewFixedClock(fixedTime) ) -func TestConfig_GetPolicies(t *testing.T) { +func TestConfig_GetPoliciesByKind(t *testing.T) { g := NewGomegaWithT(t) config := conftest.Config{ PluginConfig: starboard.PluginConfig{ @@ -40,10 +41,12 @@ func TestConfig_GetPolicies(t *testing.T) { "conftest.resources.limits.cpu": "300m", "conftest.resources.limits.memory": "300M", - "conftest.policy.libkubernetes.rego": "", - "conftest.policy.libutil.rego": "", - "conftest.policy.access_to_host_pid.rego": "", - "conftest.policy.cpu_not_limited.rego": "", + "conftest.library.kubernetes.rego": "", + "conftest.library.utils.rego": "", + "conftest.policy.access_to_host_pid.rego": "", + "conftest.policy.cpu_not_limited.rego": "", + "conftest.policy.access_to_host_pid.kinds": "Pod,ReplicaSet", + "conftest.policy.cpu_not_limited.kinds": "Workload", // This one should be skipped (no .rego suffix) "conftest.policy.privileged": "", @@ -52,9 +55,7 @@ func TestConfig_GetPolicies(t *testing.T) { }, }, } - g.Expect(config.GetPolicies()).To(Equal(map[string]string{ - "conftest.policy.libkubernetes.rego": "", - "conftest.policy.libutil.rego": "", + g.Expect(config.GetPoliciesByKind("Pod")).To(Equal(map[string]string{ "conftest.policy.access_to_host_pid.rego": "", "conftest.policy.cpu_not_limited.rego": "", })) @@ -121,11 +122,12 @@ func TestConfig_GetResourceRequirements(t *testing.T) { } } -func TestPlugin_IsReady(t *testing.T) { +func TestPlugin_IsApplicable(t *testing.T) { testCases := []struct { name string configData map[string]string + obj client.Object expected bool }{ { @@ -133,12 +135,19 @@ func TestPlugin_IsReady(t *testing.T) { 
configData: map[string]string{ "conftest.imageRef": "openpolicyagent/conftest:v0.25.0", }, + obj: &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + }, expected: false, }, { name: "Should return true if there is at least one policy", configData: map[string]string{ - "conftest.imageRef": "openpolicyagent/conftest:v0.25.0", + "conftest.imageRef": "openpolicyagent/conftest:v0.25.0", + "conftest.policy.kubernetes.kinds": "Pod", "conftest.policy.kubernetes.rego": `package main deny[res] { @@ -153,6 +162,12 @@ deny[res] { } } `}, + obj: &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + }, expected: true, }, } @@ -179,7 +194,7 @@ deny[res] { Get() instance := conftest.NewPlugin(ext.NewSimpleIDGenerator(), fixedClock) - ready, err := instance.IsReady(pluginContext) + ready, _, err := instance.IsApplicable(pluginContext, tc.obj) g.Expect(err).ToNot(HaveOccurred()) g.Expect(ready).To(Equal(tc.expected)) }) @@ -303,11 +318,15 @@ func TestPlugin_GetScanJobSpec(t *testing.T) { "conftest.resources.limits.cpu": "300m", "conftest.resources.limits.memory": "300M", - "conftest.policy.libkubernetes.rego": "", - "conftest.policy.libutil.rego": "", + "conftest.library.kubernetes.rego": "", + "conftest.library.utils.rego": "", "conftest.policy.access_to_host_pid.rego": "", "conftest.policy.cpu_not_limited.rego": "", - "conftest.policy.privileged": "", // This one should be skipped (no .rego suffix) + + "conftest.policy.access_to_host_pid.kinds": "*", + "conftest.policy.cpu_not_limited.kinds": "*", + + "conftest.policy.privileged": "", // This one should be skipped (no .rego suffix) "foo": "bar", // This one should be skipped (no conftest.policy. prefix) }, @@ -363,14 +382,14 @@ func TestPlugin_GetScanJobSpec(t *testing.T) { "VolumeMounts": ConsistOf( corev1.VolumeMount{ Name: "scan-configauditreport-5d4445db4f-volume", - MountPath: "/project/policy/libkubernetes.rego", - SubPath: "libkubernetes.rego", + MountPath: "/project/policy/kubernetes.rego", + SubPath: "kubernetes.rego", ReadOnly: true, }, corev1.VolumeMount{ Name: "scan-configauditreport-5d4445db4f-volume", - MountPath: "/project/policy/libutil.rego", - SubPath: "libutil.rego", + MountPath: "/project/policy/utils.rego", + SubPath: "utils.rego", ReadOnly: true, }, corev1.VolumeMount{ @@ -415,12 +434,12 @@ func TestPlugin_GetScanJobSpec(t *testing.T) { "SecretName": Equal("scan-configauditreport-5d4445db4f-volume"), "Items": ConsistOf( corev1.KeyToPath{ - Key: "conftest.policy.libkubernetes.rego", - Path: "libkubernetes.rego", + Key: "conftest.library.kubernetes.rego", + Path: "kubernetes.rego", }, corev1.KeyToPath{ - Key: "conftest.policy.libutil.rego", - Path: "libutil.rego", + Key: "conftest.library.utils.rego", + Path: "utils.rego", }, corev1.KeyToPath{ Key: "conftest.policy.access_to_host_pid.rego", @@ -443,8 +462,8 @@ func TestPlugin_GetScanJobSpec(t *testing.T) { Namespace: "starboard-ns", }, StringData: map[string]string{ - "conftest.policy.libkubernetes.rego": "", - "conftest.policy.libutil.rego": "", + "conftest.library.kubernetes.rego": "", + "conftest.library.utils.rego": "", "conftest.policy.access_to_host_pid.rego": "", "conftest.policy.cpu_not_limited.rego": "", "starboard.workload.yaml": `metadata: @@ -671,7 +690,7 @@ func TestPlugin_ParseConfigAuditReportData(t *testing.T) { })) } -func TestPlugin_GetConfigHash(t *testing.T) { +func TestPlugin_ConfigHash(t *testing.T) { newPluginContextWithConfigData := func(data map[string]string) starboard.PluginContext { return 
starboard.NewPluginContext(). @@ -693,19 +712,19 @@ func TestPlugin_GetConfigHash(t *testing.T) { g := NewGomegaWithT(t) pluginContext1 := newPluginContextWithConfigData(map[string]string{ - "foo": "bar", - "brown": "fox", + "conftest.policy.policyA.rego": "foo", + "conftest.policy.policyA.kinds": "Pod", }) pluginContext2 := newPluginContextWithConfigData(map[string]string{ - "brown": "fox", - "foo": "baz", + "conftest.policy.policyA.rego": "bar", + "conftest.policy.policyA.kinds": "Pod", }) plugin := conftest.NewPlugin(ext.NewSimpleIDGenerator(), fixedClock) - hash1, err := plugin.GetConfigHash(pluginContext1) + hash1, err := plugin.ConfigHash(pluginContext1, "Pod") g.Expect(err).ToNot(HaveOccurred()) - hash2, err := plugin.GetConfigHash(pluginContext2) + hash2, err := plugin.ConfigHash(pluginContext2, "Pod") g.Expect(err).ToNot(HaveOccurred()) g.Expect(hash1).ToNot(Equal(hash2)) }) @@ -723,10 +742,10 @@ func TestPlugin_GetConfigHash(t *testing.T) { }) plugin := conftest.NewPlugin(ext.NewSimpleIDGenerator(), fixedClock) - hash1, err := plugin.GetConfigHash(pluginContext1) + hash1, err := plugin.ConfigHash(pluginContext1, "") g.Expect(err).ToNot(HaveOccurred()) - hash2, err := plugin.GetConfigHash(pluginContext2) + hash2, err := plugin.ConfigHash(pluginContext2, "") g.Expect(err).ToNot(HaveOccurred()) g.Expect(hash1).To(Equal(hash2)) }) @@ -744,10 +763,10 @@ func TestPlugin_GetConfigHash(t *testing.T) { }) plugin := conftest.NewPlugin(ext.NewSimpleIDGenerator(), fixedClock) - hash1, err := plugin.GetConfigHash(pluginContext1) + hash1, err := plugin.ConfigHash(pluginContext1, "") g.Expect(err).ToNot(HaveOccurred()) - hash2, err := plugin.GetConfigHash(pluginContext2) + hash2, err := plugin.ConfigHash(pluginContext2, "") g.Expect(err).ToNot(HaveOccurred()) g.Expect(hash1).To(Equal(hash2)) }) diff --git a/pkg/plugin/polaris/plugin.go b/pkg/plugin/polaris/plugin.go index fb15de42d..df9ac2422 100644 --- a/pkg/plugin/polaris/plugin.go +++ b/pkg/plugin/polaris/plugin.go @@ -285,24 +285,24 @@ func NewPlugin(clock ext.Clock) configauditreport.Plugin { } var ( - supportedKinds = map[kube.Kind]bool{ - kube.KindPod: true, - kube.KindDeployment: true, - kube.KindReplicaSet: true, - kube.KindReplicationController: true, - kube.KindStatefulSet: true, - kube.KindDaemonSet: true, - kube.KindCronJob: true, - kube.KindJob: true, + supportedKinds = []kube.Kind{ + kube.KindPod, + kube.KindDeployment, + kube.KindReplicaSet, + kube.KindReplicationController, + kube.KindStatefulSet, + kube.KindDaemonSet, + kube.KindCronJob, + kube.KindJob, } ) -func (p *plugin) SupportsKind(kind kube.Kind) bool { - return supportedKinds[kind] +func (p *plugin) SupportedKinds() []kube.Kind { + return supportedKinds } -func (p *plugin) IsReady(_ starboard.PluginContext) (bool, error) { - return true, nil +func (p *plugin) IsApplicable(_ starboard.PluginContext, _ client.Object) (bool, string, error) { + return true, "", nil } // Init ensures the default Config required by this plugin. 
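The Polaris hunks above and below show the other side of the new plugin contract: a plugin whose behavior depends on a single shared config simply ignores the kind argument to ConfigHash, while SupportedKinds feeds the linear-scan supportsKind helpers this patch adds to the scanner and the reconciler. A self-contained sketch of that contract, with stand-in types (the real interface lives in pkg/configauditreport/plugin.go and also takes a PluginContext):

package main

import "fmt"

// Stand-ins for the narrowed plugin contract introduced by this patch.
type Kind string

type Plugin interface {
	SupportedKinds() []Kind
	ConfigHash(kind Kind) (string, error)
}

// supportsKind is the linear scan that replaces the old SupportsKind
// method in both Scanner and ConfigAuditReportReconciler.
func supportsKind(p Plugin, kind Kind) bool {
	for _, k := range p.SupportedKinds() {
		if k == kind {
			return true
		}
	}
	return false
}

// polarisLike ignores the kind: one hash invalidates all of its reports.
type polarisLike struct{}

func (polarisLike) SupportedKinds() []Kind { return []Kind{"Pod", "Deployment"} }

func (polarisLike) ConfigHash(_ Kind) (string, error) { return "h1", nil }

func main() {
	var p Plugin = polarisLike{}
	fmt.Println(supportsKind(p, "Pod"))     // true
	fmt.Println(supportsKind(p, "Service")) // false
}

For Conftest, by contrast, the hash is computed over modulesByKind(config, kind), so editing a Service-only policy no longer invalidates workload reports.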
@@ -319,7 +319,7 @@ func (p *plugin) Init(ctx starboard.PluginContext) error { }) } -func (p *plugin) GetConfigHash(ctx starboard.PluginContext) (string, error) { +func (p *plugin) ConfigHash(ctx starboard.PluginContext, _ kube.Kind) (string, error) { cm, err := ctx.GetConfig() if err != nil { return "", err diff --git a/pkg/plugin/polaris/plugin_test.go b/pkg/plugin/polaris/plugin_test.go index a0434ab3e..c80449b19 100644 --- a/pkg/plugin/polaris/plugin_test.go +++ b/pkg/plugin/polaris/plugin_test.go @@ -102,7 +102,7 @@ func TestConfig_GetResourceRequirements(t *testing.T) { } } -func TestPlugin_IsReady(t *testing.T) { +func TestPlugin_IsApplicable(t *testing.T) { t.Run("Should always return true", func(t *testing.T) { g := NewGomegaWithT(t) @@ -117,7 +117,7 @@ func TestPlugin_IsReady(t *testing.T) { Get() instance := polaris.NewPlugin(fixedClock) - ready, err := instance.IsReady(pluginContext) + ready, _, err := instance.IsApplicable(pluginContext, &corev1.Pod{}) g.Expect(err).ToNot(HaveOccurred()) g.Expect(ready).To(BeTrue()) }) @@ -418,7 +418,7 @@ func TestPlugin_ParseConfigAuditReportData(t *testing.T) { })) } -func TestPlugin_GetConfigHash(t *testing.T) { +func TestPlugin_ConfigHash(t *testing.T) { newPluginContextWithConfigData := func(data map[string]string) starboard.PluginContext { return starboard.NewPluginContext(). @@ -449,10 +449,10 @@ func TestPlugin_GetConfigHash(t *testing.T) { }) plugin := polaris.NewPlugin(fixedClock) - hash1, err := plugin.GetConfigHash(pluginContext1) + hash1, err := plugin.ConfigHash(pluginContext1, "") g.Expect(err).ToNot(HaveOccurred()) - hash2, err := plugin.GetConfigHash(pluginContext2) + hash2, err := plugin.ConfigHash(pluginContext2, "") g.Expect(err).ToNot(HaveOccurred()) g.Expect(hash1).ToNot(Equal(hash2)) }) @@ -470,10 +470,10 @@ func TestPlugin_GetConfigHash(t *testing.T) { }) plugin := polaris.NewPlugin(fixedClock) - hash1, err := plugin.GetConfigHash(pluginContext1) + hash1, err := plugin.ConfigHash(pluginContext1, "") g.Expect(err).ToNot(HaveOccurred()) - hash2, err := plugin.GetConfigHash(pluginContext2) + hash2, err := plugin.ConfigHash(pluginContext2, "") g.Expect(err).ToNot(HaveOccurred()) g.Expect(hash1).To(Equal(hash2)) }) @@ -491,10 +491,10 @@ func TestPlugin_GetConfigHash(t *testing.T) { }) plugin := polaris.NewPlugin(fixedClock) - hash1, err := plugin.GetConfigHash(pluginContext1) + hash1, err := plugin.ConfigHash(pluginContext1, "") g.Expect(err).ToNot(HaveOccurred()) - hash2, err := plugin.GetConfigHash(pluginContext2) + hash2, err := plugin.ConfigHash(pluginContext2, "") g.Expect(err).ToNot(HaveOccurred()) g.Expect(hash1).To(Equal(hash2)) })
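Taken together, a cluster operator now wires policies to kinds directly in the plugin ConfigMap. A sketch of the resulting starboard-conftest-config object as built in the integration test at the top of this patch (the namespace is an assumption and the Rego bodies are elided):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// conftestConfig mirrors the ConfigMap assembled in suite_test.go: each
// policy ships as a pair of keys, the Rego module itself plus the
// comma-separated kinds ("*", "Workload", or concrete kinds) it audits.
func conftestConfig(runAsRootPolicy, serviceWithExternalIPPolicy string) *corev1.ConfigMap {
	return &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "starboard-conftest-config",
			Namespace: "starboard-operator", // assumption: the operator's install namespace
		},
		Data: map[string]string{
			"conftest.imageRef": "docker.io/openpolicyagent/conftest:v0.25.0",

			"conftest.policy.runs_as_root.rego":  runAsRootPolicy,
			"conftest.policy.runs_as_root.kinds": "Workload",

			"conftest.policy.service_with_external_ip.rego":  serviceWithExternalIPPolicy,
			"conftest.policy.service_with_external_ip.kinds": "Service",
		},
	}
}

func main() {
	cm := conftestConfig("package main ...", "package main ...")
	fmt.Println(cm.Name, len(cm.Data))
}

A policy whose kinds entry is omitted is simply never scheduled, which is why the integration test adds an explicit kinds key for both runs_as_root and service_with_external_ip.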