diff --git a/pkg/cmd/commands.go b/pkg/cmd/commands.go
new file mode 100644
index 000000000..299914ada
--- /dev/null
+++ b/pkg/cmd/commands.go
@@ -0,0 +1,67 @@
+package cmd
+
+import (
+    "errors"
+    "strings"
+    "time"
+
+    "github.com/aquasecurity/starboard/pkg/kube"
+    "github.com/spf13/cobra"
+    "k8s.io/cli-runtime/pkg/genericclioptions"
+)
+
+func SetGlobalFlags(cf *genericclioptions.ConfigFlags, cmd *cobra.Command) {
+    cf.AddFlags(cmd.Flags())
+    for _, c := range cmd.Commands() {
+        SetGlobalFlags(cf, c)
+    }
+}
+
+func WorkloadFromArgs(namespace string, args []string) (workload kube.Workload, err error) {
+    if len(args) < 1 {
+        err = errors.New("required workload kind and name not specified")
+        return
+    }
+
+    parts := strings.SplitN(args[0], "/", 2)
+    if len(parts) == 1 {
+        workload = kube.Workload{
+            Namespace: namespace,
+            Kind:      kube.WorkloadKindPod,
+            Name:      parts[0],
+        }
+        return
+    }
+    kind, err := kube.WorkloadKindFromString(parts[0])
+    if err != nil {
+        return
+    }
+    if "" == parts[1] {
+        err = errors.New("required workload name is blank")
+        return
+    }
+    workload = kube.Workload{
+        Namespace: namespace,
+        Kind:      kind,
+        Name:      parts[1],
+    }
+    return
+}
+
+const (
+    scanJobTimeoutFlagName = "scan-job-timeout"
+)
+
+func registerScannerOpts(cmd *cobra.Command) {
+    cmd.Flags().Duration(scanJobTimeoutFlagName, time.Duration(0),
+        "The length of time to wait before giving up on a scan job. Non-zero values should contain a"+
+            " corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means never time out the scan job.")
+}
+
+func getScannerOpts(cmd *cobra.Command) (opts kube.ScannerOpts, err error) {
+    opts.ScanJobTimeout, err = cmd.Flags().GetDuration(scanJobTimeoutFlagName)
+    if err != nil {
+        return
+    }
+    return
+}
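For context, a hedged sketch of how the relocated WorkloadFromArgs helper resolves its two argument forms. The namespace and workload names are invented, the printed kinds are illustrative, and the lowercase "deployment" alias is an assumption about WorkloadKindFromString:

```go
package main

import (
	"fmt"

	"github.com/aquasecurity/starboard/pkg/cmd"
)

func main() {
	// A bare name is treated as a Pod in the given namespace.
	w, _ := cmd.WorkloadFromArgs("dev", []string{"nginx"})
	fmt.Println(w.Kind, w.Name) // e.g. Pod nginx

	// A kind/name pair is split into its components; this assumes
	// WorkloadKindFromString accepts the lowercase "deployment" alias.
	w, _ = cmd.WorkloadFromArgs("dev", []string{"deployment/nginx"})
	fmt.Println(w.Kind, w.Name) // e.g. Deployment nginx

	// A kind with a blank name is rejected.
	_, err := cmd.WorkloadFromArgs("dev", []string{"deployment/"})
	fmt.Println(err) // required workload name is blank
}
```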
diff --git a/pkg/cmd/find_vulnerabilities.go b/pkg/cmd/find_vulnerabilities.go
index 886edab36..cfc88d577 100644
--- a/pkg/cmd/find_vulnerabilities.go
+++ b/pkg/cmd/find_vulnerabilities.go
@@ -65,7 +65,11 @@ NAME is the name of a particular Kubernetes workload.
             if err != nil {
                 return err
             }
-            reports, err := trivy.NewScanner(kubernetesClientset).Scan(ctx, workload)
+            opts, err := getScannerOpts(cmd)
+            if err != nil {
+                return
+            }
+            reports, err := trivy.NewScanner(opts, kubernetesClientset).Scan(ctx, workload)
             if err != nil {
                 return
             }
@@ -78,5 +82,7 @@ NAME is the name of a particular Kubernetes workload.
         },
     }
 
+    registerScannerOpts(cmd)
+
     return cmd
 }
diff --git a/pkg/cmd/kube_bench.go b/pkg/cmd/kube_bench.go
index 6a4018780..4a551db18 100644
--- a/pkg/cmd/kube_bench.go
+++ b/pkg/cmd/kube_bench.go
@@ -26,7 +26,11 @@ func NewKubeBenchCmd(cf *genericclioptions.ConfigFlags) *cobra.Command {
             if err != nil {
                 return
             }
-            report, node, err := kubebench.NewScanner(kubernetesClientset).Scan(ctx)
+            opts, err := getScannerOpts(cmd)
+            if err != nil {
+                return
+            }
+            report, node, err := kubebench.NewScanner(opts, kubernetesClientset).Scan(ctx)
             if err != nil {
                 return
             }
@@ -38,5 +42,8 @@ func NewKubeBenchCmd(cf *genericclioptions.ConfigFlags) *cobra.Command {
             return
         },
     }
+
+    registerScannerOpts(cmd)
+
     return cmd
 }
diff --git a/pkg/cmd/kube_hunter.go b/pkg/cmd/kube_hunter.go
index 2caa803a5..4c82d4721 100644
--- a/pkg/cmd/kube_hunter.go
+++ b/pkg/cmd/kube_hunter.go
@@ -25,7 +25,11 @@ func NewKubeHunterCmd(cf *genericclioptions.ConfigFlags) *cobra.Command {
             if err != nil {
                 return
             }
-            report, err := kubehunter.NewScanner(kubernetesClientset).Scan(ctx)
+            opts, err := getScannerOpts(cmd)
+            if err != nil {
+                return
+            }
+            report, err := kubehunter.NewScanner(opts, kubernetesClientset).Scan(ctx)
             if err != nil {
                 return
             }
@@ -40,5 +44,8 @@ func NewKubeHunterCmd(cf *genericclioptions.ConfigFlags) *cobra.Command {
             return
         },
     }
+
+    registerScannerOpts(cmd)
+
     return cmd
 }
diff --git a/pkg/cmd/polaris.go b/pkg/cmd/polaris.go
index ef8872095..e8bd11155 100644
--- a/pkg/cmd/polaris.go
+++ b/pkg/cmd/polaris.go
@@ -25,7 +25,11 @@ func NewPolarisCmd(cf *genericclioptions.ConfigFlags) *cobra.Command {
             if err != nil {
                 return
             }
-            reports, err := polaris.NewScanner(clientset).Scan(ctx)
+            opts, err := getScannerOpts(cmd)
+            if err != nil {
+                return
+            }
+            reports, err := polaris.NewScanner(opts, clientset).Scan(ctx)
             if err != nil {
                 return
             }
@@ -40,5 +44,8 @@ func NewPolarisCmd(cf *genericclioptions.ConfigFlags) *cobra.Command {
             return
         },
     }
+
+    registerScannerOpts(cmd)
+
     return cmd
 }
diff --git a/pkg/cmd/root.go b/pkg/cmd/root.go
index 38a371fbc..323184a61 100644
--- a/pkg/cmd/root.go
+++ b/pkg/cmd/root.go
@@ -1,11 +1,6 @@
 package cmd
 
 import (
-    "errors"
-    "strings"
-
-    "github.com/aquasecurity/starboard/pkg/kube"
-
     "github.com/spf13/cobra"
     "k8s.io/cli-runtime/pkg/genericclioptions"
 )
@@ -32,45 +27,7 @@ func NewRootCmd(version VersionInfo) *cobra.Command {
     rootCmd.AddCommand(NewGetCmd(cf))
     rootCmd.AddCommand(NewCleanupCmd(cf))
 
-    SetFlags(cf, rootCmd)
+    SetGlobalFlags(cf, rootCmd)
 
     return rootCmd
 }
-
-func SetFlags(cf *genericclioptions.ConfigFlags, cmd *cobra.Command) {
-    cf.AddFlags(cmd.Flags())
-    for _, c := range cmd.Commands() {
-        SetFlags(cf, c)
-    }
-}
-
-func WorkloadFromArgs(namespace string, args []string) (workload kube.Workload, err error) {
-    if len(args) < 1 {
-        err = errors.New("required workload kind and name not specified")
-        return
-    }
-
-    parts := strings.SplitN(args[0], "/", 2)
-    if len(parts) == 1 {
-        workload = kube.Workload{
-            Namespace: namespace,
-            Kind:      kube.WorkloadKindPod,
-            Name:      parts[0],
-        }
-        return
-    }
-    kind, err := kube.WorkloadKindFromString(parts[0])
-    if err != nil {
-        return
-    }
-    if "" == parts[1] {
-        err = errors.New("required workload name is blank")
-        return
-    }
-    workload = kube.Workload{
-        Namespace: namespace,
-        Kind:      kind,
-        Name:      parts[1],
-    }
-    return
-}
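All four commands wire the option the same way: registerScannerOpts adds the --scan-job-timeout flag and getScannerOpts reads it back as a time.Duration. A minimal, self-contained sketch of that round-trip using only cobra; the command name and the 2m value are invented for illustration:

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/cobra"
)

func main() {
	// Register the flag the same way registerScannerOpts does.
	c := &cobra.Command{Use: "scan"}
	c.Flags().Duration("scan-job-timeout", time.Duration(0),
		"The length of time to wait before giving up on a scan job.")

	// Simulate a user passing --scan-job-timeout 2m on the command line.
	if err := c.Flags().Parse([]string{"--scan-job-timeout", "2m"}); err != nil {
		panic(err)
	}

	// Read it back as getScannerOpts does.
	d, err := c.Flags().GetDuration("scan-job-timeout")
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 2m0s
}
```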
diff --git a/pkg/find/vulnerabilities/scanner.go b/pkg/find/vulnerabilities/scanner.go
index 4148a0120..82cc67a1e 100644
--- a/pkg/find/vulnerabilities/scanner.go
+++ b/pkg/find/vulnerabilities/scanner.go
@@ -2,12 +2,20 @@ package vulnerabilities
 
 import (
     "context"
-    sec "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1"
+
+    starboard "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1"
     "github.com/aquasecurity/starboard/pkg/kube"
     core "k8s.io/api/core/v1"
 )
 
+// Scanner defines the methods of a vulnerability scanner.
+//
+// Scan scans all container images of the specified Kubernetes workload.
+// Returns a map of container names to VulnerabilityReports.
+//
+// ScanByPodSpec scans all container images of the specified Kubernetes workload with the given PodSpec.
+// Returns a map of container names to VulnerabilityReports.
 type Scanner interface {
-    Scan(ctx context.Context, workload kube.Workload) (reports map[string]sec.VulnerabilityReport, err error)
-    ScanByPodSpec(ctx context.Context, workload kube.Workload, spec core.PodSpec) (reports map[string]sec.VulnerabilityReport, err error)
+    Scan(ctx context.Context, workload kube.Workload) (reports map[string]starboard.VulnerabilityReport, err error)
+    ScanByPodSpec(ctx context.Context, workload kube.Workload, spec core.PodSpec) (reports map[string]starboard.VulnerabilityReport, err error)
 }
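To make the contract concrete, a hedged sketch of a hypothetical consumer; it relies only on the interface above and the documented fact that the returned map is keyed by container name:

```go
package example

import (
	"context"
	"fmt"

	"github.com/aquasecurity/starboard/pkg/find/vulnerabilities"
	"github.com/aquasecurity/starboard/pkg/kube"
)

// listScannedContainers is a hypothetical helper, not part of the patch.
// It scans a workload and prints the name of every scanned container.
func listScannedContainers(ctx context.Context, scanner vulnerabilities.Scanner, workload kube.Workload) error {
	reports, err := scanner.Scan(ctx, workload)
	if err != nil {
		return err
	}
	for container := range reports {
		fmt.Printf("scanned container: %s\n", container)
	}
	return nil
}
```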
diff --git a/pkg/find/vulnerabilities/trivy/scanner.go b/pkg/find/vulnerabilities/trivy/scanner.go
index c294ec262..bce8e4594 100644
--- a/pkg/find/vulnerabilities/trivy/scanner.go
+++ b/pkg/find/vulnerabilities/trivy/scanner.go
@@ -4,10 +4,9 @@ import (
     "context"
     "fmt"
     "io"
-    "time"
 
     "github.com/aquasecurity/starboard/pkg/ext"
-
+    "github.com/aquasecurity/starboard/pkg/scanners"
     "k8s.io/klog"
 
     "github.com/aquasecurity/starboard/pkg/kube"
@@ -33,19 +32,10 @@ const (
     trivyImageRef = "docker.io/aquasec/trivy:0.8.0"
 )
 
-var (
-    scanJobRunnerTimeout = 60 * time.Second
-)
-
-type scanner struct {
-    clientset kubernetes.Interface
-    pods      *pod.Manager
-    secrets   *secret.Manager
-    converter Converter
-}
-
-func NewScanner(clientset kubernetes.Interface) vulnerabilities.Scanner {
+// NewScanner constructs a new vulnerability Scanner with the specified options and Kubernetes clientset.
+func NewScanner(opts kube.ScannerOpts, clientset kubernetes.Interface) vulnerabilities.Scanner {
     return &scanner{
+        opts:      opts,
         clientset: clientset,
         pods:      pod.NewPodManager(clientset),
         secrets:   secret.NewSecretManager(clientset),
@@ -53,6 +43,15 @@ func NewScanner(clientset kubernetes.Interface) vulnerabilities.Scanner {
     }
 }
 
+type scanner struct {
+    opts      kube.ScannerOpts
+    clientset kubernetes.Interface
+    pods      *pod.Manager
+    secrets   *secret.Manager
+    converter Converter
+    scanners.Base
+}
+
 func (s *scanner) Scan(ctx context.Context, workload kube.Workload) (reports map[string]sec.VulnerabilityReport, err error) {
     klog.V(3).Infof("Getting Pod template for workload: %v", workload)
     podSpec, err := s.pods.GetPodSpecByWorkload(ctx, workload)
@@ -69,26 +68,26 @@ func (s *scanner) Scan(ctx context.Context, workload kube.Workload) (reports map
 }
 
 func (s *scanner) ScanByPodSpec(ctx context.Context, workload kube.Workload, spec core.PodSpec) (map[string]sec.VulnerabilityReport, error) {
+    klog.V(3).Infof("Scanning with options: %+v", s.opts)
     job, err := s.prepareJob(ctx, workload, spec)
     if err != nil {
         return nil, fmt.Errorf("preparing scan job: %w", err)
     }
 
-    err = runner.New(scanJobRunnerTimeout).
-        Run(ctx, kube.NewRunnableJob(s.clientset, job))
+    err = runner.New().Run(ctx, kube.NewRunnableJob(s.clientset, job))
     if err != nil {
         return nil, fmt.Errorf("running scan job: %w", err)
     }
 
     defer func() {
-        klog.V(3).Infof("Deleting job: %s/%s", job.Namespace, job.Name)
+        klog.V(3).Infof("Deleting Job: %s/%s", job.Namespace, job.Name)
         background := meta.DeletePropagationBackground
         _ = s.clientset.BatchV1().Jobs(job.Namespace).Delete(ctx, job.Name, meta.DeleteOptions{
             PropagationPolicy: &background,
         })
     }()
 
-    klog.V(3).Infof("Scan job completed: %s/%s", job.Namespace, job.Name)
+    klog.V(3).Infof("Scan Job completed: %s/%s", job.Namespace, job.Name)
 
     job, err = s.clientset.BatchV1().Jobs(job.Namespace).Get(ctx, job.Name, meta.GetOptions{})
     if err != nil {
@@ -183,7 +182,7 @@ func (s *scanner) prepareJob(ctx context.Context, workload kube.Workload, spec c
         Spec: batch.JobSpec{
             BackoffLimit:          pointer.Int32Ptr(1),
             Completions:           pointer.Int32Ptr(1),
-            ActiveDeadlineSeconds: pointer.Int64Ptr(int64(scanJobRunnerTimeout.Seconds())),
+            ActiveDeadlineSeconds: s.GetActiveDeadlineSeconds(s.opts.ScanJobTimeout),
             Template: core.PodTemplateSpec{
                 ObjectMeta: meta.ObjectMeta{
                     Labels: map[string]string{
diff --git a/pkg/kube/workload.go b/pkg/kube/workload.go
index 330955781..c54eaaf9c 100644
--- a/pkg/kube/workload.go
+++ b/pkg/kube/workload.go
@@ -2,6 +2,7 @@ package kube
 
 import (
     "fmt"
+    "time"
 )
 
 const (
@@ -91,3 +92,8 @@ func WorkloadKindFromString(s string) (WorkloadKind, error) {
     }
     return WorkloadKindUnknown, fmt.Errorf("unrecognized workload: %s", s)
 }
+
+// ScannerOpts holds configuration common to all scanners.
+type ScannerOpts struct {
+    ScanJobTimeout time.Duration
+}
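Putting the new constructor signature and ScannerOpts together, a hedged usage sketch; the clientset comes from client-go as usual, the helper name is invented, and the 2-minute timeout is arbitrary:

```go
package example

import (
	"time"

	"github.com/aquasecurity/starboard/pkg/find/vulnerabilities"
	"github.com/aquasecurity/starboard/pkg/find/vulnerabilities/trivy"
	"github.com/aquasecurity/starboard/pkg/kube"
	"k8s.io/client-go/kubernetes"
)

// newTrivyScanner shows the new constructor signature in use. With the zero
// value of ScannerOpts the scan Job gets no ActiveDeadlineSeconds at all.
func newTrivyScanner(clientset kubernetes.Interface) vulnerabilities.Scanner {
	opts := kube.ScannerOpts{ScanJobTimeout: 2 * time.Minute}
	return trivy.NewScanner(opts, clientset)
}
```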
diff --git a/pkg/kubebench/scanner.go b/pkg/kubebench/scanner.go
index a1544f14a..a04128ddf 100644
--- a/pkg/kubebench/scanner.go
+++ b/pkg/kubebench/scanner.go
@@ -4,6 +4,8 @@ import (
     "context"
     "fmt"
 
+    "github.com/aquasecurity/starboard/pkg/scanners"
+
     "k8s.io/klog"
 
     starboard "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1"
@@ -17,8 +19,6 @@ import (
     meta "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/utils/pointer"
 
-    "time"
-
     "k8s.io/client-go/kubernetes"
 )
 
@@ -27,18 +27,17 @@ const (
     kubeBenchContainerImage = "aquasec/kube-bench:latest"
 )
 
-var (
-    runnerTimeout = 60 * time.Second
-)
-
 type Scanner struct {
+    opts      kube.ScannerOpts
     clientset kubernetes.Interface
     pods      *pod.Manager
     converter Converter
+    scanners.Base
 }
 
-func NewScanner(clientset kubernetes.Interface) *Scanner {
+func NewScanner(opts kube.ScannerOpts, clientset kubernetes.Interface) *Scanner {
     return &Scanner{
+        opts:      opts,
         clientset: clientset,
         pods:      pod.NewPodManager(clientset),
         converter: DefaultConverter,
@@ -50,8 +49,7 @@ func (s *Scanner) Scan(ctx context.Context) (report starboard.CISKubeBenchOutput
     kubeBenchJob := s.prepareKubeBenchJob()
 
     // 2. Run the prepared Job and wait for its completion or failure
-    err = runner.New(runnerTimeout).
-        Run(ctx, kube.NewRunnableJob(s.clientset, kubeBenchJob))
+    err = runner.New().Run(ctx, kube.NewRunnableJob(s.clientset, kubeBenchJob))
     if err != nil {
         err = fmt.Errorf("running kube-bench job: %w", err)
         return
@@ -108,7 +106,7 @@ func (s *Scanner) prepareKubeBenchJob() *batch.Job {
         Spec: batch.JobSpec{
             BackoffLimit:          pointer.Int32Ptr(1),
             Completions:           pointer.Int32Ptr(1),
-            ActiveDeadlineSeconds: pointer.Int64Ptr(int64(runnerTimeout.Seconds())),
+            ActiveDeadlineSeconds: s.GetActiveDeadlineSeconds(s.opts.ScanJobTimeout),
             Template: core.PodTemplateSpec{
                 ObjectMeta: meta.ObjectMeta{
                     Labels: map[string]string{
diff --git a/pkg/kubehunter/scanner.go b/pkg/kubehunter/scanner.go
index dbe92da11..214de94a6 100644
--- a/pkg/kubehunter/scanner.go
+++ b/pkg/kubehunter/scanner.go
@@ -3,7 +3,8 @@ package kubehunter
 import (
     "context"
     "fmt"
-    "time"
+
+    "github.com/aquasecurity/starboard/pkg/scanners"
 
     "k8s.io/klog"
 
@@ -25,18 +26,16 @@ const (
     kubeHunterContainerImage = "aquasec/kube-hunter:latest"
 )
 
-var (
-    runnerTimeout = 90 * time.Second
-    jobTimeout    = 60 * time.Second
-)
-
 type Scanner struct {
+    opts      kube.ScannerOpts
     clientset kubernetes.Interface
     pods      *pod.Manager
+    scanners.Base
 }
 
-func NewScanner(clientset kubernetes.Interface) *Scanner {
+func NewScanner(opts kube.ScannerOpts, clientset kubernetes.Interface) *Scanner {
     return &Scanner{
+        opts:      opts,
         clientset: clientset,
         pods:      pod.NewPodManager(clientset),
     }
@@ -47,8 +46,7 @@ func (s *Scanner) Scan(ctx context.Context) (report starboard.KubeHunterOutput,
     kubeHunterJob := s.prepareKubeHunterJob()
 
     // 2. Run the prepared Job and wait for its completion or failure
-    err = runner.New(runnerTimeout).
-        Run(ctx, kube.NewRunnableJob(s.clientset, kubeHunterJob))
+    err = runner.New().Run(ctx, kube.NewRunnableJob(s.clientset, kubeHunterJob))
     if err != nil {
         err = fmt.Errorf("running kube-hunter job: %w", err)
         return
@@ -97,7 +95,7 @@ func (s *Scanner) prepareKubeHunterJob() *batch.Job {
         Spec: batch.JobSpec{
             BackoffLimit:          pointer.Int32Ptr(1),
             Completions:           pointer.Int32Ptr(1),
-            ActiveDeadlineSeconds: pointer.Int64Ptr(int64(jobTimeout.Seconds())),
+            ActiveDeadlineSeconds: s.GetActiveDeadlineSeconds(s.opts.ScanJobTimeout),
             Template: core.PodTemplateSpec{
                 ObjectMeta: meta.ObjectMeta{
                     Labels: map[string]string{
diff --git a/pkg/polaris/scanner.go b/pkg/polaris/scanner.go
index ec185537d..1b46357bd 100644
--- a/pkg/polaris/scanner.go
+++ b/pkg/polaris/scanner.go
@@ -3,9 +3,9 @@ package polaris
 import (
     "context"
     "fmt"
-    "time"
 
     starboard "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1"
+    "github.com/aquasecurity/starboard/pkg/scanners"
 
     "k8s.io/utils/pointer"
 
@@ -21,11 +21,6 @@ import (
     "k8s.io/client-go/kubernetes"
 )
 
-const (
-    runnerTimeout = 90 * time.Second
-    jobTimeout    = 60 * time.Second
-)
-
 const (
     polarisContainerName = "polaris"
     // TODO: The latest semver tagged image 0.6.0 doesn't return audit checks ?!
@@ -35,13 +30,16 @@
 )
 
 type Scanner struct {
+    opts      kube.ScannerOpts
     clientset kubernetes.Interface
     pods      *pod.Manager
     converter Converter
+    scanners.Base
 }
 
-func NewScanner(clientset kubernetes.Interface) *Scanner {
+func NewScanner(opts kube.ScannerOpts, clientset kubernetes.Interface) *Scanner {
     return &Scanner{
+        opts:      opts,
         clientset: clientset,
         pods:      pod.NewPodManager(clientset),
         converter: DefaultConverter,
@@ -51,8 +49,7 @@ func NewScanner(clientset kubernetes.Interface) *Scanner {
 func (s *Scanner) Scan(ctx context.Context) (reports []starboard.ConfigAudit, err error) {
     polarisJob := s.preparePolarisJob()
 
-    err = runner.New(runnerTimeout).
-        Run(ctx, kube.NewRunnableJob(s.clientset, polarisJob))
+    err = runner.New().Run(ctx, kube.NewRunnableJob(s.clientset, polarisJob))
     if err != nil {
         err = fmt.Errorf("running polaris job: %w", err)
         return
@@ -93,7 +90,7 @@ func (s *Scanner) preparePolarisJob() *batch.Job {
         Spec: batch.JobSpec{
             BackoffLimit:          pointer.Int32Ptr(1),
             Completions:           pointer.Int32Ptr(1),
-            ActiveDeadlineSeconds: pointer.Int64Ptr(int64(jobTimeout.Seconds())),
+            ActiveDeadlineSeconds: s.GetActiveDeadlineSeconds(s.opts.ScanJobTimeout),
             Template: core.PodTemplateSpec{
                 ObjectMeta: meta.ObjectMeta{
                     Labels: map[string]string{
diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go
index 1276a651f..b6d8fc53b 100644
--- a/pkg/runner/runner.go
+++ b/pkg/runner/runner.go
@@ -34,27 +34,59 @@ type Runner interface {
     Run(ctx context.Context, task Runnable) error
 }
 
+// New constructs a new ready-to-use Runner for running a Runnable task.
+func New() Runner {
+    return &runner{
+        complete:        make(chan error),
+        timeoutDuration: 0,
+    }
+}
+
+// NewWithTimeout constructs a new ready-to-use Runner with the specified timeout for running a Runnable task.
+func NewWithTimeout(d time.Duration) Runner {
+    return &runner{
+        complete:        make(chan error),
+        timeoutDuration: d,
+        timeout:         time.After(d),
+    }
+}
+
 type runner struct {
     // complete channel reports that processing is done
     complete chan error
+    // timeoutDuration is the time budget for the task; zero means wait indefinitely
+    timeoutDuration time.Duration
     // timeout channel reports that time has run out
     timeout <-chan time.Time
 }
 
-// New constructs a new ready-to-use Runner with the specified timeout for running a Task.
-func New(d time.Duration) Runner {
-    return &runner{
-        complete: make(chan error),
-        timeout:  time.After(d),
-    }
-}
-
 // Run runs the specified task and monitors channel events.
-func (r *runner) Run(ctx context.Context, task Runnable) error {
+func (r *runner) Run(ctx context.Context, task Runnable) (err error) {
     go func() {
         r.complete <- task.Run(ctx)
     }()
 
+    if r.timeoutDuration > 0 {
+        err = r.runWithTimeout()
+        return
+    } else {
+        err = r.runAndWaitForever()
+        return
+    }
+}
+
+func (r *runner) runAndWaitForever() (err error) {
+    klog.V(3).Info("Running task and waiting forever")
+    select {
+    // Signaled when processing is done.
+    case err := <-r.complete:
+        klog.V(3).Infof("Stopping runner on task completion with error: %v", err)
+        return err
+    }
+}
+
+func (r *runner) runWithTimeout() (err error) {
+    klog.V(3).Infof("Running task with timeout: %v", r.timeoutDuration)
     select {
     // Signaled when processing is done.
     case err := <-r.complete:
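The runner API now splits into two constructors; notably, the scanners above all call New(), leaving the Job's ActiveDeadlineSeconds to enforce the timeout, while NewWithTimeout remains for callers that want the runner itself to give up. A hedged sketch of choosing between them (the helper name is invented):

```go
package example

import (
	"context"
	"time"

	"github.com/aquasecurity/starboard/pkg/runner"
)

// runTask is a hypothetical helper showing the choice between the two
// constructors: NewWithTimeout gives up after d, New waits indefinitely.
func runTask(ctx context.Context, task runner.Runnable, d time.Duration) error {
	if d > 0 {
		return runner.NewWithTimeout(d).Run(ctx, task)
	}
	return runner.New().Run(ctx, task)
}
```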
diff --git a/pkg/scanners/base.go b/pkg/scanners/base.go
new file mode 100644
index 000000000..0c96870cf
--- /dev/null
+++ b/pkg/scanners/base.go
@@ -0,0 +1,21 @@
+package scanners
+
+import (
+    "time"
+
+    "k8s.io/utils/pointer"
+)
+
+// Base holds behaviour common to all scanners.
+type Base struct {
+}
+
+// GetActiveDeadlineSeconds maps a scan job timeout to a Job's
+// ActiveDeadlineSeconds value; a zero duration yields nil, i.e. no deadline.
+func (s *Base) GetActiveDeadlineSeconds(d time.Duration) (timeout *int64) {
+    if d > 0 {
+        timeout = pointer.Int64Ptr(int64(d.Seconds()))
+        return
+    }
+    return
+}
diff --git a/pkg/scanners/base_test.go b/pkg/scanners/base_test.go
new file mode 100644
index 000000000..c669121dd
--- /dev/null
+++ b/pkg/scanners/base_test.go
@@ -0,0 +1 @@
+package scanners
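base_test.go is added empty here; a hedged sketch of a first test it could grow into, exercising both branches of GetActiveDeadlineSeconds:

```go
package scanners

import (
	"testing"
	"time"
)

func TestBase_GetActiveDeadlineSeconds(t *testing.T) {
	base := &Base{}

	// A zero timeout means no deadline should be set on the Job.
	if got := base.GetActiveDeadlineSeconds(0); got != nil {
		t.Errorf("expected nil for zero duration, got %d", *got)
	}

	// A non-zero timeout is converted to whole seconds.
	if got := base.GetActiveDeadlineSeconds(2 * time.Minute); got == nil || *got != 120 {
		t.Errorf("expected 120, got %v", got)
	}
}
```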