feat(cli): Resolve report owners to be consistent with operator #731

Merged
merged 1 commit on Oct 6, 2021
feat(cli): Resolve report owners to be consistent with operator
Starboard CLI was not consistent with Starboard Operator
when it came to assigning VulnerabilityReports and ConfigAuditReports
to K8s workloads. For example, the report generated by the following
command was controlled by the nginx Deployment, whereas the Operator
would associate it with the active ReplicaSet (aka current revision).

    starboard scan vulnerabilityreports deploy/nginx

With this patch Starboard CLI will resolve the current revision of
a Deployment and use it as the owner of VulnerabilityReport and
ConfigAuditReport instances.

Signed-off-by: Daniel Pacak <pacak.daniel@gmail.com>
danielpacak committed Oct 5, 2021
commit d97f1a7e1f451577822fe26e0aab7cb97a9ef00d
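
For orientation, here is a minimal sketch (not part of the commit) of how the resolver introduced in this patch is meant to be used by a scanner: the partial workload reference from the CLI arguments is resolved to a full object, and ObjectResolver.ReportOwner then picks the object that should own the report. The package and function names below are illustrative; only kube.ObjectResolver, GetObjectFromPartialObject, and ReportOwner come from the diff that follows.

    package example

    import (
        "context"

        "github.com/aquasecurity/starboard/pkg/kube"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // resolveReportOwner mirrors what the patched scanner does: resolve the
    // partial workload reference to a full object, then ask ObjectResolver
    // which object should own the generated report.
    func resolveReportOwner(ctx context.Context, c client.Client, partial kube.Object) (client.Object, error) {
        resolver := &kube.ObjectResolver{Client: c}

        obj, err := resolver.GetObjectFromPartialObject(ctx, partial)
        if err != nil {
            return nil, err
        }

        // For a Deployment this returns the active ReplicaSet (current revision),
        // so CLI-generated reports get the same owner the Operator would assign.
        return resolver.ReportOwner(ctx, obj)
    }
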
30 changes: 15 additions & 15 deletions Makefile
@@ -1,10 +1,13 @@
# Set the default goal
.DEFAULT_GOAL := build

export KUBECONFIG ?= ${HOME}/.kube/config

# Active module mode, as we use Go modules to manage dependencies
export GO111MODULE=on
GOPATH=$(shell go env GOPATH)
GOBIN=$(GOPATH)/bin
GINKGO=$(GOBIN)/ginkgo

SOURCES := $(shell find . -name '*.go')

@@ -37,12 +40,12 @@ build-starboard-scanner-aqua: $(SOURCES)
.PHONY: get-ginkgo
## Installs Ginkgo CLI
get-ginkgo:
go install github.com/onsi/ginkgo/ginkgo
@go install github.com/onsi/ginkgo/ginkgo

.PHONY: get-qtc
## Installs quicktemplate compiler
get-qtc:
go install github.com/valyala/quicktemplate/qtc
@go install github.com/valyala/quicktemplate/qtc

.PHONY: compile-templates
## Converts quicktemplate files (*.qtpl) into Go code
@@ -60,10 +63,8 @@ unit-tests: $(SOURCES)

.PHONY: itests-starboard
## Runs integration tests for Starboard CLI with code coverage enabled
itests-starboard: check-env get-ginkgo
$(GOBIN)/ginkgo \
--progress \
--v \
itests-starboard: check-kubeconfig get-ginkgo
@$(GINKGO) \
-coverprofile=coverage.txt \
-coverpkg=github.com/aquasecurity/starboard/pkg/cmd,\
github.com/aquasecurity/starboard/pkg/plugin,\
@@ -79,10 +80,8 @@ itests-starboard: check-env get-ginkgo

.PHONY: itests-starboard-operator
## Runs integration tests for Starboard Operator with code coverage enabled
itests-starboard-operator: check-env get-ginkgo
$(GOBIN)/ginkgo \
--progress \
--v \
itests-starboard-operator: check-kubeconfig get-ginkgo
@$(GINKGO) \
-coverprofile=coverage.txt \
-coverpkg=github.com/aquasecurity/starboard/pkg/operator,\
github.com/aquasecurity/starboard/pkg/operator/predicate,\
@@ -97,10 +96,8 @@ itests-starboard-operator: check-env get-ginkgo
./itest/starboard-operator

.PHONY: integration-operator-conftest
integration-operator-conftest: check-env get-ginkgo
$(GOBIN)/ginkgo \
--progress \
--v \
integration-operator-conftest: check-kubeconfig get-ginkgo
@$(GINKGO) \
-coverprofile=coverage.txt \
-coverpkg=github.com/aquasecurity/starboard/pkg/operator,\
github.com/aquasecurity/starboard/pkg/operator/predicate,\
@@ -110,9 +107,12 @@ integration-operator-conftest: check-env get-ginkgo
github.com/aquasecurity/starboard/pkg/configauditreport \
./itest/starboard-operator/configauditreport/conftest

check-env:
.PHONY: check-kubeconfig
check-kubeconfig:
ifndef KUBECONFIG
$(error Environment variable KUBECONFIG is not set)
else
@echo "KUBECONFIG=${KUBECONFIG}"
endif

.PHONY: clean
41 changes: 33 additions & 8 deletions itest/starboard/starboard_cli_test.go
@@ -31,6 +31,10 @@ import (
"sigs.k8s.io/yaml"
)

var (
assertTimeout = 10 * time.Second
)

var _ = Describe("Starboard CLI", func() {

BeforeEach(func() {
@@ -475,15 +479,29 @@ var _ = Describe("Starboard CLI", func() {

Context("when Deployment is specified as workload", func() {

var ctx context.Context
var deploy *appsv1.Deployment

DeploymentIsReady := func() (bool, error) {
var d appsv1.Deployment
err := kubeClient.Get(context.TODO(), client.ObjectKey{Namespace: deploy.Namespace, Name: deploy.Name}, &d)
if err != nil {
return false, err
}
return d.Status.ReadyReplicas == *d.Spec.Replicas, nil
}

BeforeEach(func() {
deploy = helper.NewDeployment().WithName("nginx").
ctx = context.TODO()
deploy = helper.NewDeployment().WithRandomName("nginx").
WithNamespace(testNamespace.Name).
WithContainer("nginx-container", "nginx:1.16").
Build()
err := kubeClient.Create(context.TODO(), deploy)
err := kubeClient.Create(ctx, deploy)
Expect(err).ToNot(HaveOccurred())

// Wait for the deployment to be ready.
Eventually(DeploymentIsReady, assertTimeout).Should(BeTrue())
})

It("should create VulnerabilityReport", func() {
@@ -495,20 +513,27 @@
}, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())

// Get updated deployment to find its revision.
err = kubeClient.Get(ctx, client.ObjectKey{Namespace: deploy.Namespace, Name: deploy.Name}, deploy)
Expect(err).ToNot(HaveOccurred())

revision, err := objectResolver.ReplicaSetByDeployment(ctx, deploy)
Expect(err).ToNot(HaveOccurred())

var reportList v1alpha1.VulnerabilityReportList
err = kubeClient.List(context.TODO(), &reportList, client.MatchingLabels{
starboard.LabelResourceKind: string(kube.KindDeployment),
starboard.LabelResourceName: deploy.Name,
starboard.LabelResourceNamespace: deploy.Namespace,
err = kubeClient.List(ctx, &reportList, client.MatchingLabels{
starboard.LabelResourceKind: string(kube.KindReplicaSet),
starboard.LabelResourceName: revision.Name,
starboard.LabelResourceNamespace: revision.Namespace,
})
Expect(err).ToNot(HaveOccurred())
Expect(reportList.Items).To(MatchAllElements(groupByContainerName, Elements{
"nginx-container": IsVulnerabilityReportForContainerOwnedBy("nginx-container", deploy),
"nginx-container": IsVulnerabilityReportForContainerOwnedBy("nginx-container", revision),
}))
})

AfterEach(func() {
err := kubeClient.Delete(context.TODO(), deploy)
err := kubeClient.Delete(ctx, deploy)
Expect(err).ToNot(HaveOccurred())
})
})
6 changes: 6 additions & 0 deletions itest/starboard/suite_test.go
@@ -8,6 +8,7 @@ import (
"os"
"testing"

"github.com/aquasecurity/starboard/pkg/kube"
"github.com/aquasecurity/starboard/pkg/starboard"
corev1 "k8s.io/api/core/v1"
apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1"
@@ -20,6 +21,7 @@ import (
var (
kubeClient client.Client
apiextensionsClientset apiextensions.ApiextensionsV1beta1Interface
objectResolver *kube.ObjectResolver
)

var (
@@ -100,6 +102,10 @@ var _ = BeforeSuite(func() {
})
Expect(err).ToNot(HaveOccurred())

objectResolver = &kube.ObjectResolver{
Client: kubeClient,
}

apiextensionsClientset, err = apiextensions.NewForConfig(config)
Expect(err).ToNot(HaveOccurred())

13 changes: 9 additions & 4 deletions pkg/configauditreport/scanner.go
@@ -45,11 +45,16 @@ func NewScanner(
}
}

func (s *Scanner) Scan(ctx context.Context, obj kube.Object) (*ReportBuilder, error) {
if !s.plugin.SupportsKind(obj.Kind) {
return nil, fmt.Errorf("kind %s is not supported by %s plugin", obj.Kind, s.pluginContext.GetName())
func (s *Scanner) Scan(ctx context.Context, partial kube.Object) (*ReportBuilder, error) {
if !s.plugin.SupportsKind(partial.Kind) {
return nil, fmt.Errorf("kind %s is not supported by %s plugin", partial.Kind, s.pluginContext.GetName())
}
owner, err := s.objectResolver.GetObjectFromPartialObject(ctx, obj)
obj, err := s.objectResolver.GetObjectFromPartialObject(ctx, partial)
if err != nil {
return nil, err
}

owner, err := s.objectResolver.ReportOwner(ctx, obj)
if err != nil {
return nil, err
}
129 changes: 129 additions & 0 deletions pkg/kube/object.go
@@ -3,6 +3,7 @@ package kube
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"

@@ -301,6 +302,134 @@ func (o *ObjectResolver) GetObjectFromPartialObject(ctx context.Context, workloa
if err != nil {
return nil, err
}
return o.ensureGVK(obj)
}

var ErrReplicaSetNotFound = errors.New("replicaset not found")

// ReportOwner resolves the owner of v1alpha1.VulnerabilityReport
// in the hierarchy of the specified built-in K8s workload.
func (o *ObjectResolver) ReportOwner(ctx context.Context, workload client.Object) (client.Object, error) {
switch v := workload.(type) {
case *appsv1.Deployment:
return o.ReplicaSetByDeployment(ctx, workload.(*appsv1.Deployment))
case *batchv1.Job:
controller := metav1.GetControllerOf(workload)
if controller == nil {
// Unmanaged Job
return workload, nil
}
if controller.Kind == string(KindCronJob) {
return o.CronJobByJob(ctx, workload.(*batchv1.Job))
}
// Job controlled by something else (usually frameworks)
return workload, nil
case *corev1.Pod:
controller := metav1.GetControllerOf(workload)
if controller == nil {
// Unmanaged Pod
return workload, nil
}
if controller.Kind == string(KindReplicaSet) {
return o.ReplicaSetByPod(ctx, workload.(*corev1.Pod))
}
if controller.Kind == string(KindJob) {
// Managed by Job or CronJob
job, err := o.JobByPod(ctx, workload.(*corev1.Pod))
if err != nil {
return nil, err
}
return o.ReportOwner(ctx, job)
}
// Pod controlled by something else (usually frameworks)
return workload, nil
case *appsv1.ReplicaSet, *corev1.ReplicationController, *appsv1.StatefulSet, *appsv1.DaemonSet, *batchv1beta1.CronJob:
return workload, nil
default:
return nil, fmt.Errorf("unsupported workload kind: %T", v)
}
}

// ReplicaSetByDeployment returns the current revision of the specified Deployment.
// If the current revision cannot be found the ErrReplicaSetNotFound error
// is returned.
func (o *ObjectResolver) ReplicaSetByDeployment(ctx context.Context, deploy *appsv1.Deployment) (*appsv1.ReplicaSet, error) {
var rsList appsv1.ReplicaSetList
err := o.Client.List(ctx, &rsList,
client.InNamespace(deploy.Namespace),
client.MatchingLabelsSelector{
Selector: labels.SelectorFromSet(deploy.Spec.Selector.MatchLabels),
})
if err != nil {
return nil, err
}
if len(rsList.Items) == 0 {
return nil, ErrReplicaSetNotFound
}
for _, rs := range rsList.Items {
if deploy.Annotations["deployment.kubernetes.io/revision"] !=
rs.Annotations["deployment.kubernetes.io/revision"] {
continue
}
obj, err := o.ensureGVK(rs.DeepCopy())
return obj.(*appsv1.ReplicaSet), err
}
return nil, ErrReplicaSetNotFound
}

// ReplicaSetByPod returns the controller ReplicaSet of the specified Pod.
func (o *ObjectResolver) ReplicaSetByPod(ctx context.Context, pod *corev1.Pod) (*appsv1.ReplicaSet, error) {
controller := metav1.GetControllerOf(pod)
if controller == nil {
return nil, fmt.Errorf("did not find a controller for pod %q", pod.Name)
}
if controller.Kind != "ReplicaSet" {
return nil, fmt.Errorf("pod %q is controlled by a %q, want replicaset", pod.Name, controller.Kind)
}
rs := &appsv1.ReplicaSet{}
err := o.Client.Get(ctx, client.ObjectKey{Namespace: pod.Namespace, Name: controller.Name}, rs)
if err != nil {
return nil, err
}
obj, err := o.ensureGVK(rs)
return obj.(*appsv1.ReplicaSet), err
}

func (o *ObjectResolver) CronJobByJob(ctx context.Context, job *batchv1.Job) (*batchv1beta1.CronJob, error) {
controller := metav1.GetControllerOf(job)
if controller == nil {
return nil, fmt.Errorf("did not find a controller for job %q", job.Name)
}
if controller.Kind != "CronJob" {
return nil, fmt.Errorf("pod %q is controlled by a %q, want CronJob", job.Name, controller.Kind)
}
cj := &batchv1beta1.CronJob{}
err := o.Client.Get(ctx, client.ObjectKey{Namespace: job.Namespace, Name: controller.Name}, cj)
if err != nil {
return nil, err
}
obj, err := o.ensureGVK(cj)
return obj.(*batchv1beta1.CronJob), err
}

func (o *ObjectResolver) JobByPod(ctx context.Context, pod *corev1.Pod) (*batchv1.Job, error) {
controller := metav1.GetControllerOf(pod)
if controller == nil {
return nil, fmt.Errorf("did not find a controller for pod %q", pod.Name)
}
if controller.Kind != "Job" {
return nil, fmt.Errorf("pod %q is controlled by a %q, want replicaset", pod.Name, controller.Kind)
}
rs := &batchv1.Job{}
err := o.Client.Get(ctx, client.ObjectKey{Namespace: pod.Namespace, Name: controller.Name}, rs)
if err != nil {
return nil, err
}
obj, err := o.ensureGVK(rs)
return obj.(*batchv1.Job), err
}

func (o *ObjectResolver) ensureGVK(obj client.Object) (client.Object, error) {
gvk, err := apiutil.GVKForObject(obj, o.Client.Scheme())
if err != nil {
return nil, err