From 2d52d470fa4d9a271e8664ad10a564b14c971869 Mon Sep 17 00:00:00 2001 From: Steve Sloka Date: Wed, 6 Oct 2021 14:51:54 -0400 Subject: [PATCH] cmd/contour: Use ContourConfiguration for all config (#4048) Convert the ServeContext to a ContourConfiguration and use that for all config items which will allow for the ServeContext to be deprecated and the optional ContourConfig CRD to be specified. Signed-off-by: Steve Sloka --- .github/workflows/prbuild.yaml | 8 + apis/projectcontour/v1alpha1/contourconfig.go | 31 +- cmd/contour/contour.go | 8 +- cmd/contour/leadership.go | 41 +- cmd/contour/serve.go | 808 +++++++----- cmd/contour/serve_test.go | 105 +- cmd/contour/servecontext.go | 292 ++++- cmd/contour/servecontext_test.go | 1165 ++++++++++++++++- examples/contour/01-crds.yaml | 29 +- examples/render/contour-gateway.yaml | 29 +- examples/render/contour.yaml | 29 +- internal/dag/cache.go | 6 +- internal/dag/httpproxy_processor.go | 3 +- internal/envoy/v3/accesslog.go | 4 +- internal/envoy/v3/accesslog_test.go | 8 +- internal/featuretests/v3/featuretests.go | 4 +- internal/featuretests/v3/listeners_test.go | 2 +- internal/k8s/informers.go | 5 - internal/xdscache/v3/listener.go | 128 +- .../docs/main/config/api-reference.html | 19 +- test/e2e/deployment.go | 131 +- test/e2e/fixtures.go | 78 ++ test/e2e/gateway/gateway_test.go | 11 +- .../multiple_gateways_and_classes_test.go | 12 +- test/e2e/httpproxy/httpproxy_test.go | 31 +- test/e2e/infra/infra_test.go | 9 +- test/e2e/ingress/ingress_test.go | 11 +- 27 files changed, 2434 insertions(+), 573 deletions(-) diff --git a/.github/workflows/prbuild.yaml b/.github/workflows/prbuild.yaml index c42ac54c0ad..38438568b70 100644 --- a/.github/workflows/prbuild.yaml +++ b/.github/workflows/prbuild.yaml @@ -43,6 +43,8 @@ jobs: # have to update branch protection rules every time we change # a Kubernetes version number. 
kubernetes_version: ["kubernetes:latest", "kubernetes:n-1", "kubernetes:n-2"] + # run tests using the configuration crd as well as without + config_type: ["ConfigmapConfiguration", "ContourConfiguration"] # include defines an additional variable (the specific node # image to use) for each kubernetes_version value. include: @@ -52,6 +54,11 @@ jobs: node_image: "docker.io/kindest/node:v1.21.2@sha256:9d07ff05e4afefbba983fac311807b3c17a5f36e7061f6cb7e2ba756255b2be4" - kubernetes_version: "kubernetes:n-2" node_image: "docker.io/kindest/node:v1.20.7@sha256:cbeaf907fc78ac97ce7b625e4bf0de16e3ea725daf6b04f930bd14c67c671ff9" + - config_type: "ConfigmapConfiguration" + use_config_crd: "false" + - config_type: "ContourConfiguration" + use_config_crd: "true" + steps: - uses: actions/checkout@v2 # TODO uncomment the below once we're using the image @@ -82,6 +89,7 @@ jobs: env: NODEIMAGE: ${{ matrix.node_image }} LOAD_PREBUILT_IMAGE: "true" + USE_CONTOUR_CONFIGURATION_CRD: ${{ matrix.use_config_crd }} run: | make e2e - uses: act10ns/slack@v1 diff --git a/apis/projectcontour/v1alpha1/contourconfig.go b/apis/projectcontour/v1alpha1/contourconfig.go index fa0b99f3b95..a8ff99af53f 100644 --- a/apis/projectcontour/v1alpha1/contourconfig.go +++ b/apis/projectcontour/v1alpha1/contourconfig.go @@ -46,7 +46,7 @@ type ContourConfigurationSpec struct { // Envoy contains parameters for Envoy as well // as how to optionally configure a managed Envoy fleet. 
// +optional - // +kubebuilder:default={listener: {useProxyProtocol: false, disableAllowChunkedLength: false, connectionBalancer: "", tls: { minimumProtocolVersion: "1.2", cipherSuites: "[ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]";"[ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305]";"ECDHE-ECDSA-AES256-GCM-SHA384";"ECDHE-RSA-AES256-GCM-SHA384" }}, service: {name: "envoy", namespace: "projectcontour"}, http: {address: "0.0.0.0", port: 8080, accessLog: "/dev/stdout"}, https: {address: "0.0.0.0", port: 8443, accessLog: "/dev/stdout"}, metrics: {address: "0.0.0.0", port: 8002}, logging: { accessLogFormat: "envoy"}, defaultHTTPVersions: "http/1.1";"http/2", cluster: {dnsLookupFamily: "auto"}, network: { adminPort: 9001}} + // +kubebuilder:default={listener: {useProxyProtocol: false, disableAllowChunkedLength: false, connectionBalancer: "", tls: { minimumProtocolVersion: "1.2", cipherSuites: "[ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]";"[ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305]";"ECDHE-ECDSA-AES256-GCM-SHA384";"ECDHE-RSA-AES256-GCM-SHA384" }}, service: {name: "envoy", namespace: "projectcontour"}, http: {address: "0.0.0.0", port: 8080, accessLog: "/dev/stdout"}, https: {address: "0.0.0.0", port: 8443, accessLog: "/dev/stdout"}, metrics: {address: "0.0.0.0", port: 8002}, logging: { accessLogFormat: "envoy"}, defaultHTTPVersions: "HTTP/1.1";"HTTP/2", cluster: {dnsLookupFamily: "auto"}, network: { adminPort: 9001}} Envoy EnvoyConfig `json:"envoy"` // Gateway contains parameters for the gateway-api Gateway that Contour @@ -79,7 +79,7 @@ type ContourConfigurationSpec struct { // +optional Policy *PolicyConfig `json:"policy,omitempty"` - // Metrics defines the endpoints Envoy use to serve to metrics. + // Metrics defines the endpoints Contour uses to serve metrics. 
// +optional // +kubebuilder:default={address: "0.0.0.0", port: 8000} Metrics MetricsConfig `json:"metrics"` @@ -169,11 +169,11 @@ type MetricsConfig struct { } // HTTPVersionType is the name of a supported HTTP version. -// +kubebuilder:validation:Enum="http/1.1";"http/2" +// +kubebuilder:validation:Enum="HTTP/1.1";"HTTP/2" type HTTPVersionType string -const HTTPVersion1 HTTPVersionType = "http/1.1" -const HTTPVersion2 HTTPVersionType = "http/2" +const HTTPVersion1 HTTPVersionType = "HTTP/1.1" +const HTTPVersion2 HTTPVersionType = "HTTP/2" // EnvoyConfig defines how Envoy is to be Configured from Contour. type EnvoyConfig struct { @@ -254,7 +254,7 @@ type DebugConfig struct { // +kubebuilder:default=0 // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=9 - KubernetesDebugLogLevel int `json:"kubernetesLogLevel"` + KubernetesDebugLogLevel uint `json:"kubernetesLogLevel"` } // EnvoyListenerConfig hold various configurable Envoy listener values. @@ -283,21 +283,6 @@ type EnvoyListenerConfig struct { // +kubebuilder:validation:Enum="[ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]";"[ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305]";"ECDHE-ECDSA-AES128-GCM-SHA256";"ECDHE-RSA-AES128-GCM-SHA256";"ECDHE-ECDSA-AES128-SHA";"ECDHE-RSA-AES128-SHA";"AES128-GCM-SHA256";"AES128-SHA";"ECDHE-ECDSA-AES256-GCM-SHA384";"ECDHE-RSA-AES256-GCM-SHA384";"ECDHE-ECDSA-AES256-SHA";"ECDHE-RSA-AES256-SHA";"AES256-GCM-SHA384";"AES256-SHA" type TLSCipherType string -const CIPHER1 TLSCipherType = "[ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]" -const CIPHER2 TLSCipherType = "[ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305]" -const CIPHER3 TLSCipherType = "ECDHE-ECDSA-AES128-GCM-SHA256" -const CIPHER4 TLSCipherType = "ECDHE-RSA-AES128-GCM-SHA256" -const CIPHER5 TLSCipherType = "ECDHE-ECDSA-AES128-SHA" -const CIPHER6 TLSCipherType = "ECDHE-RSA-AES128-SHA" -const CIPHER7 TLSCipherType = "AES128-GCM-SHA256" -const CIPHER8 TLSCipherType 
= "AES128-SHA" -const CIPHER9 TLSCipherType = "ECDHE-ECDSA-AES256-GCM-SHA384" -const CIPHER10 TLSCipherType = "ECDHE-RSA-AES256-GCM-SHA384" -const CIPHER11 TLSCipherType = "ECDHE-ECDSA-AES256-SHA" -const CIPHER12 TLSCipherType = "ECDHE-RSA-AES256-SHA" -const CIPHER13 TLSCipherType = "AES256-GCM-SHA384" -const CIPHER14 TLSCipherType = "AES256-SHA" - // EnvoyTLS describes tls parameters for Envoy listneners. type EnvoyTLS struct { // MinimumProtocolVersion is the minimum TLS version this vhost should @@ -518,6 +503,10 @@ type PolicyConfig struct { // ResponseHeadersPolicy defines the response headers set/removed on all routes // +optional ResponseHeadersPolicy *HeadersPolicy `json:"responseHeaders,omitempty"` + + // ApplyToIngress determines if the Policies will apply to ingress objects + // +optional + ApplyToIngress bool `json:"applyToIngress"` } type HeadersPolicy struct { diff --git a/cmd/contour/contour.go b/cmd/contour/contour.go index 20e10130e0b..8059e59ff3c 100644 --- a/cmd/contour/contour.go +++ b/cmd/contour/contour.go @@ -120,7 +120,13 @@ func main() { log.WithError(err).Fatal("invalid configuration") } - if err := doServe(log, serveCtx); err != nil { + // Build out serve deps. 
+ serve, err := NewServer(log, serveCtx) + if err != nil { + log.WithError(err).Fatal("unable to initialize Server dependencies required to start Contour") + } + + if err := serve.doServe(); err != nil { log.WithError(err).Fatal("Contour server failed") } case version.FullCommand(): diff --git a/cmd/contour/leadership.go b/cmd/contour/leadership.go index 09f6369980f..a2db918c8c0 100644 --- a/cmd/contour/leadership.go +++ b/cmd/contour/leadership.go @@ -16,11 +16,13 @@ package main import ( "context" "os" + "time" + + contour_api_v1alpha1 "github.com/projectcontour/contour/apis/projectcontour/v1alpha1" "github.com/google/uuid" "github.com/projectcontour/contour/internal/k8s" "github.com/projectcontour/contour/internal/workgroup" - "github.com/projectcontour/contour/pkg/config" "github.com/sirupsen/logrus" "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" @@ -40,15 +42,15 @@ func disableLeaderElection(log logrus.FieldLogger) chan struct{} { func setupLeadershipElection( g *workgroup.Group, log logrus.FieldLogger, - conf *config.LeaderElectionParameters, + conf contour_api_v1alpha1.LeaderElectionConfig, clients *k8s.Clients, updateNow func(), ) chan struct{} { le, leader, deposed := newLeaderElector(log, conf, clients) g.AddContext(func(electionCtx context.Context) error { log.WithFields(logrus.Fields{ - "configmapname": conf.Name, - "configmapnamespace": conf.Namespace, + "configmapname": conf.Configmap.Name, + "configmapnamespace": conf.Configmap.Namespace, }).Info("started leader election") le.Run(electionCtx) @@ -85,7 +87,7 @@ func setupLeadershipElection( // channels by which to observe elections and depositions. 
func newLeaderElector( log logrus.FieldLogger, - conf *config.LeaderElectionParameters, + conf contour_api_v1alpha1.LeaderElectionConfig, clients *k8s.Clients, ) (*leaderelection.LeaderElector, chan struct{}, chan struct{}) { log = log.WithField("context", "leaderelection") @@ -97,12 +99,25 @@ func newLeaderElector( rl := newResourceLock(log, conf, clients) + leaseDuration, err := time.ParseDuration(conf.LeaseDuration) + if err != nil { + log.WithError(err).Fatalf("could not parse LeaseDuration: %q", conf.LeaseDuration) + } + renewDeadline, err := time.ParseDuration(conf.RenewDeadline) + if err != nil { + log.WithError(err).Fatalf("could not parse RenewDeadline: %q", conf.RenewDeadline) + } + retryPeriod, err := time.ParseDuration(conf.RetryPeriod) + if err != nil { + log.WithError(err).Fatalf("could not parse RetryPeriod: %q", conf.RetryPeriod) + } + // Make the leader elector, ready to be used in the Workgroup. le, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{ Lock: rl, - LeaseDuration: conf.LeaseDuration, - RenewDeadline: conf.RenewDeadline, - RetryPeriod: conf.RetryPeriod, + LeaseDuration: leaseDuration, + RenewDeadline: renewDeadline, + RetryPeriod: retryPeriod, Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: func(_ context.Context) { log.WithFields(logrus.Fields{ @@ -126,7 +141,7 @@ func newLeaderElector( // newResourceLock creates a new resourcelock.Interface based on the Pod's name, // or a uuid if the name cannot be determined. 
-func newResourceLock(log logrus.FieldLogger, conf *config.LeaderElectionParameters, clients *k8s.Clients) resourcelock.Interface { +func newResourceLock(log logrus.FieldLogger, conf contour_api_v1alpha1.LeaderElectionConfig, clients *k8s.Clients) resourcelock.Interface { resourceLockID, found := os.LookupEnv("POD_NAME") if !found { resourceLockID = uuid.New().String() @@ -138,8 +153,8 @@ func newResourceLock(log logrus.FieldLogger, conf *config.LeaderElectionParamete // cycle (ie nine months). // Figure out the resource lock ID resourcelock.ConfigMapsResourceLock, - conf.Namespace, - conf.Name, + conf.Configmap.Namespace, + conf.Configmap.Name, clients.ClientSet().CoreV1(), clients.ClientSet().CoordinationV1(), resourcelock.ResourceLockConfig{ @@ -148,8 +163,8 @@ func newResourceLock(log logrus.FieldLogger, conf *config.LeaderElectionParamete ) if err != nil { log.WithError(err). - WithField("name", conf.Name). - WithField("namespace", conf.Namespace). + WithField("name", conf.Configmap.Name). + WithField("namespace", conf.Configmap.Namespace). WithField("identity", resourceLockID). 
Fatal("failed to create new resource lock") } diff --git a/cmd/contour/serve.go b/cmd/contour/serve.go index 33439c6356e..d9c09617329 100644 --- a/cmd/contour/serve.go +++ b/cmd/contour/serve.go @@ -17,20 +17,21 @@ import ( "context" "errors" "fmt" + "log" "net" "net/http" "os" "os/signal" + "regexp" "strconv" "syscall" "time" - "github.com/projectcontour/contour/internal/controller" - envoy_server_v3 "github.com/envoyproxy/go-control-plane/pkg/server/v3" contour_api_v1alpha1 "github.com/projectcontour/contour/apis/projectcontour/v1alpha1" "github.com/projectcontour/contour/internal/annotation" "github.com/projectcontour/contour/internal/contour" + "github.com/projectcontour/contour/internal/controller" "github.com/projectcontour/contour/internal/dag" "github.com/projectcontour/contour/internal/debug" "github.com/projectcontour/contour/internal/health" @@ -110,6 +111,7 @@ func registerServe(app *kingpin.Application) (*kingpin.CmdClause, *serveContext) } parsed = true + ctx.Config = *params return nil @@ -161,32 +163,74 @@ func registerServe(app *kingpin.Application) (*kingpin.CmdClause, *serveContext) return serve, ctx } -// doServe runs the contour serve subcommand. -func doServe(log logrus.FieldLogger, ctx *serveContext) error { +type Server struct { + group workgroup.Group + log logrus.FieldLogger + ctx *serveContext + clients *k8s.Clients + mgr manager.Manager + registry *prometheus.Registry +} + +// NewServer returns a Server object which contains the initial configuration +// objects required to start an instance of Contour. +func NewServer(log logrus.FieldLogger, ctx *serveContext) (*Server, error) { + + // Set up workgroup runner. + var group workgroup.Group + // Establish k8s core & dynamic client connections. 
clients, err := k8s.NewClients(ctx.Config.Kubeconfig, ctx.Config.InCluster) if err != nil { - return fmt.Errorf("failed to create Kubernetes clients: %w", err) + return nil, fmt.Errorf("failed to create Kubernetes clients: %w", err) } - // Set up workgroup runner. - var g workgroup.Group - scheme, err := k8s.NewContourScheme() if err != nil { - log.WithError(err).Fatal("unable to create scheme") + return nil, fmt.Errorf("unable to create scheme: %w", err) } - // Get the ContourConfiguration CRD if specified - if len(ctx.contourConfigurationName) > 0 { + // Instantiate a controller-runtime manager. + mgr, err := manager.New(controller_config.GetConfigOrDie(), manager.Options{ + Scheme: scheme, + }) + if err != nil { + return nil, fmt.Errorf("unable to set up controller manager: %w", err) + } + // Set up Prometheus registry and register base metrics. + registry := prometheus.NewRegistry() + registry.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{})) + registry.MustRegister(collectors.NewGoCollector()) + + return &Server{ + group: group, + log: log, + ctx: ctx, + clients: clients, + mgr: mgr, + registry: registry, + }, nil +} + +// doServe runs the contour serve subcommand. +func (s *Server) doServe() error { + + var contourConfiguration contour_api_v1alpha1.ContourConfigurationSpec + + // Get the ContourConfiguration CRD if specified + if len(s.ctx.contourConfigurationName) > 0 { // Determine the name/namespace of the configuration resource utilizing the environment // variable "CONTOUR_NAMESPACE" which should exist on the Contour deployment. // - // If the env variable is not present, it will return "" and still fail the lookup - // of the ContourConfiguration in the cluster. 
- namespacedName := types.NamespacedName{Name: ctx.contourConfigurationName, Namespace: os.Getenv("CONTOUR_NAMESPACE")} - client := clients.DynamicClient().Resource(contour_api_v1alpha1.ContourConfigurationGVR).Namespace(namespacedName.Namespace) + // If the env variable is not present, it will default to "projectcontour". + contourNamespace, found := os.LookupEnv("CONTOUR_NAMESPACE") + if !found { + contourNamespace = "projectcontour" + } + + namespacedName := types.NamespacedName{Name: s.ctx.contourConfigurationName, Namespace: contourNamespace} + client := s.clients.DynamicClient().Resource(contour_api_v1alpha1.ContourConfigurationGVR).Namespace(namespacedName.Namespace) // ensure the specified ContourConfiguration exists res, err := client.Get(context.Background(), namespacedName.Name, metav1.GetOptions{}) @@ -194,58 +238,46 @@ func doServe(log logrus.FieldLogger, ctx *serveContext) error { return fmt.Errorf("error getting contour configuration %s: %v", namespacedName, err) } - var contourConfiguration contour_api_v1alpha1.ContourConfiguration - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(res.Object, &contourConfiguration); err != nil { + var contourConfig contour_api_v1alpha1.ContourConfiguration + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(res.Object, &contourConfig); err != nil { return fmt.Errorf("error converting contour configuration %s: %v", namespacedName, err) } - } - // Instantiate a controller-runtime manager. We need this regardless of whether - // we're running the Gateway API controllers or not, because we use its cache - // everywhere. 
- mgr, err := manager.New(controller_config.GetConfigOrDie(), manager.Options{ - Scheme: scheme, - }) - if err != nil { - log.WithError(err).Fatal("unable to set up controller manager") + // Copy the Spec from the parsed Configuration + contourConfiguration = contourConfig.Spec + } else { + // No contour configuration passed, so convert the ServeContext into a ContourConfigurationSpec. + contourConfiguration = s.ctx.convertToContourConfigurationSpec() } // Register the manager with the workgroup. - g.AddContext(func(taskCtx context.Context) error { - return mgr.Start(signals.SetupSignalHandler()) + s.group.AddContext(func(taskCtx context.Context) error { + return s.mgr.Start(signals.SetupSignalHandler()) }) // informerNamespaces is a list of namespaces that we should start informers for. var informerNamespaces []string - fallbackCert := namespacedNameOf(ctx.Config.TLS.FallbackCertificate) - clientCert := namespacedNameOf(ctx.Config.TLS.ClientCertificate) - - if rootNamespaces := ctx.proxyRootNamespaces(); len(rootNamespaces) > 0 { - informerNamespaces = append(informerNamespaces, rootNamespaces...) + if len(contourConfiguration.HTTPProxy.RootNamespaces) > 0 { + informerNamespaces = append(informerNamespaces, contourConfiguration.HTTPProxy.RootNamespaces...) // Add the FallbackCertificateNamespace to informerNamespaces if it isn't present. - if !contains(informerNamespaces, ctx.Config.TLS.FallbackCertificate.Namespace) && fallbackCert != nil { - informerNamespaces = append(informerNamespaces, ctx.Config.TLS.FallbackCertificate.Namespace) - log.WithField("context", "fallback-certificate"). + if !contains(informerNamespaces, contourConfiguration.HTTPProxy.FallbackCertificate.Namespace) && contourConfiguration.HTTPProxy.FallbackCertificate != nil { + informerNamespaces = append(informerNamespaces, contourConfiguration.HTTPProxy.FallbackCertificate.Namespace) + s.log.WithField("context", "fallback-certificate"). 
Infof("fallback certificate namespace %q not defined in 'root-namespaces', adding namespace to watch", - ctx.Config.TLS.FallbackCertificate.Namespace) + contourConfiguration.HTTPProxy.FallbackCertificate.Namespace) } // Add the client certificate namespace to informerNamespaces if it isn't present. - if !contains(informerNamespaces, ctx.Config.TLS.ClientCertificate.Namespace) && clientCert != nil { - informerNamespaces = append(informerNamespaces, ctx.Config.TLS.ClientCertificate.Namespace) - log.WithField("context", "envoy-client-certificate"). + if !contains(informerNamespaces, contourConfiguration.Envoy.ClientCertificate.Namespace) && contourConfiguration.Envoy.ClientCertificate != nil { + informerNamespaces = append(informerNamespaces, contourConfiguration.Envoy.ClientCertificate.Namespace) + s.log.WithField("context", "envoy-client-certificate"). Infof("client certificate namespace %q not defined in 'root-namespaces', adding namespace to watch", - ctx.Config.TLS.ClientCertificate.Namespace) + contourConfiguration.Envoy.ClientCertificate.Namespace) } } - // Set up Prometheus registry and register base metrics. - registry := prometheus.NewRegistry() - registry.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{})) - registry.MustRegister(collectors.NewGoCollector()) - // Before we can build the event handler, we need to initialize the converter we'll // use to convert from Unstructured. converter, err := k8s.NewUnstructuredConverter() @@ -253,116 +285,41 @@ func doServe(log logrus.FieldLogger, ctx *serveContext) error { return err } - // XXX(jpeach) we know the config file validated, so all - // the timeouts will parse. Shall we add a `timeout.MustParse()` - // and use it here? 
- - connectionIdleTimeout, err := timeout.Parse(ctx.Config.Timeouts.ConnectionIdleTimeout) - if err != nil { - return fmt.Errorf("error parsing connection idle timeout: %w", err) - } - streamIdleTimeout, err := timeout.Parse(ctx.Config.Timeouts.StreamIdleTimeout) - if err != nil { - return fmt.Errorf("error parsing stream idle timeout: %w", err) - } - delayedCloseTimeout, err := timeout.Parse(ctx.Config.Timeouts.DelayedCloseTimeout) - if err != nil { - return fmt.Errorf("error parsing delayed close timeout: %w", err) - } - maxConnectionDuration, err := timeout.Parse(ctx.Config.Timeouts.MaxConnectionDuration) - if err != nil { - return fmt.Errorf("error parsing max connection duration: %w", err) - } - connectionShutdownGracePeriod, err := timeout.Parse(ctx.Config.Timeouts.ConnectionShutdownGracePeriod) - if err != nil { - return fmt.Errorf("error parsing connection shutdown grace period: %w", err) - } - requestTimeout, err := timeout.Parse(ctx.Config.Timeouts.RequestTimeout) - if err != nil { - return fmt.Errorf("error parsing request timeout: %w", err) - } - - // connection balancer - if ok := ctx.Config.Listener.ConnectionBalancer == "exact" || ctx.Config.Listener.ConnectionBalancer == ""; !ok { - log.Warnf("Invalid listener connection balancer value %q. 
Only 'exact' connection balancing is supported for now.", ctx.Config.Listener.ConnectionBalancer) - ctx.Config.Listener.ConnectionBalancer = "" - } - - listenerConfig := xdscache_v3.ListenerConfig{ - UseProxyProto: ctx.useProxyProto, - HTTPListeners: map[string]xdscache_v3.Listener{ - "ingress_http": { - Name: "ingress_http", - Address: ctx.httpAddr, - Port: ctx.httpPort, - }, - }, - HTTPSListeners: map[string]xdscache_v3.Listener{ - "ingress_https": { - Name: "ingress_https", - Address: ctx.httpsAddr, - Port: ctx.httpsPort, - }, - }, - HTTPAccessLog: ctx.httpAccessLog, - HTTPSAccessLog: ctx.httpsAccessLog, - AccessLogType: ctx.Config.AccessLogFormat, - AccessLogFields: ctx.Config.AccessLogFields, - AccessLogFormatString: ctx.Config.AccessLogFormatString, - AccessLogFormatterExtensions: ctx.Config.AccessLogFormatterExtensions(), - MinimumTLSVersion: annotation.MinTLSVersion(ctx.Config.TLS.MinimumProtocolVersion, "1.2"), - CipherSuites: config.SanitizeCipherSuites(ctx.Config.TLS.CipherSuites), - RequestTimeout: requestTimeout, - ConnectionIdleTimeout: connectionIdleTimeout, - StreamIdleTimeout: streamIdleTimeout, - DelayedCloseTimeout: delayedCloseTimeout, - MaxConnectionDuration: maxConnectionDuration, - ConnectionShutdownGracePeriod: connectionShutdownGracePeriod, - DefaultHTTPVersions: parseDefaultHTTPVersions(ctx.Config.DefaultHTTPVersions), - AllowChunkedLength: !ctx.Config.DisableAllowChunkedLength, - XffNumTrustedHops: ctx.Config.Network.XffNumTrustedHops, - ConnectionBalancer: ctx.Config.Listener.ConnectionBalancer, - } - - if ctx.Config.RateLimitService.ExtensionService != "" { - namespacedName := k8s.NamespacedNameFrom(ctx.Config.RateLimitService.ExtensionService) - client := clients.DynamicClient().Resource(contour_api_v1alpha1.ExtensionServiceGVR).Namespace(namespacedName.Namespace) - - // ensure the specified ExtensionService exists - res, err := client.Get(context.Background(), namespacedName.Name, metav1.GetOptions{}) - if err != nil { - return 
fmt.Errorf("error getting rate limit extension service %s: %v", namespacedName, err) - } - var extensionSvc contour_api_v1alpha1.ExtensionService - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(res.Object, &extensionSvc); err != nil { - return fmt.Errorf("error converting rate limit extension service %s: %v", namespacedName, err) - } - // get the response timeout from the ExtensionService - var responseTimeout timeout.Setting - if tp := extensionSvc.Spec.TimeoutPolicy; tp != nil { - responseTimeout, err = timeout.Parse(tp.Response) - if err != nil { - return fmt.Errorf("error parsing rate limit extension service %s response timeout: %v", namespacedName, err) - } - } + cipherSuites := []string{} + for _, cs := range contourConfiguration.Envoy.Listener.TLS.CipherSuites { + cipherSuites = append(cipherSuites, string(cs)) + } + + listenerConfig := xdscache_v3.NewListenerConfig( + contourConfiguration.Envoy.Listener.UseProxyProto, + contourConfiguration.Envoy.HTTPListener, + contourConfiguration.Envoy.HTTPSListener, + contourConfiguration.Envoy.Logging.AccessLogFormat, + contourConfiguration.Envoy.Logging.AccessLogFields, + contourConfiguration.Envoy.Logging.AccessLogFormatString, + AccessLogFormatterExtensions(contourConfiguration.Envoy.Logging.AccessLogFormat, contourConfiguration.Envoy.Logging.AccessLogFields, contourConfiguration.Envoy.Logging.AccessLogFormatString), + annotation.MinTLSVersion(contourConfiguration.Envoy.Listener.TLS.MinimumProtocolVersion, "1.2"), + config.SanitizeCipherSuites(cipherSuites), + contourConfiguration.Envoy.Timeouts, + parseDefaultHTTPVersions(contourConfiguration.Envoy.DefaultHTTPVersions), + !contourConfiguration.Envoy.Listener.DisableAllowChunkedLength, + contourConfiguration.Envoy.Network.XffNumTrustedHops, + contourConfiguration.Envoy.Listener.ConnectionBalancer, + s.log, + ) - listenerConfig.RateLimitConfig = &xdscache_v3.RateLimitConfig{ - ExtensionService: namespacedName, - Domain: 
ctx.Config.RateLimitService.Domain, - Timeout: responseTimeout, - FailOpen: ctx.Config.RateLimitService.FailOpen, - EnableXRateLimitHeaders: ctx.Config.RateLimitService.EnableXRateLimitHeaders, - } + if err = s.setupRateLimitService(contourConfiguration, &listenerConfig); err != nil { + return err } - contourMetrics := metrics.NewMetrics(registry) + contourMetrics := metrics.NewMetrics(s.registry) // Endpoints updates are handled directly by the EndpointsTranslator // due to their high update rate and their orthogonal nature. - endpointHandler := xdscache_v3.NewEndpointsTranslator(log.WithField("context", "endpointstranslator")) + endpointHandler := xdscache_v3.NewEndpointsTranslator(s.log.WithField("context", "endpointstranslator")) resources := []xdscache.ResourceCache{ - xdscache_v3.NewListenerCache(listenerConfig, ctx.statsAddr, ctx.statsPort, ctx.Config.Network.EnvoyAdminPort), + xdscache_v3.NewListenerCache(listenerConfig, contourConfiguration.Envoy.Metrics.Address, contourConfiguration.Envoy.Metrics.Port, contourConfiguration.Envoy.Network.EnvoyAdminPort), &xdscache_v3.SecretCache{}, &xdscache_v3.RouteCache{}, &xdscache_v3.ClusterCache{}, @@ -370,17 +327,31 @@ func doServe(log logrus.FieldLogger, ctx *serveContext) error { } // snapshotHandler is used to produce new snapshots when the internal state changes for any xDS resource. - snapshotHandler := xdscache.NewSnapshotHandler(resources, log.WithField("context", "snapshotHandler")) + snapshotHandler := xdscache.NewSnapshotHandler(resources, s.log.WithField("context", "snapshotHandler")) // register observer for endpoints updates. endpointHandler.Observer = contour.ComposeObservers(snapshotHandler) // Log that we're using the fallback certificate if configured. 
- if fallbackCert != nil { - log.WithField("context", "fallback-certificate").Infof("enabled fallback certificate with secret: %q", fallbackCert) + if contourConfiguration.HTTPProxy.FallbackCertificate != nil { + s.log.WithField("context", "fallback-certificate").Infof("enabled fallback certificate with secret: %q", contourConfiguration.HTTPProxy.FallbackCertificate) } - if clientCert != nil { - log.WithField("context", "envoy-client-certificate").Infof("enabled client certificate with secret: %q", clientCert) + if contourConfiguration.Envoy.ClientCertificate != nil { + s.log.WithField("context", "envoy-client-certificate").Infof("enabled client certificate with secret: %q", contourConfiguration.Envoy.ClientCertificate) + } + + ingressClassName := "" + if contourConfiguration.Ingress != nil && contourConfiguration.Ingress.ClassName != nil { + ingressClassName = *contourConfiguration.Ingress.ClassName + } + + var clientCert *types.NamespacedName + var fallbackCert *types.NamespacedName + if contourConfiguration.Envoy.ClientCertificate != nil { + clientCert = &types.NamespacedName{Name: contourConfiguration.Envoy.ClientCertificate.Name, Namespace: contourConfiguration.Envoy.ClientCertificate.Namespace} + } + if contourConfiguration.HTTPProxy.FallbackCertificate != nil { + fallbackCert = &types.NamespacedName{Name: contourConfiguration.HTTPProxy.FallbackCertificate.Name, Namespace: contourConfiguration.HTTPProxy.FallbackCertificate.Namespace} } // Build the core Kubernetes event handler. 
@@ -388,8 +359,19 @@ func doServe(log logrus.FieldLogger, ctx *serveContext) error { HoldoffDelay: 100 * time.Millisecond, HoldoffMaxDelay: 500 * time.Millisecond, Observer: dag.ComposeObservers(append(xdscache.ObserversOf(resources), snapshotHandler)...), - Builder: getDAGBuilder(ctx, clients, clientCert, fallbackCert, log), - FieldLogger: log.WithField("context", "contourEventHandler"), + Builder: s.getDAGBuilder(dagBuilderConfig{ + ingressClassName: ingressClassName, + rootNamespaces: contourConfiguration.HTTPProxy.RootNamespaces, + gatewayAPIConfigured: contourConfiguration.Gateway != nil, + disablePermitInsecure: contourConfiguration.HTTPProxy.DisablePermitInsecure, + enableExternalNameService: contourConfiguration.EnableExternalNameService, + dnsLookupFamily: contourConfiguration.Envoy.Cluster.DNSLookupFamily, + headersPolicy: contourConfiguration.Policy, + clients: s.clients, + clientCert: clientCert, + fallbackCert: fallbackCert, + }), + FieldLogger: s.log.WithField("context", "contourEventHandler"), } // Wrap contourHandler in an EventRecorder which tracks API server events. @@ -399,81 +381,31 @@ func doServe(log logrus.FieldLogger, ctx *serveContext) error { } // Register leadership election. - if ctx.DisableLeaderElection { - contourHandler.IsLeader = disableLeaderElection(log) + if contourConfiguration.LeaderElection.DisableLeaderElection { + contourHandler.IsLeader = disableLeaderElection(s.log) } else { - contourHandler.IsLeader = setupLeadershipElection(&g, log, &ctx.Config.LeaderElection, clients, contourHandler.UpdateNow) + contourHandler.IsLeader = setupLeadershipElection(&s.group, s.log, contourConfiguration.LeaderElection, s.clients, contourHandler.UpdateNow) } // Start setting up StatusUpdateHandler since we need it in // the Gateway API controllers. Will finish setting it up and // start it later. 
sh := k8s.StatusUpdateHandler{ - Log: log.WithField("context", "StatusUpdateHandler"), - Clients: clients, - Cache: mgr.GetCache(), + Log: s.log.WithField("context", "StatusUpdateHandler"), + Clients: s.clients, + Cache: s.mgr.GetCache(), Converter: converter, } // Inform on DefaultResources. for _, r := range k8s.DefaultResources() { - if err := informOnResource(clients, r, eventHandler, mgr.GetCache()); err != nil { - log.WithError(err).WithField("resource", r).Fatal("failed to create informer") + if err := informOnResource(s.clients, r, eventHandler, s.mgr.GetCache()); err != nil { + s.log.WithError(err).WithField("resource", r).Fatal("failed to create informer") } } - for _, r := range k8s.IngressV1Resources() { - if err := informOnResource(clients, r, eventHandler, mgr.GetCache()); err != nil { - log.WithError(err).WithField("resource", r).Fatal("failed to create informer") - } - } - - // Only inform on Gateway API resources if Gateway API is found. - if ctx.Config.GatewayConfig != nil { - if clients.ResourcesExist(k8s.GatewayAPIResources()...) { - // Create and register the gatewayclass controller with the manager. - gatewayClassControllerName := ctx.Config.GatewayConfig.ControllerName - if _, err := controller.NewGatewayClassController( - mgr, - eventHandler, - sh.Writer(), - log.WithField("context", "gatewayclass-controller"), - gatewayClassControllerName, - contourHandler.IsLeader, - ); err != nil { - log.WithError(err).Fatal("failed to create gatewayclass-controller") - } - - // Create and register the NewGatewayController controller with the manager. - if _, err := controller.NewGatewayController( - mgr, - eventHandler, - sh.Writer(), - log.WithField("context", "gateway-controller"), - gatewayClassControllerName, - contourHandler.IsLeader, - ); err != nil { - log.WithError(err).Fatal("failed to create gateway-controller") - } - - // Create and register the NewHTTPRouteController controller with the manager. 
- if _, err := controller.NewHTTPRouteController(mgr, eventHandler, log.WithField("context", "httproute-controller")); err != nil { - log.WithError(err).Fatal("failed to create httproute-controller") - } - - // Create and register the NewTLSRouteController controller with the manager. - if _, err := controller.NewTLSRouteController(mgr, eventHandler, log.WithField("context", "tlsroute-controller")); err != nil { - log.WithError(err).Fatal("failed to create tlsroute-controller") - } - - // Inform on Namespaces. - if err := informOnResource(clients, k8s.NamespacesResource(), eventHandler, mgr.GetCache()); err != nil { - log.WithError(err).WithField("resource", k8s.NamespacesResource()).Fatal("failed to create informer") - } - } else { - log.Fatalf("Gateway API Gateway configured but APIs not installed in cluster.") - } - } + // Inform on Gateway API resources. + s.setupGatewayAPI(contourConfiguration, s.mgr, eventHandler, &sh, contourHandler.IsLeader) // Inform on secrets, filtering by root namespaces. for _, r := range k8s.SecretsResources() { @@ -484,67 +416,32 @@ func doServe(log logrus.FieldLogger, ctx *serveContext) error { handler = k8s.NewNamespaceFilter(informerNamespaces, eventHandler) } - if err := informOnResource(clients, r, handler, mgr.GetCache()); err != nil { - log.WithError(err).WithField("resource", r).Fatal("failed to create informer") + if err := informOnResource(s.clients, r, handler, s.mgr.GetCache()); err != nil { + s.log.WithError(err).WithField("resource", r).Fatal("failed to create informer") } } // Inform on endpoints. 
for _, r := range k8s.EndpointsResources() { - if err := informOnResource(clients, r, &contour.EventRecorder{ + if err := informOnResource(s.clients, r, &contour.EventRecorder{ Next: endpointHandler, Counter: contourMetrics.EventHandlerOperations, - }, mgr.GetCache()); err != nil { - log.WithError(err).WithField("resource", r).Fatal("failed to create informer") + }, s.mgr.GetCache()); err != nil { + s.log.WithError(err).WithField("resource", r).Fatal("failed to create informer") } } // Register our event handler with the workgroup. - g.Add(contourHandler.Start()) + s.group.Add(contourHandler.Start()) - // Create metrics service and register with workgroup. - metricsvc := httpsvc.Service{ - Addr: ctx.metricsAddr, - Port: ctx.metricsPort, - FieldLogger: log.WithField("context", "metricsvc"), - ServeMux: http.ServeMux{}, - } - - metricsvc.ServeMux.Handle("/metrics", metrics.Handler(registry)) - - if ctx.healthAddr == ctx.metricsAddr && ctx.healthPort == ctx.metricsPort { - h := health.Handler(clients.ClientSet()) - metricsvc.ServeMux.Handle("/health", h) - metricsvc.ServeMux.Handle("/healthz", h) - } - - g.Add(metricsvc.Start) + // Create metrics service. + s.setupMetrics(contourConfiguration.Metrics, contourConfiguration.Health, s.registry) // Create a separate health service if required. - if ctx.healthAddr != ctx.metricsAddr || ctx.healthPort != ctx.metricsPort { - healthsvc := httpsvc.Service{ - Addr: ctx.healthAddr, - Port: ctx.healthPort, - FieldLogger: log.WithField("context", "healthsvc"), - } - - h := health.Handler(clients.ClientSet()) - healthsvc.ServeMux.Handle("/health", h) - healthsvc.ServeMux.Handle("/healthz", h) - - g.Add(healthsvc.Start) - } + s.setupHealth(contourConfiguration.Health, contourConfiguration.Metrics) // Create debug service and register with workgroup. 
- debugsvc := debug.Service{ - Service: httpsvc.Service{ - Addr: ctx.debugAddr, - Port: ctx.debugPort, - FieldLogger: log.WithField("context", "debugsvc"), - }, - Builder: &contourHandler.Builder, - } - g.Add(debugsvc.Start) + s.setupDebugService(contourConfiguration.Debug, contourHandler) // Once we have the leadership detection channel, we can // push DAG rebuild metrics onto the observer stack. @@ -557,7 +454,7 @@ func doServe(log logrus.FieldLogger, ctx *serveContext) error { // Finish setting up the StatusUpdateHandler and // add it to the work group. sh.LeaderElected = contourHandler.IsLeader - g.Add(sh.Start) + s.group.Add(sh.Start) // Now we have the statusUpdateHandler, we can create the event handler's StatusUpdater, which will take the // status updates from the DAG, and send them to the status update handler. @@ -565,46 +462,120 @@ func doServe(log logrus.FieldLogger, ctx *serveContext) error { // Set up ingress load balancer status writer. lbsw := loadBalancerStatusWriter{ - log: log.WithField("context", "loadBalancerStatusWriter"), - cache: mgr.GetCache(), + log: s.log.WithField("context", "loadBalancerStatusWriter"), + cache: s.mgr.GetCache(), isLeader: contourHandler.IsLeader, lbStatus: make(chan corev1.LoadBalancerStatus, 1), - ingressClassName: ctx.ingressClassName, + ingressClassName: ingressClassName, statusUpdater: sh.Writer(), Converter: converter, } - g.Add(lbsw.Start) + s.group.Add(lbsw.Start) // Register an informer to watch envoy's service if we haven't been given static details. 
- if lbAddr := ctx.Config.IngressStatusAddress; lbAddr != "" { - log.WithField("loadbalancer-address", lbAddr).Info("Using supplied information for Ingress status") - lbsw.lbStatus <- parseStatusFlag(lbAddr) + if contourConfiguration.Ingress != nil && contourConfiguration.Ingress.StatusAddress != nil { + s.log.WithField("loadbalancer-address", *contourConfiguration.Ingress.StatusAddress).Info("Using supplied information for Ingress status") + lbsw.lbStatus <- parseStatusFlag(*contourConfiguration.Ingress.StatusAddress) } else { serviceHandler := &k8s.ServiceStatusLoadBalancerWatcher{ - ServiceName: ctx.Config.EnvoyServiceName, + ServiceName: contourConfiguration.Envoy.Service.Name, LBStatus: lbsw.lbStatus, - Log: log.WithField("context", "serviceStatusLoadBalancerWatcher"), + Log: s.log.WithField("context", "serviceStatusLoadBalancerWatcher"), } for _, r := range k8s.ServicesResources() { var handler cache.ResourceEventHandler = serviceHandler - if ctx.Config.EnvoyServiceNamespace != "" { - handler = k8s.NewNamespaceFilter([]string{ctx.Config.EnvoyServiceNamespace}, handler) + if contourConfiguration.Envoy.Service.Namespace != "" { + handler = k8s.NewNamespaceFilter([]string{contourConfiguration.Envoy.Service.Namespace}, handler) } - if err := informOnResource(clients, r, handler, mgr.GetCache()); err != nil { - log.WithError(err).WithField("resource", r).Fatal("failed to create informer") + if err := informOnResource(s.clients, r, handler, s.mgr.GetCache()); err != nil { + s.log.WithError(err).WithField("resource", r).Fatal("failed to create informer") } } - log.WithField("envoy-service-name", ctx.Config.EnvoyServiceName). - WithField("envoy-service-namespace", ctx.Config.EnvoyServiceNamespace). + s.log.WithField("envoy-service-name", contourConfiguration.Envoy.Service.Name). + WithField("envoy-service-namespace", contourConfiguration.Envoy.Service.Namespace). 
Info("Watching Service for Ingress status") } - g.AddContext(func(taskCtx context.Context) error { - log := log.WithField("context", "xds") + s.setupXDSServer(s.mgr, s.registry, contourConfiguration.XDSServer, snapshotHandler, resources) + + // Set up SIGTERM handler for graceful shutdown. + s.group.Add(func(stop <-chan struct{}) error { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGTERM, syscall.SIGINT) + select { + case sig := <-c: + s.log.WithField("context", "sigterm-handler").WithField("signal", sig).Info("shutting down") + case <-stop: + // Do nothing. The group is shutting down. + } + return nil + }) + + // GO! + return s.group.Run(context.Background()) +} + +func (s *Server) setupRateLimitService(contourConfiguration contour_api_v1alpha1.ContourConfigurationSpec, listenerConfig *xdscache_v3.ListenerConfig) error { + if contourConfiguration.RateLimitService == nil { + return nil + } + + namespacedName := &types.NamespacedName{ + Namespace: contourConfiguration.RateLimitService.ExtensionService.Namespace, + Name: contourConfiguration.RateLimitService.ExtensionService.Name, + } + client := s.clients.DynamicClient().Resource(contour_api_v1alpha1.ExtensionServiceGVR).Namespace(namespacedName.Namespace) + + // ensure the specified ExtensionService exists + res, err := client.Get(context.Background(), namespacedName.Name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("error getting rate limit extension service %s: %v", namespacedName, err) + } + var extensionSvc contour_api_v1alpha1.ExtensionService + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(res.Object, &extensionSvc); err != nil { + return fmt.Errorf("error converting rate limit extension service %s: %v", namespacedName, err) + } + // get the response timeout from the ExtensionService + var responseTimeout timeout.Setting + if tp := extensionSvc.Spec.TimeoutPolicy; tp != nil { + responseTimeout, err = timeout.Parse(tp.Response) + if err != nil { + return 
fmt.Errorf("error parsing rate limit extension service %s response timeout: %v", namespacedName, err) + } + } + + listenerConfig.RateLimitConfig = &xdscache_v3.RateLimitConfig{ + ExtensionService: *namespacedName, + Domain: contourConfiguration.RateLimitService.Domain, + Timeout: responseTimeout, + FailOpen: contourConfiguration.RateLimitService.FailOpen, + EnableXRateLimitHeaders: contourConfiguration.RateLimitService.EnableXRateLimitHeaders, + } + + return nil +} + +func (s *Server) setupDebugService(debugConfig contour_api_v1alpha1.DebugConfig, contourHandler *contour.EventHandler) { + debugsvc := debug.Service{ + Service: httpsvc.Service{ + Addr: debugConfig.Address, + Port: debugConfig.Port, + FieldLogger: s.log.WithField("context", "debugsvc"), + }, + Builder: &contourHandler.Builder, + } + s.group.Add(debugsvc.Start) +} + +func (s *Server) setupXDSServer(mgr manager.Manager, registry *prometheus.Registry, contourConfiguration contour_api_v1alpha1.XDSServerConfig, + snapshotHandler *xdscache.SnapshotHandler, resources []xdscache.ResourceCache) { + + s.group.AddContext(func(taskCtx context.Context) error { + log := s.log.WithField("context", "xds") log.Printf("waiting for informer caches to sync") if !mgr.GetCache().WaitForCacheSync(taskCtx) { @@ -612,32 +583,34 @@ func doServe(log logrus.FieldLogger, ctx *serveContext) error { } log.Printf("informer caches synced") - grpcServer := xds.NewServer(registry, ctx.grpcOptions(log)...) + grpcServer := xds.NewServer(registry, grpcOptions(log, contourConfiguration.TLS)...) 
- switch ctx.Config.Server.XDSServerType { - case config.EnvoyServerType: + switch contourConfiguration.Type { + case contour_api_v1alpha1.EnvoyServerType: v3cache := contour_xds_v3.NewSnapshotCache(false, log) snapshotHandler.AddSnapshotter(v3cache) contour_xds_v3.RegisterServer(envoy_server_v3.NewServer(taskCtx, v3cache, contour_xds_v3.NewRequestLoggingCallbacks(log)), grpcServer) - case config.ContourServerType: + case contour_api_v1alpha1.ContourServerType: contour_xds_v3.RegisterServer(contour_xds_v3.NewContourServer(log, xdscache.ResourcesOf(resources)...), grpcServer) default: // This can't happen due to config validation. - log.Fatalf("invalid xDS server type %q", ctx.Config.Server.XDSServerType) + log.Fatalf("invalid xDS server type %q", contourConfiguration.Type) } - addr := net.JoinHostPort(ctx.xdsAddr, strconv.Itoa(ctx.xdsPort)) + addr := net.JoinHostPort(contourConfiguration.Address, strconv.Itoa(contourConfiguration.Port)) l, err := net.Listen("tcp", addr) if err != nil { return err } log = log.WithField("address", addr) - if ctx.PermitInsecureGRPC { - log = log.WithField("insecure", true) + if tls := contourConfiguration.TLS; tls != nil { + if tls.Insecure { + log = log.WithField("insecure", true) + } } - log.Infof("started xDS server type: %q", ctx.Config.Server.XDSServerType) + log.Infof("started xDS server type: %q", contourConfiguration.Type) defer log.Info("stopped xDS server") go func() { @@ -652,87 +625,188 @@ func doServe(log logrus.FieldLogger, ctx *serveContext) error { return grpcServer.Serve(l) }) +} - // Set up SIGTERM handler for graceful shutdown. - g.Add(func(stop <-chan struct{}) error { - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGTERM, syscall.SIGINT) - select { - case sig := <-c: - log.WithField("context", "sigterm-handler").WithField("signal", sig).Info("shutting down") - case <-stop: - // Do nothing. The group is shutting down. 
- } - return nil - }) +func (s *Server) setupMetrics(metricsConfig contour_api_v1alpha1.MetricsConfig, healthConfig contour_api_v1alpha1.HealthConfig, + registry *prometheus.Registry) { - // GO! - return g.Run(context.Background()) + // Create metrics service and register with workgroup. + metricsvc := httpsvc.Service{ + Addr: metricsConfig.Address, + Port: metricsConfig.Port, + FieldLogger: s.log.WithField("context", "metricsvc"), + ServeMux: http.ServeMux{}, + } + + metricsvc.ServeMux.Handle("/metrics", metrics.Handler(registry)) + + if healthConfig.Address == metricsConfig.Address && healthConfig.Port == metricsConfig.Port { + h := health.Handler(s.clients.ClientSet()) + metricsvc.ServeMux.Handle("/health", h) + metricsvc.ServeMux.Handle("/healthz", h) + } + + s.group.Add(metricsvc.Start) } -func getDAGBuilder(ctx *serveContext, clients *k8s.Clients, clientCert, fallbackCert *types.NamespacedName, log logrus.FieldLogger) dag.Builder { - var requestHeadersPolicy dag.HeadersPolicy - if ctx.Config.Policy.RequestHeadersPolicy.Set != nil { - requestHeadersPolicy.Set = make(map[string]string) - for k, v := range ctx.Config.Policy.RequestHeadersPolicy.Set { - requestHeadersPolicy.Set[k] = v +func (s *Server) setupHealth(healthConfig contour_api_v1alpha1.HealthConfig, + metricsConfig contour_api_v1alpha1.MetricsConfig) { + + if healthConfig.Address != metricsConfig.Address || healthConfig.Port != metricsConfig.Port { + healthsvc := httpsvc.Service{ + Addr: healthConfig.Address, + Port: healthConfig.Port, + FieldLogger: s.log.WithField("context", "healthsvc"), } + + h := health.Handler(s.clients.ClientSet()) + healthsvc.ServeMux.Handle("/health", h) + healthsvc.ServeMux.Handle("/healthz", h) + + s.group.Add(healthsvc.Start) } - if ctx.Config.Policy.RequestHeadersPolicy.Remove != nil { - requestHeadersPolicy.Remove = make([]string, 0, len(ctx.Config.Policy.RequestHeadersPolicy.Remove)) - requestHeadersPolicy.Remove = append(requestHeadersPolicy.Remove, 
ctx.Config.Policy.RequestHeadersPolicy.Remove...) +} + +func (s *Server) setupGatewayAPI(contourConfiguration contour_api_v1alpha1.ContourConfigurationSpec, + mgr manager.Manager, eventHandler *contour.EventRecorder, sh *k8s.StatusUpdateHandler, isLeader chan struct{}) { + + // Check if GatewayAPI is configured. + if contourConfiguration.Gateway != nil { + + // Only inform on GatewayAPI if found in the cluster. + if s.clients.ResourcesExist(k8s.GatewayAPIResources()...) { + + // Create and register the gatewayclass controller with the manager. + gatewayClassControllerName := contourConfiguration.Gateway.ControllerName + if _, err := controller.NewGatewayClassController( + mgr, + eventHandler, + sh.Writer(), + s.log.WithField("context", "gatewayclass-controller"), + gatewayClassControllerName, + isLeader, + ); err != nil { + s.log.WithError(err).Fatal("failed to create gatewayclass-controller") + } + + // Create and register the NewGatewayController controller with the manager. + if _, err := controller.NewGatewayController( + mgr, + eventHandler, + sh.Writer(), + s.log.WithField("context", "gateway-controller"), + gatewayClassControllerName, + isLeader, + ); err != nil { + s.log.WithError(err).Fatal("failed to create gateway-controller") + } + + // Create and register the NewHTTPRouteController controller with the manager. + if _, err := controller.NewHTTPRouteController(mgr, eventHandler, s.log.WithField("context", "httproute-controller")); err != nil { + s.log.WithError(err).Fatal("failed to create httproute-controller") + } + + // Create and register the NewTLSRouteController controller with the manager. + if _, err := controller.NewTLSRouteController(mgr, eventHandler, s.log.WithField("context", "tlsroute-controller")); err != nil { + s.log.WithError(err).Fatal("failed to create tlsroute-controller") + } + + // Inform on Namespaces. 
+			if err := informOnResource(s.clients, k8s.NamespacesResource(), eventHandler, mgr.GetCache()); err != nil {
+				s.log.WithError(err).WithField("resource", k8s.NamespacesResource()).Fatal("failed to create informer")
+			}
+		} else {
+			s.log.Fatalf("Gateway API Gateway configured but APIs not installed in cluster.")
+		}
+	}
+}
+
+type dagBuilderConfig struct {
+	ingressClassName           string
+	rootNamespaces             []string
+	gatewayAPIConfigured       bool
+	disablePermitInsecure      bool
+	enableExternalNameService  bool
+	dnsLookupFamily            contour_api_v1alpha1.ClusterDNSFamilyType
+	headersPolicy              *contour_api_v1alpha1.PolicyConfig
+	applyHeaderPolicyToIngress bool
+	clients                    *k8s.Clients
+	clientCert                 *types.NamespacedName
+	fallbackCert               *types.NamespacedName
+}
+
+func (s *Server) getDAGBuilder(dbc dagBuilderConfig) dag.Builder {
+
+	var requestHeadersPolicy dag.HeadersPolicy
 	var responseHeadersPolicy dag.HeadersPolicy
-	if ctx.Config.Policy.ResponseHeadersPolicy.Set != nil {
-		responseHeadersPolicy.Set = make(map[string]string)
-		for k, v := range ctx.Config.Policy.ResponseHeadersPolicy.Set {
-			responseHeadersPolicy.Set[k] = v
+
+	if dbc.headersPolicy != nil {
+		if dbc.headersPolicy.RequestHeadersPolicy != nil {
+			if dbc.headersPolicy.RequestHeadersPolicy.Set != nil {
+				requestHeadersPolicy.Set = make(map[string]string)
+				for k, v := range dbc.headersPolicy.RequestHeadersPolicy.Set {
+					requestHeadersPolicy.Set[k] = v
+				}
+			}
+			if dbc.headersPolicy.RequestHeadersPolicy.Remove != nil {
+				requestHeadersPolicy.Remove = make([]string, 0, len(dbc.headersPolicy.RequestHeadersPolicy.Remove))
+				requestHeadersPolicy.Remove = append(requestHeadersPolicy.Remove, dbc.headersPolicy.RequestHeadersPolicy.Remove...)
+ } + } + + if dbc.headersPolicy.ResponseHeadersPolicy != nil { + if dbc.headersPolicy.ResponseHeadersPolicy.Set != nil { + responseHeadersPolicy.Set = make(map[string]string) + for k, v := range dbc.headersPolicy.ResponseHeadersPolicy.Set { + responseHeadersPolicy.Set[k] = v + } + } + if dbc.headersPolicy.ResponseHeadersPolicy.Remove != nil { + responseHeadersPolicy.Remove = make([]string, 0, len(dbc.headersPolicy.ResponseHeadersPolicy.Remove)) + responseHeadersPolicy.Remove = append(responseHeadersPolicy.Remove, dbc.headersPolicy.ResponseHeadersPolicy.Remove...) + } } - } - if ctx.Config.Policy.ResponseHeadersPolicy.Remove != nil { - responseHeadersPolicy.Remove = make([]string, 0, len(ctx.Config.Policy.ResponseHeadersPolicy.Remove)) - responseHeadersPolicy.Remove = append(responseHeadersPolicy.Remove, ctx.Config.Policy.ResponseHeadersPolicy.Remove...) } var requestHeadersPolicyIngress dag.HeadersPolicy var responseHeadersPolicyIngress dag.HeadersPolicy - if ctx.Config.Policy.ApplyToIngress { + if dbc.applyHeaderPolicyToIngress { requestHeadersPolicyIngress = requestHeadersPolicy responseHeadersPolicyIngress = responseHeadersPolicy } - log.Debugf("EnableExternalNameService is set to %t", ctx.Config.EnableExternalNameService) + s.log.Debugf("EnableExternalNameService is set to %t", dbc.enableExternalNameService) + // Get the appropriate DAG processors. 
dagProcessors := []dag.Processor{ &dag.IngressProcessor{ - EnableExternalNameService: ctx.Config.EnableExternalNameService, - FieldLogger: log.WithField("context", "IngressProcessor"), - ClientCertificate: clientCert, + EnableExternalNameService: dbc.enableExternalNameService, + FieldLogger: s.log.WithField("context", "IngressProcessor"), + ClientCertificate: dbc.clientCert, RequestHeadersPolicy: &requestHeadersPolicyIngress, ResponseHeadersPolicy: &responseHeadersPolicyIngress, }, &dag.ExtensionServiceProcessor{ // Note that ExtensionService does not support ExternalName, if it does get added, // need to bring EnableExternalNameService in here too. - FieldLogger: log.WithField("context", "ExtensionServiceProcessor"), - ClientCertificate: clientCert, + FieldLogger: s.log.WithField("context", "ExtensionServiceProcessor"), + ClientCertificate: dbc.clientCert, }, &dag.HTTPProxyProcessor{ - EnableExternalNameService: ctx.Config.EnableExternalNameService, - DisablePermitInsecure: ctx.Config.DisablePermitInsecure, - FallbackCertificate: fallbackCert, - DNSLookupFamily: ctx.Config.Cluster.DNSLookupFamily, - ClientCertificate: clientCert, + EnableExternalNameService: dbc.enableExternalNameService, + DisablePermitInsecure: dbc.disablePermitInsecure, + FallbackCertificate: dbc.fallbackCert, + DNSLookupFamily: dbc.dnsLookupFamily, + ClientCertificate: dbc.clientCert, RequestHeadersPolicy: &requestHeadersPolicy, ResponseHeadersPolicy: &responseHeadersPolicy, }, } - if ctx.Config.GatewayConfig != nil && clients.ResourcesExist(k8s.GatewayAPIResources()...) { + if dbc.gatewayAPIConfigured && dbc.clients.ResourcesExist(k8s.GatewayAPIResources()...) 
{ dagProcessors = append(dagProcessors, &dag.GatewayAPIProcessor{ - EnableExternalNameService: ctx.Config.EnableExternalNameService, - FieldLogger: log.WithField("context", "GatewayAPIProcessor"), + EnableExternalNameService: dbc.enableExternalNameService, + FieldLogger: s.log.WithField("context", "GatewayAPIProcessor"), }) } @@ -741,19 +815,19 @@ func getDAGBuilder(ctx *serveContext, clients *k8s.Clients, clientCert, fallback dagProcessors = append(dagProcessors, &dag.ListenerProcessor{}) var configuredSecretRefs []*types.NamespacedName - if fallbackCert != nil { - configuredSecretRefs = append(configuredSecretRefs, fallbackCert) + if dbc.fallbackCert != nil { + configuredSecretRefs = append(configuredSecretRefs, dbc.fallbackCert) } - if clientCert != nil { - configuredSecretRefs = append(configuredSecretRefs, clientCert) + if dbc.clientCert != nil { + configuredSecretRefs = append(configuredSecretRefs, dbc.clientCert) } builder := dag.Builder{ Source: dag.KubernetesCache{ - RootNamespaces: ctx.proxyRootNamespaces(), - IngressClassName: ctx.ingressClassName, + RootNamespaces: dbc.rootNamespaces, + IngressClassName: dbc.ingressClassName, ConfiguredSecretRefs: configuredSecretRefs, - FieldLogger: log.WithField("context", "KubernetesCache"), + FieldLogger: s.log.WithField("context", "KubernetesCache"), }, Processors: dagProcessors, } @@ -787,3 +861,57 @@ func informOnResource(clients *k8s.Clients, gvr schema.GroupVersionResource, han inf.AddEventHandler(handler) return nil } + +// commandOperatorRegexp parses the command operators used in Envoy access log configuration +// +// Capture Groups: +// Given string "the start time is %START_TIME(%s):3% wow!" +// +// 0. Whole match "%START_TIME(%s):3%" +// 1. Full operator: "START_TIME(%s):3%" +// 2. Operator Name: "START_TIME" +// 3. Arguments: "(%s)" +// 4. 
Truncation length: ":3" +var commandOperatorRegexp = regexp.MustCompile(`%(([A-Z_]+)(\([^)]+\)(:[0-9]+)?)?%)?`) + +// AccessLogFormatterExtensions returns a list of formatter extension names required by the access log format. +// +// Note: When adding support for new formatter, update the list of extensions here and +// add corresponding configuration in internal/envoy/v3/accesslog.go extensionConfig(). +// Currently only one extension exist in Envoy. +func AccessLogFormatterExtensions(accessLogFormat contour_api_v1alpha1.AccessLogType, accessLogFields contour_api_v1alpha1.AccessLogFields, + accessLogFormatString *string) []string { + // Function that finds out if command operator is present in a format string. + contains := func(format, command string) bool { + tokens := commandOperatorRegexp.FindAllStringSubmatch(format, -1) + for _, t := range tokens { + if t[2] == command { + return true + } + } + return false + } + + extensionsMap := make(map[string]bool) + switch accessLogFormat { + case contour_api_v1alpha1.EnvoyAccessLog: + if accessLogFormatString != nil { + if contains(*accessLogFormatString, "REQ_WITHOUT_QUERY") { + extensionsMap["envoy.formatter.req_without_query"] = true + } + } + case contour_api_v1alpha1.JSONAccessLog: + for _, f := range accessLogFields.AsFieldMap() { + if contains(f, "REQ_WITHOUT_QUERY") { + extensionsMap["envoy.formatter.req_without_query"] = true + } + } + } + + var extensions []string + for k := range extensionsMap { + extensions = append(extensions, k) + } + + return extensions +} diff --git a/cmd/contour/serve_test.go b/cmd/contour/serve_test.go index 98f38421980..6b79713a598 100644 --- a/cmd/contour/serve_test.go +++ b/cmd/contour/serve_test.go @@ -16,6 +16,8 @@ package main import ( "testing" + contour_api_v1alpha1 "github.com/projectcontour/contour/apis/projectcontour/v1alpha1" + "github.com/projectcontour/contour/internal/dag" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" @@ -35,7 +37,10 @@ func 
TestGetDAGBuilder(t *testing.T) { } t.Run("all default options", func(t *testing.T) { - got := getDAGBuilder(newServeContext(), nil, nil, nil, logrus.StandardLogger()) + serve := &Server{ + log: logrus.StandardLogger(), + } + got := serve.getDAGBuilder(dagBuilderConfig{rootNamespaces: []string{}, dnsLookupFamily: contour_api_v1alpha1.AutoClusterDNSFamily}) commonAssertions(t, &got) assert.Empty(t, got.Source.ConfiguredSecretRefs) }) @@ -43,7 +48,10 @@ func TestGetDAGBuilder(t *testing.T) { t.Run("client cert specified", func(t *testing.T) { clientCert := &types.NamespacedName{Namespace: "client-ns", Name: "client-name"} - got := getDAGBuilder(newServeContext(), nil, clientCert, nil, logrus.StandardLogger()) + serve := &Server{ + log: logrus.StandardLogger(), + } + got := serve.getDAGBuilder(dagBuilderConfig{rootNamespaces: []string{}, dnsLookupFamily: contour_api_v1alpha1.AutoClusterDNSFamily, clientCert: clientCert}) commonAssertions(t, &got) assert.ElementsMatch(t, got.Source.ConfiguredSecretRefs, []*types.NamespacedName{clientCert}) }) @@ -51,7 +59,10 @@ func TestGetDAGBuilder(t *testing.T) { t.Run("fallback cert specified", func(t *testing.T) { fallbackCert := &types.NamespacedName{Namespace: "fallback-ns", Name: "fallback-name"} - got := getDAGBuilder(newServeContext(), nil, nil, fallbackCert, logrus.StandardLogger()) + serve := &Server{ + log: logrus.StandardLogger(), + } + got := serve.getDAGBuilder(dagBuilderConfig{rootNamespaces: []string{}, dnsLookupFamily: contour_api_v1alpha1.AutoClusterDNSFamily, fallbackCert: fallbackCert}) commonAssertions(t, &got) assert.ElementsMatch(t, got.Source.ConfiguredSecretRefs, []*types.NamespacedName{fallbackCert}) }) @@ -60,33 +71,45 @@ func TestGetDAGBuilder(t *testing.T) { clientCert := &types.NamespacedName{Namespace: "client-ns", Name: "client-name"} fallbackCert := &types.NamespacedName{Namespace: "fallback-ns", Name: "fallback-name"} - got := getDAGBuilder(newServeContext(), nil, clientCert, fallbackCert, 
logrus.StandardLogger()) - + serve := &Server{ + log: logrus.StandardLogger(), + } + got := serve.getDAGBuilder(dagBuilderConfig{rootNamespaces: []string{}, dnsLookupFamily: contour_api_v1alpha1.AutoClusterDNSFamily, clientCert: clientCert, fallbackCert: fallbackCert}) commonAssertions(t, &got) assert.ElementsMatch(t, got.Source.ConfiguredSecretRefs, []*types.NamespacedName{clientCert, fallbackCert}) }) t.Run("request and response headers policy specified", func(t *testing.T) { - ctx := newServeContext() - ctx.Config.Policy.RequestHeadersPolicy.Set = map[string]string{ - "req-set-key-1": "req-set-val-1", - "req-set-key-2": "req-set-val-2", - } - ctx.Config.Policy.RequestHeadersPolicy.Remove = []string{"req-remove-key-1", "req-remove-key-2"} - ctx.Config.Policy.ResponseHeadersPolicy.Set = map[string]string{ - "res-set-key-1": "res-set-val-1", - "res-set-key-2": "res-set-val-2", + + policy := &contour_api_v1alpha1.PolicyConfig{ + RequestHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{ + Set: map[string]string{ + "req-set-key-1": "req-set-val-1", + "req-set-key-2": "req-set-val-2", + }, + Remove: []string{"req-remove-key-1", "req-remove-key-2"}, + }, + ResponseHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{ + Set: map[string]string{ + "res-set-key-1": "res-set-val-1", + "res-set-key-2": "res-set-val-2", + }, + Remove: []string{"res-remove-key-1", "res-remove-key-2"}, + }, + ApplyToIngress: false, } - ctx.Config.Policy.ResponseHeadersPolicy.Remove = []string{"res-remove-key-1", "res-remove-key-2"} - got := getDAGBuilder(ctx, nil, nil, nil, logrus.StandardLogger()) + serve := &Server{ + log: logrus.StandardLogger(), + } + got := serve.getDAGBuilder(dagBuilderConfig{rootNamespaces: []string{}, dnsLookupFamily: contour_api_v1alpha1.AutoClusterDNSFamily, headersPolicy: policy}) commonAssertions(t, &got) httpProxyProcessor := mustGetHTTPProxyProcessor(t, &got) - assert.EqualValues(t, ctx.Config.Policy.RequestHeadersPolicy.Set, 
httpProxyProcessor.RequestHeadersPolicy.Set) - assert.ElementsMatch(t, ctx.Config.Policy.RequestHeadersPolicy.Remove, httpProxyProcessor.RequestHeadersPolicy.Remove) - assert.EqualValues(t, ctx.Config.Policy.ResponseHeadersPolicy.Set, httpProxyProcessor.ResponseHeadersPolicy.Set) - assert.ElementsMatch(t, ctx.Config.Policy.ResponseHeadersPolicy.Remove, httpProxyProcessor.ResponseHeadersPolicy.Remove) + assert.EqualValues(t, policy.RequestHeadersPolicy.Set, httpProxyProcessor.RequestHeadersPolicy.Set) + assert.ElementsMatch(t, policy.RequestHeadersPolicy.Remove, httpProxyProcessor.RequestHeadersPolicy.Remove) + assert.EqualValues(t, policy.ResponseHeadersPolicy.Set, httpProxyProcessor.ResponseHeadersPolicy.Set) + assert.ElementsMatch(t, policy.ResponseHeadersPolicy.Remove, httpProxyProcessor.ResponseHeadersPolicy.Remove) ingressProcessor := mustGetIngressProcessor(t, &got) assert.EqualValues(t, map[string]string(nil), ingressProcessor.RequestHeadersPolicy.Set) @@ -96,27 +119,37 @@ func TestGetDAGBuilder(t *testing.T) { }) t.Run("request and response headers policy specified for ingress", func(t *testing.T) { - ctx := newServeContext() - ctx.Config.Policy.RequestHeadersPolicy.Set = map[string]string{ - "req-set-key-1": "req-set-val-1", - "req-set-key-2": "req-set-val-2", - } - ctx.Config.Policy.RequestHeadersPolicy.Remove = []string{"req-remove-key-1", "req-remove-key-2"} - ctx.Config.Policy.ResponseHeadersPolicy.Set = map[string]string{ - "res-set-key-1": "res-set-val-1", - "res-set-key-2": "res-set-val-2", + + policy := &contour_api_v1alpha1.PolicyConfig{ + RequestHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{ + Set: map[string]string{ + "req-set-key-1": "req-set-val-1", + "req-set-key-2": "req-set-val-2", + }, + Remove: []string{"req-remove-key-1", "req-remove-key-2"}, + }, + ResponseHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{ + Set: map[string]string{ + "res-set-key-1": "res-set-val-1", + "res-set-key-2": "res-set-val-2", + }, + Remove: 
[]string{"res-remove-key-1", "res-remove-key-2"}, + }, + ApplyToIngress: false, } - ctx.Config.Policy.ResponseHeadersPolicy.Remove = []string{"res-remove-key-1", "res-remove-key-2"} - ctx.Config.Policy.ApplyToIngress = true - got := getDAGBuilder(ctx, nil, nil, nil, logrus.StandardLogger()) + serve := &Server{ + log: logrus.StandardLogger(), + } + got := serve.getDAGBuilder(dagBuilderConfig{rootNamespaces: []string{}, dnsLookupFamily: contour_api_v1alpha1.AutoClusterDNSFamily, + headersPolicy: policy, applyHeaderPolicyToIngress: true}) commonAssertions(t, &got) ingressProcessor := mustGetIngressProcessor(t, &got) - assert.EqualValues(t, ctx.Config.Policy.RequestHeadersPolicy.Set, ingressProcessor.RequestHeadersPolicy.Set) - assert.ElementsMatch(t, ctx.Config.Policy.RequestHeadersPolicy.Remove, ingressProcessor.RequestHeadersPolicy.Remove) - assert.EqualValues(t, ctx.Config.Policy.ResponseHeadersPolicy.Set, ingressProcessor.ResponseHeadersPolicy.Set) - assert.ElementsMatch(t, ctx.Config.Policy.ResponseHeadersPolicy.Remove, ingressProcessor.ResponseHeadersPolicy.Remove) + assert.EqualValues(t, policy.RequestHeadersPolicy.Set, ingressProcessor.RequestHeadersPolicy.Set) + assert.ElementsMatch(t, policy.RequestHeadersPolicy.Remove, ingressProcessor.RequestHeadersPolicy.Remove) + assert.EqualValues(t, policy.ResponseHeadersPolicy.Set, ingressProcessor.ResponseHeadersPolicy.Set) + assert.ElementsMatch(t, policy.ResponseHeadersPolicy.Remove, ingressProcessor.ResponseHeadersPolicy.Remove) }) // TODO(3453): test additional properties of the DAG builder (processor fields, cache fields, Gateway tests (requires a client fake)) diff --git a/cmd/contour/servecontext.go b/cmd/contour/servecontext.go index d33399c30ab..fecfe83a0ba 100644 --- a/cmd/contour/servecontext.go +++ b/cmd/contour/servecontext.go @@ -23,6 +23,10 @@ import ( "strings" "time" + "github.com/projectcontour/contour/internal/k8s" + "k8s.io/utils/pointer" + + contour_api_v1alpha1 
"github.com/projectcontour/contour/apis/projectcontour/v1alpha1" envoy_v3 "github.com/projectcontour/contour/internal/envoy/v3" xdscache_v3 "github.com/projectcontour/contour/internal/xdscache/v3" "github.com/projectcontour/contour/pkg/config" @@ -30,7 +34,6 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" - "k8s.io/apimachinery/pkg/types" ) type serveContext struct { @@ -86,6 +89,13 @@ type serveContext struct { DisableLeaderElection bool } +type ServerConfig struct { + // contour's xds service parameters + xdsAddr string + xdsPort int + caFile, contourCert, contourKey string +} + // newServeContext returns a serveContext initialized to defaults. func newServeContext() *serveContext { // Set defaults for parameters which are then overridden via flags, ENV, or ConfigFile @@ -108,23 +118,19 @@ func newServeContext() *serveContext { PermitInsecureGRPC: false, DisableLeaderElection: false, ServerConfig: ServerConfig{ - xdsAddr: "127.0.0.1", - xdsPort: 8001, + xdsAddr: "127.0.0.1", + xdsPort: 8001, + caFile: "", + contourCert: "", + contourKey: "", }, } } -type ServerConfig struct { - // contour's xds service parameters - xdsAddr string - xdsPort int - caFile, contourCert, contourKey string -} - // grpcOptions returns a slice of grpc.ServerOptions. // if ctx.PermitInsecureGRPC is false, the option set will // include TLS configuration. -func (ctx *serveContext) grpcOptions(log logrus.FieldLogger) []grpc.ServerOption { +func grpcOptions(log logrus.FieldLogger, contourXDSConfig *contour_api_v1alpha1.TLS) []grpc.ServerOption { opts := []grpc.ServerOption{ // By default the Go grpc library defaults to a value of ~100 streams per // connection. 
This number is likely derived from the HTTP/2 spec: @@ -145,18 +151,18 @@ func (ctx *serveContext) grpcOptions(log logrus.FieldLogger) []grpc.ServerOption Timeout: 20 * time.Second, }), } - if !ctx.PermitInsecureGRPC { - tlsconfig := ctx.tlsconfig(log) + if contourXDSConfig != nil && !contourXDSConfig.Insecure { + tlsconfig := tlsconfig(log, contourXDSConfig) creds := credentials.NewTLS(tlsconfig) opts = append(opts, grpc.Creds(creds)) } return opts } -// tlsconfig returns a new *tls.Config. If the context is not properly configured +// tlsconfig returns a new *tls.Config. If the TLS parameters passed are not properly configured // for tls communication, tlsconfig returns nil. -func (ctx *serveContext) tlsconfig(log logrus.FieldLogger) *tls.Config { - err := ctx.verifyTLSFlags() +func tlsconfig(log logrus.FieldLogger, contourXDSTLS *contour_api_v1alpha1.TLS) *tls.Config { + err := verifyTLSFlags(contourXDSTLS) if err != nil { log.WithError(err).Fatal("failed to verify TLS flags") } @@ -164,19 +170,22 @@ func (ctx *serveContext) tlsconfig(log logrus.FieldLogger) *tls.Config { // Define a closure that lazily loads certificates and key at TLS handshake // to ensure that latest certificates are used in case they have been rotated. 
loadConfig := func() (*tls.Config, error) { - cert, err := tls.LoadX509KeyPair(ctx.contourCert, ctx.contourKey) + if contourXDSTLS == nil { + return nil, nil + } + cert, err := tls.LoadX509KeyPair(contourXDSTLS.CertFile, contourXDSTLS.KeyFile) if err != nil { return nil, err } - ca, err := ioutil.ReadFile(ctx.caFile) + ca, err := ioutil.ReadFile(contourXDSTLS.CAFile) if err != nil { return nil, err } certPool := x509.NewCertPool() if ok := certPool.AppendCertsFromPEM(ca); !ok { - return nil, fmt.Errorf("unable to append certificate in %s to CA pool", ctx.caFile) + return nil, fmt.Errorf("unable to append certificate in %s to CA pool", contourXDSTLS.CAFile) } return &tls.Config{ @@ -203,12 +212,12 @@ func (ctx *serveContext) tlsconfig(log logrus.FieldLogger) *tls.Config { } // verifyTLSFlags indicates if the TLS flags are set up correctly. -func (ctx *serveContext) verifyTLSFlags() error { - if ctx.caFile == "" && ctx.contourCert == "" && ctx.contourKey == "" { +func verifyTLSFlags(contourXDSTLS *contour_api_v1alpha1.TLS) error { + if contourXDSTLS.CAFile == "" && contourXDSTLS.CertFile == "" && contourXDSTLS.KeyFile == "" { return errors.New("no TLS parameters and --insecure not supplied. You must supply one or the other") } // If one of the three TLS commands is not empty, they all must be not empty - if !(ctx.caFile != "" && ctx.contourCert != "" && ctx.contourKey != "") { + if !(contourXDSTLS.CAFile != "" && contourXDSTLS.CertFile != "" && contourXDSTLS.KeyFile != "") { return errors.New("you must supply all three TLS parameters - --contour-cafile, --contour-cert-file, --contour-key-file, or none of them") } @@ -230,14 +239,14 @@ func (ctx *serveContext) proxyRootNamespaces() []string { // parseDefaultHTTPVersions parses a list of supported HTTP versions // (of the form "HTTP/xx") into a slice of unique version constants. 
-func parseDefaultHTTPVersions(versions []config.HTTPVersionType) []envoy_v3.HTTPVersionType { +func parseDefaultHTTPVersions(versions []contour_api_v1alpha1.HTTPVersionType) []envoy_v3.HTTPVersionType { wanted := map[envoy_v3.HTTPVersionType]struct{}{} for _, v := range versions { switch v { - case config.HTTPVersion1: + case contour_api_v1alpha1.HTTPVersion1: wanted[envoy_v3.HTTPVersion1] = struct{}{} - case config.HTTPVersion2: + case contour_api_v1alpha1.HTTPVersion2: wanted[envoy_v3.HTTPVersion2] = struct{}{} } } @@ -245,19 +254,238 @@ func parseDefaultHTTPVersions(versions []config.HTTPVersionType) []envoy_v3.HTTP var parsed []envoy_v3.HTTPVersionType for k := range wanted { parsed = append(parsed, k) - } return parsed } -func namespacedNameOf(n config.NamespacedName) *types.NamespacedName { - if len(strings.TrimSpace(n.Name)) == 0 && len(strings.TrimSpace(n.Namespace)) == 0 { - return nil +func (ctx *serveContext) convertToContourConfigurationSpec() contour_api_v1alpha1.ContourConfigurationSpec { + ingress := &contour_api_v1alpha1.IngressConfig{} + if len(ctx.ingressClassName) > 0 { + ingress.ClassName = pointer.StringPtr(ctx.ingressClassName) + } + if len(ctx.Config.IngressStatusAddress) > 0 { + ingress.StatusAddress = pointer.StringPtr(ctx.Config.IngressStatusAddress) + } + + debugLogLevel := contour_api_v1alpha1.InfoLog + switch ctx.Config.Debug { + case true: + debugLogLevel = contour_api_v1alpha1.DebugLog + case false: + debugLogLevel = contour_api_v1alpha1.InfoLog + } + + var gatewayConfig *contour_api_v1alpha1.GatewayConfig + if ctx.Config.GatewayConfig != nil { + gatewayConfig = &contour_api_v1alpha1.GatewayConfig{ + ControllerName: ctx.Config.GatewayConfig.ControllerName, + } + } + + var cipherSuites []contour_api_v1alpha1.TLSCipherType + for _, suite := range ctx.Config.TLS.CipherSuites { + cipherSuites = append(cipherSuites, contour_api_v1alpha1.TLSCipherType(suite)) + } + + var accessLogFormat contour_api_v1alpha1.AccessLogType + switch 
ctx.Config.AccessLogFormat { + case config.EnvoyAccessLog: + accessLogFormat = contour_api_v1alpha1.EnvoyAccessLog + case config.JSONAccessLog: + accessLogFormat = contour_api_v1alpha1.JSONAccessLog + } + + var accessLogFields contour_api_v1alpha1.AccessLogFields + for _, alf := range ctx.Config.AccessLogFields { + accessLogFields = append(accessLogFields, alf) + } + + var defaultHTTPVersions []contour_api_v1alpha1.HTTPVersionType + for _, version := range ctx.Config.DefaultHTTPVersions { + switch version { + case config.HTTPVersion1: + defaultHTTPVersions = append(defaultHTTPVersions, contour_api_v1alpha1.HTTPVersion1) + case config.HTTPVersion2: + defaultHTTPVersions = append(defaultHTTPVersions, contour_api_v1alpha1.HTTPVersion2) + } } - return &types.NamespacedName{ - Namespace: n.Namespace, - Name: n.Name, + timeoutParams := &contour_api_v1alpha1.TimeoutParameters{} + if len(ctx.Config.Timeouts.RequestTimeout) > 0 { + timeoutParams.RequestTimeout = pointer.StringPtr(ctx.Config.Timeouts.RequestTimeout) } + if len(ctx.Config.Timeouts.ConnectionIdleTimeout) > 0 { + timeoutParams.ConnectionIdleTimeout = pointer.StringPtr(ctx.Config.Timeouts.ConnectionIdleTimeout) + } + if len(ctx.Config.Timeouts.StreamIdleTimeout) > 0 { + timeoutParams.StreamIdleTimeout = pointer.StringPtr(ctx.Config.Timeouts.StreamIdleTimeout) + } + if len(ctx.Config.Timeouts.MaxConnectionDuration) > 0 { + timeoutParams.MaxConnectionDuration = pointer.StringPtr(ctx.Config.Timeouts.MaxConnectionDuration) + } + if len(ctx.Config.Timeouts.DelayedCloseTimeout) > 0 { + timeoutParams.DelayedCloseTimeout = pointer.StringPtr(ctx.Config.Timeouts.DelayedCloseTimeout) + } + if len(ctx.Config.Timeouts.ConnectionShutdownGracePeriod) > 0 { + timeoutParams.ConnectionShutdownGracePeriod = pointer.StringPtr(ctx.Config.Timeouts.ConnectionShutdownGracePeriod) + } + + var dnsLookupFamily contour_api_v1alpha1.ClusterDNSFamilyType + switch ctx.Config.Cluster.DNSLookupFamily { + case config.AutoClusterDNSFamily: + 
dnsLookupFamily = contour_api_v1alpha1.AutoClusterDNSFamily + case config.IPv6ClusterDNSFamily: + dnsLookupFamily = contour_api_v1alpha1.IPv6ClusterDNSFamily + case config.IPv4ClusterDNSFamily: + dnsLookupFamily = contour_api_v1alpha1.IPv4ClusterDNSFamily + } + + var rateLimitService *contour_api_v1alpha1.RateLimitServiceConfig + if ctx.Config.RateLimitService.ExtensionService != "" { + rateLimitService = &contour_api_v1alpha1.RateLimitServiceConfig{ + ExtensionService: contour_api_v1alpha1.NamespacedName{ + Name: k8s.NamespacedNameFrom(ctx.Config.RateLimitService.ExtensionService).Name, + Namespace: k8s.NamespacedNameFrom(ctx.Config.RateLimitService.ExtensionService).Namespace, + }, + Domain: ctx.Config.RateLimitService.Domain, + FailOpen: ctx.Config.RateLimitService.FailOpen, + EnableXRateLimitHeaders: ctx.Config.RateLimitService.EnableXRateLimitHeaders, + } + } + + policy := &contour_api_v1alpha1.PolicyConfig{ + RequestHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{ + Set: ctx.Config.Policy.RequestHeadersPolicy.Set, + Remove: ctx.Config.Policy.RequestHeadersPolicy.Remove, + }, + ResponseHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{ + Set: ctx.Config.Policy.ResponseHeadersPolicy.Set, + Remove: ctx.Config.Policy.ResponseHeadersPolicy.Remove, + }, + ApplyToIngress: ctx.Config.Policy.ApplyToIngress, + } + + var clientCertificate *contour_api_v1alpha1.NamespacedName + if len(ctx.Config.TLS.ClientCertificate.Name) > 0 { + clientCertificate = &contour_api_v1alpha1.NamespacedName{ + Name: ctx.Config.TLS.ClientCertificate.Name, + Namespace: ctx.Config.TLS.ClientCertificate.Namespace, + } + } + + var accessLogFormatString *string + if len(ctx.Config.AccessLogFormatString) > 0 { + accessLogFormatString = pointer.StringPtr(ctx.Config.AccessLogFormatString) + } + + var fallbackCertificate *contour_api_v1alpha1.NamespacedName + if len(ctx.Config.TLS.FallbackCertificate.Name) > 0 { + fallbackCertificate = &contour_api_v1alpha1.NamespacedName{ + Name: 
ctx.Config.TLS.FallbackCertificate.Name, + Namespace: ctx.Config.TLS.FallbackCertificate.Namespace, + } + } + + // Convert serveContext to a ContourConfiguration + contourConfiguration := contour_api_v1alpha1.ContourConfigurationSpec{ + Ingress: ingress, + Debug: contour_api_v1alpha1.DebugConfig{ + Address: ctx.debugAddr, + Port: ctx.debugPort, + DebugLogLevel: debugLogLevel, + KubernetesDebugLogLevel: ctx.KubernetesDebug, + }, + Health: contour_api_v1alpha1.HealthConfig{ + Address: ctx.healthAddr, + Port: ctx.healthPort, + }, + Envoy: contour_api_v1alpha1.EnvoyConfig{ + Listener: contour_api_v1alpha1.EnvoyListenerConfig{ + UseProxyProto: ctx.useProxyProto, + DisableAllowChunkedLength: ctx.Config.DisableAllowChunkedLength, + ConnectionBalancer: ctx.Config.Listener.ConnectionBalancer, + TLS: contour_api_v1alpha1.EnvoyTLS{ + MinimumProtocolVersion: ctx.Config.TLS.MinimumProtocolVersion, + CipherSuites: cipherSuites, + }, + }, + Service: contour_api_v1alpha1.NamespacedName{ + Name: ctx.Config.EnvoyServiceName, + Namespace: ctx.Config.EnvoyServiceNamespace, + }, + HTTPListener: contour_api_v1alpha1.EnvoyListener{ + Address: ctx.httpAddr, + Port: ctx.httpPort, + AccessLog: ctx.httpAccessLog, + }, + HTTPSListener: contour_api_v1alpha1.EnvoyListener{ + Address: ctx.httpsAddr, + Port: ctx.httpsPort, + AccessLog: ctx.httpsAccessLog, + }, + Metrics: contour_api_v1alpha1.MetricsConfig{ + Address: ctx.statsAddr, + Port: ctx.statsPort, + }, + ClientCertificate: clientCertificate, + Logging: contour_api_v1alpha1.EnvoyLogging{ + AccessLogFormat: accessLogFormat, + AccessLogFormatString: accessLogFormatString, + AccessLogFields: accessLogFields, + }, + DefaultHTTPVersions: defaultHTTPVersions, + Timeouts: timeoutParams, + Cluster: contour_api_v1alpha1.ClusterParameters{ + DNSLookupFamily: dnsLookupFamily, + }, + Network: contour_api_v1alpha1.NetworkParameters{ + XffNumTrustedHops: ctx.Config.Network.XffNumTrustedHops, + EnvoyAdminPort: ctx.Config.Network.EnvoyAdminPort, + }, + }, 
+ Gateway: gatewayConfig, + HTTPProxy: contour_api_v1alpha1.HTTPProxyConfig{ + DisablePermitInsecure: ctx.Config.DisablePermitInsecure, + RootNamespaces: ctx.proxyRootNamespaces(), + FallbackCertificate: fallbackCertificate, + }, + LeaderElection: contour_api_v1alpha1.LeaderElectionConfig{ + LeaseDuration: ctx.Config.LeaderElection.LeaseDuration.String(), + RenewDeadline: ctx.Config.LeaderElection.RenewDeadline.String(), + RetryPeriod: ctx.Config.LeaderElection.RetryPeriod.String(), + Configmap: contour_api_v1alpha1.NamespacedName{ + Name: ctx.Config.LeaderElection.Name, + Namespace: ctx.Config.LeaderElection.Namespace, + }, + DisableLeaderElection: ctx.DisableLeaderElection, + }, + EnableExternalNameService: ctx.Config.EnableExternalNameService, + RateLimitService: rateLimitService, + Policy: policy, + Metrics: contour_api_v1alpha1.MetricsConfig{ + Address: ctx.metricsAddr, + Port: ctx.metricsPort, + }, + } + + xdsServerType := contour_api_v1alpha1.ContourServerType + switch ctx.Config.Server.XDSServerType { + case config.EnvoyServerType: + xdsServerType = contour_api_v1alpha1.EnvoyServerType + } + + contourConfiguration.XDSServer = contour_api_v1alpha1.XDSServerConfig{ + Type: xdsServerType, + Address: ctx.xdsAddr, + Port: ctx.xdsPort, + TLS: &contour_api_v1alpha1.TLS{ + CAFile: ctx.caFile, + CertFile: ctx.contourCert, + KeyFile: ctx.contourKey, + Insecure: ctx.PermitInsecureGRPC, + }, + } + + return contourConfiguration } diff --git a/cmd/contour/servecontext_test.go b/cmd/contour/servecontext_test.go index 66feb65095a..13577bea024 100644 --- a/cmd/contour/servecontext_test.go +++ b/cmd/contour/servecontext_test.go @@ -26,9 +26,12 @@ import ( "testing" "time" + "github.com/projectcontour/contour/pkg/config" + "k8s.io/utils/pointer" + + contour_api_v1alpha1 "github.com/projectcontour/contour/apis/projectcontour/v1alpha1" envoy_v3 "github.com/projectcontour/contour/internal/envoy/v3" "github.com/projectcontour/contour/internal/fixture" - 
"github.com/projectcontour/contour/pkg/config" "github.com/stretchr/testify/assert" "google.golang.org/grpc" ) @@ -82,38 +85,36 @@ func TestServeContextProxyRootNamespaces(t *testing.T) { func TestServeContextTLSParams(t *testing.T) { tests := map[string]struct { - ctx serveContext - expecterror bool + tls *contour_api_v1alpha1.TLS + expectError bool }{ "tls supplied correctly": { - ctx: serveContext{ - ServerConfig: ServerConfig{ - caFile: "cacert.pem", - contourCert: "contourcert.pem", - contourKey: "contourkey.pem", - }, + tls: &contour_api_v1alpha1.TLS{ + CAFile: "cacert.pem", + CertFile: "contourcert.pem", + KeyFile: "contourkey.pem", + Insecure: false, }, - expecterror: false, + expectError: false, }, "tls partially supplied": { - ctx: serveContext{ - ServerConfig: ServerConfig{ - contourCert: "contourcert.pem", - contourKey: "contourkey.pem", - }, + tls: &contour_api_v1alpha1.TLS{ + CertFile: "contourcert.pem", + KeyFile: "contourkey.pem", + Insecure: false, }, - expecterror: true, + expectError: true, }, "tls not supplied": { - ctx: serveContext{}, - expecterror: true, + tls: &contour_api_v1alpha1.TLS{}, + expectError: true, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - err := tc.ctx.verifyTLSFlags() + err := verifyTLSFlags(tc.tls) goterror := err != nil - if goterror != tc.expecterror { + if goterror != tc.expectError { t.Errorf("TLS Config: %s", err) } }) @@ -161,12 +162,11 @@ func TestServeContextCertificateHandling(t *testing.T) { checkFatalErr(t, err) defer os.RemoveAll(configDir) - ctx := serveContext{ - ServerConfig: ServerConfig{ - caFile: filepath.Join(configDir, "CAcert.pem"), - contourCert: filepath.Join(configDir, "contourcert.pem"), - contourKey: filepath.Join(configDir, "contourkey.pem"), - }, + contourTLS := &contour_api_v1alpha1.TLS{ + CAFile: filepath.Join(configDir, "CAcert.pem"), + CertFile: filepath.Join(configDir, "contourcert.pem"), + KeyFile: filepath.Join(configDir, "contourkey.pem"), + Insecure: false, } // 
Initial set of credentials must be linked into temp directory before @@ -176,7 +176,7 @@ func TestServeContextCertificateHandling(t *testing.T) { // Start a dummy server. log := fixture.NewTestLogger(t) - opts := ctx.grpcOptions(log) + opts := grpcOptions(log, contourTLS) g := grpc.NewServer(opts...) if g == nil { t.Error("failed to create server") @@ -218,12 +218,11 @@ func TestTlsVersionDeprecation(t *testing.T) { checkFatalErr(t, err) defer os.RemoveAll(configDir) - ctx := serveContext{ - ServerConfig: ServerConfig{ - caFile: filepath.Join(configDir, "CAcert.pem"), - contourCert: filepath.Join(configDir, "contourcert.pem"), - contourKey: filepath.Join(configDir, "contourkey.pem"), - }, + contourTLS := &contour_api_v1alpha1.TLS{ + CAFile: filepath.Join(configDir, "CAcert.pem"), + CertFile: filepath.Join(configDir, "contourcert.pem"), + KeyFile: filepath.Join(configDir, "contourkey.pem"), + Insecure: false, } err = linkFiles("testdata/1", configDir) @@ -231,7 +230,7 @@ func TestTlsVersionDeprecation(t *testing.T) { // Get preliminary TLS config from the serveContext. log := fixture.NewTestLogger(t) - preliminaryTLSConfig := ctx.tlsconfig(log) + preliminaryTLSConfig := tlsconfig(log, contourTLS) // Get actual TLS config that will be used during TLS handshake. 
tlsConfig, err := preliminaryTLSConfig.GetConfigForClient(nil) @@ -326,25 +325,25 @@ func peekError(conn net.Conn) error { func TestParseHTTPVersions(t *testing.T) { cases := map[string]struct { - versions []config.HTTPVersionType + versions []contour_api_v1alpha1.HTTPVersionType parseVersions []envoy_v3.HTTPVersionType }{ "empty": { - versions: []config.HTTPVersionType{}, + versions: []contour_api_v1alpha1.HTTPVersionType{}, parseVersions: nil, }, "http/1.1": { - versions: []config.HTTPVersionType{config.HTTPVersion1}, + versions: []contour_api_v1alpha1.HTTPVersionType{contour_api_v1alpha1.HTTPVersion1}, parseVersions: []envoy_v3.HTTPVersionType{envoy_v3.HTTPVersion1}, }, "http/1.1+http/2": { - versions: []config.HTTPVersionType{config.HTTPVersion1, config.HTTPVersion2}, + versions: []contour_api_v1alpha1.HTTPVersionType{contour_api_v1alpha1.HTTPVersion1, contour_api_v1alpha1.HTTPVersion2}, parseVersions: []envoy_v3.HTTPVersionType{envoy_v3.HTTPVersion1, envoy_v3.HTTPVersion2}, }, "http/1.1+http/2 duplicated": { - versions: []config.HTTPVersionType{ - config.HTTPVersion1, config.HTTPVersion2, - config.HTTPVersion1, config.HTTPVersion2}, + versions: []contour_api_v1alpha1.HTTPVersionType{ + contour_api_v1alpha1.HTTPVersion1, contour_api_v1alpha1.HTTPVersion2, + contour_api_v1alpha1.HTTPVersion1, contour_api_v1alpha1.HTTPVersion2}, parseVersions: []envoy_v3.HTTPVersionType{envoy_v3.HTTPVersion1, envoy_v3.HTTPVersion2}, }, } @@ -364,3 +363,1087 @@ func TestParseHTTPVersions(t *testing.T) { }) } } + +func TestConvertServeContext(t *testing.T) { + + defaultContext := newServeContext() + defaultContext.ServerConfig = ServerConfig{ + xdsAddr: "127.0.0.1", + xdsPort: 8001, + caFile: "/certs/ca.crt", + contourCert: "/certs/cert.crt", + contourKey: "/certs/cert.key", + } + + headersPolicyContext := newServeContext() + headersPolicyContext.Config.Policy = config.PolicyParameters{ + RequestHeadersPolicy: config.HeadersPolicy{ + Set: 
map[string]string{"custom-request-header-set": "foo-bar", "Host": "request-bar.com"}, + Remove: []string{"custom-request-header-remove"}, + }, + ResponseHeadersPolicy: config.HeadersPolicy{ + Set: map[string]string{"custom-response-header-set": "foo-bar", "Host": "response-bar.com"}, + Remove: []string{"custom-response-header-remove"}, + }, + ApplyToIngress: true, + } + + gatewayContext := newServeContext() + gatewayContext.Config.GatewayConfig = &config.GatewayParameters{ + ControllerName: "projectcontour.io/projectcontour/contour", + } + + ingressContext := newServeContext() + ingressContext.ingressClassName = "coolclass" + ingressContext.Config.IngressStatusAddress = "1.2.3.4" + + clientCertificate := newServeContext() + clientCertificate.Config.TLS.ClientCertificate = config.NamespacedName{ + Name: "cert", + Namespace: "secretplace", + } + + httpProxy := newServeContext() + httpProxy.Config.DisablePermitInsecure = true + httpProxy.Config.TLS.FallbackCertificate = config.NamespacedName{ + Name: "fallbackname", + Namespace: "fallbacknamespace", + } + + rateLimit := newServeContext() + rateLimit.Config.RateLimitService = config.RateLimitService{ + ExtensionService: "ratens/ratelimitext", + Domain: "contour", + FailOpen: true, + EnableXRateLimitHeaders: true, + } + + defaultHTTPVersions := newServeContext() + defaultHTTPVersions.Config.DefaultHTTPVersions = []config.HTTPVersionType{ + config.HTTPVersion1, + } + + accessLog := newServeContext() + accessLog.Config.AccessLogFormat = config.JSONAccessLog + accessLog.Config.AccessLogFormatString = "foo-bar-baz" + accessLog.Config.AccessLogFields = []string{"custom_field"} + + cases := map[string]struct { + serveContext *serveContext + contourConfig contour_api_v1alpha1.ContourConfigurationSpec + }{ + "default ServeContext": { + serveContext: defaultContext, + contourConfig: contour_api_v1alpha1.ContourConfigurationSpec{ + XDSServer: contour_api_v1alpha1.XDSServerConfig{ + Type: contour_api_v1alpha1.ContourServerType, + 
Address: "127.0.0.1", + Port: 8001, + TLS: &contour_api_v1alpha1.TLS{ + CAFile: "/certs/ca.crt", + CertFile: "/certs/cert.crt", + KeyFile: "/certs/cert.key", + Insecure: false, + }, + }, + Ingress: &contour_api_v1alpha1.IngressConfig{ + ClassName: nil, + StatusAddress: nil, + }, + Debug: contour_api_v1alpha1.DebugConfig{ + Address: "127.0.0.1", + Port: 6060, + DebugLogLevel: contour_api_v1alpha1.InfoLog, + KubernetesDebugLogLevel: 0, + }, + Health: contour_api_v1alpha1.HealthConfig{ + Address: "0.0.0.0", + Port: 8000, + }, + Envoy: contour_api_v1alpha1.EnvoyConfig{ + Service: contour_api_v1alpha1.NamespacedName{ + Name: "envoy", + Namespace: "projectcontour", + }, + HTTPListener: contour_api_v1alpha1.EnvoyListener{ + Address: "0.0.0.0", + Port: 8080, + AccessLog: "/dev/stdout", + }, + HTTPSListener: contour_api_v1alpha1.EnvoyListener{ + Address: "0.0.0.0", + Port: 8443, + AccessLog: "/dev/stdout", + }, + Metrics: contour_api_v1alpha1.MetricsConfig{ + Address: "0.0.0.0", + Port: 8002, + }, + ClientCertificate: nil, + Logging: contour_api_v1alpha1.EnvoyLogging{ + AccessLogFormat: contour_api_v1alpha1.EnvoyAccessLog, + AccessLogFormatString: nil, + AccessLogFields: contour_api_v1alpha1.AccessLogFields([]string{ + "@timestamp", + "authority", + "bytes_received", + "bytes_sent", + "downstream_local_address", + "downstream_remote_address", + "duration", + "method", + "path", + "protocol", + "request_id", + "requested_server_name", + "response_code", + "response_flags", + "uber_trace_id", + "upstream_cluster", + "upstream_host", + "upstream_local_address", + "upstream_service_time", + "user_agent", + "x_forwarded_for", + }), + }, + DefaultHTTPVersions: nil, + Timeouts: &contour_api_v1alpha1.TimeoutParameters{ + ConnectionIdleTimeout: pointer.StringPtr("60s"), + }, + Cluster: contour_api_v1alpha1.ClusterParameters{ + DNSLookupFamily: contour_api_v1alpha1.AutoClusterDNSFamily, + }, + Network: contour_api_v1alpha1.NetworkParameters{ + EnvoyAdminPort: 9001, + }, + }, + 
Gateway: nil, + HTTPProxy: contour_api_v1alpha1.HTTPProxyConfig{ + DisablePermitInsecure: false, + FallbackCertificate: nil, + }, + LeaderElection: contour_api_v1alpha1.LeaderElectionConfig{ + LeaseDuration: "15s", + RenewDeadline: "10s", + RetryPeriod: "2s", + Configmap: contour_api_v1alpha1.NamespacedName{ + Name: "leader-elect", + Namespace: "projectcontour", + }, + DisableLeaderElection: false, + }, + EnableExternalNameService: false, + RateLimitService: nil, + Policy: &contour_api_v1alpha1.PolicyConfig{ + RequestHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{}, + ResponseHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{}, + ApplyToIngress: false, + }, + Metrics: contour_api_v1alpha1.MetricsConfig{ + Address: "0.0.0.0", + Port: 8000, + }, + }, + }, + "headers policy": { + serveContext: headersPolicyContext, + contourConfig: contour_api_v1alpha1.ContourConfigurationSpec{ + XDSServer: contour_api_v1alpha1.XDSServerConfig{ + Type: contour_api_v1alpha1.ContourServerType, + Address: "127.0.0.1", + Port: 8001, + TLS: &contour_api_v1alpha1.TLS{ + Insecure: false, + }, + }, + Ingress: &contour_api_v1alpha1.IngressConfig{ + ClassName: nil, + StatusAddress: nil, + }, + Debug: contour_api_v1alpha1.DebugConfig{ + Address: "127.0.0.1", + Port: 6060, + DebugLogLevel: contour_api_v1alpha1.InfoLog, + KubernetesDebugLogLevel: 0, + }, + Health: contour_api_v1alpha1.HealthConfig{ + Address: "0.0.0.0", + Port: 8000, + }, + Envoy: contour_api_v1alpha1.EnvoyConfig{ + Service: contour_api_v1alpha1.NamespacedName{ + Name: "envoy", + Namespace: "projectcontour", + }, + HTTPListener: contour_api_v1alpha1.EnvoyListener{ + Address: "0.0.0.0", + Port: 8080, + AccessLog: "/dev/stdout", + }, + HTTPSListener: contour_api_v1alpha1.EnvoyListener{ + Address: "0.0.0.0", + Port: 8443, + AccessLog: "/dev/stdout", + }, + Metrics: contour_api_v1alpha1.MetricsConfig{ + Address: "0.0.0.0", + Port: 8002, + }, + ClientCertificate: nil, + Logging: contour_api_v1alpha1.EnvoyLogging{ + 
AccessLogFormat: contour_api_v1alpha1.EnvoyAccessLog, + AccessLogFormatString: nil, + AccessLogFields: contour_api_v1alpha1.AccessLogFields([]string{ + "@timestamp", + "authority", + "bytes_received", + "bytes_sent", + "downstream_local_address", + "downstream_remote_address", + "duration", + "method", + "path", + "protocol", + "request_id", + "requested_server_name", + "response_code", + "response_flags", + "uber_trace_id", + "upstream_cluster", + "upstream_host", + "upstream_local_address", + "upstream_service_time", + "user_agent", + "x_forwarded_for", + }), + }, + DefaultHTTPVersions: nil, + Timeouts: &contour_api_v1alpha1.TimeoutParameters{ + ConnectionIdleTimeout: pointer.StringPtr("60s"), + }, + Cluster: contour_api_v1alpha1.ClusterParameters{ + DNSLookupFamily: contour_api_v1alpha1.AutoClusterDNSFamily, + }, + Network: contour_api_v1alpha1.NetworkParameters{ + EnvoyAdminPort: 9001, + }, + }, + Gateway: nil, + HTTPProxy: contour_api_v1alpha1.HTTPProxyConfig{ + DisablePermitInsecure: false, + FallbackCertificate: nil, + }, + LeaderElection: contour_api_v1alpha1.LeaderElectionConfig{ + LeaseDuration: "15s", + RenewDeadline: "10s", + RetryPeriod: "2s", + Configmap: contour_api_v1alpha1.NamespacedName{ + Name: "leader-elect", + Namespace: "projectcontour", + }, + DisableLeaderElection: false, + }, + EnableExternalNameService: false, + RateLimitService: nil, + Policy: &contour_api_v1alpha1.PolicyConfig{ + RequestHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{ + Set: map[string]string{"custom-request-header-set": "foo-bar", "Host": "request-bar.com"}, + Remove: []string{"custom-request-header-remove"}, + }, + ResponseHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{ + Set: map[string]string{"custom-response-header-set": "foo-bar", "Host": "response-bar.com"}, + Remove: []string{"custom-response-header-remove"}, + }, + ApplyToIngress: true, + }, + Metrics: contour_api_v1alpha1.MetricsConfig{ + Address: "0.0.0.0", + Port: 8000, + }, + }, + }, + "ingress": { + 
serveContext: ingressContext, + contourConfig: contour_api_v1alpha1.ContourConfigurationSpec{ + XDSServer: contour_api_v1alpha1.XDSServerConfig{ + Type: contour_api_v1alpha1.ContourServerType, + Address: "127.0.0.1", + Port: 8001, + TLS: &contour_api_v1alpha1.TLS{ + Insecure: false, + }, + }, + Ingress: &contour_api_v1alpha1.IngressConfig{ + ClassName: pointer.StringPtr("coolclass"), + StatusAddress: pointer.StringPtr("1.2.3.4"), + }, + Debug: contour_api_v1alpha1.DebugConfig{ + Address: "127.0.0.1", + Port: 6060, + DebugLogLevel: contour_api_v1alpha1.InfoLog, + KubernetesDebugLogLevel: 0, + }, + Health: contour_api_v1alpha1.HealthConfig{ + Address: "0.0.0.0", + Port: 8000, + }, + Envoy: contour_api_v1alpha1.EnvoyConfig{ + Service: contour_api_v1alpha1.NamespacedName{ + Name: "envoy", + Namespace: "projectcontour", + }, + HTTPListener: contour_api_v1alpha1.EnvoyListener{ + Address: "0.0.0.0", + Port: 8080, + AccessLog: "/dev/stdout", + }, + HTTPSListener: contour_api_v1alpha1.EnvoyListener{ + Address: "0.0.0.0", + Port: 8443, + AccessLog: "/dev/stdout", + }, + Metrics: contour_api_v1alpha1.MetricsConfig{ + Address: "0.0.0.0", + Port: 8002, + }, + ClientCertificate: nil, + Logging: contour_api_v1alpha1.EnvoyLogging{ + AccessLogFormat: contour_api_v1alpha1.EnvoyAccessLog, + AccessLogFormatString: nil, + AccessLogFields: contour_api_v1alpha1.AccessLogFields([]string{ + "@timestamp", + "authority", + "bytes_received", + "bytes_sent", + "downstream_local_address", + "downstream_remote_address", + "duration", + "method", + "path", + "protocol", + "request_id", + "requested_server_name", + "response_code", + "response_flags", + "uber_trace_id", + "upstream_cluster", + "upstream_host", + "upstream_local_address", + "upstream_service_time", + "user_agent", + "x_forwarded_for", + }), + }, + DefaultHTTPVersions: nil, + Timeouts: &contour_api_v1alpha1.TimeoutParameters{ + ConnectionIdleTimeout: pointer.StringPtr("60s"), + }, + Cluster: contour_api_v1alpha1.ClusterParameters{ + 
DNSLookupFamily: contour_api_v1alpha1.AutoClusterDNSFamily, + }, + Network: contour_api_v1alpha1.NetworkParameters{ + EnvoyAdminPort: 9001, + }, + }, + Gateway: nil, + HTTPProxy: contour_api_v1alpha1.HTTPProxyConfig{ + DisablePermitInsecure: false, + FallbackCertificate: nil, + }, + LeaderElection: contour_api_v1alpha1.LeaderElectionConfig{ + LeaseDuration: "15s", + RenewDeadline: "10s", + RetryPeriod: "2s", + Configmap: contour_api_v1alpha1.NamespacedName{ + Name: "leader-elect", + Namespace: "projectcontour", + }, + DisableLeaderElection: false, + }, + EnableExternalNameService: false, + RateLimitService: nil, + Policy: &contour_api_v1alpha1.PolicyConfig{ + RequestHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{}, + ResponseHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{}, + ApplyToIngress: false, + }, + Metrics: contour_api_v1alpha1.MetricsConfig{ + Address: "0.0.0.0", + Port: 8000, + }, + }, + }, + "gatewayapi": { + serveContext: gatewayContext, + contourConfig: contour_api_v1alpha1.ContourConfigurationSpec{ + XDSServer: contour_api_v1alpha1.XDSServerConfig{ + Type: contour_api_v1alpha1.ContourServerType, + Address: "127.0.0.1", + Port: 8001, + TLS: &contour_api_v1alpha1.TLS{ + Insecure: false, + }, + }, + Ingress: &contour_api_v1alpha1.IngressConfig{ + ClassName: nil, + StatusAddress: nil, + }, + Debug: contour_api_v1alpha1.DebugConfig{ + Address: "127.0.0.1", + Port: 6060, + DebugLogLevel: contour_api_v1alpha1.InfoLog, + KubernetesDebugLogLevel: 0, + }, + Health: contour_api_v1alpha1.HealthConfig{ + Address: "0.0.0.0", + Port: 8000, + }, + Envoy: contour_api_v1alpha1.EnvoyConfig{ + Service: contour_api_v1alpha1.NamespacedName{ + Name: "envoy", + Namespace: "projectcontour", + }, + HTTPListener: contour_api_v1alpha1.EnvoyListener{ + Address: "0.0.0.0", + Port: 8080, + AccessLog: "/dev/stdout", + }, + HTTPSListener: contour_api_v1alpha1.EnvoyListener{ + Address: "0.0.0.0", + Port: 8443, + AccessLog: "/dev/stdout", + }, + Metrics: 
contour_api_v1alpha1.MetricsConfig{ + Address: "0.0.0.0", + Port: 8002, + }, + ClientCertificate: nil, + Logging: contour_api_v1alpha1.EnvoyLogging{ + AccessLogFormat: contour_api_v1alpha1.EnvoyAccessLog, + AccessLogFormatString: nil, + AccessLogFields: contour_api_v1alpha1.AccessLogFields([]string{ + "@timestamp", + "authority", + "bytes_received", + "bytes_sent", + "downstream_local_address", + "downstream_remote_address", + "duration", + "method", + "path", + "protocol", + "request_id", + "requested_server_name", + "response_code", + "response_flags", + "uber_trace_id", + "upstream_cluster", + "upstream_host", + "upstream_local_address", + "upstream_service_time", + "user_agent", + "x_forwarded_for", + }), + }, + DefaultHTTPVersions: nil, + Timeouts: &contour_api_v1alpha1.TimeoutParameters{ + ConnectionIdleTimeout: pointer.StringPtr("60s"), + }, + Cluster: contour_api_v1alpha1.ClusterParameters{ + DNSLookupFamily: contour_api_v1alpha1.AutoClusterDNSFamily, + }, + Network: contour_api_v1alpha1.NetworkParameters{ + EnvoyAdminPort: 9001, + }, + }, + Gateway: &contour_api_v1alpha1.GatewayConfig{ + ControllerName: "projectcontour.io/projectcontour/contour", + }, + HTTPProxy: contour_api_v1alpha1.HTTPProxyConfig{ + DisablePermitInsecure: false, + FallbackCertificate: nil, + }, + LeaderElection: contour_api_v1alpha1.LeaderElectionConfig{ + LeaseDuration: "15s", + RenewDeadline: "10s", + RetryPeriod: "2s", + Configmap: contour_api_v1alpha1.NamespacedName{ + Name: "leader-elect", + Namespace: "projectcontour", + }, + DisableLeaderElection: false, + }, + EnableExternalNameService: false, + RateLimitService: nil, + Policy: &contour_api_v1alpha1.PolicyConfig{ + RequestHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{}, + ResponseHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{}, + ApplyToIngress: false, + }, + Metrics: contour_api_v1alpha1.MetricsConfig{ + Address: "0.0.0.0", + Port: 8000, + }, + }, + }, + "client certificate": { + serveContext: clientCertificate, + 
contourConfig: contour_api_v1alpha1.ContourConfigurationSpec{ + XDSServer: contour_api_v1alpha1.XDSServerConfig{ + Type: contour_api_v1alpha1.ContourServerType, + Address: "127.0.0.1", + Port: 8001, + TLS: &contour_api_v1alpha1.TLS{ + Insecure: false, + }, + }, + Ingress: &contour_api_v1alpha1.IngressConfig{ + ClassName: nil, + StatusAddress: nil, + }, + Debug: contour_api_v1alpha1.DebugConfig{ + Address: "127.0.0.1", + Port: 6060, + DebugLogLevel: contour_api_v1alpha1.InfoLog, + KubernetesDebugLogLevel: 0, + }, + Health: contour_api_v1alpha1.HealthConfig{ + Address: "0.0.0.0", + Port: 8000, + }, + Envoy: contour_api_v1alpha1.EnvoyConfig{ + Service: contour_api_v1alpha1.NamespacedName{ + Name: "envoy", + Namespace: "projectcontour", + }, + HTTPListener: contour_api_v1alpha1.EnvoyListener{ + Address: "0.0.0.0", + Port: 8080, + AccessLog: "/dev/stdout", + }, + HTTPSListener: contour_api_v1alpha1.EnvoyListener{ + Address: "0.0.0.0", + Port: 8443, + AccessLog: "/dev/stdout", + }, + Metrics: contour_api_v1alpha1.MetricsConfig{ + Address: "0.0.0.0", + Port: 8002, + }, + ClientCertificate: &contour_api_v1alpha1.NamespacedName{ + Name: "cert", + Namespace: "secretplace", + }, + Logging: contour_api_v1alpha1.EnvoyLogging{ + AccessLogFormat: contour_api_v1alpha1.EnvoyAccessLog, + AccessLogFormatString: nil, + AccessLogFields: contour_api_v1alpha1.AccessLogFields([]string{ + "@timestamp", + "authority", + "bytes_received", + "bytes_sent", + "downstream_local_address", + "downstream_remote_address", + "duration", + "method", + "path", + "protocol", + "request_id", + "requested_server_name", + "response_code", + "response_flags", + "uber_trace_id", + "upstream_cluster", + "upstream_host", + "upstream_local_address", + "upstream_service_time", + "user_agent", + "x_forwarded_for", + }), + }, + DefaultHTTPVersions: nil, + Timeouts: &contour_api_v1alpha1.TimeoutParameters{ + ConnectionIdleTimeout: pointer.StringPtr("60s"), + }, + Cluster: contour_api_v1alpha1.ClusterParameters{ + 
DNSLookupFamily: contour_api_v1alpha1.AutoClusterDNSFamily, + }, + Network: contour_api_v1alpha1.NetworkParameters{ + EnvoyAdminPort: 9001, + }, + }, + Gateway: nil, + HTTPProxy: contour_api_v1alpha1.HTTPProxyConfig{ + DisablePermitInsecure: false, + FallbackCertificate: nil, + }, + LeaderElection: contour_api_v1alpha1.LeaderElectionConfig{ + LeaseDuration: "15s", + RenewDeadline: "10s", + RetryPeriod: "2s", + Configmap: contour_api_v1alpha1.NamespacedName{ + Name: "leader-elect", + Namespace: "projectcontour", + }, + DisableLeaderElection: false, + }, + EnableExternalNameService: false, + RateLimitService: nil, + Policy: &contour_api_v1alpha1.PolicyConfig{ + RequestHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{}, + ResponseHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{}, + ApplyToIngress: false, + }, + Metrics: contour_api_v1alpha1.MetricsConfig{ + Address: "0.0.0.0", + Port: 8000, + }, + }, + }, + "httpproxy": { + serveContext: httpProxy, + contourConfig: contour_api_v1alpha1.ContourConfigurationSpec{ + XDSServer: contour_api_v1alpha1.XDSServerConfig{ + Type: contour_api_v1alpha1.ContourServerType, + Address: "127.0.0.1", + Port: 8001, + TLS: &contour_api_v1alpha1.TLS{ + Insecure: false, + }, + }, + Ingress: &contour_api_v1alpha1.IngressConfig{ + ClassName: nil, + StatusAddress: nil, + }, + Debug: contour_api_v1alpha1.DebugConfig{ + Address: "127.0.0.1", + Port: 6060, + DebugLogLevel: contour_api_v1alpha1.InfoLog, + KubernetesDebugLogLevel: 0, + }, + Health: contour_api_v1alpha1.HealthConfig{ + Address: "0.0.0.0", + Port: 8000, + }, + Envoy: contour_api_v1alpha1.EnvoyConfig{ + Service: contour_api_v1alpha1.NamespacedName{ + Name: "envoy", + Namespace: "projectcontour", + }, + HTTPListener: contour_api_v1alpha1.EnvoyListener{ + Address: "0.0.0.0", + Port: 8080, + AccessLog: "/dev/stdout", + }, + HTTPSListener: contour_api_v1alpha1.EnvoyListener{ + Address: "0.0.0.0", + Port: 8443, + AccessLog: "/dev/stdout", + }, + Metrics: 
contour_api_v1alpha1.MetricsConfig{ + Address: "0.0.0.0", + Port: 8002, + }, + ClientCertificate: nil, + Logging: contour_api_v1alpha1.EnvoyLogging{ + AccessLogFormat: contour_api_v1alpha1.EnvoyAccessLog, + AccessLogFormatString: nil, + AccessLogFields: contour_api_v1alpha1.AccessLogFields([]string{ + "@timestamp", + "authority", + "bytes_received", + "bytes_sent", + "downstream_local_address", + "downstream_remote_address", + "duration", + "method", + "path", + "protocol", + "request_id", + "requested_server_name", + "response_code", + "response_flags", + "uber_trace_id", + "upstream_cluster", + "upstream_host", + "upstream_local_address", + "upstream_service_time", + "user_agent", + "x_forwarded_for", + }), + }, + DefaultHTTPVersions: nil, + Timeouts: &contour_api_v1alpha1.TimeoutParameters{ + ConnectionIdleTimeout: pointer.StringPtr("60s"), + }, + Cluster: contour_api_v1alpha1.ClusterParameters{ + DNSLookupFamily: contour_api_v1alpha1.AutoClusterDNSFamily, + }, + Network: contour_api_v1alpha1.NetworkParameters{ + EnvoyAdminPort: 9001, + }, + }, + Gateway: nil, + HTTPProxy: contour_api_v1alpha1.HTTPProxyConfig{ + DisablePermitInsecure: true, + FallbackCertificate: &contour_api_v1alpha1.NamespacedName{ + Name: "fallbackname", + Namespace: "fallbacknamespace", + }, + }, + LeaderElection: contour_api_v1alpha1.LeaderElectionConfig{ + LeaseDuration: "15s", + RenewDeadline: "10s", + RetryPeriod: "2s", + Configmap: contour_api_v1alpha1.NamespacedName{ + Name: "leader-elect", + Namespace: "projectcontour", + }, + DisableLeaderElection: false, + }, + EnableExternalNameService: false, + RateLimitService: nil, + Policy: &contour_api_v1alpha1.PolicyConfig{ + RequestHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{}, + ResponseHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{}, + ApplyToIngress: false, + }, + Metrics: contour_api_v1alpha1.MetricsConfig{ + Address: "0.0.0.0", + Port: 8000, + }, + }, + }, + "ratelimit": { + serveContext: rateLimit, + contourConfig: 
contour_api_v1alpha1.ContourConfigurationSpec{ + XDSServer: contour_api_v1alpha1.XDSServerConfig{ + Type: contour_api_v1alpha1.ContourServerType, + Address: "127.0.0.1", + Port: 8001, + TLS: &contour_api_v1alpha1.TLS{ + Insecure: false, + }, + }, + Ingress: &contour_api_v1alpha1.IngressConfig{ + ClassName: nil, + StatusAddress: nil, + }, + Debug: contour_api_v1alpha1.DebugConfig{ + Address: "127.0.0.1", + Port: 6060, + DebugLogLevel: contour_api_v1alpha1.InfoLog, + KubernetesDebugLogLevel: 0, + }, + Health: contour_api_v1alpha1.HealthConfig{ + Address: "0.0.0.0", + Port: 8000, + }, + Envoy: contour_api_v1alpha1.EnvoyConfig{ + Service: contour_api_v1alpha1.NamespacedName{ + Name: "envoy", + Namespace: "projectcontour", + }, + HTTPListener: contour_api_v1alpha1.EnvoyListener{ + Address: "0.0.0.0", + Port: 8080, + AccessLog: "/dev/stdout", + }, + HTTPSListener: contour_api_v1alpha1.EnvoyListener{ + Address: "0.0.0.0", + Port: 8443, + AccessLog: "/dev/stdout", + }, + Metrics: contour_api_v1alpha1.MetricsConfig{ + Address: "0.0.0.0", + Port: 8002, + }, + ClientCertificate: nil, + Logging: contour_api_v1alpha1.EnvoyLogging{ + AccessLogFormat: contour_api_v1alpha1.EnvoyAccessLog, + AccessLogFormatString: nil, + AccessLogFields: contour_api_v1alpha1.AccessLogFields([]string{ + "@timestamp", + "authority", + "bytes_received", + "bytes_sent", + "downstream_local_address", + "downstream_remote_address", + "duration", + "method", + "path", + "protocol", + "request_id", + "requested_server_name", + "response_code", + "response_flags", + "uber_trace_id", + "upstream_cluster", + "upstream_host", + "upstream_local_address", + "upstream_service_time", + "user_agent", + "x_forwarded_for", + }), + }, + DefaultHTTPVersions: nil, + Timeouts: &contour_api_v1alpha1.TimeoutParameters{ + ConnectionIdleTimeout: pointer.StringPtr("60s"), + }, + Cluster: contour_api_v1alpha1.ClusterParameters{ + DNSLookupFamily: contour_api_v1alpha1.AutoClusterDNSFamily, + }, + Network: 
contour_api_v1alpha1.NetworkParameters{ + EnvoyAdminPort: 9001, + }, + }, + Gateway: nil, + HTTPProxy: contour_api_v1alpha1.HTTPProxyConfig{ + DisablePermitInsecure: false, + FallbackCertificate: nil, + }, + LeaderElection: contour_api_v1alpha1.LeaderElectionConfig{ + LeaseDuration: "15s", + RenewDeadline: "10s", + RetryPeriod: "2s", + Configmap: contour_api_v1alpha1.NamespacedName{ + Name: "leader-elect", + Namespace: "projectcontour", + }, + DisableLeaderElection: false, + }, + EnableExternalNameService: false, + RateLimitService: &contour_api_v1alpha1.RateLimitServiceConfig{ + ExtensionService: contour_api_v1alpha1.NamespacedName{ + Name: "ratelimitext", + Namespace: "ratens", + }, + Domain: "contour", + FailOpen: true, + EnableXRateLimitHeaders: true, + }, + Policy: &contour_api_v1alpha1.PolicyConfig{ + RequestHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{}, + ResponseHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{}, + ApplyToIngress: false, + }, + Metrics: contour_api_v1alpha1.MetricsConfig{ + Address: "0.0.0.0", + Port: 8000, + }, + }, + }, + "default http versions": { + serveContext: defaultHTTPVersions, + contourConfig: contour_api_v1alpha1.ContourConfigurationSpec{ + XDSServer: contour_api_v1alpha1.XDSServerConfig{ + Type: contour_api_v1alpha1.ContourServerType, + Address: "127.0.0.1", + Port: 8001, + TLS: &contour_api_v1alpha1.TLS{ + Insecure: false, + }, + }, + Ingress: &contour_api_v1alpha1.IngressConfig{ + ClassName: nil, + StatusAddress: nil, + }, + Debug: contour_api_v1alpha1.DebugConfig{ + Address: "127.0.0.1", + Port: 6060, + DebugLogLevel: contour_api_v1alpha1.InfoLog, + KubernetesDebugLogLevel: 0, + }, + Health: contour_api_v1alpha1.HealthConfig{ + Address: "0.0.0.0", + Port: 8000, + }, + Envoy: contour_api_v1alpha1.EnvoyConfig{ + Service: contour_api_v1alpha1.NamespacedName{ + Name: "envoy", + Namespace: "projectcontour", + }, + HTTPListener: contour_api_v1alpha1.EnvoyListener{ + Address: "0.0.0.0", + Port: 8080, + AccessLog: 
"/dev/stdout", + }, + HTTPSListener: contour_api_v1alpha1.EnvoyListener{ + Address: "0.0.0.0", + Port: 8443, + AccessLog: "/dev/stdout", + }, + Metrics: contour_api_v1alpha1.MetricsConfig{ + Address: "0.0.0.0", + Port: 8002, + }, + ClientCertificate: nil, + Logging: contour_api_v1alpha1.EnvoyLogging{ + AccessLogFormat: contour_api_v1alpha1.EnvoyAccessLog, + AccessLogFormatString: nil, + AccessLogFields: contour_api_v1alpha1.AccessLogFields([]string{ + "@timestamp", + "authority", + "bytes_received", + "bytes_sent", + "downstream_local_address", + "downstream_remote_address", + "duration", + "method", + "path", + "protocol", + "request_id", + "requested_server_name", + "response_code", + "response_flags", + "uber_trace_id", + "upstream_cluster", + "upstream_host", + "upstream_local_address", + "upstream_service_time", + "user_agent", + "x_forwarded_for", + }), + }, + DefaultHTTPVersions: []contour_api_v1alpha1.HTTPVersionType{ + contour_api_v1alpha1.HTTPVersion1, + }, + Timeouts: &contour_api_v1alpha1.TimeoutParameters{ + ConnectionIdleTimeout: pointer.StringPtr("60s"), + }, + Cluster: contour_api_v1alpha1.ClusterParameters{ + DNSLookupFamily: contour_api_v1alpha1.AutoClusterDNSFamily, + }, + Network: contour_api_v1alpha1.NetworkParameters{ + EnvoyAdminPort: 9001, + }, + }, + Gateway: nil, + HTTPProxy: contour_api_v1alpha1.HTTPProxyConfig{ + DisablePermitInsecure: false, + FallbackCertificate: nil, + }, + LeaderElection: contour_api_v1alpha1.LeaderElectionConfig{ + LeaseDuration: "15s", + RenewDeadline: "10s", + RetryPeriod: "2s", + Configmap: contour_api_v1alpha1.NamespacedName{ + Name: "leader-elect", + Namespace: "projectcontour", + }, + DisableLeaderElection: false, + }, + EnableExternalNameService: false, + RateLimitService: nil, + Policy: &contour_api_v1alpha1.PolicyConfig{ + RequestHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{}, + ResponseHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{}, + ApplyToIngress: false, + }, + Metrics: 
contour_api_v1alpha1.MetricsConfig{ + Address: "0.0.0.0", + Port: 8000, + }, + }, + }, + "access log": { + serveContext: accessLog, + contourConfig: contour_api_v1alpha1.ContourConfigurationSpec{ + XDSServer: contour_api_v1alpha1.XDSServerConfig{ + Type: contour_api_v1alpha1.ContourServerType, + Address: "127.0.0.1", + Port: 8001, + TLS: &contour_api_v1alpha1.TLS{ + Insecure: false, + }, + }, + Ingress: &contour_api_v1alpha1.IngressConfig{ + ClassName: nil, + StatusAddress: nil, + }, + Debug: contour_api_v1alpha1.DebugConfig{ + Address: "127.0.0.1", + Port: 6060, + DebugLogLevel: contour_api_v1alpha1.InfoLog, + KubernetesDebugLogLevel: 0, + }, + Health: contour_api_v1alpha1.HealthConfig{ + Address: "0.0.0.0", + Port: 8000, + }, + Envoy: contour_api_v1alpha1.EnvoyConfig{ + Service: contour_api_v1alpha1.NamespacedName{ + Name: "envoy", + Namespace: "projectcontour", + }, + HTTPListener: contour_api_v1alpha1.EnvoyListener{ + Address: "0.0.0.0", + Port: 8080, + AccessLog: "/dev/stdout", + }, + HTTPSListener: contour_api_v1alpha1.EnvoyListener{ + Address: "0.0.0.0", + Port: 8443, + AccessLog: "/dev/stdout", + }, + Metrics: contour_api_v1alpha1.MetricsConfig{ + Address: "0.0.0.0", + Port: 8002, + }, + ClientCertificate: nil, + Logging: contour_api_v1alpha1.EnvoyLogging{ + AccessLogFormat: contour_api_v1alpha1.JSONAccessLog, + AccessLogFormatString: pointer.StringPtr("foo-bar-baz"), + AccessLogFields: contour_api_v1alpha1.AccessLogFields([]string{ + "custom_field", + }), + }, + DefaultHTTPVersions: nil, + Timeouts: &contour_api_v1alpha1.TimeoutParameters{ + ConnectionIdleTimeout: pointer.StringPtr("60s"), + }, + Cluster: contour_api_v1alpha1.ClusterParameters{ + DNSLookupFamily: contour_api_v1alpha1.AutoClusterDNSFamily, + }, + Network: contour_api_v1alpha1.NetworkParameters{ + EnvoyAdminPort: 9001, + }, + }, + Gateway: nil, + HTTPProxy: contour_api_v1alpha1.HTTPProxyConfig{ + DisablePermitInsecure: false, + FallbackCertificate: nil, + }, + LeaderElection: 
contour_api_v1alpha1.LeaderElectionConfig{ + LeaseDuration: "15s", + RenewDeadline: "10s", + RetryPeriod: "2s", + Configmap: contour_api_v1alpha1.NamespacedName{ + Name: "leader-elect", + Namespace: "projectcontour", + }, + DisableLeaderElection: false, + }, + EnableExternalNameService: false, + RateLimitService: nil, + Policy: &contour_api_v1alpha1.PolicyConfig{ + RequestHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{}, + ResponseHeadersPolicy: &contour_api_v1alpha1.HeadersPolicy{}, + ApplyToIngress: false, + }, + Metrics: contour_api_v1alpha1.MetricsConfig{ + Address: "0.0.0.0", + Port: 8000, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + converted := tc.serveContext.convertToContourConfigurationSpec() + assert.Equal(t, tc.contourConfig, converted) + }) + } +} diff --git a/examples/contour/01-crds.yaml b/examples/contour/01-crds.yaml index 2b7a01d801d..ec40bec8555 100644 --- a/examples/contour/01-crds.yaml +++ b/examples/contour/01-crds.yaml @@ -81,8 +81,8 @@ spec: cluster: dnsLookupFamily: auto defaultHTTPVersions: - - http/1.1 - - http/2 + - HTTP/1.1 + - HTTP/2 http: accessLog: /dev/stdout address: 0.0.0.0 @@ -162,8 +162,8 @@ spec: description: HTTPVersionType is the name of a supported HTTP version. enum: - - http/1.1 - - http/2 + - HTTP/1.1 + - HTTP/2 type: string type: array http: @@ -542,7 +542,8 @@ spec: default: address: 0.0.0.0 port: 8000 - description: Metrics defines the endpoints Envoy use to serve to metrics. + description: Metrics defines the endpoints Contour uses to serve to + metrics. properties: address: description: Defines the metrics address interface. 
@@ -560,6 +561,10 @@ spec: description: Policy specifies default policy applied if not overridden by the user properties: + applyToIngress: + description: ApplyToIngress determines if the Policies will apply + to ingress objects + type: boolean requestHeaders: description: RequestHeadersPolicy defines the request headers set/removed on all routes @@ -986,8 +991,8 @@ spec: cluster: dnsLookupFamily: auto defaultHTTPVersions: - - http/1.1 - - http/2 + - HTTP/1.1 + - HTTP/2 http: accessLog: /dev/stdout address: 0.0.0.0 @@ -1067,8 +1072,8 @@ spec: description: HTTPVersionType is the name of a supported HTTP version. enum: - - http/1.1 - - http/2 + - HTTP/1.1 + - HTTP/2 type: string type: array http: @@ -1453,7 +1458,7 @@ spec: default: address: 0.0.0.0 port: 8000 - description: Metrics defines the endpoints Envoy use to serve + description: Metrics defines the endpoints Contour uses to serve to metrics. properties: address: @@ -1472,6 +1477,10 @@ spec: description: Policy specifies default policy applied if not overridden by the user properties: + applyToIngress: + description: ApplyToIngress determines if the Policies will + apply to ingress objects + type: boolean requestHeaders: description: RequestHeadersPolicy defines the request headers set/removed on all routes diff --git a/examples/render/contour-gateway.yaml b/examples/render/contour-gateway.yaml index 9c7a0dae6bb..0594a9ff7cd 100644 --- a/examples/render/contour-gateway.yaml +++ b/examples/render/contour-gateway.yaml @@ -277,8 +277,8 @@ spec: cluster: dnsLookupFamily: auto defaultHTTPVersions: - - http/1.1 - - http/2 + - HTTP/1.1 + - HTTP/2 http: accessLog: /dev/stdout address: 0.0.0.0 @@ -358,8 +358,8 @@ spec: description: HTTPVersionType is the name of a supported HTTP version. enum: - - http/1.1 - - http/2 + - HTTP/1.1 + - HTTP/2 type: string type: array http: @@ -738,7 +738,8 @@ spec: default: address: 0.0.0.0 port: 8000 - description: Metrics defines the endpoints Envoy use to serve to metrics. 
+ description: Metrics defines the endpoints Contour uses to serve to + metrics. properties: address: description: Defines the metrics address interface. @@ -756,6 +757,10 @@ spec: description: Policy specifies default policy applied if not overridden by the user properties: + applyToIngress: + description: ApplyToIngress determines if the Policies will apply + to ingress objects + type: boolean requestHeaders: description: RequestHeadersPolicy defines the request headers set/removed on all routes @@ -1182,8 +1187,8 @@ spec: cluster: dnsLookupFamily: auto defaultHTTPVersions: - - http/1.1 - - http/2 + - HTTP/1.1 + - HTTP/2 http: accessLog: /dev/stdout address: 0.0.0.0 @@ -1263,8 +1268,8 @@ spec: description: HTTPVersionType is the name of a supported HTTP version. enum: - - http/1.1 - - http/2 + - HTTP/1.1 + - HTTP/2 type: string type: array http: @@ -1649,7 +1654,7 @@ spec: default: address: 0.0.0.0 port: 8000 - description: Metrics defines the endpoints Envoy use to serve + description: Metrics defines the endpoints Contour uses to serve to metrics. properties: address: @@ -1668,6 +1673,10 @@ spec: description: Policy specifies default policy applied if not overridden by the user properties: + applyToIngress: + description: ApplyToIngress determines if the Policies will + apply to ingress objects + type: boolean requestHeaders: description: RequestHeadersPolicy defines the request headers set/removed on all routes diff --git a/examples/render/contour.yaml b/examples/render/contour.yaml index 3c09b8d71d8..0c42cccde3a 100644 --- a/examples/render/contour.yaml +++ b/examples/render/contour.yaml @@ -274,8 +274,8 @@ spec: cluster: dnsLookupFamily: auto defaultHTTPVersions: - - http/1.1 - - http/2 + - HTTP/1.1 + - HTTP/2 http: accessLog: /dev/stdout address: 0.0.0.0 @@ -355,8 +355,8 @@ spec: description: HTTPVersionType is the name of a supported HTTP version. 
enum: - - http/1.1 - - http/2 + - HTTP/1.1 + - HTTP/2 type: string type: array http: @@ -735,7 +735,8 @@ spec: default: address: 0.0.0.0 port: 8000 - description: Metrics defines the endpoints Envoy use to serve to metrics. + description: Metrics defines the endpoints Contour uses to serve to + metrics. properties: address: description: Defines the metrics address interface. @@ -753,6 +754,10 @@ spec: description: Policy specifies default policy applied if not overridden by the user properties: + applyToIngress: + description: ApplyToIngress determines if the Policies will apply + to ingress objects + type: boolean requestHeaders: description: RequestHeadersPolicy defines the request headers set/removed on all routes @@ -1179,8 +1184,8 @@ spec: cluster: dnsLookupFamily: auto defaultHTTPVersions: - - http/1.1 - - http/2 + - HTTP/1.1 + - HTTP/2 http: accessLog: /dev/stdout address: 0.0.0.0 @@ -1260,8 +1265,8 @@ spec: description: HTTPVersionType is the name of a supported HTTP version. enum: - - http/1.1 - - http/2 + - HTTP/1.1 + - HTTP/2 type: string type: array http: @@ -1646,7 +1651,7 @@ spec: default: address: 0.0.0.0 port: 8000 - description: Metrics defines the endpoints Envoy use to serve + description: Metrics defines the endpoints Contour uses to serve to metrics. 
properties: address: @@ -1665,6 +1670,10 @@ spec: description: Policy specifies default policy applied if not overridden by the user properties: + applyToIngress: + description: ApplyToIngress determines if the Policies will + apply to ingress objects + type: boolean requestHeaders: description: RequestHeadersPolicy defines the request headers set/removed on all routes diff --git a/internal/dag/cache.go b/internal/dag/cache.go index b82a48ef2fe..8d5dc06120b 100644 --- a/internal/dag/cache.go +++ b/internal/dag/cache.go @@ -209,7 +209,8 @@ func (kc *KubernetesCache) Insert(obj interface{}) bool { case *contour_api_v1alpha1.ExtensionService: kc.extensions[k8s.NamespacedNameOf(obj)] = obj return true - + case *contour_api_v1alpha1.ContourConfiguration: + return false default: // not an interesting object kc.WithField("object", obj).Error("insert unknown object") @@ -300,7 +301,8 @@ func (kc *KubernetesCache) remove(obj interface{}) bool { _, ok := kc.extensions[m] delete(kc.extensions, m) return ok - + case *contour_api_v1alpha1.ContourConfiguration: + return false default: // not interesting kc.WithField("object", obj).Error("remove unknown object") diff --git a/internal/dag/httpproxy_processor.go b/internal/dag/httpproxy_processor.go index fb243fd725c..1cb84fc27ea 100644 --- a/internal/dag/httpproxy_processor.go +++ b/internal/dag/httpproxy_processor.go @@ -26,7 +26,6 @@ import ( "github.com/projectcontour/contour/internal/k8s" "github.com/projectcontour/contour/internal/status" "github.com/projectcontour/contour/internal/timeout" - "github.com/projectcontour/contour/pkg/config" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" @@ -71,7 +70,7 @@ type HTTPProxyProcessor struct { // for addresses in the IPv6 family and fallback to a lookup for addresses // in the IPv4 family. // Note: This only applies to externalName clusters. 
- DNSLookupFamily config.ClusterDNSFamilyType + DNSLookupFamily contour_api_v1alpha1.ClusterDNSFamilyType // ClientCertificate is the optional identifier of the TLS secret containing client certificate and // private key to be used when establishing TLS connection to upstream cluster. diff --git a/internal/envoy/v3/accesslog.go b/internal/envoy/v3/accesslog.go index 3b308ea953b..322cd161ceb 100644 --- a/internal/envoy/v3/accesslog.go +++ b/internal/envoy/v3/accesslog.go @@ -20,8 +20,8 @@ import ( envoy_req_without_query_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/formatter/req_without_query/v3" "github.com/envoyproxy/go-control-plane/pkg/wellknown" _struct "github.com/golang/protobuf/ptypes/struct" + contour_api_v1alpha1 "github.com/projectcontour/contour/apis/projectcontour/v1alpha1" "github.com/projectcontour/contour/internal/protobuf" - "github.com/projectcontour/contour/pkg/config" ) // FileAccessLogEnvoy returns a new file based access log filter @@ -57,7 +57,7 @@ func FileAccessLogEnvoy(path string, format string, extensions []string) []*envo // FileAccessLogJSON returns a new file based access log filter // that will log in JSON format -func FileAccessLogJSON(path string, fields config.AccessLogFields, extensions []string) []*envoy_accesslog_v3.AccessLog { +func FileAccessLogJSON(path string, fields contour_api_v1alpha1.AccessLogFields, extensions []string) []*envoy_accesslog_v3.AccessLog { jsonformat := &_struct.Struct{ Fields: make(map[string]*_struct.Value), diff --git a/internal/envoy/v3/accesslog_test.go b/internal/envoy/v3/accesslog_test.go index 70120e36b35..af439ecea82 100644 --- a/internal/envoy/v3/accesslog_test.go +++ b/internal/envoy/v3/accesslog_test.go @@ -22,8 +22,8 @@ import ( envoy_req_without_query_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/formatter/req_without_query/v3" "github.com/envoyproxy/go-control-plane/pkg/wellknown" _struct "github.com/golang/protobuf/ptypes/struct" + contour_api_v1alpha1 
"github.com/projectcontour/contour/apis/projectcontour/v1alpha1" "github.com/projectcontour/contour/internal/protobuf" - "github.com/projectcontour/contour/pkg/config" ) func TestFileAccessLog(t *testing.T) { @@ -107,12 +107,12 @@ func TestFileAccessLog(t *testing.T) { func TestJSONFileAccessLog(t *testing.T) { tests := map[string]struct { path string - headers config.AccessLogFields + headers contour_api_v1alpha1.AccessLogFields want []*envoy_accesslog_v3.AccessLog }{ "only timestamp": { path: "/dev/stdout", - headers: config.AccessLogFields([]string{"@timestamp"}), + headers: contour_api_v1alpha1.AccessLogFields([]string{"@timestamp"}), want: []*envoy_accesslog_v3.AccessLog{{ Name: wellknown.FileAccessLog, ConfigType: &envoy_accesslog_v3.AccessLog_TypedConfig{ @@ -136,7 +136,7 @@ func TestJSONFileAccessLog(t *testing.T) { }, "custom fields should appear": { path: "/dev/stdout", - headers: config.AccessLogFields([]string{ + headers: contour_api_v1alpha1.AccessLogFields([]string{ "@timestamp", "method", "custom1=%REQ(X-CUSTOM-HEADER)%", diff --git a/internal/featuretests/v3/featuretests.go b/internal/featuretests/v3/featuretests.go index 0d652f51143..9154de7cc9a 100644 --- a/internal/featuretests/v3/featuretests.go +++ b/internal/featuretests/v3/featuretests.go @@ -62,8 +62,6 @@ const ( routeType = resource.RouteType listenerType = resource.ListenerType secretType = resource.SecretType - statsAddress = "0.0.0.0" - statsPort = 8002 ) func setup(t *testing.T, opts ...interface{}) (cache.ResourceEventHandler, *Contour, func()) { @@ -82,7 +80,7 @@ func setup(t *testing.T, opts ...interface{}) (cache.ResourceEventHandler, *Cont } resources := []xdscache.ResourceCache{ - xdscache_v3.NewListenerCache(conf, statsAddress, statsPort, 0), + xdscache_v3.NewListenerCache(conf, "0.0.0.0", 8002, 0), &xdscache_v3.SecretCache{}, &xdscache_v3.RouteCache{}, &xdscache_v3.ClusterCache{}, diff --git a/internal/featuretests/v3/listeners_test.go 
b/internal/featuretests/v3/listeners_test.go index 16602e95a5c..503512001d1 100644 --- a/internal/featuretests/v3/listeners_test.go +++ b/internal/featuretests/v3/listeners_test.go @@ -36,7 +36,7 @@ func customAdminPort(t *testing.T, port int) []xdscache.ResourceCache { et := xdscache_v3.NewEndpointsTranslator(log) conf := xdscache_v3.ListenerConfig{} return []xdscache.ResourceCache{ - xdscache_v3.NewListenerCache(conf, statsAddress, statsPort, port), + xdscache_v3.NewListenerCache(conf, "0.0.0.0", 8002, port), &xdscache_v3.SecretCache{}, &xdscache_v3.RouteCache{}, &xdscache_v3.ClusterCache{}, diff --git a/internal/k8s/informers.go b/internal/k8s/informers.go index f777af36513..c78da03bb38 100644 --- a/internal/k8s/informers.go +++ b/internal/k8s/informers.go @@ -42,11 +42,6 @@ func DefaultResources() []schema.GroupVersionResource { contour_api_v1alpha1.ExtensionServiceGVR, contour_api_v1alpha1.ContourConfigurationGVR, corev1.SchemeGroupVersion.WithResource("services"), - } -} - -func IngressV1Resources() []schema.GroupVersionResource { - return []schema.GroupVersionResource{ networking_v1.SchemeGroupVersion.WithResource("ingresses"), networking_v1.SchemeGroupVersion.WithResource("ingressclasses"), } diff --git a/internal/xdscache/v3/listener.go b/internal/xdscache/v3/listener.go index b7b863b4144..e6367511a99 100644 --- a/internal/xdscache/v3/listener.go +++ b/internal/xdscache/v3/listener.go @@ -18,12 +18,15 @@ import ( "sort" "sync" + "github.com/sirupsen/logrus" + envoy_accesslog_v3 "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3" envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" http "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" envoy_tls_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" resource "github.com/envoyproxy/go-control-plane/pkg/resource/v3" "github.com/golang/protobuf/proto" + contour_api_v1alpha1 
"github.com/projectcontour/contour/apis/projectcontour/v1alpha1" "github.com/projectcontour/contour/internal/contour" "github.com/projectcontour/contour/internal/dag" envoy_v3 "github.com/projectcontour/contour/internal/envoy/v3" @@ -96,12 +99,12 @@ type ListenerConfig struct { // AccessLogType defines if Envoy logs should be output as Envoy's default or JSON. // Valid values: 'envoy', 'json' // If not set, defaults to 'envoy' - AccessLogType config.AccessLogType + AccessLogType contour_api_v1alpha1.AccessLogType // AccessLogFields sets the fields that should be shown in JSON logs. // Valid entries are the keys from internal/envoy/accesslog.go:jsonheaders // Defaults to a particular set of fields. - AccessLogFields config.AccessLogFields + AccessLogFields contour_api_v1alpha1.AccessLogFields // AccessLogFormatString sets the format string to be used for text based access logs. // Defaults to empty to defer to Envoy's default log format. @@ -156,6 +159,119 @@ type RateLimitConfig struct { EnableXRateLimitHeaders bool } +func NewListenerConfig( + useProxyProto bool, + httpListener contour_api_v1alpha1.EnvoyListener, + httpsListener contour_api_v1alpha1.EnvoyListener, + accessLogType contour_api_v1alpha1.AccessLogType, + accessLogFields contour_api_v1alpha1.AccessLogFields, + accessLogFormatString *string, + accessLogFormatterExtensions []string, + minimumTLSVersion string, + cipherSuites []string, + timeoutParameters *contour_api_v1alpha1.TimeoutParameters, + defaultHTTPVersions []envoy_v3.HTTPVersionType, + allowChunkedLength bool, + xffNumTrustedHops uint32, + connectionBalancer string, + log logrus.FieldLogger) ListenerConfig { + + // connection balancer + if ok := connectionBalancer == "exact" || connectionBalancer == ""; !ok { + log.Warnf("Invalid listener connection balancer value %q. 
Only 'exact' connection balancing is supported for now.", connectionBalancer) + connectionBalancer = "" + } + + var connectionIdleTimeoutSetting timeout.Setting + var streamIdleTimeoutSetting timeout.Setting + var delayedCloseTimeoutSetting timeout.Setting + var maxConnectionDurationSetting timeout.Setting + var connectionShutdownGracePeriodSetting timeout.Setting + var requestTimeoutSetting timeout.Setting + var err error + + if timeoutParameters != nil { + if timeoutParameters.ConnectionIdleTimeout != nil { + connectionIdleTimeoutSetting, err = timeout.Parse(*timeoutParameters.ConnectionIdleTimeout) + if err != nil { + log.Errorf("error parsing connection idle timeout: %v", err) + } + } + if timeoutParameters.StreamIdleTimeout != nil { + streamIdleTimeoutSetting, err = timeout.Parse(*timeoutParameters.StreamIdleTimeout) + if err != nil { + log.Errorf("error parsing stream idle timeout: %v", err) + } + } + if timeoutParameters.DelayedCloseTimeout != nil { + delayedCloseTimeoutSetting, err = timeout.Parse(*timeoutParameters.DelayedCloseTimeout) + if err != nil { + log.Errorf("error parsing delayed close timeout: %v", err) + } + } + if timeoutParameters.MaxConnectionDuration != nil { + maxConnectionDurationSetting, err = timeout.Parse(*timeoutParameters.MaxConnectionDuration) + if err != nil { + log.Errorf("error parsing max connection duration: %v", err) + } + } + if timeoutParameters.ConnectionShutdownGracePeriod != nil { + connectionShutdownGracePeriodSetting, err = timeout.Parse(*timeoutParameters.ConnectionShutdownGracePeriod) + if err != nil { + log.Errorf("error parsing connection shutdown grace period: %v", err) + } + } + if timeoutParameters.RequestTimeout != nil { + requestTimeoutSetting, err = timeout.Parse(*timeoutParameters.RequestTimeout) + if err != nil { + log.Errorf("error parsing request timeout: %v", err) + } + } + } + + accessLogFormatStringConverted := "" + if accessLogFormatString != nil { + accessLogFormatStringConverted = *accessLogFormatString + }
+ + lc := ListenerConfig{ + UseProxyProto: useProxyProto, + HTTPListeners: map[string]Listener{ + "ingress_http": { + Name: "ingress_http", + Address: httpListener.Address, + Port: httpListener.Port, + }, + }, + HTTPSListeners: map[string]Listener{ + "ingress_https": { + Name: "ingress_https", + Address: httpsListener.Address, + Port: httpsListener.Port, + }, + }, + HTTPAccessLog: httpListener.AccessLog, + HTTPSAccessLog: httpsListener.AccessLog, + AccessLogType: accessLogType, + AccessLogFields: accessLogFields, + AccessLogFormatString: accessLogFormatStringConverted, + AccessLogFormatterExtensions: accessLogFormatterExtensions, + MinimumTLSVersion: minimumTLSVersion, + CipherSuites: cipherSuites, + RequestTimeout: requestTimeoutSetting, + ConnectionIdleTimeout: connectionIdleTimeoutSetting, + StreamIdleTimeout: streamIdleTimeoutSetting, + DelayedCloseTimeout: delayedCloseTimeoutSetting, + MaxConnectionDuration: maxConnectionDurationSetting, + ConnectionShutdownGracePeriod: connectionShutdownGracePeriodSetting, + DefaultHTTPVersions: defaultHTTPVersions, + AllowChunkedLength: !allowChunkedLength, + XffNumTrustedHops: xffNumTrustedHops, + ConnectionBalancer: connectionBalancer, + } + return lc +} + // DefaultListeners returns the configured Listeners or a single // Insecure (http) & single Secure (https) default listeners // if not provided. @@ -242,11 +358,11 @@ func (lvc *ListenerConfig) accesslogType() string { // accesslogFields returns the access log fields that should be configured // for Envoy, or a default set if not configured. 
-func (lvc *ListenerConfig) accesslogFields() config.AccessLogFields { +func (lvc *ListenerConfig) accesslogFields() contour_api_v1alpha1.AccessLogFields { if lvc.AccessLogFields != nil { return lvc.AccessLogFields } - return config.DefaultFields + return contour_api_v1alpha1.DefaultFields } func (lvc *ListenerConfig) newInsecureAccessLog() []*envoy_accesslog_v3.AccessLog { @@ -288,8 +404,8 @@ type ListenerCache struct { } // NewListenerCache returns an instance of a ListenerCache -func NewListenerCache(config ListenerConfig, statsAddress string, statsPort, adminPort int) *ListenerCache { - stats := envoy_v3.StatsListener(statsAddress, statsPort) +func NewListenerCache(config ListenerConfig, statsAddr string, statsPort, adminPort int) *ListenerCache { + stats := envoy_v3.StatsListener(statsAddr, statsPort) admin := envoy_v3.AdminListener("127.0.0.1", adminPort) listenerCache := &ListenerCache{ diff --git a/site/content/docs/main/config/api-reference.html b/site/content/docs/main/config/api-reference.html index b251d71c618..72dfc0c398d 100644 --- a/site/content/docs/main/config/api-reference.html +++ b/site/content/docs/main/config/api-reference.html @@ -3466,7 +3466,7 @@

ContourConfiguration (Optional) -

Metrics defines the endpoints Envoy use to serve to metrics.

+

Metrics defines the endpoints Contour uses to serve metrics.

@@ -4045,7 +4045,7 @@

ContourConfiguratio (Optional) -

Metrics defines the endpoints Envoy use to serve to metrics.

+

Metrics defines the endpoints Contour uses to serve metrics.

@@ -4234,7 +4234,7 @@

DebugConfig kubernetesLogLevel
-int +uint @@ -5409,6 +5409,19 @@

PolicyConfig

ResponseHeadersPolicy defines the response headers set/removed on all routes

+ + +applyToIngress +
+ +bool + + + +(Optional) +

ApplyToIngress determines if the Policies will apply to ingress objects

+ +

RateLimitServiceConfig diff --git a/test/e2e/deployment.go b/test/e2e/deployment.go index 98fac12df82..c5b84212d7d 100644 --- a/test/e2e/deployment.go +++ b/test/e2e/deployment.go @@ -18,18 +18,21 @@ package e2e import ( "context" + "crypto/rand" "errors" "fmt" "io" "io/ioutil" + "math/big" "os" "os/exec" "path/filepath" "runtime" + "strconv" "time" "github.com/onsi/gomega/gexec" - contourv1alpha1 "github.com/projectcontour/contour/apis/projectcontour/v1alpha1" + contour_api_v1alpha1 "github.com/projectcontour/contour/apis/projectcontour/v1alpha1" "github.com/projectcontour/contour/pkg/config" "gopkg.in/yaml.v2" apps_v1 "k8s.io/api/apps/v1" @@ -84,7 +87,7 @@ type Deployment struct { // Ratelimit deployment. RateLimitDeployment *apps_v1.Deployment RateLimitService *v1.Service - RateLimitExtensionService *contourv1alpha1.ExtensionService + RateLimitExtensionService *contour_api_v1alpha1.ExtensionService } // UnmarshalResources unmarshals resources from rendered Contour manifest in @@ -180,7 +183,7 @@ func (d *Deployment) UnmarshalResources() error { } defer rLESFile.Close() decoder = apimachinery_util_yaml.NewYAMLToJSONDecoder(rLESFile) - d.RateLimitExtensionService = new(contourv1alpha1.ExtensionService) + d.RateLimitExtensionService = new(contour_api_v1alpha1.ExtensionService) return decoder.Decode(d.RateLimitExtensionService) } @@ -382,7 +385,7 @@ func (d *Deployment) EnsureRateLimitResources(namespace string, configContents s extSvc := d.RateLimitExtensionService.DeepCopy() extSvc.Namespace = setNamespace - return d.ensureResource(extSvc, new(contourv1alpha1.ExtensionService)) + return d.ensureResource(extSvc, new(contour_api_v1alpha1.ExtensionService)) } // Convenience method for deploying the pieces of the deployment needed for @@ -436,6 +439,7 @@ func (d *Deployment) EnsureResourcesForLocalContour() error { "--xds-resource-version=v3", "--admin-address=/admin/admin.sock", ) + session, err := gexec.Start(bootstrapCmd, d.cmdOutputWriter, d.cmdOutputWriter) if 
err != nil { return err @@ -555,40 +559,115 @@ func (d *Deployment) DeleteResourcesForLocalContour() error { // Starts local contour, applying arguments and marshaling config into config // file. Returns running Contour command and config file so we can clean them // up. -func (d *Deployment) StartLocalContour(config *config.Parameters, additionalArgs ...string) (*gexec.Session, string, error) { - configFile, err := ioutil.TempFile("", "contour-config-*.yaml") - if err != nil { - return nil, "", err - } - defer configFile.Close() +func (d *Deployment) StartLocalContour(config *config.Parameters, contourConfiguration *contour_api_v1alpha1.ContourConfiguration, additionalArgs ...string) (*gexec.Session, string, error) { + + var content []byte + var configReferenceName string + var contourServeArgs []string + var err error + + // Look for the ENV variable to tell if this test run should use + // the ContourConfiguration file or the ContourConfiguration CRD. + if useContourConfiguration, variableFound := os.LookupEnv("USE_CONTOUR_CONFIGURATION_CRD"); variableFound && useContourConfiguration == "true" { + port, _ := strconv.Atoi(d.localContourPort) + + contourConfiguration.Name = randomString(14) + + // Set the xds server to the defined testing port as well as enable insecure communication. + contourConfiguration.Spec.XDSServer = contour_api_v1alpha1.XDSServerConfig{ + Type: contour_api_v1alpha1.ContourServerType, + Address: "0.0.0.0", + Port: port, + TLS: &contour_api_v1alpha1.TLS{ + Insecure: true, + }, + } - content, err := yaml.Marshal(config) - if err != nil { - return nil, "", err - } - if err := os.WriteFile(configFile.Name(), content, 0600); err != nil { - return nil, "", err + // Disable leader election. 
+ contourConfiguration.Spec.LeaderElection = contour_api_v1alpha1.LeaderElectionConfig{ + DisableLeaderElection: true, + } + + if err := d.client.Create(context.TODO(), contourConfiguration); err != nil { + return nil, "", fmt.Errorf("could not create ContourConfiguration: %v", err) + } + + contourServeArgs = append([]string{ + "serve", + "--kubeconfig=" + d.kubeConfig, + "--contour-config-name=" + contourConfiguration.Name, + }, additionalArgs...) + + configReferenceName = contourConfiguration.Name + } else { + + configFile, err := ioutil.TempFile("", "contour-config-*.yaml") + if err != nil { + return nil, "", err + } + defer configFile.Close() + + content, err = yaml.Marshal(config) + if err != nil { + return nil, "", err + } + if err := os.WriteFile(configFile.Name(), content, 0600); err != nil { + return nil, "", err + } + + contourServeArgs = append([]string{ + "serve", + "--xds-address=0.0.0.0", + "--xds-port=" + d.localContourPort, + "--insecure", + "--kubeconfig=" + d.kubeConfig, + "--config-path=" + configFile.Name(), + "--disable-leader-election", + }, additionalArgs...) + + configReferenceName = configFile.Name() } - contourServeArgs := append([]string{ - "serve", - "--xds-address=0.0.0.0", - "--xds-port=" + d.localContourPort, - "--insecure", - "--kubeconfig=" + d.kubeConfig, - "--config-path=" + configFile.Name(), - "--disable-leader-election", - }, additionalArgs...) session, err := gexec.Start(exec.Command(d.contourBin, contourServeArgs...), d.cmdOutputWriter, d.cmdOutputWriter) // nolint:gosec if err != nil { return nil, "", err } - return session, configFile.Name(), nil + return session, configReferenceName, nil } func (d *Deployment) StopLocalContour(contourCmd *gexec.Session, configFile string) error { + + // Look for the ENV variable to tell if this test run should use + // the ContourConfiguration file or the ContourConfiguration CRD. 
+ if useContourConfiguration, variableFound := os.LookupEnv("USE_CONTOUR_CONFIGURATION_CRD"); variableFound && useContourConfiguration == "true" { + cc := &contour_api_v1alpha1.ContourConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: configFile, + Namespace: "projectcontour", + }, + } + + if err := d.client.Delete(context.TODO(), cc); err != nil { + return fmt.Errorf("could not delete ContourConfiguration: %v", err) + } + } + // Default timeout of 1s produces test flakes, // a minute should be more than enough to avoid them. contourCmd.Terminate().Wait(time.Minute) return os.RemoveAll(configFile) } + +func randomString(n int) string { + const letters = "abcdefghijklmnopqrstuvwxyz0123456789" + ret := make([]byte, n) + for i := 0; i < n; i++ { + num, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters)))) + if err != nil { + return "" + } + ret[i] = letters[num.Int64()] + } + + return string(ret) +} diff --git a/test/e2e/fixtures.go b/test/e2e/fixtures.go index 04c7fbdf89b..53873324c51 100644 --- a/test/e2e/fixtures.go +++ b/test/e2e/fixtures.go @@ -19,6 +19,8 @@ package e2e import ( "context" + contour_api_v1alpha1 "github.com/projectcontour/contour/apis/projectcontour/v1alpha1" + "github.com/onsi/ginkgo" "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" @@ -303,3 +305,79 @@ func (e *EchoSecure) Deploy(ns, name string) func() { require.NoError(e.t, e.client.Delete(context.TODO(), deployment)) } } + +// DefaultContourConfiguration returns a default ContourConfiguration object. 
+func DefaultContourConfiguration() *contour_api_v1alpha1.ContourConfiguration { + return &contour_api_v1alpha1.ContourConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ingress", + Namespace: "projectcontour", + }, + Spec: contour_api_v1alpha1.ContourConfigurationSpec{ + Debug: contour_api_v1alpha1.DebugConfig{ + Address: "127.0.0.1", + Port: 6060, + DebugLogLevel: contour_api_v1alpha1.InfoLog, + KubernetesDebugLogLevel: 0, + }, + Health: contour_api_v1alpha1.HealthConfig{ + Address: "0.0.0.0", + Port: 8000, + }, + Envoy: contour_api_v1alpha1.EnvoyConfig{ + DefaultHTTPVersions: []contour_api_v1alpha1.HTTPVersionType{ + "HTTP/1.1", "HTTP/2", + }, + Listener: contour_api_v1alpha1.EnvoyListenerConfig{ + UseProxyProto: false, + DisableAllowChunkedLength: false, + ConnectionBalancer: "", + TLS: contour_api_v1alpha1.EnvoyTLS{ + MinimumProtocolVersion: "1.2", + CipherSuites: []contour_api_v1alpha1.TLSCipherType{ + "[ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]", + "[ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305]", + "ECDHE-ECDSA-AES256-GCM-SHA384", + "ECDHE-RSA-AES256-GCM-SHA384", + }, + }, + }, + Service: contour_api_v1alpha1.NamespacedName{ + Name: "envoy", + Namespace: "projectcontour", + }, + HTTPListener: contour_api_v1alpha1.EnvoyListener{ + Address: "0.0.0.0", + Port: 8080, + AccessLog: "/dev/stdout", + }, + HTTPSListener: contour_api_v1alpha1.EnvoyListener{ + Address: "0.0.0.0", + Port: 8443, + AccessLog: "/dev/stdout", + }, + Metrics: contour_api_v1alpha1.MetricsConfig{ + Address: "0.0.0.0", + Port: 8002, + }, + Logging: contour_api_v1alpha1.EnvoyLogging{ + AccessLogFormat: contour_api_v1alpha1.EnvoyAccessLog, + }, + Cluster: contour_api_v1alpha1.ClusterParameters{ + DNSLookupFamily: contour_api_v1alpha1.AutoClusterDNSFamily, + }, + Network: contour_api_v1alpha1.NetworkParameters{ + EnvoyAdminPort: 9001, + }, + }, + HTTPProxy: contour_api_v1alpha1.HTTPProxyConfig{ + DisablePermitInsecure: false, + }, + EnableExternalNameService: 
false, + Metrics: contour_api_v1alpha1.MetricsConfig{ + Address: "0.0.0.0", + Port: 8000, + }, + }, + } +} diff --git a/test/e2e/gateway/gateway_test.go b/test/e2e/gateway/gateway_test.go index 9fbce143eff..03654adb823 100644 --- a/test/e2e/gateway/gateway_test.go +++ b/test/e2e/gateway/gateway_test.go @@ -22,6 +22,8 @@ import ( "math/big" "testing" + contour_api_v1alpha1 "github.com/projectcontour/contour/apis/projectcontour/v1alpha1" + . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/onsi/gomega/gexec" @@ -55,6 +57,7 @@ var _ = Describe("Gateway API", func() { var ( contourCmd *gexec.Session contourConfig *config.Parameters + contourConfiguration *contour_api_v1alpha1.ContourConfiguration contourConfigFile string additionalContourArgs []string @@ -76,6 +79,12 @@ var _ = Describe("Gateway API", func() { ControllerName: gatewayClass.Spec.Controller, } + // Update contour configuration to point to specified gateway. + contourConfiguration = e2e.DefaultContourConfiguration() + contourConfiguration.Spec.Gateway = &contour_api_v1alpha1.GatewayConfig{ + ControllerName: gatewayClass.Spec.Controller, + } + contourGatewayClass = gatewayClass contourGateway = gateway }) @@ -104,7 +113,7 @@ var _ = Describe("Gateway API", func() { // until here to start Contour. JustBeforeEach(func() { var err error - contourCmd, contourConfigFile, err = f.Deployment.StartLocalContour(contourConfig, additionalContourArgs...) + contourCmd, contourConfigFile, err = f.Deployment.StartLocalContour(contourConfig, contourConfiguration, additionalContourArgs...) require.NoError(f.T(), err) // Wait for Envoy to be healthy. 
diff --git a/test/e2e/gateway/multiple_gateways_and_classes_test.go b/test/e2e/gateway/multiple_gateways_and_classes_test.go index ad6b7ac50f9..13e08596ffe 100644 --- a/test/e2e/gateway/multiple_gateways_and_classes_test.go +++ b/test/e2e/gateway/multiple_gateways_and_classes_test.go @@ -21,6 +21,9 @@ import ( "fmt" "time" + contour_api_v1alpha1 "github.com/projectcontour/contour/apis/projectcontour/v1alpha1" + "github.com/projectcontour/contour/test/e2e" + . "github.com/onsi/ginkgo" "github.com/onsi/gomega/gexec" "github.com/projectcontour/contour/internal/k8s" @@ -35,6 +38,7 @@ var _ = Describe("GatewayClass/Gateway admission tests", func() { var ( contourCmd *gexec.Session contourConfig *config.Parameters + contourConfiguration *contour_api_v1alpha1.ContourConfiguration contourConfigFile string additionalContourArgs []string controllerName string @@ -51,6 +55,12 @@ var _ = Describe("GatewayClass/Gateway admission tests", func() { }, } + // Update contour configuration to point to specified gateway. + contourConfiguration = e2e.DefaultContourConfiguration() + contourConfiguration.Spec.Gateway = &contour_api_v1alpha1.GatewayConfig{ + ControllerName: controllerName, + } + // Default contour serve command line arguments can be appended to in // nested BeforeEach. additionalContourArgs = []string{} @@ -62,7 +72,7 @@ var _ = Describe("GatewayClass/Gateway admission tests", func() { // until here to start Contour. JustBeforeEach(func() { var err error - contourCmd, contourConfigFile, err = f.Deployment.StartLocalContour(contourConfig, additionalContourArgs...) + contourCmd, contourConfigFile, err = f.Deployment.StartLocalContour(contourConfig, contourConfiguration, additionalContourArgs...) require.NoError(f.T(), err) // Wait for Envoy to be healthy. 
diff --git a/test/e2e/httpproxy/httpproxy_test.go b/test/e2e/httpproxy/httpproxy_test.go index a238366fdc6..57166990efa 100644 --- a/test/e2e/httpproxy/httpproxy_test.go +++ b/test/e2e/httpproxy/httpproxy_test.go @@ -21,6 +21,8 @@ import ( "fmt" "testing" + contour_api_v1alpha1 "github.com/projectcontour/contour/apis/projectcontour/v1alpha1" + "github.com/davecgh/go-spew/spew" certmanagerv1 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1" certmanagermetav1 "github.com/jetstack/cert-manager/pkg/apis/meta/v1" @@ -57,6 +59,7 @@ var _ = Describe("HTTPProxy", func() { var ( contourCmd *gexec.Session contourConfig *config.Parameters + contourConfiguration *contour_api_v1alpha1.ContourConfiguration contourConfigFile string additionalContourArgs []string ) @@ -66,6 +69,10 @@ var _ = Describe("HTTPProxy", func() { // BeforeEach. contourConfig = &config.Parameters{} + // Contour configuration crd, can be modified in nested + // BeforeEach. + contourConfiguration = e2e.DefaultContourConfiguration() + // Default contour serve command line arguments can be appended to in // nested BeforeEach. additionalContourArgs = []string{} @@ -77,7 +84,7 @@ var _ = Describe("HTTPProxy", func() { // until here to start Contour. JustBeforeEach(func() { var err error - contourCmd, contourConfigFile, err = f.Deployment.StartLocalContour(contourConfig, additionalContourArgs...) + contourCmd, contourConfigFile, err = f.Deployment.StartLocalContour(contourConfig, contourConfiguration, additionalContourArgs...) require.NoError(f.T(), err) // Wait for Envoy to be healthy. 
@@ -122,6 +129,11 @@ var _ = Describe("HTTPProxy", func() { Namespace: namespace, }, } + contourConfiguration.Spec.HTTPProxy.FallbackCertificate = &contour_api_v1alpha1.NamespacedName{ + Name: "fallback-cert", + Namespace: namespace, + } + f.Certs.CreateSelfSignedCert(namespace, "fallback-cert", "fallback-cert", "fallback.projectcontour.io") }) @@ -208,6 +220,11 @@ var _ = Describe("HTTPProxy", func() { Name: "backend-client-cert", }, } + + contourConfiguration.Spec.Envoy.ClientCertificate = &contour_api_v1alpha1.NamespacedName{ + Name: "backend-client-cert", + Namespace: namespace, + } }) testBackendTLS(namespace) @@ -226,6 +243,7 @@ var _ = Describe("HTTPProxy", func() { Context("with ExternalName Services enabled", func() { BeforeEach(func() { contourConfig.EnableExternalNameService = true + contourConfiguration.Spec.EnableExternalNameService = true }) testExternalNameServiceInsecure(namespace) }) @@ -235,6 +253,7 @@ var _ = Describe("HTTPProxy", func() { Context("with ExternalName Services enabled", func() { BeforeEach(func() { contourConfig.EnableExternalNameService = true + contourConfiguration.Spec.EnableExternalNameService = true }) testExternalNameServiceTLS(namespace) }) @@ -244,6 +263,7 @@ var _ = Describe("HTTPProxy", func() { Context("with ExternalName Services enabled", func() { BeforeEach(func() { contourConfig.EnableExternalNameService = true + contourConfiguration.Spec.EnableExternalNameService = true }) testExternalNameServiceLocalhostInvalid(namespace) }) @@ -262,6 +282,15 @@ var _ = Describe("HTTPProxy", func() { Domain: "contour", FailOpen: false, } + contourConfiguration.Spec.RateLimitService = &contour_api_v1alpha1.RateLimitServiceConfig{ + ExtensionService: contour_api_v1alpha1.NamespacedName{ + Name: f.Deployment.RateLimitExtensionService.Name, + Namespace: namespace, + }, + Domain: "contour", + FailOpen: false, + EnableXRateLimitHeaders: false, + } require.NoError(f.T(), f.Deployment.EnsureRateLimitResources( namespace, diff --git 
a/test/e2e/infra/infra_test.go b/test/e2e/infra/infra_test.go index dcd0876470a..a8b8e75c435 100644 --- a/test/e2e/infra/infra_test.go +++ b/test/e2e/infra/infra_test.go @@ -19,6 +19,8 @@ package infra import ( "testing" + contour_api_v1alpha1 "github.com/projectcontour/contour/apis/projectcontour/v1alpha1" + . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/onsi/gomega/gexec" @@ -51,6 +53,7 @@ var _ = Describe("Infra", func() { contourCmd *gexec.Session kubectlCmd *gexec.Session contourConfig *config.Parameters + contourConfiguration *contour_api_v1alpha1.ContourConfiguration contourConfigFile string additionalContourArgs []string ) @@ -60,6 +63,10 @@ var _ = Describe("Infra", func() { // BeforeEach. contourConfig = &config.Parameters{} + // Contour configuration crd, can be modified in nested + // BeforeEach. + contourConfiguration = e2e.DefaultContourConfiguration() + // Default contour serve command line arguments can be appended to in // nested BeforeEach. additionalContourArgs = []string{} @@ -71,7 +78,7 @@ var _ = Describe("Infra", func() { // until here to start Contour. JustBeforeEach(func() { var err error - contourCmd, contourConfigFile, err = f.Deployment.StartLocalContour(contourConfig, additionalContourArgs...) + contourCmd, contourConfigFile, err = f.Deployment.StartLocalContour(contourConfig, contourConfiguration, additionalContourArgs...) require.NoError(f.T(), err) // Wait for Envoy to be healthy. diff --git a/test/e2e/ingress/ingress_test.go b/test/e2e/ingress/ingress_test.go index f594b82d0f6..828bb51452a 100644 --- a/test/e2e/ingress/ingress_test.go +++ b/test/e2e/ingress/ingress_test.go @@ -20,6 +20,8 @@ import ( "context" "testing" + contour_api_v1alpha1 "github.com/projectcontour/contour/apis/projectcontour/v1alpha1" + certmanagerv1 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1" certmanagermetav1 "github.com/jetstack/cert-manager/pkg/apis/meta/v1" . 
"github.com/onsi/ginkgo" @@ -55,6 +57,7 @@ var _ = Describe("Ingress", func() { var ( contourCmd *gexec.Session contourConfig *config.Parameters + contourConfiguration *contour_api_v1alpha1.ContourConfiguration contourConfigFile string additionalContourArgs []string ) @@ -64,6 +67,8 @@ var _ = Describe("Ingress", func() { // BeforeEach. contourConfig = &config.Parameters{} + contourConfiguration = e2e.DefaultContourConfiguration() + // Default contour serve command line arguments can be appended to in // nested BeforeEach. additionalContourArgs = []string{} @@ -75,7 +80,7 @@ var _ = Describe("Ingress", func() { // until here to start Contour. JustBeforeEach(func() { var err error - contourCmd, contourConfigFile, err = f.Deployment.StartLocalContour(contourConfig, additionalContourArgs...) + contourCmd, contourConfigFile, err = f.Deployment.StartLocalContour(contourConfig, contourConfiguration, additionalContourArgs...) require.NoError(f.T(), err) // Wait for Envoy to be healthy. @@ -167,6 +172,10 @@ var _ = Describe("Ingress", func() { Name: "backend-client-cert", }, } + contourConfiguration.Spec.Envoy.ClientCertificate = &contour_api_v1alpha1.NamespacedName{ + Namespace: namespace, + Name: "backend-client-cert", + } }) testBackendTLS(namespace)