Skip to content

Commit

Permalink
cmd/contour: Use ContourConfiguration for all config (#4048)
Browse files Browse the repository at this point in the history
Convert the ServeContext to a ContourConfiguration and use
that for all config items which will allow for the ServeContext
to be deprecated and the optional ContourConfig CRD to be
specified.

Signed-off-by: Steve Sloka <slokas@vmware.com>
  • Loading branch information
stevesloka authored Oct 6, 2021
1 parent ec04b41 commit 2d52d47
Show file tree
Hide file tree
Showing 27 changed files with 2,434 additions and 573 deletions.
8 changes: 8 additions & 0 deletions .github/workflows/prbuild.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,8 @@ jobs:
# have to update branch protection rules every time we change
# a Kubernetes version number.
kubernetes_version: ["kubernetes:latest", "kubernetes:n-1", "kubernetes:n-2"]
# run tests using the configuration crd as well as without
config_type: ["ConfigmapConfiguration", "ContourConfiguration"]
# include defines an additional variable (the specific node
# image to use) for each kubernetes_version value.
include:
Expand All @@ -52,6 +54,11 @@ jobs:
node_image: "docker.io/kindest/node:v1.21.2@sha256:9d07ff05e4afefbba983fac311807b3c17a5f36e7061f6cb7e2ba756255b2be4"
- kubernetes_version: "kubernetes:n-2"
node_image: "docker.io/kindest/node:v1.20.7@sha256:cbeaf907fc78ac97ce7b625e4bf0de16e3ea725daf6b04f930bd14c67c671ff9"
- config_type: "ConfigmapConfiguration"
use_config_crd: "false"
- config_type: "ContourConfiguration"
use_config_crd: "true"

steps:
- uses: actions/checkout@v2
# TODO uncomment the below once we're using the image
Expand Down Expand Up @@ -82,6 +89,7 @@ jobs:
env:
NODEIMAGE: ${{ matrix.node_image }}
LOAD_PREBUILT_IMAGE: "true"
USE_CONTOUR_CONFIGURATION_CRD: ${{ matrix.use_config_crd }}
run: |
make e2e
- uses: act10ns/slack@v1
Expand Down
31 changes: 10 additions & 21 deletions apis/projectcontour/v1alpha1/contourconfig.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ type ContourConfigurationSpec struct {
// Envoy contains parameters for Envoy as well
// as how to optionally configure a managed Envoy fleet.
// +optional
// +kubebuilder:default={listener: {useProxyProtocol: false, disableAllowChunkedLength: false, connectionBalancer: "", tls: { minimumProtocolVersion: "1.2", cipherSuites: "[ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]";"[ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305]";"ECDHE-ECDSA-AES256-GCM-SHA384";"ECDHE-RSA-AES256-GCM-SHA384" }}, service: {name: "envoy", namespace: "projectcontour"}, http: {address: "0.0.0.0", port: 8080, accessLog: "/dev/stdout"}, https: {address: "0.0.0.0", port: 8443, accessLog: "/dev/stdout"}, metrics: {address: "0.0.0.0", port: 8002}, logging: { accessLogFormat: "envoy"}, defaultHTTPVersions: "http/1.1";"http/2", cluster: {dnsLookupFamily: "auto"}, network: { adminPort: 9001}}
// +kubebuilder:default={listener: {useProxyProtocol: false, disableAllowChunkedLength: false, connectionBalancer: "", tls: { minimumProtocolVersion: "1.2", cipherSuites: "[ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]";"[ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305]";"ECDHE-ECDSA-AES256-GCM-SHA384";"ECDHE-RSA-AES256-GCM-SHA384" }}, service: {name: "envoy", namespace: "projectcontour"}, http: {address: "0.0.0.0", port: 8080, accessLog: "/dev/stdout"}, https: {address: "0.0.0.0", port: 8443, accessLog: "/dev/stdout"}, metrics: {address: "0.0.0.0", port: 8002}, logging: { accessLogFormat: "envoy"}, defaultHTTPVersions: "HTTP/1.1";"HTTP/2", cluster: {dnsLookupFamily: "auto"}, network: { adminPort: 9001}}
Envoy EnvoyConfig `json:"envoy"`

// Gateway contains parameters for the gateway-api Gateway that Contour
Expand Down Expand Up @@ -79,7 +79,7 @@ type ContourConfigurationSpec struct {
// +optional
Policy *PolicyConfig `json:"policy,omitempty"`

// Metrics defines the endpoints Envoy use to serve to metrics.
// Metrics defines the endpoints Contour uses to serve metrics.
// +optional
// +kubebuilder:default={address: "0.0.0.0", port: 8000}
Metrics MetricsConfig `json:"metrics"`
Expand Down Expand Up @@ -169,11 +169,11 @@ type MetricsConfig struct {
}

// HTTPVersionType is the name of a supported HTTP version.
// +kubebuilder:validation:Enum="http/1.1";"http/2"
// +kubebuilder:validation:Enum="HTTP/1.1";"HTTP/2"
type HTTPVersionType string

const HTTPVersion1 HTTPVersionType = "http/1.1"
const HTTPVersion2 HTTPVersionType = "http/2"
const HTTPVersion1 HTTPVersionType = "HTTP/1.1"
const HTTPVersion2 HTTPVersionType = "HTTP/2"

// EnvoyConfig defines how Envoy is to be Configured from Contour.
type EnvoyConfig struct {
Expand Down Expand Up @@ -254,7 +254,7 @@ type DebugConfig struct {
// +kubebuilder:default=0
// +kubebuilder:validation:Minimum=0
// +kubebuilder:validation:Maximum=9
KubernetesDebugLogLevel int `json:"kubernetesLogLevel"`
KubernetesDebugLogLevel uint `json:"kubernetesLogLevel"`
}

// EnvoyListenerConfig hold various configurable Envoy listener values.
Expand Down Expand Up @@ -283,21 +283,6 @@ type EnvoyListenerConfig struct {
// +kubebuilder:validation:Enum="[ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]";"[ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305]";"ECDHE-ECDSA-AES128-GCM-SHA256";"ECDHE-RSA-AES128-GCM-SHA256";"ECDHE-ECDSA-AES128-SHA";"ECDHE-RSA-AES128-SHA";"AES128-GCM-SHA256";"AES128-SHA";"ECDHE-ECDSA-AES256-GCM-SHA384";"ECDHE-RSA-AES256-GCM-SHA384";"ECDHE-ECDSA-AES256-SHA";"ECDHE-RSA-AES256-SHA";"AES256-GCM-SHA384";"AES256-SHA"
type TLSCipherType string

const CIPHER1 TLSCipherType = "[ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]"
const CIPHER2 TLSCipherType = "[ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305]"
const CIPHER3 TLSCipherType = "ECDHE-ECDSA-AES128-GCM-SHA256"
const CIPHER4 TLSCipherType = "ECDHE-RSA-AES128-GCM-SHA256"
const CIPHER5 TLSCipherType = "ECDHE-ECDSA-AES128-SHA"
const CIPHER6 TLSCipherType = "ECDHE-RSA-AES128-SHA"
const CIPHER7 TLSCipherType = "AES128-GCM-SHA256"
const CIPHER8 TLSCipherType = "AES128-SHA"
const CIPHER9 TLSCipherType = "ECDHE-ECDSA-AES256-GCM-SHA384"
const CIPHER10 TLSCipherType = "ECDHE-RSA-AES256-GCM-SHA384"
const CIPHER11 TLSCipherType = "ECDHE-ECDSA-AES256-SHA"
const CIPHER12 TLSCipherType = "ECDHE-RSA-AES256-SHA"
const CIPHER13 TLSCipherType = "AES256-GCM-SHA384"
const CIPHER14 TLSCipherType = "AES256-SHA"

// EnvoyTLS describes TLS parameters for Envoy listeners.
type EnvoyTLS struct {
// MinimumProtocolVersion is the minimum TLS version this vhost should
Expand Down Expand Up @@ -518,6 +503,10 @@ type PolicyConfig struct {
// ResponseHeadersPolicy defines the response headers set/removed on all routes
// +optional
ResponseHeadersPolicy *HeadersPolicy `json:"responseHeaders,omitempty"`

// ApplyToIngress determines whether the policies will apply to Ingress objects
// +optional
ApplyToIngress bool `json:"applyToIngress"`
}

type HeadersPolicy struct {
Expand Down
8 changes: 7 additions & 1 deletion cmd/contour/contour.go
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,13 @@ func main() {
log.WithError(err).Fatal("invalid configuration")
}

if err := doServe(log, serveCtx); err != nil {
// Build out serve deps.
serve, err := NewServer(log, serveCtx)
if err != nil {
log.WithError(err).Fatal("unable to initialize Server dependencies required to start Contour")
}

if err := serve.doServe(); err != nil {
log.WithError(err).Fatal("Contour server failed")
}
case version.FullCommand():
Expand Down
41 changes: 28 additions & 13 deletions cmd/contour/leadership.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,11 +16,13 @@ package main
import (
"context"
"os"
"time"

contour_api_v1alpha1 "github.com/projectcontour/contour/apis/projectcontour/v1alpha1"

"github.com/google/uuid"
"github.com/projectcontour/contour/internal/k8s"
"github.com/projectcontour/contour/internal/workgroup"
"github.com/projectcontour/contour/pkg/config"
"github.com/sirupsen/logrus"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
Expand All @@ -40,15 +42,15 @@ func disableLeaderElection(log logrus.FieldLogger) chan struct{} {
func setupLeadershipElection(
g *workgroup.Group,
log logrus.FieldLogger,
conf *config.LeaderElectionParameters,
conf contour_api_v1alpha1.LeaderElectionConfig,
clients *k8s.Clients, updateNow func(),
) chan struct{} {
le, leader, deposed := newLeaderElector(log, conf, clients)

g.AddContext(func(electionCtx context.Context) error {
log.WithFields(logrus.Fields{
"configmapname": conf.Name,
"configmapnamespace": conf.Namespace,
"configmapname": conf.Configmap.Name,
"configmapnamespace": conf.Configmap.Namespace,
}).Info("started leader election")

le.Run(electionCtx)
Expand Down Expand Up @@ -85,7 +87,7 @@ func setupLeadershipElection(
// channels by which to observe elections and depositions.
func newLeaderElector(
log logrus.FieldLogger,
conf *config.LeaderElectionParameters,
conf contour_api_v1alpha1.LeaderElectionConfig,
clients *k8s.Clients,
) (*leaderelection.LeaderElector, chan struct{}, chan struct{}) {
log = log.WithField("context", "leaderelection")
Expand All @@ -97,12 +99,25 @@ func newLeaderElector(

rl := newResourceLock(log, conf, clients)

leaseDuration, err := time.ParseDuration(conf.LeaseDuration)
if err != nil {
log.WithError(err).Fatalf("could not parse LeaseDuration: %q", conf.LeaseDuration)
}
renewDeadline, err := time.ParseDuration(conf.RenewDeadline)
if err != nil {
log.WithError(err).Fatalf("could not parse RenewDeadline: %q", conf.RenewDeadline)
}
retryPeriod, err := time.ParseDuration(conf.RetryPeriod)
if err != nil {
log.WithError(err).Fatalf("could not parse RetryPeriod: %q", conf.RetryPeriod)
}

// Make the leader elector, ready to be used in the Workgroup.
le, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
Lock: rl,
LeaseDuration: conf.LeaseDuration,
RenewDeadline: conf.RenewDeadline,
RetryPeriod: conf.RetryPeriod,
LeaseDuration: leaseDuration,
RenewDeadline: renewDeadline,
RetryPeriod: retryPeriod,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: func(_ context.Context) {
log.WithFields(logrus.Fields{
Expand All @@ -126,7 +141,7 @@ func newLeaderElector(

// newResourceLock creates a new resourcelock.Interface based on the Pod's name,
// or a uuid if the name cannot be determined.
func newResourceLock(log logrus.FieldLogger, conf *config.LeaderElectionParameters, clients *k8s.Clients) resourcelock.Interface {
func newResourceLock(log logrus.FieldLogger, conf contour_api_v1alpha1.LeaderElectionConfig, clients *k8s.Clients) resourcelock.Interface {
resourceLockID, found := os.LookupEnv("POD_NAME")
if !found {
resourceLockID = uuid.New().String()
Expand All @@ -138,8 +153,8 @@ func newResourceLock(log logrus.FieldLogger, conf *config.LeaderElectionParamete
// cycle (ie nine months).
// Figure out the resource lock ID
resourcelock.ConfigMapsResourceLock,
conf.Namespace,
conf.Name,
conf.Configmap.Namespace,
conf.Configmap.Name,
clients.ClientSet().CoreV1(),
clients.ClientSet().CoordinationV1(),
resourcelock.ResourceLockConfig{
Expand All @@ -148,8 +163,8 @@ func newResourceLock(log logrus.FieldLogger, conf *config.LeaderElectionParamete
)
if err != nil {
log.WithError(err).
WithField("name", conf.Name).
WithField("namespace", conf.Namespace).
WithField("name", conf.Configmap.Name).
WithField("namespace", conf.Configmap.Namespace).
WithField("identity", resourceLockID).
Fatal("failed to create new resource lock")
}
Expand Down
Loading

0 comments on commit 2d52d47

Please sign in to comment.