Skip to content

Commit

Permalink
Pull in the new fancier webhook drain from knative/pkg. (#3634)
Browse files Browse the repository at this point in the history
This attempts to combat the webhook Post EOF errors we have been seeing intermittently: knative/pkg#1509
  • Loading branch information
mattmoor authored Jul 18, 2020
1 parent 4f19a2c commit 47adfa0
Show file tree
Hide file tree
Showing 13 changed files with 423 additions and 66 deletions.
4 changes: 2 additions & 2 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -39,8 +39,8 @@ require (
k8s.io/apiserver v0.17.6
k8s.io/client-go v11.0.1-0.20190805182717-6502b5e7b1b5+incompatible
k8s.io/utils v0.0.0-20200124190032-861946025e34
knative.dev/pkg v0.0.0-20200716235533-0f78f8a8cccf
knative.dev/test-infra v0.0.0-20200716222033-3c06d840fc70
knative.dev/pkg v0.0.0-20200718042534-e193c4be24bb
knative.dev/test-infra v0.0.0-20200718013433-a19b8b74cc35
sigs.k8s.io/yaml v1.2.0
)

Expand Down
8 changes: 4 additions & 4 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -1665,12 +1665,12 @@ k8s.io/utils v0.0.0-20200124190032-861946025e34/go.mod h1:sZAwmy6armz5eXlNoLmJcl
knative.dev/caching v0.0.0-20200116200605-67bca2c83dfa/go.mod h1:dHXFU6CGlLlbzaWc32g80cR92iuBSpsslDNBWI8C7eg=
knative.dev/eventing-contrib v0.11.2/go.mod h1:SnXZgSGgMSMLNFTwTnpaOH7hXDzTFtw0J8OmHflNx3g=
knative.dev/pkg v0.0.0-20200207155214-fef852970f43/go.mod h1:pgODObA1dTyhNoFxPZTTjNWfx6F0aKsKzn+vaT9XO/Q=
knative.dev/pkg v0.0.0-20200716235533-0f78f8a8cccf h1:kGCVdSLQZp56x5idc+BxauU/FAPB/9hK5BicMkNd3vs=
knative.dev/pkg v0.0.0-20200716235533-0f78f8a8cccf/go.mod h1:3mm5ZffkmyYnqN+SOq1cN9TX0KTjhEbiZL8YBpP4C4Y=
knative.dev/pkg v0.0.0-20200718042534-e193c4be24bb h1:0+6xt01+h9P8+83UEn1rYbiRnZe829eEfK5exnORtZ4=
knative.dev/pkg v0.0.0-20200718042534-e193c4be24bb/go.mod h1:3mm5ZffkmyYnqN+SOq1cN9TX0KTjhEbiZL8YBpP4C4Y=
knative.dev/test-infra v0.0.0-20200715185233-6964ba126fee h1:SH4N5kSRiEgmOcgjFwsyLMipS3sPJlN6dpp783C/ILQ=
knative.dev/test-infra v0.0.0-20200715185233-6964ba126fee/go.mod h1:mAsPDmFmlsTJjRWplWBz8xtEiarSgvGiiOjkGj4Or1g=
knative.dev/test-infra v0.0.0-20200716222033-3c06d840fc70 h1:1510826l+2CBQMaNcDqQeSzz1H6g90hLdWArf5L+SVo=
knative.dev/test-infra v0.0.0-20200716222033-3c06d840fc70/go.mod h1:mAsPDmFmlsTJjRWplWBz8xtEiarSgvGiiOjkGj4Or1g=
knative.dev/test-infra v0.0.0-20200718013433-a19b8b74cc35 h1:HeB4f6l/HmRtadbchy+xH/gKsJpFwp+gL7xXLBPNd3A=
knative.dev/test-infra v0.0.0-20200718013433-a19b8b74cc35/go.mod h1:mAsPDmFmlsTJjRWplWBz8xtEiarSgvGiiOjkGj4Or1g=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
Expand Down
159 changes: 159 additions & 0 deletions vendor/knative.dev/pkg/controller/two_lane_queue.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,159 @@
/*
Copyright 2020 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
"k8s.io/client-go/util/workqueue"
)

// twoLaneQueue is a rate limited queue that wraps around two queues
// -- fast queue (anonymously aliased), whose contents are processed with priority.
// -- slow queue (slowLane queue), whose contents are processed if fast queue has no items.
// All the default methods operate on the fast queue, unless noted otherwise.
type twoLaneQueue struct {
	// The embedded RateLimitingInterface is the fast lane; all default
	// method calls (Add, AddRateLimited, ...) delegate to it.
	workqueue.RateLimitingInterface
	// slowLane is the lower-priority queue, reachable via SlowLane().
	slowLane workqueue.RateLimitingInterface
	// consumerQueue is necessary to ensure that we're not reconciling
	// the same object at the exact same time (e.g. if it had been enqueued
	// in both fast and slow and is the only object there).
	consumerQueue workqueue.Interface

	// name is the base name used to derive the per-lane queue names.
	name string

	// fastChan and slowChan carry items popped off the respective lanes
	// by the producer goroutines over to the single consumer goroutine.
	fastChan chan interface{}
	slowChan chan interface{}
}

// newTwoLaneWorkQueue creates a twoLaneQueue whose lanes share a single
// default controller rate limiter, and starts the consumer goroutine plus
// one producer goroutine per lane before returning it ready for use.
func newTwoLaneWorkQueue(name string) *twoLaneQueue {
	limiter := workqueue.DefaultControllerRateLimiter()
	q := &twoLaneQueue{
		RateLimitingInterface: workqueue.NewNamedRateLimitingQueue(limiter, name+"-fast"),
		slowLane:              workqueue.NewNamedRateLimitingQueue(limiter, name+"-slow"),
		consumerQueue:         workqueue.NewNamed(name + "-consumer"),
		name:                  name,
		fastChan:              make(chan interface{}),
		slowChan:              make(chan interface{}),
	}
	// The consumer merges both lanes into consumerQueue.
	go q.runConsumer()
	// Each producer pumps one lane into its channel.
	go process(q.RateLimitingInterface, q.fastChan)
	go process(q.slowLane, q.slowChan)
	return q
}

// process pumps items from q into ch until the queue reports shutdown,
// then closes ch so the consumer knows this lane is drained.
func process(q workqueue.Interface, ch chan interface{}) {
	// This goroutine is the sole sender on ch, so it owns closing it.
	defer close(ch)
	for {
		item, shutdown := q.Get()
		// Queue is empty and shutting down — stop the loop.
		if shutdown {
			return
		}
		ch <- item
		q.Done(item)
	}
}

// runConsumer merges the fast and slow producer channels into the single
// consumerQueue, always preferring items from the fast lane.
func (tlq *twoLaneQueue) runConsumer() {
	// Local copies of the channels, set to nil once the corresponding lane
	// is shut down and drained. A receive from a nil channel blocks forever,
	// so a finished lane's select case simply never fires again. (The
	// original flag-based version kept selecting on the *closed* channel,
	// which is always ready and therefore busy-spun the loop at full CPU
	// until the other lane also shut down.)
	fastChan, slowChan := tlq.fastChan, tlq.slowChan
	// When both producer lanes are shut down, stop the consumerQueue.
	defer tlq.consumerQueue.ShutDown()
	// While either lane is still running, try to read off of them.
	for fastChan != nil || slowChan != nil {
		// By default drain the fast lane.
		// Channels in select are picked at random, so first
		// we have a non-blocking select that only looks at the fast lane.
		if fastChan != nil {
			select {
			case item, ok := <-fastChan:
				if !ok {
					// This lane is shutdown and drained. Stop looking at it.
					fastChan = nil
					continue
				}
				tlq.consumerQueue.Add(item)
				continue
			default:
				// This immediately exits the wait if the fast chan is empty;
				// fall through to the blocking select below.
			}
		}

		// The fast lane had no items, so we can select from both.
		// Obviously if suddenly both are populated at the same time there's a
		// 50% chance that the slow would be picked first, but this should be
		// a rare occasion not to really worry about it.
		select {
		case item, ok := <-fastChan:
			if !ok {
				// This lane is shutdown and drained. Stop looking at it.
				fastChan = nil
				continue
			}
			tlq.consumerQueue.Add(item)
		case item, ok := <-slowChan:
			if !ok {
				// This lane is shutdown and drained. Stop looking at it.
				slowChan = nil
				continue
			}
			tlq.consumerQueue.Add(item)
		}
	}
}

// ShutDown implements workqueue.Interface.
// ShutDown shuts down both the fast and the slow lane queues; the producer
// goroutines then drain them and close their channels, after which the
// consumer goroutine shuts down the consumer queue as well.
func (tlq *twoLaneQueue) ShutDown() {
	tlq.RateLimitingInterface.ShutDown()
	tlq.slowLane.ShutDown()
}

// Done implements workqueue.Interface.
// Done marks the item as completed in the consumer queue only — the lane
// queues already had Done called for the item by the producer goroutine
// right after handing it off (see process).
func (tlq *twoLaneQueue) Done(i interface{}) {
	tlq.consumerQueue.Done(i)
}

// Get implements workqueue.Interface.
// It returns the next item from the consumer queue, which the consumer
// goroutine fills preferring the fast lane over the slow lane.
func (tlq *twoLaneQueue) Get() (interface{}, bool) {
	return tlq.consumerQueue.Get()
}

// Len returns the sum of the lengths of both lanes and the consumer queue.
// NB: the actual _number_ of unique objects might be less than this sum,
// since the same object may be present in more than one of the queues.
func (tlq *twoLaneQueue) Len() int {
	return tlq.RateLimitingInterface.Len() + tlq.slowLane.Len() + tlq.consumerQueue.Len()
}

// SlowLane gives direct access to the slow queue, so items can be enqueued
// at lower priority than via the default (fast-lane) methods.
func (tlq *twoLaneQueue) SlowLane() workqueue.RateLimitingInterface {
	return tlq.slowLane
}
41 changes: 35 additions & 6 deletions vendor/knative.dev/pkg/injection/sharedmain/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,8 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/transport"
"k8s.io/klog"

"go.uber.org/zap"
apierrors "k8s.io/apimachinery/pkg/api/errors"
Expand Down Expand Up @@ -63,18 +65,39 @@ func GetConfig(masterURL, kubeconfig string) (*rest.Config, error) {
if kubeconfig == "" {
kubeconfig = os.Getenv("KUBECONFIG")
}

// We produce configs a bunch of ways, this gives us a single place
// to "decorate" them with common useful things (e.g. for debugging)
decorate := func(cfg *rest.Config) *rest.Config {

// Augment the rest.Config with a "wrapper" around the transport that
// will emit varying levels of debug logging when -v is passed with
// levels 6 to 9.
wt := transport.DebugWrappers
if cfg.WrapTransport != nil {
wt = transport.Wrappers(wt, cfg.WrapTransport)
}
cfg.WrapTransport = wt

return cfg
}

// If we have an explicit indication of where the kubernetes config lives, read that.
if kubeconfig != "" {
return clientcmd.BuildConfigFromFlags(masterURL, kubeconfig)
if c, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfig); err != nil {
return nil, err
} else {
return decorate(c), nil
}
}
// If not, try the in-cluster config.
if c, err := rest.InClusterConfig(); err == nil {
return c, nil
return decorate(c), nil
}
// If no in-cluster config, try the default location in the user's home directory.
if usr, err := user.Current(); err == nil {
if c, err := clientcmd.BuildConfigFromFlags("", filepath.Join(usr.HomeDir, ".kube", "config")); err == nil {
return c, nil
return decorate(c), nil
}
}

Expand Down Expand Up @@ -168,9 +191,14 @@ func MainWithConfig(ctx context.Context, component string, cfg *rest.Config, cto

MemStatsOrDie(ctx)

// Adjust our client's rate limits based on the number of controllers we are running.
cfg.QPS = float32(len(ctors)) * rest.DefaultQPS
cfg.Burst = len(ctors) * rest.DefaultBurst
// Respect user provided settings, but if omitted customize the default behavior.
if cfg.QPS == 0 {
// Adjust our client's rate limits based on the number of controllers we are running.
cfg.QPS = float32(len(ctors)) * rest.DefaultQPS
}
if cfg.Burst == 0 {
cfg.Burst = len(ctors) * rest.DefaultBurst
}
ctx, informers := injection.Default.SetupInformers(ctx, cfg)

logger, atomicLevel := SetupLoggerOrDie(ctx, component)
Expand Down Expand Up @@ -260,6 +288,7 @@ func ParseAndGetConfigOrDie() *rest.Config {
kubeconfig = flag.String("kubeconfig", "",
"Path to a kubeconfig. Only required if out-of-cluster.")
)
klog.InitFlags(flag.CommandLine)
flag.Parse()

cfg, err := GetConfig(*masterURL, *kubeconfig)
Expand Down
65 changes: 39 additions & 26 deletions vendor/knative.dev/pkg/leaderelection/context.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ package leaderelection
import (
"context"
"fmt"
"hash/fnv"
"strings"
"sync"

Expand Down Expand Up @@ -81,11 +82,11 @@ type Elector interface {

// BuildElector builds a leaderelection.LeaderElector for the named LeaderAware
// reconciler using a builder added to the context via WithStandardLeaderElectorBuilder.
func BuildElector(ctx context.Context, la reconciler.LeaderAware, queueName string, enq func(reconciler.Bucket, types.NamespacedName)) (Elector, error) {
func BuildElector(ctx context.Context, la reconciler.LeaderAware, name string, enq func(reconciler.Bucket, types.NamespacedName)) (Elector, error) {
if val := ctx.Value(builderKey{}); val != nil {
switch builder := val.(type) {
case *standardBuilder:
return builder.buildElector(ctx, la, queueName, enq)
return builder.buildElector(ctx, la, name, enq)
case *statefulSetBuilder:
return builder.buildElector(ctx, la, enq)
}
Expand All @@ -106,17 +107,24 @@ type standardBuilder struct {
}

func (b *standardBuilder) buildElector(ctx context.Context, la reconciler.LeaderAware,
queueName string, enq func(reconciler.Bucket, types.NamespacedName)) (Elector, error) {
name string, enq func(reconciler.Bucket, types.NamespacedName)) (Elector, error) {
logger := logging.FromContext(ctx)

id, err := UniqueID()
if err != nil {
return nil, err
}

bkts := newStandardBuckets(queueName, b.lec)
electors := make([]Elector, 0, b.lec.Buckets)
for _, bkt := range bkts {
buckets := make([]Elector, 0, b.lec.Buckets)
for i := uint32(0); i < b.lec.Buckets; i++ {
bkt := &bucket{
// The resource name is the lowercase:
// {component}.{workqueue}.{index}-of-{total}
name: strings.ToLower(fmt.Sprintf("%s.%s.%02d-of-%02d", b.lec.Component, name, i, b.lec.Buckets)),
index: i,
total: b.lec.Buckets,
}

rl, err := resourcelock.New(KnativeResourceLock,
system.Namespace(), // use namespace we are running in
bkt.Name(),
Expand Down Expand Up @@ -160,27 +168,9 @@ func (b *standardBuilder) buildElector(ctx context.Context, la reconciler.Leader
// if lec.WatchDog != nil {
// lec.WatchDog.SetLeaderElection(le)
// }
electors = append(electors, &runUntilCancelled{Elector: le})
}
return &runAll{les: electors}, nil
}

func newStandardBuckets(queueName string, cc ComponentConfig) []reconciler.Bucket {
names := make(sets.String, cc.Buckets)
for i := uint32(0); i < cc.Buckets; i++ {
names.Insert(standardBucketName(i, queueName, cc))
buckets = append(buckets, &runUntilCancelled{Elector: le})
}
bs := hash.NewBucketSet(names)

bkts := make([]reconciler.Bucket, 0, cc.Buckets)
for name := range names {
bkts = append(bkts, hash.NewBucket(name, bs))
}
return bkts
}

func standardBucketName(ordinal uint32, queueName string, cc ComponentConfig) string {
return strings.ToLower(fmt.Sprintf("%s.%s.%02d-of-%02d", cc.Component, queueName, ordinal, cc.Buckets))
return &runAll{les: buckets}, nil
}

type statefulSetBuilder struct {
Expand Down Expand Up @@ -279,3 +269,26 @@ func (ruc *runUntilCancelled) Run(ctx context.Context) {
}
}
}

// bucket is a static reconciler.Bucket implementation that owns one fixed
// slice (index) of a keyspace hashed into `total` parts.
type bucket struct {
	// name is this bucket's stable identifier, fixed at construction.
	name string

	// We are bucket {index} of {total}.
	index uint32
	total uint32
}

// Compile-time check that *bucket satisfies reconciler.Bucket.
var _ reconciler.Bucket = (*bucket)(nil)

// Name implements reconciler.Bucket, returning the name this bucket was
// constructed with.
func (b *bucket) Name() string {
	return b.name
}

// Has implements reconciler.Bucket. It reports whether nn falls into this
// bucket: the FNV-1a hash of "namespace.name" modulo the bucket count must
// equal this bucket's index.
func (b *bucket) Has(nn types.NamespacedName) bool {
	hasher := fnv.New32a()
	hasher.Write([]byte(nn.Namespace + "." + nn.Name))
	return hasher.Sum32()%b.total == b.index
}
2 changes: 2 additions & 0 deletions vendor/knative.dev/pkg/network/error_handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,8 @@ import (

// ErrorHandler sets up a handler suitable for use with the ErrorHandler field on
// httputil's reverse proxy.
// TODO(mattmoor): Move the implementation into handlers/error.go once downstream consumers
// have adopted the alias.
func ErrorHandler(logger *zap.SugaredLogger) func(http.ResponseWriter, *http.Request, error) {
return func(w http.ResponseWriter, req *http.Request, err error) {
ss := readSockStat(logger)
Expand Down
Loading

0 comments on commit 47adfa0

Please sign in to comment.