-
Notifications
You must be signed in to change notification settings - Fork 84
/
controller_dns_node_resolver_daemonset.go
283 lines (260 loc) · 9.72 KB
/
controller_dns_node_resolver_daemonset.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
package controller
import (
"context"
"fmt"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/openshift/cluster-dns-operator/pkg/manifests"
operatorv1 "github.com/openshift/api/operator/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
const (
	// services is a comma- or space-delimited list of services for which
	// entries should be added to /etc/hosts. NOTE: For now, ensure these
	// are relative names; for each relative name, an alias with the
	// CLUSTER_DOMAIN suffix will also be added. This value is passed to
	// the node-resolver pods via the SERVICES environment variable.
	services = "image-registry.openshift-image-registry.svc"
	// workloadPartitioningManagement is the pod annotation key used to
	// mark the node-resolver pods as management workloads for workload
	// partitioning.
	workloadPartitioningManagement = "target.workload.openshift.io/management"
)
var (
	// nodeResolverScript is a shell script that updates /etc/hosts. It is
	// loaded from the operator's bundled manifests and run as the main
	// command of each node-resolver pod.
	nodeResolverScript = manifests.NodeResolverScript()
)
// ensureNodeResolverDaemonSet ensures the node resolver daemonset exists if it
// should or does not exist if it should not exist. Returns a Boolean
// indicating whether the daemonset exists, the daemonset if it does exist, and
// an error value.
func (r *reconciler) ensureNodeResolverDaemonSet(dns *operatorv1.DNS, clusterIP, clusterDomain string) (bool, *appsv1.DaemonSet, error) {
	// Look up the daemonset as it currently exists in the cluster.
	haveDS, current, err := r.currentNodeResolverDaemonSet()
	if err != nil {
		return false, nil, err
	}
	// Compute the desired daemonset from the DNS config and cluster info.
	wantDS, desired, err := desiredNodeResolverDaemonSet(dns, clusterIP, clusterDomain, r.OpenshiftCLIImage)
	if err != nil {
		return haveDS, current, fmt.Errorf("failed to build node resolver daemonset: %v", err)
	}
	// Reconcile want/have: create, delete, or update as needed.
	switch {
	case !wantDS && !haveDS:
		// Nothing desired, nothing present: done.
		return false, nil, nil
	case !wantDS && haveDS:
		// Present but no longer desired: delete it.
		if err := r.deleteNodeResolverDaemonSet(current); err != nil {
			return true, current, err
		}
		return false, nil, nil
	case wantDS && !haveDS:
		// Desired but missing: create it, then re-read so the caller
		// gets the API server's view (defaults, UID, resourceVersion).
		if err := r.createNodeResolverDaemonSet(desired); err != nil {
			return false, nil, err
		}
		return r.currentNodeResolverDaemonSet()
	case wantDS && haveDS:
		// Present and desired: update only if the config changed, and
		// re-read after a successful update for the same reason as above.
		if updated, err := r.updateNodeResolverDaemonSet(current, desired); err != nil {
			return true, current, err
		} else if updated {
			return r.currentNodeResolverDaemonSet()
		}
	}
	return true, current, nil
}
// desiredNodeResolverDaemonSet returns the desired node resolver daemonset.
// The Boolean return value indicates whether a daemonset is desired (always
// true as currently implemented); the error return is likewise always nil
// here but kept for signature symmetry with the other ensure/desired helpers.
func desiredNodeResolverDaemonSet(dns *operatorv1.DNS, clusterIP, clusterDomain, openshiftCLIImage string) (bool, *appsv1.DaemonSet, error) {
	// /etc/hosts is mounted as a single file, not a directory.
	hostPathFile := corev1.HostPathFile
	// TODO: Consider setting maxSurge to a positive value.
	maxSurge := intstr.FromInt(0)
	maxUnavailable := intstr.FromString("33%")
	// SERVICES is always set; NAMESERVER and CLUSTER_DOMAIN are appended
	// only when the corresponding input values are non-empty.
	envs := []corev1.EnvVar{{
		Name:  "SERVICES",
		Value: services,
	}}
	if len(clusterIP) > 0 {
		envs = append(envs, corev1.EnvVar{
			Name:  "NAMESERVER",
			Value: clusterIP,
		})
	}
	if len(clusterDomain) > 0 {
		envs = append(envs, corev1.EnvVar{
			Name:  "CLUSTER_DOMAIN",
			Value: clusterDomain,
		})
	}
	trueVal := true
	name := NodeResolverDaemonSetName()
	daemonset := appsv1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name.Name,
			Namespace: name.Namespace,
			// Owned by the DNS custom resource so the daemonset is
			// garbage-collected along with it.
			OwnerReferences: []metav1.OwnerReference{
				dnsOwnerRef(dns),
			},
		},
		Spec: appsv1.DaemonSetSpec{
			Selector: NodeResolverDaemonSetPodSelector(),
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					// Mark the pods as management workloads for
					// workload partitioning.
					Annotations: map[string]string{
						workloadPartitioningManagement: `{"effect": "PreferredDuringScheduling"}`,
					},
					Labels: NodeResolverDaemonSetPodSelector().MatchLabels,
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{
						// Run the hosts-file updater script under bash.
						Command: []string{
							"/bin/bash", "-c",
							nodeResolverScript,
						},
						Env:             envs,
						Image:           openshiftCLIImage,
						ImagePullPolicy: corev1.PullIfNotPresent,
						Name:            "dns-node-resolver",
						Resources: corev1.ResourceRequirements{
							Requests: corev1.ResourceList{
								corev1.ResourceCPU:    resource.MustParse("5m"),
								corev1.ResourceMemory: resource.MustParse("21Mi"),
							},
						},
						// Privileged — presumably required to modify the
						// host's /etc/hosts through the hostPath mount
						// below; confirm before tightening.
						SecurityContext: &corev1.SecurityContext{
							Privileged: &trueVal,
						},
						TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError,
						VolumeMounts: []corev1.VolumeMount{{
							Name:      "hosts-file",
							MountPath: "/etc/hosts",
						}},
					}},
					// The node-resolver pods need to run on
					// every node in the cluster. On nodes
					// that have Smart NICs, each pod that
					// uses the container network consumes
					// an SR-IOV device. Using the host
					// network eliminates the need for this
					// scarce resource.
					HostNetwork: true,
					NodeSelector: map[string]string{
						"kubernetes.io/os": "linux",
					},
					PriorityClassName:  "system-node-critical",
					ServiceAccountName: "node-resolver",
					// Tolerate every taint so the daemonset lands on
					// all nodes, including masters and cordoned nodes.
					Tolerations: []corev1.Toleration{{
						Operator: corev1.TolerationOpExists,
					}},
					Volumes: []corev1.Volume{{
						Name: "hosts-file",
						VolumeSource: corev1.VolumeSource{
							HostPath: &corev1.HostPathVolumeSource{
								Path: "/etc/hosts",
								Type: &hostPathFile,
							},
						},
					}},
				},
			},
			UpdateStrategy: appsv1.DaemonSetUpdateStrategy{
				Type: appsv1.RollingUpdateDaemonSetStrategyType,
				RollingUpdate: &appsv1.RollingUpdateDaemonSet{
					MaxSurge:       &maxSurge,
					MaxUnavailable: &maxUnavailable,
				},
			},
		},
	}
	return true, &daemonset, nil
}
// currentNodeResolverDaemonSet fetches the node resolver daemonset as it
// exists in the cluster. It returns whether the daemonset was found, the
// daemonset itself (nil when absent), and any lookup error other than
// not-found.
func (r *reconciler) currentNodeResolverDaemonSet() (bool, *appsv1.DaemonSet, error) {
	ds := &appsv1.DaemonSet{}
	err := r.client.Get(context.TODO(), NodeResolverDaemonSetName(), ds)
	switch {
	case err == nil:
		return true, ds, nil
	case errors.IsNotFound(err):
		// Absence is not an error; report "not found" cleanly.
		return false, nil, nil
	default:
		return false, nil, err
	}
}
// createNodeResolverDaemonSet creates the given DNS node resolver daemonset
// in the cluster and logs the creation. It returns a wrapped error if the
// API call fails.
func (r *reconciler) createNodeResolverDaemonSet(daemonset *appsv1.DaemonSet) error {
	err := r.client.Create(context.TODO(), daemonset)
	if err != nil {
		return fmt.Errorf("failed to create node resolver daemonset %s/%s: %v", daemonset.Namespace, daemonset.Name, err)
	}
	logrus.Infof("created node resolver daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
	return nil
}
// deleteNodeResolverDaemonSet deletes the given DNS node resolver daemonset
// from the cluster. A not-found response is treated as success (the daemonset
// is already gone); any other API error is wrapped and returned.
func (r *reconciler) deleteNodeResolverDaemonSet(daemonset *appsv1.DaemonSet) error {
	err := r.client.Delete(context.TODO(), daemonset)
	switch {
	case err == nil:
		logrus.Infof("deleted node resolver daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
		return nil
	case errors.IsNotFound(err):
		// Already absent; nothing to do.
		return nil
	default:
		return fmt.Errorf("failed to delete node resolver daemonset %s/%s: %v", daemonset.Namespace, daemonset.Name, err)
	}
}
// updateNodeResolverDaemonSet reconciles the live daemonset toward the
// desired one. It returns true only when an update was actually pushed to
// the API server, or false (with no API call) when the relevant config
// already matches.
func (r *reconciler) updateNodeResolverDaemonSet(current, desired *appsv1.DaemonSet) (bool, error) {
	needsUpdate, merged := nodeResolverDaemonSetConfigChanged(current, desired)
	if !needsUpdate {
		return false, nil
	}
	err := r.client.Update(context.TODO(), merged)
	if err != nil {
		return false, fmt.Errorf("failed to update node resolver daemonset %s/%s: %v", merged.Namespace, merged.Name, err)
	}
	logrus.Infof("updated node resolver daemonset: %s/%s", merged.Namespace, merged.Name)
	return true, nil
}
// nodeResolverDaemonSetConfigChanged compares the operator-managed fields of
// the current daemonset against the expected one. When they differ, it
// returns true and a deep copy of current with the expected values merged in;
// otherwise it returns (false, nil). Only a deliberate subset of fields is
// compared (update strategy; first container's command, image, and env; node
// selector; tolerations; volumes) — other fields are left to the API server.
func nodeResolverDaemonSetConfigChanged(current, expected *appsv1.DaemonSet) (bool, *appsv1.DaemonSet) {
	updated := current.DeepCopy()
	changed := false

	if !cmp.Equal(current.Spec.UpdateStrategy, expected.Spec.UpdateStrategy, cmpopts.EquateEmpty()) {
		updated.Spec.UpdateStrategy = expected.Spec.UpdateStrategy
		changed = true
	}

	curContainers := current.Spec.Template.Spec.Containers
	expContainers := expected.Spec.Template.Spec.Containers
	switch {
	case len(curContainers) != len(expContainers):
		// Container count differs: take the expected list wholesale.
		updated.Spec.Template.Spec.Containers = expContainers
		changed = true
	case len(expContainers) > 0:
		// Same count: reconcile only the first container's managed fields.
		cur, exp := &curContainers[0], &expContainers[0]
		if !cmp.Equal(cur.Command, exp.Command, cmpopts.EquateEmpty()) {
			updated.Spec.Template.Spec.Containers[0].Command = exp.Command
			changed = true
		}
		if cur.Image != exp.Image {
			updated.Spec.Template.Spec.Containers[0].Image = exp.Image
			changed = true
		}
		if !cmp.Equal(cur.Env, exp.Env, cmpopts.EquateEmpty()) {
			updated.Spec.Template.Spec.Containers[0].Env = exp.Env
			changed = true
		}
	}

	if !cmp.Equal(current.Spec.Template.Spec.NodeSelector, expected.Spec.Template.Spec.NodeSelector, cmpopts.EquateEmpty()) {
		updated.Spec.Template.Spec.NodeSelector = expected.Spec.Template.Spec.NodeSelector
		changed = true
	}
	// Tolerations are compared order-insensitively.
	if !cmp.Equal(current.Spec.Template.Spec.Tolerations, expected.Spec.Template.Spec.Tolerations, cmpopts.EquateEmpty(), cmpopts.SortSlices(cmpTolerations)) {
		updated.Spec.Template.Spec.Tolerations = expected.Spec.Template.Spec.Tolerations
		changed = true
	}
	// Volume comparison tolerates API-server-defaulted fields on configmap
	// and secret sources via the custom comparers.
	if !cmp.Equal(current.Spec.Template.Spec.Volumes, expected.Spec.Template.Spec.Volumes, cmpopts.EquateEmpty(), cmp.Comparer(cmpConfigMapVolumeSource), cmp.Comparer(cmpSecretVolumeSource)) {
		updated.Spec.Template.Spec.Volumes = expected.Spec.Template.Spec.Volumes
		changed = true
	}

	if !changed {
		return false, nil
	}
	return true, updated
}