This repository was archived by the owner on May 6, 2020. It is now read-only.

Commit 3630fe7

Author: Matthew Fisher
fix(api): account for NoneType when resource is gone
In versions prior to Kubernetes v1.5, an empty list was returned when a resource didn't exist. In v1.5 it now returns None, so we need to account for that change and convert it back to an empty list.
1 parent 7bfd858 commit 3630fe7
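The same guard is repeated at every call site in the diff below. As a minimal sketch (not code from this commit; the _items helper and the response argument are hypothetical), the normalization could be factored out like this:

    def _items(response):
        """Return the 'items' list from a Kubernetes list response.

        Before Kubernetes v1.5 a missing resource came back as an empty list;
        from v1.5 on the 'items' key holds None, so normalize both cases to [].
        """
        items = response.json()['items']
        return items if items else []

    # Hypothetical usage mirroring the call sites changed below:
    #     controllers = _items(self._scheduler.rs.get(kwargs['id'], labels=labels))
    #     for controller in controllers:
    #         ...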

4 files changed (+53, -29 lines)

rootfs/api/models/app.py

Lines changed: 6 additions & 3 deletions
@@ -281,9 +281,10 @@ def restart(self, **kwargs): # noqa
                 desired = 0
                 labels = self._scheduler_filter(**kwargs)
                 # fetch RS (which represent Deployments)
-                controllers = self._scheduler.rs.get(kwargs['id'], labels=labels)
-
-                for controller in controllers.json()['items']:
+                controllers = self._scheduler.rs.get(kwargs['id'], labels=labels).json()['items']
+                if not controllers:
+                    controllers = []
+                for controller in controllers:
                     desired += controller['spec']['replicas']
         except KubeException:
             # Nothing was found

@@ -737,6 +738,8 @@ def list_pods(self, *args, **kwargs):
             pods = [self._scheduler.pod.get(self.id, kwargs['name']).json()]
         else:
             pods = self._scheduler.pod.get(self.id, labels=labels).json()['items']
+            if not pods:
+                pods = []

         data = []
         for p in pods:

rootfs/api/models/release.py

Lines changed: 20 additions & 10 deletions
@@ -263,8 +263,10 @@ def cleanup_old(self): # noqa
         # Cleanup controllers
         labels = {'heritage': 'deis'}
         controller_removal = []
-        controllers = self._scheduler.rc.get(self.app.id, labels=labels).json()
-        for controller in controllers['items']:
+        controllers = self._scheduler.rc.get(self.app.id, labels=labels).json()['items']
+        if not controllers:
+            controllers = []
+        for controller in controllers:
             current_version = controller['metadata']['labels']['version']
             # skip the latest release
             if current_version == latest_version:

@@ -289,8 +291,10 @@ def cleanup_old(self): # noqa

         # Remove stray pods
         labels = {'heritage': 'deis'}
-        pods = self._scheduler.pod.get(self.app.id, labels=labels).json()
-        for pod in pods['items']:
+        pods = self._scheduler.pod.get(self.app.id, labels=labels).json()['items']
+        if not pods:
+            pods = []
+        for pod in pods:
             if self._scheduler.pod.deleted(pod):
                 continue

@@ -319,8 +323,10 @@ def _cleanup_deployment_secrets_and_configs(self, namespace):
         # Find all ReplicaSets
         versions = []
         labels = {'heritage': 'deis', 'app': namespace}
-        replicasets = self._scheduler.rs.get(namespace, labels=labels).json()
-        for replicaset in replicasets['items']:
+        replicasets = self._scheduler.rs.get(namespace, labels=labels).json()['items']
+        if not replicasets:
+            replicasets = []
+        for replicaset in replicasets:
             if (
                 'version' not in replicaset['metadata']['labels'] or
                 replicaset['metadata']['labels']['version'] in versions

@@ -338,8 +344,10 @@ def _cleanup_deployment_secrets_and_configs(self, namespace):
             'version__notin': versions
         }
         self.app.log('Cleaning up orphaned env var secrets for application {}'.format(namespace), level=logging.DEBUG)  # noqa
-        secrets = self._scheduler.secret.get(namespace, labels=labels).json()
-        for secret in secrets['items']:
+        secrets = self._scheduler.secret.get(namespace, labels=labels).json()['items']
+        if not secrets:
+            secrets = []
+        for secret in secrets:
             self._scheduler.secret.delete(namespace, secret['metadata']['name'])

     def _delete_release_in_scheduler(self, namespace, version):

@@ -358,8 +366,10 @@ def _delete_release_in_scheduler(self, namespace, version):
         # see if the app config has deploy timeout preference, otherwise use global
         timeout = self.config.values.get('DEIS_DEPLOY_TIMEOUT', settings.DEIS_DEPLOY_TIMEOUT)

-        controllers = self._scheduler.rc.get(namespace, labels=labels).json()
-        for controller in controllers['items']:
+        controllers = self._scheduler.rc.get(namespace, labels=labels).json()['items']
+        if not controllers:
+            controllers = []
+        for controller in controllers:
             # Deployment takes care of this in the API, RC does not
             # Have the RC scale down pods and delete itself
             self._scheduler.rc.scale(namespace, controller['metadata']['name'], 0, timeout)

rootfs/scheduler/resources/deployment.py

Lines changed: 3 additions & 2 deletions
@@ -224,7 +224,7 @@ def in_progress(self, namespace, name, timeout, batches, replicas, tags):
         # fetch the latest RS for Deployment and use the start time to compare to deploy timeout
         replicasets = self.rs.get(namespace, labels=labels).json()['items']
         # the labels should ensure that only 1 replicaset due to the version label
-        if len(replicasets) != 1:
+        if replicasets and len(replicasets) != 1:
             # if more than one then sort by start time to newest is first
             replicasets.sort(key=lambda x: x['metadata']['creationTimestamp'], reverse=True)

@@ -356,7 +356,8 @@ def _get_deploy_steps(self, batches, tags):
         # if there is no batch information available default to available nodes for app
         if not batches:
             # figure out how many nodes the application can go on
-            steps = len(self.node.get(labels=tags).json()['items'])
+            nodes = self.node.get(labels=tags).json()['items']
+            steps = len(nodes) if nodes else 0
         else:
             steps = int(batches)

rootfs/scheduler/resources/pod.py

Lines changed: 24 additions & 14 deletions
@@ -485,10 +485,12 @@ def events(self, pod):
             'involvedObject.namespace': pod['metadata']['namespace'],
             'involvedObject.uid': pod['metadata']['uid']
         }
-        events = self.ns.events(pod['metadata']['namespace'], fields=fields).json()
+        events = self.ns.events(pod['metadata']['namespace'], fields=fields).json()['items']
+        if not events:
+            events = []
         # make sure that events are sorted
-        events['items'].sort(key=lambda x: x['lastTimestamp'])
-        return events['items']
+        events.sort(key=lambda x: x['lastTimestamp'])
+        return events

     def _handle_pod_errors(self, pod, reason, message):
         """

@@ -568,8 +570,10 @@ def _handle_pending_pods(self, namespace, labels):
         or throws errors as needed
         """
         timeout = 0
-        pods = self.get(namespace, labels=labels).json()
-        for pod in pods['items']:
+        pods = self.get(namespace, labels=labels).json()['items']
+        if not pods:
+            pods = []
+        for pod in pods:
             # only care about pods that are not starting or in the starting phases
             if pod['status']['phase'] not in ['Pending', 'ContainerCreating']:
                 continue

@@ -612,14 +616,16 @@ def wait_until_terminated(self, namespace, labels, current, desired):
         delta = current - desired
         self.log(namespace, "waiting for {} pods to be terminated ({}s timeout)".format(delta, timeout))  # noqa
         for waited in range(timeout):
-            pods = self.get(namespace, labels=labels).json()
-            count = len(pods['items'])
+            pods = self.get(namespace, labels=labels).json()['items']
+            if not pods:
+                pods = []
+            count = len(pods)

             # see if any pods are past their terminationGracePeriodsSeconds (as in stuck)
             # seems to be a problem in k8s around that:
             # https://github.com/kubernetes/kubernetes/search?q=terminating&type=Issues
             # these will be eventually GC'ed by k8s, ignoring them for now
-            for pod in pods['items']:
+            for pod in pods:
                 # remove pod if it is passed the graceful termination period
                 if self.deleted(pod):
                     count -= 1

@@ -655,8 +661,10 @@ def wait_until_ready(self, namespace, containers, labels, desired, timeout):  #
             self.log(namespace, 'Increasing timeout by {}s to allow a pull image operation to finish for pods'.format(additional_timeout))  # noqa

         count = 0  # ready pods
-        pods = self.get(namespace, labels=labels).json()
-        for pod in pods['items']:
+        pods = self.get(namespace, labels=labels).json()['items']
+        if not pods:
+            pods = []
+        for pod in pods:
             # now that state is running time to see if probes are passing
             if self.ready(pod):
                 count += 1

@@ -691,8 +699,10 @@ def _handle_not_ready_pods(self, namespace, labels):
         Detects if any pod is in the Running phase but not Ready and handles
         any potential issues around that mainly failed healthcheks
         """
-        pods = self.get(namespace, labels=labels).json()
-        for pod in pods['items']:
+        pods = self.get(namespace, labels=labels).json()['items']
+        if not pods:
+            pods = []
+        for pod in pods:
             # only care about pods that are in running phase
             if pod['status']['phase'] != 'Running':
                 continue

@@ -703,8 +713,8 @@ def _handle_not_ready_pods(self, namespace, labels):
             if container is None or container['ready'] == 'true':
                 continue

-            for event in self.events(pod):
-                if event['reason'] == 'Unhealthy':
+            for event in self.events(pod):
+                if event['reason'] == 'Unhealthy':
                     # strip out whitespaces on either side
                     message = "\n".join([x.strip() for x in event['message'].split("\n")])
                     raise KubeException(message)
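To see the guard in isolation, here is a standalone sketch (not part of the commit); FakeResponse is a hypothetical stand-in for the scheduler's HTTP response wrapper, and the two payloads imitate the pre-v1.5 and v1.5+ API shapes:

    class FakeResponse:
        """Hypothetical stand-in for the scheduler's HTTP response object."""
        def __init__(self, payload):
            self._payload = payload

        def json(self):
            return self._payload

    payloads = [
        {'items': [{'metadata': {'name': 'web-1'}}]},  # resource exists
        {'items': None},                               # Kubernetes v1.5+: resource is gone
    ]
    for payload in payloads:
        pods = FakeResponse(payload).json()['items']
        if not pods:
            pods = []
        # with the guard in place, the loop simply does nothing when the resource is gone
        for pod in pods:
            print(pod['metadata']['name'])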
