Skip to content

Commit c021d1d

Browse files
Merge pull request openshift#131 from openshift-cherrypick-robot/cherry-pick-129-to-master-post-release
[master-post-release] Bug 1703546: Waiting for synced flush
2 parents de286aa + fea9522 commit c021d1d

File tree

2 files changed

+19
-10
lines changed

2 files changed

+19
-10
lines changed

pkg/k8shandler/deployment.go

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -309,17 +309,17 @@ func (node *deploymentNode) restart(upgradeStatus *api.ElasticsearchNodeStatus)
309309
}
310310

311311
if replicas > 0 {
312-
if ok, err := DoSynchronizedFlush(node.clusterName, node.self.Namespace); !ok {
313-
logrus.Warnf("Unable to perform synchronized flush: %v", err)
314-
return
315-
}
316312

317313
// disable shard allocation
318314
if ok, err := SetShardAllocation(node.clusterName, node.self.Namespace, api.ShardAllocationNone); !ok {
319315
logrus.Warnf("Unable to disable shard allocation: %v", err)
320316
return
321317
}
322318

319+
if ok, err := DoSynchronizedFlush(node.clusterName, node.self.Namespace); !ok {
320+
logrus.Warnf("Unable to perform synchronized flush: %v", err)
321+
}
322+
323323
// check for available replicas empty
324324
// node.self.Status.Replicas
325325
// if we aren't at 0, then we need to scale down to 0
@@ -393,17 +393,16 @@ func (node *deploymentNode) update(upgradeStatus *api.ElasticsearchNodeStatus) e
393393
if upgradeStatus.UpgradeStatus.UpgradePhase == "" ||
394394
upgradeStatus.UpgradeStatus.UpgradePhase == api.ControllerUpdated {
395395

396-
if ok, err := DoSynchronizedFlush(node.clusterName, node.self.Namespace); !ok {
397-
logrus.Warnf("Unable to perform synchronized flush: %v", err)
398-
return err
399-
}
400-
401396
// disable shard allocation
402397
if ok, err := SetShardAllocation(node.clusterName, node.self.Namespace, api.ShardAllocationNone); !ok {
403398
logrus.Warnf("Unable to disable shard allocation: %v", err)
404399
return err
405400
}
406401

402+
if ok, err := DoSynchronizedFlush(node.clusterName, node.self.Namespace); !ok {
403+
logrus.Warnf("Unable to perform synchronized flush: %v", err)
404+
}
405+
407406
if err := node.executeUpdate(); err != nil {
408407
return err
409408
}

pkg/k8shandler/elasticsearch.go

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -357,7 +357,6 @@ func GetClusterNodeCount(clusterName, namespace string) (int32, error) {
357357
return nodeCount, payload.Error
358358
}
359359

360-
// TODO: also check that the number of shards in the response > 0?
361360
func DoSynchronizedFlush(clusterName, namespace string) (bool, error) {
362361

363362
payload := &esCurlStruct{
@@ -367,6 +366,17 @@ func DoSynchronizedFlush(clusterName, namespace string) (bool, error) {
367366

368367
curlESService(clusterName, namespace, payload)
369368

369+
failed := 0
370+
if shards, ok := payload.ResponseBody["_shards"].(map[string]interface{}); ok {
371+
if failedFload, ok := shards["failed"].(float64); ok {
372+
failed = int(failedFload)
373+
}
374+
}
375+
376+
if payload.Error == nil && failed != 0 {
377+
payload.Error = fmt.Errorf("Failed to flush %d shards in preparation for cluster restart", failed)
378+
}
379+
370380
return (payload.StatusCode == 200), payload.Error
371381
}
372382

0 commit comments

Comments (0)