diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index 268cc3dd3a3..feb5dc852b8 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -245,6 +245,7 @@ TESTS = [
[ "colocation-influence", "Respect colocation influence" ],
[ "colocation-priority-group", "Apply group colocations in order of primary priority" ],
[ "colocation-vs-stickiness", "Group stickiness outweighs anti-colocation score" ],
+ [ "promoted-with-blocked", "Promoted role colocated with a resource with blocked start" ],
],
[
[ "rsc-sets-seq-true", "Resource Sets - sequential=false" ],
diff --git a/cts/scheduler/dot/promoted-with-blocked.dot b/cts/scheduler/dot/promoted-with-blocked.dot
new file mode 100644
index 00000000000..427b2c07400
--- /dev/null
+++ b/cts/scheduler/dot/promoted-with-blocked.dot
@@ -0,0 +1,63 @@
+ digraph "g" {
+"rsc1_monitor_0 node1" -> "rsc1_start_0 node2" [ style = dashed]
+"rsc1_monitor_0 node1" [ style=bold color="green" fontcolor="black"]
+"rsc1_monitor_0 node2" -> "rsc1_start_0 node2" [ style = dashed]
+"rsc1_monitor_0 node2" [ style=bold color="green" fontcolor="black"]
+"rsc1_monitor_0 node3" -> "rsc1_start_0 node2" [ style = dashed]
+"rsc1_monitor_0 node3" [ style=bold color="green" fontcolor="black"]
+"rsc1_monitor_0 node4" -> "rsc1_start_0 node2" [ style = dashed]
+"rsc1_monitor_0 node4" [ style=bold color="green" fontcolor="black"]
+"rsc1_monitor_0 node5" -> "rsc1_start_0 node2" [ style = dashed]
+"rsc1_monitor_0 node5" [ style=bold color="green" fontcolor="black"]
+"rsc1_monitor_10000 node2" [ style=dashed color="red" fontcolor="black"]
+"rsc1_start_0 node2" -> "rsc1_monitor_10000 node2" [ style = dashed]
+"rsc1_start_0 node2" [ style=dashed color="red" fontcolor="black"]
+"rsc2-clone_promote_0" -> "rsc2:4_promote_0 node2" [ style = dashed]
+"rsc2-clone_promote_0" [ style=dashed color="red" fontcolor="orange"]
+"rsc2-clone_promoted_0" [ style=dashed color="red" fontcolor="orange"]
+"rsc2-clone_running_0" -> "rsc2-clone_promote_0" [ style = dashed]
+"rsc2-clone_running_0" [ style=bold color="green" fontcolor="orange"]
+"rsc2-clone_start_0" -> "rsc2-clone_running_0" [ style = bold]
+"rsc2-clone_start_0" -> "rsc2:0_start_0 node3" [ style = bold]
+"rsc2-clone_start_0" -> "rsc2:1_start_0 node4" [ style = bold]
+"rsc2-clone_start_0" -> "rsc2:2_start_0 node5" [ style = bold]
+"rsc2-clone_start_0" -> "rsc2:3_start_0 node1" [ style = bold]
+"rsc2-clone_start_0" -> "rsc2:4_start_0 node2" [ style = bold]
+"rsc2-clone_start_0" [ style=bold color="green" fontcolor="orange"]
+"rsc2:0_monitor_0 node3" -> "rsc2-clone_start_0" [ style = bold]
+"rsc2:0_monitor_0 node3" [ style=bold color="green" fontcolor="black"]
+"rsc2:0_monitor_10000 node3" [ style=bold color="green" fontcolor="black"]
+"rsc2:0_start_0 node3" -> "rsc2-clone_running_0" [ style = bold]
+"rsc2:0_start_0 node3" -> "rsc2:0_monitor_10000 node3" [ style = bold]
+"rsc2:0_start_0 node3" [ style=bold color="green" fontcolor="black"]
+"rsc2:1_monitor_0 node4" -> "rsc2-clone_start_0" [ style = bold]
+"rsc2:1_monitor_0 node4" [ style=bold color="green" fontcolor="black"]
+"rsc2:1_monitor_10000 node4" [ style=bold color="green" fontcolor="black"]
+"rsc2:1_start_0 node4" -> "rsc2-clone_running_0" [ style = bold]
+"rsc2:1_start_0 node4" -> "rsc2:1_monitor_10000 node4" [ style = bold]
+"rsc2:1_start_0 node4" [ style=bold color="green" fontcolor="black"]
+"rsc2:2_monitor_0 node5" -> "rsc2-clone_start_0" [ style = bold]
+"rsc2:2_monitor_0 node5" [ style=bold color="green" fontcolor="black"]
+"rsc2:2_monitor_10000 node5" [ style=bold color="green" fontcolor="black"]
+"rsc2:2_start_0 node5" -> "rsc2-clone_running_0" [ style = bold]
+"rsc2:2_start_0 node5" -> "rsc2:2_monitor_10000 node5" [ style = bold]
+"rsc2:2_start_0 node5" [ style=bold color="green" fontcolor="black"]
+"rsc2:3_monitor_0 node1" -> "rsc2-clone_start_0" [ style = bold]
+"rsc2:3_monitor_0 node1" [ style=bold color="green" fontcolor="black"]
+"rsc2:3_monitor_10000 node1" [ style=bold color="green" fontcolor="black"]
+"rsc2:3_start_0 node1" -> "rsc2-clone_running_0" [ style = bold]
+"rsc2:3_start_0 node1" -> "rsc2:3_monitor_10000 node1" [ style = bold]
+"rsc2:3_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"rsc2:4_monitor_0 node2" -> "rsc2-clone_start_0" [ style = bold]
+"rsc2:4_monitor_0 node2" [ style=bold color="green" fontcolor="black"]
+"rsc2:4_promote_0 node2" -> "rsc2-clone_promoted_0" [ style = dashed]
+"rsc2:4_promote_0 node2" [ style=dashed color="red" fontcolor="black"]
+"rsc2:4_start_0 node2" -> "rsc2-clone_running_0" [ style = bold]
+"rsc2:4_start_0 node2" -> "rsc2:4_promote_0 node2" [ style = dashed]
+"rsc2:4_start_0 node2" [ style=bold color="green" fontcolor="black"]
+"rsc3_monitor_0 node1" [ style=bold color="green" fontcolor="black"]
+"rsc3_monitor_0 node2" [ style=bold color="green" fontcolor="black"]
+"rsc3_monitor_0 node3" [ style=bold color="green" fontcolor="black"]
+"rsc3_monitor_0 node4" [ style=bold color="green" fontcolor="black"]
+"rsc3_monitor_0 node5" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/exp/promoted-with-blocked.exp b/cts/scheduler/exp/promoted-with-blocked.exp
new file mode 100644
index 00000000000..540963bcd70
--- /dev/null
+++ b/cts/scheduler/exp/promoted-with-blocked.exp
@@ -0,0 +1,305 @@
[305 lines of XML transition graph not shown]
diff --git a/cts/scheduler/scores/promoted-with-blocked.scores b/cts/scheduler/scores/promoted-with-blocked.scores
new file mode 100644
index 00000000000..d279b01a8c1
--- /dev/null
+++ b/cts/scheduler/scores/promoted-with-blocked.scores
@@ -0,0 +1,76 @@
+
+pcmk__clone_allocate: rsc2-clone allocation score on node1: 0
+pcmk__clone_allocate: rsc2-clone allocation score on node2: 0
+pcmk__clone_allocate: rsc2-clone allocation score on node3: 0
+pcmk__clone_allocate: rsc2-clone allocation score on node4: 0
+pcmk__clone_allocate: rsc2-clone allocation score on node5: 0
+pcmk__clone_allocate: rsc2:0 allocation score on node1: 0
+pcmk__clone_allocate: rsc2:0 allocation score on node2: 0
+pcmk__clone_allocate: rsc2:0 allocation score on node3: 0
+pcmk__clone_allocate: rsc2:0 allocation score on node4: 0
+pcmk__clone_allocate: rsc2:0 allocation score on node5: 0
+pcmk__clone_allocate: rsc2:1 allocation score on node1: 0
+pcmk__clone_allocate: rsc2:1 allocation score on node2: 0
+pcmk__clone_allocate: rsc2:1 allocation score on node3: 0
+pcmk__clone_allocate: rsc2:1 allocation score on node4: 0
+pcmk__clone_allocate: rsc2:1 allocation score on node5: 0
+pcmk__clone_allocate: rsc2:2 allocation score on node1: 0
+pcmk__clone_allocate: rsc2:2 allocation score on node2: 0
+pcmk__clone_allocate: rsc2:2 allocation score on node3: 0
+pcmk__clone_allocate: rsc2:2 allocation score on node4: 0
+pcmk__clone_allocate: rsc2:2 allocation score on node5: 0
+pcmk__clone_allocate: rsc2:3 allocation score on node1: 0
+pcmk__clone_allocate: rsc2:3 allocation score on node2: 0
+pcmk__clone_allocate: rsc2:3 allocation score on node3: 0
+pcmk__clone_allocate: rsc2:3 allocation score on node4: 0
+pcmk__clone_allocate: rsc2:3 allocation score on node5: 0
+pcmk__clone_allocate: rsc2:4 allocation score on node1: 0
+pcmk__clone_allocate: rsc2:4 allocation score on node2: 0
+pcmk__clone_allocate: rsc2:4 allocation score on node3: 0
+pcmk__clone_allocate: rsc2:4 allocation score on node4: 0
+pcmk__clone_allocate: rsc2:4 allocation score on node5: 0
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: Fencing allocation score on node4: 0
+pcmk__primitive_assign: Fencing allocation score on node5: 0
+pcmk__primitive_assign: rsc1 allocation score on node1: 0
+pcmk__primitive_assign: rsc1 allocation score on node2: 0
+pcmk__primitive_assign: rsc1 allocation score on node3: 0
+pcmk__primitive_assign: rsc1 allocation score on node4: 0
+pcmk__primitive_assign: rsc1 allocation score on node5: 0
+pcmk__primitive_assign: rsc2:0 allocation score on node1: 0
+pcmk__primitive_assign: rsc2:0 allocation score on node2: 0
+pcmk__primitive_assign: rsc2:0 allocation score on node3: 0
+pcmk__primitive_assign: rsc2:0 allocation score on node4: 0
+pcmk__primitive_assign: rsc2:0 allocation score on node5: 0
+pcmk__primitive_assign: rsc2:1 allocation score on node1: 0
+pcmk__primitive_assign: rsc2:1 allocation score on node2: 0
+pcmk__primitive_assign: rsc2:1 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc2:1 allocation score on node4: 0
+pcmk__primitive_assign: rsc2:1 allocation score on node5: 0
+pcmk__primitive_assign: rsc2:2 allocation score on node1: 0
+pcmk__primitive_assign: rsc2:2 allocation score on node2: 0
+pcmk__primitive_assign: rsc2:2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc2:2 allocation score on node4: -INFINITY
+pcmk__primitive_assign: rsc2:2 allocation score on node5: 0
+pcmk__primitive_assign: rsc2:3 allocation score on node1: 0
+pcmk__primitive_assign: rsc2:3 allocation score on node2: 0
+pcmk__primitive_assign: rsc2:3 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc2:3 allocation score on node4: -INFINITY
+pcmk__primitive_assign: rsc2:3 allocation score on node5: -INFINITY
+pcmk__primitive_assign: rsc2:4 allocation score on node1: -INFINITY
+pcmk__primitive_assign: rsc2:4 allocation score on node2: 0
+pcmk__primitive_assign: rsc2:4 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc2:4 allocation score on node4: -INFINITY
+pcmk__primitive_assign: rsc2:4 allocation score on node5: -INFINITY
+pcmk__primitive_assign: rsc3 allocation score on node1: -INFINITY
+pcmk__primitive_assign: rsc3 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc3 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc3 allocation score on node4: -INFINITY
+pcmk__primitive_assign: rsc3 allocation score on node5: -INFINITY
+rsc2:0 promotion score on node3: -INFINITY
+rsc2:1 promotion score on node4: -INFINITY
+rsc2:2 promotion score on node5: -INFINITY
+rsc2:3 promotion score on node1: -INFINITY
+rsc2:4 promotion score on node2: INFINITY
diff --git a/cts/scheduler/summary/bundle-order-partial-stop.summary b/cts/scheduler/summary/bundle-order-partial-stop.summary
index 39eab8f93ea..faf5e5d51f7 100644
--- a/cts/scheduler/summary/bundle-order-partial-stop.summary
+++ b/cts/scheduler/summary/bundle-order-partial-stop.summary
@@ -24,7 +24,7 @@ Current cluster status:
Transition Summary:
* Stop rabbitmq-bundle-docker-0 ( undercloud ) due to node availability
* Stop rabbitmq-bundle-0 ( undercloud ) due to node availability
- * Stop rabbitmq:0 ( rabbitmq-bundle-0 ) due to unrunnable rabbitmq-bundle-0 start
+ * Stop rabbitmq:0 ( rabbitmq-bundle-0 ) due to colocation with haproxy-bundle-docker-0
* Stop galera-bundle-docker-0 ( undercloud ) due to node availability
* Stop galera-bundle-0 ( undercloud ) due to node availability
* Stop galera:0 ( Promoted galera-bundle-0 ) due to unrunnable galera-bundle-0 start
diff --git a/cts/scheduler/summary/bundle-order-stop.summary b/cts/scheduler/summary/bundle-order-stop.summary
index 39eab8f93ea..faf5e5d51f7 100644
--- a/cts/scheduler/summary/bundle-order-stop.summary
+++ b/cts/scheduler/summary/bundle-order-stop.summary
@@ -24,7 +24,7 @@ Current cluster status:
Transition Summary:
* Stop rabbitmq-bundle-docker-0 ( undercloud ) due to node availability
* Stop rabbitmq-bundle-0 ( undercloud ) due to node availability
- * Stop rabbitmq:0 ( rabbitmq-bundle-0 ) due to unrunnable rabbitmq-bundle-0 start
+ * Stop rabbitmq:0 ( rabbitmq-bundle-0 ) due to colocation with haproxy-bundle-docker-0
* Stop galera-bundle-docker-0 ( undercloud ) due to node availability
* Stop galera-bundle-0 ( undercloud ) due to node availability
* Stop galera:0 ( Promoted galera-bundle-0 ) due to unrunnable galera-bundle-0 start
diff --git a/cts/scheduler/summary/promoted-with-blocked.summary b/cts/scheduler/summary/promoted-with-blocked.summary
new file mode 100644
index 00000000000..f045b61dfe1
--- /dev/null
+++ b/cts/scheduler/summary/promoted-with-blocked.summary
@@ -0,0 +1,59 @@
+1 of 8 resource instances DISABLED and 0 BLOCKED from further action due to failure
+
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 node4 node5 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node1
+ * rsc1 (ocf:pacemaker:Dummy): Stopped
+ * Clone Set: rsc2-clone [rsc2] (promotable):
+ * Stopped: [ node1 node2 node3 node4 node5 ]
+ * rsc3 (ocf:pacemaker:Dummy): Stopped (disabled)
+
+Transition Summary:
+ * Start rsc1 ( node2 ) due to unrunnable rsc3 start (blocked)
+ * Start rsc2:0 ( node3 )
+ * Start rsc2:1 ( node4 )
+ * Start rsc2:2 ( node5 )
+ * Start rsc2:3 ( node1 )
+ * Promote rsc2:4 ( Stopped -> Promoted node2 ) due to colocation with rsc1 (blocked)
+
+Executing Cluster Transition:
+ * Resource action: rsc1 monitor on node5
+ * Resource action: rsc1 monitor on node4
+ * Resource action: rsc1 monitor on node3
+ * Resource action: rsc1 monitor on node2
+ * Resource action: rsc1 monitor on node1
+ * Resource action: rsc2:0 monitor on node3
+ * Resource action: rsc2:1 monitor on node4
+ * Resource action: rsc2:2 monitor on node5
+ * Resource action: rsc2:3 monitor on node1
+ * Resource action: rsc2:4 monitor on node2
+ * Pseudo action: rsc2-clone_start_0
+ * Resource action: rsc3 monitor on node5
+ * Resource action: rsc3 monitor on node4
+ * Resource action: rsc3 monitor on node3
+ * Resource action: rsc3 monitor on node2
+ * Resource action: rsc3 monitor on node1
+ * Resource action: rsc2:0 start on node3
+ * Resource action: rsc2:1 start on node4
+ * Resource action: rsc2:2 start on node5
+ * Resource action: rsc2:3 start on node1
+ * Resource action: rsc2:4 start on node2
+ * Pseudo action: rsc2-clone_running_0
+ * Resource action: rsc2:0 monitor=10000 on node3
+ * Resource action: rsc2:1 monitor=10000 on node4
+ * Resource action: rsc2:2 monitor=10000 on node5
+ * Resource action: rsc2:3 monitor=10000 on node1
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 node4 node5 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node1
+ * rsc1 (ocf:pacemaker:Dummy): Stopped
+ * Clone Set: rsc2-clone [rsc2] (promotable):
+ * Unpromoted: [ node1 node2 node3 node4 node5 ]
+ * rsc3 (ocf:pacemaker:Dummy): Stopped (disabled)
diff --git a/cts/scheduler/xml/promoted-with-blocked.xml b/cts/scheduler/xml/promoted-with-blocked.xml
new file mode 100644
index 00000000000..c9a31b2ad20
--- /dev/null
+++ b/cts/scheduler/xml/promoted-with-blocked.xml
@@ -0,0 +1,156 @@
[156 lines of CIB XML (test input) not shown]
diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h
index 38ce24513b6..6079fdb3a50 100644
--- a/lib/pacemaker/libpacemaker_private.h
+++ b/lib/pacemaker/libpacemaker_private.h
@@ -410,8 +410,8 @@ void pcmk__new_colocation(const char *id, const char *node_attr, int score,
bool influence, pe_working_set_t *data_set);
G_GNUC_INTERNAL
-void pcmk__block_colocated_starts(pe_action_t *action,
- pe_working_set_t *data_set);
+void pcmk__block_colocation_dependents(pe_action_t *action,
+ pe_working_set_t *data_set);
/*!
* \internal
diff --git a/lib/pacemaker/pcmk_sched_actions.c b/lib/pacemaker/pcmk_sched_actions.c
index 0bcc72bff9d..8c61264c282 100644
--- a/lib/pacemaker/pcmk_sched_actions.c
+++ b/lib/pacemaker/pcmk_sched_actions.c
@@ -656,7 +656,7 @@ pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set)
then->uuid);
if (pcmk_is_set(last_flags, pe_action_runnable)
&& !pcmk_is_set(then->flags, pe_action_runnable)) {
- pcmk__block_colocated_starts(then, data_set);
+ pcmk__block_colocation_dependents(then, data_set);
}
pcmk__update_action_for_orderings(then, data_set);
for (lpc = then->actions_after; lpc != NULL; lpc = lpc->next) {
diff --git a/lib/pacemaker/pcmk_sched_colocation.c b/lib/pacemaker/pcmk_sched_colocation.c
index a30c8184b80..ee7ecd76a5b 100644
--- a/lib/pacemaker/pcmk_sched_colocation.c
+++ b/lib/pacemaker/pcmk_sched_colocation.c
@@ -787,9 +787,17 @@ pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set)
}
}
+/*!
+ * \internal
+ * \brief Make actions of a given type unrunnable for a given resource
+ *
+ * \param[in,out] rsc Resource whose actions should be blocked
+ * \param[in] task Name of action to block
+ * \param[in] reason Resource whose unrunnable action caused the block
+ */
static void
-mark_start_blocked(pe_resource_t *rsc, pe_resource_t *reason,
- pe_working_set_t *data_set)
+mark_action_blocked(pe_resource_t *rsc, const char *task,
+ const pe_resource_t *reason)
{
char *reason_text = crm_strdup_printf("colocation with %s", reason->id);
@@ -797,63 +805,107 @@ mark_start_blocked(pe_resource_t *rsc, pe_resource_t *reason,
pe_action_t *action = (pe_action_t *) gIter->data;
if (pcmk_is_set(action->flags, pe_action_runnable)
- && pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)) {
+ && pcmk__str_eq(action->task, task, pcmk__str_casei)) {
pe__clear_action_flags(action, pe_action_runnable);
pe_action_set_reason(action, reason_text, false);
- pcmk__block_colocated_starts(action, data_set);
- pcmk__update_action_for_orderings(action, data_set);
+ pcmk__block_colocation_dependents(action, rsc->cluster);
+ pcmk__update_action_for_orderings(action, rsc->cluster);
}
}
+
+ // If parent resource can't perform an action, neither can any children
+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ mark_action_blocked((pe_resource_t *) (iter->data), task, reason);
+ }
free(reason_text);
}
/*!
* \internal
- * \brief If a start action is unrunnable, block starts of colocated resources
+ * \brief If an action is unrunnable, block any relevant dependent actions
+ *
+ * If a given action is an unrunnable start or promote, block the start or
+ * promote actions of resources colocated with it, as appropriate to the
+ * colocations' configured roles.
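+ *
+ * For example, if resource A has a mandatory colocation with resource B
+ * and B's start becomes unrunnable, A's start must be blocked as well.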
*
* \param[in] action Action to check
* \param[in] data_set Cluster working set
*/
void
-pcmk__block_colocated_starts(pe_action_t *action, pe_working_set_t *data_set)
+pcmk__block_colocation_dependents(pe_action_t *action,
+ pe_working_set_t *data_set)
{
GList *gIter = NULL;
pe_resource_t *rsc = NULL;
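+ // Whether the unrunnable action is a start (as opposed to a promote)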
+ bool is_start = false;
- if (!pcmk_is_set(action->flags, pe_action_runnable)
- && pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)) {
+ if (pcmk_is_set(action->flags, pe_action_runnable)) {
+ return; // Only unrunnable actions block dependents
+ }
- rsc = uber_parent(action->rsc);
- if (rsc->parent) {
- /* For bundles, uber_parent() returns the clone, not the bundle, so
- * the existence of rsc->parent implies this is a bundle.
- * In this case, we need the bundle resource, so that we can check
- * if all containers are stopped/stopping.
- */
- rsc = rsc->parent;
- }
+ is_start = pcmk__str_eq(action->task, RSC_START, pcmk__str_none);
+ if (!is_start && !pcmk__str_eq(action->task, RSC_PROMOTE, pcmk__str_none)) {
+ return; // Only unrunnable starts and promotes block dependents
}
- if ((rsc == NULL) || (rsc->rsc_cons_lhs == NULL)) {
+ CRM_ASSERT(action->rsc != NULL); // Start and promote are resource actions
+
+ /* If this resource is part of a collective resource, dependents are blocked
+ * only if all instances of the collective are unrunnable, so check the
+ * collective resource.
+ */
+ rsc = uber_parent(action->rsc);
+ if (rsc->parent != NULL) {
+ rsc = rsc->parent; // Bundle
+ }
+
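+ // rsc_cons_lhs lists the colocations in which this resource is primary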
+ if (rsc->rsc_cons_lhs == NULL) {
return;
}
- // Block colocated starts only if all children (if any) have unrunnable starts
+ // Colocation fails only if entire primary can't reach desired role
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child = (pe_resource_t *)gIter->data;
- pe_action_t *start = find_first_action(child->actions, NULL, RSC_START, NULL);
-
- if ((start == NULL) || pcmk_is_set(start->flags, pe_action_runnable)) {
- return;
+ pe_resource_t *child = (pe_resource_t *) gIter->data;
+ pe_action_t *child_action = find_first_action(child->actions, NULL,
+ action->task, NULL);
+
+ if ((child_action == NULL)
+ || pcmk_is_set(child_action->flags, pe_action_runnable)) {
+ crm_trace("Not blocking %s colocation dependents because "
+ "at least %s has runnable %s",
+ rsc->id, child->id, action->task);
+ return; // At least one child can reach desired role
}
}
+ crm_trace("Blocking %s colocation dependents due to unrunnable %s %s",
+ rsc->id, action->rsc->id, action->task);
+
+ // Check each colocation where this resource is primary
for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
- pcmk__colocation_t *colocate_with = (pcmk__colocation_t *) gIter->data;
+ pcmk__colocation_t *colocation = (pcmk__colocation_t *) gIter->data;
+
+ if (colocation->score < INFINITY) {
+ continue; // Only mandatory colocations block dependents
+ }
- if (colocate_with->score == INFINITY) {
- mark_start_blocked(colocate_with->dependent, action->rsc, data_set);
+ /* If the primary can't start, the dependent can't reach its colocated
+ * role, regardless of what the primary or dependent colocation role is.
+ *
+ * If the primary can't be promoted, the dependent can't reach its
+ * colocated role if the primary's colocation role is promoted.
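+ *
+ * For example, in the promoted-with-blocked test added here, rsc2-clone's
+ * promoted role is colocated with rsc1, so rsc1's unrunnable start leaves
+ * the clone instances free to start but blocks the promotion of rsc2:4.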
+ */
+ if (!is_start && (colocation->primary_role != RSC_ROLE_PROMOTED)) {
+ continue;
+ }
+
+ // Block the dependent from reaching its colocated role
+ if (colocation->dependent_role == RSC_ROLE_PROMOTED) {
+ mark_action_blocked(colocation->dependent, RSC_PROMOTE,
+ action->rsc);
+ } else {
+ mark_action_blocked(colocation->dependent, RSC_START, action->rsc);
}
}
}
diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c
index 6bbe2d1b889..668fef76a55 100644
--- a/lib/pacemaker/pcmk_sched_group.c
+++ b/lib/pacemaker/pcmk_sched_group.c
@@ -45,8 +45,8 @@ expand_group_colocations(pe_resource_t *rsc)
*
* However, there is a special case when a group has a mandatory colocation
* with a resource that can't start. In that case,
- * pcmk__block_colocated_starts() will ensure that dependent resources in
- * mandatory colocations (i.e. the first member for groups) can't start
+ * pcmk__block_colocation_dependents() will ensure that dependent resources
+ * in mandatory colocations (i.e. the first member for groups) can't start
* either. But if any group member is unmanaged and already started, the
* internal group colocations are no longer sufficient to make that apply to
* later members.
diff --git a/lib/pacemaker/pcmk_sched_ordering.c b/lib/pacemaker/pcmk_sched_ordering.c
index 527b97569ab..2b498a672f8 100644
--- a/lib/pacemaker/pcmk_sched_ordering.c
+++ b/lib/pacemaker/pcmk_sched_ordering.c
@@ -1397,7 +1397,7 @@ pcmk__apply_orderings(pe_working_set_t *data_set)
}
}
- g_list_foreach(data_set->actions, (GFunc) pcmk__block_colocated_starts,
+ g_list_foreach(data_set->actions, (GFunc) pcmk__block_colocation_dependents,
data_set);
crm_trace("Ordering probes");