feat: crmsh enhancements, master slave, validations #197

Merged · 6 commits · Apr 9, 2024
18 changes: 4 additions & 14 deletions tasks/shell_crmsh/cluster-destroy-crm.yml
@@ -10,7 +10,7 @@

- name: Stop cluster
ansible.builtin.command:
cmd: crm cluster stop --all
cmd: crm cluster stop
when: not __ha_cluster_config_files_stat.results |
selectattr('stat.exists', 'equalto', false) | list | length > 0
changed_when: true
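For context on this change: per crmsh's cluster subcommand, `crm cluster stop` with no arguments stops the cluster services on the local node only, while `--all` fans the request out to every node. Since this task already runs on each play host, the per-node form avoids issuing redundant cluster-wide stop requests. A sketch of the two invocations:

```sh
# Acts on the local node only (the form used above):
crm cluster stop

# Previous form: asks every cluster node to stop, which is redundant
# and potentially racy when the task already runs on all play hosts:
crm cluster stop --all
```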
@@ -52,17 +52,7 @@
loop_var: config_file
when: config_file.stat.exists

- name: Find all files in /var/lib/pacemaker/cib/
ansible.builtin.find:
paths: /var/lib/pacemaker/cib
recurse: true
patterns:
- 'cib*'
- 'shadow*'
register: __ha_cluster_cib_files

- name: Remove all files in /var/lib/pacemaker/cib/
ansible.builtin.file:
path: "{{ item.path }}"
state: absent
loop: "{{ __ha_cluster_cib_files.files }}"
ansible.builtin.shell:
cmd: 'rm -f /var/lib/pacemaker/cib/{cib*,shadow*}'
changed_when: true
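One caveat worth noting: the `{cib*,shadow*}` pattern relies on brace expansion, which is a bash/ksh feature rather than POSIX `sh`. On targets where `ansible.builtin.shell` runs a stricter shell, spelling the globs out is equivalent; a minimal sketch, assuming the same paths:

```yaml
- name: Remove all files in /var/lib/pacemaker/cib/
  ansible.builtin.shell:
    cmd: 'rm -f /var/lib/pacemaker/cib/cib* /var/lib/pacemaker/cib/shadow*'
  changed_when: true
```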
12 changes: 6 additions & 6 deletions tasks/shell_crmsh/cluster-start-and-reload.yml
@@ -72,10 +72,10 @@
changed_when: false
register: __ha_cluster_crm_output_nodes
until: __ha_cluster_crm_output_nodes.stdout is regex(__ha_cluster_node_count)
# 2 minutes retry loop for cluster to initialize
retries: 12
# Retry loop for cluster to initialize
retries: 20
delay: 10
timeout: 120
# timeout: 120

- name: Wait for the cluster to show Online nodes
ansible.builtin.command:
@@ -85,10 +85,10 @@
changed_when: false
register: __ha_cluster_crm_output_online
until: __ha_cluster_crm_output_online.stdout is regex("Online:")
# 2 minutes retry loop for cluster nodes to come up
retries: 12
# Retry loop for cluster nodes to come up
retries: 20
delay: 10
timeout: 120
# timeout: 120

- name: Output current cluster status
ansible.builtin.debug:
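The two wait tasks above share one pattern: an idempotent status command is retried until its output matches a regex, so the worst-case wait is `retries * delay` (here 20 * 10, roughly 200 seconds, up from the earlier 2 minutes), and the task-level `timeout` is commented out rather than raised. A generic sketch of the pattern, with a hypothetical status command:

```yaml
- name: Wait until the cluster reports online nodes
  ansible.builtin.command:
    cmd: crm status  # assumption: any idempotent status command works here
  register: __status
  changed_when: false
  until: __status.stdout is regex("Online:")
  retries: 20  # attempts
  delay: 10    # seconds between attempts -> worst case ~200 s
```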
32 changes: 27 additions & 5 deletions tasks/shell_crmsh/create-and-push-cib.yml
@@ -31,13 +31,14 @@
# status changes, resulting in the shadow CIB becoming outdated and
# impossible to patch.
# Sleep is implemented to give the cluster enough time to freeze,
# ensuring CIB export consistency.
# Meta-attrs is-managed will conflict with maintenance mode. Option n
# will skip their deletion.
# Meta-attrs is-managed will conflict with maintenance mode as well as
# individual resource maintenance attributes. Expect will skip their deletion.
- name: Put cluster in maintenance mode to freeze cib changes
ansible.builtin.expect:
command: crm configure property maintenance-mode=true
responses:
".*is-managed.*": "n"
".*already.*": "n"
run_once: true # noqa: run_once[task]
check_mode: false
changed_when: true
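For readers unfamiliar with `ansible.builtin.expect`: it runs the command, matches the output against the regex keys under `responses`, and types the mapped answer, which is what lets this task decline the interactive offers to delete `is-managed` and maintenance attributes. The module requires the Python `pexpect` library on the managed host. A minimal sketch of the mechanism, with a hypothetical command and prompt:

```yaml
- name: Answer an interactive prompt non-interactively
  ansible.builtin.expect:
    command: /usr/local/bin/some-interactive-tool  # hypothetical command
    responses:
      "Proceed anyway \\(y/n\\)\\?": "n"  # regex key -> typed response
```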
@@ -122,6 +122,26 @@
properties_set: "{{ ha_cluster_cluster_properties[0] }}"
when: ha_cluster_cluster_properties[0].attrs | d([])

## Resource defaults
- name: Configure resource defaults
ansible.builtin.include_tasks: crm-rsc-op-defaults.yml
vars:
operations: false
loop: "{{ ha_cluster_resource_defaults.meta_attrs | d([]) }}"
loop_control:
index_var: defaults_set_index
loop_var: defaults_set

## Resource operation defaults
- name: Configure resource operation defaults
ansible.builtin.include_tasks: crm-rsc-op-defaults.yml
vars:
operations: true
loop: "{{ ha_cluster_resource_operation_defaults.meta_attrs | d([]) }}"
loop_control:
index_var: defaults_set_index
loop_var: defaults_set
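The two loops above feed each set of meta attributes to the shared `crm-rsc-op-defaults.yml`, with the `operations` flag selecting operation defaults versus resource defaults. That file is not part of this diff; a hypothetical input and the crmsh commands one would expect it to render:

```yaml
# Hypothetical role input (shape per the role's meta_attrs interface):
ha_cluster_resource_defaults:
  meta_attrs:
    - attrs:
        - name: resource-stickiness
          value: 100
ha_cluster_resource_operation_defaults:
  meta_attrs:
    - attrs:
        - name: timeout
          value: 60
# Expected crmsh equivalents (an assumption, since the included file
# is not shown in this diff):
#   crm configure rsc_defaults resource-stickiness=100
#   crm configure op_defaults timeout=60
```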

## Resources
- name: Configure cluster resources
ansible.builtin.include_tasks: crm-cib-resource-primitive.yml
@@ -143,7 +164,7 @@

## Stonith levels - fencing_topology
- name: Configure stonith levels - fencing_topology
include_tasks: crm-cib-stonith-level.yml
ansible.builtin.include_tasks: crm-cib-stonith-level.yml
when: ha_cluster_stonith_levels

## Constraints
@@ -267,13 +288,14 @@
when: __ha_cluster_cib_diff.rc == 1
run_once: true # noqa: run_once[task]

# Meta-attrs is-managed will conflict with maintenance mode. Option n
# will skip their deletion.
# Meta-attrs is-managed will conflict with maintenance mode as well as
# individual resource maintenance attributes. Expect will skip their deletion.
- name: Disable maintenance mode
ansible.builtin.expect:
command: crm configure property maintenance-mode=false
responses:
".*is-managed.*": "n"
".*already.*": "n"
check_mode: false
changed_when: true
run_once: true # noqa: run_once[task]
64 changes: 56 additions & 8 deletions tasks/shell_crmsh/crm-cib-constraint-colocation.yml
@@ -1,16 +1,38 @@
# SPDX-License-Identifier: MIT
---
# Verify if constraint.resource_leader.id exists
- name: Verify resource_leader presence {{ constraint.resource_leader.id }}
ansible.builtin.command:
cmd: >-
crm -c {{ __ha_cluster_crm_shadow }}
configure show {{ constraint.resource_leader.id }}
register: __ha_cluster_constraint_resource_leader
changed_when: false
failed_when:
- "'does not exist' in __ha_cluster_constraint_resource_leader.stderr"

# Verify if constraint.resource_follower.id exists
- name: Verify resource_follower presence {{ constraint.resource_follower.id }}
ansible.builtin.command:
cmd: >-
crm -c {{ __ha_cluster_crm_shadow }}
configure show {{ constraint.resource_follower.id }}
register: __ha_cluster_constraint_resource_follower
changed_when: false
failed_when:
- "'does not exist' in __ha_cluster_constraint_resource_follower.stderr"

- name: Define colocation constraint.id '{{
constraint.id | d(constraint_index) }}'
ansible.builtin.set_fact:
__ha_cluster_constraint_id:
"{{ constraint.id if constraint.id is defined else
(constraint.resource_leader.id | quote) + '-colocation' }}"
'col_' + (constraint.resource_leader.id | quote) }}"

# Verify if Shadow CIB already contains same constraint id.
- name: Verify colocation constraint presence {{ __ha_cluster_constraint_id }}
ansible.builtin.command:
cmd: |
cmd: >-
crm -c {{ __ha_cluster_crm_shadow }}
configure show {{ __ha_cluster_constraint_id }}
register: __ha_cluster_constraint_status
@@ -20,27 +42,53 @@
# Delete constraint id in Shadow CIB to avoid errors during cibadmin patch.
- name: Delete present colocation constraint {{ __ha_cluster_constraint_id }}
ansible.builtin.command:
cmd: |
crm -c {{ __ha_cluster_crm_shadow }}
cmd: >-
crm --force -c {{ __ha_cluster_crm_shadow }}
configure delete {{ __ha_cluster_constraint_id }}
when: __ha_cluster_constraint_status.rc == 0
check_mode: false
changed_when: not ansible_check_mode

# The expect module is used to keep crmsh from hanging when it prompts for input
- name: Configure colocation constraint {{ __ha_cluster_constraint_id }}
ansible.builtin.command:
cmd: |
ansible.builtin.expect:
command: |
crm -c {{ __ha_cluster_crm_shadow }}
configure colocation {{ __ha_cluster_constraint_id }}
{% for option in constraint.options | d([]) if option.name == 'score' %}
{{ option.value | lower | replace('infinity', 'inf') | quote }}:
{% else %}
inf:
{% endfor %}
{{ constraint.resource_leader.id
| quote }} {{ constraint.resource_follower.id | quote }}
{% if constraint.resource_follower.role | d() and
constraint.resource_follower.role | lower in __ha_cluster_crmsh_roles %}
{{ constraint.resource_follower.id | quote }}:{{
constraint.resource_follower.role | lower | capitalize | quote }}
{% else %}
{{ constraint.resource_follower.id | quote }}
{% endif %}
{% if constraint.resource_leader.role | d() and
constraint.resource_leader.role | lower in __ha_cluster_crmsh_roles %}
{{ constraint.resource_leader.id | quote }}:{{
constraint.resource_leader.role | lower | capitalize | quote }}
{% else %}
{{ constraint.resource_leader.id | quote }}
{% endif %}
{% for option in constraint.options | d([]) if option.name != 'score' %}
{{ option.name | quote }}={{ option.value | quote }}
{% endfor %}
# ERROR and "Do you still want to commit (y/n)?" trigger response "n".
responses:
".*ERROR.*": "n"
".*y/n*": "n"
check_mode: false
changed_when: not ansible_check_mode
ignore_errors: true
register: __ha_cluster_crmsh_output

- name: Display crm command error details
ansible.builtin.fail:
msg: "{{ __ha_cluster_crmsh_output.stdout_lines }}"
when:
- __ha_cluster_crmsh_output is defined
- __ha_cluster_crmsh_output.rc != 0
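Putting the template together, the task renders a single crmsh colocation command: the score first (defaulting to `inf:`), then the follower and leader, each optionally suffixed with a capitalized role when it is listed in `__ha_cluster_crmsh_roles`. An example of the rendered command, with hypothetical resource and shadow names:

```sh
# "vip" follows the promoted instance of "db"; the id is col_ + leader id:
crm -c my-shadow configure colocation col_db inf: vip:Started db:Master
```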
53 changes: 43 additions & 10 deletions tasks/shell_crmsh/crm-cib-constraint-location.yml
@@ -1,18 +1,31 @@
# SPDX-License-Identifier: MIT
---
# Verify if constraint.resource.id exists
- name: Verify resource presence {{ constraint.resource.id }}
ansible.builtin.command:
cmd: >-
crm -c {{ __ha_cluster_crm_shadow }}
configure show {{ constraint.resource.id }}
register: __ha_cluster_constraint_resource
changed_when: false
when: constraint.resource.pattern is not defined
failed_when:
- "'does not exist' in __ha_cluster_constraint_resource.stderr"

- name: Define location constraint.id {{ constraint.id | d(constraint_index) }}
ansible.builtin.set_fact:
__ha_cluster_constraint_id:
"{{ constraint.id if constraint.id is defined
else (constraint.resource.pattern | regex_replace('[^A-Za-z0-9]', '')
| quote) + '-location'
else 'loc_' +
(constraint.resource.pattern | regex_replace('[^A-Za-z0-9]', '')
| quote)
if constraint.resource.pattern is defined
else (constraint.resource.id | quote) + '-location' }}"
else 'loc_' + (constraint.resource.id | quote) }}"

# Verify if Shadow CIB already contains same constraint id.
- name: Verify location constraint presence {{ __ha_cluster_constraint_id }}
ansible.builtin.command:
cmd: |
cmd: >-
crm -c {{ __ha_cluster_crm_shadow }}
configure show {{ __ha_cluster_constraint_id }}
register: __ha_cluster_constraint_status
@@ -22,36 +35,56 @@
# Delete constraint id in Shadow CIB to avoid errors during cibadmin patch.
- name: Delete present location constraint {{ __ha_cluster_constraint_id }}
ansible.builtin.command:
cmd: |
crm -c {{ __ha_cluster_crm_shadow }}
cmd: >-
crm --force -c {{ __ha_cluster_crm_shadow }}
configure delete {{ __ha_cluster_constraint_id }}
when: __ha_cluster_constraint_status.rc == 0
check_mode: false
changed_when: not ansible_check_mode

# The expect module is used to keep crmsh from hanging when it prompts for input
- name: Configure location constraint {{ __ha_cluster_constraint_id }}
ansible.builtin.command:
cmd: |
ansible.builtin.expect:
command: |
crm -c {{ __ha_cluster_crm_shadow }}
configure location {{ __ha_cluster_constraint_id }}
{% if constraint.resource.pattern | d() %}
/{{ constraint.resource.pattern | quote }}/
{% else %}
{{ constraint.resource.id | quote }}
{% endif %}\
rule
{% if constraint.resource.role | d() and
constraint.resource.role | lower in __ha_cluster_crmsh_roles %}
role={{
constraint.resource.role | lower | capitalize | quote
}}
{% endif %}
rule
{% for option in constraint.options | d([]) if option.name == 'score' %}
{{ option.value | lower | replace('infinity', 'inf') | quote }}:
{% else %}
inf:
{% endfor %}
{% if constraint.rule | d() %}
{{ constraint.rule }}
rule {{ constraint.rule }}
{% else %}
'\'#uname eq {{ constraint.node }}
{% endif %}
{% for option in constraint.options | d([]) if option.name != 'score' %}
{{ option.name | quote }}={{ option.value | quote }}
{% endfor %}
# ERROR and "Do you still want to commit (y/n)?" trigger response "n".
responses:
".*ERROR.*": "n"
".*y/n*": "n"
check_mode: false
changed_when: not ansible_check_mode
ignore_errors: true
register: __ha_cluster_crmsh_output

- name: Display crm command error details
ansible.builtin.fail:
msg: "{{ __ha_cluster_crmsh_output.stdout_lines }}"
when:
- __ha_cluster_crmsh_output is defined
- __ha_cluster_crmsh_output.rc != 0
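As with the colocation file, the end result is one crmsh command: the resource (or `/pattern/`), an optional role, then a `rule` with a score followed by either the user-supplied rule or a node match on `#uname`. Example renderings, with hypothetical resource and node names:

```sh
# Node-pinning form generated from constraint.node:
crm -c my-shadow configure location loc_web web rule inf: '#uname eq node1'
# Pattern form; the id strips non-alphanumerics from the regex:
crm -c my-shadow configure location loc_web /web.*/ rule 100: '#uname eq node2'
```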