diff --git a/defaults/main.yml b/defaults/main.yml
index 755364ae..f57a4a94 100644
--- a/defaults/main.yml
+++ b/defaults/main.yml
@@ -13,6 +13,7 @@ storage_pool_defaults:
   type: lvm
   disks: []
   volumes: []
+  grow_to_fill: false

   encryption: false
   encryption_password: null
diff --git a/library/blivet.py b/library/blivet.py
index d82b86b5..85e49c7a 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -51,6 +51,9 @@
             encryption_password:
                 description: encryption_password
                 type: str
+            grow_to_fill:
+                description: grow_to_fill
+                type: bool
             name:
                 description: name
                 type: str
@@ -370,7 +373,7 @@
     from blivet3.callbacks import callbacks
     from blivet3 import devicelibs
     from blivet3 import devices
-    from blivet3.deviceaction import ActionConfigureFormat, ActionAddMember, ActionRemoveMember
+    from blivet3.deviceaction import ActionConfigureFormat, ActionResizeFormat, ActionAddMember, ActionRemoveMember
     from blivet3.devicefactory import DEFAULT_THPOOL_RESERVE
     from blivet3.flags import flags as blivet_flags
     from blivet3.formats import fslib, get_format
@@ -386,7 +389,7 @@
     from blivet.callbacks import callbacks
     from blivet import devicelibs
     from blivet import devices
-    from blivet.deviceaction import ActionConfigureFormat, ActionAddMember, ActionRemoveMember
+    from blivet.deviceaction import ActionConfigureFormat, ActionResizeFormat, ActionAddMember, ActionRemoveMember
     from blivet.devicefactory import DEFAULT_THPOOL_RESERVE
     from blivet.flags import flags as blivet_flags
     from blivet.formats import fslib, get_format
@@ -412,6 +415,7 @@ def __getattr__(self, val):
     blivet_flags.allow_online_fs_resize = True
     blivet_flags.gfs2 = True
     set_up_logging()
+    log = logging.getLogger(BLIVET_PACKAGE + ".ansible")


 # XXX add support for LVM RAID raid0 level
@@ -1747,6 +1751,18 @@ def _manage_members(self):
         add_disks = [d for d in self._disks if d not in self._device.ancestors]
         remove_disks = [pv for pv in self._device.pvs if not any(d in pv.ancestors for d in self._disks)]

+        if self._pool['grow_to_fill']:
+            grow_pv_candidates = [pv for pv in self._device.pvs if pv not in remove_disks and pv not in add_disks]
+
+            for pv in grow_pv_candidates:
+                if abs(self._device.size - self._device.current_size) < 2 * self._device.pe_size:
+                    continue
+
+                pv.format.update_size_info()  # set pv to be resizable
+                pv.grow_to_fill = True
+                ac = ActionResizeFormat(pv, self._device.size)
+                self._blivet.devicetree.actions.add(ac)
+
         if not (add_disks or remove_disks):
             return

@@ -2135,6 +2151,7 @@ def run_module():
                                 encryption_key_size=dict(type='int'),
                                 encryption_luks_version=dict(type='str'),
                                 encryption_password=dict(type='str', no_log=True),
+                                grow_to_fill=dict(type='bool'),
                                 name=dict(type='str'),
                                 raid_level=dict(type='str'),
                                 raid_device_count=dict(type='int'),
@@ -2279,6 +2296,7 @@ def action_dict(action):
     # execute the scheduled actions, committing changes to disk
     callbacks.action_executed.add(record_action)
     callbacks.action_executed.add(ensure_udev_update)
+
     try:
         b.devicetree.actions.process(devices=b.devicetree.devices, dry_run=module.check_mode)
     except Exception as e:
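Taken together, the two changes above expose a new `grow_to_fill` pool option (default `false`) and teach `_manage_members()` to schedule an `ActionResizeFormat` for each PV that is neither being added nor removed whenever the VG size and its current size differ by at least two physical extents; that `2 * pe_size` guard is what keeps the idempotence rerun in the test added below from scheduling further resizes. From a playbook the option is used like any other pool key. The following is a minimal usage sketch, not part of the patch, mirroring the new test further down; the pool name, disk list, and volume values are illustrative:

```yaml
# Minimal usage sketch (not part of the patch); pool name, disk list and volume
# values are illustrative.
- name: Example of enabling grow_to_fill on an existing pool
  hosts: all
  become: true
  tasks:
    - name: Ensure pool "foo" and let its PVs grow to fill their disks
      include_role:
        name: linux-system-roles.storage
      vars:
        storage_pools:
          - name: foo                    # pre-existing VG whose PVs are smaller than the disks
            disks: "{{ unused_disks }}"  # illustrative disk list
            grow_to_fill: true           # the new option; defaults to false
            state: present
            volumes:
              - name: test1
                size: "2g"
                mount_point: /opt/test1
```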
diff --git a/tests/test-verify-pool-members.yml b/tests/test-verify-pool-members.yml
index 0cca087d..d12d4638 100644
--- a/tests/test-verify-pool-members.yml
+++ b/tests/test-verify-pool-members.yml
@@ -70,6 +70,25 @@
     loop_var: pv
   when: storage_test_pool.type == 'lvm'

+- name: Check that blivet supports PV grow to fill
+  ansible.builtin.script: >-
+    scripts/does_library_support.py
+    blivet.formats.lvmpv.LVMPhysicalVolume.grow_to_fill
+  args:
+    executable: "{{ ansible_python.executable }}"
+  register: grow_supported
+  changed_when: false
+
+- name: Verify that PVs fill the whole devices when they should
+  include_tasks: verify-pool-member-pvsize.yml
+  loop: "{{ _storage_test_pool_pvs | default([]) }}"
+  loop_control:
+    loop_var: st_pool_pv
+  when:
+    - grow_supported.stdout | trim == 'True'
+    - storage_test_pool.type == "lvm"
+    - storage_test_pool.grow_to_fill | bool
+
 - name: Check MD RAID
   include_tasks: verify-pool-md.yml

diff --git a/tests/tests_lvm_pool_pv_grow.yml b/tests/tests_lvm_pool_pv_grow.yml
new file mode 100644
index 00000000..54094c50
--- /dev/null
+++ b/tests/tests_lvm_pool_pv_grow.yml
@@ -0,0 +1,99 @@
+---
+- name: Test create LVM pool with grow to fill and remove
+  hosts: all
+  become: true
+  vars:
+    storage_safe_mode: false
+    mount_location1: '/opt/test1'
+    mount_location2: '/opt/test2'
+    pv_size: '8g'
+    volume1_size: '2g'
+    volume2_size: '3g'
+  tags:
+    - tests::lvm
+
+  tasks:
+    - name: Run the role
+      include_role:
+        name: linux-system-roles.storage
+
+    - name: Mark tasks to be skipped
+      set_fact:
+        storage_skip_checks:
+          - blivet_available
+          - service_facts
+          - "{{ (lookup('env',
+                 'SYSTEM_ROLES_REMOVE_CLOUD_INIT') in ['', 'false']) |
+                ternary('packages_installed', '') }}"
+
+    - name: Get unused disks
+      include_tasks: get_unused_disk.yml
+      vars:
+        max_return: 1
+
+    - name: Create PV with a space to grow
+      command: "pvcreate --setphysicalvolumesize {{ pv_size }} /dev/{{ unused_disks[0] }}"
+      register: pvcreate_output
+      changed_when: pvcreate_output.rc != 0
+
+    # VG has to be present, the role otherwise automatically reformats empty PV,
+    # taking all available space
+    - name: Create VG
+      command: "vgcreate foo /dev/{{ unused_disks[0] }}"
+      register: vgcreate_output
+      changed_when: vgcreate_output.rc != 0
+
+    - name: Create LVM
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_pools:
+          - name: foo
+            disks: "{{ unused_disks }}"
+            grow_to_fill: true
+            state: present
+            volumes:
+              - name: test1
+                size: "{{ volume1_size }}"
+                mount_point: "{{ mount_location1 }}"
+              - name: test2
+                size: "{{ volume2_size }}"
+                mount_point: "{{ mount_location2 }}"
+
+    - name: Verify role results
+      include_tasks: verify-role-results.yml
+
+    - name: Rerun the task to verify idempotence
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_pools:
+          - name: foo
+            disks: "{{ unused_disks }}"
+            grow_to_fill: true
+            state: present
+            volumes:
+              - name: test1
+                size: "{{ volume1_size }}"
+                mount_point: "{{ mount_location1 }}"
+              - name: test2
+                size: "{{ volume2_size }}"
+                mount_point: "{{ mount_location2 }}"
+
+    - name: Verify role results
+      include_tasks: verify-role-results.yml
+
+    - name: Remove 'foo' pool created above
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_pools:
+          - name: foo
+            disks: "{{ unused_disks }}"
+            state: "absent"
+            volumes:
+              - name: test1
+              - name: test2
+
+    - name: Verify role results
+      include_tasks: verify-role-results.yml
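The test above delegates the generic assertions to verify-role-results.yml; when iterating on it by hand, the effect of `grow_to_fill` can also be spot-checked directly with LVM's own reporting, which is the same data the per-PV verification file below is built on. An ad-hoc debugging sketch, not part of the patch:

```yaml
# Ad-hoc debugging sketch (not part of the patch): print device size vs. PV size
# in bytes so a grown PV can be compared against its disk at a glance.
- name: Spot-check PV sizes after a grow_to_fill run
  hosts: all
  become: true
  tasks:
    - name: Report PV and device sizes in bytes
      command: pvs --noheadings --nosuffix --units b -o pv_name,dev_size,pv_size
      register: pv_report
      changed_when: false

    - name: Show the report
      debug:
        var: pv_report.stdout_lines
```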
diff --git a/tests/verify-pool-member-pvsize.yml b/tests/verify-pool-member-pvsize.yml
new file mode 100644
index 00000000..d156e164
--- /dev/null
+++ b/tests/verify-pool-member-pvsize.yml
@@ -0,0 +1,24 @@
+---
+- name: Get actual PV size
+  command: "pvs --noheadings --nosuffix --units b -o SIZE {{ st_pool_pv }}"
+  register: actual_pv_size
+  changed_when: false
+
+- name: Convert blkinfo size to bytes
+  bsize:
+    size: "{{ storage_test_blkinfo.info[st_pool_pv]['size'] }}"
+  register: dev_size
+
+- name: Verify each PV size
+  assert:
+    that: (dev_size.bytes - actual_pv_size.stdout | int) |
+      abs / actual_pv_size.stdout | int < 0.04
+    msg: >-
+      PV resize failure; size difference too big
+      (device size: {{ dev_size.bytes }})
+      (actual PV size: {{ actual_pv_size.stdout }})
+
+- name: Clean up test variables
+  set_fact:
+    actual_pv_size: null
+    dev_size: null
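For reference, the assert accepts a PV when the relative difference between the device size (from `storage_test_blkinfo`) and the PV size reported by `pvs` stays under 4 %, which leaves headroom for LVM metadata and extent alignment. A worked example with illustrative numbers, not taken from an actual test run:

```yaml
# Illustrative arithmetic only (not from a real run):
#   dev_size.bytes (blkinfo)     = 10737418240   # a 10 GiB disk
#   actual_pv_size.stdout (pvs)  = 10733223936   # grown PV; ~4 MiB metadata/alignment overhead
#   |10737418240 - 10733223936| / 10733223936 ≈ 0.0004   -> under 0.04, assert passes
#
#   A PV left at the test's initial 8g size would fail the check:
#   |10737418240 - 8589934592| / 8589934592 ≈ 0.25        -> over 0.04, assert fails
```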