diff --git a/README.md b/README.md index 4520c6b2..f8e3daab 100644 --- a/README.md +++ b/README.md @@ -226,8 +226,9 @@ e.g.: "30g", "50GiB". Mode for the cache. Supported values include `writethrough` (default) and `writeback`. #### `cache_devices` -List of devices (physical volumes) that will be used for the cache. These should be -physical volumes on fast SSD or NVMe drives. +List of devices that will be used for the cache. These should be either physical volumes or +drives these physical volumes are allocated on. Generally you want to select fast devices like +SSD or NVMe drives for cache. #### `storage_safe_mode` When true (the default), an error will occur instead of automatically removing existing devices and/or formatting. diff --git a/library/blivet.py b/library/blivet.py index 3ceb7552..f0b4af51 100644 --- a/library/blivet.py +++ b/library/blivet.py @@ -770,11 +770,17 @@ def _manage_cache(self): self._detach_cache() def _get_params_lvmcache(self): + parent = self._blivet_pool._device fast_pvs = [] - for pv in self._volume['cache_devices']: - pv_device = self._blivet.devicetree.resolve_device(pv) + for cache_spec in self._volume['cache_devices']: + cache_device = self._blivet.devicetree.resolve_device(cache_spec) + if cache_device is None: + raise BlivetAnsibleError("cache device '%s' not found" % cache_spec) + + pv_device = next((pv for pv in parent.pvs if cache_device.name in [an.name for an in pv.ancestors]), + None) if pv_device is None: - raise BlivetAnsibleError("cache device '%s' not found" % pv) + raise BlivetAnsibleError("cache device '%s' doesn't seem to be a physical volume or its parent" % cache_spec) fast_pvs.append(pv_device) cache_request = devices.lvm.LVMCacheRequest(size=Size(self._volume['cache_size']), diff --git a/tests/tests_create_lvm_cache_then_remove.yml b/tests/tests_create_lvm_cache_then_remove.yml index 748d07a1..1e11cfa9 100644 --- a/tests/tests_create_lvm_cache_then_remove.yml +++ b/tests/tests_create_lvm_cache_then_remove.yml 
@@ -3,11 +3,12 @@ become: true vars: storage_safe_mode: false - mount_location1: '/opt/test1' - mount_location2: '/opt/test2' + storage_use_partitions: true volume_group_size: '10g' volume_size: '5g' cache_size: '4g' + tags: + - tests::lvm tasks: - include_role: diff --git a/tests/tests_fatals_cache_volume.yml b/tests/tests_fatals_cache_volume.yml index 7fdeb696..fc016f96 100644 --- a/tests/tests_fatals_cache_volume.yml +++ b/tests/tests_fatals_cache_volume.yml @@ -3,6 +3,11 @@ become: true vars: storage_safe_mode: false + volume_group_size: '10g' + volume_size: '5g' + cache_size: '4g' + tags: + - tests::lvm tasks: - include_role: @@ -18,6 +23,7 @@ - include_tasks: get_unused_disk.yml vars: max_return: 2 + disks_needed: 2 - name: Verify that creating a cached partition volume fails block: @@ -44,3 +50,30 @@ that: - ansible_failed_result.msg != 'UNREACH' msg: "Role has not failed when it should have" + + - name: Verify that creating cache on unused disk fails + block: + - name: Create cached volume + include_role: + name: linux-system-roles.storage + vars: + storage_pools: + - name: "foo" + disks: "{{ [unused_disks[0]] }}" + volumes: + - name: test1 + size: "{{ volume_size }}" + cached: true + cache_size: "{{ cache_size }}" + cache_devices: "{{ [unused_disks[1]] }}" + + - name: unreachable task + fail: + msg: UNREACH + + rescue: + - name: Check that we failed in the role + assert: + that: + - ansible_failed_result.msg != 'UNREACH' + msg: "Role has not failed when it should have"