rebase storage role to latest upstream

Rebase to latest upstream (1937938)

Related: rhbz#1961404
Rich Megginson authored on 2021-04-01 23:27:27 +02:00; committed by Noriko Hosoi
parent a06bd9db99
commit bd305606ba
5 changed files with 9 additions and 515 deletions

The RPM spec file: move the storage role (rolename6) to the new upstream commit, drop the three downstream storage patches that are now part of upstream, and renumber the refreshed ansible-test patch from Patch65 to Patch61.

@@ -117,7 +117,7 @@ BuildRequires: ansible >= 2.9.10
 %global rolename5 network
 #%%deftag 5 1.0.0
-%defcommit 6 485de47b0dc0787aea077ba448ecb954f53e40c4
+%defcommit 6 2c3eeb8b2dd898d8c589a0740c2ba9b707e4ed2c
 %global rolename6 storage
 #%%deftag 6 1.2.2
@@ -207,10 +207,7 @@ Patch55: network-disable-bondtests.diff
 Patch56: network-pr353.diff
 Patch57: network-ansible-test.diff
-Patch62: storage-partition-name.diff
-Patch63: storage-no-disks-existing.diff
-Patch64: storage-trim-volume-size.diff
-Patch65: storage-ansible-test.diff
+Patch61: storage-ansible-test.diff
 Patch71: metrics-mssql-x86.diff
@@ -292,10 +289,7 @@ cd %{rolename5}
 %patch57 -p1
 cd ..
 cd %{rolename6}
-%patch62 -p1
-%patch63 -p1
-%patch64 -p1
-%patch65 -p1
+%patch61 -p1
 cd ..
 cd %{rolename7}
 %patch71 -p1

storage-ansible-test.diff: regenerated against the new upstream commit — the patch's From hash, inner index hashes, and hunk offsets shift, and the cherry-pick annotation is dropped.

@@ -1,12 +1,10 @@
-From 1d7f9d53c5be6588a7a6c34e4c623b2a8f6fff19 Mon Sep 17 00:00:00 2001
+From 0a69c057b41890d3d426ac10dfc198e7a3dbab4e Mon Sep 17 00:00:00 2001
 From: Rich Megginson <rmeggins@redhat.com>
 Date: Wed, 3 Mar 2021 07:55:20 -0700
 Subject: [PATCH] resolve ansible-test issues
 This fixes many formatting issues as well to make black, flake8,
 pylint, yamllint, and ansible-lint happier.
-(cherry picked from commit bb2a1af5f63d00c3ff178f3b44696189d9adf542)
 ---
  .github/workflows/tox.yml | 4 +-
  .sanity-ansible-ignore-2.9.txt | 13 +
@@ -2912,10 +2910,10 @@ index 854ac0d..2490914 100644
 - include_tasks: verify-role-results.yml
 diff --git a/tests/tests_lvm_auto_size_cap.yml b/tests/tests_lvm_auto_size_cap.yml
-index fb17c23..8c754a6 100644
+index 30aa6a7..eae7ff3 100644
 --- a/tests/tests_lvm_auto_size_cap.yml
 +++ b/tests/tests_lvm_auto_size_cap.yml
-@@ -33,12 +33,12 @@
+@@ -35,12 +35,12 @@
 name: linux-system-roles.storage
 vars:
 storage_pools:
@@ -2934,7 +2932,7 @@ index fb17c23..8c754a6 100644
 - name: unreachable task
 fail:
 msg: UNREACH
-@@ -56,11 +56,11 @@
+@@ -58,11 +58,11 @@
 name: linux-system-roles.storage
 vars:
 storage_pools:
@@ -2951,7 +2949,7 @@ index fb17c23..8c754a6 100644
 - include_tasks: verify-role-results.yml
-@@ -69,12 +69,12 @@
+@@ -71,12 +71,12 @@
 name: linux-system-roles.storage
 vars:
 storage_pools:
@@ -2970,7 +2968,7 @@ index fb17c23..8c754a6 100644
 - include_tasks: verify-role-results.yml
-@@ -83,7 +83,7 @@
+@@ -85,7 +85,7 @@
 name: linux-system-roles.storage
 vars:
 storage_pools:

storage-no-disks-existing.diff: deleted (now part of upstream). It let an existing pool be addressed by name alone, without re-listing its disks, and re-triggered udev for newly created devices.

@@ -1,142 +0,0 @@
diff --git a/library/blivet.py b/library/blivet.py
index eb8bb11..e927121 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -104,6 +104,7 @@ try:
from blivet3.formats import get_format
from blivet3.partitioning import do_partitioning
from blivet3.size import Size
+ from blivet3.udev import trigger
from blivet3.util import set_up_logging
BLIVET_PACKAGE = 'blivet3'
except ImportError:
@@ -116,6 +117,7 @@ except ImportError:
from blivet.formats import get_format
from blivet.partitioning import do_partitioning
from blivet.size import Size
+ from blivet.udev import trigger
from blivet.util import set_up_logging
BLIVET_PACKAGE = 'blivet'
except ImportError:
@@ -821,7 +823,10 @@ class BlivetPool(BlivetBase):
def _look_up_disks(self):
""" Look up the pool's disks in blivet's device tree. """
- if not self._pool['disks']:
+ if self._disks:
+ return
+
+ if not self._device and not self._pool['disks']:
raise BlivetAnsibleError("no disks specified for pool '%s'" % self._pool['name'])
elif not isinstance(self._pool['disks'], list):
raise BlivetAnsibleError("pool disks must be specified as a list")
@@ -832,7 +837,7 @@ class BlivetPool(BlivetBase):
if device is not None: # XXX fail if any disk isn't resolved?
disks.append(device)
- if self._pool['disks'] and not disks:
+ if self._pool['disks'] and not self._device and not disks:
raise BlivetAnsibleError("unable to resolve any disks specified for pool '%s' (%s)" % (self._pool['name'], self._pool['disks']))
self._disks = disks
@@ -974,9 +979,9 @@ class BlivetPool(BlivetBase):
""" Schedule actions to configure this pool according to the yaml input. """
global safe_mode
# look up the device
- self._look_up_disks()
self._look_up_device()
self._apply_defaults()
+ self._look_up_disks()
# schedule destroy if appropriate, including member type change
if not self.ultimately_present:
@@ -999,6 +1004,7 @@ class BlivetPartitionPool(BlivetPool):
return self._device.partitionable
def _look_up_device(self):
+ self._look_up_disks()
self._device = self._disks[0]
def _create(self):
@@ -1354,6 +1360,13 @@ def run_module():
actions.append(action)
+ def ensure_udev_update(action):
+ if action.is_create:
+ sys_path = action.device.path
+ if os.path.islink(sys_path):
+ sys_path = os.readlink(action.device.path)
+ trigger(action='change', subsystem='block', name=os.path.basename(sys_path))
+
def action_dict(action):
return dict(action=action.type_desc_str,
fs_type=action.format.type if action.is_format else None,
@@ -1395,6 +1408,7 @@ def run_module():
if scheduled:
# execute the scheduled actions, committing changes to disk
callbacks.action_executed.add(record_action)
+ callbacks.action_executed.add(ensure_udev_update)
try:
b.devicetree.actions.process(devices=b.devicetree.devices, dry_run=module.check_mode)
except Exception as e:
diff --git a/tests/tests_existing_lvm_pool.yml b/tests/tests_existing_lvm_pool.yml
new file mode 100644
index 0000000..854ac0d
--- /dev/null
+++ b/tests/tests_existing_lvm_pool.yml
@@ -0,0 +1,54 @@
+---
+- hosts: all
+ become: true
+ vars:
+ mount_location: '/opt/test1'
+ volume_group_size: '5g'
+ volume_size: '4g'
+ pool_name: foo
+
+ tasks:
+ - include_role:
+ name: linux-system-roles.storage
+
+ - include_tasks: get_unused_disk.yml
+ vars:
+ min_size: "{{ volume_group_size }}"
+ max_return: 1
+
+ - name: Create one LVM logical volume under one volume group
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_pools:
+ - name: "{{ pool_name }}"
+ disks: "{{ unused_disks }}"
+ volumes:
+ - name: test1
+ size: "{{ volume_size }}"
+
+ - include_tasks: verify-role-results.yml
+
+ - name: Create another volume in the existing pool, identified only by name.
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_pools:
+ - name: "{{ pool_name }}"
+ volumes:
+ - name: newvol
+ size: '2 GiB'
+ fs_type: ext4
+ fs_label: newvol
+
+ - include_tasks: verify-role-results.yml
+
+ - name: Clean up.
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_pools:
+ - name: "{{ pool_name }}"
+ state: absent
+
+ - include_tasks: verify-role-results.yml
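
What the dropped patch did, in brief: blivet.py tolerates pools declared without disks when the pool device already exists (disk lookup is deferred and skipped once the disks are resolved), and it registers a second action_executed callback that pokes udev after every create action. A minimal self-contained sketch of that callback, assembled from the hunks above (blivet.udev.trigger is the import the patch adds):

    import os

    from blivet.udev import trigger  # the patch adds this import for both blivet and blivet3

    def ensure_udev_update(action):
        # For newly created devices, fire a synthetic 'change' uevent so
        # udev re-scans the block device and its /dev entries are current.
        if action.is_create:
            sys_path = action.device.path
            if os.path.islink(sys_path):
                sys_path = os.readlink(action.device.path)
            trigger(action='change', subsystem='block', name=os.path.basename(sys_path))

    # Registered next to the existing record_action callback:
    # callbacks.action_executed.add(ensure_udev_update)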

storage-partition-name.diff: deleted (now part of upstream).

@@ -1,30 +0,0 @@
commit effb7faf20301ddcee8ee36a1b156a0b9f006bb0
Author: David Lehman <dlehman@redhat.com>
Date: Tue Aug 4 16:00:33 2020 -0400
Be smarter in choosing expected partition name.
BlivetVolume._get_device_id is only used to look up pre-existing
volumes, so we don't have to try too hard to guess it by name.
We can just see if the disk has a single partition and, if so,
return the name of that partition.
Fixes: #141
diff --git a/library/blivet.py b/library/blivet.py
index eb8bb11..0f7ce98 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -554,7 +554,11 @@ class BlivetPartitionVolume(BlivetVolume):
return self._device.raw_device.type == 'partition'
def _get_device_id(self):
- return self._blivet_pool._disks[0].name + '1'
+ device_id = None
+ if self._blivet_pool._disks[0].partitioned and len(self._blivet_pool._disks[0].children) == 1:
+ device_id = self._blivet_pool._disks[0].children[0].name
+
+ return device_id
def _resize(self):
pass
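
The heuristic is worth spelling out: the old code guessed the first partition's name as disk.name + '1', which is wrong for disks whose partition names carry a 'p' separator (the first partition of nvme0n1 is nvme0n1p1, not nvme0n11). Since _get_device_id only looks up pre-existing volumes, the patch just returns the sole existing partition's name when there is exactly one. A standalone sketch, with disk standing in for self._blivet_pool._disks[0]:

    def expected_partition_name(disk):
        # Only answer when the disk is partitioned and has exactly one
        # partition; otherwise give up and return None.
        if disk.partitioned and len(disk.children) == 1:
            return disk.children[0].name  # e.g. 'sda1' or 'nvme0n1p1'
        return None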

storage-trim-volume-size.diff: deleted (now part of upstream). It allowed a requested volume size to be trimmed by up to 2% to fit the available space instead of failing outright.

@@ -1,326 +0,0 @@
diff --git a/library/blivet.py b/library/blivet.py
index e927121..f59f821 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -130,6 +130,9 @@ if BLIVET_PACKAGE:
set_up_logging()
log = logging.getLogger(BLIVET_PACKAGE + ".ansible")
+
+MAX_TRIM_PERCENT = 2
+
use_partitions = None # create partitions on pool backing device disks?
disklabel_type = None # user-specified disklabel type
safe_mode = None # do not remove any existing devices or formatting
@@ -445,8 +448,16 @@ class BlivetVolume(BlivetBase):
if not self._device.resizable:
return
- if self._device.format.resizable:
- self._device.format.update_size_info()
+ trim_percent = (1.0 - float(self._device.max_size / size))*100
+ log.debug("resize: size=%s->%s ; trim=%s", self._device.size, size, trim_percent)
+ if size > self._device.max_size and trim_percent <= MAX_TRIM_PERCENT:
+ log.info("adjusting %s resize target from %s to %s to fit in free space",
+ self._volume['name'],
+ size,
+ self._device.max_size)
+ size = self._device.max_size
+ if size == self._device.size:
+ return
if not self._device.min_size <= size <= self._device.max_size:
raise BlivetAnsibleError("volume '%s' cannot be resized to '%s'" % (self._volume['name'], size))
@@ -610,10 +621,18 @@ class BlivetLVMVolume(BlivetVolume):
raise BlivetAnsibleError("invalid size '%s' specified for volume '%s'" % (self._volume['size'], self._volume['name']))
fmt = self._get_format()
+ trim_percent = (1.0 - float(parent.free_space / size))*100
+ log.debug("size: %s ; %s", size, trim_percent)
if size > parent.free_space:
- raise BlivetAnsibleError("specified size for volume '%s' exceeds available space in pool '%s' (%s)" % (size,
- parent.name,
- parent.free_space))
+ if trim_percent > MAX_TRIM_PERCENT:
+ raise BlivetAnsibleError("specified size for volume '%s' exceeds available space in pool '%s' (%s)"
+ % (size, parent.name, parent.free_space))
+ else:
+ log.info("adjusting %s size from %s to %s to fit in %s free space", self._volume['name'],
+ size,
+ parent.free_space,
+ parent.name)
+ size = parent.free_space
try:
device = self._blivet.new_lv(name=self._volume['name'],
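
Before the test changes below, the numbers behind MAX_TRIM_PERCENT deserve a worked example: a requested size may overshoot what the device or pool can actually provide by up to 2% and is then silently trimmed to fit; a larger overshoot still fails. A sketch with hypothetical sizes:

    MAX_TRIM_PERCENT = 2

    requested = 10.0 * 2**30     # user asks for 10 GiB
    available = 9.9 * 2**30      # free space / max size is ~9.9 GiB

    trim_percent = (1.0 - available / requested) * 100   # 1.0 (%)
    if requested > available:
        if trim_percent <= MAX_TRIM_PERCENT:
            requested = available  # 1% overshoot: trim and proceed
        else:
            # >2% overshoot: fail, as before the patch
            raise ValueError("specified size exceeds available space")

This is also why the tests_misc.yml and tests_resize.yml hunks below change too_large_size from one extra sector to sectors * 1.2: a single-sector overshoot now falls inside the 2% trim window and would no longer trigger the error path.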
diff --git a/tests/tests_create_lv_size_equal_to_vg.yml b/tests/tests_create_lv_size_equal_to_vg.yml
new file mode 100644
index 0000000..21a5788
--- /dev/null
+++ b/tests/tests_create_lv_size_equal_to_vg.yml
@@ -0,0 +1,48 @@
+---
+- hosts: all
+ become: true
+ vars:
+ storage_safe_mode: false
+ mount_location: '/opt/test1'
+ volume_group_size: '10g'
+ lv_size: '10g'
+ unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}'
+ disk_size: '{{ unused_disk_subfact.sectors|int *
+ unused_disk_subfact.sectorsize|int }}'
+
+ tasks:
+ - include_role:
+ name: linux-system-roles.storage
+
+ - include_tasks: get_unused_disk.yml
+ vars:
+ min_size: "{{ volume_group_size }}"
+ max_return: 1
+
+ - name: Create one lv which size is equal to vg size
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_pools:
+ - name: foo
+ disks: "{{ unused_disks }}"
+ volumes:
+ - name: test1
+ size: "{{ lv_size }}"
+ mount_point: "{{ mount_location }}"
+
+ - include_tasks: verify-role-results.yml
+
+ - name: Clean up
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_pools:
+ - name: foo
+ disks: "{{ unused_disks }}"
+ state: "absent"
+ volumes:
+ - name: test1
+ mount_point: "{{ mount_location }}"
+
+ - include_tasks: verify-role-results.yml
diff --git a/tests/tests_lvm_auto_size_cap.yml b/tests/tests_lvm_auto_size_cap.yml
new file mode 100644
index 0000000..fb17c23
--- /dev/null
+++ b/tests/tests_lvm_auto_size_cap.yml
@@ -0,0 +1,89 @@
+---
+- hosts: all
+ become: true
+
+ tasks:
+ - include_role:
+ name: linux-system-roles.storage
+
+ - include_tasks: get_unused_disk.yml
+ vars:
+ min_size: 10g
+ max_return: 1
+
+ - command: lsblk -b -l --noheadings -o NAME,SIZE
+ register: storage_test_lsblk
+
+ - set_fact:
+ test_disk_size: "{{ storage_test_lsblk.stdout_lines|map('regex_search', '^' + unused_disks[0] + '\\s+\\d+$')|select('string')|first|regex_replace('^\\w+\\s+', '') }}"
+
+ - package:
+ name: bc
+ state: installed
+
+ - command:
+ cmd: bc
+ stdin: "{{ test_disk_size }} *2"
+ register: doubled_size
+
+ - name: Test handling of too-large LVM volume size
+ block:
+ - name: Try to create a pool containing one volume twice the size of the backing disk
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_pools:
+ - name: foo
+ type: lvm
+ disks: "{{ unused_disks }}"
+ volumes:
+ - name: test1
+ size: "{{ doubled_size.stdout|trim }}"
+ - name: unreachable task
+ fail:
+ msg: UNREACH
+ rescue:
+ - name: Check that we failed in the role
+ assert:
+ that:
+ - ansible_failed_result.msg != 'UNREACH'
+ - blivet_output.failed and
+ blivet_output.msg|regex_search('specified size for volume.+exceeds available')
+ msg: "Role has not failed when it should have"
+
+ - name: Create a pool containing one volume the same size as the backing disk
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_pools:
+ - name: foo
+ disks: "{{ unused_disks }}"
+ volumes:
+ - name: test1
+ size: "{{ test_disk_size }}"
+
+ - include_tasks: verify-role-results.yml
+
+ - name: Repeat the previous invocation to verify idempotence
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_pools:
+ - name: foo
+ type: lvm
+ disks: "{{ unused_disks }}"
+ volumes:
+ - name: test1
+ size: "{{ test_disk_size }}"
+
+ - include_tasks: verify-role-results.yml
+
+ - name: Clean up
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_pools:
+ - name: foo
+ disks: "{{ unused_disks }}"
+ state: absent
+ volumes: []
diff --git a/tests/tests_lvm_errors.yml b/tests/tests_lvm_errors.yml
index 37d41dc..e8dc4f4 100644
--- a/tests/tests_lvm_errors.yml
+++ b/tests/tests_lvm_errors.yml
@@ -11,8 +11,6 @@
- '/non/existent/disk'
invalid_size: 'xyz GiB'
unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}'
- too_large_size: '{{ (unused_disk_subfact.sectors|int + 1) *
- unused_disk_subfact.sectorsize|int }}'
tasks:
- include_role:
@@ -86,39 +84,6 @@
- ansible_failed_result.msg != 'UNREACH'
msg: "Role has not failed when it should have"
- # the following does not work properly
- # - name: Verify the output
- # assert:
- # that: "{{ blivet_output.failed and
- # blivet_output.msg|regex_search('invalid size.+for volume') and
- # not blivet_output.changed }}"
- # msg: "Unexpected behavior w/ invalid volume size"
-
- - name: Test for correct handling of too-large volume size.
- block:
- - name: Try to create LVM with a too-large volume size.
- include_role:
- name: linux-system-roles.storage
- vars:
- storage_pools:
- - name: foo
- disks: "{{ unused_disks }}"
- volumes:
- - name: test1
- size: "{{ too_large_size }}"
- mount_point: "{{ mount_location1 }}"
-
- - name: unreachable task
- fail:
- msg: UNREACH
-
- rescue:
- - name: Check that we failed in the role
- assert:
- that:
- - ansible_failed_result.msg != 'UNREACH'
- msg: "Role has not failed when it should have"
-
# the following does not work properly
# - name: Verify the output
# assert:
@@ -138,7 +103,7 @@
disks: "{{ unused_disks[0] }}"
volumes:
- name: test1
- size: "{{ too_large_size }}"
+ size: "{{ volume_size }}"
mount_point: "{{ mount_location1 }}"
- name: unreachable task
@@ -171,7 +136,7 @@
disks: []
volumes:
- name: test1
- size: "{{ too_large_size }}"
+ size: "{{ volume1_size }}"
mount_point: "{{ mount_location1 }}"
- name: unreachable task
diff --git a/tests/tests_misc.yml b/tests/tests_misc.yml
index a69ee98..3139bc7 100644
--- a/tests/tests_misc.yml
+++ b/tests/tests_misc.yml
@@ -7,7 +7,7 @@
volume_group_size: '5g'
volume1_size: '4g'
unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}'
- too_large_size: '{{ (unused_disk_subfact.sectors|int + 1) *
+ too_large_size: '{{ (unused_disk_subfact.sectors|int * 1.2) *
unused_disk_subfact.sectorsize|int }}'
tasks:
diff --git a/tests/tests_resize.yml b/tests/tests_resize.yml
index 9eeb2b9..209d129 100644
--- a/tests/tests_resize.yml
+++ b/tests/tests_resize.yml
@@ -9,7 +9,7 @@
invalid_size1: 'xyz GiB'
invalid_size2: 'none'
unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}'
- too_large_size: '{{ (unused_disk_subfact.sectors|int + 1) *
+ too_large_size: '{{ unused_disk_subfact.sectors|int * 1.2 *
unused_disk_subfact.sectorsize|int }}'
disk_size: '{{ unused_disk_subfact.sectors|int *
unused_disk_subfact.sectorsize|int }}'
@@ -122,23 +122,7 @@
size: "{{ disk_size }}"
mount_point: "{{ mount_location }}"
- - name: Unreachable task
- fail:
- msg: UNREACH
-
- rescue:
- - name: Check that we failed in the role
- assert:
- that:
- - ansible_failed_result.msg != 'UNREACH'
- msg: "Role has not failed when it should have"
-
- - name: Verify the output
- assert:
- that: "blivet_output.failed and
- blivet_output.msg|regex_search('volume.+cannot be resized to.+') and
- not blivet_output.changed"
- msg: "Unexpected behavior w/ invalid volume size"
+ - include_tasks: verify-role-results.yml
- name: Test for correct handling of invalid size specification
block: