diff --git a/.github/workflows/pr-welcome-msg.yml b/.github/workflows/pr-welcome-msg.yml index 0102c41f..f056fb79 100644 --- a/.github/workflows/pr-welcome-msg.yml +++ b/.github/workflows/pr-welcome-msg.yml @@ -19,7 +19,7 @@ jobs: issue-number: ${{ github.event.pull_request.number }} body: | ## **Thank you for contributing to the Leapp project!** - Please note that every PR needs to comply with the [Leapp Guidelines](https://leapp.readthedocs.io/en/latest/contributing.html#) and must pass all tests in order to be mergeable. + Please note that every PR needs to comply with the [leapp-repository contribution and development guidelines](https://leapp-repository.readthedocs.io/latest/contrib-and-devel-guidelines.html) and must pass all tests in order to be mergeable. If you want to request a review or rebuild a package in copr, you can use following commands as a comment: - **`review please @oamg/developers`** to notify leapp developers of the review request - **`/packit copr-build`** to submit a public copr build using packit @@ -39,6 +39,6 @@ jobs: See other labels for particular jobs defined in the `.packit.yaml` file. - Please [open ticket](https://url.corp.redhat.com/oamg-ci-issue) in case you experience technical problem with the CI. (RH internal only) + Please [open ticket](https://red.ht/rhel-upgrades-ci-issue) in case you experience technical problem with the CI. (RH internal only) **Note:** In case there are problems with tests not being triggered automatically on new PR/commit or pending for a long time, please contact leapp-infra. 
diff --git a/.gitignore b/.gitignore index 0bb92d3d..a04c7ded 100644 --- a/.gitignore +++ b/.gitignore @@ -115,6 +115,7 @@ ENV/ # visual studio code configuration .vscode +*.code-workspace # pycharm .idea diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d4cb2046..7315b693 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1 +1 @@ -See the [Contribution guidelines](https://leapp.readthedocs.io/en/latest/contributing.html) +See the [contribution guidelines](https://leapp-repository.readthedocs.io/latest/contrib-and-devel-guidelines.html). diff --git a/Makefile b/Makefile index 6769b2f3..3d9f2857 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ SHELL=/bin/bash __PKGNAME=$${_PKGNAME:-leapp-repository} VENVNAME ?= tut -DIST_VERSION ?= 7 +DIST_VERSION ?= 8 PKGNAME=leapp-repository DEPS_PKGNAME=leapp-el7toel8-deps VERSION=`grep -m1 "^Version:" packaging/$(PKGNAME).spec | grep -om1 "[0-9].[0-9.]**"` @@ -23,7 +23,7 @@ PYLINT_ARGS ?= FLAKE8_ARGS ?= # python version to run test with -_PYTHON_VENV=$${PYTHON_VENV:-python2.7} +_PYTHON_VENV=$${PYTHON_VENV:-python3.6} ifdef ACTOR TEST_PATHS=`$(_PYTHON_VENV) utils/actor_path.py $(ACTOR)` @@ -55,9 +55,9 @@ _TEST_CONTAINER=$${TEST_CONTAINER:-rhel8} # In case just specific CHROOTs should be used for the COPR build, you can # set the multiple CHROOTs separated by comma in the COPR_CHROOT envar, e.g. -# "epel-7-x86_64,epel-8-x86_64". But for the copr-cli utility, each of them +# "epel-8-x86_64,epel-9-x86_64". But for the copr-cli utility, each of them # has to be specified separately for the -r option; So we transform it -# automatically to "-r epel-7-x86_64 -r epel-8-x86_64" (without quotes). +# automatically to "-r epel-8-x86_64 -r epel-9-x86_64" (without quotes). 
ifdef COPR_CHROOT _COPR_CHROOT=`echo $${COPR_CHROOT} | grep -o "[^,]*" | sed "s/^/-r /g"` endif @@ -111,7 +111,7 @@ help: @echo " packaging" @echo " srpm create the SRPM" @echo " build_container create the RPM in container" - @echo " - set BUILD_CONTAINER to el7, el8 or el9" + @echo " - set BUILD_CONTAINER to el8 or el9" @echo " - don't run more than one build at the same time" @echo " since containers operate on the same files!" @echo " copr_build create the COPR build using the COPR TOKEN" @@ -154,7 +154,7 @@ help: @echo " COPR_CONFIG path to the COPR config with API token" @echo " (default: ~/.config/copr_rh_oamg.conf)" @echo " COPR_CHROOT specify the CHROOT which should be used for" - @echo " the build, e.g. 'epel-7-x86_64'. You can" + @echo " the build, e.g. 'epel-8-x86_64'. You can" @echo " specify multiple CHROOTs separated by comma." @echo "" @echo "Possible use:" @@ -189,7 +189,6 @@ source: prepare @git archive --prefix "$(PKGNAME)-$(VERSION)/" -o "packaging/sources/$(PKGNAME)-$(VERSION).tar.gz" HEAD @echo "--- PREPARE DEPS PKGS ---" mkdir -p packaging/tmp/ - @__TIMESTAMP=$(TIMESTAMP) $(MAKE) DIST_VERSION=7 _build_subpkg @__TIMESTAMP=$(TIMESTAMP) $(MAKE) DIST_VERSION=8 _build_subpkg @__TIMESTAMP=$(TIMESTAMP) $(MAKE) DIST_VERSION=9 _build_subpkg @tar -czf packaging/sources/deps-pkgs.tar.gz -C packaging/RPMS/noarch `ls -1 packaging/RPMS/noarch | grep -o "[^/]*rpm$$"` @@ -250,11 +249,8 @@ _build_local: source @mv packaging/$(PKGNAME).spec.bak packaging/$(PKGNAME).spec build_container: - echo "--- Build RPM ${PKGNAME}-${VERSION}-${RELEASE}.el$(DIST_VERSION).rpm in container ---"; \ + echo "--- Build RPM ${PKGNAME}-${VERSION}-${RELEASE}.el$(DIST_VERSION).rpm in container ---"; case "$(BUILD_CONTAINER)" in \ - el7) \ - CONT_FILE="utils/container-builds/Containerfile.centos7"; \ - ;; \ el8) \ CONT_FILE="utils/container-builds/Containerfile.ubi8"; \ ;; \ @@ -266,7 +262,7 @@ build_container: exit 1; \ ;; \ *) \ - echo "Available containers are el7, el8, el9"; \ + 
echo "Available containers are el8, el9"; \ exit 1; \ ;; \ esac && \ @@ -375,13 +371,15 @@ lint_fix: echo "--- isort inplace fixing done. ---;" test_no_lint: - . $(VENVNAME)/bin/activate; \ + @. $(VENVNAME)/bin/activate; \ snactor repo find --path repos/; \ - cd repos/system_upgrade/el7toel8/; \ - snactor workflow sanity-check ipu && \ - cd - && \ + for dir in repos/system_upgrade/*/; do \ + echo "Running sanity-check in $$dir"; \ + (cd $$dir && snactor workflow sanity-check ipu); \ + done; \ $(_PYTHON_VENV) -m pytest $(REPORT_ARG) $(TEST_PATHS) $(LIBRARY_PATH) $(PYTEST_ARGS) + test: lint test_no_lint # container images act like a cache so that dependencies can only be downloaded once @@ -396,9 +394,6 @@ _build_container_image: # tests one IPU, leapp repositories irrelevant to the tested IPU are deleted _test_container_ipu: @case $$TEST_CONT_IPU in \ - el7toel8) \ - export REPOSITORIES="common,el7toel8"; \ - ;; \ el8toel9) \ export REPOSITORIES="common,el8toel9"; \ ;; \ @@ -409,7 +404,7 @@ _test_container_ipu: echo "TEST_CONT_IPU must be set"; exit 1; \ ;; \ *) \ - echo "Only supported TEST_CONT_IPUs are el7toel8, el8toel9, el9toel10"; exit 1; \ + echo "Only supported TEST_CONT_IPUs are el8toel9, el9toel10"; exit 1; \ ;; \ esac && \ $(_CONTAINER_TOOL) exec -w /repocopy $$_CONT_NAME make clean && \ @@ -421,25 +416,20 @@ lint_container: @_TEST_CONT_TARGET="lint" $(MAKE) test_container lint_container_all: - @for container in "f34" "rhel7" "rhel8"; do \ + @for container in "f34" "rhel8" "rhel9"; do \ TEST_CONTAINER=$$container $(MAKE) lint_container || exit 1; \ done # Runs tests in a container # Builds testing image first if it doesn't exist # On some Python versions, we need to test both IPUs, -# because e.g. RHEL7 to RHEL8 IPU must work on python2.7 and python3.6 -# and RHEL8 to RHEL9 IPU must work on python3.6 and python3.9. +# because e.g RHEL8 to RHEL9 IPU must work on python3.6 and python3.9. 
test_container: @case $(_TEST_CONTAINER) in \ f34) \ export CONT_FILE="utils/container-tests/Containerfile.f34"; \ export _VENV="python3.9"; \ ;; \ - rhel7) \ - export CONT_FILE="utils/container-tests/Containerfile.rhel7"; \ - export _VENV="python2.7"; \ - ;; \ rhel8) \ export CONT_FILE="utils/container-tests/Containerfile.rhel8"; \ export _VENV="python3.6"; \ @@ -449,7 +439,7 @@ test_container: export _VENV="python3.9"; \ ;; \ *) \ - echo "Error: Available containers are: f34, rhel7, rhel8"; exit 1; \ + echo "Error: Available containers are: f34, rhel8, rhel9"; exit 1; \ ;; \ esac; \ export TEST_IMAGE="leapp-repo-tests-$(_TEST_CONTAINER)"; \ @@ -461,11 +451,7 @@ test_container: $(_CONTAINER_TOOL) exec $$_CONT_NAME rsync -aur --delete --exclude "tut*" /repo/ /repocopy && \ export res=0; \ case $$_VENV in \ - python2.7) \ - TEST_CONT_IPU=el7toel8 $(MAKE) _test_container_ipu || res=1; \ - ;;\ python3.6) \ - echo "INFO: Skipping testing of el7toel8 repository. Obsoleted"; \ TEST_CONT_IPU=el8toel9 $(MAKE) _test_container_ipu || res=1; \ ;; \ python3.9) \ @@ -485,7 +471,7 @@ test_container: exit $$res test_container_all: - @for container in "f34" "rhel7" "rhel8" "rhel9"; do \ + @for container in "f34" "rhel8" "rhel9"; do \ TEST_CONTAINER=$$container $(MAKE) test_container || exit 1; \ done @@ -493,14 +479,14 @@ test_container_no_lint: @_TEST_CONT_TARGET="test_no_lint" $(MAKE) test_container test_container_all_no_lint: - @for container in "f34" "rhel7" "rhel8" "rhel9"; do \ + @for container in "f34" "rhel8" "rhel9"; do \ TEST_CONTAINER=$$container $(MAKE) test_container_no_lint || exit 1; \ done # clean all testing and building containers and their images clean_containers: - @for i in "leapp-repo-tests-f34" "leapp-repo-tests-rhel7" "leapp-repo-tests-rhel8" \ - "leapp-repo-tests-rhel9" "leapp-repo-build-el7" "leapp-repo-build-el8"; do \ + @for i in "leapp-repo-tests-f34" "leapp-repo-tests-rhel8" \ + "leapp-repo-tests-rhel9" "leapp-repo-build-el8"; do \ $(_CONTAINER_TOOL) 
kill "$$i-cont" || :; \ $(_CONTAINER_TOOL) rm "$$i-cont" || :; \ $(_CONTAINER_TOOL) rmi "$$i" || :; \ diff --git a/README.md b/README.md index 6b45b4b7..43da589e 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -**Before doing anything, please read the upstream [documentation](https://leapp-repository.readthedocs.io/).** +**Before doing anything, please read the [leapp-repository documentation](https://leapp-repository.readthedocs.io/).** -Also, you could find useufl to read [Leapp framework documentation](https://leapp.readthedocs.io/). +Also, you could find the [Leapp framework documentation](https://leapp.readthedocs.io/) useful to read. --- @@ -17,7 +17,7 @@ Also, you could find useufl to read [Leapp framework documentation](https://leap - *All files in /var/log/leapp* - */var/lib/leapp/leapp.db* - *journalctl* - - If you want, you can optionally send anything else would you like to provide (e.g. storage info) + - If you want, you can optionally send any other relevant information (e.g. 
storage, network) **For your convenience you can pack all logs with this command:** diff --git a/ci/.gitignore b/ci/.gitignore new file mode 100644 index 00000000..e6f97f0f --- /dev/null +++ b/ci/.gitignore @@ -0,0 +1 @@ +**/.vagrant diff --git a/ci/ansible/ansible.cfg b/ci/ansible/ansible.cfg new file mode 100644 index 00000000..d5c13036 --- /dev/null +++ b/ci/ansible/ansible.cfg @@ -0,0 +1,4 @@ +[defaults] +callbacks_enabled=ansible.posix.profile_tasks +stdout_callback=community.general.yaml +pipelining=True diff --git a/ci/ansible/docker-ce.yaml b/ci/ansible/docker-ce.yaml new file mode 100644 index 00000000..bba5f3df --- /dev/null +++ b/ci/ansible/docker-ce.yaml @@ -0,0 +1,6 @@ +--- +- name: Docker CE configuration + hosts: all + become: yes + roles: + - docker-ce diff --git a/ci/ansible/minimal.yaml b/ci/ansible/minimal.yaml new file mode 100644 index 00000000..517cc81b --- /dev/null +++ b/ci/ansible/minimal.yaml @@ -0,0 +1,6 @@ +--- +- name: Minimal configuration + hosts: all + become: yes + roles: + - minimal diff --git a/ci/ansible/requirements.yaml b/ci/ansible/requirements.yaml new file mode 100644 index 00000000..13ca0224 --- /dev/null +++ b/ci/ansible/requirements.yaml @@ -0,0 +1,3 @@ +collections: + - name: community.general + - name: ansible.posix diff --git a/ci/ansible/roles/docker-ce/README.md b/ci/ansible/roles/docker-ce/README.md new file mode 100644 index 00000000..860444b1 --- /dev/null +++ b/ci/ansible/roles/docker-ce/README.md @@ -0,0 +1,43 @@ +Docker CE Install and configuration +========= + +Install latest version of Docker CE Engine form upstream repository. Start and enable services after installation. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 
+ +Role Variables +-------------- + +`docker_ce_repo_checksum` in defaults/main.yaml. SHA512 Checksum of the docker-ce.repo file. +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: all + become: yes + roles: + - role: docker + vars: + docker_ce_repo_checksum: sha512:XXXX # You can provide the new checksum if the default one not actual + + +License +------- + +GPL-3.0-or-later + +Author Information +------------------ + +AlmaLinux OS Foundation diff --git a/ci/ansible/roles/docker-ce/defaults/main.yaml b/ci/ansible/roles/docker-ce/defaults/main.yaml new file mode 100644 index 00000000..d0fd0c09 --- /dev/null +++ b/ci/ansible/roles/docker-ce/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +# defaults file for docker-ce +docker_ce_repo_checksum: sha512:1de0b99cbb427e974144f226451711dc491caef6b1256cb599ff307a687ba2d7dd959a016d4e4cfdd4acbd83423ba1f78fa89db61bab35351e35f1152aedaf5c diff --git a/ci/ansible/roles/docker-ce/handlers/main.yaml b/ci/ansible/roles/docker-ce/handlers/main.yaml new file mode 100644 index 00000000..a7236219 --- /dev/null +++ b/ci/ansible/roles/docker-ce/handlers/main.yaml @@ -0,0 +1,2 @@ +--- +# handlers file for docker-ce diff --git a/ci/ansible/roles/docker-ce/meta/main.yaml b/ci/ansible/roles/docker-ce/meta/main.yaml new file mode 100644 index 00000000..aa67ded8 --- 
/dev/null +++ b/ci/ansible/roles/docker-ce/meta/main.yaml @@ -0,0 +1,25 @@ +galaxy_info: + author: AlmaLinux OS Community + description: Install and configure Docker CE Engine + company: AlmaLinux OS Foundation + + license: GPL-3.0-or-later + + min_ansible_version: 2.11 + + platforms: + - name: EL + versions: + - 7 + - 8 + - 9 + + galaxy_tags: + - docker + - el7 + - el8 + - el9 + - almalinux + +dependencies: + - minimal diff --git a/ci/ansible/roles/docker-ce/tasks/install_docker_el7.yaml b/ci/ansible/roles/docker-ce/tasks/install_docker_el7.yaml new file mode 100644 index 00000000..320477af --- /dev/null +++ b/ci/ansible/roles/docker-ce/tasks/install_docker_el7.yaml @@ -0,0 +1,11 @@ +--- +# Install Docker +- name: Install Docker CE Stable + ansible.builtin.yum: + name: + - docker-ce + - docker-ce-cli + - containerd.io + - docker-compose-plugin + update_cache: yes + state: present diff --git a/ci/ansible/roles/docker-ce/tasks/install_docker_el8.yaml b/ci/ansible/roles/docker-ce/tasks/install_docker_el8.yaml new file mode 100644 index 00000000..d44a202a --- /dev/null +++ b/ci/ansible/roles/docker-ce/tasks/install_docker_el8.yaml @@ -0,0 +1,11 @@ +--- +# Install Docker +- name: Install Docker CE Stable + ansible.builtin.dnf: + name: + - docker-ce + - docker-ce-cli + - containerd.io + - docker-compose-plugin + update_cache: yes + state: present diff --git a/ci/ansible/roles/docker-ce/tasks/main.yaml b/ci/ansible/roles/docker-ce/tasks/main.yaml new file mode 100644 index 00000000..989af23f --- /dev/null +++ b/ci/ansible/roles/docker-ce/tasks/main.yaml @@ -0,0 +1,38 @@ +--- +# tasks file for docker-ce +- name: Add Docker CE repository + ansible.builtin.get_url: + url: https://download.docker.com/linux/centos/docker-ce.repo + dest: /etc/yum.repos.d/docker-ce.repo + checksum: "{{ docker_ce_repo_checksum }}" + owner: root + group: root + mode: '0644' + seuser: system_u + serole: object_r + setype: system_conf_t + +- name: Remove older versions of Docker on EL7 + 
ansible.builtin.include_tasks: remove_old_docker_el7.yaml + when: ansible_facts['distribution_major_version'] == '7' + +- name: Remove older versions of Docker on >= EL8 + ansible.builtin.include_tasks: remove_old_docker_el8.yaml + when: ansible_facts['distribution_major_version'] == '8' + +- name: Install Docker CE Stable on EL7 + ansible.builtin.include_tasks: install_docker_el7.yaml + when: ansible_facts['distribution_major_version'] == '7' + +- name: Install Docker CE Stable on >= EL8 + ansible.builtin.include_tasks: install_docker_el8.yaml + when: ansible_facts['distribution_major_version'] == '8' + +- name: Start and Enable Docker services + ansible.builtin.systemd: + name: "{{ item }}" + enabled: yes + state: started + loop: + - docker.service + - containerd.service diff --git a/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el7.yaml b/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el7.yaml new file mode 100644 index 00000000..db9e0960 --- /dev/null +++ b/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el7.yaml @@ -0,0 +1,15 @@ +--- +# Remove older versions of Docker +- name: Uninstall older versions of Docker + ansible.builtin.yum: + name: + - docker + - docker-client + - docker-client-latest + - docker-common + - docker-latest + - docker-latest-logrotate + - docker-logrotate + - docker-engine + autoremove: yes + state: absent diff --git a/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el8.yaml b/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el8.yaml new file mode 100644 index 00000000..88f860cf --- /dev/null +++ b/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el8.yaml @@ -0,0 +1,15 @@ +--- +# Remove older versions of Docker +- name: Uninstall older versions of Docker + ansible.builtin.dnf: + name: + - docker + - docker-client + - docker-client-latest + - docker-common + - docker-latest + - docker-latest-logrotate + - docker-logrotate + - docker-engine + autoremove: yes + state: absent diff --git 
a/ci/ansible/roles/docker-ce/tests/inventory b/ci/ansible/roles/docker-ce/tests/inventory new file mode 100644 index 00000000..878877b0 --- /dev/null +++ b/ci/ansible/roles/docker-ce/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ci/ansible/roles/docker-ce/tests/test.yaml b/ci/ansible/roles/docker-ce/tests/test.yaml new file mode 100644 index 00000000..789ba96e --- /dev/null +++ b/ci/ansible/roles/docker-ce/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - docker-ce diff --git a/ci/ansible/roles/docker-ce/vars/main.yaml b/ci/ansible/roles/docker-ce/vars/main.yaml new file mode 100644 index 00000000..7ff8a18f --- /dev/null +++ b/ci/ansible/roles/docker-ce/vars/main.yaml @@ -0,0 +1,2 @@ +--- +# vars file for docker-ce diff --git a/ci/ansible/roles/minimal/README.md b/ci/ansible/roles/minimal/README.md new file mode 100644 index 00000000..225dd44b --- /dev/null +++ b/ci/ansible/roles/minimal/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 
+ +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/ci/ansible/roles/minimal/defaults/main.yaml b/ci/ansible/roles/minimal/defaults/main.yaml new file mode 100644 index 00000000..4a5a46cd --- /dev/null +++ b/ci/ansible/roles/minimal/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +# defaults file for minimal diff --git a/ci/ansible/roles/minimal/handlers/main.yaml b/ci/ansible/roles/minimal/handlers/main.yaml new file mode 100644 index 00000000..89105fec --- /dev/null +++ b/ci/ansible/roles/minimal/handlers/main.yaml @@ -0,0 +1,2 @@ +--- +# handlers file for minimal diff --git a/ci/ansible/roles/minimal/meta/main.yaml b/ci/ansible/roles/minimal/meta/main.yaml new file mode 100644 index 00000000..ecc81ab7 --- /dev/null +++ b/ci/ansible/roles/minimal/meta/main.yaml @@ -0,0 +1,23 @@ +galaxy_info: + author: AlmaLinux OS Community + description: Minimal configuration for ELevate + company: AlmaLinux OS Foundation + + license: GPL-3.0-or-later + + min_ansible_version: 2.11 + + platforms: + - name: EL + versions: + - 7 + - 8 + - 9 + + galaxy_tags: + - elevate + - upgrade + - cleanup + - el7 + - el8 + - el9 diff --git a/ci/ansible/roles/minimal/tasks/cleanup_el7.yaml b/ci/ansible/roles/minimal/tasks/cleanup_el7.yaml new file mode 100644 index 00000000..1b4af7c6 --- /dev/null +++ b/ci/ansible/roles/minimal/tasks/cleanup_el7.yaml @@ -0,0 +1,10 @@ +--- +# Remove old kernels +- name: Install the yum-utils + ansible.builtin.yum: + name: yum-utils + state: present + update_cache: yes + +- name: Remove the old kernels on EL7 + ansible.builtin.command: package-cleanup -y --oldkernels --count=1 diff --git 
a/ci/ansible/roles/minimal/tasks/cleanup_el8.yaml b/ci/ansible/roles/minimal/tasks/cleanup_el8.yaml new file mode 100644 index 00000000..56aeefd3 --- /dev/null +++ b/ci/ansible/roles/minimal/tasks/cleanup_el8.yaml @@ -0,0 +1,7 @@ +--- +# Remove old kernels +- name: Remove old kernels on EL8 + ansible.builtin.command: dnf -y remove --oldinstallonly + register: removeoldkernels + changed_when: removeoldkernels.rc == 0 + failed_when: removeoldkernels.rc > 1 diff --git a/ci/ansible/roles/minimal/tasks/main.yaml b/ci/ansible/roles/minimal/tasks/main.yaml new file mode 100644 index 00000000..8c1b35bd --- /dev/null +++ b/ci/ansible/roles/minimal/tasks/main.yaml @@ -0,0 +1,21 @@ +--- +# tasks file for minimal +- name: Upgrade the packages on EL7 + ansible.builtin.include_tasks: upgrade_el7.yaml + when: ansible_facts['distribution_major_version'] == '7' + +- name: Upgrade the packages on EL8 + ansible.builtin.include_tasks: upgrade_el8.yaml + when: ansible_facts['distribution_major_version'] == '8' + +- name: Reboot the system + ansible.builtin.reboot: + when: upgrade_status is changed + +- name: Cleanup the older kernels on EL7 + ansible.builtin.include_tasks: cleanup_el7.yaml + when: ansible_facts['distribution_major_version'] == '7' + +- name: Cleanup the older kernels on El8 + ansible.builtin.include_tasks: cleanup_el8.yaml + when: ansible_facts['distribution_major_version'] == '8' diff --git a/ci/ansible/roles/minimal/tasks/upgrade_el7.yaml b/ci/ansible/roles/minimal/tasks/upgrade_el7.yaml new file mode 100644 index 00000000..7648a586 --- /dev/null +++ b/ci/ansible/roles/minimal/tasks/upgrade_el7.yaml @@ -0,0 +1,8 @@ +--- +# Upgrade the system +- name: Upgrade the system + ansible.builtin.yum: + name: "*" + state: latest + update_cache: yes + register: upgrade_status diff --git a/ci/ansible/roles/minimal/tasks/upgrade_el8.yaml b/ci/ansible/roles/minimal/tasks/upgrade_el8.yaml new file mode 100644 index 00000000..0d4a5d2a --- /dev/null +++ 
b/ci/ansible/roles/minimal/tasks/upgrade_el8.yaml @@ -0,0 +1,8 @@ +--- +# Upgrade the system +- name: Upgrade the system + ansible.builtin.dnf: + name: "*" + state: latest + update_cache: yes + register: upgrade_status diff --git a/ci/ansible/roles/minimal/tests/inventory b/ci/ansible/roles/minimal/tests/inventory new file mode 100644 index 00000000..878877b0 --- /dev/null +++ b/ci/ansible/roles/minimal/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ci/ansible/roles/minimal/tests/test.yaml b/ci/ansible/roles/minimal/tests/test.yaml new file mode 100644 index 00000000..db5c4c17 --- /dev/null +++ b/ci/ansible/roles/minimal/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - minimal diff --git a/ci/ansible/roles/minimal/vars/main.yaml b/ci/ansible/roles/minimal/vars/main.yaml new file mode 100644 index 00000000..b24df080 --- /dev/null +++ b/ci/ansible/roles/minimal/vars/main.yaml @@ -0,0 +1,2 @@ +--- +# vars file for minimal diff --git a/ci/jenkins/ELevate_el7toel8_Development.jenkinsfile b/ci/jenkins/ELevate_el7toel8_Development.jenkinsfile new file mode 100644 index 00000000..317209ef --- /dev/null +++ b/ci/jenkins/ELevate_el7toel8_Development.jenkinsfile @@ -0,0 +1,249 @@ +RETRY = params.RETRY +TIMEOUT = params.TIMEOUT + +pipeline { + agent { + label 'x86_64 && bm' + } + options { + timestamps() + parallelsAlwaysFailFast() + } + parameters { + choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a target distro or all for ELevation') + choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration') + string(name: 'LEAPP_SRC_GIT_USER', defaultValue: 'AlmaLinux', description: 'Input name of Git user of LEAPP source', trim: true) + string(name: 'LEAPP_SRC_GIT_BRANCH', defaultValue: 'almalinux', description: 'Input name of Git branch of LEAPP source', trim: true) + string(name: 'RETRY', defaultValue: 
'3', description: 'Input count of retry', trim: true) + string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true) + } + environment { + VAGRANT_NO_COLOR = '1' + } + stages { + stage('Prepare') { + steps { + sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml', + label: 'Install Ansible collections' + sh script: 'python3.11 -m venv .venv', + label: 'Create Python virtual environment' + sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko', + label: 'Install Testinfra' + sh script: 'git clone https://github.com/AlmaLinux/leapp-data.git --branch devel', + label: 'Fetch devel version of leapp data' + } + } + stage('CreateSingleMachine') { + when { + expression { params.TARGET_DISTRO_FILTER != 'all' } + } + environment { + CONFIG = "${CONF_FILTER}" + } + steps { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER) + + sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile', + label: 'Generate Vagrantfile' + sh script: "vagrant up $targetDistro.vmName", + label: 'Create source VM' + } + } + } + stage('CreateMultiMachine') { + when { + expression { params.TARGET_DISTRO_FILTER == 'all' } + } + environment { + CONFIG = "${CONF_FILTER}" + } + steps { + sh script: 'cp ci/vagrant/el7toel8_multi.rb Vagrantfile', + label: 'Generate Vagrantfile' + sh script: 'vagrant up', + label: 'Create source VM' + } + } + stage('ELevationAndTest') { + matrix { + when { + anyOf { + expression { params.TARGET_DISTRO_FILTER == 'all' } + expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO } + } + } + axes { + axis { + name 'TARGET_DISTRO' + values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8' + } + } + stages { + stage('ELevate') { + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO) + + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum-config-manager --add-repo 
https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"", + label: 'Add testing repo of ELevate' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y leapp-upgrade\"", + label: 'Install testing version of ELevate' + sh script: "vagrant upload ci/scripts/install_elevate_dev.sh install_elevate_dev.sh $targetDistro.vmName", + label: 'Upload installer script to VMs' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo bash install_elevate_dev.sh -u ${LEAPP_SRC_GIT_USER} -b ${LEAPP_SRC_GIT_BRANCH}\"", + label: 'Install development version of ELevate', + returnStatus: true + sh script: "vagrant upload leapp-data/ leapp-data/ --compress $targetDistro.vmName", + label: 'Upload devel branch of leapp data' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mkdir -p /etc/leapp/files/vendors.d\"", + label: 'Create directory structrue of leapp data' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo install -t /etc/leapp/files leapp-data/files/${targetDistro.leappData}/*\"", + label: 'Install devel version of leapp data' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo install -t /etc/leapp/files/vendors.d leapp-data/vendors.d/*\"", + label: 'Install devel version of leapp vendor data' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mv -f /etc/leapp/files/leapp_upgrade_repositories.repo.el8 /etc/leapp/files/leapp_upgrade_repositories.repo\"", + label: 'Configure leapp upgrade repositories for EL7toEL8' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mv -f /etc/leapp/files/repomap.json.el8 /etc/leapp/files/repomap.json\"", + label: 'Configure leapp repository mapping for EL7toEL8' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum -y install tree && sudo tree -ha /etc/leapp\"", + label: 'Check if development version of leapp data installed correctly' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"", + label: 'Start pre-upgrade check', + returnStatus: true + sh script: 
"vagrant ssh $targetDistro.vmName -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"", + label: 'Permit ssh as root login' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"", + label: 'Answer the leapp question' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"", + label: 'Start the Upgrade' + sh script: "vagrant reload $targetDistro.vmName", + label: 'Reboot to the ELevate initramfs' + sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config", + label: 'Generate the ssh-config file' + } + } + } + } + } + stage('Distro Tests') { + when { + anyOf { + expression { params.CONF_FILTER == 'minimal' } + expression { params.CONF_FILTER == 'docker-ce' } + } + } + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO) + + sh script: 'rm -f conftest.py pytest.ini', + label: 'Delete root conftest.py file' + sh script: """ + . .venv/bin/activate \ + && py.test -v --hosts=${targetDistro.vmName} \ + --ssh-config=.vagrant/ssh-config \ + --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \ + ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py + """, + label: 'Run the distro specific tests' + } + } + } + } + } + stage('Docker Tests') { + when { + anyOf { + expression { params.CONF_FILTER == 'docker-ce' } + } + } + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO) + + sh script: """ + . 
.venv/bin/activate \ + && py.test -v --hosts=${targetDistro.vmName} \ + --ssh-config=.vagrant/ssh-config \ + --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \ + ci/tests/tests/docker/test_docker_ce.py + """, + label: 'Run the docker specific tests' + } + } + } + } + } + } + } + } + } + post { + success { + junit testResults: 'ci/tests/tests/**/**_junit.xml', + skipPublishingChecks: true + } + cleanup { + sh script: 'vagrant destroy -f --no-parallel -g', + label: 'Destroy VMs' + cleanWs() + } + } +} + +def targetDistroSpec(distro) { + def spec = [:] + + switch (distro) { + case 'almalinux-8': + vm = 'almalinux_8' + ldata = 'almalinux' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + case 'centos-stream-8': + vm = 'centosstream_8' + ldata = 'centos' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + case 'oraclelinux-8': + vm = 'oraclelinux_8' + ldata = 'oraclelinux' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + case 'rocky-8': + vm = 'rocky_8' + ldata = 'rocky' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + default: + spec = [ + vmName: 'unknown', + leappData: 'unknown' + ] + break + } + return spec +} diff --git a/ci/jenkins/ELevate_el7toel8_Internal.jenkinsfile b/ci/jenkins/ELevate_el7toel8_Internal.jenkinsfile new file mode 100644 index 00000000..97f900fe --- /dev/null +++ b/ci/jenkins/ELevate_el7toel8_Internal.jenkinsfile @@ -0,0 +1,230 @@ +RETRY = params.RETRY +TIMEOUT = params.TIMEOUT + +pipeline { + agent { + label 'x86_64 && bm' + } + options { + timestamps() + parallelsAlwaysFailFast() + } + parameters { + choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a target distro or all for ELevation') + choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration') + string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true) + 
string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true) + } + environment { + VAGRANT_NO_COLOR = '1' + } + stages { + stage('Prepare') { + steps { + sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml', + label: 'Install Ansible collections' + sh script: 'python3.11 -m venv .venv', + label: 'Create Python virtual environment' + sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko', + label: 'Install Testinfra' + } + } + stage('CreateSingleMachine') { + when { + expression { params.TARGET_DISTRO_FILTER != 'all' } + } + environment { + CONFIG = "${CONF_FILTER}" + } + steps { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER) + + sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile', + label: 'Generate Vagrantfile' + sh script: "vagrant up $targetDistro.vmName", + label: 'Create source VM' + } + } + } + stage('CreateMultiMachine') { + when { + expression { params.TARGET_DISTRO_FILTER == 'all' } + } + environment { + CONFIG = "${CONF_FILTER}" + } + steps { + sh script: 'cp ci/vagrant/el7toel8_multi.rb Vagrantfile', + label: 'Generate Vagrantfile' + sh script: 'vagrant up', + label: 'Create source VM' + } + } + stage('ELevationAndTest') { + matrix { + when { + anyOf { + expression { params.TARGET_DISTRO_FILTER == 'all' } + expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO } + } + } + axes { + axis { + name 'TARGET_DISTRO' + values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8' + } + } + stages { + stage('ELevate') { + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO) + + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y https://repo.almalinux.org/elevate/elevate-release-latest-el7.noarch.rpm\"", + label: 'Install the elevate-release-latest rpm packages for EL7' + sh script: "vagrant ssh $targetDistro.vmName -c \"wget 
https://build.almalinux.org/pulp/content/copr/eabdullin1-leapp-data-internal-almalinux-8-x86_64-dr/config.repo -O /etc/yum.repos.d/internal-leapp.repo\"", + label: 'Add pulp repository' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y leapp-upgrade\"", + label: 'Install the leap rpm package' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y $targetDistro.leappData\"", + label: 'Install the LEAP migration data rpm packages' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"", + label: 'Start the Pre-Upgrade check', + returnStatus: true + sh script: "vagrant ssh $targetDistro.vmName -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"", + label: 'Permit ssh as root login' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"", + label: 'Answer the LEAP question' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"", + label: 'Start the Upgrade' + sh script: "vagrant reload $targetDistro.vmName", + label: 'Reboot to the ELevate initramfs' + sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config", + label: 'Generate the ssh-config file' + } + } + } + } + } + stage('Distro Tests') { + when { + anyOf { + expression { params.CONF_FILTER == 'minimal' } + expression { params.CONF_FILTER == 'docker-ce' } + } + } + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO) + + sh script: 'rm -f conftest.py pytest.ini', + label: 'Delete root conftest.py file' + sh script: """ + . 
.venv/bin/activate \ + && py.test -v --hosts=${targetDistro.vmName} \ + --ssh-config=.vagrant/ssh-config \ + --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \ + ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py + """, + label: 'Run the distro specific tests' + } + } + } + } + } + stage('Docker Tests') { + when { + anyOf { + expression { params.CONF_FILTER == 'docker-ce' } + } + } + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO) + + sh script: """ + . .venv/bin/activate \ + && py.test -v --hosts=${targetDistro.vmName} \ + --ssh-config=.vagrant/ssh-config \ + --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \ + ci/tests/tests/docker/test_docker_ce.py + """, + label: 'Run the docker specific tests' + } + } + } + } + } + } + } + } + } + post { + success { + junit testResults: 'ci/tests/tests/**/**_junit.xml', + skipPublishingChecks: true + } + cleanup { + sh script: 'vagrant destroy -f --no-parallel -g', + label: 'Destroy VMs' + cleanWs() + } + } +} + +def targetDistroSpec(distro) { + def spec = [:] + + switch (distro) { + case 'almalinux-8': + vm = 'almalinux_8' + ldata = 'leapp-data-almalinux' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + case 'centos-stream-8': + vm = 'centosstream_8' + ldata = 'leapp-data-centos' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + case 'oraclelinux-8': + vm = 'oraclelinux_8' + ldata = 'leapp-data-oraclelinux' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + case 'rocky-8': + vm = 'rocky_8' + ldata = 'leapp-data-rocky' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + default: + spec = [ + vmName: 'unknown', + leappData: 'unknown' + ] + break + } + return spec +} diff --git a/ci/jenkins/ELevate_el7toel8_Internal_Dev.jenkinsfile b/ci/jenkins/ELevate_el7toel8_Internal_Dev.jenkinsfile new file mode 100644 index 00000000..af2fabe2 --- 
/dev/null +++ b/ci/jenkins/ELevate_el7toel8_Internal_Dev.jenkinsfile @@ -0,0 +1,253 @@ +RETRY = params.RETRY +TIMEOUT = params.TIMEOUT + +pipeline { + agent { + label 'x86_64 && bm' + } + options { + timestamps() + parallelsAlwaysFailFast() + } + parameters { + choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a target distro or all for ELevation') + choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration') + string(name: 'LEAPP_SRC_GIT_USER', defaultValue: 'AlmaLinux', description: 'Input name of Git user of LEAPP source', trim: true) + string(name: 'LEAPP_SRC_GIT_BRANCH', defaultValue: 'almalinux', description: 'Input name of Git branch of LEAPP source', trim: true) + string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true) + string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true) + } + environment { + VAGRANT_NO_COLOR = '1' + } + stages { + stage('Prepare') { + steps { + sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml', + label: 'Install Ansible collections' + sh script: 'python3.11 -m venv .venv', + label: 'Create Python virtual environment' + sh script: '. 
.venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko', + label: 'Install Testinfra' + sh script: 'git clone https://github.com/AlmaLinux/leapp-data.git --branch devel', + label: 'Fetch devel version of leapp data' + } + } + stage('CreateSingleMachine') { + when { + expression { params.TARGET_DISTRO_FILTER != 'all' } + } + environment { + CONFIG = "${CONF_FILTER}" + } + steps { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER) + + sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile', + label: 'Generate Vagrantfile' + sh script: "vagrant up $targetDistro.vmName", + label: 'Create source VM' + } + } + } + stage('CreateMultiMachine') { + when { + expression { params.TARGET_DISTRO_FILTER == 'all' } + } + environment { + CONFIG = "${CONF_FILTER}" + } + steps { + sh script: 'cp ci/vagrant/el7toel8_multi.rb Vagrantfile', + label: 'Generate Vagrantfile' + sh script: 'vagrant up', + label: 'Create source VM' + } + } + stage('ELevationAndTest') { + matrix { + when { + anyOf { + expression { params.TARGET_DISTRO_FILTER == 'all' } + expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO } + } + } + axes { + axis { + name 'TARGET_DISTRO' + values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8' + } + } + stages { + stage('ELevate') { + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO) + + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum-config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"", + label: 'Add testing repo of ELevate' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo wget https://build.almalinux.org/pulp/content/copr/eabdullin1-leapp-data-internal-centos7-x86_64-dr/config.repo -O /etc/yum.repos.d/internal-leapp.repo\"", + label: 'Add pulp repository' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo sed -i 's|enabled=1|enabled=1\\npriority=80|' 
/etc/yum.repos.d/internal-leapp.repo\"", + label: 'Set priority for pulp repository' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y leapp-upgrade\"", + label: 'Install testing version of ELevate' + sh script: "vagrant upload ci/scripts/install_elevate_dev.sh install_elevate_dev.sh $targetDistro.vmName", + label: 'Upload installer script to VMs' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo bash install_elevate_dev.sh -u ${LEAPP_SRC_GIT_USER} -b ${LEAPP_SRC_GIT_BRANCH}\"", + label: 'Install development version of ELevate', + returnStatus: true + sh script: "vagrant upload leapp-data/ leapp-data/ --compress $targetDistro.vmName", + label: 'Upload devel branch of leapp data' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mkdir -p /etc/leapp/files/vendors.d\"", + label: 'Create directory structrue of leapp data' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo install -t /etc/leapp/files leapp-data/files/${targetDistro.leappData}/*\"", + label: 'Install devel version of leapp data' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo install -t /etc/leapp/files/vendors.d leapp-data/vendors.d/*\"", + label: 'Install devel version of leapp vendor data' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mv -f /etc/leapp/files/leapp_upgrade_repositories.repo.el8 /etc/leapp/files/leapp_upgrade_repositories.repo\"", + label: 'Configure leapp upgrade repositories for EL7toEL8' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mv -f /etc/leapp/files/repomap.json.el8 /etc/leapp/files/repomap.json\"", + label: 'Configure leapp repository mapping for EL7toEL8' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum -y install tree && sudo tree -ha /etc/leapp\"", + label: 'Check if development version of leapp data installed correctly' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"", + label: 'Start pre-upgrade check', + returnStatus: true + sh script: "vagrant ssh 
$targetDistro.vmName -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"", + label: 'Permit ssh as root login' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"", + label: 'Answer the leapp question' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"", + label: 'Start the Upgrade' + sh script: "vagrant reload $targetDistro.vmName", + label: 'Reboot to the ELevate initramfs' + sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config", + label: 'Generate the ssh-config file' + } + } + } + } + } + stage('Distro Tests') { + when { + anyOf { + expression { params.CONF_FILTER == 'minimal' } + expression { params.CONF_FILTER == 'docker-ce' } + } + } + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO) + + sh script: 'rm -f conftest.py pytest.ini', + label: 'Delete root conftest.py file' + sh script: """ + . .venv/bin/activate \ + && py.test -v --hosts=${targetDistro.vmName} \ + --ssh-config=.vagrant/ssh-config \ + --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \ + ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py + """, + label: 'Run the distro specific tests' + } + } + } + } + } + stage('Docker Tests') { + when { + anyOf { + expression { params.CONF_FILTER == 'docker-ce' } + } + } + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO) + + sh script: """ + . 
.venv/bin/activate \ + && py.test -v --hosts=${targetDistro.vmName} \ + --ssh-config=.vagrant/ssh-config \ + --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \ + ci/tests/tests/docker/test_docker_ce.py + """, + label: 'Run the docker specific tests' + } + } + } + } + } + } + } + } + } + post { + success { + junit testResults: 'ci/tests/tests/**/**_junit.xml', + skipPublishingChecks: true + } + cleanup { + sh script: 'vagrant destroy -f --no-parallel -g', + label: 'Destroy VMs' + cleanWs() + } + } +} + +def targetDistroSpec(distro) { + def spec = [:] + + switch (distro) { + case 'almalinux-8': + vm = 'almalinux_8' + ldata = 'almalinux' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + case 'centos-stream-8': + vm = 'centosstream_8' + ldata = 'centos' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + case 'oraclelinux-8': + vm = 'oraclelinux_8' + ldata = 'oraclelinux' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + case 'rocky-8': + vm = 'rocky_8' + ldata = 'rocky' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + default: + spec = [ + vmName: 'unknown', + leappData: 'unknown' + ] + break + } + return spec +} diff --git a/ci/jenkins/ELevate_el7toel8_Stable.jenkinsfile b/ci/jenkins/ELevate_el7toel8_Stable.jenkinsfile new file mode 100644 index 00000000..ae9bdb57 --- /dev/null +++ b/ci/jenkins/ELevate_el7toel8_Stable.jenkinsfile @@ -0,0 +1,228 @@ +RETRY = params.RETRY +TIMEOUT = params.TIMEOUT + +pipeline { + agent { + label 'x86_64 && bm' + } + options { + timestamps() + parallelsAlwaysFailFast() + } + parameters { + choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a target distro or all for ELevation') + choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration') + string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true) + 
string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true) + } + environment { + VAGRANT_NO_COLOR = '1' + } + stages { + stage('Prepare') { + steps { + sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml', + label: 'Install Ansible collections' + sh script: 'python3.11 -m venv .venv', + label: 'Create Python virtual environment' + sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko', + label: 'Install Testinfra' + } + } + stage('CreateSingleMachine') { + when { + expression { params.TARGET_DISTRO_FILTER != 'all' } + } + environment { + CONFIG = "${CONF_FILTER}" + } + steps { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER) + + sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile', + label: 'Generate Vagrantfile' + sh script: "vagrant up $targetDistro.vmName", + label: 'Create source VM' + } + } + } + stage('CreateMultiMachine') { + when { + expression { params.TARGET_DISTRO_FILTER == 'all' } + } + environment { + CONFIG = "${CONF_FILTER}" + } + steps { + sh script: 'cp ci/vagrant/el7toel8_multi.rb Vagrantfile', + label: 'Generate Vagrantfile' + sh script: 'vagrant up', + label: 'Create source VM' + } + } + stage('ELevationAndTest') { + matrix { + when { + anyOf { + expression { params.TARGET_DISTRO_FILTER == 'all' } + expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO } + } + } + axes { + axis { + name 'TARGET_DISTRO' + values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8' + } + } + stages { + stage('ELevate') { + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO) + + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y https://repo.almalinux.org/elevate/elevate-release-latest-el7.noarch.rpm\"", + label: 'Install the elevate-release-latest rpm packages for EL7' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo 
yum install -y leapp-upgrade\"", + label: 'Install the leap rpm package' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y $targetDistro.leappData\"", + label: 'Install the LEAP migration data rpm packages' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"", + label: 'Start the Pre-Upgrade check', + returnStatus: true + sh script: "vagrant ssh $targetDistro.vmName -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"", + label: 'Permit ssh as root login' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"", + label: 'Answer the LEAP question' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"", + label: 'Start the Upgrade' + sh script: "vagrant reload $targetDistro.vmName", + label: 'Reboot to the ELevate initramfs' + sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config", + label: 'Generate the ssh-config file' + } + } + } + } + } + stage('Distro Tests') { + when { + anyOf { + expression { params.CONF_FILTER == 'minimal' } + expression { params.CONF_FILTER == 'docker-ce' } + } + } + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO) + + sh script: 'rm -f conftest.py pytest.ini', + label: 'Delete root conftest.py file' + sh script: """ + . .venv/bin/activate \ + && py.test -v --hosts=${targetDistro.vmName} \ + --ssh-config=.vagrant/ssh-config \ + --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \ + ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py + """, + label: 'Run the distro specific tests' + } + } + } + } + } + stage('Docker Tests') { + when { + anyOf { + expression { params.CONF_FILTER == 'docker-ce' } + } + } + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO) + + sh script: """ + . 
.venv/bin/activate \ + && py.test -v --hosts=${targetDistro.vmName} \ + --ssh-config=.vagrant/ssh-config \ + --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \ + ci/tests/tests/docker/test_docker_ce.py + """, + label: 'Run the docker specific tests' + } + } + } + } + } + } + } + } + } + post { + success { + junit testResults: 'ci/tests/tests/**/**_junit.xml', + skipPublishingChecks: true + } + cleanup { + sh script: 'vagrant destroy -f --no-parallel -g', + label: 'Destroy VMs' + cleanWs() + } + } +} + +def targetDistroSpec(distro) { + def spec = [:] + + switch (distro) { + case 'almalinux-8': + vm = 'almalinux_8' + ldata = 'leapp-data-almalinux' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + case 'centos-stream-8': + vm = 'centosstream_8' + ldata = 'leapp-data-centos' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + case 'oraclelinux-8': + vm = 'oraclelinux_8' + ldata = 'leapp-data-oraclelinux' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + case 'rocky-8': + vm = 'rocky_8' + ldata = 'leapp-data-rocky' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + default: + spec = [ + vmName: 'unknown', + leappData: 'unknown' + ] + break + } + return spec +} diff --git a/ci/jenkins/ELevate_el7toel8_Testing.jenkinsfile b/ci/jenkins/ELevate_el7toel8_Testing.jenkinsfile new file mode 100644 index 00000000..0f37cf2e --- /dev/null +++ b/ci/jenkins/ELevate_el7toel8_Testing.jenkinsfile @@ -0,0 +1,228 @@ +RETRY = params.RETRY +TIMEOUT = params.TIMEOUT + +pipeline { + agent { + label 'x86_64 && bm' + } + options { + timestamps() + parallelsAlwaysFailFast() + } + parameters { + choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a target distro or all for ELevation') + choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration') + string(name: 'RETRY', defaultValue: '3', description: 
'Input count of retry', trim: true) + string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true) + } + environment { + VAGRANT_NO_COLOR = '1' + } + stages { + stage('Prepare') { + steps { + sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml', + label: 'Install Ansible collections' + sh script: 'python3.11 -m venv .venv', + label: 'Create Python virtual environment' + sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko', + label: 'Install Testinfra' + } + } + stage('CreateSingleMachine') { + when { + expression { params.TARGET_DISTRO_FILTER != 'all' } + } + environment { + CONFIG = "${CONF_FILTER}" + } + steps { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER) + + sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile', + label: 'Generate Vagrantfile' + sh script: "vagrant up $targetDistro.vmName", + label: 'Create source VM' + } + } + } + stage('CreateMultiMachine') { + when { + expression { params.TARGET_DISTRO_FILTER == 'all' } + } + environment { + CONFIG = "${CONF_FILTER}" + } + steps { + sh script: 'cp ci/vagrant/el7toel8_multi.rb Vagrantfile', + label: 'Generate Vagrantfile' + sh script: 'vagrant up', + label: 'Create source VM' + } + } + stage('ELevationAndTest') { + matrix { + when { + anyOf { + expression { params.TARGET_DISTRO_FILTER == 'all' } + expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO } + } + } + axes { + axis { + name 'TARGET_DISTRO' + values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8' + } + } + stages { + stage('ELevate') { + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO) + + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum-config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"", + label: 'Install the elevate-release-latest rpm packages for EL7' + sh script: 
"vagrant ssh $targetDistro.vmName -c \"sudo yum install -y leapp-upgrade\"", + label: 'Install the leap rpm package' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y $targetDistro.leappData\"", + label: 'Install the LEAP migration data rpm packages' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"", + label: 'Start the Pre-Upgrade check', + returnStatus: true + sh script: "vagrant ssh $targetDistro.vmName -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"", + label: 'Permit ssh as root login' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"", + label: 'Answer the LEAP question' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"", + label: 'Start the Upgrade' + sh script: "vagrant reload $targetDistro.vmName", + label: 'Reboot to the ELevate initramfs' + sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config", + label: 'Generate the ssh-config file' + } + } + } + } + } + stage('Distro Tests') { + when { + anyOf { + expression { params.CONF_FILTER == 'minimal' } + expression { params.CONF_FILTER == 'docker-ce' } + } + } + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO) + + sh script: 'rm -f conftest.py pytest.ini', + label: 'Delete root conftest.py file' + sh script: """ + . 
.venv/bin/activate \ + && py.test -v --hosts=${targetDistro.vmName} \ + --ssh-config=.vagrant/ssh-config \ + --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \ + ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py + """, + label: 'Run the distro specific tests' + } + } + } + } + } + stage('Docker Tests') { + when { + anyOf { + expression { params.CONF_FILTER == 'docker-ce' } + } + } + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO) + + sh script: """ + . .venv/bin/activate \ + && py.test -v --hosts=${targetDistro.vmName} \ + --ssh-config=.vagrant/ssh-config \ + --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \ + ci/tests/tests/docker/test_docker_ce.py + """, + label: 'Run the docker specific tests' + } + } + } + } + } + } + } + } + } + post { + success { + junit testResults: 'ci/tests/tests/**/**_junit.xml', + skipPublishingChecks: true + } + cleanup { + sh script: 'vagrant destroy -f --no-parallel -g', + label: 'Destroy VMs' + cleanWs() + } + } +} + +def targetDistroSpec(distro) { + def spec = [:] + + switch (distro) { + case 'almalinux-8': + vm = 'almalinux_8' + ldata = 'leapp-data-almalinux' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + case 'centos-stream-8': + vm = 'centosstream_8' + ldata = 'leapp-data-centos' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + case 'oraclelinux-8': + vm = 'oraclelinux_8' + ldata = 'leapp-data-oraclelinux' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + case 'rocky-8': + vm = 'rocky_8' + ldata = 'leapp-data-rocky' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + default: + spec = [ + vmName: 'unknown', + leappData: 'unknown' + ] + break + } + return spec +} diff --git a/ci/jenkins/ELevate_el8toel9_Development.jenkinsfile b/ci/jenkins/ELevate_el8toel9_Development.jenkinsfile new file mode 100644 index 00000000..7eb5430b --- 
/dev/null +++ b/ci/jenkins/ELevate_el8toel9_Development.jenkinsfile @@ -0,0 +1,200 @@ +RETRY = params.RETRY +TIMEOUT = params.TIMEOUT + +pipeline { + agent { + label params.AGENT + } + options { + timestamps() + } + parameters { + string(name: 'AGENT', defaultValue: 'almalinux-8-vagrant-libvirt-x86_64', description: 'Input label of the Jenkins Agent', trim: true) + string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true) + string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true) + string(name: 'REPO_URL', defaultValue: 'https://github.com/LKHN/el-test-auto-dev.git', description: 'URL of the pipeline repository', trim: true) + string(name: 'REPO_BRANCH', defaultValue: 'main', description: 'Branch of the pipeline repository', trim: true) + choice(name: 'SOURCE_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a source distro or all for ELevation') + choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'centos-stream-9', 'oraclelinux-9', 'rocky-9', 'all'], description: 'Select a target distro or all to ELevation') + choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration') + } + stages { + stage('Source') { + steps { + git url: REPO_URL, + branch: REPO_BRANCH, + credentialsId: 'github-almalinuxautobot' + } + } + stage('Prepare Build and Test enviroment') { + steps { + sh script: 'cp Vagrantfile.el8toel9 Vagrantfile', + label: 'Generate the el8toel9 Vagrantfile' + sh script: 'sudo dnf -y install python39-devel python39-wheel', + label: 'Install Python 3.9, PIP and Wheel' + sh script: 'sudo python3 -m pip install --no-cache-dir --upgrade -r requirements.txt', + label: 'Install TestInfra' + sh script: 'git clone https://github.com/AlmaLinux/leapp-data.git --branch devel', + label: 'Clone the leapp-data git repository' + } + } + stage('ELevation') { + matrix { + when { + allOf 
{ + anyOf { + expression { params.SOURCE_DISTRO_FILTER == 'all' } + expression { params.SOURCE_DISTRO_FILTER == env.SOURCE_DISTRO } + } + anyOf { + expression { params.TARGET_DISTRO_FILTER == 'all' } + expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO } + } + } + } + axes { + axis { + name 'SOURCE_DISTRO' + values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8' + } + axis { + name 'TARGET_DISTRO' + values 'almalinux-9', 'centos-stream-9', 'oraclelinux-9', 'rocky-9' + } + } + stages { + stage('Create and Configure Machines') { + environment { + CONFIG = "${CONF_FILTER}" + } + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + sh script: 'vagrant destroy -f $SOURCE_DISTRO', + label: 'Make sure no machine present from the last retry' + sh script: 'vagrant up $SOURCE_DISTRO', + label: 'Create the source machines' + } + } + } + } + stage('ELevate to the all target distros') { + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"', + label: 'Add the ELevate Testing RPM repository' + sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf install -y leapp-upgrade\"', + label: 'Install the leap rpm package' + sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo bash /vagrant/scripts/install_elevate_dev.sh\"', + label: 'Install Development version of ELevate', + returnStatus: true + script { + def LEAPP_DATA = getLeappDataDistro(TARGET_DISTRO) + sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mkdir -p /etc/leapp/files/vendors.d\"", + label:'Create the LEAPP directory') + sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo install -t /etc/leapp/files /vagrant/leapp-data/files/${LEAPP_DATA}/*\"", + label:"Install the LEAPP DATA") + sh(script:'vagrant ssh $SOURCE_DISTRO -c \"sudo install -t /etc/leapp/files/vendors.d /vagrant/leapp-data/vendors.d/*\"', + label:"Install the Vendor 
DATA") + sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mv -f /etc/leapp/files/leapp_upgrade_repositories.repo.el9 /etc/leapp/files/leapp_upgrade_repositories.repo\"", + label:'Set LEAPP Repos for EL8') + sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mv -f /etc/leapp/files/repomap.json.el9 /etc/leapp/files/repomap.json\"", + label:'Set LEAPP Repo map for EL8') + sh(script:'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf -y install tree && sudo tree -ha /etc/leapp\"', + label:"Debug: Data paths") + } + sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp preupgrade\"', + label: 'Start the Pre-Upgrade check', + returnStatus: true + sh script: 'vagrant ssh $SOURCE_DISTRO -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"', + label: 'Permit ssh as root login' + sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"', + label: 'Answer the LEAP question' + sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp upgrade\"', + label: 'Start the Upgrade' + sh script: 'vagrant reload $SOURCE_DISTRO', + label: 'Reboot to the ELevate initramfs' + sh script: 'vagrant ssh-config $SOURCE_DISTRO >> .vagrant/ssh-config', + label: 'Generate the ssh-config file' + } + } + } + } + stage('Distro Tests') { + when { + anyOf { + expression { params.CONF_FILTER == 'minimal'} + expression { params.CONF_FILTER == 'docker-ce'} + } + } + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/distro/test_osinfo_$SOURCE_DISTRO-junit.xml tests/distro/test_osinfo_$SOURCE_DISTRO.py', + label: 'Run the distro specific tests' + } + } + } + } + stage('Docker Tests') { + when { + anyOf { + expression { params.CONF_FILTER == 'docker-ce'} + } + } + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml 
tests/docker/test_docker_ce_$SOURCE_DISTRO-junit.xml tests/docker/test_docker_ce.py', + label: 'Run the distro specific tests' + } + } + } + } + } + } + } + post { + success { + junit testResults: '**/tests/**/**-junit.xml', + skipPublishingChecks: true + } + cleanup { + sh script: 'vagrant destroy -f', + label: 'Destroy All Machines' + cleanWs() + } + } +} + +/* +* Common Functions +*/ +def getLeappDataDistro(TARGET_DISTRO) { + def leapp_data = "" + + switch(TARGET_DISTRO) { + case "almalinux-9": + leapp_data = TARGET_DISTRO.substring(0, 9) + break + + case "centos-stream-9": + leapp_data = TARGET_DISTRO.substring(0, 6) + break + + case "oraclelinux-9": + leapp_data = TARGET_DISTRO.substring(0, 11) + break + + case "rocky-9": + leapp_data = TARGET_DISTRO.substring(0, 5) + break + + default: + leapp_data = "Error: Target Distro Not Supported" + break + } + return leapp_data +} diff --git a/ci/jenkins/ELevate_el8toel9_Internal.jenkinsfile b/ci/jenkins/ELevate_el8toel9_Internal.jenkinsfile new file mode 100644 index 00000000..aa6be967 --- /dev/null +++ b/ci/jenkins/ELevate_el8toel9_Internal.jenkinsfile @@ -0,0 +1,214 @@ +RETRY = params.RETRY +TIMEOUT = params.TIMEOUT + +pipeline { + agent { + label 'x86_64 && bm' + } + options { + timestamps() + parallelsAlwaysFailFast() + } + parameters { + // choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'centos-stream-9', 'rocky-9', 'all'], description: 'Select a target distro or all for ELevation') + choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'rocky-9', 'all'], description: 'Select a target distro or all for ELevation') + choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration') + string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true) + string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true) + } + environment { + VAGRANT_NO_COLOR = '1' + } + stages { + stage('Prepare') 
{ + steps { + sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml', + label: 'Install Ansible collections' + sh script: 'python3.11 -m venv .venv', + label: 'Create Python virtual environment' + sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko', + label: 'Install Testinfra' + } + } + stage('CreateSingleMachine') { + when { + expression { params.TARGET_DISTRO_FILTER != 'all' } + } + environment { + CONFIG = "${CONF_FILTER}" + } + steps { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER) + + sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile', + label: 'Generate Vagrantfile' + sh script: "vagrant up $targetDistro.vmName", + label: 'Create source VM' + } + } + } + stage('CreateMultiMachine') { + when { + expression { params.TARGET_DISTRO_FILTER == 'all' } + } + environment { + CONFIG = "${CONF_FILTER}" + } + steps { + sh script: 'cp ci/vagrant/el8toel9_multi.rb Vagrantfile', + label: 'Generate Vagrantfile' + sh script: 'vagrant up', + label: 'Create source VM' + } + } + stage('ELevationAndTest') { + matrix { + when { + anyOf { + expression { params.TARGET_DISTRO_FILTER == 'all' } + expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO } + } + } + axes { + axis { + name 'TARGET_DISTRO' + // values 'almalinux-9', 'centos-stream-9', 'rocky-9' + values 'almalinux-9', 'rocky-9' + } + } + stages { + stage('ELevate') { + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO) + + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y https://repo.almalinux.org/elevate/elevate-release-latest-el8.noarch.rpm\"", + label: 'Install the elevate-release-latest rpm packages for EL8' + sh script: "vagrant ssh $targetDistro.vmName -c \"wget https://build.almalinux.org/pulp/content/copr/eabdullin1-leapp-data-internal-centos7-x86_64-dr/config.repo -O /etc/yum.repos.d/internal-leapp.repo\"", + label: 
'Add pulp repository' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y leapp-upgrade\"", + label: 'Install the leap rpm package' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y $targetDistro.leappData\"", + label: 'Install the LEAP migration data rpm packages' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"", + label: 'Start the Pre-Upgrade check', + returnStatus: true + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo sed -i \'s/^AllowZoneDrifting=.*/AllowZoneDrifting=no/\' /etc/firewalld/firewalld.conf\"", + label: 'TODO' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section check_vdo.no_vdo_devices=True\"", + label: 'TODO' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"", + label: 'Start the Upgrade' + sh script: "vagrant reload $targetDistro.vmName", + label: 'Reboot to the ELevate initramfs' + sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config", + label: 'Generate the ssh-config file' + } + } + } + } + } + stage('Distro Tests') { + when { + anyOf { + expression { params.CONF_FILTER == 'minimal' } + expression { params.CONF_FILTER == 'docker-ce' } + } + } + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO) + + sh script: 'rm -f conftest.py pytest.ini', + label: 'Delete root conftest.py file' + sh script: """ + . 
.venv/bin/activate \ + && py.test -v --hosts=${targetDistro.vmName} \ + --ssh-config=.vagrant/ssh-config \ + --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \ + ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py + """, + label: 'Run the distro specific tests' + } + } + } + } + } + stage('Docker Tests') { + when { + anyOf { + expression { params.CONF_FILTER == 'docker-ce' } + } + } + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO) + + sh script: """ + . .venv/bin/activate \ + && py.test -v --hosts=${targetDistro.vmName} \ + --ssh-config=.vagrant/ssh-config \ + --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \ + ci/tests/tests/docker/test_docker_ce.py + """, + label: 'Run the docker specific tests' + } + } + } + } + } + } + } + } + } + post { + success { + junit testResults: 'ci/tests/tests/**/**_junit.xml', + skipPublishingChecks: true + } + cleanup { + sh script: 'vagrant destroy -f --no-parallel -g', + label: 'Destroy VMs' + cleanWs() + } + } +} + +def targetDistroSpec(distro) { + def spec = [:] + + switch (distro) { + case 'almalinux-9': + vm = 'almalinux_9' + ldata = 'leapp-data-almalinux' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + case 'rocky-9': + vm = 'rocky_9' + ldata = 'leapp-data-rocky' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + default: + spec = [ + vmName: 'unknown', + leappData: 'unknown' + ] + break + } + return spec +} diff --git a/ci/jenkins/ELevate_el8toel9_Internal_Dev.jenkinsfile b/ci/jenkins/ELevate_el8toel9_Internal_Dev.jenkinsfile new file mode 100644 index 00000000..82626697 --- /dev/null +++ b/ci/jenkins/ELevate_el8toel9_Internal_Dev.jenkinsfile @@ -0,0 +1,206 @@ +RETRY = params.RETRY +TIMEOUT = params.TIMEOUT + +pipeline { + agent { + label params.AGENT + } + options { + timestamps() + } + parameters { + string(name: 'AGENT', defaultValue: 
'almalinux-8-vagrant-libvirt-x86_64', description: 'Input label of the Jenkins Agent', trim: true) + string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true) + string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true) + string(name: 'REPO_URL', defaultValue: 'https://github.com/LKHN/el-test-auto-dev.git', description: 'URL of the pipeline repository', trim: true) + string(name: 'REPO_BRANCH', defaultValue: 'main', description: 'Branch of the pipeline repository', trim: true) + choice(name: 'SOURCE_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a source distro or all for ELevation') + choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'centos-stream-9', 'oraclelinux-9', 'rocky-9', 'all'], description: 'Select a target distro or all to ELevation') + choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration') + } + stages { + stage('Source') { + steps { + git url: REPO_URL, + branch: REPO_BRANCH, + credentialsId: 'github-almalinuxautobot' + } + } + stage('Prepare Build and Test enviroment') { + steps { + sh script: 'cp Vagrantfile.el8toel9 Vagrantfile', + label: 'Generate the el8toel9 Vagrantfile' + sh script: 'sudo dnf -y install python39-devel python39-wheel', + label: 'Install Python 3.9, PIP and Wheel' + sh script: 'sudo python3 -m pip install --no-cache-dir --upgrade -r requirements.txt', + label: 'Install TestInfra' + sh script: 'git clone https://github.com/AlmaLinux/leapp-data.git --branch devel', + label: 'Clone the leapp-data git repository' + } + } + stage('ELevation') { + matrix { + when { + allOf { + anyOf { + expression { params.SOURCE_DISTRO_FILTER == 'all' } + expression { params.SOURCE_DISTRO_FILTER == env.SOURCE_DISTRO } + } + anyOf { + expression { params.TARGET_DISTRO_FILTER == 'all' } + expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO } 
+ }
+ }
+ }
+ axes {
+ axis {
+ name 'SOURCE_DISTRO'
+ values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8'
+ }
+ axis {
+ name 'TARGET_DISTRO'
+ values 'almalinux-9', 'centos-stream-9', 'oraclelinux-9', 'rocky-9'
+ }
+ }
+ stages {
+ stage('Create and Configure Machines') {
+ environment {
+ CONFIG = "${CONF_FILTER}"
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ sh script: 'vagrant destroy -f $SOURCE_DISTRO',
+ label: 'Make sure no machine present from the last retry'
+ sh script: 'vagrant up $SOURCE_DISTRO',
+ label: 'Create the source machines'
+ }
+ }
+ }
+ }
+ stage('ELevate to the all target distros') {
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"',
+ label: 'Add the ELevate Testing RPM repository'
+ sh script: "vagrant ssh $SOURCE_DISTRO -c \"sudo dnf install -y wget\"",
+ label: 'Install wget'
+ sh script: "vagrant ssh $SOURCE_DISTRO -c \"sudo wget https://build.almalinux.org/pulp/content/copr/eabdullin1-leapp-data-internal-almalinux-8-x86_64-dr/config.repo -O /etc/yum.repos.d/internal-leapp.repo\"",
+ label: 'Add pulp repository'
+ sh script: "vagrant ssh $SOURCE_DISTRO -c \"sudo sed -i 's|enabled=1|enabled=1\\npriority=80|' /etc/yum.repos.d/internal-leapp.repo\"",
+ label: 'Set priority for pulp repository'
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf install -y leapp-upgrade\"',
+ label: 'Install the leap rpm package'
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo bash /vagrant/scripts/install_elevate_dev.sh\"',
+ label: 'Install Development version of ELevate',
+ returnStatus: true
+ script {
+ def LEAPP_DATA = getLeappDataDistro(TARGET_DISTRO)
+ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mkdir -p /etc/leapp/files/vendors.d\"",
+ label:'Create the LEAPP directory')
+ sh(script:'vagrant ssh 
$SOURCE_DISTRO -c \"sudo install -t /etc/leapp/files /vagrant/leapp-data/files/${LEAPP_DATA}/*\"", + label:"Install the LEAPP DATA") + sh(script:'vagrant ssh $SOURCE_DISTRO -c \"sudo install -t /etc/leapp/files/vendors.d /vagrant/leapp-data/vendors.d/*\"', + label:"Install the Vendor DATA") + sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mv -f /etc/leapp/files/leapp_upgrade_repositories.repo.el9 /etc/leapp/files/leapp_upgrade_repositories.repo\"", + label:'Set LEAPP Repos for EL8') + sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mv -f /etc/leapp/files/repomap.json.el9 /etc/leapp/files/repomap.json\"", + label:'Set LEAPP Repo map for EL8') + sh(script:'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf -y install tree && sudo tree -ha /etc/leapp\"', + label:"Debug: Data paths") + } + sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp preupgrade\"', + label: 'Start the Pre-Upgrade check', + returnStatus: true + sh script: 'vagrant ssh $SOURCE_DISTRO -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"', + label: 'Permit ssh as root login' + sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"', + label: 'Answer the LEAP question' + sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp upgrade\"', + label: 'Start the Upgrade' + sh script: 'vagrant reload $SOURCE_DISTRO', + label: 'Reboot to the ELevate initramfs' + sh script: 'vagrant ssh-config $SOURCE_DISTRO >> .vagrant/ssh-config', + label: 'Generate the ssh-config file' + } + } + } + } + stage('Distro Tests') { + when { + anyOf { + expression { params.CONF_FILTER == 'minimal'} + expression { params.CONF_FILTER == 'docker-ce'} + } + } + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/distro/test_osinfo_$SOURCE_DISTRO-junit.xml tests/distro/test_osinfo_$SOURCE_DISTRO.py', + label: 'Run the distro specific tests' + } + } + 
}
+ }
+ stage('Docker Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'docker-ce'}
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/docker/test_docker_ce_$SOURCE_DISTRO-junit.xml tests/docker/test_docker_ce.py',
+ label: 'Run the distro specific tests'
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ post {
+ success {
+ junit testResults: '**/tests/**/**-junit.xml',
+ skipPublishingChecks: true
+ }
+ cleanup {
+ sh script: 'vagrant destroy -f',
+ label: 'Destroy All Machines'
+ cleanWs()
+ }
+ }
+}
+
+/*
+* Common Functions
+*/
+def getLeappDataDistro(TARGET_DISTRO) {
+ def leapp_data = ""
+
+ switch(TARGET_DISTRO) {
+ case "almalinux-9":
+ leapp_data = TARGET_DISTRO.substring(0, 9)
+ break
+
+ case "centos-stream-9":
+ leapp_data = TARGET_DISTRO.substring(0, 6)
+ break
+
+ case "oraclelinux-9":
+ leapp_data = TARGET_DISTRO.substring(0, 11)
+ break
+
+ case "rocky-9":
+ leapp_data = TARGET_DISTRO.substring(0, 5)
+ break
+
+ default:
+ leapp_data = "Error: Target Distro Not Supported"
+ break
+ }
+ return leapp_data
+}
diff --git a/ci/jenkins/ELevate_el8toel9_Stable.jenkinsfile b/ci/jenkins/ELevate_el8toel9_Stable.jenkinsfile
new file mode 100644
index 00000000..68f00165
--- /dev/null
+++ b/ci/jenkins/ELevate_el8toel9_Stable.jenkinsfile
@@ -0,0 +1,212 @@
+RETRY = params.RETRY
+TIMEOUT = params.TIMEOUT
+
+pipeline {
+ agent {
+ label 'x86_64 && bm'
+ }
+ options {
+ timestamps()
+ parallelsAlwaysFailFast()
+ }
+ parameters {
+ // choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'centos-stream-9', 'rocky-9', 'all'], description: 'Select a target distro or all for ELevation')
+ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'rocky-9', 'all'], description: 'Select a target distro or all for ELevation')
+ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration')
+ 
string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true) + string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true) + } + environment { + VAGRANT_NO_COLOR = '1' + } + stages { + stage('Prepare') { + steps { + sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml', + label: 'Install Ansible collections' + sh script: 'python3.11 -m venv .venv', + label: 'Create Python virtual environment' + sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko', + label: 'Install Testinfra' + } + } + stage('CreateSingleMachine') { + when { + expression { params.TARGET_DISTRO_FILTER != 'all' } + } + environment { + CONFIG = "${CONF_FILTER}" + } + steps { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER) + + sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile', + label: 'Generate Vagrantfile' + sh script: "vagrant up $targetDistro.vmName", + label: 'Create source VM' + } + } + } + stage('CreateMultiMachine') { + when { + expression { params.TARGET_DISTRO_FILTER == 'all' } + } + environment { + CONFIG = "${CONF_FILTER}" + } + steps { + sh script: 'cp ci/vagrant/el8toel9_multi.rb Vagrantfile', + label: 'Generate Vagrantfile' + sh script: 'vagrant up', + label: 'Create source VM' + } + } + stage('ELevationAndTest') { + matrix { + when { + anyOf { + expression { params.TARGET_DISTRO_FILTER == 'all' } + expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO } + } + } + axes { + axis { + name 'TARGET_DISTRO' + // values 'almalinux-9', 'centos-stream-9', 'rocky-9' + values 'almalinux-9', 'rocky-9' + } + } + stages { + stage('ELevate') { + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO) + + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y https://repo.almalinux.org/elevate/elevate-release-latest-el8.noarch.rpm\"", + label: 
'Install the elevate-release-latest rpm packages for EL8' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y leapp-upgrade\"", + label: 'Install the leap rpm package' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y $targetDistro.leappData\"", + label: 'Install the LEAP migration data rpm packages' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"", + label: 'Start the Pre-Upgrade check', + returnStatus: true + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo sed -i \'s/^AllowZoneDrifting=.*/AllowZoneDrifting=no/\' /etc/firewalld/firewalld.conf\"", + label: 'TODO' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section check_vdo.no_vdo_devices=True\"", + label: 'TODO' + sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"", + label: 'Start the Upgrade' + sh script: "vagrant reload $targetDistro.vmName", + label: 'Reboot to the ELevate initramfs' + sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config", + label: 'Generate the ssh-config file' + } + } + } + } + } + stage('Distro Tests') { + when { + anyOf { + expression { params.CONF_FILTER == 'minimal' } + expression { params.CONF_FILTER == 'docker-ce' } + } + } + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO) + + sh script: 'rm -f conftest.py pytest.ini', + label: 'Delete root conftest.py file' + sh script: """ + . 
.venv/bin/activate \ + && py.test -v --hosts=${targetDistro.vmName} \ + --ssh-config=.vagrant/ssh-config \ + --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \ + ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py + """, + label: 'Run the distro specific tests' + } + } + } + } + } + stage('Docker Tests') { + when { + anyOf { + expression { params.CONF_FILTER == 'docker-ce' } + } + } + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + script { + def targetDistro = targetDistroSpec(TARGET_DISTRO) + + sh script: """ + . .venv/bin/activate \ + && py.test -v --hosts=${targetDistro.vmName} \ + --ssh-config=.vagrant/ssh-config \ + --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \ + ci/tests/tests/docker/test_docker_ce.py + """, + label: 'Run the docker specific tests' + } + } + } + } + } + } + } + } + } + post { + success { + junit testResults: 'ci/tests/tests/**/**_junit.xml', + skipPublishingChecks: true + } + cleanup { + sh script: 'vagrant destroy -f --no-parallel -g', + label: 'Destroy VMs' + cleanWs() + } + } +} + +def targetDistroSpec(distro) { + def spec = [:] + + switch (distro) { + case 'almalinux-9': + vm = 'almalinux_9' + ldata = 'leapp-data-almalinux' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + case 'rocky-9': + vm = 'rocky_9' + ldata = 'leapp-data-rocky' + + spec = [ + vmName: vm, + leappData: ldata + ] + break + default: + spec = [ + vmName: 'unknown', + leappData: 'unknown' + ] + break + } + return spec +} diff --git a/ci/jenkins/ELevate_el8toel9_Testing.jenkinsfile b/ci/jenkins/ELevate_el8toel9_Testing.jenkinsfile new file mode 100644 index 00000000..79cdd472 --- /dev/null +++ b/ci/jenkins/ELevate_el8toel9_Testing.jenkinsfile @@ -0,0 +1,187 @@ +RETRY = params.RETRY +TIMEOUT = params.TIMEOUT + +pipeline { + agent { + label params.AGENT + } + options { + timestamps() + } + parameters { + string(name: 'AGENT', defaultValue: 
'almalinux-8-vagrant-libvirt-x86_64', description: 'Input label of the Jenkins Agent', trim: true) + string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true) + string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true) + string(name: 'REPO_URL', defaultValue: 'https://github.com/LKHN/el-test-auto-dev.git', description: 'URL of the pipeline repository', trim: true) + string(name: 'REPO_BRANCH', defaultValue: 'main', description: 'Branch of the pipeline repository', trim: true) + choice(name: 'SOURCE_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a source distro or all for ELevation') + choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'centos-stream-9', 'oraclelinux-9', 'rocky-9', 'all'], description: 'Select a target distro or all to ELevation') + choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration') + } + stages { + stage('Source') { + steps { + git url: REPO_URL, + branch: REPO_BRANCH, + credentialsId: 'github-almalinuxautobot' + } + } + stage('Prepare Build and Test enviroment') { + steps { + sh script: 'cp Vagrantfile.el8toel9 Vagrantfile', + label: 'Generate the el8toel9 Vagrantfile' + sh script: 'sudo dnf -y install python39-devel python39-wheel', + label: 'Install Python 3.9, PIP and Wheel' + sh script: 'sudo python3 -m pip install --no-cache-dir --upgrade -r requirements.txt', + label: 'Install TestInfra' + } + } + stage('ELevation') { + matrix { + when { + allOf { + anyOf { + expression { params.SOURCE_DISTRO_FILTER == 'all' } + expression { params.SOURCE_DISTRO_FILTER == env.SOURCE_DISTRO } + } + anyOf { + expression { params.TARGET_DISTRO_FILTER == 'all' } + expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO } + } + } + } + axes { + axis { + name 'SOURCE_DISTRO' + values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8' + } + 
axis { + name 'TARGET_DISTRO' + values 'almalinux-9', 'centos-stream-9', 'oraclelinux-9', 'rocky-9' + } + } + stages { + stage('Create and Configure Machines') { + environment { + CONFIG = "${CONF_FILTER}" + } + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + sh script: 'vagrant destroy -f $SOURCE_DISTRO', + label: 'Make sure no machine present from the last retry' + sh script: 'vagrant up $SOURCE_DISTRO', + label: 'Create the source machines' + } + } + } + } + stage('ELevate to the all target distros') { + steps { + retry(RETRY) { + timeout(time: TIMEOUT, unit: 'MINUTES') { + sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"', + label: 'Add the ELevate Testing RPM repository' + sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf -y install leapp-upgrade\"', + label: 'Install the leap rpm package' + script { + def LEAPP_DATA = getLeappDataDistro(TARGET_DISTRO) + sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo dnf -y install leapp-data-$LEAPP_DATA\"", + label:'Install the LEAP migration data rpm packages') + sh(script:'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf -y install tree && sudo tree -ha /etc/leapp\"', + label:'Debug: Data paths') + } + sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp preupgrade\"', + label: 'Start the Pre-Upgrade check', + returnStatus: true + sh script: 'vagrant ssh $SOURCE_DISTRO -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"', + label: 'Permit ssh as root login' + sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"', + label: 'Answer the LEAP question' + sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp upgrade\"', + label: 'Start the Upgrade' + sh script: 'vagrant reload $SOURCE_DISTRO', + label: 'Reboot to the ELevate initramfs' + sh script: 'vagrant ssh-config $SOURCE_DISTRO >> .vagrant/ssh-config', + label: 'Generate the 
ssh-config file'
+ }
+ }
+ }
+ }
+ stage('Distro Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'minimal'}
+ expression { params.CONF_FILTER == 'docker-ce'}
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/distro/test_osinfo_$TARGET_DISTRO-junit.xml tests/distro/test_osinfo_$TARGET_DISTRO.py',
+ label: 'Run the distro specific tests'
+ }
+ }
+ }
+ }
+ stage('Docker Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'docker-ce'}
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/docker/test_docker_ce_$SOURCE_DISTRO-junit.xml tests/docker/test_docker_ce.py',
+ label: 'Run the distro specific tests'
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ post {
+ success {
+ junit testResults: '**/tests/**/**-junit.xml',
+ skipPublishingChecks: true
+ }
+ cleanup {
+ sh script: 'vagrant destroy -f',
+ label: 'Destroy All Machines'
+ cleanWs()
+ }
+ }
+}
+
+/*
+* Common Functions
+*/
+def getLeappDataDistro(TARGET_DISTRO) {
+ def leapp_data = ""
+
+ switch(TARGET_DISTRO) {
+ case "almalinux-9":
+ leapp_data = TARGET_DISTRO.substring(0, 9)
+ break
+
+ case "centos-stream-9":
+ leapp_data = TARGET_DISTRO.substring(0, 6)
+ break
+
+ case "oraclelinux-9":
+ leapp_data = TARGET_DISTRO.substring(0, 11)
+ break
+
+ case "rocky-9":
+ leapp_data = TARGET_DISTRO.substring(0, 5)
+ break
+
+ default:
+ leapp_data = "Error: Target Distro Not Supported"
+ break
+ }
+ return leapp_data
+}
diff --git a/ci/scripts/install_elevate_dev.sh b/ci/scripts/install_elevate_dev.sh
new file mode 100644
index 00000000..f9cc2903
--- /dev/null
+++ b/ci/scripts/install_elevate_dev.sh
@@ -0,0 +1,117 @@
+#!/usr/bin/env bash
+
+USER='AlmaLinux'
+BRANCH='almalinux'
+
+show_usage() {
+ echo 'Usage: install_elevate_dev [OPTION]...' 
+ echo '' + echo ' -h, --help show this message and exit' + echo ' -u, --user github user name (default: AlmaLinux)' + echo ' -b, --branch github branch name (default: almalinux)' +} + +while [[ $# -gt 0 ]]; do + opt="$1" + case ${opt} in + -h|--help) + show_usage + exit 0 + ;; + -u|--user) + USER="$2" + shift + shift + ;; + -b|--branch) + BRANCH="$2" + shift + shift + ;; + *) + echo -e "Error: unknown option ${opt}" >&2 + exit 2 + ;; + esac +done + +RHEL_MAJOR_VERSION=$(rpm --eval %rhel) +WORK_DIR="$HOME" +NEW_LEAPP_NAME="leapp-repository-$BRANCH" +NEW_LEAPP_DIR="$WORK_DIR/$NEW_LEAPP_NAME/" +LEAPP_PATH='/usr/share/leapp-repository/repositories/' +LEAPP_GPG_PATH='/etc/leapp/repos.d/system_upgrade/common/files/rpm-gpg' +EXCLUDE_PATH=' +/usr/share/leapp-repository/repositories/system_upgrade/el7toel8/files/bundled-rpms +/usr/share/leapp-repository/repositories/system_upgrade/el7toel8/files +/usr/share/leapp-repository/repositories/system_upgrade/el7toel8 +/usr/share/leapp-repository/repositories/system_upgrade/el8toel9/files/bundled-rpms +/usr/share/leapp-repository/repositories/system_upgrade/el8toel9/files +/usr/share/leapp-repository/repositories/system_upgrade/el8toel9 +/usr/share/leapp-repository/repositories/system_upgrade +/usr/share/leapp-repository/repositories/ +' + + +echo "RHEL_MAJOR_VERSION=$RHEL_MAJOR_VERSION" +echo "WORK_DIR=$WORK_DIR" +echo "EXCLUDED_PATHS=$EXCLUDE_PATH" + +echo "Preserve GPG keys if any" +for major in 8 9; do + test -e ${LEAPP_GPG_PATH}/${major} && mv ${LEAPP_GPG_PATH}/${major} ${WORK_DIR}/ +done + + +echo 'Remove old files' +for dir in $(find $LEAPP_PATH -type d); +do + skip=0 + for exclude in $(echo $EXCLUDE_PATH); + do + if [[ $exclude == $dir ]];then + skip=1 + break + fi + done + if [ $skip -eq 0 ];then + rm -rf $dir + fi +done + +echo "Download new tarball from https://github.com/$USER/leapp-repository/archive/$BRANCH/leapp-repository-$BRANCH.tar.gz" +curl -s -L 
https://github.com/$USER/leapp-repository/archive/$BRANCH/leapp-repository-$BRANCH.tar.gz | tar -xmz -C $WORK_DIR/ || exit 1 + +echo 'Deleting files as in spec file' +rm -rf $NEW_LEAPP_DIR/repos/common/actors/testactor +find $NEW_LEAPP_DIR/repos/common -name "test.py" -delete +rm -rf `find $NEW_LEAPP_DIR -name "tests" -type d` +find $NEW_LEAPP_DIR -name "Makefile" -delete +if [ $RHEL_MAJOR_VERSION -eq '7' ]; then + rm -rf $NEW_LEAPP_DIR/repos/system_upgrade/el8toel9 +else + rm -rf $NEW_LEAPP_DIR/repos/system_upgrade/el7toel8 + rm -rf $NEW_LEAPP_DIR/repos/system_upgrade/cloudlinux +fi + +echo 'Copy new data to system' +cp -r $NEW_LEAPP_DIR/repos/* $LEAPP_PATH || exit 1 + +for DIRECTORY in $(find $LEAPP_PATH -mindepth 1 -maxdepth 1 -type d); +do + REPOSITORY=$(basename $DIRECTORY) + if ! [ -e /etc/leapp/repos.d/$REPOSITORY ];then + echo "Enabling repository $REPOSITORY" + ln -s $LEAPP_PATH/$REPOSITORY /etc/leapp/repos.d/$REPOSITORY || exit 1 + fi +done + +echo "Restore GPG keys if any" +for major in 8 9; do + rm -rf ${LEAPP_GPG_PATH}/${major} + test -e ${WORK_DIR}/${major} && mv ${WORK_DIR}/${major} ${LEAPP_GPG_PATH}/ +done + +rm -rf $NEW_LEAPP_DIR + +exit 0 diff --git a/ci/tests/tests/conftest.py b/ci/tests/tests/conftest.py new file mode 100644 index 00000000..01f9443e --- /dev/null +++ b/ci/tests/tests/conftest.py @@ -0,0 +1,52 @@ +import pytest +import re + + +@pytest.fixture(scope="module") +def get_os_release(host): + """Get content of the /etc/os-release""" + os_release = host.file("/etc/os-release") + return os_release + + +@pytest.fixture(scope="module") +def get_redhat_release(host): + """Get content of the /etc/redhat-release""" + redhat_release = host.file("/etc/redhat-release") + return redhat_release + + +@pytest.fixture(scope="module") +def get_kernel_info(host): + """Get kernel version and vendor information""" + kernel_ver_pattern = re.compile( + f".*(^[0-9][0-9]?[0-9]?.[0-9][0-9]?[0-9]?.[0-9][0-9]?[0-9]?).*" + ) + kernel_ver_output = 
host.check_output("uname -r") + kernel_version = kernel_ver_pattern.match(kernel_ver_output).group(1) + + with host.sudo(): + kernel_vendor = host.check_output( + "grep -Ei '(.*kernel signing key|.*CA Server|.*Build)' /proc/keys | sed -E" + " 's/ +/:/g' | cut -d ':' -f 9 | uniq" + ) + kernel_info = (kernel_version, kernel_vendor) + return kernel_info + + +@pytest.fixture(scope="module", params=["glibc", "systemd", "coreutils", "rpm"]) +def get_pkg_info(host, request): + """Get vendor and version of installed packages""" + pkg_name = request.param + pkg_vendor = host.check_output( + f"rpm -qa --queryformat \"%{{VENDOR}}\n\" {request.param} | sed '$p;d' " + ) + pkg_version = host.check_output( + f'rpm -qa --queryformat "%{{VERSION}}\n" {request.param} | sort -n | sed' + " '$p;d'" + ) + pkg_info = (pkg_name, pkg_vendor, pkg_version) + # print(pkg_name) + # print(pkg_vendor) + # print(pkg_version) + return pkg_info diff --git a/ci/tests/tests/distro/test_osinfo_almalinux_8.py b/ci/tests/tests/distro/test_osinfo_almalinux_8.py new file mode 100644 index 00000000..c5219b35 --- /dev/null +++ b/ci/tests/tests/distro/test_osinfo_almalinux_8.py @@ -0,0 +1,43 @@ +import pytest + + +@pytest.mark.usefixtures("get_os_release") +class TestOSRelease: + """Test values of NAME, ID and VERSION_ID""" + + def test_os_rel_name(self, get_os_release): + assert get_os_release.contains('NAME="AlmaLinux"') + + def test_os_rel_id(self, get_os_release): + assert get_os_release.contains('ID="almalinux"') + + def test_os_rel_version_id(self, get_os_release): + assert get_os_release.contains('VERSION_ID="8.*"') + + +@pytest.mark.usefixtures("get_redhat_release") +class TestRHRelease: + """Test contents of the /etc/redhat-release""" + + def test_redhat_release(self, get_redhat_release): + assert get_redhat_release.contains("AlmaLinux release 8.*") + + +@pytest.mark.usefixtures("get_pkg_info") +class TestPkgInfo: + """Test vendor and version of packages""" + + def test_pkg_vendor(self, 
get_pkg_info): + assert get_pkg_info[1] == "AlmaLinux" + + def test_pkg_version(self, get_pkg_info): + if get_pkg_info[0] == "kernel": + assert get_pkg_info[2] == "4.18.0" + elif get_pkg_info[0] == "glibc": + assert get_pkg_info[2] == "2.28" + elif get_pkg_info[0] == "systemd": + assert get_pkg_info[2] == "239" + elif get_pkg_info[0] == "coreutils": + assert get_pkg_info[2] == "8.30" + else: + assert get_pkg_info[2] == "4.14.3" diff --git a/ci/tests/tests/distro/test_osinfo_almalinux_9.py b/ci/tests/tests/distro/test_osinfo_almalinux_9.py new file mode 100644 index 00000000..1536e52b --- /dev/null +++ b/ci/tests/tests/distro/test_osinfo_almalinux_9.py @@ -0,0 +1,52 @@ +import pytest + + +@pytest.mark.usefixtures("get_os_release") +class TestOSRelease: + """Test values of NAME, ID and VERSION_ID""" + + def test_os_rel_name(self, get_os_release): + assert get_os_release.contains('NAME="AlmaLinux"') + + def test_os_rel_id(self, get_os_release): + assert get_os_release.contains('ID="almalinux"') + + def test_os_rel_version_id(self, get_os_release): + assert get_os_release.contains('VERSION_ID="9.*"') + + +@pytest.mark.usefixtures("get_redhat_release") +class TestRHRelease: + """Test contents of the /etc/redhat-release""" + + def test_redhat_release(self, get_redhat_release): + assert get_redhat_release.contains("AlmaLinux release 9.*") + + +@pytest.mark.usefixtures("get_kernel_info") +class TestKernelInfo: + """Test version and vendor of running kernel""" + + def test_kernel_version(self, get_kernel_info): + assert get_kernel_info[0] == "5.14.0" + + def test_kernel_vendor(self, get_kernel_info): + assert get_kernel_info[1] == "AlmaLinux" + + +@pytest.mark.usefixtures("get_pkg_info") +class TestPkgInfo: + """Test vendor and version of packages""" + + def test_pkg_vendor(self, get_pkg_info): + assert get_pkg_info[1] == "AlmaLinux" + + def test_pkg_version(self, get_pkg_info): + if get_pkg_info[0] == "glibc": + assert get_pkg_info[2] == "2.34" + elif get_pkg_info[0] == 
"systemd": + assert get_pkg_info[2] == "252" + elif get_pkg_info[0] == "coreutils": + assert get_pkg_info[2] == "8.32" + else: + assert get_pkg_info[2] == "4.16.1.3" diff --git a/ci/tests/tests/distro/test_osinfo_centosstream_8.py b/ci/tests/tests/distro/test_osinfo_centosstream_8.py new file mode 100644 index 00000000..995ae61e --- /dev/null +++ b/ci/tests/tests/distro/test_osinfo_centosstream_8.py @@ -0,0 +1,23 @@ +import pytest + + +@pytest.mark.usefixtures("get_os_release") +class TestOSRelease: + """Test values of NAME, ID and VERSION_ID""" + + def test_os_rel_name(self, get_os_release): + assert get_os_release.contains('NAME="CentOS Stream"') + + def test_os_rel_id(self, get_os_release): + assert get_os_release.contains('ID="centos"') + + def test_os_rel_version_id(self, get_os_release): + assert get_os_release.contains('VERSION_ID="8"') + + +@pytest.mark.usefixtures("get_redhat_release") +class TestRHRelease: + """Test contents of the /etc/redhat-release""" + + def test_redhat_release(self, get_redhat_release): + assert get_redhat_release.contains("CentOS Stream release 8") diff --git a/ci/tests/tests/distro/test_osinfo_centosstream_9.py b/ci/tests/tests/distro/test_osinfo_centosstream_9.py new file mode 100644 index 00000000..28e47202 --- /dev/null +++ b/ci/tests/tests/distro/test_osinfo_centosstream_9.py @@ -0,0 +1,23 @@ +import pytest + + +@pytest.mark.usefixtures("get_os_release") +class TestOSRelease: + """Test values of NAME, ID and VERSION_ID""" + + def test_os_rel_name(self, get_os_release): + assert get_os_release.contains('NAME="CentOS Stream"') + + def test_os_rel_id(self, get_os_release): + assert get_os_release.contains('ID="centos"') + + def test_os_rel_version_id(self, get_os_release): + assert get_os_release.contains('VERSION_ID="9"') + + +@pytest.mark.usefixtures("get_redhat_release") +class TestRHRelease: + """Test contents of the /etc/redhat-release""" + + def test_redhat_release(self, get_redhat_release): + assert 
get_redhat_release.contains("CentOS Stream release 9") diff --git a/ci/tests/tests/distro/test_osinfo_oraclelinux_8.py b/ci/tests/tests/distro/test_osinfo_oraclelinux_8.py new file mode 100644 index 00000000..2080fd2f --- /dev/null +++ b/ci/tests/tests/distro/test_osinfo_oraclelinux_8.py @@ -0,0 +1,23 @@ +import pytest + + +@pytest.mark.usefixtures("get_os_release") +class TestOSRelease: + """Test values of NAME, ID and VERSION_ID""" + + def test_os_rel_name(self, get_os_release): + assert get_os_release.contains('NAME="Oracle Linux Server"') + + def test_os_rel_id(self, get_os_release): + assert get_os_release.contains('ID="ol"') + + def test_os_rel_version_id(self, get_os_release): + assert get_os_release.contains('VERSION_ID="8.*"') + + +@pytest.mark.usefixtures("get_redhat_release") +class TestRHRelease: + """Test contents of the /etc/redhat-release""" + + def test_redhat_release(self, get_redhat_release): + assert get_redhat_release.contains("Red Hat Enterprise Linux release 8.*") diff --git a/ci/tests/tests/distro/test_osinfo_oraclelinux_9.py b/ci/tests/tests/distro/test_osinfo_oraclelinux_9.py new file mode 100644 index 00000000..bd5044bb --- /dev/null +++ b/ci/tests/tests/distro/test_osinfo_oraclelinux_9.py @@ -0,0 +1,23 @@ +import pytest + + +@pytest.mark.usefixtures("get_os_release") +class TestOSRelease: + """Test values of NAME, ID and VERSION_ID""" + + def test_os_rel_name(self, get_os_release): + assert get_os_release.contains('NAME="Oracle Linux Server"') + + def test_os_rel_id(self, get_os_release): + assert get_os_release.contains('ID="ol"') + + def test_os_rel_version_id(self, get_os_release): + assert get_os_release.contains('VERSION_ID="9.*"') + + +@pytest.mark.usefixtures("get_redhat_release") +class TestRHRelease: + """Test contents of the /etc/redhat-release""" + + def test_redhat_release(self, get_redhat_release): + assert get_redhat_release.contains("Red Hat Enterprise Linux release 9.*") diff --git 
a/ci/tests/tests/distro/test_osinfo_rocky_8.py b/ci/tests/tests/distro/test_osinfo_rocky_8.py new file mode 100644 index 00000000..cce5d668 --- /dev/null +++ b/ci/tests/tests/distro/test_osinfo_rocky_8.py @@ -0,0 +1,23 @@ +import pytest + + +@pytest.mark.usefixtures("get_os_release") +class TestOSRelease: + """Test values of NAME, ID and VERSION_ID""" + + def test_os_rel_name(self, get_os_release): + assert get_os_release.contains('NAME="Rocky Linux"') + + def test_os_rel_id(self, get_os_release): + assert get_os_release.contains('ID="rocky"') + + def test_os_rel_version_id(self, get_os_release): + assert get_os_release.contains('VERSION_ID="8.*"') + + +@pytest.mark.usefixtures("get_redhat_release") +class TestRHRelease: + """Test contents of the /etc/redhat-release""" + + def test_redhat_release(self, get_redhat_release): + assert get_redhat_release.contains("Rocky Linux release 8.*") diff --git a/ci/tests/tests/distro/test_osinfo_rocky_9.py b/ci/tests/tests/distro/test_osinfo_rocky_9.py new file mode 100644 index 00000000..ce8cccdb --- /dev/null +++ b/ci/tests/tests/distro/test_osinfo_rocky_9.py @@ -0,0 +1,23 @@ +import pytest + + +@pytest.mark.usefixtures("get_os_release") +class TestOSRelease: + """Test values of NAME, ID and VERSION_ID""" + + def test_os_rel_name(self, get_os_release): + assert get_os_release.contains('NAME="Rocky Linux"') + + def test_os_rel_id(self, get_os_release): + assert get_os_release.contains('ID="rocky"') + + def test_os_rel_version_id(self, get_os_release): + assert get_os_release.contains('VERSION_ID="9.*"') + + +@pytest.mark.usefixtures("get_redhat_release") +class TestRHRelease: + """Test contents of the /etc/redhat-release""" + + def test_redhat_release(self, get_redhat_release): + assert get_redhat_release.contains("Rocky Linux release 9.*") diff --git a/ci/tests/tests/docker/test_docker_ce.py b/ci/tests/tests/docker/test_docker_ce.py new file mode 100644 index 00000000..3c2550c7 --- /dev/null +++ 
b/ci/tests/tests/docker/test_docker_ce.py @@ -0,0 +1,26 @@ +import pytest + + +class TestDockerServices: + """Test docker and containerd services running and enabled""" + + def test_docker_is_running(self, host): + assert host.service("docker.service").is_running + + def test_containerd_is_running(self, host): + assert host.service("containerd.service").is_running + + def test_docker_is_enabled(self, host): + assert host.service("docker.service").is_enabled + + def test_containerd_is_enabled(self, host): + assert host.service("containerd.service").is_enabled + + +class TestDockerWorking: + """Test docker working with the hello world container""" + + def test_docker_is_working(self, host): + with host.sudo(): + cmd = host.run("sudo docker run --rm hello-world") + assert cmd.succeeded diff --git a/ci/vagrant/el7toel8_multi.rb b/ci/vagrant/el7toel8_multi.rb new file mode 100644 index 00000000..a18da81d --- /dev/null +++ b/ci/vagrant/el7toel8_multi.rb @@ -0,0 +1,40 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +configuration = ENV['CONFIG'] + +Vagrant.configure('2') do |config| + config.vagrant.plugins = 'vagrant-libvirt' + + config.vm.synced_folder '.', '/vagrant', disabled: true + config.vm.box = 'generic/centos7' + config.vm.boot_timeout = 3600 + + config.vm.provider 'libvirt' do |v| + v.uri = 'qemu:///system' + v.memory = 4096 + v.machine_type = 'q35' + v.cpu_mode = 'host-passthrough' + v.cpus = 2 + v.disk_bus = 'scsi' + v.disk_driver cache: 'writeback', discard: 'unmap' + v.random_hostname = true + end + + target_distros = ['almalinux', 'centosstream', 'oraclelinux', 'rocky'] + + target_distros.each do |target_distro| + config.vm.define "#{target_distro}_8" do |machine| + machine.vm.hostname = "#{target_distro}-8.test" + + if target_distro == target_distros[-1] + machine.vm.provision 'ansible' do |ansible| + ansible.compatibility_mode = '2.0' + ansible.limit = 'all' + ansible.playbook = "ci/ansible/#{configuration}.yaml" + ansible.config_file = 
'ci/ansible/ansible.cfg' + end + end + end + end +end diff --git a/ci/vagrant/el7toel8toel9_single.rb b/ci/vagrant/el7toel8toel9_single.rb new file mode 100644 index 00000000..8cd05ac3 --- /dev/null +++ b/ci/vagrant/el7toel8toel9_single.rb @@ -0,0 +1,53 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +configuration = ENV['CONFIG'] + +Vagrant.configure('2') do |config| + config.vagrant.plugins = 'vagrant-libvirt' + + config.vm.synced_folder '.', '/vagrant', disabled: true + config.ssh.disable_deprecated_algorithms = true + config.vm.boot_timeout = 3600 + + config.vm.provider 'libvirt' do |v| + v.uri = 'qemu:///system' + v.memory = 4096 + v.machine_type = 'q35' + v.cpu_mode = 'host-passthrough' + v.cpus = 2 + v.disk_bus = 'scsi' + v.disk_driver cache: 'writeback', discard: 'unmap' + v.random_hostname = true + end + + # EL7toEL8 + target_distros = ['almalinux', 'centosstream', 'oraclelinux', 'rocky'] + + target_distros.each do |target_distro| + config.vm.define "#{target_distro}_8" do |machine| + machine.vm.box = 'generic/centos7' + machine.vm.hostname = "#{target_distro}-8.test" + end + end + + # EL8toEL9 + target_distros_el9 = { + almalinux: 'almalinux/8', + # centosstream: 'generic/centos8s', + rocky: 'generic/rocky8' + } + + target_distros_el9.each_pair do |vm, box| + config.vm.define "#{vm}_9" do |machine| + machine.vm.box = "#{box}" + machine.vm.hostname = "#{vm}-9.test" + end + end + + config.vm.provision 'ansible' do |ansible| + ansible.compatibility_mode = '2.0' + ansible.playbook = "ci/ansible/#{configuration}.yaml" + ansible.config_file = 'ci/ansible/ansible.cfg' + end +end diff --git a/ci/vagrant/el8toel9_multi.rb b/ci/vagrant/el8toel9_multi.rb new file mode 100644 index 00000000..370758e6 --- /dev/null +++ b/ci/vagrant/el8toel9_multi.rb @@ -0,0 +1,45 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +configuration = ENV['CONFIG'] + +Vagrant.configure('2') do |config| + config.vagrant.plugins = 'vagrant-libvirt' + + config.vm.synced_folder '.', '/vagrant', 
disabled: true + config.ssh.disable_deprecated_algorithms = true + config.vm.boot_timeout = 3600 + + config.vm.provider 'libvirt' do |v| + v.uri = 'qemu:///system' + v.memory = 4096 + v.machine_type = 'q35' + v.cpu_mode = 'host-passthrough' + v.cpus = 2 + v.disk_bus = 'scsi' + v.disk_driver cache: 'writeback', discard: 'unmap' + v.random_hostname = true + end + + target_distros = { + almalinux: 'almalinux/8', + # centosstream: 'generic/centos8s', + rocky: 'generic/rocky8' + } + + target_distros.each_pair do |vm, box| + config.vm.define "#{vm}_9" do |machine| + machine.vm.box = "#{box}" + machine.vm.hostname = "#{vm}-9.test" + + if [vm, box] == target_distros.to_a.last + machine.vm.provision 'ansible' do |ansible| + ansible.compatibility_mode = '2.0' + ansible.limit = 'all' + ansible.playbook = "ci/ansible/#{configuration}.yaml" + ansible.config_file = 'ci/ansible/ansible.cfg' + end + end + end + end +end diff --git a/commands/command_utils.py b/commands/command_utils.py index 155bacad..e6ba6ba4 100644 --- a/commands/command_utils.py +++ b/commands/command_utils.py @@ -59,7 +59,9 @@ def assert_version_format(version_str, desired_format, version_kind): :raises: CommandError """ if not re.match(desired_format.regex, version_str): - error_str = 'Unexpected format of target version: {0}. The required format is \'{1}\'.' + error_str = ( + 'Unexpected format of target version: {0}. The required format is \'{1}\'.' + ) raise CommandError(error_str.format(version_str, desired_format.human_readable)) @@ -182,26 +184,32 @@ def get_target_version(flavour): return target_versions[-1] if target_versions else None -def vet_upgrade_path(args): +def get_target_release(args): """ - Make sure the user requested upgrade_path is a supported one. - If LEAPP_DEVEL_TARGET_RELEASE is set then it's value is not vetted against upgrade_paths_map but used as is. + Return the user selected target release or choose one from config. 
+ + A target release can be specified, ordered by priority, by the + LEAPP_DEVEL_TARGET_RELEASE or args.target (--target cmdline arg) or in the + config file. + + NOTE: when specified via the env var or cmdline arg, the version isn't + checked against supported versions, this is done later by an actor in the + upgrade process. :return: `tuple` (target_release, flavor) """ flavor = get_upgrade_flavour() env_version_override = os.getenv('LEAPP_DEVEL_TARGET_RELEASE') - if env_version_override: + target_ver = env_version_override or args.target + if target_ver: os_release_contents = _retrieve_os_release_contents() distro_id = os_release_contents.get('ID', '') expected_version_format = _DISTRO_VERSION_FORMATS.get(distro_id, VersionFormats.MAJOR_MINOR).value - assert_version_format(env_version_override, expected_version_format, _VersionKind.TARGET) - - return (env_version_override, flavor) + assert_version_format(target_ver, expected_version_format, _VersionKind.TARGET) + return (target_ver, flavor) - target_release = args.target or get_target_version(flavor) - return (target_release, flavor) + return (get_target_version(flavor), flavor) def set_resource_limits(): diff --git a/commands/tests/test_upgrade_paths.py b/commands/tests/test_upgrade_paths.py index c2cb09aa..89b5eb71 100644 --- a/commands/tests/test_upgrade_paths.py +++ b/commands/tests/test_upgrade_paths.py @@ -1,3 +1,4 @@ +import os import resource import mock @@ -29,34 +30,53 @@ def test_get_target_version(mock_open, monkeypatch): assert command_utils.get_target_version('default') == '9.0' -@mock.patch("leapp.cli.commands.command_utils.get_upgrade_paths_config", - return_value={"default": {"7.9": ["8.4"], "8.6": ["9.0"], "7": ["8.4"], "8": ["9.0"]}}) -def test_vet_upgrade_path(mock_open, monkeypatch): +@mock.patch( + "leapp.cli.commands.command_utils.get_upgrade_paths_config", + return_value={ + "default": { + "7.9": ["8.4"], + "8.6": ["9.0", "9.2"], + "7": ["8.4"], + "8": ["9.0", "9.2"], + } + }, +) +def 
test_get_target_release(mock_open, monkeypatch): # do not remove mock_open monkeypatch.setattr(command_utils, 'get_os_release_version_id', lambda x: '8.6') # make sure env var LEAPP_DEVEL_TARGET_RELEASE takes precedence - # when env var set to a bad version - abort the upgrade - args = mock.Mock(target='9.0') - monkeypatch.setenv('LEAPP_DEVEL_TARGET_RELEASE', '1.2badsemver') - with pytest.raises(CommandError) as err: - command_utils.vet_upgrade_path(args) - assert 'Unexpected format of target version' in err - # MAJOR.MINOR.PATCH is considered as bad version, only MAJOR.MINOR is accepted args = mock.Mock(target='9.0') + monkeypatch.setenv('LEAPP_DEVEL_TARGET_RELEASE', '9.2') + print(os.getenv('LEAPP_DEVEL_TARGET_RELEASE')) + assert command_utils.get_target_release(args) == ('9.2', 'default') + + # when env var set to a bad version, expect an error monkeypatch.setenv('LEAPP_DEVEL_TARGET_RELEASE', '9.0.0') with pytest.raises(CommandError) as err: - command_utils.vet_upgrade_path(args) + command_utils.get_target_release(args) assert 'Unexpected format of target version' in err + # when env var set to a version not in upgrade_paths map - go on and use it + # this is checked by an actor in the IPU monkeypatch.setenv('LEAPP_DEVEL_TARGET_RELEASE', '1.2') - assert command_utils.vet_upgrade_path(args) == ('1.2', 'default') - # no env var set, --target is set to proper version + assert command_utils.get_target_release(args) == ('1.2', 'default') + + # no env var set, --target is set to proper version - use it + args = mock.Mock(target='9.0') monkeypatch.delenv('LEAPP_DEVEL_TARGET_RELEASE', raising=False) - assert command_utils.vet_upgrade_path(args) == ('9.0', 'default') - # env var is set to proper version, --target is set to a bad one - use env var and go on with the upgrade + assert command_utils.get_target_release(args) == ('9.0', 'default') + + # --target set with incorrectly formatted version, env var not set, fail + args = mock.Mock(target='9.0a') + with 
pytest.raises(CommandError) as err: + command_utils.get_target_release(args) + assert 'Unexpected format of target version' in err + + # env var is set to proper version, --target set to a bad one: + # env var has priority, use it and go on with the upgrade monkeypatch.setenv('LEAPP_DEVEL_TARGET_RELEASE', '9.0') - args = mock.Mock(target='1.2') - assert command_utils.vet_upgrade_path(args) == ('9.0', 'default') + args = mock.Mock(target='9.0.0') + assert command_utils.get_target_release(args) == ('9.0', 'default') def _mock_getrlimit_factory(nofile_limits=(1024, 4096), fsize_limits=(1024, 4096)): diff --git a/commands/upgrade/util.py b/commands/upgrade/util.py index b54b0b34..7d5b563e 100644 --- a/commands/upgrade/util.py +++ b/commands/upgrade/util.py @@ -253,8 +253,8 @@ def prepare_configuration(args): if args.nogpgcheck: os.environ['LEAPP_NOGPGCHECK'] = '1' - # Check upgrade path and fail early if it's unsupported - target_version, flavor = command_utils.vet_upgrade_path(args) + # Check upgrade path and fail early if it's invalid + target_version, flavor = command_utils.get_target_release(args) os.environ['LEAPP_UPGRADE_PATH_TARGET_RELEASE'] = target_version os.environ['LEAPP_UPGRADE_PATH_FLAVOUR'] = flavor diff --git a/docs/source/contrib-and-devel-guidelines.md b/docs/source/contrib-and-devel-guidelines.md index 66bef9b1..3229c8a4 100644 --- a/docs/source/contrib-and-devel-guidelines.md +++ b/docs/source/contrib-and-devel-guidelines.md @@ -1,7 +1,7 @@ # Contribution and development guidelines ## Code guidelines -Your code should follow the [Python Coding Guidelines](https://leapp.readthedocs.io/en/latest/python-coding-guidelines.html) used for the leapp project. On top of these rules follow instructions +Your code should follow the [Python Coding Guidelines](https://leapp.readthedocs.io/en/latest/contributing.html#follow-python-coding-guidelines) used for the leapp project. On top of these rules follow instructions below. 
### Retrieving information about the source system should be separated from its use @@ -51,7 +51,6 @@ can be used. Here is the list of repositories in this project with the Python compatibility requirements: * `system_upgrade/common` - 3.6, 3.9, 3.12 (_you can start to ignore Python 2.7_) -* _`system_upgrade/el7toel8` - 2.7, 3.6 (deprecated)_ * `system_upgrade/el8toel9` - 3.6, 3.9 * `system_upgrade/el9toel10` - 3.9, 3.12 diff --git a/docs/source/installation-and-building.md b/docs/source/installation-and-building.md index a74787d5..5bad38e1 100644 --- a/docs/source/installation-and-building.md +++ b/docs/source/installation-and-building.md @@ -11,7 +11,7 @@ To build the RPM e.g. for RHEL 8 systems, execute: ```bash $ BUILD_CONTAINER=el8 make container_build ``` -Possible values for BUILD_CONTAINER are `el7`,`el8`, `el9`. +Possible values for BUILD_CONTAINER are `el8`, `el9`. The built packages can be found under the `packaging/RPMS/` directory. diff --git a/etc/leapp/files/repomap.json b/etc/leapp/files/repomap.json index fc0c42f1..0cd5601a 100644 --- a/etc/leapp/files/repomap.json +++ b/etc/leapp/files/repomap.json @@ -1,5 +1,5 @@ { - "datetime": "202505201636Z", + "datetime": "202507171303Z", "version_format": "1.3.0", "provided_data_streams": [ "4.0" @@ -294,6 +294,24 @@ "target": [ "rhel10-HighAvailability" ] + }, + { + "source": "rhel9-rhui-client-config-server-9", + "target": [ + "rhel10-rhui-client-config-server-10" + ] + }, + { + "source": "rhel9-rhui-microsoft-azure-rhel9", + "target": [ + "rhel10-rhui-microsoft-azure-rhel10" + ] + }, + { + "source": "rhel9-rhui-custom-client-at-alibaba", + "target": [ + "rhel10-rhui-custom-client-at-alibaba" + ] } ] } @@ -343,6 +361,15 @@ "distro": "rhel", "rhui": "aws" }, + { + "major_version": "10", + "repoid": "rhel-10-baseos-rhui-rpms", + "arch": "aarch64", + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "azure" + }, { "major_version": "10", "repoid": "rhel-10-baseos-rhui-rpms", @@ -352,6 
+379,15 @@ "distro": "rhel", "rhui": "aws" }, + { + "major_version": "10", + "repoid": "rhel-10-baseos-rhui-rpms", + "arch": "x86_64", + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "azure" + }, { "major_version": "10", "repoid": "rhel-10-for-aarch64-baseos-beta-rpms", @@ -456,6 +492,15 @@ "repo_type": "rpm", "distro": "rhel" }, + { + "major_version": "10", + "repoid": "rhel-10-for-x86_64-baseos-e4s-rhui-rpms", + "arch": "x86_64", + "channel": "e4s", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "aws" + }, { "major_version": "10", "repoid": "rhel-10-for-x86_64-baseos-e4s-rpms", @@ -479,6 +524,24 @@ "channel": "ga", "repo_type": "rpm", "distro": "rhel" + }, + { + "major_version": "10", + "repoid": "rhui-rhel-10-for-aarch64-baseos-rhui-rpms", + "arch": "aarch64", + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "alibaba" + }, + { + "major_version": "10", + "repoid": "rhui-rhel-10-for-x86_64-baseos-rhui-rpms", + "arch": "x86_64", + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "alibaba" } ] }, @@ -526,6 +589,15 @@ "distro": "rhel", "rhui": "aws" }, + { + "major_version": "10", + "repoid": "rhel-10-appstream-rhui-rpms", + "arch": "aarch64", + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "azure" + }, { "major_version": "10", "repoid": "rhel-10-appstream-rhui-rpms", @@ -535,6 +607,15 @@ "distro": "rhel", "rhui": "aws" }, + { + "major_version": "10", + "repoid": "rhel-10-appstream-rhui-rpms", + "arch": "x86_64", + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "azure" + }, { "major_version": "10", "repoid": "rhel-10-for-aarch64-appstream-beta-rpms", @@ -639,6 +720,15 @@ "repo_type": "rpm", "distro": "rhel" }, + { + "major_version": "10", + "repoid": "rhel-10-for-x86_64-appstream-e4s-rhui-rpms", + "arch": "x86_64", + "channel": "e4s", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "aws" + }, { "major_version": "10", "repoid": 
"rhel-10-for-x86_64-appstream-e4s-rpms", @@ -662,6 +752,24 @@ "channel": "ga", "repo_type": "rpm", "distro": "rhel" + }, + { + "major_version": "10", + "repoid": "rhui-rhel-10-for-aarch64-appstream-rhui-rpms", + "arch": "aarch64", + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "alibaba" + }, + { + "major_version": "10", + "repoid": "rhui-rhel-10-for-x86_64-appstream-rhui-rpms", + "arch": "x86_64", + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "alibaba" } ] }, @@ -741,6 +849,15 @@ "distro": "rhel", "rhui": "aws" }, + { + "major_version": "10", + "repoid": "codeready-builder-for-rhel-10-rhui-rpms", + "arch": "aarch64", + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "azure" + }, { "major_version": "10", "repoid": "codeready-builder-for-rhel-10-rhui-rpms", @@ -750,6 +867,15 @@ "distro": "rhel", "rhui": "aws" }, + { + "major_version": "10", + "repoid": "codeready-builder-for-rhel-10-rhui-rpms", + "arch": "x86_64", + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "azure" + }, { "major_version": "10", "repoid": "codeready-builder-for-rhel-10-s390x-eus-rpms", @@ -813,6 +939,24 @@ "channel": "ga", "repo_type": "rpm", "distro": "centos" + }, + { + "major_version": "10", + "repoid": "rhui-codeready-builder-for-rhel-10-aarch64-rhui-rpms", + "arch": "aarch64", + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "alibaba" + }, + { + "major_version": "10", + "repoid": "rhui-codeready-builder-for-rhel-10-x86_64-rhui-rpms", + "arch": "x86_64", + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "alibaba" } ] }, @@ -923,6 +1067,33 @@ "repo_type": "rpm", "distro": "rhel", "rhui": "aws" + }, + { + "major_version": "10", + "repoid": "rhel-10-supplementary-rhui-rpms", + "arch": "x86_64", + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "azure" + }, + { + "major_version": "10", + "repoid": 
"rhui-rhel-10-for-aarch64-supplementary-rhui-rpms", + "arch": "aarch64", + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "alibaba" + }, + { + "major_version": "10", + "repoid": "rhui-rhel-10-for-x86_64-supplementary-rhui-rpms", + "arch": "x86_64", + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "alibaba" } ] }, @@ -1006,6 +1177,14 @@ "repo_type": "rpm", "distro": "rhel" }, + { + "major_version": "10", + "repoid": "rhel-10-for-aarch64-nfv-e4s-rpms", + "arch": "aarch64", + "channel": "e4s", + "repo_type": "rpm", + "distro": "rhel" + }, { "major_version": "10", "repoid": "rhel-10-for-aarch64-nfv-rpms", @@ -1115,6 +1294,15 @@ "repo_type": "rpm", "distro": "rhel" }, + { + "major_version": "10", + "repoid": "rhel-10-for-x86_64-sap-netweaver-e4s-rhui-rpms", + "arch": "x86_64", + "channel": "e4s", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "aws" + }, { "major_version": "10", "repoid": "rhel-10-for-x86_64-sap-netweaver-e4s-rpms", @@ -1160,6 +1348,15 @@ "repo_type": "rpm", "distro": "rhel" }, + { + "major_version": "10", + "repoid": "rhel-10-for-x86_64-sap-solutions-e4s-rhui-rpms", + "arch": "x86_64", + "channel": "e4s", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "aws" + }, { "major_version": "10", "repoid": "rhel-10-for-x86_64-sap-solutions-e4s-rpms", @@ -1317,6 +1514,15 @@ "repo_type": "rpm", "distro": "rhel" }, + { + "major_version": "10", + "repoid": "rhel-10-for-x86_64-highavailability-e4s-rhui-rpms", + "arch": "x86_64", + "channel": "e4s", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "aws" + }, { "major_version": "10", "repoid": "rhel-10-for-x86_64-highavailability-e4s-rpms", @@ -1340,6 +1546,75 @@ "channel": "ga", "repo_type": "rpm", "distro": "rhel" + }, + { + "major_version": "10", + "repoid": "rhui-rhel-10-for-x86_64-highavailability-rhui-rpms", + "arch": "x86_64", + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "alibaba" + } + ] + }, + { + "pesid": 
"rhel10-rhui-microsoft-azure-rhel10", + "entries": [ + { + "major_version": "10", + "repoid": "rhui-microsoft-azure-rhel10", + "arch": "x86_64", + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "azure" + } + ] + }, + { + "pesid": "rhel10-rhui-client-config-server-10", + "entries": [ + { + "major_version": "10", + "repoid": "rhui-client-config-server-10", + "arch": "aarch64", + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "aws" + }, + { + "major_version": "10", + "repoid": "rhui-client-config-server-10", + "arch": "x86_64", + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "aws" + } + ] + }, + { + "pesid": "rhel10-rhui-custom-client-at-alibaba", + "entries": [ + { + "major_version": "10", + "repoid": "rhui-custom-rhui_client_at_alibaba-rhel-10", + "arch": "aarch64", + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "alibaba" + }, + { + "major_version": "10", + "repoid": "rhui-custom-rhui_client_at_alibaba-rhel-10", + "arch": "x86_64", + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel", + "rhui": "alibaba" } ] }, @@ -5228,6 +5503,14 @@ "repo_type": "rpm", "distro": "rhel" }, + { + "major_version": "9", + "repoid": "rhel-9-for-aarch64-nfv-e4s-rpms", + "arch": "aarch64", + "channel": "e4s", + "repo_type": "rpm", + "distro": "rhel" + }, { "major_version": "9", "repoid": "rhel-9-for-aarch64-nfv-rpms", @@ -5594,6 +5877,14 @@ "repo_type": "rpm", "distro": "rhel" }, + { + "major_version": "9", + "repoid": "rhel-9-for-x86_64-highavailability-aus-rpms", + "arch": "x86_64", + "channel": "aus", + "repo_type": "rpm", + "distro": "rhel" + }, { "major_version": "9", "repoid": "rhel-9-for-x86_64-highavailability-beta-rpms", diff --git a/etc/leapp/transaction/to_reinstall b/etc/leapp/transaction/to_reinstall new file mode 100644 index 00000000..c6694a8e --- /dev/null +++ b/etc/leapp/transaction/to_reinstall @@ -0,0 +1,3 @@ +### List of packages (each on new line) to be reinstalled to the 
upgrade transaction +### Useful for packages that have identical version strings but contain binary changes between major OS versions +### Packages that aren't installed will be skipped diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py index b28ec57c..6882488a 100644 --- a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py +++ b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py @@ -91,7 +91,7 @@ def figure_out_commands_needed_to_add_entry(kernel_path, initramfs_path, args_to '/usr/sbin/grubby', '--add-kernel', '{0}'.format(kernel_path), '--initrd', '{0}'.format(initramfs_path), - '--title', 'RHEL-Upgrade-Initramfs', + '--title', 'ELevate-Upgrade-Initramfs', '--copy-default', '--make-default', '--args', args_to_add_str diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py b/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py index e5f632bc..3e8d8c7b 100644 --- a/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py +++ b/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py @@ -53,7 +53,7 @@ run_args_add = [ '/usr/sbin/grubby', '--add-kernel', '/abc', '--initrd', '/def', - '--title', 'RHEL-Upgrade-Initramfs', + '--title', 'ELevate-Upgrade-Initramfs', '--copy-default', '--make-default', '--args', diff --git a/repos/system_upgrade/common/actors/checkenabledvendorrepos/actor.py b/repos/system_upgrade/common/actors/checkenabledvendorrepos/actor.py new file mode 100644 index 00000000..52f5af9d --- /dev/null +++ b/repos/system_upgrade/common/actors/checkenabledvendorrepos/actor.py @@ -0,0 +1,53 @@ +from leapp.actors import Actor +from leapp.libraries.stdlib import api +from leapp.models 
import ( + RepositoriesFacts, + VendorSourceRepos, + ActiveVendorList, +) +from leapp.tags import FactsPhaseTag, IPUWorkflowTag + + +class CheckEnabledVendorRepos(Actor): + """ + Create a list of vendors whose repositories are present on the system and enabled. + Only those vendors' configurations (new repositories, PES actions, etc.) + will be included in the upgrade process. + """ + + name = "check_enabled_vendor_repos" + consumes = (RepositoriesFacts, VendorSourceRepos) + produces = (ActiveVendorList) + tags = (IPUWorkflowTag, FactsPhaseTag.Before) + + def process(self): + vendor_mapping_data = {} + active_vendors = set() + + # Make a dict for easy mapping of repoid -> corresponding vendor name. + for vendor_src_repodata in api.consume(VendorSourceRepos): + for vendor_src_repo in vendor_src_repodata.source_repoids: + vendor_mapping_data[vendor_src_repo] = vendor_src_repodata.vendor + + # Is the repo listed in the vendor map as from_repoid present on the system? + for repos_facts in api.consume(RepositoriesFacts): + for repo_file in repos_facts.repositories: + for repo_data in repo_file.data: + self.log.debug( + "Looking for repository {} in vendor maps".format(repo_data.repoid) + ) + if repo_data.enabled and repo_data.repoid in vendor_mapping_data: + # If the vendor's repository is present in the system and enabled, count the vendor as active. 
+ new_vendor = vendor_mapping_data[repo_data.repoid] + self.log.debug( + "Repository {} found and enabled, enabling vendor {}".format( + repo_data.repoid, new_vendor + ) + ) + active_vendors.add(new_vendor) + + if active_vendors: + self.log.debug("Active vendor list: {}".format(active_vendors)) + api.produce(ActiveVendorList(data=list(active_vendors))) + else: + self.log.info("No active vendors found, vendor list not generated") diff --git a/repos/system_upgrade/common/actors/cloud/checkgrubenvtofile/actor.py b/repos/system_upgrade/common/actors/cloud/checkgrubenvtofile/actor.py new file mode 100644 index 00000000..62ff7644 --- /dev/null +++ b/repos/system_upgrade/common/actors/cloud/checkgrubenvtofile/actor.py @@ -0,0 +1,34 @@ +from leapp.actors import Actor +from leapp.libraries.actor import checkgrubenvtofile +from leapp.models import ConvertGrubenvTask, FirmwareFacts, HybridImageAzure +from leapp.reporting import Report +from leapp.tags import ChecksPhaseTag, IPUWorkflowTag + + +class CheckGrubenvToFile(Actor): + """ + Check whether grubenv is a symlink on Azure hybrid images using BIOS. + + Azure images provided by Red Hat aim for hybrid (BIOS/EFI) functionality, + however, currently GRUB is not able to see the "grubenv" file if it is a + symlink to a different partition (default on EFI with grub2-efi pkg + installed) and fails on BIOS systems. + + These images have a default relative symlink to EFI partition even when + booted using BIOS and in such cases GRUB is not able to find "grubenv" and + fails to get the kernel cmdline options resulting in system failing to boot + after upgrade. + + The symlink needs to be converted to a normal file with the content of + grubenv on the EFI partition in case the system is using BIOS and running + on the Azure cloud. This action is reported in the preupgrade phase. 
+ + """ + + name = 'check_grubenv_to_file' + consumes = (FirmwareFacts, HybridImageAzure,) + produces = (ConvertGrubenvTask, Report) + tags = (ChecksPhaseTag, IPUWorkflowTag) + + def process(self): + checkgrubenvtofile.process() diff --git a/repos/system_upgrade/common/actors/cloud/checkgrubenvtofile/libraries/checkgrubenvtofile.py b/repos/system_upgrade/common/actors/cloud/checkgrubenvtofile/libraries/checkgrubenvtofile.py new file mode 100644 index 00000000..a4c5ee1c --- /dev/null +++ b/repos/system_upgrade/common/actors/cloud/checkgrubenvtofile/libraries/checkgrubenvtofile.py @@ -0,0 +1,44 @@ +from leapp import reporting +from leapp.libraries.stdlib import api +from leapp.models import ConvertGrubenvTask, FirmwareFacts, HybridImageAzure + + +def process(): + hybrid_image = next(api.consume(HybridImageAzure), None) + + if not hybrid_image: + return + + if not is_bios() or not hybrid_image.grubenv_is_symlink_to_efi: + return + + reporting.create_report([ + reporting.Title( + 'Azure hybrid (BIOS/EFI) image detected. "grubenv" symlink will be converted to a regular file' + ), + reporting.Summary( + 'Leapp detected the system is running on Azure cloud, booted using BIOS and ' + 'the "/boot/grub2/grubenv" file is a symlink to "../efi/EFI/redhat/grubenv". In case of such a ' + 'hybrid image scenario GRUB is not able to locate "grubenv" as it is a symlink to different ' + 'partition and fails to boot. If the system needs to be run in EFI mode later, please re-create ' + 'the relative symlink again.' 
+ ), + reporting.Severity(reporting.Severity.HIGH), + reporting.Groups([ + reporting.Groups.PUBLIC_CLOUD, + reporting.Groups.BOOT + ]), + reporting.RelatedResource('file', '/boot/grub2/grubenv'), + reporting.RelatedResource('file', '/boot/efi/EFI/redhat/grubenv'), + ]) + + api.produce(ConvertGrubenvTask()) + + +def is_bios(): + """ + Check whether system is booted into BIOS + """ + + ff = next(api.consume(FirmwareFacts), None) + return ff and ff.firmware == 'bios' diff --git a/repos/system_upgrade/common/actors/cloud/checkgrubenvtofile/tests/test_checkgrubenvtofile.py b/repos/system_upgrade/common/actors/cloud/checkgrubenvtofile/tests/test_checkgrubenvtofile.py new file mode 100644 index 00000000..a5a203fd --- /dev/null +++ b/repos/system_upgrade/common/actors/cloud/checkgrubenvtofile/tests/test_checkgrubenvtofile.py @@ -0,0 +1,35 @@ +import pytest + +from leapp import reporting +from leapp.libraries.actor import checkgrubenvtofile +from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, produce_mocked +from leapp.libraries.stdlib import api +from leapp.models import FirmwareFacts, HybridImageAzure + +BIOS_FIRMWARE = FirmwareFacts(firmware='bios') +EFI_FIRMWARE = FirmwareFacts(firmware='efi') + + +@pytest.mark.parametrize('is_hybrid', [True, False]) +@pytest.mark.parametrize('is_bios', [True, False]) +@pytest.mark.parametrize('is_symlink', [True, False]) +def test_check_grubenv_to_file(monkeypatch, tmpdir, is_hybrid, is_bios, is_symlink): + + should_report = all([is_hybrid, is_bios, is_symlink]) + + monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) + + firmware = BIOS_FIRMWARE if is_bios else EFI_FIRMWARE + msgs = [firmware] + ([HybridImageAzure(grubenv_is_symlink_to_efi=is_symlink)] if is_hybrid else []) + monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch='x86_64', msgs=msgs)) + monkeypatch.setattr(api, "produce", produce_mocked()) + + checkgrubenvtofile.process() + + if should_report: + assert 
reporting.create_report.called == 1 + assert 'hybrid' in reporting.create_report.report_fields['title'] + assert api.produce.called == 1 + else: + assert reporting.create_report.called == 0 + assert api.produce.called == 0 diff --git a/repos/system_upgrade/common/actors/cloud/checkhybridimage/actor.py b/repos/system_upgrade/common/actors/cloud/checkhybridimage/actor.py deleted file mode 100644 index 3cd2d864..00000000 --- a/repos/system_upgrade/common/actors/cloud/checkhybridimage/actor.py +++ /dev/null @@ -1,24 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor.checkhybridimage import check_hybrid_image -from leapp.models import FirmwareFacts, HybridImage, InstalledRPM -from leapp.reporting import Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class CheckHybridImage(Actor): - """ - Check if the system is using Azure hybrid image. - - These images have a default relative symlink to EFI - partition even when booted using BIOS and in such cases - GRUB is not able find "grubenv" to get the kernel cmdline - options and fails to boot after upgrade`. 
- """ - - name = 'checkhybridimage' - consumes = (InstalledRPM, FirmwareFacts) - produces = (HybridImage, Report) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - check_hybrid_image() diff --git a/repos/system_upgrade/common/actors/cloud/checkhybridimage/libraries/checkhybridimage.py b/repos/system_upgrade/common/actors/cloud/checkhybridimage/libraries/checkhybridimage.py deleted file mode 100644 index a4eb6fa1..00000000 --- a/repos/system_upgrade/common/actors/cloud/checkhybridimage/libraries/checkhybridimage.py +++ /dev/null @@ -1,65 +0,0 @@ -import os - -from leapp import reporting -from leapp.libraries.common import rhui -from leapp.libraries.common.config.version import get_source_major_version -from leapp.libraries.common.rpms import has_package -from leapp.libraries.stdlib import api -from leapp.models import FirmwareFacts, HybridImage, InstalledRPM - -BIOS_PATH = '/boot/grub2/grubenv' -EFI_PATH = '/boot/efi/EFI/redhat/grubenv' - - -def is_grubenv_symlink_to_efi(): - """ - Check whether '/boot/grub2/grubenv' is a relative symlink to - '/boot/efi/EFI/redhat/grubenv'. 
- """ - return os.path.islink(BIOS_PATH) and os.path.realpath(BIOS_PATH) == os.path.realpath(EFI_PATH) - - -def is_azure_agent_installed(): - """Check whether 'WALinuxAgent' package is installed.""" - src_ver_major = get_source_major_version() - - family = rhui.RHUIFamily(rhui.RHUIProvider.AZURE) - azure_setups = rhui.RHUI_SETUPS.get(family, []) - - agent_pkg = None - for setup in azure_setups: - setup_major_ver = str(setup.os_version[0]) - if setup_major_ver == src_ver_major: - agent_pkg = setup.extra_info.get('agent_pkg') - break - - if not agent_pkg: - return False - - return has_package(InstalledRPM, agent_pkg) - - -def is_bios(): - """Check whether system is booted into BIOS""" - ff = next(api.consume(FirmwareFacts), None) - return ff and ff.firmware == 'bios' - - -def check_hybrid_image(): - """Check whether the system is using Azure hybrid image.""" - if all([is_grubenv_symlink_to_efi(), is_azure_agent_installed(), is_bios()]): - api.produce(HybridImage(detected=True)) - reporting.create_report([ - reporting.Title( - 'Azure hybrid (BIOS/EFI) image detected. "grubenv" symlink will be converted to a regular file' - ), - reporting.Summary( - 'Leapp detected the system is running on Azure cloud, booted using BIOS and ' - 'the "/boot/grub2/grubenv" file is a symlink to "../efi/EFI/redhat/grubenv". In case of such a ' - 'hybrid image scenario GRUB is not able to locate "grubenv" as it is a symlink to different ' - 'partition and fails to boot. If the system needs to be run in EFI mode later, please re-create ' - 'the relative symlink again.' 
- ), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([reporting.Groups.PUBLIC_CLOUD]), - ]) diff --git a/repos/system_upgrade/common/actors/cloud/checkhybridimage/tests/test_checkhybridimage.py b/repos/system_upgrade/common/actors/cloud/checkhybridimage/tests/test_checkhybridimage.py deleted file mode 100644 index 16fbb44c..00000000 --- a/repos/system_upgrade/common/actors/cloud/checkhybridimage/tests/test_checkhybridimage.py +++ /dev/null @@ -1,82 +0,0 @@ -import pytest - -from leapp import reporting -from leapp.libraries.actor import checkhybridimage -from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, produce_mocked -from leapp.libraries.stdlib import api -from leapp.models import FirmwareFacts, InstalledRPM, RPM -from leapp.reporting import Report - -RH_PACKAGER = 'Red Hat, Inc. ' -WA_AGENT_RPM = RPM( - name='WALinuxAgent', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51' -) -NO_AGENT_RPM = RPM( - name='NoAgent', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51' -) - -INSTALLED_AGENT = InstalledRPM(items=[WA_AGENT_RPM]) -NOT_INSTALLED_AGENT = InstalledRPM(items=[NO_AGENT_RPM]) - -BIOS_FIRMWARE = FirmwareFacts(firmware='bios') -EFI_FIRMWARE = FirmwareFacts(firmware='efi') - -BIOS_PATH = '/boot/grub2/grubenv' -EFI_PATH = '/boot/efi/EFI/redhat/grubenv' - - -def test_hybrid_image(monkeypatch, tmpdir): - grubenv_efi = tmpdir.join('grubenv_efi') - grubenv_efi.write('grubenv') - - grubenv_boot = tmpdir.join('grubenv_boot') - grubenv_boot.mksymlinkto('grubenv_efi') - - monkeypatch.setattr(checkhybridimage, 'BIOS_PATH', grubenv_boot.strpath) - monkeypatch.setattr(checkhybridimage, 'EFI_PATH', grubenv_efi.strpath) - monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) - 
monkeypatch.setattr( - api, 'current_actor', CurrentActorMocked(arch='x86_64', msgs=[BIOS_FIRMWARE, INSTALLED_AGENT]) - ) - monkeypatch.setattr(api, "produce", produce_mocked()) - - checkhybridimage.check_hybrid_image() - assert reporting.create_report.called == 1 - assert 'hybrid' in reporting.create_report.report_fields['title'] - assert api.produce.called == 1 - - -@pytest.mark.parametrize('is_symlink, realpath_match, is_bios, agent_installed', [ - (False, True, True, True), - (True, False, True, True), - (True, True, False, True), - (True, True, True, False), -]) -def test_no_hybrid_image(monkeypatch, is_symlink, realpath_match, is_bios, agent_installed, tmpdir): - grubenv_efi = tmpdir.join('grubenv_efi') - grubenv_efi.write('grubenv') - grubenv_efi_false = tmpdir.join('grubenv_efi_false') - grubenv_efi.write('nope') - grubenv_boot = tmpdir.join('grubenv_boot') - - grubenv_target = grubenv_efi if realpath_match else grubenv_efi_false - - if is_symlink: - grubenv_boot.mksymlinkto(grubenv_target) - - firmw = BIOS_FIRMWARE if is_bios else EFI_FIRMWARE - inst_rpms = INSTALLED_AGENT if agent_installed else NOT_INSTALLED_AGENT - - monkeypatch.setattr(checkhybridimage, 'BIOS_PATH', grubenv_boot.strpath) - monkeypatch.setattr(checkhybridimage, 'EFI_PATH', grubenv_efi.strpath) - monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) - monkeypatch.setattr( - api, 'current_actor', CurrentActorMocked(arch='x86_64', msgs=[firmw, inst_rpms]) - ) - monkeypatch.setattr(api, "produce", produce_mocked()) - - checkhybridimage.check_hybrid_image() - assert not reporting.create_report.called - assert not api.produce.called diff --git a/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py b/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py index 64e36e08..ea154173 100644 --- a/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py +++ 
b/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py @@ -254,10 +254,16 @@ def customize_rhui_setup_for_aws(rhui_family, setup_info): # The leapp-rhui-aws will provide all necessary files to access entire RHEL8 content setup_info.bootstrap_target_client = False return + if target_version == '9': + amazon_plugin_copy_task = CopyFile(src='/usr/lib/python3.9/site-packages/dnf-plugins/amazon-id.py', + dst='/usr/lib/python3.6/site-packages/dnf-plugins/') + setup_info.postinstall_tasks.files_to_copy.append(amazon_plugin_copy_task) + return - amazon_plugin_copy_task = CopyFile(src='/usr/lib/python3.9/site-packages/dnf-plugins/amazon-id.py', - dst='/usr/lib/python3.6/site-packages/dnf-plugins/') - setup_info.postinstall_tasks.files_to_copy.append(amazon_plugin_copy_task) + # For 9>10 and higher we give up trying to do client swapping since the client has too many dependencies + # from target system's repositories. Our leapp-rhui-aws package will carry all of the repos provided + # by the client. + setup_info.bootstrap_target_client = False def produce_rhui_info_to_setup_target(rhui_family, source_setup_desc, target_setup_desc): diff --git a/repos/system_upgrade/common/actors/cloud/convertgrubenvtofile/actor.py b/repos/system_upgrade/common/actors/cloud/convertgrubenvtofile/actor.py new file mode 100644 index 00000000..68ef54bb --- /dev/null +++ b/repos/system_upgrade/common/actors/cloud/convertgrubenvtofile/actor.py @@ -0,0 +1,21 @@ +from leapp.actors import Actor +from leapp.libraries.actor import convertgrubenvtofile +from leapp.models import ConvertGrubenvTask +from leapp.tags import FinalizationPhaseTag, IPUWorkflowTag + + +class ConvertGrubenvToFile(Actor): + """ + Convert "grubenv" symlink to a regular file on Azure hybrid images using BIOS. + + For more information see CheckGrubenvToFile actor. 
+ + """ + + name = 'convert_grubenv_to_file' + consumes = (ConvertGrubenvTask,) + produces = () + tags = (FinalizationPhaseTag, IPUWorkflowTag) + + def process(self): + convertgrubenvtofile.process() diff --git a/repos/system_upgrade/common/actors/cloud/grubenvtofile/libraries/grubenvtofile.py b/repos/system_upgrade/common/actors/cloud/convertgrubenvtofile/libraries/convertgrubenvtofile.py similarity index 79% rename from repos/system_upgrade/common/actors/cloud/grubenvtofile/libraries/grubenvtofile.py rename to repos/system_upgrade/common/actors/cloud/convertgrubenvtofile/libraries/convertgrubenvtofile.py index 4d699ec3..1803c6c7 100644 --- a/repos/system_upgrade/common/actors/cloud/grubenvtofile/libraries/grubenvtofile.py +++ b/repos/system_upgrade/common/actors/cloud/convertgrubenvtofile/libraries/convertgrubenvtofile.py @@ -1,9 +1,17 @@ from leapp.libraries.stdlib import api, CalledProcessError, run +from leapp.models import ConvertGrubenvTask BIOS_PATH = '/boot/grub2/grubenv' EFI_PATH = '/boot/efi/EFI/redhat/grubenv' +def process(): + convert_grubenv_task = next(api.consume(ConvertGrubenvTask), None) + + if convert_grubenv_task: + grubenv_to_file() + + def grubenv_to_file(): try: run(['unlink', BIOS_PATH]) diff --git a/repos/system_upgrade/common/actors/cloud/convertgrubenvtofile/tests/test_convertgrubenvtofile.py b/repos/system_upgrade/common/actors/cloud/convertgrubenvtofile/tests/test_convertgrubenvtofile.py new file mode 100644 index 00000000..c4534bd6 --- /dev/null +++ b/repos/system_upgrade/common/actors/cloud/convertgrubenvtofile/tests/test_convertgrubenvtofile.py @@ -0,0 +1,51 @@ +import pytest + +from leapp.libraries.actor import convertgrubenvtofile +from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked +from leapp.libraries.stdlib import api, CalledProcessError +from leapp.models import ConvertGrubenvTask + + +def raise_call_error(args=None): + raise CalledProcessError( + message='A Leapp Command Error occurred.', + 
command=args, + result={'signal': None, 'exit_code': 1, 'pid': 0, 'stdout': 'fake', 'stderr': 'fake'} + ) + + +class run_mocked(object): + def __init__(self, raise_err=False): + self.called = 0 + self.args = [] + self.raise_err = raise_err + + def __call__(self, *args): + self.called += 1 + self.args.append(args) + if self.raise_err: + raise_call_error(args) + + +def test_grubenv_to_file(monkeypatch): + monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch='x86_64', msgs=[ConvertGrubenvTask()])) + monkeypatch.setattr(convertgrubenvtofile, 'run', run_mocked(raise_err=False)) + convertgrubenvtofile.process() + assert convertgrubenvtofile.run.called == 2 + + +def test_no_grubenv_to_file(monkeypatch): + monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch='x86_64', msgs=[])) + monkeypatch.setattr(convertgrubenvtofile, 'run', run_mocked(raise_err=False)) + convertgrubenvtofile.process() + assert convertgrubenvtofile.run.called == 0 + + +def test_fail_grubenv_to_file(monkeypatch): + monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch='x86_64', msgs=[ConvertGrubenvTask()])) + monkeypatch.setattr(convertgrubenvtofile, 'run', run_mocked(raise_err=True)) + monkeypatch.setattr(api, 'current_logger', logger_mocked()) + convertgrubenvtofile.grubenv_to_file() + + assert convertgrubenvtofile.run.called == 1 + assert api.current_logger.warnmsg[0].startswith('Could not unlink') diff --git a/repos/system_upgrade/common/actors/cloud/grubenvtofile/actor.py b/repos/system_upgrade/common/actors/cloud/grubenvtofile/actor.py deleted file mode 100644 index fc94219c..00000000 --- a/repos/system_upgrade/common/actors/cloud/grubenvtofile/actor.py +++ /dev/null @@ -1,28 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor.grubenvtofile import grubenv_to_file -from leapp.models import HybridImage -from leapp.tags import FinalizationPhaseTag, IPUWorkflowTag - - -class GrubenvToFile(Actor): - """ - Convert "grubenv" symlink to a regular file 
on Azure hybrid images using BIOS. - - Azure images provided by Red Hat aim for hybrid (BIOS/EFI) functionality, - however, currently GRUB is not able to see the "grubenv" file if it is a symlink - to a different partition (default on EFI with grub2-efi pkg installed) and - fails on BIOS systems. This actor converts the symlink to the normal file - with the content of grubenv on the EFI partition in case the system is using BIOS - and running on the Azure cloud. This action is reported in the preupgrade phase. - """ - - name = 'grubenvtofile' - consumes = (HybridImage,) - produces = () - tags = (FinalizationPhaseTag, IPUWorkflowTag) - - def process(self): - grubenv_msg = next(self.consume(HybridImage), None) - - if grubenv_msg and grubenv_msg.detected: - grubenv_to_file() diff --git a/repos/system_upgrade/common/actors/cloud/grubenvtofile/tests/test_grubenvtofile.py b/repos/system_upgrade/common/actors/cloud/grubenvtofile/tests/test_grubenvtofile.py deleted file mode 100644 index 807f5efa..00000000 --- a/repos/system_upgrade/common/actors/cloud/grubenvtofile/tests/test_grubenvtofile.py +++ /dev/null @@ -1,43 +0,0 @@ -import pytest - -from leapp.libraries.actor import grubenvtofile -from leapp.libraries.common.testutils import logger_mocked -from leapp.libraries.stdlib import api, CalledProcessError -from leapp.models import HybridImage - - -def raise_call_error(args=None): - raise CalledProcessError( - message='A Leapp Command Error occurred.', - command=args, - result={'signal': None, 'exit_code': 1, 'pid': 0, 'stdout': 'fake', 'stderr': 'fake'} - ) - - -class run_mocked(object): - def __init__(self, raise_err=False): - self.called = 0 - self.args = [] - self.raise_err = raise_err - - def __call__(self, *args): - self.called += 1 - self.args.append(args) - if self.raise_err: - raise_call_error(args) - - -def test_grubenv_to_file(monkeypatch): - monkeypatch.setattr(api, 'consume', lambda x: iter([HybridImage()])) - monkeypatch.setattr(grubenvtofile, 'run', 
run_mocked()) - grubenvtofile.grubenv_to_file() - assert grubenvtofile.run.called == 2 - - -def test_fail_grubenv_to_file(monkeypatch): - monkeypatch.setattr(api, 'consume', lambda x: iter([HybridImage()])) - monkeypatch.setattr(grubenvtofile, 'run', run_mocked(raise_err=True)) - monkeypatch.setattr(api, 'current_logger', logger_mocked()) - grubenvtofile.grubenv_to_file() - assert grubenvtofile.run.called == 1 - assert api.current_logger.warnmsg[0].startswith('Could not unlink') diff --git a/repos/system_upgrade/common/actors/cloud/scanhybridimage/actor.py b/repos/system_upgrade/common/actors/cloud/scanhybridimage/actor.py new file mode 100644 index 00000000..b1848141 --- /dev/null +++ b/repos/system_upgrade/common/actors/cloud/scanhybridimage/actor.py @@ -0,0 +1,19 @@ +from leapp.actors import Actor +from leapp.libraries.actor.scanhybridimage import scan_hybrid_image +from leapp.models import FirmwareFacts, HybridImageAzure, InstalledRPM +from leapp.reporting import Report +from leapp.tags import FactsPhaseTag, IPUWorkflowTag + + +class ScanHybridImageAzure(Actor): + """ + Check if the system is using Azure hybrid image. 
+ """ + + name = 'scan_hybrid_image_azure' + consumes = (InstalledRPM, FirmwareFacts) + produces = (HybridImageAzure, Report) + tags = (FactsPhaseTag, IPUWorkflowTag) + + def process(self): + scan_hybrid_image() diff --git a/repos/system_upgrade/common/actors/cloud/scanhybridimage/libraries/scanhybridimage.py b/repos/system_upgrade/common/actors/cloud/scanhybridimage/libraries/scanhybridimage.py new file mode 100644 index 00000000..a37ab415 --- /dev/null +++ b/repos/system_upgrade/common/actors/cloud/scanhybridimage/libraries/scanhybridimage.py @@ -0,0 +1,102 @@ +import os + +from leapp.libraries.common import rhui +from leapp.libraries.common.config.version import get_source_major_version +from leapp.libraries.common.rpms import has_package +from leapp.libraries.stdlib import api, CalledProcessError, run +from leapp.models import FirmwareFacts, HybridImageAzure, InstalledRPM + +EFI_MOUNTPOINT = '/boot/efi/' +AZURE_HYPERVISOR_ID = 'microsoft' + +GRUBENV_BIOS_PATH = '/boot/grub2/grubenv' +GRUBENV_EFI_PATH = '/boot/efi/EFI/redhat/grubenv' + + +def scan_hybrid_image(): + """ + Check whether the system is using Azure hybrid image. + """ + + hybrid_image_condition_1 = is_azure_agent_installed() and is_bios() + hybrid_image_condition_2 = has_efi_partition() and is_bios() and is_running_on_azure_hypervisor() + + if any([hybrid_image_condition_1, hybrid_image_condition_2]): + api.produce( + HybridImageAzure( + grubenv_is_symlink_to_efi=is_grubenv_symlink_to_efi() + ) + ) + + +def is_azure_agent_installed(): + """ + Check whether 'WALinuxAgent' package is installed. 
+ """ + + src_ver_major = get_source_major_version() + + family = rhui.RHUIFamily(rhui.RHUIProvider.AZURE) + azure_setups = rhui.RHUI_SETUPS.get(family, []) + + agent_pkg = None + for setup in azure_setups: + setup_major_ver = str(setup.os_version[0]) + if setup_major_ver == src_ver_major: + agent_pkg = setup.extra_info.get('agent_pkg') + break + + if not agent_pkg: + return False + + return has_package(InstalledRPM, agent_pkg) + + +def has_efi_partition(): + """ + Check whether ESP partition exists and is mounted. + """ + + return os.path.exists(EFI_MOUNTPOINT) and os.path.ismount(EFI_MOUNTPOINT) + + +def is_bios(): + """ + Check whether system is booted into BIOS + """ + + ff = next(api.consume(FirmwareFacts), None) + return ff and ff.firmware == 'bios' + + +def is_running_on_azure_hypervisor(): + """ + Check if system is running on Azure hypervisor (Hyper-V) + """ + + return detect_virt() == AZURE_HYPERVISOR_ID + + +def detect_virt(): + """ + Detect execution in a virtualized environment + """ + + try: + result = run(['systemd-detect-virt']) + except CalledProcessError as e: + api.current_logger().warning('Unable to detect virtualization environment! Error: {}'.format(e)) + return '' + + return result['stdout'] + + +def is_grubenv_symlink_to_efi(): + """ + Check whether '/boot/grub2/grubenv' is a relative symlink to '/boot/efi/EFI/redhat/grubenv'. 
+ """ + + is_symlink = os.path.islink(GRUBENV_BIOS_PATH) + realpaths_match = os.path.realpath(GRUBENV_BIOS_PATH) == os.path.realpath(GRUBENV_EFI_PATH) + + return is_symlink and realpaths_match diff --git a/repos/system_upgrade/common/actors/cloud/scanhybridimage/tests/test_scanhybridimage.py b/repos/system_upgrade/common/actors/cloud/scanhybridimage/tests/test_scanhybridimage.py new file mode 100644 index 00000000..a0f6fd4c --- /dev/null +++ b/repos/system_upgrade/common/actors/cloud/scanhybridimage/tests/test_scanhybridimage.py @@ -0,0 +1,124 @@ +import os + +import pytest + +from leapp import reporting +from leapp.libraries.actor import scanhybridimage +from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, logger_mocked, produce_mocked +from leapp.libraries.stdlib import api, CalledProcessError +from leapp.models import FirmwareFacts, HybridImageAzure, InstalledRPM, RPM + +RH_PACKAGER = 'Red Hat, Inc. ' +WA_AGENT_RPM = RPM( + name='WALinuxAgent', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', + pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51' +) +NO_AGENT_RPM = RPM( + name='NoAgent', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', + pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51' +) + +INSTALLED_AGENT = InstalledRPM(items=[WA_AGENT_RPM]) +NOT_INSTALLED_AGENT = InstalledRPM(items=[NO_AGENT_RPM]) + +BIOS_FIRMWARE = FirmwareFacts(firmware='bios') +EFI_FIRMWARE = FirmwareFacts(firmware='efi') + +BIOS_PATH = '/boot/grub2/grubenv' +EFI_PATH = '/boot/efi/EFI/redhat/grubenv' + + +def raise_call_error(args=None): + raise CalledProcessError( + message='A Leapp Command Error occurred.', + command=args, + result={'signal': None, 'exit_code': 1, 'pid': 0, 'stdout': 'fake', 'stderr': 'fake'} + ) + + +class run_mocked(object): + def __init__(self, hypervisor='', raise_err=False): + self.hypervisor = hypervisor + 
self.called = 0 + self.args = [] + self.raise_err = raise_err + + def __call__(self, *args): # pylint: disable=inconsistent-return-statements + self.called += 1 + self.args.append(args) + + if self.raise_err: + raise_call_error(args) + + if args[0] == ['systemd-detect-virt']: + return {'stdout': self.hypervisor} + + raise AttributeError("Unexpected command supplied!") + + +@pytest.mark.parametrize('hypervisor, expected', [('none', False), ('microsoft', True)]) +def test_is_running_on_azure_hypervisor(monkeypatch, hypervisor, expected): + monkeypatch.setattr(scanhybridimage, 'run', run_mocked(hypervisor)) + + assert scanhybridimage.is_running_on_azure_hypervisor() == expected + + +def test_is_running_on_azure_hypervisor_error(monkeypatch): + monkeypatch.setattr(scanhybridimage, 'run', run_mocked('microsoft', raise_err=True)) + monkeypatch.setattr(api, 'current_logger', logger_mocked()) + + result = scanhybridimage.is_running_on_azure_hypervisor() + + assert result is False + assert any('Unable to detect' in msg for msg in api.current_logger.warnmsg) + + +@pytest.mark.parametrize('is_symlink', [True, False]) +@pytest.mark.parametrize('realpath_match', [True, False]) +def test_is_grubenv_symlink_to_efi(monkeypatch, is_symlink, realpath_match): + grubenv_efi_false = '/other/grub/grubenv' + + monkeypatch.setattr(scanhybridimage, 'GRUBENV_BIOS_PATH', BIOS_PATH) + monkeypatch.setattr(scanhybridimage, 'GRUBENV_EFI_PATH', EFI_PATH) + + monkeypatch.setattr(os.path, 'islink', lambda path: is_symlink) + + def mocked_realpath(path): + if realpath_match: + return EFI_PATH + + return grubenv_efi_false if path == EFI_PATH else EFI_PATH + + monkeypatch.setattr(os.path, 'realpath', mocked_realpath) + + result = scanhybridimage.is_grubenv_symlink_to_efi() + + assert result == (is_symlink and realpath_match) + + +@pytest.mark.parametrize('is_bios', [True, False]) +@pytest.mark.parametrize('has_efi_partition', [True, False]) +@pytest.mark.parametrize('agent_installed', [True, False]) 
+@pytest.mark.parametrize('is_microsoft', [True, False]) +@pytest.mark.parametrize('is_symlink', [True, False]) +def test_hybrid_image(monkeypatch, tmpdir, is_bios, has_efi_partition, agent_installed, is_microsoft, is_symlink): + should_produce = (is_microsoft and is_bios and has_efi_partition) or (agent_installed and is_bios) + + monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) + msgs = [ + BIOS_FIRMWARE if is_bios else EFI_FIRMWARE, + INSTALLED_AGENT if agent_installed else NOT_INSTALLED_AGENT + ] + monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch='x86_64', msgs=msgs)) + monkeypatch.setattr(api, 'produce', produce_mocked()) + monkeypatch.setattr(scanhybridimage, 'has_efi_partition', lambda: has_efi_partition) + monkeypatch.setattr(scanhybridimage, 'is_running_on_azure_hypervisor', lambda: is_microsoft) + monkeypatch.setattr(scanhybridimage, 'is_grubenv_symlink_to_efi', lambda: is_symlink) + + scanhybridimage.scan_hybrid_image() + + if should_produce: + assert api.produce.called == 1 + assert HybridImageAzure(grubenv_is_symlink_to_efi=is_symlink) in api.produce.model_instances + else: + assert not api.produce.called diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh index 56a94b5d..46c5d9b6 100755 --- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh +++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh @@ -390,4 +390,3 @@ getarg 'rd.break=leapp-logs' 'rd.upgrade.break=leapp-finish' && { sync mount -o "remount,$old_opts" "$NEWROOT" exit $result - diff --git a/repos/system_upgrade/common/actors/distributionsignedrpmscanner/actor.py b/repos/system_upgrade/common/actors/distributionsignedrpmscanner/actor.py index 56016513..7ae1dd5a 
100644 --- a/repos/system_upgrade/common/actors/distributionsignedrpmscanner/actor.py +++ b/repos/system_upgrade/common/actors/distributionsignedrpmscanner/actor.py @@ -1,6 +1,6 @@ from leapp.actors import Actor from leapp.libraries.actor import distributionsignedrpmscanner -from leapp.models import DistributionSignedRPM, InstalledRedHatSignedRPM, InstalledRPM, InstalledUnsignedRPM +from leapp.models import DistributionSignedRPM, InstalledRedHatSignedRPM, InstalledRPM, InstalledUnsignedRPM, VendorSignatures from leapp.tags import FactsPhaseTag, IPUWorkflowTag from leapp.utils.deprecation import suppress_deprecation @@ -8,7 +8,7 @@ from leapp.utils.deprecation import suppress_deprecation @suppress_deprecation(InstalledRedHatSignedRPM) class DistributionSignedRpmScanner(Actor): """ - Provide data about distribution signed & unsigned RPM packages. + Provide data about distribution plus vendors signed & unsigned RPM packages. For various checks and actions done during the upgrade it's important to know what packages are signed by GPG keys of the installed linux system @@ -22,11 +22,18 @@ class DistributionSignedRpmScanner(Actor): common/files/distro//gpg_signatures.json where is distribution ID of the installed system (e.g. centos, rhel). + Fingerprints of vendors GPG keys are stored under + /etc/leapp/files/vendors.d/.sigs + where is name of the vendor (e.g. mariadb, postgresql). + + The "Distribution" in the name of the actor is a historical artifact - the actor + is used for both distribution and all vendors present in config files. + If the file for the installed distribution is not find, end with error. 
""" name = 'distribution_signed_rpm_scanner' - consumes = (InstalledRPM,) + consumes = (InstalledRPM, VendorSignatures) produces = (DistributionSignedRPM, InstalledRedHatSignedRPM, InstalledUnsignedRPM,) tags = (IPUWorkflowTag, FactsPhaseTag) diff --git a/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py b/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py index f42909f0..6383a56f 100644 --- a/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py +++ b/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py @@ -1,17 +1,117 @@ +import os +import re + +from leapp.libraries.stdlib import run, api from leapp.actors import Actor -from leapp.libraries.common import efi_reboot_fix +from leapp.models import InstalledTargetKernelVersion, KernelCmdlineArg, FirmwareFacts, MountEntry from leapp.tags import FinalizationPhaseTag, IPUWorkflowTag +from leapp.exceptions import StopActorExecutionError class EfiFinalizationFix(Actor): """ - Adjust EFI boot entry for final reboot + Ensure that EFI boot order is updated, which is particularly necessary + when upgrading to a different OS distro. Also rebuilds grub config + if necessary. 
""" name = 'efi_finalization_fix' - consumes = () + consumes = (KernelCmdlineArg, InstalledTargetKernelVersion, FirmwareFacts, MountEntry) produces = () - tags = (FinalizationPhaseTag, IPUWorkflowTag) + tags = (FinalizationPhaseTag.Before, IPUWorkflowTag) def process(self): - efi_reboot_fix.maybe_emit_updated_boot_entry() + is_system_efi = False + ff = next(self.consume(FirmwareFacts), None) + + dirname = { + 'AlmaLinux': 'almalinux', + 'CentOS Linux': 'centos', + 'CentOS Stream': 'centos', + 'Oracle Linux Server': 'redhat', + 'Red Hat Enterprise Linux': 'redhat', + 'Rocky Linux': 'rocky', + 'Scientific Linux': 'redhat', + } + + efi_shimname_dict = { + 'x86_64': 'shimx64.efi', + 'aarch64': 'shimaa64.efi' + } + + def devparts(dev): + """ + NVMe block devices aren't named like SCSI/ATA/etc block devices and must be parsed differently. + SCSI/ATA/etc devices have a syntax resembling /dev/sdb4 for the 4th partition on the 2nd disk. + NVMe devices have a syntax resembling /dev/nvme0n2p4 for the 4th partition on the 2nd disk. 
+ """ + if '/dev/nvme' in dev: + """ + NVMe + """ + part = next(re.finditer(r'p\d+$', dev)).group(0) + dev = dev[:-len(part)] + part = part[1:] + else: + """ + Non-NVMe (SCSI, ATA, etc) + """ + part = next(re.finditer(r'\d+$', dev)).group(0) + dev = dev[:-len(part)] + return [dev, part]; + + with open('/etc/system-release', 'r') as sr: + release_line = next(line for line in sr if 'release' in line) + distro = release_line.split(' release ', 1)[0] + + efi_bootentry_label = distro + distro_dir = dirname.get(distro, 'default') + shim_filename = efi_shimname_dict.get(api.current_actor().configuration.architecture, 'shimx64.efi') + + shim_path = '/boot/efi/EFI/' + distro_dir + '/' + shim_filename + grub_cfg_path = '/boot/efi/EFI/' + distro_dir + '/grub.cfg' + bootmgr_path = '\\EFI\\' + distro_dir + '\\' + shim_filename + + has_efibootmgr = os.path.exists('/sbin/efibootmgr') + has_shim = os.path.exists(shim_path) + has_grub_cfg = os.path.exists(grub_cfg_path) + + if not ff: + raise StopActorExecutionError( + 'Could not identify system firmware', + details={'details': 'Actor did not receive FirmwareFacts message.'} + ) + + if not has_efibootmgr: + return + + for fact in self.consume(FirmwareFacts): + if fact.firmware == 'efi': + is_system_efi = True + break + + if is_system_efi and has_shim: + efidevlist = [] + with open('/proc/mounts', 'r') as fp: + for line in fp: + if '/boot/efi' in line: + efidevpath = line.split(' ', 1)[0] + efidevpart = efidevpath.split('/')[-1] + if os.path.exists('/proc/mdstat'): + with open('/proc/mdstat', 'r') as mds: + for line in mds: + if line.startswith(efidevpart): + mddev = line.split(' ') + for md in mddev: + if '[' in md: + efimd = md.split('[', 1)[0] + efidp = efidevpath.replace(efidevpart, efimd) + efidevlist.append(efidp) + if len(efidevlist) == 0: + efidevlist.append(efidevpath) + for devpath in efidevlist: + efidev, efipart = devparts(devpath) + run(['/sbin/efibootmgr', '-c', '-d', efidev, '-p', efipart, '-l', bootmgr_path, '-L', 
efi_bootentry_label]) + + if not has_grub_cfg: + run(['/sbin/grub2-mkconfig', '-o', grub_cfg_path]) diff --git a/repos/system_upgrade/common/actors/filterrpmtransactionevents/actor.py b/repos/system_upgrade/common/actors/filterrpmtransactionevents/actor.py index 582a5821..18f2c33f 100644 --- a/repos/system_upgrade/common/actors/filterrpmtransactionevents/actor.py +++ b/repos/system_upgrade/common/actors/filterrpmtransactionevents/actor.py @@ -32,6 +32,7 @@ class FilterRpmTransactionTasks(Actor): to_remove = set() to_keep = set() to_upgrade = set() + to_reinstall = set() modules_to_enable = {} modules_to_reset = {} for event in self.consume(RpmTransactionTasks, PESRpmTransactionTasks): @@ -39,13 +40,14 @@ class FilterRpmTransactionTasks(Actor): to_install.update(event.to_install) to_remove.update(installed_pkgs.intersection(event.to_remove)) to_keep.update(installed_pkgs.intersection(event.to_keep)) + to_reinstall.update(installed_pkgs.intersection(event.to_reinstall)) modules_to_enable.update({'{}:{}'.format(m.name, m.stream): m for m in event.modules_to_enable}) modules_to_reset.update({'{}:{}'.format(m.name, m.stream): m for m in event.modules_to_reset}) to_remove.difference_update(to_keep) # run upgrade for the rest of RH signed pkgs which we do not have rule for - to_upgrade = installed_pkgs - (to_install | to_remove) + to_upgrade = installed_pkgs - (to_install | to_remove | to_reinstall) self.produce(FilteredRpmTransactionTasks( local_rpms=list(local_rpms), @@ -53,5 +55,6 @@ class FilterRpmTransactionTasks(Actor): to_remove=list(to_remove), to_keep=list(to_keep), to_upgrade=list(to_upgrade), + to_reinstall=list(to_reinstall), modules_to_reset=list(modules_to_reset.values()), modules_to_enable=list(modules_to_enable.values()))) diff --git a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py index 32e4527b..1e595e9a 100644 --- 
a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py +++ b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py @@ -152,11 +152,11 @@ def _report(title, summary, keys, inhibitor=False): ) hint = ( 'Check the path to the listed GPG keys is correct, the keys are valid and' - ' import them into the host RPM DB or store them inside the {} directory' + ' import them into the host RPM DB or store them inside one of the {} directories' ' prior the upgrade.' ' If you want to proceed the in-place upgrade without checking any RPM' ' signatures, execute leapp with the `--nogpgcheck` option.' - .format(get_path_to_gpg_certs()) + .format(','.join(get_path_to_gpg_certs())) ) groups = [reporting.Groups.REPOSITORY] if inhibitor: @@ -188,7 +188,7 @@ def _report_missing_keys(keys): summary = ( 'Some of the target repositories require GPG keys that are not installed' ' in the current RPM DB or are not stored in the {trust_dir} directory.'
- .format(trust_dir=get_path_to_gpg_certs()) + .format(trust_dir=','.join(get_path_to_gpg_certs())) ) _report('Detected unknown GPG keys for target system repositories', summary, keys, True) @@ -262,11 +262,12 @@ def _report_repos_missing_keys(repos): def register_dnfworkaround(): - api.produce(DNFWorkaround( - display_name='import trusted gpg keys to RPM DB', - script_path=api.current_actor().get_common_tool_path('importrpmgpgkeys'), - script_args=[get_path_to_gpg_certs()], - )) + for trust_certs_dir in get_path_to_gpg_certs(): + api.produce(DNFWorkaround( + display_name='import trusted gpg keys to RPM DB', + script_path=api.current_actor().get_common_tool_path('importrpmgpgkeys'), + script_args=[trust_certs_dir], + )) @suppress_deprecation(TMPTargetRepositoriesFacts) diff --git a/repos/system_upgrade/common/actors/peseventsscanner/actor.py b/repos/system_upgrade/common/actors/peseventsscanner/actor.py index f801f1a1..cb911471 100644 --- a/repos/system_upgrade/common/actors/peseventsscanner/actor.py +++ b/repos/system_upgrade/common/actors/peseventsscanner/actor.py @@ -10,7 +10,8 @@ from leapp.models import ( RepositoriesMapping, RepositoriesSetupTasks, RHUIInfo, - RpmTransactionTasks + RpmTransactionTasks, + ActiveVendorList, ) from leapp.reporting import Report from leapp.tags import FactsPhaseTag, IPUWorkflowTag @@ -33,6 +34,7 @@ class PesEventsScanner(Actor): RepositoriesMapping, RHUIInfo, RpmTransactionTasks, + ActiveVendorList, ) produces = (ConsumedDataAsset, PESRpmTransactionTasks, RepositoriesSetupTasks, Report) tags = (IPUWorkflowTag, FactsPhaseTag) diff --git a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_event_parsing.py b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_event_parsing.py index f24dda68..7ee5d016 100644 --- a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_event_parsing.py +++ b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_event_parsing.py @@ -58,6 +58,7 @@ 
class Action(IntEnum): MERGED = 5 MOVED = 6 RENAMED = 7 + REINSTALLED = 8 def get_pes_events(pes_json_directory, pes_json_filename): @@ -72,13 +73,14 @@ def get_pes_events(pes_json_directory, pes_json_filename): # a case as we have no work to do in such a case here. events_data = fetch.load_data_asset(api.current_actor(), pes_json_filename, + asset_directory=pes_json_directory, asset_fulltext_name='PES events file', docs_url='', docs_title='') if not events_data: return None - if not events_data.get('packageinfo'): + if events_data.get('packageinfo') is None: raise ValueError('Found PES data with invalid structure') all_events = list(chain(*[parse_entry(entry) for entry in events_data['packageinfo']])) diff --git a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py index e6741293..7a7e9ebf 100644 --- a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py +++ b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py @@ -1,5 +1,6 @@ from collections import defaultdict, namedtuple from functools import partial +import os from leapp import reporting from leapp.exceptions import StopActorExecutionError @@ -7,6 +8,7 @@ from leapp.libraries.actor import peseventsscanner_repomap from leapp.libraries.actor.pes_event_parsing import Action, get_pes_events, Package from leapp.libraries.common import rpms from leapp.libraries.common.config import version +from leapp.libraries.common.repomaputils import combine_repomap_messages from leapp.libraries.stdlib import api from leapp.libraries.stdlib.config import is_verbose from leapp.models import ( @@ -20,7 +22,8 @@ from leapp.models import ( RepositoriesMapping, RepositoriesSetupTasks, RHUIInfo, - RpmTransactionTasks + RpmTransactionTasks, + ActiveVendorList, ) SKIPPED_PKGS_MSG = ( @@ -31,8 +34,9 @@ SKIPPED_PKGS_MSG = ( 'for details.\nThe list of these 
packages:' ) +VENDORS_DIR = "/etc/leapp/files/vendors.d" -TransactionConfiguration = namedtuple('TransactionConfiguration', ('to_install', 'to_remove', 'to_keep')) +TransactionConfiguration = namedtuple('TransactionConfiguration', ('to_install', 'to_remove', 'to_keep', 'to_reinstall')) def get_cloud_provider_name(cloud_provider_variant): @@ -86,7 +90,7 @@ def get_transaction_configuration(): :return: TransactionConfiguration """ - transaction_configuration = TransactionConfiguration(to_install=set(), to_remove=set(), to_keep=set()) + transaction_configuration = TransactionConfiguration(to_install=set(), to_remove=set(), to_keep=set(), to_reinstall=set()) _Pkg = partial(Package, repository=None, modulestream=None) @@ -94,6 +98,7 @@ def get_transaction_configuration(): transaction_configuration.to_install.update(_Pkg(name=pkg_name) for pkg_name in tasks.to_install) transaction_configuration.to_remove.update(_Pkg(name=pkg_name) for pkg_name in tasks.to_remove) transaction_configuration.to_keep.update(_Pkg(name=pkg_name) for pkg_name in tasks.to_keep) + transaction_configuration.to_reinstall.update(_Pkg(name=pkg_name) for pkg_name in tasks.to_reinstall) return transaction_configuration @@ -133,6 +138,7 @@ def compute_pkg_changes_between_consequent_releases(source_installed_pkgs, logger = api.current_logger() # Start with the installed packages and modify the set according to release events target_pkgs = set(source_installed_pkgs) + pkgs_to_reinstall = set() release_events = [e for e in events if e.to_release == release] @@ -176,9 +182,12 @@ def compute_pkg_changes_between_consequent_releases(source_installed_pkgs, target_pkgs = target_pkgs.difference(event.out_pkgs) target_pkgs = target_pkgs.union(event.out_pkgs) + if (event.action == Action.REINSTALLED and is_any_in_pkg_present): + pkgs_to_reinstall = pkgs_to_reinstall.union(event.in_pkgs) + pkgs_to_demodularize = pkgs_to_demodularize.difference(event.in_pkgs) - return (target_pkgs, pkgs_to_demodularize) + return 
(target_pkgs, pkgs_to_demodularize, pkgs_to_reinstall) def remove_undesired_events(events, relevant_to_releases): @@ -244,15 +253,17 @@ def compute_packages_on_target_system(source_pkgs, events, releases): did_processing_cross_major_version = True pkgs_to_demodularize = {pkg for pkg in target_pkgs if pkg.modulestream} - target_pkgs, pkgs_to_demodularize = compute_pkg_changes_between_consequent_releases(target_pkgs, events, - release, seen_pkgs, - pkgs_to_demodularize) + target_pkgs, pkgs_to_demodularize, pkgs_to_reinstall = compute_pkg_changes_between_consequent_releases( + target_pkgs, events, + release, seen_pkgs, + pkgs_to_demodularize + ) seen_pkgs = seen_pkgs.union(target_pkgs) demodularized_pkgs = {Package(pkg.name, pkg.repository, None) for pkg in pkgs_to_demodularize} demodularized_target_pkgs = target_pkgs.difference(pkgs_to_demodularize).union(demodularized_pkgs) - return (demodularized_target_pkgs, pkgs_to_demodularize) + return (demodularized_target_pkgs, pkgs_to_demodularize, pkgs_to_reinstall) def compute_rpm_tasks_from_pkg_set_diff(source_pkgs, target_pkgs, pkgs_to_demodularize): @@ -356,15 +367,13 @@ def get_pesid_to_repoid_map(target_pesids): :return: Dictionary mapping the target_pesids to their corresponding repoid """ - repositories_map_msgs = api.consume(RepositoriesMapping) - repositories_map_msg = next(repositories_map_msgs, None) - if list(repositories_map_msgs): - api.current_logger().warning('Unexpectedly received more than one RepositoriesMapping message.') - if not repositories_map_msg: + repositories_map_msgs = list(api.consume(RepositoriesMapping)) + if not repositories_map_msgs: raise StopActorExecutionError( 'Cannot parse RepositoriesMapping data properly', details={'Problem': 'Did not receive a message with mapped repositories'} ) + repositories_map_msg = combine_repomap_messages(repositories_map_msgs) rhui_info = next(api.consume(RHUIInfo), None) cloud_provider = rhui_info.provider if rhui_info else '' @@ -554,6 +563,19 @@ def 
process(): if not events: return + active_vendors = [] + for vendor_list in api.consume(ActiveVendorList): + active_vendors.extend(vendor_list.data) + + pes_json_suffix = "_pes.json" + if os.path.isdir(VENDORS_DIR): + vendor_pesfiles = list(filter(lambda vfile: pes_json_suffix in vfile, os.listdir(VENDORS_DIR))) + + for pesfile in vendor_pesfiles: + if pesfile[:-len(pes_json_suffix)] in active_vendors: + vendor_events = get_pes_events(VENDORS_DIR, pesfile) + events.extend(vendor_events) + releases = get_relevant_releases(events) installed_pkgs = get_installed_pkgs() transaction_configuration = get_transaction_configuration() @@ -567,7 +589,7 @@ def process(): events = remove_undesired_events(events, releases) # Apply events - compute what packages should the target system have - target_pkgs, pkgs_to_demodularize = compute_packages_on_target_system(pkgs_to_begin_computation_with, + target_pkgs, pkgs_to_demodularize, pkgs_to_reinstall = compute_packages_on_target_system(pkgs_to_begin_computation_with, events, releases) # Packages coming out of the events have PESID as their repository, however, we need real repoid @@ -587,4 +609,5 @@ def process(): rpm_tasks = include_instructions_from_transaction_configuration(rpm_tasks, transaction_configuration, installed_pkgs) if rpm_tasks: + rpm_tasks.to_reinstall = sorted(pkgs_to_reinstall) api.produce(rpm_tasks) diff --git a/repos/system_upgrade/common/actors/repositoriesmapping/libraries/repositoriesmapping.py b/repos/system_upgrade/common/actors/repositoriesmapping/libraries/repositoriesmapping.py index d4a64793..4ec1d6e0 100644 --- a/repos/system_upgrade/common/actors/repositoriesmapping/libraries/repositoriesmapping.py +++ b/repos/system_upgrade/common/actors/repositoriesmapping/libraries/repositoriesmapping.py @@ -3,6 +3,7 @@ from collections import defaultdict from leapp.exceptions import StopActorExecutionError from leapp.libraries.common.config.version import get_source_major_version, get_target_major_version +from 
leapp.libraries.common.repomaputils import RepoMapData from leapp.libraries.common.fetch import load_data_asset from leapp.libraries.common.rpms import get_leapp_packages, LeappComponents from leapp.libraries.stdlib import api @@ -16,121 +17,6 @@ REPOMAP_FILE = 'repomap.json' """The name of the new repository mapping file.""" -class RepoMapData(object): - VERSION_FORMAT = '1.3.0' - - def __init__(self): - self.repositories = [] - self.mapping = {} - - def add_repository(self, data, pesid): - """ - Add new PESIDRepositoryEntry with given pesid from the provided dictionary. - - :param data: A dict containing the data of the added repository. The dictionary structure corresponds - to the repositories entries in the repository mapping JSON schema. - :type data: Dict[str, str] - :param pesid: PES id of the repository family that the newly added repository belongs to. - :type pesid: str - """ - self.repositories.append(PESIDRepositoryEntry( - repoid=data['repoid'], - channel=data['channel'], - rhui=data.get('rhui', ''), - repo_type=data['repo_type'], - arch=data['arch'], - major_version=data['major_version'], - pesid=pesid, - distro=data['distro'], - )) - - def get_repositories(self, valid_major_versions): - """ - Return the list of PESIDRepositoryEntry object matching the specified major versions. - """ - return [repo for repo in self.repositories if repo.major_version in valid_major_versions] - - def add_mapping(self, source_major_version, target_major_version, source_pesid, target_pesid): - """ - Add a new mapping entry that is mapping the source pesid to the destination pesid(s), - relevant in an IPU from the supplied source major version to the supplied target - major version. - - :param str source_major_version: Specifies the major version of the source system - for which the added mapping applies. - :param str target_major_version: Specifies the major version of the target system - for which the added mapping applies. 
- :param str source_pesid: PESID of the source repository. - :param Union[str|List[str]] target_pesid: A single target PESID or a list of target - PESIDs of the added mapping. - """ - # NOTE: it could be more simple, but I prefer to be sure the input data - # contains just one map per source PESID. - key = '{}:{}'.format(source_major_version, target_major_version) - rmap = self.mapping.get(key, defaultdict(set)) - self.mapping[key] = rmap - if isinstance(target_pesid, list): - rmap[source_pesid].update(target_pesid) - else: - rmap[source_pesid].add(target_pesid) - - def get_mappings(self, src_major_version, dst_major_version): - """ - Return the list of RepoMapEntry objects for the specified upgrade path. - - IOW, the whole mapping for specified IPU. - """ - key = '{}:{}'.format(src_major_version, dst_major_version) - rmap = self.mapping.get(key, None) - if not rmap: - return None - map_list = [] - for src_pesid in sorted(rmap.keys()): - map_list.append(RepoMapEntry(source=src_pesid, target=sorted(rmap[src_pesid]))) - return map_list - - @staticmethod - def load_from_dict(data): - if data['version_format'] != RepoMapData.VERSION_FORMAT: - raise ValueError( - 'The obtained repomap data has unsupported version of format.' - ' Get {} required {}' - .format(data['version_format'], RepoMapData.VERSION_FORMAT) - ) - - repomap = RepoMapData() - - # Load reposiories - existing_pesids = set() - for repo_family in data['repositories']: - existing_pesids.add(repo_family['pesid']) - for repo in repo_family['entries']: - repomap.add_repository(repo, repo_family['pesid']) - - # Load mappings - for mapping in data['mapping']: - for entry in mapping['entries']: - if not isinstance(entry['target'], list): - raise ValueError( - 'The target field of a mapping entry is not a list: {}' - .format(entry) - ) - - for pesid in [entry['source']] + entry['target']: - if pesid not in existing_pesids: - raise ValueError( - 'The {} pesid is not related to any repository.' 
- .format(pesid) - ) - repomap.add_mapping( - source_major_version=mapping['source_major_version'], - target_major_version=mapping['target_major_version'], - source_pesid=entry['source'], - target_pesid=entry['target'], - ) - return repomap - - def _inhibit_upgrade(msg): local_path = os.path.join('/etc/leapp/file', REPOMAP_FILE) hint = ( diff --git a/repos/system_upgrade/common/actors/rpmscanner/libraries/rpmscanner.py b/repos/system_upgrade/common/actors/rpmscanner/libraries/rpmscanner.py index dbe56191..74c4b101 100644 --- a/repos/system_upgrade/common/actors/rpmscanner/libraries/rpmscanner.py +++ b/repos/system_upgrade/common/actors/rpmscanner/libraries/rpmscanner.py @@ -25,6 +25,8 @@ except ImportError: def _get_package_repository_data_yum(): yum_base = yum.YumBase() + # DNF configuration is not loaded here, since no impact for operations + # done by the actor is observed here pkg_repos = {} try: diff --git a/repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/libraries/rpmtransactionconfigtaskscollector.py b/repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/libraries/rpmtransactionconfigtaskscollector.py index 43ac1fc4..62aefaf4 100644 --- a/repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/libraries/rpmtransactionconfigtaskscollector.py +++ b/repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/libraries/rpmtransactionconfigtaskscollector.py @@ -18,21 +18,37 @@ def load_tasks_file(path, logger): return [] +def filter_out(installed_rpm_names, to_filter, debug_msg): + # These are the packages that aren't installed on the system. + filtered_ok = [pkg for pkg in to_filter if pkg not in installed_rpm_names] + + # And these ones are the ones that are. + filtered_out = list(set(to_filter) - set(filtered_ok)) + if filtered_out: + api.current_logger().debug( + debug_msg + + '\n- ' + '\n- '.join(filtered_out) + ) + # We may want to use either of the two sets. 
+ return filtered_ok, filtered_out + + def load_tasks(base_dir, logger): # Loads configuration files to_install, to_keep, and to_remove from the given base directory rpms = next(api.consume(DistributionSignedRPM)) rpm_names = [rpm.name for rpm in rpms.items] + to_install = load_tasks_file(os.path.join(base_dir, 'to_install'), logger) + install_debug_msg = 'The following packages from "to_install" file will be ignored as they are already installed:' # we do not want to put into rpm transaction what is already installed (it will go to "to_upgrade" bucket) - to_install_filtered = [pkg for pkg in to_install if pkg not in rpm_names] + to_install_filtered, _ = filter_out(rpm_names, to_install, install_debug_msg) - filtered = set(to_install) - set(to_install_filtered) - if filtered: - api.current_logger().debug( - 'The following packages from "to_install" file will be ignored as they are already installed:' - '\n- ' + '\n- '.join(filtered)) + to_reinstall = load_tasks_file(os.path.join(base_dir, 'to_reinstall'), logger) + reinstall_debug_msg = 'The following packages from "to_reinstall" file will be ignored as they are not installed:' + _, to_reinstall_filtered = filter_out(rpm_names, to_reinstall, reinstall_debug_msg) return RpmTransactionTasks( to_install=to_install_filtered, + to_reinstall=to_reinstall_filtered, to_keep=load_tasks_file(os.path.join(base_dir, 'to_keep'), logger), to_remove=load_tasks_file(os.path.join(base_dir, 'to_remove'), logger)) diff --git a/repos/system_upgrade/common/actors/scancustommodifications/tests/test_scancustommodifications.py b/repos/system_upgrade/common/actors/scancustommodifications/tests/test_scancustommodifications.py index a48869e4..33d0660f 100644 --- a/repos/system_upgrade/common/actors/scancustommodifications/tests/test_scancustommodifications.py +++ b/repos/system_upgrade/common/actors/scancustommodifications/tests/test_scancustommodifications.py @@ -28,15 +28,13 @@ S.5....T. 
c etc/leapp/files/pes-events.json @pytest.mark.parametrize('a_file,name', [ ('repos/system_upgrade/el8toel9/actors/checkblacklistca/actor.py', 'checkblacklistca'), - ('repos/system_upgrade/el7toel8/actors/checkmemcached/actor.py', 'check_memcached'), # actor library - ('repos/system_upgrade/el7toel8/actors/checkmemcached/libraries/checkmemcached.py', 'check_memcached'), + ('repos/system_upgrade/el8toel9/actors/checkifcfg/libraries/checkifcfg_ifcfg.py', 'check_ifcfg'), # actor file ('repos/system_upgrade/common/actors/createresumeservice/files/leapp_resume.service', 'create_systemd_service'), ('repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh', 'common_leapp_dracut_modules'), # not a library and not an actor file - ('repos/system_upgrade/el7toel8/models/authselect.py', ''), ('repos/system_upgrade/common/files/rhel_upgrade.py', ''), # common library not tied to any actor ('repos/system_upgrade/common/libraries/mounting.py', ''), diff --git a/repos/system_upgrade/common/actors/scanvendorrepofiles/actor.py b/repos/system_upgrade/common/actors/scanvendorrepofiles/actor.py new file mode 100644 index 00000000..a5e481cb --- /dev/null +++ b/repos/system_upgrade/common/actors/scanvendorrepofiles/actor.py @@ -0,0 +1,26 @@ +from leapp.actors import Actor +from leapp.libraries.actor import scanvendorrepofiles +from leapp.models import ( + CustomTargetRepositoryFile, + ActiveVendorList, + VendorCustomTargetRepositoryList, +) +from leapp.tags import FactsPhaseTag, IPUWorkflowTag + + +class ScanVendorRepofiles(Actor): + """ + Load and produce custom repository data from vendor-provided files. + Only those vendors whose source system repoids were found on the system will be included. 
+ + name = "scan_vendor_repofiles" + consumes = ActiveVendorList + produces = ( + CustomTargetRepositoryFile, + VendorCustomTargetRepositoryList, + ) + tags = (FactsPhaseTag, IPUWorkflowTag) + + def process(self): + scanvendorrepofiles.process() diff --git a/repos/system_upgrade/common/actors/scanvendorrepofiles/libraries/scanvendorrepofiles.py b/repos/system_upgrade/common/actors/scanvendorrepofiles/libraries/scanvendorrepofiles.py new file mode 100644 index 00000000..84392101 --- /dev/null +++ b/repos/system_upgrade/common/actors/scanvendorrepofiles/libraries/scanvendorrepofiles.py @@ -0,0 +1,72 @@ +import os + +from leapp.libraries.common import repofileutils +from leapp.libraries.stdlib import api +from leapp.models import ( + CustomTargetRepository, + CustomTargetRepositoryFile, + ActiveVendorList, + VendorCustomTargetRepositoryList, +) + + +VENDORS_DIR = "/etc/leapp/files/vendors.d/" +REPOFILE_SUFFIX = ".repo" + + +def process(): + """ + Produce CustomTargetRepository msgs for the vendor repo files inside the + . + + The CustomTargetRepository messages are produced only if a "from" vendor repository + listed inside its map matched one of the repositories active on the system. + """ + if not os.path.isdir(VENDORS_DIR): + api.current_logger().debug( + "The {} directory doesn't exist. Nothing to do.".format(VENDORS_DIR) + ) + return + + for repofile_name in os.listdir(VENDORS_DIR): + if not repofile_name.endswith(REPOFILE_SUFFIX): + continue + # Cut the .repo part to get only the name.
+ vendor_name = repofile_name[:-5] + + active_vendors = [] + for vendor_list in api.consume(ActiveVendorList): + active_vendors.extend(vendor_list.data) + + api.current_logger().debug("Active vendor list: {}".format(active_vendors)) + + if vendor_name not in active_vendors: + api.current_logger().debug( + "Vendor {} not in active list, skipping".format(vendor_name) + ) + continue + + full_repo_path = os.path.join(VENDORS_DIR, repofile_name) + parsed_repofile = repofileutils.parse_repofile(full_repo_path) + api.current_logger().debug( + "Vendor {} found in active list, processing file {}".format(vendor_name, repofile_name) + ) + + api.produce(CustomTargetRepositoryFile(file=full_repo_path)) + + custom_vendor_repos = [ + CustomTargetRepository( + repoid=repo.repoid, + name=repo.name, + baseurl=repo.baseurl, + enabled=repo.enabled, + ) for repo in parsed_repofile.data + ] + + api.produce( + VendorCustomTargetRepositoryList(vendor=vendor_name, repos=custom_vendor_repos) + ) + + api.current_logger().info( + "The {} directory exists, vendor repositories loaded.".format(VENDORS_DIR) + ) diff --git a/repos/system_upgrade/common/actors/scanvendorrepofiles/tests/test_scanvendorrepofiles.py b/repos/system_upgrade/common/actors/scanvendorrepofiles/tests/test_scanvendorrepofiles.py new file mode 100644 index 00000000..cb5c7ab7 --- /dev/null +++ b/repos/system_upgrade/common/actors/scanvendorrepofiles/tests/test_scanvendorrepofiles.py @@ -0,0 +1,131 @@ +import os + +from leapp.libraries.actor import scancustomrepofile +from leapp.libraries.common import repofileutils +from leapp.libraries.common.testutils import produce_mocked +from leapp.libraries.stdlib import api + +from leapp.models import (CustomTargetRepository, CustomTargetRepositoryFile, + RepositoryData, RepositoryFile) + + +_REPODATA = [ + RepositoryData(repoid="repo1", name="repo1name", baseurl="repo1url", enabled=True), + RepositoryData(repoid="repo2", name="repo2name", baseurl="repo2url", enabled=False), + 
RepositoryData(repoid="repo3", name="repo3name", enabled=True), + RepositoryData(repoid="repo4", name="repo4name", mirrorlist="mirror4list", enabled=True), +] + +_CUSTOM_REPOS = [ + CustomTargetRepository(repoid="repo1", name="repo1name", baseurl="repo1url", enabled=True), + CustomTargetRepository(repoid="repo2", name="repo2name", baseurl="repo2url", enabled=False), + CustomTargetRepository(repoid="repo3", name="repo3name", baseurl=None, enabled=True), + CustomTargetRepository(repoid="repo4", name="repo4name", baseurl=None, enabled=True), +] + +_CUSTOM_REPO_FILE_MSG = CustomTargetRepositoryFile(file=scancustomrepofile.CUSTOM_REPO_PATH) + + +_TESTING_REPODATA = [ + RepositoryData(repoid="repo1-stable", name="repo1name", baseurl="repo1url", enabled=True), + RepositoryData(repoid="repo2-testing", name="repo2name", baseurl="repo2url", enabled=False), + RepositoryData(repoid="repo3-stable", name="repo3name", enabled=False), + RepositoryData(repoid="repo4-testing", name="repo4name", mirrorlist="mirror4list", enabled=True), +] + +_TESTING_CUSTOM_REPOS_STABLE_TARGET = [ + CustomTargetRepository(repoid="repo1-stable", name="repo1name", baseurl="repo1url", enabled=True), + CustomTargetRepository(repoid="repo2-testing", name="repo2name", baseurl="repo2url", enabled=False), + CustomTargetRepository(repoid="repo3-stable", name="repo3name", baseurl=None, enabled=False), + CustomTargetRepository(repoid="repo4-testing", name="repo4name", baseurl=None, enabled=True), +] + +_TESTING_CUSTOM_REPOS_BETA_TARGET = [ + CustomTargetRepository(repoid="repo1-stable", name="repo1name", baseurl="repo1url", enabled=True), + CustomTargetRepository(repoid="repo2-testing", name="repo2name", baseurl="repo2url", enabled=True), + CustomTargetRepository(repoid="repo3-stable", name="repo3name", baseurl=None, enabled=False), + CustomTargetRepository(repoid="repo4-testing", name="repo4name", baseurl=None, enabled=True), +] + +_PROCESS_STABLE_TARGET = "stable" +_PROCESS_BETA_TARGET = "beta" + + +class 
LoggerMocked(object): + def __init__(self): + self.infomsg = None + self.debugmsg = None + + def info(self, msg): + self.infomsg = msg + + def debug(self, msg): + self.debugmsg = msg + + def __call__(self): + return self + + +def test_no_repofile(monkeypatch): + monkeypatch.setattr(os.path, 'isfile', lambda dummy: False) + monkeypatch.setattr(api, 'produce', produce_mocked()) + monkeypatch.setattr(api, 'current_logger', LoggerMocked()) + scancustomrepofile.process() + msg = "The {} file doesn't exist. Nothing to do.".format(scancustomrepofile.CUSTOM_REPO_PATH) + assert api.current_logger.debugmsg == msg + assert not api.produce.called + + +def test_valid_repofile_exists(monkeypatch): + def _mocked_parse_repofile(fpath): + return RepositoryFile(file=fpath, data=_REPODATA) + monkeypatch.setattr(os.path, 'isfile', lambda dummy: True) + monkeypatch.setattr(api, 'produce', produce_mocked()) + monkeypatch.setattr(repofileutils, 'parse_repofile', _mocked_parse_repofile) + monkeypatch.setattr(api, 'current_logger', LoggerMocked()) + scancustomrepofile.process() + msg = "The {} file exists, custom repositories loaded.".format(scancustomrepofile.CUSTOM_REPO_PATH) + assert api.current_logger.infomsg == msg + assert api.produce.called == len(_CUSTOM_REPOS) + 1 + assert _CUSTOM_REPO_FILE_MSG in api.produce.model_instances + for crepo in _CUSTOM_REPOS: + assert crepo in api.produce.model_instances + + +def test_target_stable_repos(monkeypatch): + def _mocked_parse_repofile(fpath): + return RepositoryFile(file=fpath, data=_TESTING_REPODATA) + monkeypatch.setattr(os.path, 'isfile', lambda dummy: True) + monkeypatch.setattr(api, 'produce', produce_mocked()) + monkeypatch.setattr(repofileutils, 'parse_repofile', _mocked_parse_repofile) + + scancustomrepofile.process(_PROCESS_STABLE_TARGET) + assert api.produce.called == len(_TESTING_CUSTOM_REPOS_STABLE_TARGET) + 1 + for crepo in _TESTING_CUSTOM_REPOS_STABLE_TARGET: + assert crepo in api.produce.model_instances + + +def 
test_target_beta_repos(monkeypatch): + def _mocked_parse_repofile(fpath): + return RepositoryFile(file=fpath, data=_TESTING_REPODATA) + monkeypatch.setattr(os.path, 'isfile', lambda dummy: True) + monkeypatch.setattr(api, 'produce', produce_mocked()) + monkeypatch.setattr(repofileutils, 'parse_repofile', _mocked_parse_repofile) + + scancustomrepofile.process(_PROCESS_BETA_TARGET) + assert api.produce.called == len(_TESTING_CUSTOM_REPOS_BETA_TARGET) + 1 + for crepo in _TESTING_CUSTOM_REPOS_BETA_TARGET: + assert crepo in api.produce.model_instances + + +def test_empty_repofile_exists(monkeypatch): + def _mocked_parse_repofile(fpath): + return RepositoryFile(file=fpath, data=[]) + monkeypatch.setattr(os.path, 'isfile', lambda dummy: True) + monkeypatch.setattr(api, 'produce', produce_mocked()) + monkeypatch.setattr(repofileutils, 'parse_repofile', _mocked_parse_repofile) + monkeypatch.setattr(api, 'current_logger', LoggerMocked()) + scancustomrepofile.process() + msg = "The {} file exists, but is empty. 
Nothing to do.".format(scancustomrepofile.CUSTOM_REPO_PATH) + assert api.current_logger.infomsg == msg + assert not api.produce.called diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/actor.py b/repos/system_upgrade/common/actors/setuptargetrepos/actor.py index 91855818..3a7e955b 100644 --- a/repos/system_upgrade/common/actors/setuptargetrepos/actor.py +++ b/repos/system_upgrade/common/actors/setuptargetrepos/actor.py @@ -10,7 +10,8 @@ from leapp.models import ( RHUIInfo, SkippedRepositories, TargetRepositories, - UsedRepositories + UsedRepositories, + VendorCustomTargetRepositoryList ) from leapp.tags import FactsPhaseTag, IPUWorkflowTag @@ -37,7 +38,8 @@ class SetupTargetRepos(Actor): RepositoriesFacts, RepositoriesBlacklisted, RHUIInfo, - UsedRepositories) + UsedRepositories, + VendorCustomTargetRepositoryList) produces = (TargetRepositories, SkippedRepositories) tags = (IPUWorkflowTag, FactsPhaseTag) diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py b/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py index a6073aa3..dfa565c1 100644 --- a/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py +++ b/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py @@ -1,6 +1,7 @@ from leapp.libraries.actor import setuptargetrepos_repomap from leapp.libraries.common.config.version import get_source_major_version, get_source_version, get_target_version +from leapp.libraries.common.repomaputils import combine_repomap_messages from leapp.libraries.stdlib import api from leapp.models import ( CustomTargetRepository, @@ -13,7 +14,8 @@ from leapp.models import ( RHUIInfo, SkippedRepositories, TargetRepositories, - UsedRepositories + UsedRepositories, + VendorCustomTargetRepositoryList ) RHUI_CLIENT_REPOIDS_RHEL88_TO_RHEL810 = { @@ -80,13 +82,62 @@ def _get_mapped_repoids(repomap, src_repoids): return mapped_repoids +def 
_get_vendor_custom_repos(enabled_repos, mapping_list):
+    # Look at what source repos from the vendor mapping were enabled.
+    # If any of them are in beta, include all the vendor's custom repos in the list.
+    # Otherwise include only the vendor's non-beta repos.
+
+    result = []
+
+    # Build a dict of vendor mappings for easy lookup.
+    map_dict = {mapping.vendor: mapping for mapping in mapping_list if mapping.vendor}
+
+    for vendor_repolist in api.consume(VendorCustomTargetRepositoryList):
+        vendor_repomap = map_dict[vendor_repolist.vendor]
+
+        # Find the beta channel repositories for the vendor.
+        beta_repos = [
+            x.repoid for x in vendor_repomap.repositories if x.channel == "beta"
+        ]
+        api.current_logger().debug(
+            "Vendor {} beta repos: {}".format(vendor_repolist.vendor, beta_repos)
+        )
+
+        # Are any of the beta repos present and enabled on the system?
+        if any(rep in beta_repos for rep in enabled_repos):
+            # If so, use all repos including beta in the upgrade.
+            vendor_repos = vendor_repolist.repos
+        else:
+            # Otherwise filter beta repos out.
+            vendor_repos = [repo for repo in vendor_repolist.repos if repo.repoid not in beta_repos]
+
+        result.extend([CustomTargetRepository(
+            repoid=repo.repoid,
+            name=repo.name,
+            baseurl=repo.baseurl,
+            enabled=repo.enabled,
+        ) for repo in vendor_repos])
+
+    return result
+
+
 def process():
     # Load relevant data from messages
     used_repoids_dict = _get_used_repo_dict()
     enabled_repoids = _get_enabled_repoids()
     excluded_repoids = _get_blacklisted_repoids()
+
+    # Remember that we can't just grab one message, each vendor can have its own mapping. 
+ repo_mapping_list = list(api.consume(RepositoriesMapping)) + custom_repos = _get_custom_target_repos() repoids_from_installed_packages = _get_repoids_from_installed_packages() + vendor_repos = _get_vendor_custom_repos(enabled_repoids, repo_mapping_list) + custom_repos.extend(vendor_repos) + + api.current_logger().debug( + "Vendor repolist: {}".format([repo.repoid for repo in vendor_repos]) + ) # Setup repomap handler repo_mappig_msg = next(api.consume(RepositoriesMapping), RepositoriesMapping()) @@ -168,6 +219,10 @@ def process(): custom_repos = [repo for repo in custom_repos if repo.repoid not in excluded_repoids] custom_repos = sorted(custom_repos, key=lambda x: x.repoid) + api.current_logger().debug( + "Final repolist: {}".format([repo.repoid for repo in custom_repos]) + ) + # produce message about skipped repositories enabled_repoids_with_mapping = _get_mapped_repoids(repomap, enabled_repoids) skipped_repoids = enabled_repoids & set(used_repoids_dict.keys()) - enabled_repoids_with_mapping diff --git a/repos/system_upgrade/common/actors/systemfacts/actor.py b/repos/system_upgrade/common/actors/systemfacts/actor.py index 59b12c87..85d4a09e 100644 --- a/repos/system_upgrade/common/actors/systemfacts/actor.py +++ b/repos/system_upgrade/common/actors/systemfacts/actor.py @@ -47,7 +47,7 @@ class SystemFactsActor(Actor): GrubCfgBios, Report ) - tags = (IPUWorkflowTag, FactsPhaseTag,) + tags = (IPUWorkflowTag, FactsPhaseTag.Before,) def process(self): self.produce(systemfacts.get_sysctls_status()) diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py index 9fc96a52..c4a31f80 100644 --- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py @@ -152,9 +152,10 @@ def _import_gpg_keys(context, install_root_dir, 
target_major_version): # Import the RHEL X+1 GPG key to be able to verify the installation of initial packages try: # Import also any other keys provided by the customer in the same directory - for certname in os.listdir(certs_path): - cmd = ['rpm', '--root', install_root_dir, '--import', os.path.join(certs_path, certname)] - context.call(cmd, callback_raw=utils.logging_handler) + for trusted_dir in certs_path: + for certname in os.listdir(trusted_dir): + cmd = ['rpm', '--root', install_root_dir, '--import', os.path.join(trusted_dir, certname)] + context.call(cmd, callback_raw=utils.logging_handler) except CalledProcessError as exc: raise StopActorExecutionError( message=( @@ -294,6 +295,8 @@ def _get_files_owned_by_rpms(context, dirpath, pkgs=None, recursive=False): """ Return the list of file names inside dirpath owned by RPMs. + The returned paths are relative to the dirpath. + This is important e.g. in case of RHUI which installs specific repo files in the yum.repos.d directory. @@ -311,7 +314,7 @@ def _get_files_owned_by_rpms(context, dirpath, pkgs=None, recursive=False): searchdir = context.full_path(dirpath) if recursive: for root, _, files in os.walk(searchdir): - if '/directory-hash/' in root: + if '/directory-hash' in root: # tl;dr; for the performance improvement # The directory has been relatively recently added to ca-certificates # rpm on EL 9+ systems and the content does not seem to be important @@ -334,7 +337,7 @@ def _get_files_owned_by_rpms(context, dirpath, pkgs=None, recursive=False): api.current_logger().debug('SKIP the {} file: not owned by any rpm'.format(fname)) continue if pkgs and not [pkg for pkg in pkgs if pkg in result['stdout']]: - api.current_logger().debug('SKIP the {} file: not owned by any searched rpm:'.format(fname)) + api.current_logger().debug('SKIP the {} file: not owned by any searched rpm'.format(fname)) continue api.current_logger().debug('Found the file owned by an rpm: {}.'.format(fname)) files_owned_by_rpms.append(fname) 
@@ -639,6 +642,7 @@ def _prep_repository_access(context, target_userspace): run(["chroot", target_userspace, "/bin/bash", "-c", "su - -c update-ca-trust"]) if not rhsm.skip_rhsm(): + _copy_certificates(context, target_userspace) run(['rm', '-rf', os.path.join(target_etc, 'rhsm')]) context.copytree_from('/etc/rhsm', os.path.join(target_etc, 'rhsm')) @@ -927,7 +931,13 @@ def _get_rh_available_repoids(context, indata): os.rename(foreign_repofile, '{0}.back'.format(foreign_repofile)) try: - dnf_cmd = ['dnf', 'repolist', '--releasever', target_ver, '-v', '--enablerepo', '*'] + dnf_cmd = [ + 'dnf', 'repolist', + '--releasever', target_ver, '-v', + '--enablerepo', '*', + '--disablerepo', '*-source-*', + '--disablerepo', '*-debug-*', + ] repolist_result = context.call(dnf_cmd)['stdout'] repoid_lines = [line for line in repolist_result.split('\n') if line.startswith('Repo-id')] rhui_repoids = {extract_repoid_from_line(line) for line in repoid_lines} diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py index 69ed7040..267c064e 100644 --- a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py +++ b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py @@ -13,6 +13,7 @@ from leapp.libraries.actor import userspacegen from leapp.libraries.common import overlaygen, repofileutils, rhsm from leapp.libraries.common.config import architecture from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked, produce_mocked +from leapp.libraries.stdlib import api, CalledProcessError from leapp.utils.deprecation import suppress_deprecation if sys.version_info < (2, 8): @@ -1225,3 +1226,102 @@ def test_perform_ok(monkeypatch): assert userspacegen.api.produce.model_instances[1] == msg_target_repos # this one is full of 
constants, so it's safe to check just the instance assert isinstance(userspacegen.api.produce.model_instances[2], models.TargetUserSpaceInfo) + + +class _MockContext(): + + def __init__(self, base_dir, owned_by_rpms): + self.base_dir = base_dir + # list of files owned, no base_dir prefixed + self.owned_by_rpms = owned_by_rpms + + def full_path(self, path): + return os.path.join(self.base_dir, os.path.abspath(path).lstrip('/')) + + def call(self, cmd): + assert len(cmd) == 3 and cmd[0] == 'rpm' and cmd[1] == '-qf' + if cmd[2] in self.owned_by_rpms: + return {'exit_code': 0} + raise CalledProcessError("Command failed with exit code 1", cmd, 1) + + +def test__get_files_owned_by_rpms(monkeypatch): + + def listdir_mocked(path): + assert path == '/base/dir/some/path' + return ['fileA', 'fileB.txt', 'test.log', 'script.sh'] + + monkeypatch.setattr(os, 'listdir', listdir_mocked) + logger = logger_mocked() + monkeypatch.setattr(api, 'current_logger', logger) + + search_dir = '/some/path' + # output doesn't include full paths + owned = ['fileA', 'script.sh'] + # but the rpm -qf call happens with the full path + owned_fullpath = [os.path.join(search_dir, f) for f in owned] + context = _MockContext('/base/dir', owned_fullpath) + + out = userspacegen._get_files_owned_by_rpms(context, '/some/path', recursive=False) + assert sorted(owned) == sorted(out) + + +def test__get_files_owned_by_rpms_recursive(monkeypatch): + # this is not necessarily accurate, but close enough + fake_walk = [ + ("/base/dir/etc/pki", ["ca-trust", "tls", "rpm-gpg"], []), + ("/base/dir/etc/pki/ca-trust", ["extracted", "source"], []), + ("/base/dir/etc/pki/ca-trust/extracted", ["openssl", "java"], []), + ("/base/dir/etc/pki/ca-trust/extracted/openssl", [], ["ca-bundle.trust.crt"]), + ("/base/dir/etc/pki/ca-trust/extracted/java", [], ["cacerts"]), + + ("/base/dir/etc/pki/ca-trust/source", ["anchors", "directory-hash"], []), + ("/base/dir/etc/pki/ca-trust/source/anchors", [], ["my-ca.crt"]), + 
("/base/dir/etc/pki/ca-trust/extracted/pem/directory-hash", [], [ + "5931b5bc.0", "a94d09e5.0" + ]), + ("/base/dir/etc/pki/tls", ["certs", "private"], []), + ("/base/dir/etc/pki/tls/certs", [], ["server.crt", "ca-bundle.crt"]), + ("/base/dir/etc/pki/tls/private", [], ["server.key"]), + ("/base/dir/etc/pki/rpm-gpg", [], [ + "RPM-GPG-KEY-1", + "RPM-GPG-KEY-2", + ]), + ] + + def walk_mocked(path): + assert path == '/base/dir/etc/pki' + return fake_walk + + monkeypatch.setattr(os, 'walk', walk_mocked) + logger = logger_mocked() + monkeypatch.setattr(api, 'current_logger', logger) + + search_dir = '/etc/pki' + # output doesn't include full paths + owned = [ + 'tls/certs/ca-bundle.crt', + 'ca-trust/extracted/openssl/ca-bundle.trust.crt', + 'rpm-gpg/RPM-GPG-KEY-1', + 'rpm-gpg/RPM-GPG-KEY-2', + 'ca-trust/extracted/pem/directory-hash/a94d09e5.0', + 'ca-trust/extracted/pem/directory-hash/a94d09e5.0', + ] + # the rpm -qf call happens with the full path + owned_fullpath = [os.path.join(search_dir, f) for f in owned] + context = _MockContext('/base/dir', owned_fullpath) + + out = userspacegen._get_files_owned_by_rpms(context, search_dir, recursive=True) + # any directory-hash directory should be skipped + assert sorted(owned[0:4]) == sorted(out) + + def has_dbgmsg(substr): + return any([substr in log for log in logger.dbgmsg]) + + # test a few + assert has_dbgmsg( + "SKIP files in the /base/dir/etc/pki/ca-trust/extracted/pem/directory-hash directory:" + " Not important for the IPU.", + ) + assert has_dbgmsg('SKIP the tls/certs/server.crt file: not owned by any rpm') + assert has_dbgmsg('Found the file owned by an rpm: rpm-gpg/RPM-GPG-KEY-2.') diff --git a/repos/system_upgrade/common/actors/trustedgpgkeysscanner/libraries/trustedgpgkeys.py b/repos/system_upgrade/common/actors/trustedgpgkeysscanner/libraries/trustedgpgkeys.py index 6377f767..4c5420f6 100644 --- a/repos/system_upgrade/common/actors/trustedgpgkeysscanner/libraries/trustedgpgkeys.py +++ 
b/repos/system_upgrade/common/actors/trustedgpgkeysscanner/libraries/trustedgpgkeys.py
@@ -13,13 +13,14 @@ def _get_pubkeys(installed_rpms):
     pubkeys = get_pubkeys_from_rpms(installed_rpms)
     db_pubkeys = [key.fingerprint for key in pubkeys]
     certs_path = get_path_to_gpg_certs()
-    for certname in os.listdir(certs_path):
-        key_file = os.path.join(certs_path, certname)
-        fps = get_gpg_fp_from_file(key_file)
-        for fp in fps:
-            if fp not in db_pubkeys:
-                pubkeys.append(GpgKey(fingerprint=fp, rpmdb=False, filename=key_file))
-                db_pubkeys += fp
+    for trusted_dir in certs_path:
+        for certname in os.listdir(trusted_dir):
+            key_file = os.path.join(trusted_dir, certname)
+            fps = get_gpg_fp_from_file(key_file)
+            for fp in fps:
+                if fp not in db_pubkeys:
+                    pubkeys.append(GpgKey(fingerprint=fp, rpmdb=False, filename=key_file))
+                    db_pubkeys += fp
 
     return pubkeys
 
diff --git a/repos/system_upgrade/common/actors/vendorreposignaturescanner/actor.py b/repos/system_upgrade/common/actors/vendorreposignaturescanner/actor.py
new file mode 100644
index 00000000..dbf86974
--- /dev/null
+++ b/repos/system_upgrade/common/actors/vendorreposignaturescanner/actor.py
@@ -0,0 +1,72 @@
+import os
+
+from leapp.actors import Actor
+from leapp.models import VendorSignatures, ActiveVendorList
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+
+
+VENDORS_DIR = "/etc/leapp/files/vendors.d/"
+SIGFILE_SUFFIX = ".sigs"
+
+
+class VendorRepoSignatureScanner(Actor):
+    """
+    Produce VendorSignatures messages for the vendor signature files inside the
+    vendors.d directory.
+    These messages are used to extend the list of packages Leapp will consider
+    signed and will attempt to upgrade.
+
+    The messages are produced only if a "from" vendor repository
+    listed inside its map matched one of the repositories active on the system.
+ """ + + name = 'vendor_repo_signature_scanner' + consumes = (ActiveVendorList) + produces = (VendorSignatures) + tags = (IPUWorkflowTag, FactsPhaseTag.Before) + + def process(self): + if not os.path.isdir(VENDORS_DIR): + self.log.debug( + "The {} directory doesn't exist. Nothing to do.".format(VENDORS_DIR) + ) + return + + active_vendors = [] + for vendor_list in self.consume(ActiveVendorList): + active_vendors.extend(vendor_list.data) + + self.log.debug( + "Active vendor list: {}".format(active_vendors) + ) + + for sigfile_name in os.listdir(VENDORS_DIR): + if not sigfile_name.endswith(SIGFILE_SUFFIX): + continue + # Cut the suffix part to get only the name. + vendor_name = sigfile_name[:-5] + + if vendor_name not in active_vendors: + self.log.debug( + "Vendor {} not in active list, skipping".format(vendor_name) + ) + continue + + self.log.debug( + "Vendor {} found in active list, processing file {}".format(vendor_name, sigfile_name) + ) + + full_sigfile_path = os.path.join(VENDORS_DIR, sigfile_name) + with open(full_sigfile_path) as f: + signatures = [line for line in f.read().splitlines() if line] + + self.produce( + VendorSignatures( + vendor=vendor_name, + sigs=signatures, + ) + ) + + self.log.info( + "The {} directory exists, vendor signatures loaded.".format(VENDORS_DIR) + ) diff --git a/repos/system_upgrade/common/actors/vendorrepositoriesmapping/actor.py b/repos/system_upgrade/common/actors/vendorrepositoriesmapping/actor.py new file mode 100644 index 00000000..13256476 --- /dev/null +++ b/repos/system_upgrade/common/actors/vendorrepositoriesmapping/actor.py @@ -0,0 +1,19 @@ +from leapp.actors import Actor +# from leapp.libraries.common.repomaputils import scan_vendor_repomaps, VENDOR_REPOMAP_DIR +from leapp.libraries.actor.vendorrepositoriesmapping import scan_vendor_repomaps +from leapp.models import VendorSourceRepos, RepositoriesMapping +from leapp.tags import FactsPhaseTag, IPUWorkflowTag + + +class VendorRepositoriesMapping(Actor): + """ + Scan the 
vendor repository mapping files and provide the data to other actors. + """ + + name = "vendor_repositories_mapping" + consumes = () + produces = (RepositoriesMapping, VendorSourceRepos,) + tags = (IPUWorkflowTag, FactsPhaseTag.Before) + + def process(self): + scan_vendor_repomaps() diff --git a/repos/system_upgrade/common/actors/vendorrepositoriesmapping/libraries/vendorrepositoriesmapping.py b/repos/system_upgrade/common/actors/vendorrepositoriesmapping/libraries/vendorrepositoriesmapping.py new file mode 100644 index 00000000..6a41d4e5 --- /dev/null +++ b/repos/system_upgrade/common/actors/vendorrepositoriesmapping/libraries/vendorrepositoriesmapping.py @@ -0,0 +1,92 @@ +import os +import json + +from leapp.libraries.common import fetch +from leapp.libraries.common.config.version import get_target_major_version, get_source_major_version +from leapp.libraries.common.repomaputils import RepoMapData +from leapp.libraries.stdlib import api +from leapp.models import VendorSourceRepos, RepositoriesMapping +from leapp.models.fields import ModelViolationError +from leapp.exceptions import StopActorExecutionError + + +VENDORS_DIR = "/etc/leapp/files/vendors.d" +"""The folder containing the vendor repository mapping files.""" + + +def inhibit_upgrade(msg): + raise StopActorExecutionError( + msg, + details={'hint': ('Read documentation at the following link for more' + ' information about how to retrieve the valid file:' + ' https://access.redhat.com/articles/3664871')}) + + +def read_repofile(repofile, repodir): + try: + return json.loads(fetch.read_or_fetch(repofile, directory=repodir, allow_download=False)) + except ValueError: + # The data does not contain a valid json + inhibit_upgrade('The repository mapping file is invalid: file does not contain a valid JSON object.') + return None + + +def read_repomap_file(repomap_file, read_repofile_func, vendor_name): + json_data = read_repofile_func(repomap_file, VENDORS_DIR) + try: + repomap_data = 
RepoMapData.load_from_dict(json_data) + + source_major = get_source_major_version() + target_major = get_target_major_version() + + api.produce(VendorSourceRepos( + vendor=vendor_name, + source_repoids=repomap_data.get_version_repoids(source_major) + )) + + mapping = repomap_data.get_mappings(source_major, target_major) + valid_major_versions = [source_major, target_major] + + api.produce(RepositoriesMapping( + mapping=mapping, + repositories=repomap_data.get_repositories(valid_major_versions), + vendor=vendor_name + )) + except ModelViolationError as err: + err_message = ( + 'The repository mapping file is invalid: ' + 'the JSON does not match required schema (wrong field type/value): {}. ' + 'Ensure that the current upgrade path is correct and is present in the mappings: {} -> {}' + .format(err, source_major, target_major) + ) + inhibit_upgrade(err_message) + except KeyError as err: + inhibit_upgrade( + 'The repository mapping file is invalid: the JSON is missing a required field: {}'.format(err)) + except ValueError as err: + # The error should contain enough information, so we do not need to clarify it further + inhibit_upgrade('The repository mapping file is invalid: {}'.format(err)) + + +def scan_vendor_repomaps(read_repofile_func=read_repofile): + """ + Scan the repository mapping file and produce RepositoriesMapping msg. + + See the description of the actor for more details. + """ + + map_json_suffix = "_map.json" + if os.path.isdir(VENDORS_DIR): + vendor_mapfiles = list(filter(lambda vfile: map_json_suffix in vfile, os.listdir(VENDORS_DIR))) + + for mapfile in vendor_mapfiles: + read_repomap_file(mapfile, read_repofile_func, mapfile[:-len(map_json_suffix)]) + else: + api.current_logger().debug( + "The {} directory doesn't exist. 
Nothing to do.".format(VENDORS_DIR) + ) + # vendor_repomap_collection = scan_vendor_repomaps(VENDOR_REPOMAP_DIR) + # if vendor_repomap_collection: + # self.produce(vendor_repomap_collection) + # for repomap in vendor_repomap_collection.maps: + # self.produce(repomap) diff --git a/repos/system_upgrade/common/files/distro/almalinux/gpg-signatures.json b/repos/system_upgrade/common/files/distro/almalinux/gpg-signatures.json new file mode 100644 index 00000000..51607273 --- /dev/null +++ b/repos/system_upgrade/common/files/distro/almalinux/gpg-signatures.json @@ -0,0 +1,25 @@ +{ + "keys": [ + "51d6647ec21ad6ea", + "d36cb86cb86b3716", + "2ae81e8aced7258b", + "429785e181b961a5", + "d07bf2a08d50eb66" + ], + "obsoleted-keys": { + "7": [], + "8": [ + "gpg-pubkey-2fa658e0-45700c69", + "gpg-pubkey-37017186-45761324", + "gpg-pubkey-db42a60e-37ea5438" + ], + "9": [ + "gpg-pubkey-d4082792-5b32db75", + "gpg-pubkey-3abb34f8-5ffd890e", + "gpg-pubkey-6275f250-5e26cb2e", + "gpg-pubkey-73e3b907-6581b071" + ], + "10": [] + } + +} diff --git a/repos/system_upgrade/common/files/distro/centos/gpg-signatures.json b/repos/system_upgrade/common/files/distro/centos/gpg-signatures.json index 547b13e7..73a9598f 100644 --- a/repos/system_upgrade/common/files/distro/centos/gpg-signatures.json +++ b/repos/system_upgrade/common/files/distro/centos/gpg-signatures.json @@ -2,7 +2,24 @@ "keys": [ "24c6a8a7f4a80eb5", "05b555b38483c65d", - "4eb84e71f2ee9d55" + "4eb84e71f2ee9d55", + "429785e181b961a5", + "d07bf2a08d50eb66", + "6c7cb6ef305d49d6" ], - "obsoleted-keys": {} + "obsoleted-keys": { + "7": [], + "8": [ + "gpg-pubkey-2fa658e0-45700c69", + "gpg-pubkey-37017186-45761324", + "gpg-pubkey-db42a60e-37ea5438" + ], + "9": [ + "gpg-pubkey-d4082792-5b32db75", + "gpg-pubkey-3abb34f8-5ffd890e", + "gpg-pubkey-6275f250-5e26cb2e", + "gpg-pubkey-73e3b907-6581b071" + ], + "10": [] + } } diff --git a/repos/system_upgrade/common/files/distro/cloudlinux/gpg-signatures.json 
b/repos/system_upgrade/common/files/distro/cloudlinux/gpg-signatures.json new file mode 100644 index 00000000..acad9006 --- /dev/null +++ b/repos/system_upgrade/common/files/distro/cloudlinux/gpg-signatures.json @@ -0,0 +1,22 @@ +{ + "keys": [ + "8c55a6628608cb71", + "d07bf2a08d50eb66", + "429785e181b961a5" + ], + "obsoleted-keys": { + "7": [], + "8": [ + "gpg-pubkey-2fa658e0-45700c69", + "gpg-pubkey-37017186-45761324", + "gpg-pubkey-db42a60e-37ea5438" + ], + "9": [ + "gpg-pubkey-d4082792-5b32db75", + "gpg-pubkey-3abb34f8-5ffd890e", + "gpg-pubkey-6275f250-5e26cb2e", + "gpg-pubkey-73e3b907-6581b071" + ], + "10": [] + } +} diff --git a/repos/system_upgrade/common/files/distro/ol/gpg-signatures.json b/repos/system_upgrade/common/files/distro/ol/gpg-signatures.json new file mode 100644 index 00000000..a53775cf --- /dev/null +++ b/repos/system_upgrade/common/files/distro/ol/gpg-signatures.json @@ -0,0 +1,24 @@ +{ + "keys": [ + "72f97b74ec551f03", + "82562ea9ad986da3", + "bc4d06a08d8b756f", + "429785e181b961a5", + "d07bf2a08d50eb66" + ], + "obsoleted-keys": { + "7": [], + "8": [ + "gpg-pubkey-2fa658e0-45700c69", + "gpg-pubkey-37017186-45761324", + "gpg-pubkey-db42a60e-37ea5438" + ], + "9": [ + "gpg-pubkey-d4082792-5b32db75", + "gpg-pubkey-3abb34f8-5ffd890e", + "gpg-pubkey-6275f250-5e26cb2e", + "gpg-pubkey-73e3b907-6581b071" + ], + "10": [] + } +} diff --git a/repos/system_upgrade/common/files/distro/rhel/gpg-signatures.json b/repos/system_upgrade/common/files/distro/rhel/gpg-signatures.json index 3cc67f82..c1f4acf4 100644 --- a/repos/system_upgrade/common/files/distro/rhel/gpg-signatures.json +++ b/repos/system_upgrade/common/files/distro/rhel/gpg-signatures.json @@ -4,7 +4,9 @@ "5326810137017186", "938a80caf21541eb", "fd372689897da07a", - "45689c882fa658e0" + "45689c882fa658e0", + "429785e181b961a5", + "d07bf2a08d50eb66" ], "obsoleted-keys": { "7": [], @@ -13,7 +15,12 @@ "gpg-pubkey-37017186-45761324", "gpg-pubkey-db42a60e-37ea5438" ], - "9": 
["gpg-pubkey-d4082792-5b32db75"], + "9": [ + "gpg-pubkey-d4082792-5b32db75", + "gpg-pubkey-3abb34f8-5ffd890e", + "gpg-pubkey-6275f250-5e26cb2e", + "gpg-pubkey-73e3b907-6581b071" + ], "10": ["gpg-pubkey-fd431d51-4ae0493b"] } } diff --git a/repos/system_upgrade/common/files/distro/rocky/gpg-signatures.json b/repos/system_upgrade/common/files/distro/rocky/gpg-signatures.json new file mode 100644 index 00000000..f1738e79 --- /dev/null +++ b/repos/system_upgrade/common/files/distro/rocky/gpg-signatures.json @@ -0,0 +1,23 @@ +{ + "keys": [ + "15af5dac6d745a60", + "702d426d350d275d", + "429785e181b961a5", + "d07bf2a08d50eb66" + ], + "obsoleted-keys": { + "7": [], + "8": [ + "gpg-pubkey-2fa658e0-45700c69", + "gpg-pubkey-37017186-45761324", + "gpg-pubkey-db42a60e-37ea5438" + ], + "9": [ + "gpg-pubkey-d4082792-5b32db75", + "gpg-pubkey-3abb34f8-5ffd890e", + "gpg-pubkey-6275f250-5e26cb2e", + "gpg-pubkey-73e3b907-6581b071" + ], + "10": [] + } +} diff --git a/repos/system_upgrade/common/files/distro/scientific/gpg-signatures.json b/repos/system_upgrade/common/files/distro/scientific/gpg-signatures.json new file mode 100644 index 00000000..df764b53 --- /dev/null +++ b/repos/system_upgrade/common/files/distro/scientific/gpg-signatures.json @@ -0,0 +1,22 @@ +{ + "keys": [ + "b0b4183f192a7d7d", + "429785e181b961a5", + "d07bf2a08d50eb66" + ], + "obsoleted-keys": { + "7": [], + "8": [ + "gpg-pubkey-2fa658e0-45700c69", + "gpg-pubkey-37017186-45761324", + "gpg-pubkey-db42a60e-37ea5438" + ], + "9": [ + "gpg-pubkey-d4082792-5b32db75", + "gpg-pubkey-3abb34f8-5ffd890e", + "gpg-pubkey-6275f250-5e26cb2e", + "gpg-pubkey-73e3b907-6581b071" + ], + "10": [] + } +} diff --git a/repos/system_upgrade/common/files/rhel_upgrade.py b/repos/system_upgrade/common/files/rhel_upgrade.py index 34f7b8f9..27824406 100644 --- a/repos/system_upgrade/common/files/rhel_upgrade.py +++ b/repos/system_upgrade/common/files/rhel_upgrade.py @@ -116,6 +116,7 @@ class RhelUpgradeCommand(dnf.cli.Command): 
self.base.conf.best = self.plugin_data['dnf_conf']['best'] self.base.conf.assumeyes = True self.base.conf.gpgcheck = self.plugin_data['dnf_conf']['gpgcheck'] + self.base.conf.localpkg_gpgcheck = False self.base.conf.debug_solver = self.plugin_data['dnf_conf']['debugsolver'] self.base.conf.module_platform_id = self.plugin_data['dnf_conf']['platform_id'] installroot = self.plugin_data['dnf_conf'].get('installroot') @@ -184,6 +185,7 @@ class RhelUpgradeCommand(dnf.cli.Command): to_install = self.plugin_data['pkgs_info']['to_install'] to_remove = self.plugin_data['pkgs_info']['to_remove'] to_upgrade = self.plugin_data['pkgs_info']['to_upgrade'] + to_reinstall = self.plugin_data['pkgs_info']['to_reinstall'] # Modules to enable self._process_entities(entities=[available_modules_to_enable], @@ -196,6 +198,9 @@ class RhelUpgradeCommand(dnf.cli.Command): self._process_entities(entities=to_install, op=self.base.install, entity_name='Package') # Packages to be upgraded self._process_entities(entities=to_upgrade, op=self.base.upgrade, entity_name='Package') + # Packages to be reinstalled + self._process_entities(entities=to_reinstall, op=self.base.reinstall, entity_name='Package') + self.base.distro_sync() if self.opts.tid[0] == 'check': diff --git a/repos/system_upgrade/common/files/upgrade_paths.json b/repos/system_upgrade/common/files/upgrade_paths.json index 279e6eaa..a78a0a01 100644 --- a/repos/system_upgrade/common/files/upgrade_paths.json +++ b/repos/system_upgrade/common/files/upgrade_paths.json @@ -1,20 +1,8 @@ { - "rhel": { + "almalinux": { "default": { - "7.9": ["8.10"], - "8.10": ["9.4", "9.6"], - "9.6": ["10.0"], - "7": ["8.10"], - "8": ["9.4", "9.6"], - "9": ["10.0"] - }, - "saphana": { - "7.9": ["8.10"], - "7": ["8.10"], - "8.10": ["9.6", "9.4"], - "8": ["9.6", "9.4"], - "9.6": ["10.0"], - "9": ["10.0"] + "8.10": ["9.0", "9.1", "9.2", "9.3", "9.4", "9.5", "9.6"], + "9.6": ["10.0"] } }, "centos": { @@ -27,5 +15,10 @@ "9": "9.6", "10": "10.0" } + }, + "rocky": { + 
"default": { + "8.10": ["9.6"] + } } } diff --git a/repos/system_upgrade/common/libraries/config/version.py b/repos/system_upgrade/common/libraries/config/version.py index 7f29c9cd..d8ed9eaa 100644 --- a/repos/system_upgrade/common/libraries/config/version.py +++ b/repos/system_upgrade/common/libraries/config/version.py @@ -18,9 +18,9 @@ OP_MAP = { # These will not be supported fo IPU 9 -> 10 _SUPPORTED_VERSIONS = { # Note: 'rhel-alt' is detected when on 'rhel' with kernel 4.x - '7': {'rhel': ['7.9'], 'rhel-alt': [], 'rhel-saphana': ['7.9']}, - '8': {'rhel': ['8.10'], 'rhel-saphana': ['8.10']}, - '9': {'rhel': ['9.6'], 'rhel-saphana': ['9.6']}, + '7': {'rhel': ['7.9'], 'rhel-alt': [], 'rhel-saphana': ['7.9'], 'centos': ['7.9'], 'ol': ['7.9'], 'scientific': ['7.9']}, + '8': {'rhel': ['8.10'], 'rhel-saphana': ['8.10'], 'centos': ['8.5', '8'], 'almalinux': ['8.10'], 'rocky': ['8.10']}, + '9': {'rhel': ['9.6'], 'rhel-saphana': ['9.6'], 'centos': ['9'], 'almalinux': ['9.6']}, } diff --git a/repos/system_upgrade/common/libraries/distro.py b/repos/system_upgrade/common/libraries/distro.py index 2ed5eacd..219d31d1 100644 --- a/repos/system_upgrade/common/libraries/distro.py +++ b/repos/system_upgrade/common/libraries/distro.py @@ -3,6 +3,7 @@ import os from leapp.exceptions import StopActorExecutionError from leapp.libraries.stdlib import api +from leapp.models import VendorSignatures def get_distribution_data(distribution): @@ -11,8 +12,14 @@ def get_distribution_data(distribution): distribution_config = os.path.join(distributions_path, distribution, 'gpg-signatures.json') if os.path.exists(distribution_config): with open(distribution_config) as distro_config_file: - return json.load(distro_config_file) + distro_config_json = json.load(distro_config_file) else: raise StopActorExecutionError( 'Cannot find distribution signature configuration.', details={'Problem': 'Distribution {} was not found in {}.'.format(distribution, distributions_path)}) + + # Extend with Vendors 
signatures + for siglist in api.consume(VendorSignatures): + distro_config_json["keys"].extend(siglist.sigs) + + return distro_config_json diff --git a/repos/system_upgrade/common/libraries/dnfplugin.py b/repos/system_upgrade/common/libraries/dnfplugin.py index 4f0c3a99..0f31f101 100644 --- a/repos/system_upgrade/common/libraries/dnfplugin.py +++ b/repos/system_upgrade/common/libraries/dnfplugin.py @@ -90,6 +90,7 @@ def build_plugin_data(target_repoids, debug, test, tasks, on_aws): 'to_install': sorted(tasks.to_install), 'to_remove': sorted(tasks.to_remove), 'to_upgrade': sorted(tasks.to_upgrade), + 'to_reinstall': sorted(tasks.to_reinstall), 'modules_to_enable': sorted(['{}:{}'.format(m.name, m.stream) for m in tasks.modules_to_enable]), }, 'dnf_conf': { diff --git a/repos/system_upgrade/common/libraries/fetch.py b/repos/system_upgrade/common/libraries/fetch.py index 82bf4ff3..cb20d775 100644 --- a/repos/system_upgrade/common/libraries/fetch.py +++ b/repos/system_upgrade/common/libraries/fetch.py @@ -146,7 +146,8 @@ def load_data_asset(actor_requesting_asset, asset_filename, asset_fulltext_name, docs_url, - docs_title): + docs_title, + asset_directory="/etc/leapp/files"): """ Load the content of the data asset with given asset_filename and produce :class:`leapp.model.ConsumedDataAsset` message. 
@@ -183,7 +184,7 @@ def load_data_asset(actor_requesting_asset, try: # The asset family ID has the form (major, minor), include only `major` in the URL - raw_asset_contents = read_or_fetch(asset_filename, data_stream=data_stream_major, allow_download=False) + raw_asset_contents = read_or_fetch(asset_filename, directory=asset_directory, data_stream=data_stream_major, allow_download=False) asset_contents = json.loads(raw_asset_contents) except ValueError: msg = 'The {0} file (at {1}) does not contain a valid JSON object.'.format(asset_fulltext_name, asset_filename) diff --git a/repos/system_upgrade/common/libraries/gpg.py b/repos/system_upgrade/common/libraries/gpg.py index c9c3f1fc..96907be0 100644 --- a/repos/system_upgrade/common/libraries/gpg.py +++ b/repos/system_upgrade/common/libraries/gpg.py @@ -122,12 +122,15 @@ def get_path_to_gpg_certs(): if target_product_type == 'beta': certs_dir = '{}beta'.format(target_major_version) distro = api.current_actor().configuration.os_release.release_id - return os.path.join( - api.get_common_folder_path('distro'), - distro, - GPG_CERTS_FOLDER, - certs_dir - ) + return [ + "/etc/leapp/files/vendors.d/rpm-gpg/", + os.path.join( + api.get_common_folder_path('distro'), + distro, + GPG_CERTS_FOLDER, + certs_dir + ) + ] def is_nogpgcheck_set(): diff --git a/repos/system_upgrade/common/libraries/module.py b/repos/system_upgrade/common/libraries/module.py index 7d4e8aa4..db725e71 100644 --- a/repos/system_upgrade/common/libraries/module.py +++ b/repos/system_upgrade/common/libraries/module.py @@ -38,6 +38,7 @@ def _create_or_get_dnf_base(base=None): conf.substitutions.update_from_etc('/') base = dnf.Base(conf=conf) + base.conf.read() base.init_plugins() base.read_all_repos() # configure plugins after the repositories are loaded diff --git a/repos/system_upgrade/common/libraries/repomaputils.py b/repos/system_upgrade/common/libraries/repomaputils.py new file mode 100644 index 00000000..39b7d662 --- /dev/null +++ 
b/repos/system_upgrade/common/libraries/repomaputils.py @@ -0,0 +1,141 @@ +from collections import defaultdict +from leapp.models import PESIDRepositoryEntry, RepoMapEntry, RepositoriesMapping + +class RepoMapData(object): + VERSION_FORMAT = '1.3.0' + + def __init__(self): + self.repositories = [] + self.mapping = {} + + def add_repository(self, data, pesid): + """ + Add new PESIDRepositoryEntry with given pesid from the provided dictionary. + + :param data: A dict containing the data of the added repository. The dictionary structure corresponds + to the repositories entries in the repository mapping JSON schema. + :type data: Dict[str, str] + :param pesid: PES id of the repository family that the newly added repository belongs to. + :type pesid: str + """ + self.repositories.append(PESIDRepositoryEntry( + repoid=data['repoid'], + channel=data['channel'], + rhui=data.get('rhui', ''), + repo_type=data['repo_type'], + arch=data['arch'], + major_version=data['major_version'], + pesid=pesid, + distro=data['distro'], + )) + + def get_repositories(self, valid_major_versions): + """ + Return the list of PESIDRepositoryEntry object matching the specified major versions. + """ + return [repo for repo in self.repositories if repo.major_version in valid_major_versions] + + def get_version_repoids(self, major_version): + """ + Return the list of repository ID strings for repositories matching the specified major version. + """ + return [repo.repoid for repo in self.repositories if repo.major_version == major_version] + + def add_mapping(self, source_major_version, target_major_version, source_pesid, target_pesid): + """ + Add a new mapping entry that is mapping the source pesid to the destination pesid(s), + relevant in an IPU from the supplied source major version to the supplied target + major version. + + :param str source_major_version: Specifies the major version of the source system + for which the added mapping applies. 
+ :param str target_major_version: Specifies the major version of the target system + for which the added mapping applies. + :param str source_pesid: PESID of the source repository. + :param Union[str|List[str]] target_pesid: A single target PESID or a list of target + PESIDs of the added mapping. + """ + # NOTE: it could be more simple, but I prefer to be sure the input data + # contains just one map per source PESID. + key = '{}:{}'.format(source_major_version, target_major_version) + rmap = self.mapping.get(key, defaultdict(set)) + self.mapping[key] = rmap + if isinstance(target_pesid, list): + rmap[source_pesid].update(target_pesid) + else: + rmap[source_pesid].add(target_pesid) + + def get_mappings(self, src_major_version, dst_major_version): + """ + Return the list of RepoMapEntry objects for the specified upgrade path. + + IOW, the whole mapping for specified IPU. + """ + key = '{}:{}'.format(src_major_version, dst_major_version) + rmap = self.mapping.get(key, None) + if not rmap: + return None + map_list = [] + for src_pesid in sorted(rmap.keys()): + map_list.append(RepoMapEntry(source=src_pesid, target=sorted(rmap[src_pesid]))) + return map_list + + @staticmethod + def load_from_dict(data): + if data['version_format'] != RepoMapData.VERSION_FORMAT: + raise ValueError( + 'The obtained repomap data has unsupported version of format.' 
+ ' Get {} required {}' + .format(data['version_format'], RepoMapData.VERSION_FORMAT) + ) + + repomap = RepoMapData() + + # Load repositories + existing_pesids = set() + for repo_family in data['repositories']: + existing_pesids.add(repo_family['pesid']) + for repo in repo_family['entries']: + repomap.add_repository(repo, repo_family['pesid']) + + # Load mappings + for mapping in data['mapping']: + for entry in mapping['entries']: + if not isinstance(entry['target'], list): + raise ValueError( + 'The target field of a mapping entry is not a list: {}' + .format(entry) + ) + + for pesid in [entry['source']] + entry['target']: + if pesid not in existing_pesids: + raise ValueError( + 'The {} pesid is not related to any repository.' + .format(pesid) + ) + repomap.add_mapping( + source_major_version=mapping['source_major_version'], + target_major_version=mapping['target_major_version'], + source_pesid=entry['source'], + target_pesid=entry['target'], + ) + return repomap + +def combine_repomap_messages(mapping_list): + """ + Combine multiple RepositoryMapping messages into one. + Needed because we might get more than one message if there are vendors present. + """ + combined_mapping = [] + combined_repositories = [] + # Depending on whether there are any vendors present, we might get more than one message.
+ for msg in mapping_list: + combined_mapping.extend(msg.mapping) + combined_repositories.extend(msg.repositories) + + combined_repomapping = RepositoriesMapping( + mapping=combined_mapping, + repositories=combined_repositories + ) + + return combined_repomapping diff --git a/repos/system_upgrade/common/libraries/rhsm.py b/repos/system_upgrade/common/libraries/rhsm.py index e7b074aa..0b260c86 100644 --- a/repos/system_upgrade/common/libraries/rhsm.py +++ b/repos/system_upgrade/common/libraries/rhsm.py @@ -94,7 +94,7 @@ def _handle_rhsm_exceptions(hint=None): def skip_rhsm(): """Check whether we should skip RHSM related code.""" - return get_env('LEAPP_NO_RHSM', '0') == '1' + return True def with_rhsm(f): @@ -327,11 +327,6 @@ def set_container_mode(context): could be affected and the generated repo file in the container could be affected as well (e.g. when the release is set, using rhsm, on the host). - We want to put RHSM into the container mode always when /etc/rhsm and - /etc/pki/entitlement directories exists, even when leapp is executed with - --no-rhsm option. If any of these directories are missing, skip other - actions - most likely RHSM is not installed in such a case. - :param context: An instance of a mounting.IsolatedActions class :type context: mounting.IsolatedActions class """ @@ -339,20 +334,8 @@ def set_container_mode(context): api.current_logger().error('Trying to set RHSM into the container mode' 'on host. Skipping the action.') return - # TODO(pstodulk): check "rhsm identity" whether system is registered - # and the container mode should be required - if (not os.path.exists(context.full_path('/etc/rhsm')) - or not os.path.exists(context.full_path('/etc/pki/entitlement'))): - api.current_logger().warning( - 'Cannot set the container mode for the subscription-manager as' - ' one of required directories is missing. Most likely RHSM is not' - ' installed. Skipping other actions.' 
- ) - return - try: context.call(['ln', '-s', '/etc/rhsm', '/etc/rhsm-host']) - context.call(['ln', '-s', '/etc/pki/entitlement', '/etc/pki/entitlement-host']) except CalledProcessError: raise StopActorExecutionError( message='Cannot set the container mode for the subscription-manager.') diff --git a/repos/system_upgrade/common/libraries/rhui.py b/repos/system_upgrade/common/libraries/rhui.py index 30de0275..b3225d5f 100644 --- a/repos/system_upgrade/common/libraries/rhui.py +++ b/repos/system_upgrade/common/libraries/rhui.py @@ -158,6 +158,17 @@ RHUI_SETUPS = { ('cdn.redhat.com-chain.crt', RHUI_PKI_DIR), ('content-rhel9.crt', RHUI_PKI_PRODUCT_DIR) ], os_version='9'), + mk_rhui_setup(clients={'rh-amazon-rhui-client'}, leapp_pkg='leapp-rhui-aws', + mandatory_files=[ + ('rhui-client-config-server-10.crt', RHUI_PKI_PRODUCT_DIR), + ('rhui-client-config-server-10.key', RHUI_PKI_DIR), + ('leapp-aws.repo', YUM_REPOS_PATH) + ], + optional_files=[ + ('content-rhel10.key', RHUI_PKI_DIR), + ('cdn.redhat.com-chain.crt', RHUI_PKI_DIR), + ('content-rhel10.crt', RHUI_PKI_PRODUCT_DIR) + ], os_version='10'), ], RHUIFamily(RHUIProvider.AWS, arch=arch.ARCH_ARM64, client_files_folder='aws'): [ mk_rhui_setup(clients={'rh-amazon-rhui-client-arm'}, optional_files=[], os_version='7', arch=arch.ARCH_ARM64), @@ -185,6 +196,17 @@ RHUI_SETUPS = { ('cdn.redhat.com-chain.crt', RHUI_PKI_DIR), ('content-rhel9.crt', RHUI_PKI_PRODUCT_DIR) ], os_version='9', arch=arch.ARCH_ARM64), + mk_rhui_setup(clients={'rh-amazon-rhui-client'}, leapp_pkg='leapp-rhui-aws', + mandatory_files=[ + ('rhui-client-config-server-10.crt', RHUI_PKI_PRODUCT_DIR), + ('rhui-client-config-server-10.key', RHUI_PKI_DIR), + ('leapp-aws.repo', YUM_REPOS_PATH) + ], + optional_files=[ + ('content-rhel10.key', RHUI_PKI_DIR), + ('cdn.redhat.com-chain.crt', RHUI_PKI_DIR), + ('content-rhel10.crt', RHUI_PKI_PRODUCT_DIR) + ], os_version='10'), ], RHUIFamily(RHUIProvider.AWS, variant=RHUIVariant.SAP, client_files_folder='aws-sap-e4s'): [ 
mk_rhui_setup(clients={'rh-amazon-rhui-client-sap-bundle'}, optional_files=[], os_version='7', @@ -250,6 +272,19 @@ RHUI_SETUPS = { ], extra_info={'agent_pkg': 'WALinuxAgent'}, os_version='9'), + mk_rhui_setup(clients={'rhui-azure-rhel10'}, leapp_pkg='leapp-rhui-azure', + mandatory_files=[ + ('leapp-azure.repo', YUM_REPOS_PATH), + # We need to have the new GPG key ready when we will be bootstrapping + # target rhui client. + ('RPM-GPG-KEY-microsoft-azure-release-new', '/etc/pki/rpm-gpg/') + ], + optional_files=[ + ('key.pem', RHUI_PKI_DIR), + ('content.crt', RHUI_PKI_PRODUCT_DIR) + ], + extra_info={'agent_pkg': 'WALinuxAgent'}, + os_version='10'), ], RHUIFamily(RHUIProvider.AZURE, variant=RHUIVariant.SAP_APPS, client_files_folder='azure-sap-apps'): [ mk_rhui_setup(clients={'rhui-azure-rhel7-base-sap-apps'}, os_version='7', content_channel=ContentChannel.EUS), @@ -348,6 +383,13 @@ RHUI_SETUPS = { ('content.crt', RHUI_PKI_PRODUCT_DIR) ], os_version='9'), + mk_rhui_setup(clients={'aliyun_rhui_rhel10'}, leapp_pkg='leapp-rhui-alibaba', + mandatory_files=[('leapp-alibaba.repo', YUM_REPOS_PATH)], + optional_files=[ + ('key.pem', RHUI_PKI_DIR), + ('content.crt', RHUI_PKI_PRODUCT_DIR) + ], + os_version='10'), ], RHUIFamily(RHUIProvider.ALIBABA, arch=arch.ARCH_ARM64, client_files_folder='alibaba'): [ mk_rhui_setup(clients={'aliyun_rhui_rhel8'}, leapp_pkg='leapp-rhui-alibaba', @@ -364,6 +406,13 @@ RHUI_SETUPS = { ('content.crt', RHUI_PKI_PRODUCT_DIR) ], os_version='9'), + mk_rhui_setup(clients={'aliyun_rhui_rhel10'}, leapp_pkg='leapp-rhui-alibaba', + mandatory_files=[('leapp-alibaba.repo', YUM_REPOS_PATH)], + optional_files=[ + ('key.pem', RHUI_PKI_DIR), + ('content.crt', RHUI_PKI_PRODUCT_DIR) + ], + os_version='10'), ] } @@ -566,15 +615,29 @@ RHUI_CLOUD_MAP = { ], }, }, + '9to10': { + 'alibaba': { + 'src_pkg': 'aliyun_rhui_rhel9', + 'target_pkg': 'aliyun_rhui_rhel10', + 'leapp_pkg': 'leapp-rhui-alibaba', + 'leapp_pkg_repo': 'leapp-alibaba.repo', + 'files_map': [ + 
('content.crt', RHUI_PKI_PRODUCT_DIR), + ('key.pem', RHUI_PKI_DIR), + ('leapp-alibaba.repo', YUM_REPOS_PATH) + ], + }, + } } -# TODO(mmatuska) deprecate or adjust for 9to10? def get_upg_path(): """ Get upgrade path in specific string format """ - return '7to8' if get_target_major_version() == '8' else '8to9' + source_major_version = get_source_major_version() + target_major_version = get_target_major_version() + return '{0}to{1}'.format(source_major_version, target_major_version) @deprecated(since='2023-07-27', message='This functionality has been replaced with the RHUIInfo message.') diff --git a/repos/system_upgrade/common/models/activevendorlist.py b/repos/system_upgrade/common/models/activevendorlist.py new file mode 100644 index 00000000..de4056fb --- /dev/null +++ b/repos/system_upgrade/common/models/activevendorlist.py @@ -0,0 +1,7 @@ +from leapp.models import Model, fields +from leapp.topics import VendorTopic + + +class ActiveVendorList(Model): + topic = VendorTopic + data = fields.List(fields.String()) diff --git a/repos/system_upgrade/common/models/grubenv.py b/repos/system_upgrade/common/models/grubenv.py index be541131..c7f339f1 100644 --- a/repos/system_upgrade/common/models/grubenv.py +++ b/repos/system_upgrade/common/models/grubenv.py @@ -1,12 +1,11 @@ -from leapp.models import fields, Model +from leapp.models import Model from leapp.topics import SystemFactsTopic -class HybridImage(Model): +class ConvertGrubenvTask(Model): """ - Model used for instructing Leapp to convert "grubenv" symlink - into a regular file in case of hybrid (BIOS/EFI) images using BIOS - on Azure. + Model used for instructing Leapp to convert "grubenv" symlink into a + regular file. 
""" + topic = SystemFactsTopic - detected = fields.Boolean(default=False) diff --git a/repos/system_upgrade/common/models/hybridimage.py b/repos/system_upgrade/common/models/hybridimage.py new file mode 100644 index 00000000..6cf860ef --- /dev/null +++ b/repos/system_upgrade/common/models/hybridimage.py @@ -0,0 +1,12 @@ +from leapp.models import fields, Model +from leapp.topics import SystemFactsTopic + + +class HybridImageAzure(Model): + """ + Model used to signify that the system is using a hybrid (BIOS/EFI) images + using BIOS on Azure. + """ + + topic = SystemFactsTopic + grubenv_is_symlink_to_efi = fields.Boolean(default=False) diff --git a/repos/system_upgrade/common/models/repositoriesmap.py b/repos/system_upgrade/common/models/repositoriesmap.py index 842cd807..fc740606 100644 --- a/repos/system_upgrade/common/models/repositoriesmap.py +++ b/repos/system_upgrade/common/models/repositoriesmap.py @@ -96,3 +96,4 @@ class RepositoriesMapping(Model): mapping = fields.List(fields.Model(RepoMapEntry), default=[]) repositories = fields.List(fields.Model(PESIDRepositoryEntry), default=[]) + vendor = fields.Nullable(fields.String()) diff --git a/repos/system_upgrade/common/models/rpmtransactiontasks.py b/repos/system_upgrade/common/models/rpmtransactiontasks.py index 7e2870d0..05d4e941 100644 --- a/repos/system_upgrade/common/models/rpmtransactiontasks.py +++ b/repos/system_upgrade/common/models/rpmtransactiontasks.py @@ -10,6 +10,7 @@ class RpmTransactionTasks(Model): to_keep = fields.List(fields.String(), default=[]) to_remove = fields.List(fields.String(), default=[]) to_upgrade = fields.List(fields.String(), default=[]) + to_reinstall = fields.List(fields.String(), default=[]) modules_to_enable = fields.List(fields.Model(Module), default=[]) modules_to_reset = fields.List(fields.Model(Module), default=[]) diff --git a/repos/system_upgrade/common/models/targetrepositories.py b/repos/system_upgrade/common/models/targetrepositories.py index 02c6c5e5..f9fd4238 100644 
--- a/repos/system_upgrade/common/models/targetrepositories.py +++ b/repos/system_upgrade/common/models/targetrepositories.py @@ -21,6 +21,12 @@ class CustomTargetRepository(TargetRepositoryBase): enabled = fields.Boolean(default=True) +class VendorCustomTargetRepositoryList(Model): + topic = TransactionTopic + vendor = fields.String() + repos = fields.List(fields.Model(CustomTargetRepository)) + + class TargetRepositories(Model): """ Repositories supposed to be used during the IPU process diff --git a/repos/system_upgrade/common/models/vendorsignatures.py b/repos/system_upgrade/common/models/vendorsignatures.py new file mode 100644 index 00000000..f456aec5 --- /dev/null +++ b/repos/system_upgrade/common/models/vendorsignatures.py @@ -0,0 +1,8 @@ +from leapp.models import Model, fields +from leapp.topics import VendorTopic + + +class VendorSignatures(Model): + topic = VendorTopic + vendor = fields.String() + sigs = fields.List(fields.String()) diff --git a/repos/system_upgrade/common/models/vendorsourcerepos.py b/repos/system_upgrade/common/models/vendorsourcerepos.py new file mode 100644 index 00000000..b7a219b4 --- /dev/null +++ b/repos/system_upgrade/common/models/vendorsourcerepos.py @@ -0,0 +1,12 @@ +from leapp.models import Model, fields +from leapp.topics import VendorTopic + + +class VendorSourceRepos(Model): + """ + This model contains the data on all source repositories associated with a specific vendor. + Its data is used to determine whether the vendor should be included into the upgrade process. 
+ """ + topic = VendorTopic + vendor = fields.String() + source_repoids = fields.List(fields.String()) diff --git a/repos/system_upgrade/common/topics/vendortopic.py b/repos/system_upgrade/common/topics/vendortopic.py new file mode 100644 index 00000000..014b7afb --- /dev/null +++ b/repos/system_upgrade/common/topics/vendortopic.py @@ -0,0 +1,5 @@ +from leapp.topics import Topic + + +class VendorTopic(Topic): + name = 'vendor_topic' diff --git a/repos/system_upgrade/el7toel8/.leapp/info b/repos/system_upgrade/el7toel8/.leapp/info deleted file mode 100644 index db977504..00000000 --- a/repos/system_upgrade/el7toel8/.leapp/info +++ /dev/null @@ -1 +0,0 @@ -{"repos": ["efcf9016-f2d1-4609-9329-a298e6587b3c", "644900a5-c347-43a3-bfab-f448f46d9647"], "messages": {}, "name": "system_upgrade_el7toel8", "id": "c47fbc3d-ae38-416e-9176-7163d67d94f6"} diff --git a/repos/system_upgrade/el7toel8/.leapp/leapp.conf b/repos/system_upgrade/el7toel8/.leapp/leapp.conf deleted file mode 100644 index 707baa3e..00000000 --- a/repos/system_upgrade/el7toel8/.leapp/leapp.conf +++ /dev/null @@ -1,6 +0,0 @@ - -[repositories] -repo_path=${project:root_dir} - -[database] -path=${project:state_dir}/leapp.db diff --git a/repos/system_upgrade/el7toel8/actors/authselectapply/actor.py b/repos/system_upgrade/el7toel8/actors/authselectapply/actor.py deleted file mode 100644 index 8e8b8f59..00000000 --- a/repos/system_upgrade/el7toel8/actors/authselectapply/actor.py +++ /dev/null @@ -1,71 +0,0 @@ -from leapp import reporting -from leapp.actors import Actor -from leapp.libraries.stdlib import CalledProcessError, run -from leapp.models import Authselect, AuthselectDecision -from leapp.reporting import create_report, Report -from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag - -resources = [ - reporting.RelatedResource('package', 'authselect'), - reporting.RelatedResource('package', 'authconfig'), - reporting.RelatedResource('file', '/etc/nsswitch.conf') -] - - -class AuthselectApply(Actor): - 
""" - Apply changes suggested by AuthselectScanner. - - If confirmed by admin in AuthselectDecision, call suggested authselect - command to configure the system using this tool. - """ - - name = 'authselect_apply' - consumes = (Authselect, AuthselectDecision,) - produces = (Report,) - tags = (IPUWorkflowTag, ApplicationsPhaseTag) - - def process(self): - model = next(self.consume(Authselect)) - decision = next(self.consume(AuthselectDecision)) - - if not decision.confirmed or model.profile is None: - return - - command = ['authselect', 'select', '--force', model.profile] + model.features - - try: - run(command) - except CalledProcessError as err: - create_report([ # pylint: disable-msg=too-many-arguments - reporting.Title('Authselect call failed'), - reporting.Summary(str(err)), - reporting.Severity(reporting.Severity.MEDIUM), - reporting.Groups([ - reporting.Groups.AUTHENTICATION, - reporting.Groups.SECURITY, - reporting.Groups.TOOLS - ]), - reporting.Groups([ - reporting.Groups.FAILURE - ]) - ] + resources) # pylint: disable-msg=too-many-arguments - return - - try: - run(['systemctl', 'enable', 'oddjobd.service']) - except (OSError, CalledProcessError) as e: - self.log.warning('Error enabling oddjobd.service: {}'.format(e)) - - create_report([ # pylint: disable-msg=too-many-arguments - reporting.Title('System was converted to authselect.'), - reporting.Summary( - 'System was converted to authselect with the ' - 'following call: "{}"'.format(' '.join(command)) - ), - reporting.Groups([ - reporting.Groups.AUTHENTICATION, - reporting.Groups.SECURITY, - reporting.Groups.TOOLS - ]) - ] + resources) # pylint: disable-msg=too-many-arguments diff --git a/repos/system_upgrade/el7toel8/actors/authselectcheck/actor.py b/repos/system_upgrade/el7toel8/actors/authselectcheck/actor.py deleted file mode 100644 index 02a102e6..00000000 --- a/repos/system_upgrade/el7toel8/actors/authselectcheck/actor.py +++ /dev/null @@ -1,185 +0,0 @@ -from leapp import reporting -from 
leapp.actors import Actor -from leapp.dialogs import Dialog -from leapp.dialogs.components import BooleanComponent -from leapp.models import Authselect, AuthselectDecision -from leapp.reporting import create_report, Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - -resources = [ - reporting.RelatedResource('package', 'authselect'), - reporting.RelatedResource('package', 'authconfig'), - reporting.RelatedResource('file', '/etc/nsswitch.conf') -] - - -class AuthselectCheck(Actor): - """ - Confirm suggested authselect call from AuthselectScanner. - - AuthselectScanner produces an Authselect model that contains changes - that are suggested based on current configuration. This actor will - ask administrator for confirmation and will report the result. - """ - - name = 'authselect_check' - consumes = (Authselect,) - produces = (AuthselectDecision, Report,) - tags = (IPUWorkflowTag, ChecksPhaseTag) - dialogs = ( - Dialog( - scope='authselect_check', - reason='Confirmation', - components=( - BooleanComponent( - key='confirm', - label='Configure PAM and nsswitch.conf with the following ' - 'authselect call?', - default=True, - description='If yes, suggested authselect profile will ' - 'be applied on your system to generate ' - 'PAM and nsswitch.conf configuration. ' - 'If no, current configuration will be kept ' - 'intact.', - reason='There is a new tool called authselect in RHEL8 ' - 'that replaced authconfig which is used to manage ' - 'authentication (PAM) and identity (nsswitch.conf) ' - 'sources. It is recommended to switch to this tool.' - ), - ) - ), - ) - - def process(self): - model = next(self.consume(Authselect)) - - # If there is no equivalent authselect profile we will not touch - # the current configuration. Therefore there is no need for - # confirmation. 
- if model.profile is None: - self.produce_current_configuration(model) - return - - command = 'authselect select {0} {1} --force'.format( - model.profile, - ' '.join(model.features) - ) - - # We do not need admin confirmation if the current - # configuration was generated with authconfig. - if not model.confirm: - self.produce_authconfig_configuration(model, command) - return - - # Authselect profile is available but we require confirmation. - confirmed = self.get_confirmation(model, command) - if confirmed is not None: - # A user has made his choice - self.produce_suggested_configuration(model, confirmed, command) - - def get_confirmation(self, model, command): - dialog = self.dialogs[0] - - dialog.components[0].label += " {}".format(command) - - return self.get_answers(dialog).get('confirm') - - def produce_authconfig_configuration(self, model, command): - self.produce( - AuthselectDecision( - confirmed=True - ) - ) - - create_report([ - reporting.Title( - 'Authselect will be used to configure PAM and nsswitch.conf.' - ), - reporting.Summary( - 'There is a new tool called authselect in RHEL8 that ' - 'replaced authconfig. The upgrade process detected ' - 'that authconfig was used to generate current ' - 'configuration and it will automatically convert it ' - 'to authselect. Authselect call is: {}. The process will ' - 'also enable "oddjobd" systemd service on startup'.format(command) - ), - reporting.Groups([ - reporting.Groups.AUTHENTICATION, - reporting.Groups.SECURITY, - reporting.Groups.TOOLS - ]) - ] + resources) - - def produce_current_configuration(self, model): - self.produce( - AuthselectDecision( - confirmed=False - ) - ) - - create_report([ - reporting.Title( - 'Current PAM and nsswitch.conf configuration will be kept.' - ), - reporting.Summary( - 'There is a new tool called authselect in RHEL8 that ' - 'replaced authconfig. The upgrade process was unable ' - 'to find an authselect profile that would be equivalent ' - 'to your current configuration. 
Therefore your ' - 'configuration will be left intact.' - ), - reporting.Groups([ - reporting.Groups.AUTHENTICATION, - reporting.Groups.SECURITY, - reporting.Groups.TOOLS - ]), - reporting.Severity(reporting.Severity.INFO) - ] + resources) - - def produce_suggested_configuration(self, model, confirmed, command): - self.produce( - AuthselectDecision( - confirmed=confirmed - ) - ) - if confirmed: - create_report([ - reporting.Title( - 'Authselect will be used to configure PAM and nsswitch.conf.' - ), - reporting.Summary( - 'There is a new tool called authselect in RHEL8 that ' - 'replaced authconfig. The upgrade process suggested ' - 'an authselect profile that is similar to your ' - 'current configuration and your system will be switched ' - 'to this profile. Authselect call is: {}. The process will ' - 'also enable "oddjobd" systemd service on startup'.format(command) - ), - reporting.Groups([ - reporting.Groups.AUTHENTICATION, - reporting.Groups.SECURITY, - reporting.Groups.TOOLS - ]) - ] + resources) - - else: - create_report([ - reporting.Title( - 'Current PAM and nsswitch.conf configuration will be kept.' - ), - reporting.Summary( - 'There is a new tool called authselect in RHEL8 that ' - 'replaced authconfig. The upgrade process suggested ' - 'an authselect profile that is similar to your ' - 'current configuration. 
However this suggestion was ' - 'refused therefore existing configuration will be kept ' - 'intact.', - ), - reporting.Groups([ - reporting.Groups.AUTHENTICATION, - reporting.Groups.SECURITY, - reporting.Groups.TOOLS - ]), - reporting.Remediation(commands=[[command]]), - reporting.Severity(reporting.Severity.MEDIUM) - ] + resources) diff --git a/repos/system_upgrade/el7toel8/actors/authselectscanner/actor.py b/repos/system_upgrade/el7toel8/actors/authselectscanner/actor.py deleted file mode 100644 index 3a4033a6..00000000 --- a/repos/system_upgrade/el7toel8/actors/authselectscanner/actor.py +++ /dev/null @@ -1,95 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor.authselectscanner import ( - Authconfig, - AuthselectScannerLibrary, - DConf, - read_file -) -from leapp.libraries.common.pam import PAM -from leapp.models import Authselect -from leapp.tags import FactsPhaseTag, IPUWorkflowTag - - -class AuthselectScanner(Actor): - """ - Detect what authselect configuration should be suggested to administrator. - - 1. Detect possible authselect profile by looking up modules in PAM - or by checking that daemon is enabled. - - pam_sss -> sssd - - pam_winbind -> winbind - - ypbind enabled -> nis - - If more then one module/daemon is detected that we will keep the - configuration intact. No authselect profile can be applied. - - 2. Detect authselect profile features by looking up modules in PAM - or nsswitch.conf. - - pam_faillock => with-faillock - - pam_fprintd => with-fingerprint - - pam_access => with-pamaccess - - pam_mkhomedir => with-mkhomedir - - pam_oddjob_mkhomedir => with-mkhomedir - - 3. Check if there are any unknown PAM modules. - If there are used PAM modules not used in authselect (such as pam_ldap), - we must keep the configuration intact. - - 4. Check if authconfig was used to create current configuration. - If yes, we can automatically convert the configuration to authselect. - If no, we need admin's confirmation. 
- - - Check that /etc/sysconfig/authconfig exists. - - Check that PAM configuration uses authconfig files. - - Check that PAM configuration was not touch after sysconfig file - was created. - """ - - name = 'authselect_scanner' - consumes = () - produces = (Authselect,) - tags = (IPUWorkflowTag, FactsPhaseTag) - - known_modules = [ - 'pam_access', - 'pam_deny', - 'pam_ecryptfs', - 'pam_env', - 'pam_faildelay', - 'pam_faillock', - 'pam_fprintd', - 'pam_keyinit', - 'pam_krb5', - 'pam_lastlog', - 'pam_limits', - 'pam_localuser', - 'pam_mkhomedir', - 'pam_oddjob_mkhomedir', - 'pam_permit', - 'pam_pkcs11', - 'pam_pwquality', - 'pam_sss', - 'pam_succeed_if', - 'pam_systemd', - 'pam_u2f', - 'pam_umask', - 'pam_unix', - 'pam_winbind' - ] - """ - List of PAM modules that are known by authselect. - """ - - def process(self): - # Load configuration - ac = Authconfig(read_file('/etc/sysconfig/authconfig')) - dconf = DConf(read_file('/etc/dconf/db/distro.d/10-authconfig')) - pam = PAM.from_system_configuration() - nsswitch = read_file("/etc/nsswitch.conf") - - scanner = AuthselectScannerLibrary( - self.known_modules, - ac, dconf, pam, nsswitch - ) - - self.produce(scanner.process()) diff --git a/repos/system_upgrade/el7toel8/actors/authselectscanner/libraries/authselectscanner.py b/repos/system_upgrade/el7toel8/actors/authselectscanner/libraries/authselectscanner.py deleted file mode 100644 index 82932202..00000000 --- a/repos/system_upgrade/el7toel8/actors/authselectscanner/libraries/authselectscanner.py +++ /dev/null @@ -1,267 +0,0 @@ -import os -import re -import textwrap - -from six import StringIO - -from leapp.libraries.common import utils -from leapp.libraries.stdlib import CalledProcessError, run -from leapp.models import Authselect - - -def read_file(config): - """ - Read file contents. Return empty string if the file does not exist. 
- """ - if not os.path.isfile(config): - return "" - with open(config) as f: - return f.read() - - -def is_service_enabled(service): - """ - Return true if @service is enabled with systemd, false otherwise. - """ - try: - run(["/usr/bin/systemctl", "is-enabled", "{}.service".format(service)]) - except (OSError, CalledProcessError): - return False - - return True - - -class ConfigFile(object): - """ - Base class for config parsers. - """ - - def __init__(self, content): - parser = utils.parse_config(StringIO(textwrap.dedent(content))) - self.config = parser - - def get_string(self, section, option): - if not self.config.has_option(section, option): - return None - - return self.config.get(section, option).strip('"\'') - - def get_bool(self, section, option): - if not self.config.has_option(section, option): - return False - - return self.config.getboolean(section, option) - - -class Authconfig(ConfigFile): - """ - Parse authconfig configuration. - """ - - def __init__(self, config): - # We add a custom section to convert the config to ini format - super(Authconfig, self).__init__('[authconfig]\n' + config) - - def get_string(self, option): - return super(Authconfig, self).get_string('authconfig', option) - - def get_bool(self, option): - return super(Authconfig, self).get_bool('authconfig', option) - - -class DConf(ConfigFile): - """ - Parse dconf configuration. - """ - - -class AuthselectScannerLibrary(object): - """ - Detect what authselect configuration should be suggested to administrator. - - 1. Detect possible authselect profile by looking up modules in PAM - or by checking that daemon is enabled. - - pam_sss -> sssd - - pam_winbind -> winbind - - ypbind enabled -> nis - - If more then one module/daemon is detected that we will keep the - configuration intact. No authselect profile can be applied. - - 2. Detect authselect profile features by looking up modules in PAM - or nsswitch.conf. 
- - pam_faillock => with-faillock - - pam_fprintd => with-fingerprint - - pam_access => with-pamaccess - - pam_mkhomedir => with-mkhomedir - - pam_oddjob_mkhomedir => with-mkhomedir - - 3. Check if there are any unknown PAM modules. - If there are used PAM modules not used in authselect (such as pam_ldap), - we must keep the configuration intact. - - 4. Check if authconfig was used to create current configuration. - If yes, we can automatically convert the configuration to authselect. - If no, we need admin's confirmation. - - - Check that /etc/sysconfig/authconfig exists. - - Check that PAM configuration uses authconfig files. - - Check that PAM configuration was not touch after sysconfig file - was created. - """ - - def __init__(self, known_modules, authconfig, dconf, pam, nsswitch): - self.known_modules = known_modules - self.ac = authconfig - self.dconf = dconf - self.pam = pam - self.nsswitch = nsswitch - - self.profile = None - self.features = [] - self.confirm = True - - def process(self): - # Detect possible authselect configuration - self.profile = self.step_detect_profile() - self.features += self.step_detect_features() - self.features += self.step_detect_sssd_features(self.profile) - self.features += self.step_detect_winbind_features(self.profile) - - # Check if there is any module that is not known by authselect. - # In this case we must left existing configuration intact. - if self.pam.has_unknown_module(self.known_modules): - self.profile = None - self.features = [] - - # Check if the proposed authselect configuration can be activated - # automatically or admin's confirmation is required. 
- self.confirm = self.step_detect_if_confirmation_is_required() - - # Remove duplicates - self.features = sorted(set(self.features)) - - return Authselect( - profile=self.profile, - features=self.features, - confirm=self.confirm - ) - - def step_detect_profile(self): - """ - Authselect supports three different profiles: - - sssd - - winbind - - nis - - Only one of these profiles can be selected therefore if existing - configuration contains combination of these daemons we can not - suggest any profile and must keep existing configuration. - """ - enabled_no = 0 - profile = None - - if self.pam.has('pam_sss'): - profile = 'sssd' - enabled_no += 1 - - if self.pam.has('pam_winbind'): - profile = 'winbind' - enabled_no += 1 - - if is_service_enabled('ypbind'): - profile = 'nis' - enabled_no += 1 - - return profile if enabled_no == 1 else None - - def step_detect_features(self): - pam_map = { - 'pam_faillock': 'with-faillock', - 'pam_fprintd': 'with-fingerprint', - 'pam_access': 'with-pamaccess', - 'pam_mkhomedir': 'with-mkhomedir', - 'pam_oddjob_mkhomedir': 'with-mkhomedir' - } - - features = [] - - for module, feature in pam_map.items(): - if self.pam.has(module): - features.append(feature) - - return features - - def step_detect_sssd_features(self, profile): - if profile != "sssd": - return [] - - # sudoers: sss - result = re.search( - "^[ \t]*sudoers[ \t]*:.*sss.*$", - self.nsswitch, - re.MULTILINE - ) - - features = [] - - if result is not None: - features.append("with-sudo") - - # SSSD Smartcard support - # We enable smartcard support only if it was not handled by pam_pkcs11. - # Otherwise pam_pkcs11 configuration must be converted manually. 
- if not self.pam.has('pam_pkcs11'): - if self.ac.get_bool('USESMARTCARD'): - features.append("with-smartcard") - - if self.ac.get_bool('FORCESMARTCARD'): - features.append("with-smartcard-required") - - if self.dconf.get_string( - 'org/gnome/settings-daemon/peripherals/smartcard', - 'removal-action' - ) == 'lock-screen': - features.append("with-smartcard-lock-on-removal") - - return features - - def step_detect_winbind_features(self, profile): - if profile != "winbind": - return [] - - if self.ac.get_bool('WINBINDKRB5'): - return ['with-krb5'] - - return [] - - def step_detect_if_confirmation_is_required(self): - sysconfig = '/etc/sysconfig/authconfig' - links = { - '/etc/pam.d/fingerprint-auth': '/etc/pam.d/fingerprint-auth-ac', - '/etc/pam.d/password-auth': '/etc/pam.d/password-auth-ac', - '/etc/pam.d/postlogin': '/etc/pam.d/postlogin-ac', - '/etc/pam.d/smartcard-auth': '/etc/pam.d/smartcard-auth-ac', - '/etc/pam.d/system-auth': '/etc/pam.d/system-auth-ac' - } - - # Check that authconfig was used to create the configuration - if not os.path.isfile(sysconfig): - return True - - # Check that all files are symbolic links to authconfig files - for name, target in links.items(): - if not os.path.islink(name): - return True - - if os.readlink(name) != target: - return True - - # Check that all file were not modified after - # /etc/sysconfig/authconfig was created. 
- mtime = os.path.getmtime(sysconfig) - for f in links.values(): - if os.path.getmtime(f) > mtime: - return True - - return False diff --git a/repos/system_upgrade/el7toel8/actors/authselectscanner/tests/unit_test_authselectscanner.py b/repos/system_upgrade/el7toel8/actors/authselectscanner/tests/unit_test_authselectscanner.py deleted file mode 100644 index bfff5fbe..00000000 --- a/repos/system_upgrade/el7toel8/actors/authselectscanner/tests/unit_test_authselectscanner.py +++ /dev/null @@ -1,622 +0,0 @@ -import textwrap - -from mock import patch - -from leapp.libraries.actor.authselectscanner import Authconfig, AuthselectScannerLibrary, DConf, read_file -from leapp.libraries.common.pam import PAM - - -def get_config(config): - return textwrap.dedent(config).strip() - - -def test_read_file__non_existent(): - content = read_file('/this/does/not/exist') - assert content == '' - - -def test_read_file__ok(): - content = read_file(__file__) - assert content != '' - assert 'test_read_file__ok' in content - - -def test_Authconfig_get_bool__non_existent(): - obj = Authconfig('') - assert not obj.get_bool('non-existent-option') - - -def test_Authconfig_get_bool__true(): - obj = Authconfig(get_config(''' - test_a=True - test_b=true - test_c=Yes - test_d=yes - ''')) - - assert obj.get_bool('test_a') - assert obj.get_bool('test_b') - assert obj.get_bool('test_c') - assert obj.get_bool('test_d') - - -def test_Authconfig_get_bool__false(): - obj = Authconfig(get_config(''' - test_a=False - test_b=false - test_c=No - test_d=no - ''')) - - assert not obj.get_bool('test_a') - assert not obj.get_bool('test_b') - assert not obj.get_bool('test_c') - assert not obj.get_bool('test_d') - - -def test_Authconfig_get_string__non_existent(): - obj = Authconfig('') - assert obj.get_string('non-existent-option') is None - - -def test_Authconfig_get_string__ok(): - obj = Authconfig(get_config(''' - test_a="str" - test_b=str - ''')) - - assert obj.get_string('test_a') == 'str' - assert 
obj.get_string('test_b') == 'str' - - -def test_DConf_get_bool__non_existent(): - obj = DConf('') - assert not obj.get_bool('section', 'non-existent-option') - - -def test_DConf_get_bool__true(): - obj = DConf(get_config(''' - [section] - test_a=True - test_b=true - test_c=Yes - test_d=yes - ''')) - - assert obj.get_bool('section', 'test_a') - assert obj.get_bool('section', 'test_b') - assert obj.get_bool('section', 'test_c') - assert obj.get_bool('section', 'test_d') - - -def test_DConf_get_bool__false(): - obj = DConf(get_config(''' - [section] - test_a=False - test_b=false - test_c=No - test_d=no - ''')) - - assert not obj.get_bool('section', 'test_a') - assert not obj.get_bool('section', 'test_b') - assert not obj.get_bool('section', 'test_c') - assert not obj.get_bool('section', 'test_d') - - -def test_DConf_get_string__non_existent(): - obj = DConf('') - assert obj.get_string('section', 'non-existent-option') is None - - -def test_DConf_get_string__ok(): - obj = DConf(get_config(''' - [section] - test_a="str" - test_b=str - ''')) - - assert obj.get_string('section', 'test_a') == 'str' - assert obj.get_string('section', 'test_b') == 'str' - - -@patch('leapp.libraries.actor.authselectscanner.is_service_enabled') -def test_AuthselectScannerLibrary_step_detect_profile__None(mock_service): - obj = AuthselectScannerLibrary([], Authconfig(''), DConf(''), PAM(''), '') - mock_service.return_value = False - assert obj.step_detect_profile() is None - - -@patch('leapp.libraries.actor.authselectscanner.is_service_enabled') -def test_AuthselectScannerLibrary_step_detect_profile__sssd(mock_service): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - obj = AuthselectScannerLibrary([], Authconfig(''), DConf(''), PAM(pam), '') - mock_service.return_value = False - assert obj.step_detect_profile() == 'sssd' - - -@patch('leapp.libraries.actor.authselectscanner.is_service_enabled') -def 
test_AuthselectScannerLibrary_step_detect_profile__winbind(mock_service): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_winbind.so - auth required pam_deny.so - ''') - - obj = AuthselectScannerLibrary([], Authconfig(''), DConf(''), PAM(pam), '') - mock_service.return_value = False - assert obj.step_detect_profile() == 'winbind' - - -@patch('leapp.libraries.actor.authselectscanner.is_service_enabled') -def test_AuthselectScannerLibrary_step_detect_profile__nis(mock_service): - pam = get_config(''' - auth sufficient pam_unix.so - auth required pam_deny.so - ''') - - obj = AuthselectScannerLibrary([], Authconfig(''), DConf(''), PAM(pam), '') - mock_service.return_value = True - assert obj.step_detect_profile() == 'nis' - - -@patch('leapp.libraries.actor.authselectscanner.is_service_enabled') -def test_AuthselectScannerLibrary_step_detect_profile__sssd_winbind(mock_service): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_winbind.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - obj = AuthselectScannerLibrary([], Authconfig(''), DConf(''), PAM(pam), '') - mock_service.return_value = False - assert obj.step_detect_profile() is None - - -@patch('leapp.libraries.actor.authselectscanner.is_service_enabled') -def test_AuthselectScannerLibrary_step_detect_profile__sssd_nis(mock_service): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - obj = AuthselectScannerLibrary([], Authconfig(''), DConf(''), PAM(pam), '') - mock_service.return_value = True - assert obj.step_detect_profile() is None - - -@patch('leapp.libraries.actor.authselectscanner.is_service_enabled') -def test_AuthselectScannerLibrary_step_detect_profile__winbind_nis(mock_service): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_winbind.so - auth required pam_deny.so - ''') - - obj = AuthselectScannerLibrary([], Authconfig(''), DConf(''), 
PAM(pam), '') - mock_service.return_value = True - assert obj.step_detect_profile() is None - - -@patch('leapp.libraries.actor.authselectscanner.is_service_enabled') -def test_AuthselectScannerLibrary_step_detect_profile__sssd_winbind_nis(mock_service): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_winbind.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - obj = AuthselectScannerLibrary([], Authconfig(''), DConf(''), PAM(pam), '') - mock_service.return_value = True - assert obj.step_detect_profile() is None - - -def test_AuthselectScannerLibrary_step_detect_features__faillock(): - pam = get_config(''' - auth required pam_faillock.so preauth silent deny=4 unlock_time=1200 - auth sufficient pam_unix.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - obj = AuthselectScannerLibrary([], Authconfig(''), DConf(''), PAM(pam), '') - assert obj.step_detect_features() == ['with-faillock'] - - -def test_AuthselectScannerLibrary_step_detect_features__fingerprint(): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_sss.so - auth sufficient pam_fprintd.so - auth required pam_deny.so - ''') - - obj = AuthselectScannerLibrary([], Authconfig(''), DConf(''), PAM(pam), '') - assert obj.step_detect_features() == ['with-fingerprint'] - - -def test_AuthselectScannerLibrary_step_detect_features__access(): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_sss.so - auth required pam_deny.so - account required pam_access.so - ''') - - obj = AuthselectScannerLibrary([], Authconfig(''), DConf(''), PAM(pam), '') - assert obj.step_detect_features() == ['with-pamaccess'] - - -def test_AuthselectScannerLibrary_step_detect_features__mkhomedir(): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_sss.so - auth required pam_deny.so - session optional pam_mkhomedir.so umask=0077 - ''') - - obj = AuthselectScannerLibrary([], Authconfig(''), DConf(''), PAM(pam), 
'') - assert obj.step_detect_features() == ['with-mkhomedir'] - - -def test_AuthselectScannerLibrary_step_detect_features__mkhomedir_oddjob(): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_sss.so - auth required pam_deny.so - session optional pam_oddjob_mkhomedir.so umask=0077 - ''') - - obj = AuthselectScannerLibrary([], Authconfig(''), DConf(''), PAM(pam), '') - assert obj.step_detect_features() == ['with-mkhomedir'] - - -def test_AuthselectScannerLibrary_step_detect_features__all(): - pam = get_config(''' - auth required pam_faillock.so preauth silent deny=4 unlock_time=1200 - auth sufficient pam_unix.so - auth sufficient pam_sss.so - auth sufficient pam_fprintd.so - auth required pam_deny.so - account required pam_access.so - session optional pam_oddjob_mkhomedir.so umask=0077 - ''') - - obj = AuthselectScannerLibrary([], Authconfig(''), DConf(''), PAM(pam), '') - features = obj.step_detect_features() - assert len(features) == 4 - assert 'with-faillock' in features - assert 'with-fingerprint' in features - assert 'with-pamaccess' in features - assert 'with-mkhomedir' in features - - -def test_AuthselectScannerLibrary_step_detect_sssd_features__sudo(): - nsswitch = get_config(''' - passwd: files sss systemd - group: files sss systemd - sudoers: files sss - ''') - - obj = AuthselectScannerLibrary( - [], Authconfig(''), DConf(''), PAM(''), nsswitch - ) - features = obj.step_detect_sssd_features('sssd') - assert features == ['with-sudo'] - - -def test_AuthselectScannerLibrary_step_detect_sssd_features__smartcard(): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - ac = get_config(''' - USESMARTCARD=yes - ''') - - obj = AuthselectScannerLibrary( - [], Authconfig(ac), DConf(''), PAM(pam), '' - ) - features = obj.step_detect_sssd_features('sssd') - assert features == ['with-smartcard'] - - -def 
test_AuthselectScannerLibrary_step_detect_sssd_features__smartcard_required(): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - ac = get_config(''' - FORCESMARTCARD=yes - ''') - - obj = AuthselectScannerLibrary( - [], Authconfig(ac), DConf(''), PAM(pam), '' - ) - features = obj.step_detect_sssd_features('sssd') - assert features == ['with-smartcard-required'] - - -def test_AuthselectScannerLibrary_step_detect_sssd_features__smartcard_lock(): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - dconf = get_config(''' - [org/gnome/settings-daemon/peripherals/smartcard] - removal-action='lock-screen' - ''') - - obj = AuthselectScannerLibrary( - [], Authconfig(''), DConf(dconf), PAM(pam), '' - ) - features = obj.step_detect_sssd_features('sssd') - assert features == ['with-smartcard-lock-on-removal'] - - -def test_AuthselectScannerLibrary_step_detect_sssd_features__pkcs11(): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_pkcs11.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - ac = get_config(''' - USESMARTCARD=yes - FORCESMARTCARD=yes - ''') - - dconf = get_config(''' - [org/gnome/settings-daemon/peripherals/smartcard] - removal-action='lock-screen' - ''') - - obj = AuthselectScannerLibrary( - [], Authconfig(ac), DConf(dconf), PAM(pam), '' - ) - features = obj.step_detect_sssd_features('sssd') - assert not features - - -def test_AuthselectScannerLibrary_step_detect_sssd_features__wrong_profile(): - nsswitch = get_config(''' - passwd: files sss systemd - group: files sss systemd - sudoers: files sss - ''') - - obj = AuthselectScannerLibrary( - [], Authconfig(''), DConf(''), PAM(''), nsswitch - ) - features = obj.step_detect_sssd_features('winbind') - assert not features - - -def test_AuthselectScannerLibrary_step_detect_winbind_features__krb5(): - ac = get_config(''' - 
WINBINDKRB5=yes - ''') - - obj = AuthselectScannerLibrary( - [], Authconfig(ac), DConf(''), PAM(''), '' - ) - features = obj.step_detect_winbind_features('winbind') - assert features == ['with-krb5'] - - -def test_AuthselectScannerLibrary_step_detect_winbind_features__wrong_profile(): - ac = get_config(''' - WINBINDKRB5=yes - ''') - - obj = AuthselectScannerLibrary( - [], Authconfig(ac), DConf(''), PAM(''), '' - ) - features = obj.step_detect_winbind_features('sssd') - assert not features - - -@patch('os.readlink') -@patch('os.path.islink') -@patch('os.path.isfile') -@patch('os.path.getmtime') -def test_AuthselectScannerLibrary_step_detect_if_confirmation_is_required__nosysconfig( - mock_getmtime, mock_isfile, mock_islink, mock_readlink -): - obj = AuthselectScannerLibrary( - [], Authconfig(''), DConf(''), PAM(''), '' - ) - mock_isfile.return_value = False - assert obj.step_detect_if_confirmation_is_required() - - -@patch('os.readlink') -@patch('os.path.islink') -@patch('os.path.isfile') -@patch('os.path.getmtime') -def test_AuthselectScannerLibrary_step_detect_if_confirmation_is_required__nolink( - mock_getmtime, mock_isfile, mock_islink, mock_readlink -): - obj = AuthselectScannerLibrary( - [], Authconfig(''), DConf(''), PAM(''), '' - ) - mock_isfile.return_value = True - mock_islink.return_value = False - assert obj.step_detect_if_confirmation_is_required() - - -@patch('os.readlink') -@patch('os.path.islink') -@patch('os.path.isfile') -@patch('os.path.getmtime') -def test_AuthselectScannerLibrary_step_detect_if_confirmation_is_required__badlink( - mock_getmtime, mock_isfile, mock_islink, mock_readlink -): - obj = AuthselectScannerLibrary( - [], Authconfig(''), DConf(''), PAM(''), '' - ) - mock_isfile.return_value = True - mock_islink.return_value = True - mock_readlink.return_value = '' - assert obj.step_detect_if_confirmation_is_required() - - -@patch('os.readlink') -@patch('os.path.islink') -@patch('os.path.isfile') -@patch('os.path.getmtime') -def 
test_AuthselectScannerLibrary_step_detect_if_confirmation_is_required__badmtime( - mock_getmtime, mock_isfile, mock_islink, mock_readlink -): - def my_getmtime(path): - # Make sysconfig file older then other files. - if path == '/etc/sysconfig/authconfig': - return 100 - - return 200 - - obj = AuthselectScannerLibrary( - [], Authconfig(''), DConf(''), PAM(''), '' - ) - mock_isfile.return_value = True - mock_islink.return_value = True - mock_readlink.side_effect = '{}-ac'.format - mock_getmtime.side_effect = my_getmtime - assert obj.step_detect_if_confirmation_is_required() - - -@patch('os.readlink') -@patch('os.path.islink') -@patch('os.path.isfile') -@patch('os.path.getmtime') -def test_AuthselectScannerLibrary_step_detect_if_confirmation_is_required__pass( - mock_getmtime, mock_isfile, mock_islink, mock_readlink -): - def my_getmtime(path): - # Make sysconfig file younger then other files. - if path == '/etc/sysconfig/authconfig': - return 200 - - return 100 - - obj = AuthselectScannerLibrary( - [], Authconfig(''), DConf(''), PAM(''), '' - ) - mock_isfile.return_value = True - mock_islink.return_value = True - mock_readlink.side_effect = '{}-ac'.format - mock_getmtime.side_effect = my_getmtime - assert not obj.step_detect_if_confirmation_is_required() - - -@patch('leapp.libraries.actor.authselectscanner.is_service_enabled') -@patch('leapp.libraries.actor.authselectscanner.AuthselectScannerLibrary.step_detect_if_confirmation_is_required') -def test_AuthselectScannerLibrary_process__simple(mock_confirm, mock_service): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - obj = AuthselectScannerLibrary( - ['pam_unix', 'pam_sss', 'pam_deny'], Authconfig(''), DConf(''), PAM(pam), '' - ) - mock_confirm.return_value = True - mock_service.return_value = False - authselect = obj.process() - assert authselect.profile == 'sssd' - assert not authselect.features - assert authselect.confirm - - 
-@patch('leapp.libraries.actor.authselectscanner.is_service_enabled') -@patch('leapp.libraries.actor.authselectscanner.AuthselectScannerLibrary.step_detect_if_confirmation_is_required') -def test_AuthselectScannerLibrary_process__features(mock_confirm, mock_service): - pam = get_config(''' - auth required pam_faillock.so preauth silent deny=4 unlock_time=1200 - auth sufficient pam_unix.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - nsswitch = get_config(''' - passwd: files sss systemd - group: files sss systemd - sudoers: files sss - ''') - - obj = AuthselectScannerLibrary( - ['pam_unix', 'pam_sss', 'pam_deny', 'pam_faillock'], - Authconfig(''), - DConf(''), - PAM(pam), - nsswitch - ) - mock_confirm.return_value = True - mock_service.return_value = False - authselect = obj.process() - assert authselect.profile == 'sssd' - assert len(authselect.features) == 2 - assert 'with-faillock' in authselect.features - assert 'with-sudo' in authselect.features - assert authselect.confirm - - -@patch('leapp.libraries.actor.authselectscanner.is_service_enabled') -@patch('leapp.libraries.actor.authselectscanner.AuthselectScannerLibrary.step_detect_if_confirmation_is_required') -def test_AuthselectScannerLibrary_process__unknown_module(mock_confirm, mock_service): - pam = get_config(''' - auth required pam_faillock.so preauth silent deny=4 unlock_time=1200 - auth sufficient pam_unix.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - obj = AuthselectScannerLibrary( - ['pam_unix', 'pam_sss', 'pam_deny'], - Authconfig(''), - DConf(''), - PAM(pam), - '' - ) - mock_confirm.return_value = True - mock_service.return_value = False - authselect = obj.process() - assert authselect.profile is None - assert not authselect.features - assert authselect.confirm - - -@patch('leapp.libraries.actor.authselectscanner.is_service_enabled') -@patch('leapp.libraries.actor.authselectscanner.AuthselectScannerLibrary.step_detect_if_confirmation_is_required') -def 
test_AuthselectScannerLibrary_process__autoconfirm(mock_confirm, mock_service): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - obj = AuthselectScannerLibrary( - ['pam_unix', 'pam_sss', 'pam_deny'], Authconfig(''), DConf(''), PAM(pam), '' - ) - mock_confirm.return_value = False - mock_service.return_value = False - authselect = obj.process() - assert authselect.profile == 'sssd' - assert not authselect.features - assert not authselect.confirm diff --git a/repos/system_upgrade/el7toel8/actors/bindupdate/actor.py b/repos/system_upgrade/el7toel8/actors/bindupdate/actor.py deleted file mode 100644 index cc21afe9..00000000 --- a/repos/system_upgrade/el7toel8/actors/bindupdate/actor.py +++ /dev/null @@ -1,35 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import updates -from leapp.libraries.common import rpms -from leapp.models import BindFacts, DistributionSignedRPM -from leapp.tags import IPUWorkflowTag, PreparationPhaseTag - - -class BindUpdate(Actor): - """ - Actor parsing facts found in configuration and modifying configuration. 
- """ - - name = 'bind_update' - consumes = (DistributionSignedRPM, BindFacts) - produces = () - tags = (PreparationPhaseTag, IPUWorkflowTag) - - pkg_names = {'bind', 'bind-sdb', 'bind-pkcs11'} - - def has_bind_package(self): - """Test any bind server package is installed.""" - for pkg in self.pkg_names: - if rpms.has_package(DistributionSignedRPM, pkg): - return True - return False - - def process(self): - if not self.has_bind_package(): - self.log.debug('bind is not installed') - return - - for bindfacts in self.consume(BindFacts): - updates.update_facts(bindfacts) - self.log.info('BIND configuration files modified: %s', - ', '.join(bindfacts.modified_files)) diff --git a/repos/system_upgrade/el7toel8/actors/bindupdate/libraries/updates.py b/repos/system_upgrade/el7toel8/actors/bindupdate/libraries/updates.py deleted file mode 100644 index aa0aeeb8..00000000 --- a/repos/system_upgrade/el7toel8/actors/bindupdate/libraries/updates.py +++ /dev/null @@ -1,89 +0,0 @@ -from leapp.exceptions import StopActorExecutionError -from leapp.libraries.common import isccfg -from leapp.libraries.stdlib import api, CalledProcessError, run - -# Callback for walk function -callbacks = { - 'dnssec-lookaside': isccfg.ModifyState.callback_comment_out, -} - - -def paths_from_issues(issues): - """Extract paths from list of BindConfigIssuesModel.""" - return [issue.path for issue in issues] - - -def parser_file(parser, path): - for cfg in parser.FILES_TO_CHECK: - if cfg.path == path: - return cfg - return None - - -def make_backup(path, backup_suffix='.leapp'): - """Make backup of a file before modification.""" - backup_path = path + backup_suffix - try: - run(['cp', '--preserve=all', path, backup_path]) - except CalledProcessError as exc: - raise StopActorExecutionError( - 'Could not create a backup copy', - details={'details': 'An exception during backup raised {}'.format(str(exc))} - ) - - -def update_section(parser, section): - """Modify one section. 
- - :type section: ConfigSection - """ - state = isccfg.ModifyState() - parser.walk(section, callbacks, state) - state.finish(section) - return state.content() - - -def update_config(parser, cfg): - """Modify contents of file according to rules. - - :type cfg: ConfigFile - :returns str: Modified config contents - """ - return update_section(parser, cfg.root_section()) - - -def update_file(parser, path, write=True): - """Prepare modified content for the file, make backup and rewrite it. - - :param parser: IscConfigParser - :param path: String with path to a file - :param log: Log instance with debug(str) method or None - :param write: True to allow file modification, false to only return modification status - """ - cfg = parser_file(parser, path) - modified = update_config(parser, cfg) - if modified != cfg.buffer: - api.current_logger().debug('%s needs modification', path) - if write: - make_backup(path) - with open(path, 'w') as f: - f.write(modified) - api.current_logger().debug('%s updated to size %d', path, len(modified)) - return True - return False - - -def update_facts(facts, path='/etc/named.conf'): - """Parse and update all files according to supplied facts. 
- - :param facts: BindFacts instance - :param path: String to main configuration file - :returns: number of modified files - """ - parser = isccfg.IscConfigParser(path) - modified_files = set() - if facts.dnssec_lookaside: - for model in facts.dnssec_lookaside: - if update_file(parser, model.path): - modified_files.add(model.path) - facts.modified_files = list(modified_files) diff --git a/repos/system_upgrade/el7toel8/actors/bindupdate/tests/test_updates.py b/repos/system_upgrade/el7toel8/actors/bindupdate/tests/test_updates.py deleted file mode 100644 index bc04bb52..00000000 --- a/repos/system_upgrade/el7toel8/actors/bindupdate/tests/test_updates.py +++ /dev/null @@ -1,50 +0,0 @@ -from leapp.libraries.actor import updates -from leapp.libraries.common import isccfg -from leapp.models import BindFacts - - -def test_simple(): - """Test configuration is not modified without offending statements.""" - mockcfg = isccfg.MockConfig(""" -options { - listen-on port 53 { 127.0.0.1; }; - listen-on-v6 port 53 { ::1; }; - directory "/var/named"; - allow-query { localhost; }; - recursion yes; - - dnssec-validation yes; -}; - -zone "." IN { - type hint; - file "named.ca"; -}; -""", '/etc/named.conf') - parser = isccfg.IscConfigParser(mockcfg) - modified = updates.update_config(parser, mockcfg) - assert modified == mockcfg.buffer - - -def test_dnssec_lookaside(): - """Test unsupported statements are removed.""" - mockcfg = isccfg.MockConfig(""" -options { - listen-on port 53 { 127.0.0.1; }; - listen-on-v6 port 53 { ::1; }; - directory "/var/named"; - allow-query { localhost; }; - recursion yes; - - dnssec-validation yes; - dnssec-lookaside auto; -}; - -zone "." 
IN { - type hint; - file "named.ca"; -}; -""", '/etc/named.conf') - parser = isccfg.IscConfigParser(mockcfg) - modified = updates.update_config(parser, mockcfg) - assert modified != mockcfg.buffer diff --git a/repos/system_upgrade/el7toel8/actors/checkacpid/actor.py b/repos/system_upgrade/el7toel8/actors/checkacpid/actor.py deleted file mode 100644 index 8e761db4..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkacpid/actor.py +++ /dev/null @@ -1,30 +0,0 @@ -from leapp import reporting -from leapp.actors import Actor -from leapp.libraries.common.rpms import has_package -from leapp.models import DistributionSignedRPM -from leapp.reporting import create_report, Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class CheckAcpid(Actor): - """ - Check if acpid is installed. If yes, write information about non-compatible changes. - """ - - name = 'checkacpid' - consumes = (DistributionSignedRPM,) - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - if has_package(DistributionSignedRPM, 'acpid'): - create_report([ - reporting.Title('Acpid incompatible changes in the next major version'), - reporting.Summary('The option -d (debug) no longer implies -f (foreground).'), - reporting.Severity(reporting.Severity.LOW), - reporting.Remediation( - hint='You must now use both options (\'-df\') for the same behavior. 
Please update ' - 'your scripts to be compatible with the changes.'), - reporting.Groups([reporting.Groups.KERNEL, reporting.Groups.SERVICES]), - reporting.RelatedResource('package', 'acpid') - ]) diff --git a/repos/system_upgrade/el7toel8/actors/checkacpid/tests/component_test_checkacpid.py b/repos/system_upgrade/el7toel8/actors/checkacpid/tests/component_test_checkacpid.py deleted file mode 100644 index a38728f7..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkacpid/tests/component_test_checkacpid.py +++ /dev/null @@ -1,33 +0,0 @@ -from leapp.models import DistributionSignedRPM, RPM -from leapp.reporting import Report -from leapp.snactor.fixture import current_actor_context - -RH_PACKAGER = 'Red Hat, Inc. ' - - -def create_modulesfacts(installed_rpm): - return DistributionSignedRPM(items=installed_rpm) - - -def test_actor_with_acpid_package(current_actor_context): - with_acpid = [ - RPM(name='acpid', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), - RPM(name='powertop', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51')] - - current_actor_context.feed(create_modulesfacts(installed_rpm=with_acpid)) - current_actor_context.run() - assert current_actor_context.consume(Report) - - -def test_actor_without_acpid_package(current_actor_context): - without_acpid = [ - RPM(name='powertop', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), - RPM(name='sed', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51')] - - current_actor_context.feed(create_modulesfacts(installed_rpm=without_acpid)) - current_actor_context.run() - 
assert not current_actor_context.consume(Report) diff --git a/repos/system_upgrade/el7toel8/actors/checkbind/actor.py b/repos/system_upgrade/el7toel8/actors/checkbind/actor.py deleted file mode 100644 index 0292b6c7..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkbind/actor.py +++ /dev/null @@ -1,40 +0,0 @@ -from leapp import reporting -from leapp.actors import Actor -from leapp.libraries.actor import iscmodel -from leapp.libraries.stdlib import api -from leapp.models import BindFacts, DistributionSignedRPM -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class CheckBind(Actor): - """Actor parsing BIND configuration and checking for known issues in it.""" - - name = 'check_bind' - consumes = (DistributionSignedRPM,) - produces = (BindFacts, reporting.Report) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - pkg_names = {'bind', 'bind-sdb', 'bind-pkcs11'} - - def has_package(self, t_rpms): - """Replacement for broken leapp.libraries.common.rpms.has_package.""" - for fact in self.consume(t_rpms): - for rpm in fact.items: - if rpm.name in self.pkg_names: - return True - return False - - def process(self): - if not self.has_package(DistributionSignedRPM): - self.log.debug('bind is not installed') - return - - facts = iscmodel.get_facts('/etc/named.conf') - report = iscmodel.make_report(facts) - - if report: - api.produce(facts) - self.log.info('BIND configuration issues were found.') - reporting.create_report(report) - else: - self.log.debug('BIND configuration seems compatible.') diff --git a/repos/system_upgrade/el7toel8/actors/checkbind/libraries/iscmodel.py b/repos/system_upgrade/el7toel8/actors/checkbind/libraries/iscmodel.py deleted file mode 100644 index 73ca5388..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkbind/libraries/iscmodel.py +++ /dev/null @@ -1,110 +0,0 @@ -from leapp import reporting -from leapp.libraries.common import isccfg -from leapp.libraries.stdlib import api -from leapp.models import BindConfigIssuesModel, BindFacts 
- - -def add_statement(statement, state): - """Add searched statement to found issues.""" - - stmt_text = statement.serialize_skip(' ') - name = statement.var(0).value() - if name in state: - state[name].append((stmt_text, statement.config.path)) - else: - state[name] = [(stmt_text, statement.config.path)] - - -def find_dnssec_lookaside(statement, state): - try: - arg = statement.var(1) - if not (arg.type() == arg.TYPE_BARE and arg.value() == 'no'): - # auto or yes statement - # dnssec-lookaside "." trust-anchor "dlv.isc.org"; - add_statement(statement, state) - except IndexError: - api.current_logger().warning('Unexpected statement format: "%s"', - statement.serialize_skip(' ')) - - -def convert_to_issues(statements): - """Produce list of offending statements in set of files. - - :param statements: one item from list created by add_statement - """ - - files = dict() - for statement, path in statements: - if path in files: - files[path].update(statement) - if statement not in files[path].statements: - files[path].statements.append(statement) - else: - files[path] = set(statement) - values = list() - for path in files: - values.append(BindConfigIssuesModel(path=path, statements=list(files[path]))) - return values - - -def convert_found_state(state, files): - """Convert find state results to facts. - - Check found statements and create facts from them.""" - - dnssec_lookaside = None - if 'dnssec-lookaside' in state: - dnssec_lookaside = convert_to_issues(state['dnssec-lookaside']) - - return BindFacts(config_files=files, - modified_files=[], - dnssec_lookaside=dnssec_lookaside, - listen_on_v6_missing='listen-on-v6' not in state) - - -def get_facts(path, log=None): - """Find issues in configuration files. - - Report used configuration files and wrong statements in each file. 
- """ - - find_calls = { - 'dnssec-lookaside': find_dnssec_lookaside, - 'listen-on-v6': add_statement, - } - - parser = isccfg.IscConfigParser(path) - state = {} - files = set() - - for cfg in parser.FILES_TO_CHECK: - parser.walk(cfg.root_section(), find_calls, state) - files.add(cfg.path) - - api.current_logger().debug('Found state: "%s", files: "%s"', - repr(state), files) - - facts = convert_found_state(state, list(files)) - return facts - - -def make_report(facts): - """Make report message from gathered facts.""" - summary_messages = [] - report = [] - if facts.dnssec_lookaside: - summary_messages.append('BIND configuration contains no longer accepted statements: dnssec-lookaside.') - if facts.listen_on_v6_missing: - summary_messages.append('Default value of listen-on-v6 have changed, but it is not present in configuration.' - ' named service will now listen on INET6 sockets also.') - - if summary_messages: - summary = ' '.join(summary_messages) - report.extend([ - reporting.Title('BIND configuration issues found'), - reporting.Summary(summary), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([reporting.Groups.SERVICES, reporting.Groups.NETWORK]), - ]) - - return report diff --git a/repos/system_upgrade/el7toel8/actors/checkbind/tests/test_model.py b/repos/system_upgrade/el7toel8/actors/checkbind/tests/test_model.py deleted file mode 100644 index a2fcbdc5..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkbind/tests/test_model.py +++ /dev/null @@ -1,80 +0,0 @@ -from leapp.libraries.actor import iscmodel -from leapp.libraries.common import isccfg -from leapp.models import BindFacts - - -def model_paths(issues_model): - paths = list() - for m in issues_model: - paths.append(m.path) - return paths - - -def get_facts(cfg): - facts = iscmodel.get_facts(cfg) - assert isinstance(facts, BindFacts) - return facts - - -def test_simple(): - mockcfg = isccfg.MockConfig(""" -options { - listen-on port 53 { 127.0.0.1; }; - listen-on-v6 port 53 { ::1; 
}; - directory "/var/named"; - allow-query { localhost; }; - recursion yes; - - dnssec-validation yes; -}; - -zone "." IN { - type hint; - file "named.ca"; -}; -""", '/etc/named.conf') - facts = get_facts(mockcfg) - assert facts.dnssec_lookaside is None - - -def test_dnssec_lookaside(): - mockcfg = isccfg.MockConfig(""" -options { - listen-on port 53 { 127.0.0.1; }; - listen-on-v6 port 53 { ::1; }; - directory "/var/named"; - allow-query { localhost; }; - recursion yes; - - dnssec-validation yes; - dnssec-lookaside auto; -}; - -zone "." IN { - type hint; - file "named.ca"; -}; -""", '/etc/named.conf') - facts = get_facts(mockcfg) - assert '/etc/named.conf' in model_paths(facts.dnssec_lookaside) - - -def test_listen_on_v6(): - present = isccfg.MockConfig(""" -options { - listen-on { any; }; - listen-on-v6 { any; }; -}; -""", '/etc/named.conf') - missing = isccfg.MockConfig(""" -options { - listen-on { any; }; - #listen-on-v6 { any; }; -}; -""", '/etc/named.conf') - - facts = get_facts(present) - assert not facts.listen_on_v6_missing - - facts = get_facts(missing) - assert facts.listen_on_v6_missing diff --git a/repos/system_upgrade/el7toel8/actors/checkbrltty/actor.py b/repos/system_upgrade/el7toel8/actors/checkbrltty/actor.py deleted file mode 100644 index c4e032c1..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkbrltty/actor.py +++ /dev/null @@ -1,55 +0,0 @@ -from leapp import reporting -from leapp.actors import Actor -from leapp.libraries.actor import checkbrltty -from leapp.libraries.common.rpms import has_package -from leapp.models import BrlttyMigrationDecision, DistributionSignedRPM -from leapp.reporting import create_report, Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - -related = [reporting.RelatedResource('package', 'brltty')] - - -class CheckBrltty(Actor): - """ - Check if brltty is installed, check whether configuration update is needed. 
- """ - - name = 'check_brltty' - consumes = (DistributionSignedRPM,) - produces = (Report, BrlttyMigrationDecision,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - if has_package(DistributionSignedRPM, 'brltty'): - create_report([ - reporting.Title('Brltty has incompatible changes in the next major version'), - reporting.Summary( - 'The --message-delay brltty option has been renamed to --message-timeout.\n' - 'The -U [--update-interval=] brltty option has been removed.' - ), - reporting.Severity(reporting.Severity.LOW), - reporting.Groups([reporting.Groups.ACCESSIBILITY]), - reporting.Remediation( - hint='Please update your scripts to be compatible with the changes.' - ) - ] + related) - - (migrate_file, migrate_bt, migrate_espeak,) = checkbrltty.check_for_unsupported_cfg() - report_summary = '' - if migrate_bt: - report_summary = 'Unsupported aliases for bluetooth devices (\'bth:\' and \'bluez:\') will be ' - report_summary += 'renamed to \'bluetooth:\'.' - if migrate_espeak: - if report_summary: - report_summary += '\n' - report_summary += 'eSpeak speech driver is no longer supported, it will be switched to eSpeak-NG.' 
- if report_summary: - create_report([ - reporting.Title('brltty configuration will be migrated'), - reporting.Summary(report_summary), - reporting.Severity(reporting.Severity.LOW), - reporting.Groups([reporting.Groups.ACCESSIBILITY]), - ] + related) - - self.produce(BrlttyMigrationDecision(migrate_file=migrate_file, migrate_bt=migrate_bt, - migrate_espeak=migrate_espeak)) diff --git a/repos/system_upgrade/el7toel8/actors/checkbrltty/libraries/checkbrltty.py b/repos/system_upgrade/el7toel8/actors/checkbrltty/libraries/checkbrltty.py deleted file mode 100644 index 13c66f36..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkbrltty/libraries/checkbrltty.py +++ /dev/null @@ -1,23 +0,0 @@ -import os -import re - -BrlttyConf = '/etc/brltty.conf' - - -def check_for_unsupported_cfg(): - migrate_file = None - migrate_bt = False - migrate_espeak = False - regex_bt = re.compile(r'\b((bth)|(bluez))([:\-][0-9a-fA-F]{2}){6}\b') - regex_espeak = re.compile(r'^\s*speech-driver\s+es\b') - if os.path.exists(BrlttyConf): - with open(BrlttyConf) as file_check: - for line in file_check: - if regex_bt.search(line): - migrate_bt = True - if regex_espeak.search(line): - migrate_espeak = True - if migrate_bt and migrate_espeak: - break - migrate_file = BrlttyConf if migrate_espeak or migrate_bt else '' - return (migrate_file, migrate_bt, migrate_espeak) diff --git a/repos/system_upgrade/el7toel8/actors/checkbrltty/tests/component_test_checkbrltty.py b/repos/system_upgrade/el7toel8/actors/checkbrltty/tests/component_test_checkbrltty.py deleted file mode 100644 index 1b843d9d..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkbrltty/tests/component_test_checkbrltty.py +++ /dev/null @@ -1,32 +0,0 @@ -from leapp.models import BrlttyMigrationDecision, DistributionSignedRPM, RPM -from leapp.reporting import Report - -RH_PACKAGER = 'Red Hat, Inc. 
' - -with_brltty = [ - RPM(name='grep', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), - RPM(name='brltty', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51')] - -without_brltty = [ - RPM(name='grep', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), - RPM(name='sed', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51')] - - -def create_modulesfacts(installed_rpm): - return DistributionSignedRPM(items=installed_rpm) - - -def test_actor_without_brltty_package(current_actor_context): - current_actor_context.feed(create_modulesfacts(installed_rpm=without_brltty)) - current_actor_context.run() - assert not current_actor_context.consume(Report) - - -def test_actor_with_brltty_package(current_actor_context): - current_actor_context.feed(create_modulesfacts(installed_rpm=with_brltty)) - current_actor_context.run() - assert current_actor_context.consume(Report) diff --git a/repos/system_upgrade/el7toel8/actors/checkbrltty/tests/unit_test_checkbrltty.py b/repos/system_upgrade/el7toel8/actors/checkbrltty/tests/unit_test_checkbrltty.py deleted file mode 100644 index 0df47111..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkbrltty/tests/unit_test_checkbrltty.py +++ /dev/null @@ -1,27 +0,0 @@ -import pytest -from six import text_type - -from leapp.libraries.actor import checkbrltty - -BRLTTY_CONF = 'brltty.conf' - - -@pytest.mark.parametrize('test_input,expected_migrate_bt,expected_migrate_espeak', [ - ('braille-device serial:/dev/ttyS0\n', False, False), - ('braille-device bth:AB-cd:ef:01:23:45\n', True, False), - 
('braille-device bluez:AB-cd:ef:01:23:45\n', True, False), - ('speech-driver es\n', False, True), - ('braille-device bth:AB-cd:ef:01:23:45\nbraille-device bluez:AB-cd:ef:01:23:45\nspeech-driver es\n', True, True), -]) -def test_actor_check_migration_bth(tmpdir, monkeypatch, test_input, expected_migrate_bt, expected_migrate_espeak, - current_actor_context): - test_cfg_file = text_type(tmpdir.join(BRLTTY_CONF)) - with open(test_cfg_file, 'w') as file_out: - file_out.write(test_input) - monkeypatch.setattr(checkbrltty, 'BrlttyConf', test_cfg_file) - (migrate_file, migrate_bt, migrate_espeak,) = checkbrltty.check_for_unsupported_cfg() - - if expected_migrate_bt or expected_migrate_espeak: - assert test_cfg_file == migrate_file - assert expected_migrate_bt == migrate_bt - assert expected_migrate_espeak == migrate_espeak diff --git a/repos/system_upgrade/el7toel8/actors/checkbtrfs/actor.py b/repos/system_upgrade/el7toel8/actors/checkbtrfs/actor.py deleted file mode 100644 index a3848957..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkbtrfs/actor.py +++ /dev/null @@ -1,54 +0,0 @@ -from leapp import reporting -from leapp.actors import Actor -from leapp.models import ActiveKernelModulesFacts -from leapp.reporting import create_report, Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class CheckBtrfs(Actor): - """ - Check if Btrfs filesystem is in use. If yes, inhibit the upgrade process. - - Btrfs filesystem was introduced as Technology Preview with initial releases of RHEL 6 and 7. It - was deprecated on versions 6.6 and 7.4 and will not be present in next major version. - """ - - name = 'check_btrfs' - consumes = (ActiveKernelModulesFacts,) - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - - hint = 'In order to unload the module from the running system, check the accompanied command.' 
- command = ['modprobe', '-r', 'btrfs'] - - for fact in self.consume(ActiveKernelModulesFacts): - for active_module in fact.kernel_modules: - if active_module.filename == 'btrfs': - create_report([ - reporting.Title('Btrfs has been removed from RHEL8'), - reporting.Summary( - 'The Btrfs file system was introduced as Technology Preview with the ' - 'initial release of Red Hat Enterprise Linux 6 and Red Hat Enterprise Linux 7. As of ' - 'versions 6.6 and 7.4 this technology has been deprecated and removed in RHEL8.' - ), - reporting.ExternalLink( - title='Considerations in adopting RHEL 8 - btrfs has been removed.', - url='https://red.ht/file-systems-and-storage-removed-btrfs-rhel-8' - ), - reporting.ExternalLink( - title='How do I prevent a kernel module from loading automatically?', - url='https://access.redhat.com/solutions/41278' - ), - reporting.ExternalLink( - title='Leapp upgrade fail with error "Inhibitor: Btrfs has been removed from RHEL8"', - url='https://access.redhat.com/solutions/7020130' - ), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([reporting.Groups.INHIBITOR]), - reporting.Groups([reporting.Groups.FILESYSTEM]), - reporting.Remediation(hint=hint, commands=[command]), - reporting.RelatedResource('kernel-driver', 'btrfs') - ]) - break diff --git a/repos/system_upgrade/el7toel8/actors/checkbtrfs/tests/test_btrfs_checkbtrfs.py b/repos/system_upgrade/el7toel8/actors/checkbtrfs/tests/test_btrfs_checkbtrfs.py deleted file mode 100644 index ebf031b1..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkbtrfs/tests/test_btrfs_checkbtrfs.py +++ /dev/null @@ -1,29 +0,0 @@ -from leapp.models import ActiveKernelModule, ActiveKernelModulesFacts -from leapp.reporting import Report -from leapp.snactor.fixture import current_actor_context -from leapp.utils.report import is_inhibitor - - -def create_modulesfacts(kernel_modules): - return ActiveKernelModulesFacts(kernel_modules=kernel_modules) - - -def 
test_actor_with_btrfs_module(current_actor_context): - with_btrfs = [ - ActiveKernelModule(filename='btrfs', parameters=[]), - ActiveKernelModule(filename='kvm', parameters=[])] - - current_actor_context.feed(create_modulesfacts(kernel_modules=with_btrfs)) - current_actor_context.run() - report_fields = current_actor_context.consume(Report)[0].report - assert is_inhibitor(report_fields) - - -def test_actor_without_btrfs_module(current_actor_context): - without_btrfs = [ - ActiveKernelModule(filename='kvm_intel', parameters=[]), - ActiveKernelModule(filename='kvm', parameters=[])] - - current_actor_context.feed(create_modulesfacts(kernel_modules=without_btrfs)) - current_actor_context.run() - assert not current_actor_context.consume(Report) diff --git a/repos/system_upgrade/el7toel8/actors/checkchrony/actor.py b/repos/system_upgrade/el7toel8/actors/checkchrony/actor.py deleted file mode 100644 index ab11c9ae..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkchrony/actor.py +++ /dev/null @@ -1,23 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor.checkchrony import check_chrony -from leapp.libraries.common.rpms import has_package -from leapp.models import DistributionSignedRPM -from leapp.reporting import Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class CheckChrony(Actor): - """ - Check for incompatible changes in chrony configuration. - - Warn that the default chrony configuration in RHEL8 uses the leapsectz - directive. 
- """ - - name = 'check_chrony' - consumes = (DistributionSignedRPM,) - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - check_chrony(has_package(DistributionSignedRPM, 'chrony')) diff --git a/repos/system_upgrade/el7toel8/actors/checkchrony/libraries/checkchrony.py b/repos/system_upgrade/el7toel8/actors/checkchrony/libraries/checkchrony.py deleted file mode 100644 index f0fb285e..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkchrony/libraries/checkchrony.py +++ /dev/null @@ -1,50 +0,0 @@ -from leapp import reporting -from leapp.libraries.stdlib import api, run - -related = [ - reporting.RelatedResource('package', 'ntpd'), - reporting.RelatedResource('package', 'chrony'), - reporting.RelatedResource('file', '/etc/chrony.conf'), -] - - -def is_config_default(): - """Check if the chrony config file was not modified since installation.""" - try: - result = run(['rpm', '-V', '--nomtime', 'chrony'], checked=False) - return '/etc/chrony.conf' not in result['stdout'] - except OSError as e: - api.current_logger().warning("rpm verification failed: %s", str(e)) - return True - - -def check_chrony(chrony_installed): - """Report potential issues in chrony configuration.""" - if not chrony_installed: - api.current_logger().info('chrony package is not installed') - return - - if is_config_default(): - reporting.create_report([ - reporting.Title('chrony using default configuration'), - reporting.Summary( - 'default chrony configuration in RHEL8 uses leapsectz directive, which cannot be used with ' - 'leap smearing NTP servers, and uses a single pool directive instead of four server directives' - ), - reporting.Severity(reporting.Severity.MEDIUM), - reporting.Groups([ - reporting.Groups.SERVICES, - reporting.Groups.TIME_MANAGEMENT - ]) - ] + related) - - else: - reporting.create_report([ - reporting.Title('chrony using non-default configuration'), - reporting.Summary('chrony behavior will not change in RHEL8'), - 
reporting.Severity(reporting.Severity.LOW), - reporting.Groups([ - reporting.Groups.SERVICES, - reporting.Groups.TIME_MANAGEMENT - ]) - ] + related) diff --git a/repos/system_upgrade/el7toel8/actors/checkchrony/tests/unit_test_checkchrony.py b/repos/system_upgrade/el7toel8/actors/checkchrony/tests/unit_test_checkchrony.py deleted file mode 100644 index 7a25800b..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkchrony/tests/unit_test_checkchrony.py +++ /dev/null @@ -1,33 +0,0 @@ -from leapp import reporting -from leapp.libraries.actor import checkchrony -from leapp.libraries.common.testutils import create_report_mocked - - -def test_uninstalled(monkeypatch): - for config_default in (False, True): - monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) - monkeypatch.setattr(checkchrony, 'is_config_default', lambda: config_default) - - checkchrony.check_chrony(False) - - assert reporting.create_report.called == 0 - - -def test_installed_defconf(monkeypatch): - monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) - monkeypatch.setattr(checkchrony, 'is_config_default', lambda: True) - - checkchrony.check_chrony(True) - - assert reporting.create_report.called == 1 - assert reporting.create_report.report_fields['title'] == 'chrony using default configuration' - - -def test_installed_nodefconf(monkeypatch): - monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) - monkeypatch.setattr(checkchrony, 'is_config_default', lambda: False) - - checkchrony.check_chrony(True) - - assert reporting.create_report.called == 1 - assert reporting.create_report.report_fields['title'] == 'chrony using non-default configuration' diff --git a/repos/system_upgrade/el7toel8/actors/checkdocker/actor.py b/repos/system_upgrade/el7toel8/actors/checkdocker/actor.py deleted file mode 100644 index 5d82c007..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkdocker/actor.py +++ /dev/null @@ -1,36 +0,0 @@ -from leapp import reporting 
-from leapp.actors import Actor -from leapp.libraries.common.rpms import has_package -from leapp.models import InstalledRPM -from leapp.reporting import create_report, Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class CheckDocker(Actor): - """ - Checks if Docker is installed and warns about its deprecation in RHEL8. - """ - - name = 'check_docker' - consumes = (InstalledRPM,) - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - if has_package(InstalledRPM, 'docker'): - create_report([ - reporting.Title('Transition from Docker to Podman in RHEL8'), - reporting.Summary('Docker has been deprecated in favour of Podman in Red Hat Enterprise Linux 8. The ' - 'docker package is going to be removed during the upgrade without migration of ' - 'existing containers.'), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([reporting.Groups.TOOLS]), - reporting.Remediation(hint='It is recommended to re-create the containers with the appropriate ' - 'container images and reattach any in-use volumes using podman directly ' - 'prior to the upgrade of the operating system, which should provide the ' - 'same level of functionality. 
'), - reporting.RelatedResource('package', 'docker'), - reporting.ExternalLink(url='https://access.redhat.com/solutions/5213331', - title='How do I migrate my Docker containers to Podman prior to moving from ' - 'Red Hat Enterprise Linux 7 to Red Hat Enterprise Linux 8?') - ]) diff --git a/repos/system_upgrade/el7toel8/actors/checkdocker/tests/component_test_checkdocker.py b/repos/system_upgrade/el7toel8/actors/checkdocker/tests/component_test_checkdocker.py deleted file mode 100644 index b73459ad..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkdocker/tests/component_test_checkdocker.py +++ /dev/null @@ -1,61 +0,0 @@ -from leapp.models import InstalledRPM, RPM -from leapp.reporting import Report -from leapp.snactor.fixture import current_actor_context - - -def test_actor_with_docker_package(current_actor_context): - with_docker = [ - RPM(name='docker', - epoch='2', - packager='Red Hat, Inc. ', - version='1.13.1', - release='209.git7d71120.el7_9', - arch='x86_64', - pgpsig='RSA/SHA256, Fri 07 Jan 2022 01:50:17 PM UTC, Key ID 199e2f91fd431d51', - repository='installed', - module=None, - stream=None), - RPM(name='grep', - epoch='0', - packager='Red Hat, Inc. ', - version='2.20', - release='3.el7', - arch='x86_64', - pgpsig='RSA/SHA256, Fri 24 Mar 2017 04:59:11 PM UTC, Key ID 199e2f91fd431d51', - repository='anaconda/7.9', - module=None, - stream=None) - ] - - current_actor_context.feed(InstalledRPM(items=with_docker)) - current_actor_context.run() - assert current_actor_context.consume(Report) - - -def test_actor_without_docker_package(current_actor_context): - without_docker = [ - RPM(name='tree', - epoch='0', - packager='Red Hat, Inc. ', - version='1.6.0', - release='10.el7', - arch='x86_64', - pgpsig='RSA/SHA256, Wed 02 Apr 2014 09:33:48 PM UTC, Key ID 199e2f91fd431d51', - repository='installed', - module=None, - stream=None), - RPM(name='grep', - epoch='0', - packager='Red Hat, Inc. 
', - version='2.20', - release='3.el7', - arch='x86_64', - pgpsig='RSA/SHA256, Fri 24 Mar 2017 04:59:11 PM UTC, Key ID 199e2f91fd431d51', - repository='anaconda/7.9', - module=None, - stream=None) - ] - - current_actor_context.feed(InstalledRPM(items=without_docker)) - current_actor_context.run() - assert not current_actor_context.consume(Report) diff --git a/repos/system_upgrade/el7toel8/actors/checkdosfstools/actor.py b/repos/system_upgrade/el7toel8/actors/checkdosfstools/actor.py deleted file mode 100644 index 578bc108..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkdosfstools/actor.py +++ /dev/null @@ -1,38 +0,0 @@ -from leapp import reporting -from leapp.actors import Actor -from leapp.libraries.common.rpms import has_package -from leapp.models import DistributionSignedRPM -from leapp.reporting import create_report, Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class CheckDosfstools(Actor): - """ - Check if dosfstools is installed. If yes, write information about non-compatible changes. - """ - - name = 'checkdosfstools' - consumes = (DistributionSignedRPM,) - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - if has_package(DistributionSignedRPM, 'dosfstools'): - create_report([ - reporting.Title('Dosfstools incompatible changes in the next major version'), - reporting.Summary( - 'The automatic alignment of data clusters that was added in 3.0.8 and broken for ' - 'FAT32 starting with 3.0.20 has been reinstated. 
If you need to create file systems ' - 'for finicky devices that have broken FAT implementations use the option -a to ' - 'disable alignment.\n' - 'The fsck.fat now defaults to interactive repair mode which previously had to be ' - 'selected with the -r option.\n' - ), - reporting.Severity(reporting.Severity.LOW), - reporting.Groups([ - reporting.Groups.FILESYSTEM, - reporting.Groups.TOOLS - ]), - reporting.Remediation(hint='Please update your scripts to be compatible with the changes.'), - reporting.RelatedResource('package', 'dosfstools') - ]) diff --git a/repos/system_upgrade/el7toel8/actors/checkdosfstools/tests/component_test_checkdosfstools.py b/repos/system_upgrade/el7toel8/actors/checkdosfstools/tests/component_test_checkdosfstools.py deleted file mode 100644 index 5c65cf0e..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkdosfstools/tests/component_test_checkdosfstools.py +++ /dev/null @@ -1,33 +0,0 @@ -from leapp.models import DistributionSignedRPM, RPM -from leapp.reporting import Report -from leapp.snactor.fixture import current_actor_context - -RH_PACKAGER = 'Red Hat, Inc. 
' - - -def create_modulesfacts(installed_rpm): - return DistributionSignedRPM(items=installed_rpm) - - -def test_actor_with_dosfstools_package(current_actor_context): - with_dosfstools = [ - RPM(name='dosfstools', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), - RPM(name='powertop', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51')] - - current_actor_context.feed(create_modulesfacts(installed_rpm=with_dosfstools)) - current_actor_context.run() - assert current_actor_context.consume(Report) - - -def test_actor_without_dosfstools_package(current_actor_context): - without_dosfstools = [ - RPM(name='powertop', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), - RPM(name='sed', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51')] - - current_actor_context.feed(create_modulesfacts(installed_rpm=without_dosfstools)) - current_actor_context.run() - assert not current_actor_context.consume(Report) diff --git a/repos/system_upgrade/el7toel8/actors/checkfirewalld/actor.py b/repos/system_upgrade/el7toel8/actors/checkfirewalld/actor.py deleted file mode 100644 index f57a9981..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkfirewalld/actor.py +++ /dev/null @@ -1,75 +0,0 @@ -from leapp import reporting -from leapp.actors import Actor -from leapp.libraries.actor import private -from leapp.models import FirewalldFacts -from leapp.reporting import create_report, Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - -related = [reporting.RelatedResource('package', 'firewalld')] - - -class CheckFirewalld(Actor): - """ - 
Check for certain firewalld configuration that may prevent an upgrade. - """ - - name = 'check_firewalld' - consumes = (FirewalldFacts,) - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - unsupported_tables = [] - unsupported_ipset_types = [] - list_separator_fmt = '\n -' - for facts in self.consume(FirewalldFacts): - for table in facts.ebtablesTablesInUse: - if not private.isEbtablesTableSupported(table): - unsupported_tables.append(table) - for ipset_type in facts.ipsetTypesInUse: - if not private.isIpsetTypeSupportedByNftables(ipset_type): - unsupported_ipset_types.append(ipset_type) - - if unsupported_tables: - format_tuple = ( - list_separator_fmt, - list_separator_fmt.join(list(set(unsupported_tables))),) - create_report([ - reporting.Title('Firewalld is using an unsupported ebtables table.'), - reporting.Summary('ebtables in RHEL-8 does not support these tables:{}{}'.format(*format_tuple)), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([ - reporting.Groups.FIREWALL, - reporting.Groups.SECURITY, - reporting.Groups.NETWORK - ]), - reporting.Groups([ - reporting.Groups.INHIBITOR - ]), - reporting.Remediation( - hint='Remove firewalld direct rules that use these ebtables tables:{}{}'.format(*format_tuple) - ) - ] + related) - - if unsupported_ipset_types: - format_tuple = ( - list_separator_fmt, - list_separator_fmt.join(list(set(unsupported_ipset_types))),) - create_report([ - reporting.Title('Firewalld is using an unsupported ipset type.'), - reporting.Summary( - 'These ipset types are not supported by firewalld\'s nftables backend:{}{}'.format(*format_tuple) - ), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([ - reporting.Groups.FIREWALL, - reporting.Groups.SECURITY, - reporting.Groups.NETWORK - ]), - reporting.Groups([ - reporting.Groups.INHIBITOR - ]), - reporting.Remediation( - hint='Remove ipsets of these types from firewalld:{}{}'.format(*format_tuple) - ) - ] + related) diff 
--git a/repos/system_upgrade/el7toel8/actors/checkfirewalld/libraries/private.py b/repos/system_upgrade/el7toel8/actors/checkfirewalld/libraries/private.py deleted file mode 100644 index b1fcd97a..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkfirewalld/libraries/private.py +++ /dev/null @@ -1,12 +0,0 @@ -def isIpsetTypeSupportedByNftables(ipset_type): - if ipset_type in ['hash:ip', 'hash:mac', 'hash:net']: - return True - - return False - - -def isEbtablesTableSupported(table): - if table in ['filter', 'nat']: - return True - - return False diff --git a/repos/system_upgrade/el7toel8/actors/checkfirewalld/tests/component_test_checkfirewalld.py b/repos/system_upgrade/el7toel8/actors/checkfirewalld/tests/component_test_checkfirewalld.py deleted file mode 100644 index 5a618f2c..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkfirewalld/tests/component_test_checkfirewalld.py +++ /dev/null @@ -1,13 +0,0 @@ -from leapp.models import FirewalldFacts -from leapp.reporting import Report -from leapp.utils.report import is_inhibitor - - -def test_actor_execution(current_actor_context): - current_actor_context.feed( - FirewalldFacts(firewall_config_command='', - ebtablesTablesInUse=['broute'], - ipsetTypesInUse=['hash:net,port'])) - current_actor_context.run() - report_fileds = current_actor_context.consume(Report)[0].report - assert is_inhibitor(report_fileds) diff --git a/repos/system_upgrade/el7toel8/actors/checkfirewalld/tests/unit_test_checkfirewalld.py b/repos/system_upgrade/el7toel8/actors/checkfirewalld/tests/unit_test_checkfirewalld.py deleted file mode 100644 index 657b869d..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkfirewalld/tests/unit_test_checkfirewalld.py +++ /dev/null @@ -1,22 +0,0 @@ -from leapp.libraries.actor import private - - -def test_checkfirewalld_ipset(): - assert private.isIpsetTypeSupportedByNftables('hash:mac') - assert private.isIpsetTypeSupportedByNftables('hash:ip') - assert 
private.isIpsetTypeSupportedByNftables('hash:net') - - assert not private.isIpsetTypeSupportedByNftables('hash:ip,mark') - assert not private.isIpsetTypeSupportedByNftables('hash:ip,port') - assert not private.isIpsetTypeSupportedByNftables('hash:ip,port,ip') - assert not private.isIpsetTypeSupportedByNftables('hash:ip,port,net') - assert not private.isIpsetTypeSupportedByNftables('hash:net,iface') - assert not private.isIpsetTypeSupportedByNftables('hash:net,net') - assert not private.isIpsetTypeSupportedByNftables('hash:net,port') - - -def test_checkfirewalld_ebtables(): - assert private.isEbtablesTableSupported('nat') - assert private.isEbtablesTableSupported('filter') - - assert not private.isEbtablesTableSupported('broute') diff --git a/repos/system_upgrade/el7toel8/actors/checkfirstpartitionoffset/actor.py b/repos/system_upgrade/el7toel8/actors/checkfirstpartitionoffset/actor.py deleted file mode 100644 index cde27c2a..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkfirstpartitionoffset/actor.py +++ /dev/null @@ -1,24 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import check_first_partition_offset -from leapp.models import FirmwareFacts, GRUBDevicePartitionLayout -from leapp.reporting import Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class CheckFirstPartitionOffset(Actor): - """ - Check whether the first partition starts at the offset >=1MiB. - - The alignment of the first partition plays role in disk access speeds. Older tools placed the start of the first - partition at cylinder 63 (due to historical reasons connected to the INT13h BIOS API). However, grub core - binary is placed before the start of the first partition, meaning that not enough space causes bootloader - installation to fail. Modern partitioning tools place the first partition at >= 1MiB (cylinder 2048+). 
- """ - - name = 'check_first_partition_offset' - consumes = (FirmwareFacts, GRUBDevicePartitionLayout,) - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag,) - - def process(self): - check_first_partition_offset.check_first_partition_offset() diff --git a/repos/system_upgrade/el7toel8/actors/checkfirstpartitionoffset/libraries/check_first_partition_offset.py b/repos/system_upgrade/el7toel8/actors/checkfirstpartitionoffset/libraries/check_first_partition_offset.py deleted file mode 100644 index fca9c3ff..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkfirstpartitionoffset/libraries/check_first_partition_offset.py +++ /dev/null @@ -1,59 +0,0 @@ -from leapp import reporting -from leapp.libraries.common.config import architecture -from leapp.libraries.stdlib import api -from leapp.models import FirmwareFacts, GRUBDevicePartitionLayout - -SAFE_OFFSET_BYTES = 1024*1024 # 1MiB - - -def check_first_partition_offset(): - if architecture.matches_architecture(architecture.ARCH_S390X): - return - - for fact in api.consume(FirmwareFacts): - if fact.firmware == 'efi': - return # Skip EFI system - - problematic_devices = [] - for grub_dev in api.consume(GRUBDevicePartitionLayout): - if not grub_dev.partitions: - # NOTE(pstodulk): In case of empty partition list we have nothing to do. - # This can could happen when the fdisk output is different then expected. - # E.g. when GPT partition table is used on the disk. We are right now - # interested strictly about MBR only, so ignoring these cases. - # This is seatbelt, as the msg should not be produced for GPT at all. 
- continue - first_partition = min(grub_dev.partitions, key=lambda partition: partition.start_offset) - if first_partition.start_offset < SAFE_OFFSET_BYTES: - problematic_devices.append(grub_dev.device) - - if problematic_devices: - summary = ( - 'On the system booting by using BIOS, the in-place upgrade fails ' - 'when upgrading the GRUB2 bootloader if the boot disk\'s embedding area ' - 'does not contain enough space for the core image installation. ' - 'This results in a broken system, and can occur when the disk has been ' - 'partitioned manually, for example using the RHEL 6 fdisk utility.\n\n' - - 'The list of devices with small embedding area:\n' - '{0}.' - ) - problematic_devices_fmt = ['- {0}'.format(dev) for dev in problematic_devices] - - hint = ( - 'We recommend to perform a fresh installation of the RHEL 8 system ' - 'instead of performing the in-place upgrade.\n' - 'Another possibility is to reformat the devices so that there is ' - 'at least {0} kiB space before the first partition. ' - 'Note that this operation is not supported and does not have to be ' - 'always possible.' 
- ) - - reporting.create_report([ - reporting.Title('Found GRUB devices with too little space reserved before the first partition'), - reporting.Summary(summary.format('\n'.join(problematic_devices_fmt))), - reporting.Remediation(hint=hint.format(SAFE_OFFSET_BYTES // 1024)), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([reporting.Groups.BOOT]), - reporting.Groups([reporting.Groups.INHIBITOR]), - ]) diff --git a/repos/system_upgrade/el7toel8/actors/checkfirstpartitionoffset/tests/test_check_first_partition_offset.py b/repos/system_upgrade/el7toel8/actors/checkfirstpartitionoffset/tests/test_check_first_partition_offset.py deleted file mode 100644 index f925f7d4..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkfirstpartitionoffset/tests/test_check_first_partition_offset.py +++ /dev/null @@ -1,67 +0,0 @@ -import pytest - -from leapp import reporting -from leapp.libraries.actor import check_first_partition_offset -from leapp.libraries.common import grub -from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked -from leapp.libraries.stdlib import api -from leapp.models import FirmwareFacts, GRUBDevicePartitionLayout, PartitionInfo -from leapp.reporting import Report -from leapp.utils.report import is_inhibitor - - -@pytest.mark.parametrize( - ('devices', 'should_report'), - [ - ( - [ - GRUBDevicePartitionLayout(device='/dev/vda', - partitions=[PartitionInfo(part_device='/dev/vda1', start_offset=32256)]) - ], - True - ), - ( - [ - GRUBDevicePartitionLayout(device='/dev/vda', - partitions=[ - PartitionInfo(part_device='/dev/vda2', start_offset=1024*1025), - PartitionInfo(part_device='/dev/vda1', start_offset=32256) - ]) - ], - True - ), - ( - [ - GRUBDevicePartitionLayout(device='/dev/vda', - partitions=[PartitionInfo(part_device='/dev/vda1', start_offset=1024*1025)]) - ], - False - ), - ( - [ - GRUBDevicePartitionLayout(device='/dev/vda', - partitions=[PartitionInfo(part_device='/dev/vda1', 
start_offset=1024*1024)]) - ], - False - ), - ( - [ - GRUBDevicePartitionLayout(device='/dev/vda', partitions=[]) - ], - False - ) - ] -) -def test_bad_offset_reported(monkeypatch, devices, should_report): - def consume_mocked(model_cls): - if model_cls == FirmwareFacts: - return [FirmwareFacts(firmware='bios')] - return devices - - monkeypatch.setattr(api, 'consume', consume_mocked) - monkeypatch.setattr(api, 'current_actor', CurrentActorMocked()) - monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) - - check_first_partition_offset.check_first_partition_offset() - - assert bool(reporting.create_report.called) == should_report diff --git a/repos/system_upgrade/el7toel8/actors/checkfstabxfsoptions/actor.py b/repos/system_upgrade/el7toel8/actors/checkfstabxfsoptions/actor.py deleted file mode 100644 index 94d8b3c2..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkfstabxfsoptions/actor.py +++ /dev/null @@ -1,27 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import checkfstabxfsoptions -from leapp.models import StorageInfo -from leapp.reporting import Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class CheckFstabXFSOptions(Actor): - """ - Check the FSTAB file for the deprecated / removed XFS mount options. - - Some mount options for XFS have been deprecated on RHEL 7 and already - removed on RHEL 8. If any such an option is present in the FSTAB, - it's impossible to boot the RHEL 8 system without the manual update of the - file. - - Check whether any of these options are present in the FSTAB file - and inhibit the upgrade in such a case. 
- """ - - name = 'checkfstabxfsoptions' - consumes = (StorageInfo,) - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - checkfstabxfsoptions.process() diff --git a/repos/system_upgrade/el7toel8/actors/checkfstabxfsoptions/libraries/checkfstabxfsoptions.py b/repos/system_upgrade/el7toel8/actors/checkfstabxfsoptions/libraries/checkfstabxfsoptions.py deleted file mode 100644 index e9e0fafe..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkfstabxfsoptions/libraries/checkfstabxfsoptions.py +++ /dev/null @@ -1,59 +0,0 @@ -from leapp import reporting -from leapp.exceptions import StopActorExecutionError -from leapp.libraries.stdlib import api -from leapp.models import StorageInfo - -# man 5 xfs -REMOVED_XFS_OPTIONS = set([ - # removed from kernel in 4.0 - 'nodelaylog', - 'delaylog', - 'ihashsize', - 'irixsgid', - 'osyncisdsync', - 'osyncisosync', - # removed from kernel in 4.19 - 'nobarrier', - 'barrier', -]) - - -def _get_storage_data(): - storage = next(api.consume(StorageInfo), None) - if not storage: - raise StopActorExecutionError('The StorageInfo message is not available.') - if not storage.fstab: - raise StopActorExecutionError('Data from the /etc/fstab file is missing.') - return storage - - -def process(): - storage = _get_storage_data() - used_removed_options = set() - for entry in storage.fstab: - if entry.fs_vfstype == 'xfs': - # NOTE: some opts could have a value, like ihashsize=4096 - we want - # just the name of the option (that's why the double-split) - options = set([opt.split('=')[0] for opt in entry.fs_mntops.split(',')]) - used_removed_options.update(options.intersection(REMOVED_XFS_OPTIONS)) - - if not used_removed_options: - return - - list_separator_fmt = '\n - ' - reporting.create_report([ - reporting.Title('Deprecated XFS mount options present in FSTAB.'), - reporting.Summary( - 'Some XFS mount options are not supported on RHEL 8 and prevent' - ' system from booting correctly if any of the reported XFS 
options are used.' - ' filesystem:{}{}.'.format( - list_separator_fmt, - list_separator_fmt.join(list(REMOVED_XFS_OPTIONS)))), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([reporting.Groups.INHIBITOR]), - reporting.Groups([reporting.Groups.FILESYSTEM]), - reporting.RelatedResource('file', '/etc/fstab'), - reporting.Remediation(hint=( - 'Drop the following mount options from the /etc/fstab file for any' - ' XFS filesystem: {}.'.format(', '.join(used_removed_options)))), - ]) diff --git a/repos/system_upgrade/el7toel8/actors/checkfstabxfsoptions/tests/test_checkfstabxfsoptions.py b/repos/system_upgrade/el7toel8/actors/checkfstabxfsoptions/tests/test_checkfstabxfsoptions.py deleted file mode 100644 index a3f1ee70..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkfstabxfsoptions/tests/test_checkfstabxfsoptions.py +++ /dev/null @@ -1,69 +0,0 @@ -import pytest - -from leapp.exceptions import StopActorExecutionError -from leapp.libraries.actor import checkfstabxfsoptions -from leapp.models import FstabEntry, StorageInfo -from leapp.reporting import Report -from leapp.snactor.fixture import current_actor_context -from leapp.utils.report import is_inhibitor - - -def _myint_gen(): - i = 0 - while True: - yield i - i += 1 - - -def _gen_fs_ent(fstype='ext4', mntops='auto', val=_myint_gen()): - return FstabEntry( - fs_spec='/path/spec/{}'.format(next(val)), - fs_file='/path/file/{}'.format(next(val)), - fs_vfstype=fstype, - fs_mntops=mntops, - fs_freq='1', - fs_passno='1', - ) - - -@pytest.mark.parametrize('fstab', [ - [_gen_fs_ent()], - [_gen_fs_ent() for dummy in range(4)], - [_gen_fs_ent(), _gen_fs_ent('ext4', 'auto,quota,huge_file')], - # checking that problematic options are ignored for non-xfs FS - [_gen_fs_ent(), _gen_fs_ent('ext4', 'auto,barier,huge_file')], - [_gen_fs_ent('ext4', i) for i in checkfstabxfsoptions.REMOVED_XFS_OPTIONS], - [_gen_fs_ent(i, 'nobarrier') for i in ('ext4', 'ext3', 'vfat', 'btrfs')], -]) -def 
test_no_xfs_option(fstab, current_actor_context): - current_actor_context.feed(StorageInfo(fstab=fstab)) - current_actor_context.run() - report = current_actor_context.consume(Report) - assert not report - - -# each item == one fstab -problematic_fstabs = [[_gen_fs_ent('xfs', ','.join(checkfstabxfsoptions.REMOVED_XFS_OPTIONS))]] -for opt in checkfstabxfsoptions.REMOVED_XFS_OPTIONS: - problematic_fstabs.append([_gen_fs_ent('xfs', opt)]) - problematic_fstabs.append([_gen_fs_ent(), _gen_fs_ent('xfs', opt)]) - problematic_fstabs.append([_gen_fs_ent(), _gen_fs_ent('xfs', opt), _gen_fs_ent()]) - pre_opts = '{},auto,quota'.format(opt) - in_opts = 'auto,{},quota'.format(opt) - post_opts = 'auto,quota,{}'.format(opt) - problematic_fstabs.append([_gen_fs_ent(), _gen_fs_ent('xfs', pre_opts)]) - problematic_fstabs.append([_gen_fs_ent(), _gen_fs_ent('xfs', in_opts)]) - problematic_fstabs.append([_gen_fs_ent(), _gen_fs_ent('xfs', post_opts)]) -# ensure we catch even cases when a value is expected to be specified; we know just this -# one case, so it should be representative it's working like that.. 
-problematic_fstabs.append([_gen_fs_ent(), _gen_fs_ent('xfs', 'defaults,ihashsize=4096')]) -problematic_fstabs.append([_gen_fs_ent(), _gen_fs_ent('xfs', 'defaults,ihashsize=4096,auto')]) - - -@pytest.mark.parametrize('fstab', problematic_fstabs) -def test_removed_xfs_option(fstab, current_actor_context): - current_actor_context.feed(StorageInfo(fstab=fstab)) - current_actor_context.run() - report = current_actor_context.consume(Report) - assert report and len(report) == 1 - assert is_inhibitor(report[0].report) diff --git a/repos/system_upgrade/el7toel8/actors/checkgrep/actor.py b/repos/system_upgrade/el7toel8/actors/checkgrep/actor.py deleted file mode 100644 index 594cf92e..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkgrep/actor.py +++ /dev/null @@ -1,42 +0,0 @@ -from leapp import reporting -from leapp.actors import Actor -from leapp.libraries.common.rpms import has_package -from leapp.models import DistributionSignedRPM -from leapp.reporting import create_report, Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class CheckGrep(Actor): - """ - Check if Grep is installed. If yes, write information about non-compatible changes. - """ - - name = 'checkgrep' - consumes = (DistributionSignedRPM,) - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - if has_package(DistributionSignedRPM, 'grep'): - create_report([ - reporting.Title('Grep has incompatible changes in the next major version'), - reporting.Summary( - 'If a file contains data improperly encoded for the current locale, and this is ' - 'discovered before any of the file\'s contents are output, grep now treats the file ' - 'as binary.\n' - 'The \'grep -P\' no longer reports an error and exits when given invalid UTF-8 data. 
' - 'Instead, it considers the data to be non-matching.\n' - 'In locales with multibyte character encodings other than UTF-8, grep -P now reports ' - 'an error and exits instead of misbehaving.\n' - 'When searching binary data, grep now may treat non-text bytes as line terminators. ' - 'This can boost performance significantly.\n' - 'The \'grep -z\' no longer automatically treats the byte \'\\200\' as binary data.\n' - 'Context no longer excludes selected lines omitted because of -m. For example, ' - '\'grep "^" -m1 -A1\' now outputs the first two input lines, not just the first ' - 'line.\n' - ), - reporting.Severity(reporting.Severity.LOW), - reporting.Groups([reporting.Groups.TOOLS]), - reporting.Remediation(hint='Please update your scripts to be compatible with the changes.'), - reporting.RelatedResource('package', 'grep') - ]) diff --git a/repos/system_upgrade/el7toel8/actors/checkgrep/tests/component_test_checkgrep.py b/repos/system_upgrade/el7toel8/actors/checkgrep/tests/component_test_checkgrep.py deleted file mode 100644 index bb673a86..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkgrep/tests/component_test_checkgrep.py +++ /dev/null @@ -1,33 +0,0 @@ -from leapp.models import DistributionSignedRPM, RPM -from leapp.reporting import Report -from leapp.snactor.fixture import current_actor_context - -RH_PACKAGER = 'Red Hat, Inc. 
' - - -def create_modulesfacts(installed_rpm): - return DistributionSignedRPM(items=installed_rpm) - - -def test_actor_with_grep_package(current_actor_context): - with_grep = [ - RPM(name='grep', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), - RPM(name='powertop', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51')] - - current_actor_context.feed(create_modulesfacts(installed_rpm=with_grep)) - current_actor_context.run() - assert current_actor_context.consume(Report) - - -def test_actor_without_grep_package(current_actor_context): - without_grep = [ - RPM(name='powertop', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), - RPM(name='sed', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51')] - - current_actor_context.feed(create_modulesfacts(installed_rpm=without_grep)) - current_actor_context.run() - assert not current_actor_context.consume(Report) diff --git a/repos/system_upgrade/el7toel8/actors/checkhacluster/actor.py b/repos/system_upgrade/el7toel8/actors/checkhacluster/actor.py deleted file mode 100644 index ae62c52a..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkhacluster/actor.py +++ /dev/null @@ -1,23 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor.checkhacluster import check_ha_cluster -from leapp.reporting import Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class Checkhacluster(Actor): - """ - Check if HA Cluster is in use. If yes, inhibit the upgrade process. 
- - The system is considered as part of cluster if a corosync.conf file - (/etc/corosync/corosync.conf) can be found there. - Also the system can be a part of a cluster as a remote node. In such case - a cib file (/var/lib/pacemaker/cib/cib.xml) can be found there. - """ - - name = "check_ha_cluster" - consumes = () - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - check_ha_cluster() diff --git a/repos/system_upgrade/el7toel8/actors/checkhacluster/libraries/checkhacluster.py b/repos/system_upgrade/el7toel8/actors/checkhacluster/libraries/checkhacluster.py deleted file mode 100644 index 115867d2..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkhacluster/libraries/checkhacluster.py +++ /dev/null @@ -1,52 +0,0 @@ -import os.path - -from leapp import reporting -from leapp.reporting import create_report - -COROSYNC_CONF_LOCATION = "/etc/corosync/corosync.conf" -CIB_LOCATION = "/var/lib/pacemaker/cib/cib.xml" - - -def inhibit(node_type): - create_report([ - reporting.Title("Use of HA cluster detected. Upgrade can't proceed."), - reporting.Summary( - "HA cluster is not supported by the inplace upgrade.\n" - "HA cluster configuration file(s) found." 
- " It seems to be a cluster {0}.".format(node_type) - ), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([reporting.Groups.HIGH_AVAILABILITY]), - reporting.Groups([reporting.Groups.INHIBITOR]), - reporting.ExternalLink( - url="https://access.redhat.com/articles/2059253", - title=( - "Recommended Practices for Applying Software Updates" - " to a RHEL High Availability or Resilient Storage Cluster" - ), - ), - reporting.ExternalLink( - url='https://access.redhat.com/solutions/7049940', - title='Leapp upgrade from RHEL 7 to RHEL 8 fails for pacemaker cluster' - ), - reporting.Remediation( - hint=( - "Destroy the existing HA cluster" - " or (if you have already removed HA cluster packages) remove" - " configuration files {0} and {1}".format( - CIB_LOCATION, - COROSYNC_CONF_LOCATION, - ) - ), - commands=[["sh", "-c", "pcs cluster stop --all --wait && pcs cluster destroy --all"]] - ), - reporting.RelatedResource('file', COROSYNC_CONF_LOCATION), - reporting.RelatedResource('file', CIB_LOCATION) - ]) - - -def check_ha_cluster(): - if os.path.isfile(COROSYNC_CONF_LOCATION): - inhibit(node_type="node") - elif os.path.isfile(CIB_LOCATION): - inhibit(node_type="remote node") diff --git a/repos/system_upgrade/el7toel8/actors/checkhacluster/tests/test_check_ha_cluster_checkhacluster.py b/repos/system_upgrade/el7toel8/actors/checkhacluster/tests/test_check_ha_cluster_checkhacluster.py deleted file mode 100644 index f8cc0ec2..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkhacluster/tests/test_check_ha_cluster_checkhacluster.py +++ /dev/null @@ -1,44 +0,0 @@ -from leapp.libraries.actor.checkhacluster import CIB_LOCATION, COROSYNC_CONF_LOCATION -from leapp.reporting import Report -from leapp.utils.report import is_inhibitor - - -def assert_inhibits(reports, node_type): - assert len(reports) == 1 - report_fields = reports[0].report - assert is_inhibitor(report_fields) - assert "cluster {0}".format(node_type) in report_fields["summary"] - - -def 
test_no_inhibit_when_no_ha_cluster(monkeypatch, current_actor_context): - monkeypatch.setattr("os.path.isfile", lambda path: False) - current_actor_context.run() - assert not current_actor_context.consume(Report) - - -def test_inhibits_when_cluster_node(monkeypatch, current_actor_context): - # NOTE(ivasilev) Limiting here the paths to mock not to cause unexpected side-effects - # (original test had path: True) - monkeypatch.setattr("os.path.isfile", lambda path: path in (COROSYNC_CONF_LOCATION, CIB_LOCATION)) - current_actor_context.run() - assert_inhibits(current_actor_context.consume(Report), "node") - - -def test_inhibits_when_cluster_node_no_cib(monkeypatch, current_actor_context): - monkeypatch.setattr( - "os.path.isfile", - lambda path: path == COROSYNC_CONF_LOCATION - ) - current_actor_context.run() - assert_inhibits(current_actor_context.consume(Report), "node") - - -def test_inhibits_when_cluster_remote_node(monkeypatch, current_actor_context): - # NOTE(ivasilev) Limiting here the paths to mock not to cause unexpected side-effects - # (original test had path: path != COROSYNC_CONF_LOCATION) - monkeypatch.setattr( - "os.path.isfile", - lambda path: path == CIB_LOCATION - ) - current_actor_context.run() - assert_inhibits(current_actor_context.consume(Report), "remote node") diff --git a/repos/system_upgrade/el7toel8/actors/checkirssi/actor.py b/repos/system_upgrade/el7toel8/actors/checkirssi/actor.py deleted file mode 100644 index b7f8d071..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkirssi/actor.py +++ /dev/null @@ -1,36 +0,0 @@ -from leapp import reporting -from leapp.actors import Actor -from leapp.libraries.common.rpms import has_package -from leapp.models import DistributionSignedRPM -from leapp.reporting import create_report, Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class CheckIrssi(Actor): - """ - Check if irssi is installed. If yes, write information about non-compatible changes. 
- """ - - name = 'checkirssi' - consumes = (DistributionSignedRPM,) - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - if has_package(DistributionSignedRPM, 'irssi'): - create_report([ - reporting.Title('Irssi incompatible changes in the next major version'), - reporting.Summary( - 'Disabled support for the insecure SSLv2 protocol.\n' - 'Disabled SSLv3 due to the POODLE vulnerability.\n' - 'Removing networks will now remove all attached servers and channels.\n' - 'Removed --disable-ipv6 option.\n' - ), - reporting.Severity(reporting.Severity.LOW), - reporting.Groups([ - reporting.Groups.COMMUNICATION, - reporting.Groups.TOOLS - ]), - reporting.Remediation(hint='Please update your scripts to be compatible with the changes.'), - reporting.RelatedResource('package', 'irssi') - ]) diff --git a/repos/system_upgrade/el7toel8/actors/checkirssi/tests/component_test_checkirssi.py b/repos/system_upgrade/el7toel8/actors/checkirssi/tests/component_test_checkirssi.py deleted file mode 100644 index 9356d180..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkirssi/tests/component_test_checkirssi.py +++ /dev/null @@ -1,33 +0,0 @@ -from leapp.models import DistributionSignedRPM, RPM -from leapp.reporting import Report -from leapp.snactor.fixture import current_actor_context - -RH_PACKAGER = 'Red Hat, Inc. 
' - - -def create_modulesfacts(installed_rpm): - return DistributionSignedRPM(items=installed_rpm) - - -def test_actor_with_irssi_package(current_actor_context): - with_irssi = [ - RPM(name='irssi', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), - RPM(name='powertop', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51')] - - current_actor_context.feed(create_modulesfacts(installed_rpm=with_irssi)) - current_actor_context.run() - assert current_actor_context.consume(Report) - - -def test_actor_without_irssi_package(current_actor_context): - without_irssi = [ - RPM(name='powertop', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), - RPM(name='sed', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51')] - - current_actor_context.feed(create_modulesfacts(installed_rpm=without_irssi)) - current_actor_context.run() - assert not current_actor_context.consume(Report) diff --git a/repos/system_upgrade/el7toel8/actors/checkkdeapps/actor.py b/repos/system_upgrade/el7toel8/actors/checkkdeapps/actor.py deleted file mode 100644 index cec2cc38..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkkdeapps/actor.py +++ /dev/null @@ -1,20 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor.checkkdeapps import get_kde_apps_info -from leapp.models import InstalledKdeAppsFacts, InstalledRPM -from leapp.tags import FactsPhaseTag, IPUWorkflowTag - - -class CheckKdeApps(Actor): - """ - Actor checks which KDE apps are installed. 
- """ - - name = 'check_kde_apps' - consumes = (InstalledRPM,) - produces = (InstalledKdeAppsFacts,) - tags = (FactsPhaseTag, IPUWorkflowTag) - - def process(self): - app_facts = get_kde_apps_info() - self.produce(InstalledKdeAppsFacts( - installed_apps=app_facts)) diff --git a/repos/system_upgrade/el7toel8/actors/checkkdeapps/libraries/checkkdeapps.py b/repos/system_upgrade/el7toel8/actors/checkkdeapps/libraries/checkkdeapps.py deleted file mode 100644 index 6ae6f09a..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkkdeapps/libraries/checkkdeapps.py +++ /dev/null @@ -1,25 +0,0 @@ -from leapp.libraries.common.rpms import has_package -from leapp.libraries.stdlib import api -from leapp.models import InstalledRPM - - -def get_kde_apps_info(): - installed = list() - base_kde_apps = ("kde-baseapps", - "okular", - "ark", - "kdepim", - "konsole", - "gwenview", - "kdenetwork", - "kate", - "kwrite") - - api.current_logger().info(" Detecting installed KDE apps ") - api.current_logger().info("================================") - for app in [application for application in base_kde_apps if has_package(InstalledRPM, application)]: - api.current_logger().info("Application {0} is installed.".format(app)) - installed.append(app) - api.current_logger().info("----------------------------------") - - return installed diff --git a/repos/system_upgrade/el7toel8/actors/checkkdeapps/tests/unit_test_checkkdeapps.py b/repos/system_upgrade/el7toel8/actors/checkkdeapps/tests/unit_test_checkkdeapps.py deleted file mode 100644 index fbbb0b78..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkkdeapps/tests/unit_test_checkkdeapps.py +++ /dev/null @@ -1,46 +0,0 @@ -from leapp.models import InstalledKdeAppsFacts, InstalledRPM, RPM -from leapp.snactor.fixture import current_actor_context - -RH_PACKAGER = 'Red Hat, Inc. 
' - -# KDE apps (only name matters, other values are irrelevant) -okular_RPM = RPM(name='okular', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51') -kdenetwork_RPM = RPM(name='kdenetwork', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, - arch='noarch', pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51') -kate_RPM = RPM(name='kate', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51') - -# Some other apps to check false detection (only name matters, other values are irrelevant) -epiphany_PRM = RPM(name='epiphany', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51') -polari_RPM = RPM(name='polari', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51') - - -def test_no_app_present(current_actor_context): - current_actor_context.feed(InstalledRPM(items=[])) - current_actor_context.run() - message = current_actor_context.consume(InstalledKdeAppsFacts)[0] - assert not message.installed_apps - - -def test_no_KDE_app_present(current_actor_context): - current_actor_context.feed(InstalledRPM(items=[epiphany_PRM, polari_RPM])) - current_actor_context.run() - message = current_actor_context.consume(InstalledKdeAppsFacts)[0] - assert not message.installed_apps - - -def test_only_KDE_apps_present(current_actor_context): - current_actor_context.feed(InstalledRPM(items=[okular_RPM, kdenetwork_RPM, kate_RPM])) - current_actor_context.run() - message = current_actor_context.consume(InstalledKdeAppsFacts)[0] - assert len(message.installed_apps) == 3 - - -def test_many_apps_present(current_actor_context): - 
current_actor_context.feed(InstalledRPM(items=[okular_RPM, kdenetwork_RPM, kate_RPM, epiphany_PRM, polari_RPM])) - current_actor_context.run() - message = current_actor_context.consume(InstalledKdeAppsFacts)[0] - assert len(message.installed_apps) == 3 diff --git a/repos/system_upgrade/el7toel8/actors/checkkdegnome/actor.py b/repos/system_upgrade/el7toel8/actors/checkkdegnome/actor.py deleted file mode 100644 index d6e0f0a6..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkkdegnome/actor.py +++ /dev/null @@ -1,31 +0,0 @@ -""" -Actor to check if KDE and/or GNOME are installed -Author: Jan Beran -Email: jaberan@redhat.com -""" - -from leapp.actors import Actor -from leapp.libraries.actor.checkkdegnome import check_kde_gnome -from leapp.models import InstalledDesktopsFacts, InstalledKdeAppsFacts -from leapp.reporting import Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class CheckKdeGnome(Actor): - """ - Checks whether KDE is installed - - Actor will check whether KDE is installed together with GNOME desktop to inform whether we can - inhibit the upgrade process. When both are installed, we need to inform the user that KDE will - be removed and GNOME will be used instead. If only KDE is installed, we want to inhibit - the upgrade process otherwise the user will end up without a desktop. - Note: The Package Evolution Service data makes sure the KDE-related packages are removed in the - dnf upgrade transaction. 
- """ - name = 'check_kde_gnome' - consumes = (InstalledDesktopsFacts, InstalledKdeAppsFacts) - produces = (Report,) - tags = (IPUWorkflowTag, ChecksPhaseTag) - - def process(self): - check_kde_gnome() diff --git a/repos/system_upgrade/el7toel8/actors/checkkdegnome/libraries/checkkdegnome.py b/repos/system_upgrade/el7toel8/actors/checkkdegnome/libraries/checkkdegnome.py deleted file mode 100644 index 9479c2b6..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkkdegnome/libraries/checkkdegnome.py +++ /dev/null @@ -1,74 +0,0 @@ -from leapp import reporting -from leapp.libraries.stdlib import api -from leapp.models import InstalledDesktopsFacts, InstalledKdeAppsFacts - - -def check_kde_gnome(): - desktop_facts = next(api.consume(InstalledDesktopsFacts)) - kde_desktop_installed = desktop_facts.kde_installed - gnome_desktop_installed = desktop_facts.gnome_installed - - # No desktop installed, we don't even care about apps as they are most likely not used or even installed - if not kde_desktop_installed and not gnome_desktop_installed: - api.current_logger().info("No desktop installed. Continuing with the upgrade.") - return - - if kde_desktop_installed: - api.current_logger().info("KDE desktop is installed. Checking what we can do about it.") - if not gnome_desktop_installed: - api.current_logger().error("Cannot perform the upgrade because there is" - " no other desktop than KDE installed.") - # We cannot continue with the upgrade process - reporting.create_report([ - reporting.Title("The installed KDE environment is unavailable on RHEL 8."), - reporting.Summary( - "Because the KDE desktop environment is not available on RHEL 8, all the KDE-related packages" - " would be removed during the upgrade. 
There would be no desktop environment installed after the" - " upgrade."), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([ - reporting.Groups.DESKTOP - ]), - reporting.Groups([ - reporting.Groups.INHIBITOR - ]), - reporting.Remediation( - hint=("Remove KDE (at least the `kde-workspace` package) or install the GNOME desktop environment" - " to be able to upgrade."), - commands=[['yum', '-y', 'groupinstall', '"Server with GUI"']]) - ]) - return - - # Assume both GNOME and KDE are installed in this state - api.current_logger().info("Upgrade can be performed, but KDE desktop will" - " be removed in favor of GNOME") - reporting.create_report([ - reporting.Title("Upgrade can be performed, but KDE will be uninstalled."), - reporting.Summary("The KDE desktop environment is unavailable on RHEL 8. KDE will be uninstalled " - "in favor of GNOME during the upgrade."), - reporting.Severity(reporting.Severity.MEDIUM), - reporting.Groups([ - reporting.Groups.DESKTOP - ])]) - api.current_logger().info("----------------------------------") - - # At this state we just need to detect whether any KDE/Qt app is installed to inform user - # that the application will be removed during the upgrade process. No matter if KDE is installed - # or not. - - KDEAppsFacts = next(api.consume(InstalledKdeAppsFacts)) - if KDEAppsFacts.installed_apps: - # upgrade can be performed, but user will loose KDE apps - api.current_logger().info("Installed KDE/Qt apps detected.") - reporting.create_report([ - reporting.Title("Upgrade can be performed, but KDE/Qt apps will be uninstalled."), - reporting.Summary("The KDE desktop environment is unavailable on RHEL 8. 
" - "All the KDE/Qt apps will be removed during the upgrade, including but not limited " - "to:\n- {0}".format("\n- ".join(KDEAppsFacts.installed_apps))), - reporting.Severity(reporting.Severity.MEDIUM), - reporting.Groups([ - reporting.Groups.DESKTOP - ])]) - else: - api.current_logger().info("No KDE app in use detected.") - # upgrade can be performed diff --git a/repos/system_upgrade/el7toel8/actors/checkkdegnome/tests/unit_test_checkkdegnome.py b/repos/system_upgrade/el7toel8/actors/checkkdegnome/tests/unit_test_checkkdegnome.py deleted file mode 100644 index c91bc730..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkkdegnome/tests/unit_test_checkkdegnome.py +++ /dev/null @@ -1,94 +0,0 @@ -from leapp.models import InstalledDesktopsFacts, InstalledKdeAppsFacts, Report -from leapp.utils.report import is_inhibitor - -no_desktop_env = InstalledDesktopsFacts(gnome_installed=False, - kde_installed=False) -gnome_desktop_env = InstalledDesktopsFacts(gnome_installed=True, - kde_installed=False) -KDE_desktop_env = InstalledDesktopsFacts(gnome_installed=False, - kde_installed=True) -both_desktop_env = InstalledDesktopsFacts(gnome_installed=True, - kde_installed=True) - - -no_KDE_apps = InstalledKdeAppsFacts(installed_apps=[]) -some_KDE_apps = InstalledKdeAppsFacts(installed_apps=["okular", "kate"]) - - -def test_no_desktop_no_apps(current_actor_context): - """ - No action expected. - """ - current_actor_context.feed(no_desktop_env) - current_actor_context.feed(no_KDE_apps) - current_actor_context.run() - assert not current_actor_context.consume(Report) - - -def test_gnome_desktop_no_apps(current_actor_context): - """ - No action expected. - """ - current_actor_context.feed(gnome_desktop_env) - current_actor_context.feed(no_KDE_apps) - current_actor_context.run() - assert not current_actor_context.consume(Report) - - -def test_gnome_desktop_KDE_apps(current_actor_context): - """ - One report about deleting KDE apps expected. 
- """ - current_actor_context.feed(gnome_desktop_env) - current_actor_context.feed(some_KDE_apps) - current_actor_context.run() - message = current_actor_context.consume(Report)[0] - assert "Upgrade can be performed, but KDE/Qt apps will be uninstalled." in message.report["title"] - - -def test_KDE_desktop_no_apps(current_actor_context): - """ - "Inhibitor" flag in report expected. - """ - current_actor_context.feed(KDE_desktop_env) - current_actor_context.feed(no_KDE_apps) - current_actor_context.run() - message = current_actor_context.consume(Report)[0] - assert is_inhibitor(message.report) - - -def test_KDE_desktop_KDE_apps(current_actor_context): - """ - "Inhibitor" flag in report expected. - """ - current_actor_context.feed(KDE_desktop_env) - current_actor_context.feed(some_KDE_apps) - current_actor_context.run() - message = current_actor_context.consume(Report)[0] - assert is_inhibitor(message.report) - - -def test_both_desktops_no_apps(current_actor_context): - """ - Report about removing KDE desktop environment expected. - """ - current_actor_context.feed(both_desktop_env) - current_actor_context.feed(no_KDE_apps) - current_actor_context.run() - message = current_actor_context.consume(Report)[0] - assert "Upgrade can be performed, but KDE will be uninstalled." in message.report["title"] - - -def test_both_desktop_KDE_apps(current_actor_context): - """ - Two reports expected, first about removing KDE desktop, second about KDE/Qt apps - """ - current_actor_context.feed(both_desktop_env) - current_actor_context.feed(some_KDE_apps) - current_actor_context.run() - messages = current_actor_context.consume(Report) - remove_KDE_title = "Upgrade can be performed, but KDE will be uninstalled." - remove_apps_title = "Upgrade can be performed, but KDE/Qt apps will be uninstalled." 
- assert len(messages) == 2 - assert [True for message in messages if remove_KDE_title in message.report["title"]] - assert [True for message in messages if remove_apps_title in message.report["title"]] diff --git a/repos/system_upgrade/el7toel8/actors/checklegacygrub/actor.py b/repos/system_upgrade/el7toel8/actors/checklegacygrub/actor.py deleted file mode 100644 index 1fc7dde4..00000000 --- a/repos/system_upgrade/el7toel8/actors/checklegacygrub/actor.py +++ /dev/null @@ -1,20 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import check_legacy_grub as check_legacy_grub_lib -from leapp.reporting import Report -from leapp.tags import FactsPhaseTag, IPUWorkflowTag - - -class CheckLegacyGrub(Actor): - """ - Check whether GRUB Legacy is installed in the MBR. - - GRUB Legacy is deprecated since RHEL 7 in favour of GRUB2. - """ - - name = 'check_grub_legacy' - consumes = () - produces = (Report,) - tags = (FactsPhaseTag, IPUWorkflowTag) - - def process(self): - check_legacy_grub_lib.check_grub_disks_for_legacy_grub() diff --git a/repos/system_upgrade/el7toel8/actors/checklegacygrub/libraries/check_legacy_grub.py b/repos/system_upgrade/el7toel8/actors/checklegacygrub/libraries/check_legacy_grub.py deleted file mode 100644 index d02c14f9..00000000 --- a/repos/system_upgrade/el7toel8/actors/checklegacygrub/libraries/check_legacy_grub.py +++ /dev/null @@ -1,71 +0,0 @@ -from leapp import reporting -from leapp.exceptions import StopActorExecution -from leapp.libraries.common import grub as grub_lib -from leapp.libraries.stdlib import api, CalledProcessError, run -from leapp.reporting import create_report - -# There is no grub legacy package on RHEL7, therefore, the system must have been upgraded from RHEL6 -MIGRATION_TO_GRUB2_GUIDE_URL = 'https://access.redhat.com/solutions/2643721' - - -def has_legacy_grub(device): - try: - output = run(['file', '-s', device]) - except CalledProcessError as err: - msg = 'Failed to determine the file type for the special 
device `{0}`. Full error: `{1}`' - api.current_logger().warning(msg.format(device, str(err))) - - # According to `file` manpage, the exit code > 0 iff the file does not exists (meaning) - # that grub_lib.get_grub_devices() is unreliable for some reason (better stop the upgrade), - # or because the file type could not be determined. However, its manpage directly gives examples - # of file -s being used on block devices, so this should be unlikely - especially if one would - # consider that get_grub_devices was able to determine that it is a grub device. - raise StopActorExecution() - - grub_legacy_version_string = 'GRUB version 0.94' - return grub_legacy_version_string in output['stdout'] - - -def check_grub_disks_for_legacy_grub(): - # Both GRUB2 and Grub Legacy are recognized by `get_grub_devices` - grub_devices = grub_lib.get_grub_devices() - - legacy_grub_devices = [] - for device in grub_devices: - if has_legacy_grub(device): - legacy_grub_devices.append(device) - - if legacy_grub_devices: - details = ( - 'Leapp detected GRUB Legacy to be installed on the system. ' - 'The GRUB Legacy bootloader is unsupported on RHEL7 and GRUB2 must be used instead. ' - 'The presence of GRUB Legacy is possible on systems that have been upgraded from RHEL 6 in the past, ' - 'but required manual post-upgrade steps have not been performed. ' - 'Note that the in-place upgrade from RHEL 6 to RHEL 7 systems is in such a case ' - 'considered as unfinished.\n\n' - - 'GRUB Legacy has been detected on following devices:\n' - '{block_devices_fmt}\n' - ) - - hint = ( - 'Migrate to the GRUB2 bootloader on the reported devices. ' - 'Also finish other post-upgrade steps related to the previous in-place upgrade, the majority of which ' - 'is a part of the related preupgrade report for upgrades from RHEL 6 to RHEL 7.' - 'If you are not sure whether all previously required post-upgrade steps ' - 'have been performed, consider a clean installation of the RHEL 8 system instead. 
' - 'Note that the in-place upgrade to RHEL 8 can fail in various ways ' - 'if the RHEL 7 system is misconfigured.' - ) - - block_devices_fmt = '\n'.join(legacy_grub_devices) - create_report([ - reporting.Title("GRUB Legacy is used on the system"), - reporting.Summary(details.format(block_devices_fmt=block_devices_fmt)), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([reporting.Groups.BOOT]), - reporting.Remediation(hint=hint), - reporting.Groups([reporting.Groups.INHIBITOR]), - reporting.ExternalLink(url=MIGRATION_TO_GRUB2_GUIDE_URL, - title='How to install GRUB2 after a RHEL6 to RHEL7 upgrade'), - ]) diff --git a/repos/system_upgrade/el7toel8/actors/checklegacygrub/tests/test_check_legacy_grub.py b/repos/system_upgrade/el7toel8/actors/checklegacygrub/tests/test_check_legacy_grub.py deleted file mode 100644 index d6e5008e..00000000 --- a/repos/system_upgrade/el7toel8/actors/checklegacygrub/tests/test_check_legacy_grub.py +++ /dev/null @@ -1,45 +0,0 @@ -import pytest - -from leapp.libraries.actor import check_legacy_grub as check_legacy_grub_lib -from leapp.libraries.common import grub as grub_lib -from leapp.libraries.common.testutils import create_report_mocked -from leapp.utils.report import is_inhibitor - -VDA_WITH_LEGACY_GRUB = ( - '/dev/vda: x86 boot sector; GRand Unified Bootloader, stage1 version 0x3, ' - 'stage2 address 0x2000, stage2 segment 0x200, GRUB version 0.94; partition 1: ID=0x83, ' - 'active, starthead 32, startsector 2048, 1024000 sectors; partition 2: ID=0x83, starthead 221, ' - 'startsector 1026048, 19945472 sectors, code offset 0x48\n' -) - -NVME0N1_VDB_WITH_GRUB = ( - '/dev/nvme0n1: x86 boot sector; partition 1: ID=0x83, active, starthead 32, startsector 2048, 6291456 sectors; ' - 'partition 2: ID=0x83, starthead 191, startsector 6293504, 993921024 sectors, code offset 0x63' -) - - -@pytest.mark.parametrize( - ('grub_device_to_file_output', 'should_inhibit'), - [ - ({'/dev/vda': VDA_WITH_LEGACY_GRUB}, True), - 
({'/dev/nvme0n1': NVME0N1_VDB_WITH_GRUB}, False), - ({'/dev/vda': VDA_WITH_LEGACY_GRUB, '/dev/nvme0n1': NVME0N1_VDB_WITH_GRUB}, True) - ] -) -def test_check_legacy_grub(monkeypatch, grub_device_to_file_output, should_inhibit): - - def file_cmd_mock(cmd, *args, **kwargs): - assert cmd[:2] == ['file', '-s'] - return {'stdout': grub_device_to_file_output[cmd[2]]} - - monkeypatch.setattr(check_legacy_grub_lib, 'create_report', create_report_mocked()) - monkeypatch.setattr(grub_lib, 'get_grub_devices', lambda: list(grub_device_to_file_output.keys())) - monkeypatch.setattr(check_legacy_grub_lib, 'run', file_cmd_mock) - - check_legacy_grub_lib.check_grub_disks_for_legacy_grub() - - assert bool(check_legacy_grub_lib.create_report.called) == should_inhibit - if should_inhibit: - assert len(check_legacy_grub_lib.create_report.reports) == 1 - report = check_legacy_grub_lib.create_report.reports[0] - assert is_inhibitor(report) diff --git a/repos/system_upgrade/el7toel8/actors/checkmemcached/actor.py b/repos/system_upgrade/el7toel8/actors/checkmemcached/actor.py deleted file mode 100644 index a3e12a18..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkmemcached/actor.py +++ /dev/null @@ -1,24 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor.checkmemcached import check_memcached -from leapp.libraries.common.rpms import has_package -from leapp.models import DistributionSignedRPM -from leapp.reporting import Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class CheckMemcached(Actor): - """ - Check for incompatible changes in memcached configuration. - - Warn that memcached in RHEL8 no longer listens on the UDP port by default - and the default service configuration binds memcached to the loopback - interface. 
- """ - - name = 'check_memcached' - consumes = (DistributionSignedRPM,) - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - check_memcached(has_package(DistributionSignedRPM, 'memcached')) diff --git a/repos/system_upgrade/el7toel8/actors/checkmemcached/libraries/checkmemcached.py b/repos/system_upgrade/el7toel8/actors/checkmemcached/libraries/checkmemcached.py deleted file mode 100644 index 15f23435..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkmemcached/libraries/checkmemcached.py +++ /dev/null @@ -1,69 +0,0 @@ -import re - -from leapp import reporting -from leapp.libraries.stdlib import api, run - -COMMON_REPORT_TAGS = [reporting.Groups.SERVICES] - - -sysconfig_path = '/etc/sysconfig/memcached' - -related = [ - reporting.RelatedResource('package', 'memcached'), - reporting.RelatedResource('file', sysconfig_path) -] - - -def is_sysconfig_default(): - """Check if the memcached sysconfig file was not modified since installation.""" - try: - result = run(['rpm', '-V', '--nomtime', 'memcached'], checked=False) - return sysconfig_path not in result['stdout'] - except OSError as e: - api.current_logger().warning("rpm verification failed: %s", str(e)) - return True - - -def is_udp_disabled(): - """Check if UDP port is disabled in the sysconfig file.""" - with open(sysconfig_path) as f: - for line in f: - if re.match(r'^\s*OPTIONS=.*-U\s*0[^0-9]', line): - return True - return False - - -def check_memcached(memcached_installed): - """Report potential issues in memcached configuration.""" - if not memcached_installed: - api.current_logger().info('memcached package is not installed') - return - - default_memcached_conf = is_sysconfig_default() - disabled_udp_port = is_udp_disabled() - - if default_memcached_conf: - reporting.create_report([ - reporting.Title('memcached service is using default configuration'), - reporting.Summary('memcached in RHEL8 listens on loopback only and has UDP port disabled by default'), - 
reporting.Severity(reporting.Severity.MEDIUM), - reporting.Groups(COMMON_REPORT_TAGS), - ] + related) - - elif not disabled_udp_port: - reporting.create_report([ - reporting.Title('memcached has enabled UDP port'), - reporting.Summary( - 'memcached in RHEL7 has UDP port enabled by default, but it is disabled by default in RHEL8' - ), - reporting.Severity(reporting.Severity.MEDIUM), - reporting.Groups(COMMON_REPORT_TAGS), - ] + related) - - else: - reporting.create_report([ - reporting.Title('memcached has already disabled UDP port'), - reporting.Summary('memcached in RHEL8 has UDP port disabled by default'), - reporting.Severity(reporting.Severity.LOW), - reporting.Groups(COMMON_REPORT_TAGS), - ] + related) diff --git a/repos/system_upgrade/el7toel8/actors/checkmemcached/tests/unit_test_checkmemcached.py b/repos/system_upgrade/el7toel8/actors/checkmemcached/tests/unit_test_checkmemcached.py deleted file mode 100644 index c9d306d6..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkmemcached/tests/unit_test_checkmemcached.py +++ /dev/null @@ -1,49 +0,0 @@ -from leapp import reporting -from leapp.libraries.actor import checkmemcached -from leapp.libraries.common.testutils import create_report_mocked - - -def test_uninstalled(monkeypatch): - for sysconfig_default in (False, True): - for udp_disabled in (False, True): - monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) - monkeypatch.setattr(checkmemcached, 'is_sysconfig_default', lambda: sysconfig_default, ) - monkeypatch.setattr(checkmemcached, 'is_udp_disabled', lambda: udp_disabled) - - checkmemcached.check_memcached(False) - - assert reporting.create_report.called == 0 - - -def test_installed_defconf(monkeypatch): - for udp_disabled in (False, True): - monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) - monkeypatch.setattr(checkmemcached, 'is_sysconfig_default', lambda: True) - monkeypatch.setattr(checkmemcached, 'is_udp_disabled', lambda: udp_disabled) - - 
checkmemcached.check_memcached(True) - - assert reporting.create_report.called == 1 - assert reporting.create_report.report_fields['title'] == 'memcached service is using default configuration' - - -def test_installed_nodefconf_udp(monkeypatch): - monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) - monkeypatch.setattr(checkmemcached, 'is_sysconfig_default', lambda: False) - monkeypatch.setattr(checkmemcached, 'is_udp_disabled', lambda: False) - - checkmemcached.check_memcached(True) - - assert reporting.create_report.called == 1 - assert reporting.create_report.report_fields['title'] == 'memcached has enabled UDP port' - - -def test_installed_nodefconf_noudp(monkeypatch): - monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) - monkeypatch.setattr(checkmemcached, 'is_sysconfig_default', lambda: False) - monkeypatch.setattr(checkmemcached, 'is_udp_disabled', lambda: True) - - checkmemcached.check_memcached(True) - - assert reporting.create_report.called == 1 - assert reporting.create_report.report_fields['title'] == 'memcached has already disabled UDP port' diff --git a/repos/system_upgrade/el7toel8/actors/checkmultiplepackageversions/actor.py b/repos/system_upgrade/el7toel8/actors/checkmultiplepackageversions/actor.py deleted file mode 100644 index 37156dd5..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkmultiplepackageversions/actor.py +++ /dev/null @@ -1,22 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor.checkmultiplepackageversions import check -from leapp.models import InstalledRPM -from leapp.reporting import Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class CheckMultiplePackageVersions(Actor): - """ - Check for problematic 32bit packages installed together with 64bit ones. - - If a known problematic 32bit package is found, the upgrade will be inhibited with the detailed - report how to solve the problem if such a remedy exists. 
- """ - - name = 'multiple_package_versions' - consumes = (InstalledRPM,) - produces = (Report,) - tags = (IPUWorkflowTag, ChecksPhaseTag) - - def process(self): - check() diff --git a/repos/system_upgrade/el7toel8/actors/checkmultiplepackageversions/libraries/checkmultiplepackageversions.py b/repos/system_upgrade/el7toel8/actors/checkmultiplepackageversions/libraries/checkmultiplepackageversions.py deleted file mode 100644 index 3a59a0d5..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkmultiplepackageversions/libraries/checkmultiplepackageversions.py +++ /dev/null @@ -1,37 +0,0 @@ -from leapp.libraries.common.rpms import has_package -from leapp.models import InstalledRPM -from leapp.reporting import create_report, Groups, RelatedResource, Remediation, Severity, Summary, Title - -# package_name: remedy information -PROBLEM_PACKAGE_MAP = { - 'brlapi.i686': {'bugzilla': None}, - 'gnome-online-accounts-devel.i686': { - 'bugzilla': 'https://bugzilla.redhat.com/show_bug.cgi?id=1765627'}, - 'geocode-glib-devel.i686': { - 'bugzilla': 'https://bugzilla.redhat.com/show_bug.cgi?id=1765629'}} - - -def check(): - actual_problems = [] - related_resources = [] - for package, details in PROBLEM_PACKAGE_MAP.items(): - name, arch = package.split('.') - if has_package(InstalledRPM, name, arch) and has_package(InstalledRPM, name, 'x86_64'): - actual_problems.append(package) - # generate RelatedResources for the report - related_resources.append(RelatedResource('package', package)) - if details['bugzilla']: - related_resources.append(RelatedResource('bugzilla', details['bugzilla'])) - - if actual_problems: - remediation = ["yum", "remove", "-y"] + actual_problems - # create a single report entry for all problematic packages - create_report([ - Title('Some packages have both 32bit and 64bit version installed which are known ' - 'to cause rpm transaction test to fail'), - Summary('The following packages have both 32bit and 64bit version installed which are known ' - 'to cause 
rpm transaction test to fail:\n{}'.format( - '\n'.join(['- {}'.format(a) for a in actual_problems]))), - Severity(Severity.HIGH), - Groups([Groups.INHIBITOR]), - Remediation(commands=[remediation])] + related_resources) diff --git a/repos/system_upgrade/el7toel8/actors/checkmultiplepackageversions/tests/test_multiplepackageversions.py b/repos/system_upgrade/el7toel8/actors/checkmultiplepackageversions/tests/test_multiplepackageversions.py deleted file mode 100644 index f8692aab..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkmultiplepackageversions/tests/test_multiplepackageversions.py +++ /dev/null @@ -1,79 +0,0 @@ -import mock - -from leapp.actors import Actor -from leapp.models import InstalledRPM, Report, RPM - - -def test_x32_x64(current_actor_context): - problem_rpms = [ - RPM(name='brlapi', version='0.1', release='1.sm01', epoch='1', packager="RH_PACKAGER", arch='i686', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), - RPM(name='gnome-online-accounts-devel', version='0.1', release='1.sm01', epoch='1', - packager="RH_PACKAGER", arch='i686', pgpsig='SOME_OTHER_SIG_X'), - RPM(name='geocode-glib-devel', version='0.1', release='1.sm01', epoch='1', packager="RH_PACKAGER", - arch='i686', pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 5326810137017186'), - RPM(name='brlapi', version='0.1', release='1.sm01', epoch='1', packager="RH_PACKAGER", arch='x86_64', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), - RPM(name='gnome-online-accounts-devel', version='0.1', release='1.sm01', epoch='1', - packager="RH_PACKAGER", arch='x86_64', pgpsig='SOME_OTHER_SIG_X'), - RPM(name='geocode-glib-devel', version='0.1', release='1.sm01', epoch='1', packager="RH_PACKAGER", - arch='x86_64', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 5326810137017186') - ] - - current_actor_context.feed(InstalledRPM(items=problem_rpms)) - current_actor_context.run() - report = 
current_actor_context.consume(Report)[0].report - assert report['title'] == ('Some packages have both 32bit and 64bit version installed which are known to' - ' cause rpm transaction test to fail') - assert {p['title'] for p in report['detail']['related_resources'] if p['scheme'] == 'package'} == \ - {'brlapi.i686', 'gnome-online-accounts-devel.i686', 'geocode-glib-devel.i686'} - - -def test_1_package(current_actor_context): - pkg = 'geocode-glib-devel' - problem_rpms = [ - RPM(name=pkg, version='0.1', release='1.sm01', epoch='1', - packager="RH_PACKAGER", arch='x86_64', pgpsig='SOME_OTHER_SIG_X'), - RPM(name=pkg, version='0.1', release='1.sm01', epoch='1', packager="RH_PACKAGER", - arch='i686', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 5326810137017186') - ] - current_actor_context.feed(InstalledRPM(items=problem_rpms)) - current_actor_context.run() - report = current_actor_context.consume(Report)[0].report - assert report['title'] == ('Some packages have both 32bit and 64bit version installed which are known to' - ' cause rpm transaction test to fail') - assert {p['title'] for p in report['detail']['related_resources'] if p['scheme'] == 'package'} == \ - {'{}.i686'.format(pkg)} - - -def test_x64_only(current_actor_context): - ok_rpms = [ - RPM(name='brlapi', version='0.1', release='1.sm01', epoch='1', packager="RH_PACKAGER", arch='x86_64', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), - RPM(name='gnome-online-accounts-devel', version='0.1', release='1.sm01', epoch='1', - packager="RH_PACKAGER", arch='x86_64', pgpsig='SOME_OTHER_SIG_X'), - RPM(name='geocode-glib-devel', version='0.1', release='1.sm01', epoch='1', packager="RH_PACKAGER", - arch='x86_64', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 5326810137017186') - ] - - current_actor_context.feed(InstalledRPM(items=ok_rpms)) - current_actor_context.run() - assert not current_actor_context.consume(Report) - - -def 
test_x32_only(current_actor_context): - ok_rpms = [ - RPM(name='brlapi', version='0.1', release='1.sm01', epoch='1', packager="RH_PACKAGER", arch='i686', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), - RPM(name='gnome-online-accounts-devel', version='0.1', release='1.sm01', epoch='1', - packager="RH_PACKAGER", arch='i686', pgpsig='SOME_OTHER_SIG_X'), - RPM(name='geocode-glib-devel', version='0.1', release='1.sm01', epoch='1', packager="RH_PACKAGER", - arch='i686', pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 5326810137017186'), - ] - - current_actor_context.feed(InstalledRPM(items=ok_rpms)) - current_actor_context.run() - assert not current_actor_context.consume(Report) diff --git a/repos/system_upgrade/el7toel8/actors/checkntp/actor.py b/repos/system_upgrade/el7toel8/actors/checkntp/actor.py deleted file mode 100644 index 7bf4715e..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkntp/actor.py +++ /dev/null @@ -1,25 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor.checkntp import check_ntp -from leapp.models import DistributionSignedRPM, NtpMigrationDecision, Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class CheckNtp(Actor): - """ - Check if ntp and/or ntpdate configuration needs to be migrated. 
- """ - - name = 'check_ntp' - consumes = (DistributionSignedRPM,) - produces = (Report, NtpMigrationDecision) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - installed_packages = set() - - signed_rpms = self.consume(DistributionSignedRPM) - for rpm_pkgs in signed_rpms: - for pkg in rpm_pkgs.items: - installed_packages.add(pkg.name) - - self.produce(check_ntp(installed_packages)) diff --git a/repos/system_upgrade/el7toel8/actors/checkntp/libraries/checkntp.py b/repos/system_upgrade/el7toel8/actors/checkntp/libraries/checkntp.py deleted file mode 100644 index 9ce66775..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkntp/libraries/checkntp.py +++ /dev/null @@ -1,82 +0,0 @@ -import base64 -import io -import os -import tarfile - -from leapp import reporting -from leapp.libraries.stdlib import api, CalledProcessError, run -from leapp.models import NtpMigrationDecision - -files = [ - '/etc/ntp.conf', '/etc/ntp/keys', - '/etc/ntp/crypto/pw', '/etc/ntp/step-tickers' -] - -related = [ - reporting.RelatedResource('package', 'ntpd'), - reporting.RelatedResource('package', 'chrony'), -] + [reporting.RelatedResource('file', f) for f in files] - - -# Check if a service is active and enabled -def check_service(name): - for state in ['active', 'enabled']: - try: - run(['systemctl', 'is-{}'.format(state), name]) - api.current_logger().debug('{} is {}'.format(name, state)) - except CalledProcessError: - api.current_logger().debug('{} is not {}'.format(name, state)) - return False - - return True - - -# Check if a file exists -def is_file(name): - return os.path.isfile(name) - - -# Get a base64-encoded gzipped tarball of specified files -def get_tgz64(filenames): - stream = io.BytesIO() - tar = tarfile.open(fileobj=stream, mode='w:gz') - for filename in filenames: - if os.path.isfile(filename): - tar.add(filename) - tar.close() - - return base64.b64encode(stream.getvalue()) - - -# Check services from the ntp packages for migration -def 
check_ntp(installed_packages): - service_data = [('ntpd', 'ntp', '/etc/ntp.conf'), - ('ntpdate', 'ntpdate', '/etc/ntp/step-tickers'), - ('ntp-wait', 'ntp-perl', None)] - - migrate_services = [] - migrate_configs = [] - for service, package, main_config in service_data: - if package in installed_packages and \ - check_service('{}.service'.format(service)) and \ - (not main_config or is_file(main_config)): - migrate_services.append(service) - if main_config: - migrate_configs.append(service) - - if migrate_configs: - reporting.create_report([ - reporting.Title('{} configuration will be migrated'.format(' and '.join(migrate_configs))), - reporting.Summary('{} service(s) detected to be enabled and active'.format(', '.join(migrate_services))), - reporting.Severity(reporting.Severity.LOW), - reporting.Groups([reporting.Groups.SERVICES, reporting.Groups.TIME_MANAGEMENT]), - ] + related) - - # Save configuration files that will be renamed in the upgrade - config_tgz64 = get_tgz64(files) - else: - api.current_logger().info('ntpd/ntpdate configuration will not be migrated') - migrate_services = [] - config_tgz64 = '' - - return NtpMigrationDecision(migrate_services=migrate_services, config_tgz64=config_tgz64) diff --git a/repos/system_upgrade/el7toel8/actors/checkntp/tests/unit_test_checkntp.py b/repos/system_upgrade/el7toel8/actors/checkntp/tests/unit_test_checkntp.py deleted file mode 100644 index b806ec9d..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkntp/tests/unit_test_checkntp.py +++ /dev/null @@ -1,64 +0,0 @@ -import base64 -import io -import os -import re -import tarfile -import tempfile - -from leapp import reporting -from leapp.libraries.actor import checkntp -from leapp.libraries.common.testutils import create_report_mocked - - -def test_nomigration(monkeypatch): - monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) - monkeypatch.setattr(checkntp, 'check_service', lambda _: False) - monkeypatch.setattr(checkntp, 'is_file', lambda _: 
False) - monkeypatch.setattr(checkntp, 'get_tgz64', lambda _: '') - - checkntp.check_ntp(set(['chrony', 'linuxptp', 'xterm'])) - - assert reporting.create_report.called == 0 - - -def test_migration(monkeypatch): - for packages, services, migrate in [ - (['ntp'], ['ntpd'], ['ntpd']), - (['ntp', 'ntpdate'], ['ntpd'], ['ntpd']), - (['ntpdate'], ['ntpdate'], ['ntpdate']), - (['ntp', 'ntpdate'], ['ntpdate'], ['ntpdate']), - (['ntp', 'ntpdate'], ['ntpd', 'ntpdate'], ['ntpd', 'ntpdate']), - (['ntp', 'ntpdate', 'ntp-perl'], ['ntpd', 'ntpdate'], ['ntpd', 'ntpdate']), - (['ntp', 'ntpdate'], ['ntpd', 'ntpdate', 'ntp-wait'], ['ntpd', 'ntpdate']), - (['ntp', 'ntpdate', 'ntp-perl'], ['ntpd', 'ntpdate', 'ntp-wait'], ['ntpd', 'ntpdate', 'ntp-wait']), - ]: - monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) - monkeypatch.setattr(checkntp, 'check_service', lambda service: service[:-8] in services) - monkeypatch.setattr(checkntp, 'is_file', lambda _: True) - monkeypatch.setattr(checkntp, 'get_tgz64', lambda _: '') - - decision = checkntp.check_ntp(set(packages)) - - assert reporting.create_report.called == 1 - assert 'configuration will be migrated' in reporting.create_report.report_fields['title'] - for service in ['ntpd', 'ntpdate']: - migrated = re.search(r'\b{}\b'.format(service), - reporting.create_report.report_fields['title']) is not None - assert migrated == (service in migrate) - - assert decision.migrate_services == migrate - - -def test_tgz64(monkeypatch): - f, name = tempfile.mkstemp() - os.close(f) - tgz64 = checkntp.get_tgz64([name]) - - stream = io.BytesIO(base64.b64decode(tgz64)) - tar = tarfile.open(fileobj=stream, mode='r:gz') - names = tar.getnames() - - tar.close() - os.unlink(name) - - assert names == [name.lstrip('/')] diff --git a/repos/system_upgrade/el7toel8/actors/checkpostfix/actor.py b/repos/system_upgrade/el7toel8/actors/checkpostfix/actor.py deleted file mode 100644 index 690e9de8..00000000 --- 
a/repos/system_upgrade/el7toel8/actors/checkpostfix/actor.py +++ /dev/null @@ -1,66 +0,0 @@ -from leapp import reporting -from leapp.actors import Actor -from leapp.models import DistributionSignedRPM -from leapp.reporting import create_report, Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class CheckPostfix(Actor): - """ - Check if postfix is installed, check whether configuration update is needed. - """ - - name = 'check_postfix' - consumes = (DistributionSignedRPM,) - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - for fact in self.consume(DistributionSignedRPM): - for rpm in fact.items: - if rpm.name == 'postfix': - create_report([ - reporting.Title('Postfix has incompatible changes in the next major version'), - reporting.Summary( - 'Postfix 3.x has so called "compatibility safety net" that runs Postfix programs ' - 'with backwards-compatible default settings. It will log a warning whenever ' - 'backwards-compatible default setting may be required for continuity of service. ' - 'Based on this logging the system administrator can decide if any ' - 'backwards-compatible settings need to be made permanent in main.cf or master.cf, ' - 'before turning off the backwards-compatibility safety net.\n' - 'The backward compatibility safety net is by default turned off in Red Hat ' - 'Enterprise Linux 8.\n' - 'It can be turned on by running: "postconf -e compatibility_level=0\n' - 'It can be turned off by running: "postconf -e compatibility_level=2\n\n' - 'In the Postfix MySQL database client, the default "option_group" value has changed ' - 'to "client", i.e. it now reads options from the [client] group from the MySQL ' - 'configuration file. To disable it, set "option_group" to the empty string.\n\n' - 'The postqueue command no longer forces all message arrival times to be reported ' - 'in UTC. 
To get the old behavior, set TZ=UTC in main.cf:import_environment.\n\n' - 'Postfix 3.2 enables elliptic curve negotiation. This changes the default ' - 'smtpd_tls_eecdh_grade setting to "auto", and introduces a new parameter ' - '"tls_eecdh_auto_curves" with the names of curves that may be negotiated.\n\n' - 'The "master.cf" chroot default value has changed from "y" (yes) to "n" (no). ' - 'This applies to master.cf services where chroot field is not explicitly ' - 'specified.\n\n' - 'The "append_dot_mydomain" default value has changed from "yes" to "no". You may ' - 'need changing it to "yes" if senders cannot use complete domain names in e-mail ' - 'addresses.\n\n' - 'The "relay_domains" default value has changed from "$mydestination" to the empty ' - 'value. This could result in unexpected "Relay access denied" errors or ETRN errors, ' - 'because now will postfix by default relay only for the localhost.\n\n' - 'The "mynetworks_style" default value has changed from "subnet" to "host". ' - 'This parameter is used to implement the "permit_mynetworks" feature. The change ' - 'could result in unexpected "access denied" errors, because postfix will now by ' - 'default trust only the local machine, not the remote SMTP clients on the ' - 'same IP subnetwork.\n\n' - 'Postfix now supports dynamically loaded database plugins. Plugins are shipped ' - 'in individual RPM sub-packages. Correct database plugins have to be installed, ' - 'otherwise the specific database client will not work. 
For example for PostgreSQL ' - 'map to work, the postfix-pgsql RPM package has to be installed.\n', - ), - reporting.Severity(reporting.Severity.LOW), - reporting.Groups([reporting.Groups.SERVICES, reporting.Groups.EMAIL]), - reporting.RelatedResource('package', 'postfix') - ]) - return diff --git a/repos/system_upgrade/el7toel8/actors/checkpostfix/tests/component_test_checkpostfix.py b/repos/system_upgrade/el7toel8/actors/checkpostfix/tests/component_test_checkpostfix.py deleted file mode 100644 index bc2229bc..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkpostfix/tests/component_test_checkpostfix.py +++ /dev/null @@ -1,33 +0,0 @@ -from leapp.models import DistributionSignedRPM, RPM -from leapp.reporting import Report -from leapp.snactor.fixture import current_actor_context - -RH_PACKAGER = 'Red Hat, Inc. ' - -with_postfix = [ - RPM(name='grep', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), - RPM(name='postfix', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51')] - -without_postfix = [ - RPM(name='grep', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), - RPM(name='sed', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51')] - - -def create_modulesfacts(installed_rpm): - return DistributionSignedRPM(items=installed_rpm) - - -def test_actor_without_postfix_package(current_actor_context): - current_actor_context.feed(create_modulesfacts(installed_rpm=without_postfix)) - current_actor_context.run() - assert not current_actor_context.consume(Report) - - -def test_actor_with_postfix_package(current_actor_context): - 
current_actor_context.feed(create_modulesfacts(installed_rpm=with_postfix)) - current_actor_context.run() - assert current_actor_context.consume(Report) diff --git a/repos/system_upgrade/el7toel8/actors/checkremovedenvvars/actor.py b/repos/system_upgrade/el7toel8/actors/checkremovedenvvars/actor.py deleted file mode 100644 index af11f78d..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkremovedenvvars/actor.py +++ /dev/null @@ -1,19 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import checkremovedenvvars -from leapp.reporting import Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class CheckRemovedEnvVars(Actor): - """ - Check for usage of removed environment variables and inhibit the upgrade - if they are used. - """ - - name = 'check_removed_envvars' - consumes = () - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - checkremovedenvvars.process() diff --git a/repos/system_upgrade/el7toel8/actors/checkremovedenvvars/libraries/checkremovedenvvars.py b/repos/system_upgrade/el7toel8/actors/checkremovedenvvars/libraries/checkremovedenvvars.py deleted file mode 100644 index b6372965..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkremovedenvvars/libraries/checkremovedenvvars.py +++ /dev/null @@ -1,25 +0,0 @@ -from leapp import reporting -from leapp.libraries.common.config import get_all_envs -from leapp.reporting import create_report - -DEPRECATED_VARS = ['LEAPP_GRUB_DEVICE'] - - -def process(): - - vars_to_report = [] - - for var in get_all_envs(): - if var.name in DEPRECATED_VARS: - vars_to_report.append(var.name) - - if vars_to_report: - vars_str = ' '.join(vars_to_report) - create_report([ - reporting.Title('Leapp detected removed environment variable usage'), - reporting.Summary('The following Leapp related environment variable was removed: ' + vars_str), - reporting.Severity(reporting.Severity.HIGH), - reporting.Remediation(hint='Please do not use the reported 
variables'), - reporting.Groups(['inhibitor']), - reporting.Groups([reporting.Groups.UPGRADE_PROCESS]), - ]) diff --git a/repos/system_upgrade/el7toel8/actors/checkremovedenvvars/tests/test_checkremovedenvvars.py b/repos/system_upgrade/el7toel8/actors/checkremovedenvvars/tests/test_checkremovedenvvars.py deleted file mode 100644 index 6fe52193..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkremovedenvvars/tests/test_checkremovedenvvars.py +++ /dev/null @@ -1,25 +0,0 @@ -import pytest - -from leapp.libraries.actor import checkremovedenvvars -from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked -from leapp.libraries.stdlib import api -from leapp.utils.report import is_inhibitor - - -def test_removed_vars(monkeypatch): - envars = {'LEAPP_GRUB_DEVICE': '/dev/sda'} - monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(envars=envars)) - monkeypatch.setattr(api.current_actor, "produce", produce_mocked()) - checkremovedenvvars.process() - assert api.current_actor.produce.called == 1 - assert 'LEAPP_GRUB_DEVICE' in api.current_actor.produce.model_instances[0].report['summary'] - assert is_inhibitor(api.current_actor.produce.model_instances[0].report) - - -def test_no_removed_vars(monkeypatch): - envars = {'LEAPP_SKIP_RHSM': '1'} - monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(envars=envars)) - monkeypatch.setattr(api.current_actor, "produce", produce_mocked()) - checkremovedenvvars.process() - assert not api.current_actor.produce.called - assert not api.current_actor.produce.model_instances diff --git a/repos/system_upgrade/el7toel8/actors/checkremovedpammodules/actor.py b/repos/system_upgrade/el7toel8/actors/checkremovedpammodules/actor.py deleted file mode 100644 index d2e92398..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkremovedpammodules/actor.py +++ /dev/null @@ -1,68 +0,0 @@ -from leapp import reporting -from leapp.actors import Actor -from leapp.exceptions import StopActorExecutionError -from 
leapp.libraries.stdlib import api -from leapp.models import PamConfiguration, Report -from leapp.reporting import create_report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class CheckRemovedPamModules(Actor): - """ - Check for modules that are not available in RHEL 8 anymore - - At this moment, we check only for pam_tally2. Few more modules - are already covered in RemoveOldPAMModulesApply actor - """ - - name = 'removed_pam_modules' - consumes = (PamConfiguration, ) - produces = (Report, ) - tags = (ChecksPhaseTag, IPUWorkflowTag, ) - - def process(self): - messages = self.consume(PamConfiguration) - config = next(messages, None) - if list(messages): - api.current_logger().warning('Unexpectedly received more than one PamConfiguration message.') - if not config: - raise StopActorExecutionError( - 'Could not check pam configuration', details={'details': 'No PamConfiguration facts found.'} - ) - - # This list contain tuples of removed modules and their recommended replacements - removed_modules = [ - ('pam_tally2', 'pam_faillock'), - ] - found_services = set() - found_modules = set() - replacements = set() - for service in config.services: - for module in removed_modules: - removed = module[0] - replacement = module[1] - if removed in service.modules: - found_services.add(service.service) - found_modules.add(removed) - replacements.add(replacement) - - if found_modules: - create_report([ - reporting.Title('The {} pam module(s) no longer available'.format(', '.join(found_modules))), - reporting.Summary('The services {} using PAM are configured to ' - 'use {} module(s), which is no longer available ' - 'in Red Hat Enterprise Linux 8.'.format( - ', '.join(found_services), ', '.join(found_modules))), - reporting.Remediation( - hint='If you depend on its functionality, it is ' - 'recommended to migrate to {}. 
Otherwise ' - 'please remove the pam module(s) from all the files ' - 'under /etc/pam.d/.'.format(', '.join(replacements)) - ), - reporting.ExternalLink( - url='https://access.redhat.com/solutions/7004774', - title='Leapp preupgrade fails with: The pam_tally2 pam module(s) no longer available' - ), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([reporting.Groups.INHIBITOR]), - ] + [reporting.RelatedResource('pam', r) for r in replacements | found_modules]) diff --git a/repos/system_upgrade/el7toel8/actors/checksendmail/actor.py b/repos/system_upgrade/el7toel8/actors/checksendmail/actor.py deleted file mode 100644 index ef59b103..00000000 --- a/repos/system_upgrade/el7toel8/actors/checksendmail/actor.py +++ /dev/null @@ -1,63 +0,0 @@ -from leapp import reporting -from leapp.actors import Actor -from leapp.libraries.actor import checksendmail -from leapp.libraries.common.rpms import has_package -from leapp.libraries.common.tcpwrappersutils import config_applies_to_daemon -from leapp.models import DistributionSignedRPM, SendmailMigrationDecision, TcpWrappersFacts -from leapp.reporting import create_report, Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - -COMMON_REPORT_TAGS = [reporting.Groups.SERVICES, reporting.Groups.EMAIL] - -related = [ - reporting.RelatedResource('file', f) for f in checksendmail.get_conf_files() - ] + [reporting.RelatedResource('package', 'sendmail')] - - -class CheckSendmail(Actor): - """ - Check if sendmail is installed, check whether configuration update is needed, inhibit upgrade if TCP wrappers - are used. 
- """ - - name = 'check_sendmail' - consumes = (DistributionSignedRPM, TcpWrappersFacts,) - produces = (Report, SendmailMigrationDecision,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - if not has_package(DistributionSignedRPM, 'sendmail'): - return - - if config_applies_to_daemon(next(self.consume(TcpWrappersFacts)), 'sendmail'): - create_report([ - reporting.Title('TCP wrappers support removed in the next major version'), - reporting.Summary( - 'TCP wrappers are legacy host-based ACL (Access Control List) system ' - 'which has been removed in the next major version of RHEL.' - ), - reporting.Remediation( - hint='Please migrate from TCP wrappers to some other access control mechanism and delete ' - 'sendmail from the /etc/hosts.[allow|deny].' - ), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups(COMMON_REPORT_TAGS + [reporting.Groups.NETWORK]), - reporting.Groups([reporting.Groups.INHIBITOR]) - ] + related) - - return - migrate_files = checksendmail.check_files_for_compressed_ipv6() - if migrate_files: - create_report([ - reporting.Title('sendmail configuration will be migrated'), - reporting.Summary( - 'IPv6 addresses will be uncompressed, check all IPv6 addresses in all sendmail ' - 'configuration files for correctness.' 
- ), - reporting.Severity(reporting.Severity.LOW), - reporting.Groups(COMMON_REPORT_TAGS) - ] + related) - - self.produce(SendmailMigrationDecision(migrate_files=migrate_files)) - else: - self.log.info('The sendmail configuration seems compatible - it won\'t be migrated.') diff --git a/repos/system_upgrade/el7toel8/actors/checksendmail/libraries/checksendmail.py b/repos/system_upgrade/el7toel8/actors/checksendmail/libraries/checksendmail.py deleted file mode 100644 index fb7a9de7..00000000 --- a/repos/system_upgrade/el7toel8/actors/checksendmail/libraries/checksendmail.py +++ /dev/null @@ -1,33 +0,0 @@ -import os -import re - -SendmailConfDir = '/etc/mail' -SendmailConfFiles = ['sendmail.cf', 'sendmail.mc', 'submit.cf', 'submit.mc'] -# false positives blacklist -rfp = re.compile(r'(^\s*RIPv6:::1\b)|(@\s+\[IPv6:::1\]\s+>)') - - -def get_conf_files(): - conf_files = [os.path.join(SendmailConfDir, f) for f in SendmailConfFiles] - return conf_files - - -def check_false_positives(filename, line): - return filename in ['sendmail.cf', 'submit.cf'] and rfp.search(line) is not None - - -def check_files_for_compressed_ipv6(): - conf_files = get_conf_files() - migrate_files = [] - files = [os.path.join(SendmailConfDir, re.sub(r'\.db$', '', f)) for f in os.listdir(SendmailConfDir) - if f.endswith('.db')] + conf_files - regex = re.compile(r'IPv6:[0-9a-fA-F:]*::') - for filename in files: - if not os.path.exists(filename): - continue - with open(filename) as file_check: - for line in file_check: - if regex.search(line) and not check_false_positives(os.path.basename(filename), line): - migrate_files.append(filename) - break - return migrate_files diff --git a/repos/system_upgrade/el7toel8/actors/checksendmail/tests/component_test_checksendmail.py b/repos/system_upgrade/el7toel8/actors/checksendmail/tests/component_test_checksendmail.py deleted file mode 100644 index d76f0687..00000000 --- a/repos/system_upgrade/el7toel8/actors/checksendmail/tests/component_test_checksendmail.py 
+++ /dev/null @@ -1,38 +0,0 @@ -from leapp.models import DaemonList, DistributionSignedRPM, RPM, SendmailMigrationDecision, TcpWrappersFacts -from leapp.reporting import Report -from leapp.utils.report import is_inhibitor - -RH_PACKAGER = 'Red Hat, Inc. ' - -with_sendmail = [ - RPM(name='grep', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), - RPM(name='sendmail', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51')] - -without_sendmail = [ - RPM(name='grep', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), - RPM(name='sed', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51')] - - -def create_modulesfacts(installed_rpm): - return DistributionSignedRPM(items=installed_rpm) - - -def test_actor_without_sendmail_package(current_actor_context): - tcpwrap_facts = TcpWrappersFacts(daemon_lists=[]) - current_actor_context.feed(create_modulesfacts(installed_rpm=without_sendmail)) - current_actor_context.feed(tcpwrap_facts) - current_actor_context.run() - assert not current_actor_context.consume(Report) - - -def test_actor_with_tcp_wrappers(current_actor_context): - tcpwrap_facts = TcpWrappersFacts(daemon_lists=[DaemonList(value=['sendmail'])]) - current_actor_context.feed(create_modulesfacts(installed_rpm=with_sendmail)) - current_actor_context.feed(tcpwrap_facts) - current_actor_context.run() - report_fields = current_actor_context.consume(Report)[0].report - assert is_inhibitor(report_fields) diff --git a/repos/system_upgrade/el7toel8/actors/checksendmail/tests/unit_test_checksendmail.py 
b/repos/system_upgrade/el7toel8/actors/checksendmail/tests/unit_test_checksendmail.py deleted file mode 100644 index c55a06a4..00000000 --- a/repos/system_upgrade/el7toel8/actors/checksendmail/tests/unit_test_checksendmail.py +++ /dev/null @@ -1,23 +0,0 @@ -import os - -import pytest -from six import text_type - -from leapp.libraries.actor import checksendmail - - -@pytest.mark.parametrize('test_input,migrate', [ - ('IPv6:::1\n', True), - ('IPv6:0:0:0:0:0:0:0:1\n', False), -]) -def test_check_migration(tmpdir, monkeypatch, test_input, migrate): - test_cfg_path = text_type(tmpdir) - test_cfg_file = os.path.join(test_cfg_path, 'sendmail.cf') - with open(test_cfg_file, 'w') as file_out: - file_out.write(test_input) - monkeypatch.setattr(checksendmail, 'SendmailConfDir', test_cfg_path) - files = checksendmail.check_files_for_compressed_ipv6() - if migrate: - assert files == [test_cfg_file] - else: - assert files == [] diff --git a/repos/system_upgrade/el7toel8/actors/checkwireshark/actor.py b/repos/system_upgrade/el7toel8/actors/checkwireshark/actor.py deleted file mode 100644 index ed7f8a37..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkwireshark/actor.py +++ /dev/null @@ -1,36 +0,0 @@ -from leapp import reporting -from leapp.actors import Actor -from leapp.libraries.common.rpms import has_package -from leapp.models import DistributionSignedRPM, Report -from leapp.reporting import create_report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class CheckWireshark(Actor): - """ - Report a couple of changes in tshark usage - """ - - name = 'check_wireshark' - consumes = (DistributionSignedRPM, ) - produces = (Report, ) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - if has_package(DistributionSignedRPM, 'wireshark'): - create_report([ - reporting.Title('tshark: CLI options and output changes'), - reporting.Summary( - 'The -C suboption for -N option for asynchronous DNS name resolution ' - 'has been completely removed from tshark. 
The reason for this is that ' - 'the asynchronous DNS resolution is now the only resolution available ' - 'so there is no need for -C. If you are using -NC with tshark in any ' - 'of your scripts, please remove it.' - '\n\n' - 'When using -H option with capinfos, the output no longer shows MD5 hashes. ' - 'Now it shows SHA256 instead. SHA1 might get removed very soon as well. ' - 'If you use these output values, please change your scripts.'), - reporting.Severity(reporting.Severity.LOW), - reporting.Groups([reporting.Groups.MONITORING, reporting.Groups.SANITY, reporting.Groups.TOOLS]), - reporting.RelatedResource('package', 'wireshark'), - ]) diff --git a/repos/system_upgrade/el7toel8/actors/checkwireshark/tests/component_test_checkwireshark.py b/repos/system_upgrade/el7toel8/actors/checkwireshark/tests/component_test_checkwireshark.py deleted file mode 100644 index 648882e6..00000000 --- a/repos/system_upgrade/el7toel8/actors/checkwireshark/tests/component_test_checkwireshark.py +++ /dev/null @@ -1,29 +0,0 @@ -from leapp.models import DistributionSignedRPM, RPM -from leapp.reporting import Report -from leapp.snactor.fixture import current_actor_context - -RH_PACKAGER = 'Red Hat, Inc. 
' - - -def test_actor_with_grep_package(current_actor_context): - rpms = [ - RPM(name='wireshark', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), - RPM(name='powertop', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51')] - - current_actor_context.feed(DistributionSignedRPM(items=rpms)) - current_actor_context.run() - assert current_actor_context.consume(Report) - - -def test_actor_without_grep_package(current_actor_context): - rpms = [ - RPM(name='powertop', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), - RPM(name='sed', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51')] - - current_actor_context.feed(DistributionSignedRPM(items=rpms)) - current_actor_context.run() - assert not current_actor_context.consume(Report) diff --git a/repos/system_upgrade/el7toel8/actors/cupscheck/actor.py b/repos/system_upgrade/el7toel8/actors/cupscheck/actor.py deleted file mode 100644 index c0b34b75..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupscheck/actor.py +++ /dev/null @@ -1,27 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import cupscheck -from leapp.models import CupsChangedFeatures, Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class CupsCheck(Actor): - """ - Reports changes in configuration between CUPS 1.6.3 and 2.2.6 - - Reports if user configuration contains features (interface scripts), - directives (Include, PrintcapFormat, PassEnv, SetEnv, - ServerCertificate, ServerKey) or directive values (Digest, - BasicDigest). 
Some of them were removed for security reasons - (interface scripts and directive Include), moved - to cups-files.conf for security reasons (PassEnv, SetEnv). - Others were removed (ServerCertificate, ServerKey, Digest, - BasicDigest) or moved (PrintcapFormat) due deprecation. - """ - - name = 'cups_check' - consumes = (CupsChangedFeatures,) - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - cupscheck.make_reports() diff --git a/repos/system_upgrade/el7toel8/actors/cupscheck/libraries/cupscheck.py b/repos/system_upgrade/el7toel8/actors/cupscheck/libraries/cupscheck.py deleted file mode 100644 index 0f990959..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupscheck/libraries/cupscheck.py +++ /dev/null @@ -1,219 +0,0 @@ -from leapp import reporting -from leapp.libraries.stdlib import api -from leapp.models import CupsChangedFeatures - - -def _get_input_model(model): - """ - Gets data model from an actor. - - :param obj model: object of model which data will be consumed - """ - return next(api.consume(model), None) - - -def check_interface_scripts(facts, report_func): - """ - Checks if the data model tells interface scripts are used - and produces a report. - - :param obj facts: model object containing info about CUPS configuration - :param func report_func: creates report - """ - title = ('CUPS no longer supports usage of interface scripts') - summary = ('Interface scripts are no longer supported due to ' - 'security issues - an attacker could provide ' - 'malicious script which will be run during printing.') - hint = ('Install the queue with PPD file for the printer ' - 'if available or install the queue with generic PPD, ' - 'add *cupsFilter2 directive into PPD of installed ' - 'queue (in /etc/cups/ppd) and reinstall the queue with modified PPD. ' - 'The interface script needs to have permissions 750 and ' - 'ownership root:lp. 
How to write *cupsFilter2 keyword ' - 'is described at https://www.cups.org/doc/spec-ppd.html#cupsFilter2 ' - 'and the script needs to be put into /usr/lib/cups/filter ' - 'or you need to use an absolute path to the script ' - 'in *cupsFilter2 directive.') - if facts.interface: - args = [ - reporting.Title(title), - reporting.Summary(summary), - reporting.Groups([reporting.Groups.DRIVERS]), - reporting.Severity(reporting.Severity.MEDIUM), - reporting.Remediation(hint=hint), - reporting.ExternalLink( - title='Upstream documentation for the cupsFilter2 PPD keyword', - url='https://www.cups.org/doc/spec-ppd.html#cupsFilter2' - ) - ] - - report_func(args) - - -def check_include_directive(facts, report_func): - """ - Checks if the data model tells include directives are used - and produces a report. - - :param obj facts: model object containing info about CUPS configuration - :param func report_func: creates report - """ - title = ('CUPS no longer supports usage of Include directive') - summary = ('Include directive was removed due to security reasons. ' - 'Contents of found included files will be appended to ' - 'cupsd.conf') - if facts.include: - args = [ - reporting.Title(title), - reporting.Summary(summary), - reporting.Groups([reporting.Groups.SERVICES]), - reporting.Severity(reporting.Severity.MEDIUM), - ] + [reporting.RelatedResource('file', f) for f in facts.include_files] - - report_func(args) - - -def check_printcap_directive(facts, report_func): - """ - Checks if the data model tells printcapformat directive is used - and produces a report. - - :param obj facts: model object containing info about CUPS configuration - :param func report_func: creates report - """ - title = ('PrintcapFormat directive is no longer in cupsd.conf') - summary = ( - 'The directive was moved into /etc/cups/cups-files.conf ' - 'because it is deprecated. This will be handled automatically during ' - 'the upgrade process.' 
- ) - if facts.printcap: - args = [ - reporting.Title(title), - reporting.Summary(summary), - reporting.Groups([reporting.Groups.SERVICES]), - reporting.Severity(reporting.Severity.LOW), - reporting.RelatedResource('file', '/etc/cups/cupsd.conf'), - reporting.RelatedResource('file', '/etc/cups/cups-files.conf') - ] - - report_func(args) - - -def check_env_directives(facts, report_func): - """ - Checks if the data model tells PassEnv/SetEnv directives are used - and produces a report. - - :param obj facts: model object containing info about CUPS configuration - :param func report_func: creates report - """ - title = ('PassEnv/SetEnv directives are no longer in cupsd.conf') - summary = ( - 'The directives were moved into /etc/cups/cups-files.conf ' - 'due to security reasons. ' - 'This will be handled automatically during the upgrade process.' - ) - if facts.env: - args = [ - reporting.Title(title), - reporting.Summary(summary), - reporting.Groups([reporting.Groups.SERVICES]), - reporting.Severity(reporting.Severity.LOW), - reporting.RelatedResource('file', '/etc/cups/cupsd.conf'), - reporting.RelatedResource('file', '/etc/cups/cups-files.conf') - ] - - report_func(args) - - -def check_certkey_directives(facts, report_func): - """ - Checks if the data model tells ServerKey/ServerCertificate directives - are used and produces a report. - - :param obj facts: model object containing info about CUPS configuration - :param func report_func: creates report - """ - title = ('ServerKey/ServerCertificate directives are substituted ' - 'by ServerKeychain directive') - summary = ( - 'The directives were substituted by ServerKeychain directive, ' - 'which now takes a directory as value (/etc/cups/ssl is default). ' - 'The previous directives took a file as value. ' - 'The migration script will copy the files specified in ' - 'directive values into /etc/cups/ssl directory ' - 'if they are not there already. ' - 'This will be handled automatically during the upgrade process.' 
- ) - if facts.certkey: - args = [ - reporting.Title(title), - reporting.Summary(summary), - reporting.Groups([reporting.Groups.SERVICES, - reporting.Groups.AUTHENTICATION]), - reporting.Severity(reporting.Severity.MEDIUM), - reporting.RelatedResource('file', '/etc/cups/cups-files.conf') - ] - - report_func(args) - - -def check_digest_values(facts, report_func): - """ - Checks if the data model tells Digest/BasicDigest values - of AuthType/DefaultAuthType directives are used - and produces a report. - - :param obj facts: model object containing info about CUPS configuration - :param func report_func: creates report - """ - title = ('CUPS no longer supports Digest and BasicDigest ' - 'directive values') - summary = ( - 'Digest and BasicDigest directive values were removed ' - 'due to deprecation. ' - 'The Basic authentication with TLS encryption will be ' - 'set automatically during the upgrade process. ' - 'The version of the used TLS is by default dependent on the set system ' - 'crypto policies.' - ) - # NOTE: the remediation instructions are missing as we do not have any - # doc covering that, mainly because of this is expected to be very rare - # at all. People usually do not use Digest & BasicDigest. 
- if facts.digest: - args = [ - reporting.Title(title), - reporting.Summary(summary), - reporting.Groups([ - reporting.Groups.AUTHENTICATION, - reporting.Groups.SECURITY, - reporting.Groups.SERVICES, - ]), - reporting.Severity(reporting.Severity.MEDIUM), - reporting.RelatedResource('file', '/etc/cups/cupsd.conf') - ] - - report_func(args) - - -def make_reports(consume_function=_get_input_model, - report_func=reporting.create_report, - debug_log=api.current_logger().debug): - """ - Creates reports if needed - - :param func consume_function: gets data model from an actor - :param func report_func: creates report - """ - facts = consume_function(CupsChangedFeatures) - - if facts: - check_interface_scripts(facts, report_func) - check_include_directive(facts, report_func) - check_printcap_directive(facts, report_func) - check_env_directives(facts, report_func) - check_certkey_directives(facts, report_func) - check_digest_values(facts, report_func) - else: - debug_log('No facts gathered about CUPS - skipping reports.') diff --git a/repos/system_upgrade/el7toel8/actors/cupscheck/tests/test_check_certkey.py b/repos/system_upgrade/el7toel8/actors/cupscheck/tests/test_check_certkey.py deleted file mode 100644 index 918c3a0e..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupscheck/tests/test_check_certkey.py +++ /dev/null @@ -1,22 +0,0 @@ -import pytest - -from leapp import reporting -from leapp.libraries.actor.cupscheck import check_certkey_directives -from leapp.libraries.common.testutils import create_report_mocked -from leapp.models import CupsChangedFeatures - - -@pytest.mark.parametrize("certkey_exists,n_reports", [(False, 0), (True, 1)]) -def test_check_certkey_directives(certkey_exists, n_reports): - facts = CupsChangedFeatures(certkey=certkey_exists) - report_func = create_report_mocked() - - check_certkey_directives(facts, report_func) - - assert report_func.called == n_reports - - if report_func.called: - report_fields = report_func.report_fields - - assert 
'ServerKey/ServerCertificate directives' in report_fields['title'] - assert report_fields['severity'] == reporting.Severity.MEDIUM diff --git a/repos/system_upgrade/el7toel8/actors/cupscheck/tests/test_check_digest_values.py b/repos/system_upgrade/el7toel8/actors/cupscheck/tests/test_check_digest_values.py deleted file mode 100644 index ca68be6e..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupscheck/tests/test_check_digest_values.py +++ /dev/null @@ -1,22 +0,0 @@ -import pytest - -from leapp import reporting -from leapp.libraries.actor.cupscheck import check_digest_values -from leapp.libraries.common.testutils import create_report_mocked -from leapp.models import CupsChangedFeatures - - -@pytest.mark.parametrize("digest_exists,n_reports", [(False, 0), (True, 1)]) -def test_check_digest_values(digest_exists, n_reports): - facts = CupsChangedFeatures(digest=digest_exists) - report_func = create_report_mocked() - - check_digest_values(facts, report_func) - - assert report_func.called == n_reports - - if report_func.called: - report_fields = report_func.report_fields - - assert 'no longer supports Digest' in report_fields['title'] - assert report_fields['severity'] == reporting.Severity.MEDIUM diff --git a/repos/system_upgrade/el7toel8/actors/cupscheck/tests/test_check_env_directives.py b/repos/system_upgrade/el7toel8/actors/cupscheck/tests/test_check_env_directives.py deleted file mode 100644 index 7375b763..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupscheck/tests/test_check_env_directives.py +++ /dev/null @@ -1,22 +0,0 @@ -import pytest - -from leapp import reporting -from leapp.libraries.actor.cupscheck import check_env_directives -from leapp.libraries.common.testutils import create_report_mocked -from leapp.models import CupsChangedFeatures - - -@pytest.mark.parametrize("env_exists,n_reports", [(False, 0), (True, 1)]) -def test_check_env_directives(env_exists, n_reports): - facts = CupsChangedFeatures(env=env_exists) - report_func = 
create_report_mocked() - - check_env_directives(facts, report_func) - - assert report_func.called == n_reports - - if report_func.called: - report_fields = report_func.report_fields - - assert 'PassEnv/SetEnv directives are no longer' in report_fields['title'] - assert report_fields['severity'] == reporting.Severity.LOW diff --git a/repos/system_upgrade/el7toel8/actors/cupscheck/tests/test_check_include_directive.py b/repos/system_upgrade/el7toel8/actors/cupscheck/tests/test_check_include_directive.py deleted file mode 100644 index 39062d82..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupscheck/tests/test_check_include_directive.py +++ /dev/null @@ -1,23 +0,0 @@ -import pytest - -from leapp import reporting -from leapp.libraries.actor.cupscheck import check_include_directive -from leapp.libraries.common.testutils import create_report_mocked -from leapp.models import CupsChangedFeatures - - -@pytest.mark.parametrize("include_exists,n_reports", [(False, 0), (True, 1)]) -def test_check_include_directive(include_exists, n_reports): - facts = CupsChangedFeatures(include=include_exists, - include_files=['/etc/cups/cupsd.conf']) - report_func = create_report_mocked() - - check_include_directive(facts, report_func) - - assert report_func.called == n_reports - - if report_func.called: - report_fields = report_func.report_fields - - assert 'no longer supports usage of Include' in report_fields['title'] - assert report_fields['severity'] == reporting.Severity.MEDIUM diff --git a/repos/system_upgrade/el7toel8/actors/cupscheck/tests/test_check_interface_scripts.py b/repos/system_upgrade/el7toel8/actors/cupscheck/tests/test_check_interface_scripts.py deleted file mode 100644 index bf642746..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupscheck/tests/test_check_interface_scripts.py +++ /dev/null @@ -1,25 +0,0 @@ -import pytest - -from leapp import reporting -from leapp.libraries.actor.cupscheck import check_interface_scripts -from leapp.libraries.common.testutils 
import create_report_mocked -from leapp.models import CupsChangedFeatures - - -@pytest.mark.parametrize("interface_exists,n_reports", [(False, 0), (True, 1)]) -def test_check_interface_scripts(interface_exists, n_reports): - facts = CupsChangedFeatures(interface=interface_exists) - report_func = create_report_mocked() - - check_interface_scripts(facts, report_func) - - assert report_func.called == n_reports - - if report_func.called: - report_fields = report_func.report_fields - - assert 'usage of interface scripts' in report_fields['title'] - assert 'Interface scripts are no longer' in report_fields['summary'] - assert report_fields['severity'] == reporting.Severity.MEDIUM - assert all('*cupsFilter2' in r['context'] - for r in report_fields['detail']['remediations']) diff --git a/repos/system_upgrade/el7toel8/actors/cupscheck/tests/test_check_printcap_directive.py b/repos/system_upgrade/el7toel8/actors/cupscheck/tests/test_check_printcap_directive.py deleted file mode 100644 index d2cf9157..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupscheck/tests/test_check_printcap_directive.py +++ /dev/null @@ -1,22 +0,0 @@ -import pytest - -from leapp import reporting -from leapp.libraries.actor.cupscheck import check_printcap_directive -from leapp.libraries.common.testutils import create_report_mocked -from leapp.models import CupsChangedFeatures - - -@pytest.mark.parametrize("printcap_exists,n_reports", [(False, 0), (True, 1)]) -def test_check_printcap_directive(printcap_exists, n_reports): - facts = CupsChangedFeatures(printcap=printcap_exists) - report_func = create_report_mocked() - - check_printcap_directive(facts, report_func) - - assert report_func.called == n_reports - - if report_func.called: - report_fields = report_func.report_fields - - assert 'PrintcapFormat directive' in report_fields['title'] - assert report_fields['severity'] == reporting.Severity.LOW diff --git a/repos/system_upgrade/el7toel8/actors/cupscheck/tests/test_make_reports.py 
b/repos/system_upgrade/el7toel8/actors/cupscheck/tests/test_make_reports.py deleted file mode 100644 index 8a5e53f5..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupscheck/tests/test_make_reports.py +++ /dev/null @@ -1,44 +0,0 @@ -import pytest - -from leapp.libraries.actor.cupscheck import make_reports - - -class MockLogger(object): - def __init__(self): - self.debug_msg = '' - - def debug_log(self, msg): - self.debug_msg += msg - - -class MockInputFacts(object): - def __init__(self, facts): - self.facts = facts - - def get_facts(self, model): - ret = None - if model == 'CupsChangedFeatures': - ret = self.facts - - return ret - - -class MockReport(object): - def __init__(self): - self.report = [] - - # unused, report testing will be done separately - def create_report(self, data_list): - if data_list: - self.report.append(data_list) - - -def test_make_reports(): - - logger = MockLogger() - facts = MockInputFacts(None) - reporting = MockReport() - - make_reports(facts.get_facts, reporting.create_report, logger.debug_log) - - assert logger.debug_msg == 'No facts gathered about CUPS - skipping reports.' diff --git a/repos/system_upgrade/el7toel8/actors/cupsfiltersmigrate/actor.py b/repos/system_upgrade/el7toel8/actors/cupsfiltersmigrate/actor.py deleted file mode 100644 index 18653958..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupsfiltersmigrate/actor.py +++ /dev/null @@ -1,29 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import cupsfiltersmigrate -from leapp.models import DistributionSignedRPM -from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag - - -class CupsfiltersMigrate(Actor): - """ - Actor for migrating package cups-filters. - - Migrating cups-filters package means adding two directives into - /etc/cups/cups-browsed.conf - LocalQueueNamingRemoteCUPS and - CreateIPPPrinterQueues. 
- - LocalQueueNamingRemoteCUPS directive indicates what will be used as a name - for local print queue creation - the default is DNS-SD ID of advertised - print queue now, it was the name of remote print queue in the past. - - CreateIPPPrinterQueues directive serves for telling cups-browsed to create - local print queues for all available IPP printers. - """ - - name = 'cupsfilters_migrate' - consumes = (DistributionSignedRPM,) - produces = () - tags = (ApplicationsPhaseTag, IPUWorkflowTag) - - def process(self): - cupsfiltersmigrate.update_cups_browsed() diff --git a/repos/system_upgrade/el7toel8/actors/cupsfiltersmigrate/libraries/cupsfiltersmigrate.py b/repos/system_upgrade/el7toel8/actors/cupsfiltersmigrate/libraries/cupsfiltersmigrate.py deleted file mode 100644 index e88be9d7..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupsfiltersmigrate/libraries/cupsfiltersmigrate.py +++ /dev/null @@ -1,116 +0,0 @@ -from leapp.libraries.common.rpms import has_package -from leapp.libraries.stdlib import api -from leapp.models import DistributionSignedRPM - -# rpm : the default config file -BROWSED_CONFIG = '/etc/cups/cups-browsed.conf' - - -# The list of macros that should be set to get the behavior -# from previous RHEL -NEW_MACROS = [ - ('LocalQueueNamingRemoteCUPS', 'RemoteName'), - ('CreateIPPPrinterQueues', 'All') -] - - -def _macro_exists(path, macro): - """ - Check if macro is in the file. - - :param str path: string representing the full path of the config file - :param str macro: new directive to be added - :return boolean res: macro does/does not exist in the file - """ - with open(path, 'r') as f: - lines = f.readlines() - - for line in lines: - if line.lstrip().startswith(macro): - return True - return False - - -def _append_string(path, content): - """ - Append string at the end of file. 
- - :param str path: string representing the full path of file - :param str content: preformatted string to be added - """ - with open(path, 'a') as f: - f.write(content) - - -def update_config(path, check_function=_macro_exists, - append_function=_append_string): - """ - Insert expected content into the file on the path if it is not - in the file already. - - :param str path: string representing the full path of the config file - :param func check_function: function to be used to check if string is in the file - :param func append_function: function to be used to append string - """ - - macros = [] - for macro in NEW_MACROS: - if not check_function(path, macro[0]): - macros.append(' '.join(macro)) - - if not macros: - return - - fmt_input = "\n{comment_line}\n{content}\n".format(comment_line='# content added by Leapp', - content='\n'.join(macros)) - - try: - append_function(path, fmt_input) - except IOError: - raise IOError('Error during writing to file: {}.'.format(path)) - - -def _check_package(pkg): - """ - Checks if a package is installed and signed - - :param str pkg: name of package - """ - return has_package(DistributionSignedRPM, pkg) - - -def update_cups_browsed(debug_log=api.current_logger().debug, - error_log=api.current_logger().error, - is_installed=_check_package, - append_function=_append_string, - check_function=_macro_exists): - """ - Update cups-browsed configuration file - - :param func debug_log: function for debug logging - :param func error_log: function for error logging - :param func is_installed: checks if the package is installed - :param func append_function: appends string into file - :param func check_function: checks if macro is in the file - """ - - error_list = [] - - if not is_installed('cups-filters'): - return - - debug_log('Updating cups-browsed configuration file {}.' 
- .format(BROWSED_CONFIG)) - - try: - update_config(BROWSED_CONFIG, - check_function, - append_function) - except (OSError, IOError) as error: - error_list.append((BROWSED_CONFIG, error)) - if error_list: - error_log('The files below have not been modified ' - '(error message included):' + - ''.join(['\n - {}: {}'.format(err[0], err[1]) - for err in error_list])) - return diff --git a/repos/system_upgrade/el7toel8/actors/cupsfiltersmigrate/tests/test_update_config_cupsfiltersmigrate.py b/repos/system_upgrade/el7toel8/actors/cupsfiltersmigrate/tests/test_update_config_cupsfiltersmigrate.py deleted file mode 100644 index 5474911f..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupsfiltersmigrate/tests/test_update_config_cupsfiltersmigrate.py +++ /dev/null @@ -1,100 +0,0 @@ -import pytest - -from leapp.libraries.actor.cupsfiltersmigrate import NEW_MACROS, update_config - - -def _gen_append_str(list_out=None): - """ - Just helper function to generate string expected to be added for an input (see testdata) for testing. 
- - :param list list_out: None, [0], [1], [0,1] - no more expected vals, - which represents what macros should be appended - in output - """ - if not list_out: - return '' - _out_list = ('LocalQueueNamingRemoteCUPS RemoteName', 'CreateIPPPrinterQueues All') - output = ['# content added by Leapp'] - for i in list_out: - output.append(_out_list[i]) - # ensure the extra NL is before the string and the empty NL is in the end - # of the string (/file) as well - return '\n{}\n'.format('\n'.join(output)) - - -testdata = ( - ('\n', - _gen_append_str([0, 1])), - ('bleblaba\n', - _gen_append_str([0, 1])), - ('fdnfdf\n# LocalQueueNamingRemoteCUPS RemoteName\n', - _gen_append_str([0, 1])), - ('fdnfdf\nfoo # LocalQueueNamingRemoteCUPS RemoteName\n', - _gen_append_str([0, 1])), - ('fdnfdf\n# LocalQueueNamingRemoteCUPS Bar\n', - _gen_append_str([0, 1])), - ('fdnfdf\n # LocalQueueNamingRemoteCUPS Bar\n', - _gen_append_str([0, 1])), - ('fdnfdf\nLocalQueueNamingRemoteCUPS RemoteName\n', - _gen_append_str([1])), - ('fdnfdf\n LocalQueueNamingRemoteCUPS RemoteName\n', - _gen_append_str([1])), - ('fdnfdf\nLocalQueueNamingRemoteCUPS Bar\n', - _gen_append_str([1])), - ('fnfngbfg\nCreateIPPPrinterQueues All\n', - _gen_append_str([0])), - ('fnfngbfg\nCreateIPPPrinterQueues Foo\n', - _gen_append_str([0])), - ('fnfngbfg\n CreateIPPPrinterQueues Foo\n', - _gen_append_str([0])), - ('CreateIPPPrinterQueues All\nLocalQueueNamingRemoteCUPS RemoteName\n', - _gen_append_str()), - ('CreateIPPPrinterQueues Foo\nLocalQueueNamingRemoteCUPS Bar\n', - _gen_append_str()), - ('foo\nCreateIPPPrinterQueues Foo\nLocalQueueNamingRemoteCUPS Bar\nFoobar\n', - _gen_append_str()), - ('foo\nCreateIPPPrinterQueues Foo\n# LocalQueueNamingRemoteCUPS Bar\nFoobar\n', - _gen_append_str([0])) -) - - -class MockFile(object): - def __init__(self, path, content=None): - self.path = path - self.content = content - self.error = False - - def append(self, path, content): - if path != self.path: - self.error = True - if not 
self.error: - self.content += content - return self.content - raise IOError('Error during writing to file: {}.'.format(path)) - - def exists(self, path, macro): - for line in self.content.split('\n'): - if line.lstrip().startswith(macro) and self.path == path: - return True - return False - - -def test_update_config_file_errors(): - path = 'foo' - - f = MockFile(path, content='') - - with pytest.raises(IOError): - update_config('bar', f.exists, f.append) - - assert f.content == '' - - -@pytest.mark.parametrize('content,expected', testdata) -def test_update_config_append_into_file(content, expected): - path = 'bar' - f = MockFile(path, content) - - update_config(path, f.exists, f.append) - - assert f.content == content + expected diff --git a/repos/system_upgrade/el7toel8/actors/cupsfiltersmigrate/tests/test_update_cups_browsed_cupsfiltersmigrate.py b/repos/system_upgrade/el7toel8/actors/cupsfiltersmigrate/tests/test_update_cups_browsed_cupsfiltersmigrate.py deleted file mode 100644 index 94fa833c..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupsfiltersmigrate/tests/test_update_cups_browsed_cupsfiltersmigrate.py +++ /dev/null @@ -1,107 +0,0 @@ -import pytest - -from leapp.libraries.actor.cupsfiltersmigrate import BROWSED_CONFIG, update_cups_browsed - -testdata = [ - {'cups-filters': '/etc/cups/cups-browsed.conf'}, - {'cups-filters': ''}, - {'ble': ''} -] - - -class MockLogger(object): - def __init__(self): - self.debugmsg = '' - self.errmsg = '' - - def debug(self, message): - self.debugmsg += message - - def error(self, message): - self.errmsg += message - - -class MockPackage(object): - def __init__(self, name, config): - self.name = name - self.config = config - self.config_content = '' - - -class MockPackageSet(object): - def __init__(self): - self.installed_packages = None - - def add_packages(self, pkgs): - if self.installed_packages is None: - self.installed_packages = [] - - for rpm, config in pkgs.items(): - 
self.installed_packages.append(MockPackage(rpm, config)) - - def is_installed(self, pkg): - for rpm in self.installed_packages: - if pkg == rpm.name: - return True - return False - - def append_content(self, path, content): - found = False - - for rpm in self.installed_packages: - if path == rpm.config: - found = True - rpm.config_content += content - if not found: - raise IOError('Error during writing to file: {}.'.format(path)) - - def check_content(self, path, content): - found = False - - for rpm in self.installed_packages: - if path == rpm.config and content in rpm.config_content: - found = True - - return found - - -class ExpectedOutput(object): - def __init__(self): - self.debugmsg = '' - self.errmsg = '' - - def create(self, rpms): - error_list = [] - - for pkg, config in rpms.items(): - if pkg == 'cups-filters': - self.debugmsg += 'Updating cups-browsed configuration file {}.'.format(BROWSED_CONFIG) - if config == '': - error_list.append((BROWSED_CONFIG, 'Error during ' - 'writing to file: {}.'.format(BROWSED_CONFIG))) - - if error_list: - self.errmsg = ('The files below have not been modified ' - '(error message included):' + - ''.join(['\n - {}: {}'.format(err[0], err[1]) - for err in error_list])) - - -@pytest.mark.parametrize("rpms", testdata) -def test_update_cups_browsed(rpms): - logger = MockLogger() - installed_packages = MockPackageSet() - - installed_packages.add_packages(rpms) - - expected = ExpectedOutput() - expected.create(rpms) - - update_cups_browsed(logger.debug, - logger.error, - installed_packages.is_installed, - installed_packages.append_content, - installed_packages.check_content) - - assert expected.debugmsg == logger.debugmsg - assert expected.errmsg == logger.errmsg diff --git a/repos/system_upgrade/el7toel8/actors/cupsmigrate/actor.py b/repos/system_upgrade/el7toel8/actors/cupsmigrate/actor.py deleted file mode 100644 index 38dd0727..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupsmigrate/actor.py +++ /dev/null @@ -1,21 
+0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import cupsmigrate -from leapp.models import CupsChangedFeatures -from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag - - -class CupsMigrate(Actor): - """ - cups_migrate actor - - Migrates configuration directives and writes into error log - if any error was encountered. - """ - - name = 'cups_migrate' - consumes = (CupsChangedFeatures,) - produces = () - tags = (ApplicationsPhaseTag, IPUWorkflowTag) - - def process(self): - cupsmigrate.migrate_configuration() diff --git a/repos/system_upgrade/el7toel8/actors/cupsmigrate/libraries/cupsmigrate.py b/repos/system_upgrade/el7toel8/actors/cupsmigrate/libraries/cupsmigrate.py deleted file mode 100644 index f7aee8b2..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupsmigrate/libraries/cupsmigrate.py +++ /dev/null @@ -1,231 +0,0 @@ -import os -from shutil import copy - -from leapp.libraries.stdlib import api -from leapp.models import CupsChangedFeatures - -CUPSD_CONF = '/etc/cups/cupsd.conf' -CUPSFILES_CONF = '/etc/cups/cups-files.conf' -""" -CUPS configuration files -""" - - -class FileOperations(object): - def readlines(self, path): - if os.path.exists(path): - with open(path, 'r') as f: - return f.readlines() - else: - raise IOError('Error when reading file {} - file ' - 'does not exist.'.format(path)) - - def write(self, path, mode, content): - if isinstance(content, list): - content = ''.join(content) - with open(path, mode) as f: - f.write(content) - - def copy_to_ssl(self, oldpath): - copy(oldpath, '/etc/cups/ssl') - - -def migrate_digest(op): - """ - Replaces Digest/BasicDigest for Basic - - :param obj op: file operations object - """ - try: - lines = op.readlines(CUPSD_CONF) - except IOError as error: - raise IOError(error) - - for line in lines: - for directive in ['AuthType', 'DefaultAuthType']: - if line.lstrip().startswith(directive): - auth_line_value = line.lstrip().lstrip(directive).lstrip() - for value in ['Digest', 
'BasicDigest']: - if auth_line_value.startswith(value): - lines[lines.index(line)] = '{} Basic\n'.format(directive) - - op.write(CUPSD_CONF, 'w', lines) - - -def migrate_include(include_files, op): - """ - Concatenates configuration files and remove lines - with 'Include' directive - - :param list include_files: list of files which contents will be - concatenated - :param obj op: file operations object - """ - error_list = [] - lines = [] - content = [] - - for f in include_files: - try: - content = op.readlines(f) - if f != CUPSD_CONF: - content = ['\n# added by Leapp\n'] + content - lines += content - except IOError as error: - error_list.append('Include directive: {}'.format(error)) - - if error_list: - return error_list - - for line in lines: - if line.lstrip().startswith('Include'): - lines[lines.index(line)] = '' - - op.write(CUPSD_CONF, 'w', lines) - - return None - - -def move_directives(directives, op): - """ - Moves the directives from cupsd.conf to cups-files.conf - - :param list directives: list of wanted directives - :param obj op: file operations object - """ - try: - cupsd_lines = op.readlines(CUPSD_CONF) - except IOError as error: - raise IOError(error) - - lines_to_move = [] - for line in cupsd_lines: - for name in directives: - if line.lstrip().startswith(name): - lines_to_move.append(line) - cupsd_lines[cupsd_lines.index(line)] = '' - - op.write(CUPSD_CONF, 'w', cupsd_lines) - - if lines_to_move: - op.write(CUPSFILES_CONF, 'a', - '\n# added by Leapp\n{}'.format(''.join(lines_to_move))) - - -def migrate_certkey(op): - """ - Copies the key and the certificate to /etc/cups/ssl if both - are in different dirs, or sets ServerKeychain value to the dir - where the key and the certificate are. 
Removes old directives - - :param list directives: list of wanted directives - :param obj op: file operations object - """ - try: - lines = op.readlines(CUPSFILES_CONF) - except IOError as error: - raise IOError(error) - - certkey_values = [] - - for line in lines: - for name in ['ServerKey', 'ServerCertificate']: - if line.lstrip().startswith(name): - value = line.split()[1] - if value.startswith('ssl'): - value = os.path.join('/etc/cups', value) - certkey_values.append(value) - lines[lines.index(line)] = '' - - op.write(CUPSFILES_CONF, 'w', lines) - - # we need to decide whether we copy the files to /etc/cups/ssl - # or set ServerKeychain to non-default directory or do nothing - if all(os.path.dirname(val) == '/etc/cups/ssl' for val in certkey_values): - return - - # Check that all files are inside the same directory - if len(set([os.path.dirname(certkey) for certkey in certkey_values])) == 1: - path = os.path.dirname(certkey_values[0]) - op.write(CUPSFILES_CONF, 'a', - '\n# added by Leapp\nServerKeychain {}\n'.format(path)) - else: - for value in certkey_values: - if not os.path.dirname(value) == '/etc/cups/ssl': - op.copy_to_ssl(value) - - -def _get_facts(model): - """ - Consumes input data model - - :param class model: name of model which we consume - """ - return next(api.consume(model), None) - - -def migrate_configuration(error_log=api.current_logger().error, - debug_log=api.current_logger().debug, - op=FileOperations(), - consume_function=_get_facts): - """ - Migrate CUPS configuration based on gathered facts - - :param func error_log: sends error messages - :param func debug_log: sends debug messages - :param obj op: IO operations - :param func consume_function: receives data object from a model - """ - - facts = consume_function(CupsChangedFeatures) - error_list = [] - - if not facts: - return - - if facts.include: - debug_log('Migrating CUPS configuration - Include directives.') - include_errors = [] - - include_errors = 
migrate_include(facts.include_files, op) - if include_errors: - error_list += include_errors - - if facts.digest: - debug_log('Migrating CUPS configuration - BasicDigest/Digest directives.') - - try: - migrate_digest(op) - except IOError as error: - error_list.append('Digest/BasicDigest values: {}'.format(error)) - - if facts.env: - debug_log('Migrating CUPS configuration - PassEnv/SetEnv directives.') - - try: - move_directives(['PassEnv', 'SetEnv'], - op) - except IOError as error: - error_list.append('PassEnv/SetEnv directives: {}'.format(error)) - - if facts.certkey: - debug_log('Migrating CUPS configuration - ' - 'ServerKey/ServerCertificate directive.') - - try: - migrate_certkey(op) - except IOError as error: - error_list.append('ServerKey/ServerCertificate directives: {}'.format(error)) - - if facts.printcap: - debug_log('Migrating CUPS configuration - PrintcapFormat directive.') - - try: - move_directives(['PrintcapFormat'], - op) - except IOError as error: - error_list.append('PrintcapFormat directive: {}'.format(error)) - - if error_list: - error_log('Following errors happened during CUPS migration:' - + ''.join(['\n - {}'.format(err) for err in error_list])) diff --git a/repos/system_upgrade/el7toel8/actors/cupsmigrate/tests/test_migrate_certkey.py b/repos/system_upgrade/el7toel8/actors/cupsmigrate/tests/test_migrate_certkey.py deleted file mode 100644 index 989e3b3d..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupsmigrate/tests/test_migrate_certkey.py +++ /dev/null @@ -1,222 +0,0 @@ -import pytest - -from leapp.libraries.actor.cupsmigrate import migrate_certkey - - -class MockFileSystem(object): - def __init__(self, - infiles): - self.files = {} - self.ssl_dir = [] - self.files = infiles - for path in infiles.keys(): - if path.startswith('/etc/cups/ssl'): - self.ssl_dir.append(path) - - def readlines(self, path): - if path in self.files.keys(): - return self.files[path].splitlines(True) - raise IOError('Error when reading file {} - file ' - 
'does not exist.'.format(path)) - - def write(self, path, mode, content): - if isinstance(content, list): - content = ''.join(content) - - if mode == 'w': - self.files[path] = content - else: - self.files[path] += content - - def copy_to_ssl(self, oldpath): - self.ssl_dir.append('/etc/cups/ssl/' + oldpath.rsplit('/', 1)[1]) - - -testdata = ( - ( - { - '/etc/cups/cups-files.conf': 'ifdfdfgfg\n' - }, - { - '/etc/cups/cups-files.conf': 'ifdfdfgfg\n' - } - ), - ( - { - '/etc/cups/cups-files.conf': 'ServerKey /etc/cups/ssl/ser' - 'ver.key\nHello world\n', - '/etc/cups/ssl/server.key': '' - }, - { - '/etc/cups/cups-files.conf': 'Hello world\n', - 'ssl-dir': ['/etc/cups/ssl/server.key'] - } - ), - ( - { - '/etc/cups/cups-files.conf': '#ServerKey /etc/cups/ssl/se' - 'rver.key\nHello world\n', - '/etc/cups/ssl/server.key': '' - }, - { - '/etc/cups/cups-files.conf': '#ServerKey /etc/cups/ssl/se' - 'rver.key\nHello world\n', - 'ssl-dir': ['/etc/cups/ssl/server.key'] - } - ), - ( - { - '/etc/cups/cups-files.conf': 'ServerCertificate /etc/cups' - '/ssl/server.cert\nHello world\n', - '/etc/cups/ssl/server.cert': '' - }, - { - '/etc/cups/cups-files.conf': 'Hello world\n', - 'ssl-dir': ['/etc/cups/ssl/server.cert'] - } - ), - ( - { - '/etc/cups/cups-files.conf': 'ServerCertificate /etc/cups' - '/ssl/server.cert\nServerKey' - ' /etc/cups/ssl/server.key\n' - 'Hello world\n', - '/etc/cups/ssl/server.cert': '', - '/etc/cups/ssl/server.key': '' - }, - { - '/etc/cups/cups-files.conf': 'Hello world\n', - 'ssl-dir': ['/etc/cups/ssl/server.cert', - '/etc/cups/ssl/server.key'] - } - ), - ( - { - '/etc/cups/cups-files.conf': 'ServerCertificate ssl/serve' - 'r.cert\nServerKey ssl/serve' - 'r.key\nHello world\n', - '/etc/cups/ssl/server.cert': '', - '/etc/cups/ssl/server.key': '' - }, - { - '/etc/cups/cups-files.conf': 'Hello world\n', - 'ssl-dir': ['/etc/cups/ssl/server.cert', - '/etc/cups/ssl/server.key'] - } - ), - ( - { - '/etc/cups/cups-files.conf': 'ServerCertificate ssl/serve' - 
'r.cert\nServerKey /etc/cups' - '/ssl/server.key\nHello worl' - 'd\n', - '/etc/cups/ssl/server.cert': '', - '/etc/cups/ssl/server.key': '' - }, - { - '/etc/cups/cups-files.conf': 'Hello world\n', - 'ssl-dir': ['/etc/cups/ssl/server.cert', - '/etc/cups/ssl/server.key'] - } - ), - ( - { - '/etc/cups/cups-files.conf': 'ServerCertificate ssl/serve' - 'r.cert\nServerKey /somewher' - 'e/else/server.key\nHello wo' - 'rld\n', - '/etc/cups/ssl/server.cert': '', - '/somewhere/else/server.key': '' - }, - { - '/etc/cups/cups-files.conf': 'Hello world\n', - 'ssl-dir': ['/etc/cups/ssl/server.cert', - '/etc/cups/ssl/server.key'] - } - ), - ( - { - '/etc/cups/cups-files.conf': 'ServerCertificate /somewher' - 'e/else/server.cert\nServerK' - 'ey /etc/cups/ssl/server.key' - '\nHello world\n', - '/somewhere/else/server.cert': '', - '/etc/cups/ssl/server.key': '' - }, - { - '/etc/cups/cups-files.conf': 'Hello world\n', - 'ssl-dir': ['/etc/cups/ssl/server.key', - '/etc/cups/ssl/server.cert'] - } - ), - ( - { - '/etc/cups/cups-files.conf': 'ServerCertificate /somewher' - 'e/else/server.cert\nServerK' - 'ey /somewhere/else/server.c' - 'ert\nHello world\n', - '/somewhere/else/server.cert': '', - '/somewhere/else/server.key': '' - }, - { - '/etc/cups/cups-files.conf': 'Hello world\n\n# added by L' - 'eapp\nServerKeychain /somew' - 'here/else\n' - } - ), - ( - { - '/etc/cups/cups-files.conf': 'ServerCertificate /somewher' - 'e/else/server.cert\nServerK' - 'ey /anywhere/else/server.ke' - 'y\nHello world\n', - '/somewhere/else/server.cert': '', - '/anywhere/else/server.key': '' - }, - { - '/etc/cups/cups-files.conf': 'Hello world\n', - 'ssl-dir': ['/etc/cups/ssl/server.cert', - '/etc/cups/ssl/server.key'] - } - ), - ( - { - '/etc/cups/cups-files.conf': 'ServerCertificate ssl/somedir/' - 'server.cert\nHello world\nServ' - 'erKey ssl/server.key\n', - '/etc/cups/ssl/somedir/server.cert': '', - '/etc/cups/ssl/server.key': '' - }, - { - '/etc/cups/cups-files.conf': 'Hello world\n', - 'ssl-dir': 
['/etc/cups/ssl/somedir/server.cert', - '/etc/cups/ssl/server.key', - '/etc/cups/ssl/server.cert'] - } - ), - ( - { - '/etc/cups/cups-files.conf': 'ServerCertificate /etc/cups/ss' - 'l/somedir/server.cert\nHello w' - 'orld\nServerKey ssl/server.key\n', - '/etc/cups/ssl/somedir/server.cert': '', - '/etc/cups/ssl/server.key': '' - }, - { - '/etc/cups/cups-files.conf': 'Hello world\n', - 'ssl-dir': ['/etc/cups/ssl/somedir/server.cert', - '/etc/cups/ssl/server.key', - '/etc/cups/ssl/server.cert'] - } - ) -) - - -@pytest.mark.parametrize('files,expected', testdata) -def test_migrate_certkey(files, expected): - op = MockFileSystem(infiles=files) - - migrate_certkey(op) - - assert op.files.get('/etc/cups/cups-files.conf', None) == expected.get('/etc/cups/cups-files.conf', None) - assert op.ssl_dir == expected.get('ssl-dir', []) diff --git a/repos/system_upgrade/el7toel8/actors/cupsmigrate/tests/test_migrate_configuration.py b/repos/system_upgrade/el7toel8/actors/cupsmigrate/tests/test_migrate_configuration.py deleted file mode 100644 index f29766a1..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupsmigrate/tests/test_migrate_configuration.py +++ /dev/null @@ -1,193 +0,0 @@ -import pytest - -from leapp.libraries.actor.cupsmigrate import migrate_configuration -from leapp.models import CupsChangedFeatures - - -class MockFileSystem(object): - def __init__(self, - infiles): - self.files = {} - self.ssl_dir = [] - self.files = infiles - for path in infiles.keys(): - if path.startswith('ssl') or path.startswith('/etc/cups/ssl'): - self.ssl_dir.append(path.rsplit('/', 1)[1]) - - def readlines(self, path): - if path in self.files.keys(): - return self.files[path].splitlines(True) - raise IOError('Error when reading file {} - file ' - 'does not exist.'.format(path)) - - def write(self, path, mode, content): - if isinstance(content, list): - content = ''.join(content) - - if mode == 'w': - self.files[path] = content - else: - self.files[path] += content - - def 
copy_to_ssl(self, oldpath): - self.ssl_dir.append(oldpath.rsplit('/', 1)[1]) - - -class MockLogger(object): - def __init__(self): - self.debug_msg = '' - self.error_msg = '' - - def debug_log(self, msg): - self.debug_msg += msg - - def error_log(self, msg): - self.error_msg += msg - - -class MockModel(object): - def __init__(self, facts): - if not facts: - self.model = None - return - - self.model = CupsChangedFeatures(include=facts.get('include', False), - digest=facts.get('digest', False), - env=facts.get('env', False), - certkey=facts.get('certkey', False), - printcap=facts.get('printcap', False), - include_files=facts.get('include_files', [])) - - def get_facts(self, model): - return self.model - - -testdata = ( - ( - None, - { - 'debug_msg': '', - 'error_msg': '' - } - ), - ( - {}, - { - 'debug_msg': '', - 'error_msg': '' - } - ), - ( - { - 'include': True, - 'include_files': ['/etc/cups/cupsd.conf', 'smth.conf', - 'any.conf'], - }, - { - 'debug_msg': 'Migrating CUPS configuration - Include directives.', - 'error_msg': 'Following errors happened during CUPS migration:\n ' - '- Include directive: Error when reading file /etc/cup' - 's/cupsd.conf - file does not exist.\n - Include dir' - 'ective: Error when reading file smth.conf - file does' - ' not exist.\n - Include directive: Error when readi' - 'ng file any.conf - file does not exist.' - } - ), - ( - { - 'digest': True, - }, - { - 'debug_msg': 'Migrating CUPS configuration - BasicDigest/Digest' - ' directives.', - 'error_msg': 'Following errors happened during CUPS migration:\n ' - '- Digest/BasicDigest values: Error when reading file ' - '/etc/cups/cupsd.conf - file does not exist.' - } - ), - ( - { - 'env': True, - }, - { - 'debug_msg': 'Migrating CUPS configuration - PassEnv/SetEnv directives.', - 'error_msg': 'Following errors happened during CUPS migration:\n ' - '- PassEnv/SetEnv directives: Error when reading file ' - '/etc/cups/cupsd.conf - file does not exist.' 
- } - ), - ( - { - 'certkey': True, - }, - { - 'debug_msg': 'Migrating CUPS configuration - ServerKey/ServerCertif' - 'icate directive.', - 'error_msg': 'Following errors happened during CUPS migration:\n ' - '- ServerKey/ServerCertificate directives: Error when ' - 'reading file /etc/cups/cups-files.conf - file does no' - 't exist.' - } - ), - ( - { - 'printcap': True, - }, - { - 'debug_msg': 'Migrating CUPS configuration - PrintcapFormat directive.', - 'error_msg': 'Following errors happened during CUPS migration:\n ' - '- PrintcapFormat directive: Error when reading file /' - 'etc/cups/cupsd.conf - file does not exist.' - } - ), - ( - { - 'certkey': True, - 'include': True, - 'env': True, - 'printcap': True, - 'digest': True, - 'include_files': ['/etc/cups/cupsd.conf', 'smth.conf', - 'any.conf'] - }, - { - 'debug_msg': 'Migrating CUPS configuration - Include directives.Mig' - 'rating CUPS configuration - BasicDigest/Digest direct' - 'ives.Migrating CUPS configuration - PassEnv/SetEnv di' - 'rectives.Migrating CUPS configuration - ServerKey/Ser' - 'verCertificate directive.Migrating CUPS configuration' - ' - PrintcapFormat directive.', - 'error_msg': 'Following errors happened during CUPS migration:\n ' - '- Include directive: Error when reading file /etc/cup' - 's/cupsd.conf - file does not exist.\n - Include dir' - 'ective: Error when reading file smth.conf - file does' - ' not exist.\n - Include directive: Error when readi' - 'ng file any.conf - file does not exist.\n - Digest/' - 'BasicDigest values: Error when reading file /etc/cups' - '/cupsd.conf - file does not exist.\n - PassEnv/SetE' - 'nv directives: Error when reading file /etc/cups/cups' - 'd.conf - file does not exist.\n - ServerKey/ServerC' - 'ertificate directives: Error when reading file /etc/c' - 'ups/cups-files.conf - file does not exist.\n - Prin' - 'tcapFormat directive: Error when reading file /etc/cu' - 'ps/cupsd.conf - file does not exist.' 
- } - ) -) - - -@pytest.mark.parametrize('facts,expected', testdata) -def test_migrate_configuration(facts, expected): - data_model = MockModel(facts) - - op = MockFileSystem({}) - - logger = MockLogger() - - migrate_configuration(logger.error_log, - logger.debug_log, - op, - data_model.get_facts) - - assert logger.debug_msg == expected['debug_msg'] - assert logger.error_msg == expected['error_msg'] diff --git a/repos/system_upgrade/el7toel8/actors/cupsmigrate/tests/test_migrate_digest.py b/repos/system_upgrade/el7toel8/actors/cupsmigrate/tests/test_migrate_digest.py deleted file mode 100644 index 25a29871..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupsmigrate/tests/test_migrate_digest.py +++ /dev/null @@ -1,93 +0,0 @@ -import pytest - -from leapp.libraries.actor.cupsmigrate import migrate_digest - - -class MockFileSystem(object): - def __init__(self, - infiles): - self.files = {} - self.ssl_dir = [] - self.files = infiles - for path in infiles.keys(): - if path.startswith('ssl') or path.startswith('/etc/cups/ssl'): - self.ssl_dir.append(path.rsplit('/', 1)[1]) - - def readlines(self, path): - if path in self.files.keys(): - return self.files[path].splitlines(True) - raise IOError('Error when reading file {} - file ' - 'does not exist.'.format(path)) - - def write(self, path, mode, content): - if isinstance(content, list): - content = ''.join(content) - - if mode == 'w': - self.files[path] = content - else: - self.files[path] += content - - def copy_to_ssl(self, oldpath): - self.ssl_dir.append(oldpath.rsplit('/', 1)[1]) - - -testdata = ( - ( - { - '/etc/cups/cupsd.conf': 'ifdfdfgfg\n' - }, - 'ifdfdfgfg\n' - ), - ( - { - '/etc/cups/cupsd.conf': 'AuthType Basic\nHello world\n', - }, - 'AuthType Basic\nHello world\n' - ), - ( - { - '/etc/cups/cupsd.conf': 'DefaultAuthType Negotiate\nHello world\n', - }, - 'DefaultAuthType Negotiate\nHello world\n' - ), - ( - { - '/etc/cups/cupsd.conf': 'AuthType Digest\nHello world\n', - }, - 'AuthType Basic\nHello world\n' 
- ), - ( - { - '/etc/cups/cupsd.conf': 'DefaultAuthType Digest\nHello world\n', - }, - 'DefaultAuthType Basic\nHello world\n' - ), - ( - { - '/etc/cups/cupsd.conf': 'DefaultAuthType BasicDigest\nHello world\n', - }, - 'DefaultAuthType Basic\nHello world\n' - ), - ( - { - '/etc/cups/cupsd.conf': 'AuthType BasicDigest\nHello world\n', - }, - 'AuthType Basic\nHello world\n' - ), - ( - { - '/etc/cups/cupsd.conf': '#AuthType BasicDigest\nHello world\n', - }, - '#AuthType BasicDigest\nHello world\n' - ) -) - - -@pytest.mark.parametrize('files,expected', testdata) -def test_migrate_digest(files, expected): - op = MockFileSystem(infiles=files) - - migrate_digest(op) - - assert op.files.get('/etc/cups/cupsd.conf', None) == expected diff --git a/repos/system_upgrade/el7toel8/actors/cupsmigrate/tests/test_migrate_include.py b/repos/system_upgrade/el7toel8/actors/cupsmigrate/tests/test_migrate_include.py deleted file mode 100644 index f36b1963..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupsmigrate/tests/test_migrate_include.py +++ /dev/null @@ -1,78 +0,0 @@ -import pytest - -from leapp.libraries.actor.cupsmigrate import migrate_include - - -class MockFileSystem(object): - def __init__(self, - infiles): - self.files = {} - self.ssl_dir = [] - self.files = infiles - for path in infiles.keys(): - if path.startswith('ssl') or path.startswith('/etc/cups/ssl'): - self.ssl_dir.append(path.rsplit('/', 1)[1]) - - def readlines(self, path): - if path in self.files.keys(): - return self.files[path].splitlines(True) - raise IOError('Error when reading file {} - file ' - 'does not exist.'.format(path)) - - def write(self, path, mode, content): - if isinstance(content, list): - content = ''.join(content) - - if mode == 'w': - self.files[path] = content - else: - self.files[path] += content - - def copy_to_ssl(self, oldpath): - self.ssl_dir.append(oldpath.rsplit('/', 1)[1]) - - -testdata = ( - ( - { - '/etc/cups/cupsd.conf': 'ifdfdfgfg\n' - }, - ['/etc/cups/cupsd.conf'], - 
'ifdfdfgfg\n' - ), - ( - { - '/etc/cups/cupsd.conf': 'Include smth.conf\nHello world\n', - 'smth.conf': 'Policy two\n' - }, - ['/etc/cups/cupsd.conf', 'smth.conf'], - 'Hello world\n\n# added by Leapp\nPolicy two\n' - ), - ( - { - '/etc/cups/cupsd.conf': 'Include smth.conf\nHello world\n', - 'smth.conf': 'Include any.conf\nMake my day\n', - 'any.conf': 'Go ahead\n' - }, - ['/etc/cups/cupsd.conf', 'smth.conf', 'any.conf'], - 'Hello world\n\n# added by Leapp\nMake my day\n\n# added by Leapp\nGo ahead\n' - ), - ( - { - '/etc/cups/cupsd.conf': 'Include smth.conf\nHello world\n', - 'smth.conf': '#Include any.conf\nMake my day\n', - 'any.conf': 'Go ahead\n' - }, - ['/etc/cups/cupsd.conf', 'smth.conf'], - 'Hello world\n\n# added by Leapp\n#Include any.conf\nMake my day\n' - ) -) - - -@pytest.mark.parametrize('files,included_files,expected', testdata) -def test_migrate_include(files, included_files, expected): - op = MockFileSystem(infiles=files) - - migrate_include(included_files, op) - - assert op.files.get('/etc/cups/cupsd.conf', None) == expected diff --git a/repos/system_upgrade/el7toel8/actors/cupsmigrate/tests/test_move_directives.py b/repos/system_upgrade/el7toel8/actors/cupsmigrate/tests/test_move_directives.py deleted file mode 100644 index c8b60746..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupsmigrate/tests/test_move_directives.py +++ /dev/null @@ -1,110 +0,0 @@ -import pytest - -from leapp.libraries.actor.cupsmigrate import move_directives - - -class MockFileSystem(object): - def __init__(self, - infiles): - self.files = {} - self.ssl_dir = [] - self.files = infiles - for path in infiles.keys(): - if path.startswith('ssl') or path.startswith('/etc/cups/ssl'): - self.ssl_dir.append(path.rsplit('/', 1)[1]) - - def readlines(self, path): - if path in self.files.keys(): - return self.files[path].splitlines(True) - raise IOError('Error when reading file {} - file ' - 'does not exist.'.format(path)) - - def write(self, path, mode, content): - if 
isinstance(content, list): - content = ''.join(content) - - if mode == 'w': - self.files[path] = content - else: - self.files[path] += content - - def copy_to_ssl(self, oldpath): - self.ssl_dir.append(oldpath.rsplit('/', 1)[1]) - - -testdata = ( - ( - { - '/etc/cups/cupsd.conf': 'ifdfdfgfg\n', - '/etc/cups/cups-files.conf': 'clean\n' - }, - { - '/etc/cups/cupsd.conf': 'ifdfdfgfg\n', - '/etc/cups/cups-files.conf': 'clean\n' - } - ), - ( - { - '/etc/cups/cupsd.conf': '#PassEnv smht\nHello world\n', - '/etc/cups/cups-files.conf': 'clean\n' - }, - { - '/etc/cups/cupsd.conf': '#PassEnv smht\nHello world\n', - '/etc/cups/cups-files.conf': 'clean\n' - } - ), - ( - { - '/etc/cups/cupsd.conf': 'PassEnv smth\nHello world\n', - '/etc/cups/cups-files.conf': 'clean\n' - }, - { - '/etc/cups/cupsd.conf': 'Hello world\n', - '/etc/cups/cups-files.conf': 'clean\n\n# added by Leapp\nPassEnv smth\n' - } - ), - ( - { - '/etc/cups/cupsd.conf': 'SetEnv smht to\nHello world\n', - '/etc/cups/cups-files.conf': 'clean\n' - }, - { - '/etc/cups/cupsd.conf': 'Hello world\n', - '/etc/cups/cups-files.conf': 'clean\n\n# added by Leapp\nSetEnv smht to\n' - } - ), - ( - { - '/etc/cups/cupsd.conf': 'PassEnv smht\nSetEnv set to\nHello world\n', - '/etc/cups/cups-files.conf': 'clean\n' - }, - { - '/etc/cups/cupsd.conf': 'Hello world\n', - '/etc/cups/cups-files.conf': 'clean\n\n# added by Leapp\n' - 'PassEnv smht\nSetEnv set to\n' - } - ), - ( - { - '/etc/cups/cupsd.conf': 'PassEnv smth\nSetEnv set to\nPri' - 'ntcapFormat any\nHello world\n', - '/etc/cups/cups-files.conf': 'clean\n' - }, - { - '/etc/cups/cupsd.conf': 'Hello world\n', - '/etc/cups/cups-files.conf': 'clean\n\n# added by Leapp\n' - 'PassEnv smth\nSetEnv set to' - '\nPrintcapFormat any\n' - } - ) -) - - -@pytest.mark.parametrize('files,expected', testdata) -def test_move_directives(files, expected): - op = MockFileSystem(infiles=files) - - move_directives(['PassEnv', 'SetEnv', 'PrintcapFormat'], op) - - assert 
op.files.get('/etc/cups/cupsd.conf', None) == expected.get('/etc/cups/cupsd.conf', None) - assert op.files.get('/etc/cups/cups-files.conf', None) == expected.get('/etc/cups/cups-files.conf', None) diff --git a/repos/system_upgrade/el7toel8/actors/cupsscanner/actor.py b/repos/system_upgrade/el7toel8/actors/cupsscanner/actor.py deleted file mode 100644 index f586cf64..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupsscanner/actor.py +++ /dev/null @@ -1,29 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import cupsscanner -from leapp.models import CupsChangedFeatures, DistributionSignedRPM, Report -from leapp.tags import FactsPhaseTag, IPUWorkflowTag - - -class CupsScanner(Actor): - """ - Gather facts about CUPS features which needs to be migrated - - Actor checks if cups package is installed and if one or more following - situations appears in configuration files: - - interface scripts - - use of 'Digest' or 'BasicDigest' authentication - - use of 'Include' directive - - use of 'ServerCertificate' and 'ServerKey' directives - - use of 'SetEnv' or 'PassEnv' directives - - use of 'PrintcapFormat' directive - - The actor creates list from gathered data. 
- """ - - name = 'cups_scanner' - consumes = (DistributionSignedRPM,) - produces = (Report, CupsChangedFeatures) - tags = (FactsPhaseTag, IPUWorkflowTag) - - def process(self): - cupsscanner.find_features() diff --git a/repos/system_upgrade/el7toel8/actors/cupsscanner/libraries/cupsscanner.py b/repos/system_upgrade/el7toel8/actors/cupsscanner/libraries/cupsscanner.py deleted file mode 100644 index 82b312ec..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupsscanner/libraries/cupsscanner.py +++ /dev/null @@ -1,280 +0,0 @@ -import os - -from leapp.exceptions import StopActorExecutionError -from leapp.libraries.common.rpms import has_package -from leapp.libraries.stdlib import api -from leapp.models import CupsChangedFeatures, DistributionSignedRPM - - -def _list_dir(path): - """ - Lists files which are in a directory specified by the path. - - :param str path: path to directory - """ - return os.listdir(path) - - -def _path_exists(path): - """ - Checks if the path exists on the machine. - - :param str path: path to file/directory - """ - return os.path.exists(path) - - -def _read_file(path): - """ - Read a file line by line. - - :param str path: path to file - """ - with open(path, 'r') as f: - return f.readlines() - - -def _check_package(pkg): - """ - Checks if a package is installed and signed. - - :param str pkg: name of package - """ - return has_package(DistributionSignedRPM, pkg) - - -def directive_exists(name, line): - """ - Checks if directive exists in the line, but it is not - commented out. - - :param str name: name of directive - :param str line: line of file - """ - return line.lstrip().startswith(name) - - -def get_directive_value(name, line): - """ - Returns directive value. 
- - :param str name: name of directive - :param str line: line of file - """ - if directive_exists(name, line): - return line.lstrip().lstrip(name).lstrip().split(' ')[0].rstrip() - return None - - -def interface_script_check(check_path_func=_path_exists, - list_dir_func=_list_dir): - """ - Checks if any file is in /etc/cups/interfaces, which means there could be - print queues using interface script. - - :param func check_path_func: checks if /etc/cups/interfaces exists - :param func list_dir_func: lists contents of directory - """ - if ( - check_path_func('/etc/cups/interfaces') and - list_dir_func('/etc/cups/interfaces') - ): - return True - return False - - -def include_directive_check(read_func=_read_file): - """ - Checks if 'Include' directive is present. - - :param str paths: path to cupsd configuration file - :param func read_func: function for reading a file as lines - """ - included_files = ['/etc/cups/cupsd.conf'] - error_list = [] - - vetted_included_files = [] - while included_files: - # NOTE(ivasilev) Will be using stack to process last encountered include directives first - included_file = included_files.pop(-1) - try: - lines = read_func(included_file) - except IOError: - error_list.append('Error during reading file {}: file not' - ' found'.format(included_file)) - continue - # Append to the resulting list of vetted files if exception wasn't raised - vetted_included_files.append(included_file) - # Mark any other included file you find as need-to-be-validated - includes_to_process = [] - for line in lines: - value = get_directive_value('Include', line) - if value: - includes_to_process.append(value) - # NOTE(ivasilev) Add discovered Include directives to the stack in reversed order, so that they are processed - # in the same order they appeared in the file - included_files.extend(reversed(includes_to_process)) - - return (vetted_included_files, error_list) - - -def digest_directive_check(path, read_func=_read_file): - """ - Checks if AuthType or 
DefaultAuthType directives contain - Digest or BasicDigest values, which were removed. - - :param str path: path to configuration file - :param func read_func: function for reading the file - """ - lines = read_func(path) - - for line in lines: - for name in ['AuthType', 'DefaultAuthType']: - for value in ['Digest', 'BasicDigest']: - found_value = get_directive_value(name, line) - if found_value == value: - return True - return False - - -def ssl_directive_check(read_func=_read_file): - """ - Checks if ServerCertificate or ServerKey directives are - used in cups-files.conf. - - :param func read_func: function for reading the file - """ - lines = read_func('/etc/cups/cups-files.conf') - - for line in lines: - for name in ['ServerCertificate', 'ServerKey']: - value = get_directive_value(name, line) - if value: - return True - return False - - -def environment_setup_check(path, read_func=_read_file): - """ - Checks if PassEnv or SetEnv directives are used in configuration. - They were moved to cups-files.conf in newer CUPS due security - issues. - - :param str path: path to configuration file - :param func read_func: reads the file - """ - lines = read_func(path) - - for line in lines: - for name in ['SetEnv', 'PassEnv']: - value = get_directive_value(name, line) - if value: - return True - return False - - -def print_capabilities_check(path, read_func=_read_file): - """ - Checks if PrintcapFormat directive is used in configuration. - It was moved to cups-files.conf in newer CUPS. - - :param str path: path to configuration file - :param func read_func: reads the file - """ - lines = read_func(path) - - for line in lines: - value = get_directive_value('PrintcapFormat', line) - if value: - return True - return False - - -def _send_model(interface, digest, include, certkey, env, - printcap, include_files_list): - """ - Produces model of facts. 
- - :param bool interface: true if interface scripts are used - :param bool digest: true if BasicDigest/Digest values are used - :param bool include: true if Include directive is used - :param bool certkey: true if ServerCertificate/ServerKey directives are used - :param bool env: true if PassEnv/SetEnv directives are used - :param bool printcap: true if PrintcapFormat directive is used - :param list include_files_list: contains paths to included files - """ - api.produce(CupsChangedFeatures(interface=interface, - digest=digest, - include=include, - certkey=certkey, - env=env, - printcap=printcap, - include_files=include_files_list)) - - -def find_features(debug_log=api.current_logger().debug, - warn_log=api.current_logger().warn, - error_log=api.current_logger().error, - send_features=_send_model, - is_installed=_check_package, - read_func=_read_file, - check_path_func=_path_exists, - list_dir_func=_list_dir): - """ - Checks every feature which changed between CUPS 1.6.3 and CUPS - 2.2.6. - - :param func debug_log: function for debug logging - :param func error_log: function for error logging - :param func warn_log: function for warning logging - :param func send_features: produces CupsMigrationModel if necessary - :param func is_installed: check if the package is installed - :param func read_func: reads a file - :param func check_path_func: checks if the file exists - :param func list_dir_func: list files in a directory - """ - - if not is_installed('cups'): - return - - if ( - not check_path_func('/etc/cups/cupsd.conf') or - not check_path_func('/etc/cups/cups-files.conf') - ): - # seatbelt - it's expected as super rare to have malfunction cupsd :) - raise StopActorExecutionError('Core CUPS configuration files ' - 'are missing. 
CUPS installation ' - 'is corrupted, terminating the actor.') - - debug_log('Checking if CUPS configuration contains removed features.') - - digest = env = printcap = interface = certkey = include = False - - include_file_list, error_list = include_directive_check(read_func) - - if error_list: - warn_log('Following included files will not be appended to ' - 'cupsd.conf due attached error:' - + ''.join(['\n - {}'.format(err) for err in error_list])) - - if len(include_file_list) > 1: - include = True - - interface = interface_script_check(check_path_func, list_dir_func) - - for config_file in include_file_list: - - if not digest: - digest = digest_directive_check(config_file, read_func) - - if not env: - env = environment_setup_check(config_file, read_func) - - if not printcap: - printcap = print_capabilities_check(config_file, read_func) - - certkey = ssl_directive_check(read_func) - - if any([interface, digest, include, certkey, env, printcap]): - send_features(interface, digest, include, certkey, env, - printcap, include_file_list) diff --git a/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_digest_directive_check.py b/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_digest_directive_check.py deleted file mode 100644 index c00fabd5..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_digest_directive_check.py +++ /dev/null @@ -1,34 +0,0 @@ -import pytest - -from leapp.libraries.actor.cupsscanner import digest_directive_check - -testdata = ( - ('\n', False), - ('test\n', False), - ('AuthType Basic\n', False), - ('DefaultAuthType Basic\n', False), - ('#AuthType Digest\n', False), - ('#DefaultAuthType BasicDigest\n', False), - ('DefaultAuthType BasicDigest\n', True), - ('DefaultAuthType Digest\n', True), - ('AuthType Digest\n', True), - ('AuthType BasicDigest\n', True), - ('AuthType BasicDigest\nDefaultAuthType Digest\n', True), -) - - -class MockConfig(object): - def __init__(self, content): - self.content = content - - 
def read(self, path): - return self.content.splitlines(True) - - -@pytest.mark.parametrize("content,expected", testdata) -def test_digest_directive_check(content, expected): - config = MockConfig(content) - - ret = digest_directive_check('does_not_matter', config.read) - - assert ret == expected diff --git a/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_directive_exists.py b/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_directive_exists.py deleted file mode 100644 index d1066d01..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_directive_exists.py +++ /dev/null @@ -1,19 +0,0 @@ -import pytest - -from leapp.libraries.actor.cupsscanner import directive_exists - -testdata = ( - ('PrintcapFormat', 'ble', False), - ('PrintcapFormat', '', False), - ('PrintcapFormat', '#PrintcapFormat', False), - ('PrintcapFormat', 'PrintcapFormat', True), - ('PrintcapFormat', ' PrintcapFormat', True) -) - - -@pytest.mark.parametrize("string, line, expected", testdata) -def test_directive_exists(string, line, expected): - - ret = directive_exists(string, line) - - assert ret == expected diff --git a/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_environment_setup_check.py b/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_environment_setup_check.py deleted file mode 100644 index 78573eb7..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_environment_setup_check.py +++ /dev/null @@ -1,34 +0,0 @@ -import pytest - -from leapp.libraries.actor.cupsscanner import environment_setup_check - -testdata = ( - ('\n', False), - ('Something else\n', False), - ('#PassEnv smth\n', False), - (' #SetEnv smth\n', False), - ('PassEnv smth\n', True), - ('SetEnv smth\n', True), - ('PassEnv\n', False), - ('SetEnv\n', False), - ('PassEnv smth\nSetEnv smth\n', True) -) - - -class MockCUPSD(object): - def __init__(self, content): - self.content = content - - def read(self, path): - if path: - return 
self.content.splitlines(True) - return None - - -@pytest.mark.parametrize("content, expected", testdata) -def test_environment_setup_check(content, expected): - config = MockCUPSD(content) - - ret = environment_setup_check('does_not_matter', config.read) - - assert ret == expected diff --git a/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_find_features.py b/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_find_features.py deleted file mode 100644 index c7798c75..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_find_features.py +++ /dev/null @@ -1,387 +0,0 @@ -import pytest - -from leapp.exceptions import StopActorExecutionError -from leapp.libraries.actor.cupsscanner import find_features - -message = 'Checking if CUPS configuration contains removed features.' - -testdata = ( - ( - ['ble'], - {}, - {} - ), - ( - ['cups'], - { - '/etc/cups/cupsd.conf': '', - '/etc/cups/cups-files.conf': '' - }, - { - 'debug': message - } - ), - ( - ['cups'], - { - '/etc/cups/cupsd.conf': 'Include smth\n', - '/etc/cups/cups-files.conf': '', - 'smth': '' - }, - { - 'include': True, - 'digest': False, - 'interface': False, - 'env': False, - 'certkey': False, - 'printcap': False, - 'included_files': ['/etc/cups/cupsd.conf', 'smth'], - 'debug': message - } - ), - ( - ['cups'], - { - '/etc/cups/cupsd.conf': 'Include smth\n', - '/etc/cups/cups-files.conf': '', - }, - { - 'debug': message, - 'warn': 'Following included files will not be appended to cupsd.c' - 'onf due attached error:\n - Error during reading file smth: file not found' - } - ), - ( - ['cups'], - { - '/etc/cups/cupsd.conf': 'Include smth\nInclude smb\n', - '/etc/cups/cups-files.conf': '', - 'smth': '', - 'smb': '' - }, - { - 'include': True, - 'digest': False, - 'interface': False, - 'env': False, - 'certkey': False, - 'printcap': False, - 'included_files': ['/etc/cups/cupsd.conf', 'smth', 'smb'], - 'debug': message - } - ), - ( - ['cups'], - { - '/etc/cups/cupsd.conf': 
'Include smth\n', - '/etc/cups/cups-files.conf': '', - 'smth': 'AuthType Digest\nPassEnv smth\nPrintcapFormat neco\n' - }, - { - 'include': True, - 'digest': True, - 'interface': False, - 'env': True, - 'certkey': False, - 'printcap': True, - 'included_files': ['/etc/cups/cupsd.conf', 'smth'], - 'debug': message - } - ), - ( - ['cups'], - { - '/etc/cups/cupsd.conf': '', - '/etc/cups/cups-files.conf': 'ServerKey smth.key\n', - 'smth.key': '' - }, - { - 'include': False, - 'digest': False, - 'interface': False, - 'env': False, - 'certkey': True, - 'printcap': False, - 'included_files': ['/etc/cups/cupsd.conf'], - 'debug': message - } - ), - ( - ['cups'], - { - '/etc/cups/cupsd.conf': '', - '/etc/cups/cups-files.conf': 'ServerCertificate smth.cert\n', - 'smth.cert': '' - }, - { - 'include': False, - 'digest': False, - 'interface': False, - 'env': False, - 'certkey': True, - 'printcap': False, - 'included_files': ['/etc/cups/cupsd.conf'], - 'debug': message - } - ), - ( - ['cups'], - { - '/etc/cups/cupsd.conf': '', - '/etc/cups/cups-files.conf': 'ServerKey smth.key\n' - 'ServerCertificate smth.cert\n', - 'smth.key': '', - 'smth.cert': '' - }, - { - 'include': False, - 'digest': False, - 'interface': False, - 'env': False, - 'certkey': True, - 'printcap': False, - 'included_files': ['/etc/cups/cupsd.conf'], - 'debug': message - } - ), - ( - ['cups'], - { - '/etc/cups/cupsd.conf': 'AuthType Digest\n', - '/etc/cups/cups-files.conf': '', - }, - { - 'include': False, - 'digest': True, - 'interface': False, - 'env': False, - 'certkey': False, - 'printcap': False, - 'included_files': ['/etc/cups/cupsd.conf'], - 'debug': message - } - ), - ( - ['cups'], - { - '/etc/cups/cupsd.conf': 'DefaultAuthType BasicDigest\n', - '/etc/cups/cups-files.conf': '', - }, - { - 'include': False, - 'digest': True, - 'interface': False, - 'env': False, - 'certkey': False, - 'printcap': False, - 'included_files': ['/etc/cups/cupsd.conf'], - 'debug': message - } - ), - ( - ['cups'], - { - 
'/etc/cups/cupsd.conf': 'PassEnv smth\n', - '/etc/cups/cups-files.conf': '', - }, - { - 'include': False, - 'digest': False, - 'interface': False, - 'env': True, - 'certkey': False, - 'printcap': False, - 'included_files': ['/etc/cups/cupsd.conf'], - 'debug': message - } - ), - ( - ['cups'], - { - '/etc/cups/cupsd.conf': 'SetEnv smth\n', - '/etc/cups/cups-files.conf': '', - }, - { - 'include': False, - 'digest': False, - 'interface': False, - 'env': True, - 'certkey': False, - 'printcap': False, - 'included_files': ['/etc/cups/cupsd.conf'], - 'debug': message - } - ), - ( - ['cups'], - { - '/etc/cups/cupsd.conf': 'PrintcapFormat smth\n', - '/etc/cups/cups-files.conf': '', - }, - { - 'include': False, - 'digest': False, - 'interface': False, - 'env': False, - 'certkey': False, - 'printcap': True, - 'included_files': ['/etc/cups/cupsd.conf'], - 'debug': message - } - ), - ( - ['cups'], - { - '/etc/cups/cupsd.conf': '', - '/etc/cups/cups-files.conf': '', - '/etc/cups/interfaces': [] - }, - { - 'debug': message - } - ), - ( - ['cups'], - { - '/etc/cups/cupsd.conf': '', - '/etc/cups/cups-files.conf': '', - '/etc/cups/interfaces': ['smth', 'anything'], - 'smth': '', - 'anything': '' - }, - { - 'include': False, - 'digest': False, - 'interface': True, - 'env': False, - 'certkey': False, - 'printcap': False, - 'included_files': ['/etc/cups/cupsd.conf'], - 'debug': message - } - ), - ( - ['cups'], - { - '/etc/cups/cupsd.conf': 'Include mst\nAuthType Digest\n' - 'PassEnv too\nPrintcapFormat poo\n', - '/etc/cups/cups-files.conf': 'ServerKey my.key\n' - 'ServerCertificate my.cert\n', - '/etc/cups/interfaces': ['smth', 'anything'], - 'smth': '', - 'anything': '', - 'mst': '' - }, - { - 'include': True, - 'digest': True, - 'interface': True, - 'env': True, - 'certkey': True, - 'printcap': True, - 'included_files': ['/etc/cups/cupsd.conf', 'mst'], - 'debug': message - } - ) -) - - -class MockActor(object): - def __init__(self): - self.output = {} - - def send_features(self, 
interface, digest, include, certkey, env, - printcap, included_files): - self.output['interface'] = interface - self.output['digest'] = digest - self.output['include'] = include - self.output['certkey'] = certkey - self.output['env'] = env - self.output['printcap'] = printcap - self.output['included_files'] = included_files - - -class MockLogger(object): - def __init__(self): - self.debugmsg = '' - self.warnmsg = '' - self.errormsg = '' - - def debug(self, message): - self.debugmsg += message - - def error(self, message): - self.errormsg += message - - def warn(self, message): - self.warnmsg += message - - -class MockFileSystem(object): - def __init__(self, packages, files): - self.installed_packages = packages - self.files = files - - def is_installed(self, pkg): - if pkg in self.installed_packages: - return True - return False - - def read(self, path): - if path in self.files.keys(): - return self.files[path].splitlines(True) - raise IOError('Error during reading file {} - file' - ' not found.'.format(path)) - - def path_exists(self, path): - if path in self.files.keys(): - return True - return False - - def list_dir(self, path): - if path in self.files.keys(): - return self.files[path] - return False - - -def test_find_features_exception(): - logger = MockLogger() - system = MockFileSystem(['cups'], {}) - actor = MockActor() - - with pytest.raises(StopActorExecutionError): - find_features(logger.debug, - logger.warn, - logger.error, - actor.send_features, - system.is_installed, - system.read, - system.path_exists, - system.list_dir) - - -@pytest.mark.parametrize(("packages,files,expected"), testdata) -def test_find_features(packages, files, expected): - logger = MockLogger() - system = MockFileSystem(packages, files) - actor = MockActor() - - find_features(logger.debug, - logger.warn, - logger.error, - actor.send_features, - system.is_installed, - system.read, - system.path_exists, - system.list_dir) - - assert actor.output.get('interface', None) == 
expected.get('interface', None) - assert actor.output.get('digest', None) == expected.get('digest', None) - assert actor.output.get('include', None) == expected.get('include', None) - assert actor.output.get('certkey', None) == expected.get('certkey', None) - assert actor.output.get('env', None) == expected.get('env', None) - assert actor.output.get('printcap', None) == expected.get('printcap', None) - assert actor.output.get('included_files', None) == expected.get('included_files', None) - assert logger.debugmsg == expected.get('debug', '') - assert logger.warnmsg == expected.get('warn', '') - assert logger.errormsg == expected.get('error', '') diff --git a/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_get_directive_value_check.py b/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_get_directive_value_check.py deleted file mode 100644 index 8b95c39d..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_get_directive_value_check.py +++ /dev/null @@ -1,20 +0,0 @@ -import pytest - -from leapp.libraries.actor.cupsscanner import get_directive_value - -testdata = ( - ('Include', 'Include smth', 'smth'), - ('Include', 'something_else', None), - ('Include', 'Include', ''), - ('Include', '#Include smth', None), - ('Include', ' Include smth', 'smth'), - ('Include', ' Include smth anything', 'smth'), -) - - -@pytest.mark.parametrize('string, line, expected', testdata) -def test_get_directive_value(string, line, expected): - - value = get_directive_value(string, line) - - assert value == expected diff --git a/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_include_directive_check.py b/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_include_directive_check.py deleted file mode 100644 index 83b849cb..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_include_directive_check.py +++ /dev/null @@ -1,102 +0,0 @@ -import pytest - -from leapp.libraries.actor.cupsscanner import 
include_directive_check - -testdata = ( - ( - { - '/etc/cups/cupsd.conf': '\n' - }, - { - 'included_files': ['/etc/cups/cupsd.conf'], - } - ), - ( - { - '/etc/cups/cupsd.conf': 'Include smth.conf\n', - 'smth.conf': '\n' - }, - { - 'included_files': ['/etc/cups/cupsd.conf', 'smth.conf'], - } - ), - ( - { - '/etc/cups/cupsd.conf': 'Include smth.conf\nInclude smb.conf\n', - 'smth.conf': '\n', - 'smb.conf': '\n' - }, - { - 'included_files': ['/etc/cups/cupsd.conf', 'smth.conf', - 'smb.conf'], - } - ), - ( - { - '/etc/cups/cupsd.conf': 'Include smth.conf\n', - 'smth.conf': 'Include smb.conf\n', - 'smb.conf': '\n' - }, - { - 'included_files': ['/etc/cups/cupsd.conf', 'smth.conf', - 'smb.conf'], - } - ), - ( - { - '/etc/cups/cupsd.conf': 'Include smth.conf\n', - 'smth.conf': 'Include smb.conf\n', - 'smb.conf': 'Include any.conf\n', - 'any.conf': '\n' - }, - { - 'included_files': ['/etc/cups/cupsd.conf', 'smth.conf', - 'smb.conf', 'any.conf'], - } - ), - ( - { - '/etc/cups/cupsd.conf': '#Include smth.conf' - }, - { - 'included_files': ['/etc/cups/cupsd.conf'] - } - ), - ( - { - '/etc/cups/cupsd.conf': 'Include\n' - }, - { - 'included_files': ['/etc/cups/cupsd.conf'], - } - ), - ( - { - '/etc/cups/cupsd.conf': ' Include smth.conf smth_more\n', - 'smth.conf': '\n' - }, - { - 'included_files': ['/etc/cups/cupsd.conf', 'smth.conf'] - } - ) -) - - -class MockFileSystem(object): - def __init__(self, infiles): - self.files = infiles - - def read(self, path): - if path in self.files.keys(): - return self.files[path].splitlines(True) - raise IOError('Error during reading file.') - - -@pytest.mark.parametrize("files,expected", testdata) -def test_include_directive_check(files, expected): - f = MockFileSystem(files) - - included_files, error_list = include_directive_check(read_func=f.read) - - assert included_files == expected.get('included_files', []) - assert error_list == expected.get('error_list', []) diff --git 
a/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_interface_script_check.py b/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_interface_script_check.py deleted file mode 100644 index dc4e3fd7..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_interface_script_check.py +++ /dev/null @@ -1,35 +0,0 @@ -import pytest - -from leapp.libraries.actor.cupsscanner import interface_script_check - -testdata = ( - ('bla', [], False), - ('/etc/cups/interfaces', [], False), - ('/etc/cups/interfaces', ['smth'], True), -) - - -class MockFilesystem(object): - def __init__(self, path, files): - self.path = path - self.files = files - - def check_path(self, path): - if self.path == path: - return True - return False - - def list_dir(self, path): - if self.path == path: - return self.files - return [] - - -@pytest.mark.parametrize("path,files,expected", testdata) -def test_interface_script_check(path, files, expected): - filesystem = MockFilesystem(path, files) - - ret = interface_script_check(filesystem.check_path, - filesystem.list_dir) - - assert ret == expected diff --git a/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_print_capabilities_check.py b/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_print_capabilities_check.py deleted file mode 100644 index b3495de3..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_print_capabilities_check.py +++ /dev/null @@ -1,30 +0,0 @@ -import pytest - -from leapp.libraries.actor.cupsscanner import print_capabilities_check - -testdata = ( - ('\n', False), - ('Something else\n', False), - ('#PrintcapFormat smth\n', False), - ('PrintcapFormat\n', False), - ('PrintcapFormat smth\n', True) -) - - -class MockCUPSD(object): - def __init__(self, content): - self.content = content - - def read(self, path): - if path: - return self.content.splitlines(True) - return None - - -@pytest.mark.parametrize("content, expected", testdata) -def 
test_print_capabilities_check(content, expected): - config = MockCUPSD(content) - - ret = print_capabilities_check('does_not_matter', config.read) - - assert ret == expected diff --git a/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_ssl_directive_check.py b/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_ssl_directive_check.py deleted file mode 100644 index 6dfc8080..00000000 --- a/repos/system_upgrade/el7toel8/actors/cupsscanner/tests/test_ssl_directive_check.py +++ /dev/null @@ -1,32 +0,0 @@ -import pytest - -from leapp.libraries.actor.cupsscanner import ssl_directive_check - -testdata = ( - ('\n', False), - ('smth\n', False), - ('#ServerCertificate my.crt\n', False), - ('#ServerKey my.key\n', False), - ('ServerCertificate\n', False), - ('ServerKey\n', False), - ('ServerKey my.key\n', True), - ('ServerCertificate my.crt\n', True), - ('ServerCertificate my.crt\nServerKey my.key\n', True) -) - - -class MockCupsfiles(object): - def __init__(self, content): - self.content = content - - def read(self, path): - return self.content.splitlines(True) - - -@pytest.mark.parametrize("content,expected", testdata) -def test_ssl_directive_check(content, expected): - config = MockCupsfiles(content) - - ret = ssl_directive_check(config.read) - - assert ret == expected diff --git a/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/actor.py b/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/actor.py deleted file mode 100644 index 4928710e..00000000 --- a/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/actor.py +++ /dev/null @@ -1,21 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import enabledeviceciofreeservice -from leapp.models import SystemdServicesTasks -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class EnableDeviceCioFreeService(Actor): - """ - Enables device_cio_free.service systemd service on s390x - - After an upgrade this service ends up disabled even 
though it's vendor preset is set to enabled. - The service is used to enable devices which are not explicitly enabled on the kernel command line. - """ - - name = 'enable_device_cio_free_service' - consumes = () - produces = (SystemdServicesTasks,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - enabledeviceciofreeservice.process() diff --git a/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/libraries/enabledeviceciofreeservice.py b/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/libraries/enabledeviceciofreeservice.py deleted file mode 100644 index 97e36f10..00000000 --- a/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/libraries/enabledeviceciofreeservice.py +++ /dev/null @@ -1,8 +0,0 @@ -from leapp.libraries.common.config import architecture -from leapp.libraries.stdlib import api -from leapp.models import SystemdServicesTasks - - -def process(): - if architecture.matches_architecture(architecture.ARCH_S390X): - api.produce(SystemdServicesTasks(to_enable=['device_cio_free.service'])) diff --git a/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/tests/test_enableddeviceciofreeservice.py b/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/tests/test_enableddeviceciofreeservice.py deleted file mode 100644 index 42527595..00000000 --- a/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/tests/test_enableddeviceciofreeservice.py +++ /dev/null @@ -1,32 +0,0 @@ -import pytest - -from leapp.libraries.actor import enabledeviceciofreeservice -from leapp.libraries.common.config import architecture -from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked -from leapp.libraries.stdlib import api -from leapp.models import SystemdServicesTasks - - -def test_task_produced_on_s390(monkeypatch): - monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch=architecture.ARCH_S390X)) - monkeypatch.setattr(api, 
"produce", produce_mocked()) - - enabledeviceciofreeservice.process() - - assert api.produce.called - assert isinstance(api.produce.model_instances[0], SystemdServicesTasks) - assert api.produce.model_instances[0].to_enable == ['device_cio_free.service'] - - -@pytest.mark.parametrize('arch', [ - architecture.ARCH_X86_64, - architecture.ARCH_ARM64, - architecture.ARCH_PPC64LE, -]) -def test_task_not_produced_on_non_s390(monkeypatch, arch): - monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch=arch)) - monkeypatch.setattr(api, "produce", produce_mocked()) - - enabledeviceciofreeservice.process() - - assert not api.produce.called diff --git a/repos/system_upgrade/el7toel8/actors/firewalldfactsactor/actor.py b/repos/system_upgrade/el7toel8/actors/firewalldfactsactor/actor.py deleted file mode 100644 index 1d8c1ad1..00000000 --- a/repos/system_upgrade/el7toel8/actors/firewalldfactsactor/actor.py +++ /dev/null @@ -1,56 +0,0 @@ -import os -from xml.etree import ElementTree -from xml.etree.ElementTree import ParseError - -from leapp.actors import Actor -from leapp.libraries.actor import firewalldfactsactor -from leapp.models import FirewalldFacts -from leapp.tags import FactsPhaseTag, IPUWorkflowTag - - -class FirewalldFactsActor(Actor): - """ - Provide data about firewalld - - After collecting data, a message with relevant data will be produced. 
- """ - - name = 'firewalld_facts_actor' - consumes = () - produces = (FirewalldFacts,) - tags = (FactsPhaseTag, IPUWorkflowTag) - - def process(self): - facts = FirewalldFacts() - - try: - tree = ElementTree.parse('/etc/firewalld/lockdown-whitelist.xml') - root = tree.getroot() - facts.firewall_config_command = firewalldfactsactor.getLockdownFirewallConfigCommand(root) - except (ParseError, IOError): - pass - - try: - tree = ElementTree.parse('/etc/firewalld/direct.xml') - root = tree.getroot() - facts.ebtablesTablesInUse = firewalldfactsactor.getEbtablesTablesInUse(root) - except (ParseError, IOError): - pass - - ipsetTypesInUse = set() - directory = '/etc/firewalld/ipsets' - try: - for filename in os.listdir(directory): - if not filename.endswith('.xml'): - continue - try: - tree = ElementTree.parse(os.path.join(directory, filename)) - root = tree.getroot() - ipsetTypesInUse |= set(firewalldfactsactor.getIpsetTypesInUse(root)) - except (ParseError, IOError): - pass - facts.ipsetTypesInUse = list(ipsetTypesInUse) - except OSError: - pass - - self.produce(facts) diff --git a/repos/system_upgrade/el7toel8/actors/firewalldfactsactor/libraries/firewalldfactsactor.py b/repos/system_upgrade/el7toel8/actors/firewalldfactsactor/libraries/firewalldfactsactor.py deleted file mode 100644 index 75fd075a..00000000 --- a/repos/system_upgrade/el7toel8/actors/firewalldfactsactor/libraries/firewalldfactsactor.py +++ /dev/null @@ -1,35 +0,0 @@ -def getLockdownFirewallConfigCommand(root): - for command in root.iter('command'): - if 'name' in command.attrib and \ - '/usr/bin/firewall-config' in command.attrib['name']: - return command.attrib['name'] - - return '' - - -def getEbtablesTablesInUse(root): - tables = [] - for rule in root.iter('rule'): - if 'ipv' in rule.attrib and rule.attrib['ipv'] == 'eb' and \ - 'table' in rule.attrib and rule.attrib['table'] not in tables: - tables.append(rule.attrib['table']) - - for passthrough in root.iter('passthrough'): - if 'ipv' in 
passthrough.attrib and passthrough.attrib['ipv'] == 'eb': - rule = passthrough.text.split() - try: - i = rule.index('-t') - if rule[i + 1] not in tables: - tables.append(rule[i + 1]) - except ValueError: - pass - - return tables - - -def getIpsetTypesInUse(root): - types = [] - for ipset in root.iter('ipset'): - if 'type' in ipset.attrib and ipset.attrib['type'] not in types: - types.append(ipset.attrib['type']) - return types diff --git a/repos/system_upgrade/el7toel8/actors/firewalldfactsactor/tests/component_test_firewalldfactsactor.py b/repos/system_upgrade/el7toel8/actors/firewalldfactsactor/tests/component_test_firewalldfactsactor.py deleted file mode 100644 index 047f03e2..00000000 --- a/repos/system_upgrade/el7toel8/actors/firewalldfactsactor/tests/component_test_firewalldfactsactor.py +++ /dev/null @@ -1,6 +0,0 @@ -from leapp.models import FirewalldFacts - - -def test_actor_execution(current_actor_context): - current_actor_context.run() - assert current_actor_context.consume(FirewalldFacts) diff --git a/repos/system_upgrade/el7toel8/actors/firewalldfactsactor/tests/unit_test_firewalldfactsactor.py b/repos/system_upgrade/el7toel8/actors/firewalldfactsactor/tests/unit_test_firewalldfactsactor.py deleted file mode 100644 index 82728688..00000000 --- a/repos/system_upgrade/el7toel8/actors/firewalldfactsactor/tests/unit_test_firewalldfactsactor.py +++ /dev/null @@ -1,97 +0,0 @@ -import xml.etree.ElementTree as ElementTree -from xml.etree.ElementTree import ParseError - -from leapp.libraries.actor import firewalldfactsactor -from leapp.models import FirewalldFacts - - -def test_firewalldfactsactor_direct(): - root = ElementTree.fromstring( - ''' - - -t broute -I BROUTING 1 -j ACCEPT - - ''') - assert firewalldfactsactor.getEbtablesTablesInUse(root) == ['broute'] - - root = ElementTree.fromstring( - ''' - - -j ACCEPT - - ''') - assert firewalldfactsactor.getEbtablesTablesInUse(root) == ['broute'] - - root = ElementTree.fromstring( - ''' - - -j ACCEPT - -j ACCEPT 
- -t nat -I PREROUTING 1 -j ACCEPT - - ''' - ) - assert set(firewalldfactsactor.getEbtablesTablesInUse(root)) == set(['broute', 'nat']) - - # emulate a parse error - facts = FirewalldFacts() - try: - raise ParseError() - except ParseError: - assert not facts.ebtablesTablesInUse - - -def test_firewalldfactsactor_firewallConfigCommand(): - root = ElementTree.fromstring( - ''' - - - - - - - - ''' - ) - assert firewalldfactsactor.getLockdownFirewallConfigCommand(root) == '/usr/bin/python -Es /usr/bin/firewall-config' - - root = ElementTree.fromstring( - ''' - - - - ''') - assert firewalldfactsactor.getLockdownFirewallConfigCommand(root) == '' - - root = ElementTree.fromstring( - ''' - - - - - - - ''') - EXP_RESULT = '/usr/libexec/platform-python -s /usr/bin/firewall-config' - assert firewalldfactsactor.getLockdownFirewallConfigCommand(root) == EXP_RESULT - - -def test_firewalldfactsactor_ipsetTypes(): - root = ElementTree.fromstring( - ''' - - My Ipset - description - 1.2.3.4 - - ''') - assert firewalldfactsactor.getIpsetTypesInUse(root) == ['hash:ip'] - - root = ElementTree.fromstring( - ''' - - My Ipset - description - - ''') - assert firewalldfactsactor.getIpsetTypesInUse(root) == ['hash:net,port'] diff --git a/repos/system_upgrade/el7toel8/actors/firewalldupdatelockdownwhitelist/actor.py b/repos/system_upgrade/el7toel8/actors/firewalldupdatelockdownwhitelist/actor.py deleted file mode 100644 index 2a89fc79..00000000 --- a/repos/system_upgrade/el7toel8/actors/firewalldupdatelockdownwhitelist/actor.py +++ /dev/null @@ -1,39 +0,0 @@ -import xml.etree.ElementTree as ElementTree - -from leapp.actors import Actor -from leapp.libraries.actor import firewalldupdatelockdownwhitelist -from leapp.models import FirewalldFacts -from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag - - -class FirewalldUpdateLockdownWhitelist(Actor): - """ - Update the firewalld Lockdown Whitelist. - - RHEL-8 uses a platform specific python interpreter for packaged - applications. 
For firewall-config, the interpreter path is part of the - lockdown list. In RHEL-7 this was simply /usr/bin/python, but in RHEL-8 - it's /usr/libexec/platform-python. However, if the user made changes to the - lockdown whitelist it won't be replaced by RPM/dnf. As such we must update - the interpreter if the old value is there. - """ - - name = 'firewalld_update_lockdown_whitelist' - consumes = (FirewalldFacts,) - produces = () - tags = (ApplicationsPhaseTag, IPUWorkflowTag) - - def process(self): - for facts in self.consume(FirewalldFacts): - if facts.firewall_config_command: - tree = ElementTree.parse('/etc/firewalld/lockdown-whitelist.xml') - root = tree.getroot() - - need_write = firewalldupdatelockdownwhitelist.updateFirewallConfigCommand( - root, - facts.firewall_config_command - ) - - if need_write: - tree.write('/etc/firewalld/lockdown-whitelist.xml') - self.log.info('Updated lockdown whitelist') diff --git a/repos/system_upgrade/el7toel8/actors/firewalldupdatelockdownwhitelist/libraries/firewalldupdatelockdownwhitelist.py b/repos/system_upgrade/el7toel8/actors/firewalldupdatelockdownwhitelist/libraries/firewalldupdatelockdownwhitelist.py deleted file mode 100644 index 12417788..00000000 --- a/repos/system_upgrade/el7toel8/actors/firewalldupdatelockdownwhitelist/libraries/firewalldupdatelockdownwhitelist.py +++ /dev/null @@ -1,13 +0,0 @@ -def updateFirewallConfigCommand(root, old_command): - changed = False - - # Only update the command element that corresponds to firewall-config - new_command = '/usr/libexec/platform-python -s /usr/bin/firewall-config' - for command in root.iter('command'): - if 'name' in command.attrib and \ - old_command == command.attrib['name'] and \ - old_command != new_command: - command.attrib['name'] = new_command - changed = True - - return changed diff --git a/repos/system_upgrade/el7toel8/actors/firewalldupdatelockdownwhitelist/tests/unit_test_firewalldupdatelockdownwhitelist.py 
b/repos/system_upgrade/el7toel8/actors/firewalldupdatelockdownwhitelist/tests/unit_test_firewalldupdatelockdownwhitelist.py deleted file mode 100644 index dc0087eb..00000000 --- a/repos/system_upgrade/el7toel8/actors/firewalldupdatelockdownwhitelist/tests/unit_test_firewalldupdatelockdownwhitelist.py +++ /dev/null @@ -1,51 +0,0 @@ -import xml.etree.ElementTree as ElementTree - -from leapp.libraries.actor import firewalldupdatelockdownwhitelist - - -def test_firewalldupdatelockdownwhitelist_library(): - root = ElementTree.fromstring( - ''' - - - - - - - - ''') - - assert firewalldupdatelockdownwhitelist.updateFirewallConfigCommand( - root, - '/usr/bin/python -Es /usr/bin/firewall-config' - ) - - -def test_firewalldupdatelockdownwhitelist_library_negative(): - root = ElementTree.fromstring( - ''' - - - - ''') - - assert not firewalldupdatelockdownwhitelist.updateFirewallConfigCommand(root, '') - assert not firewalldupdatelockdownwhitelist.updateFirewallConfigCommand( - root, - '/usr/bin/python -Es /usr/bin/firewall-config' - ) - - root = ElementTree.fromstring( - ''' - - - - - - - ''') - - assert not firewalldupdatelockdownwhitelist.updateFirewallConfigCommand( - root, - '/usr/libexec/platform-python -s /usr/bin/firewall-config' - ) diff --git a/repos/system_upgrade/el7toel8/actors/kernel/checkinstalleddebugkernels/checkinstalleddebugkernels/actor.py b/repos/system_upgrade/el7toel8/actors/kernel/checkinstalleddebugkernels/checkinstalleddebugkernels/actor.py deleted file mode 100644 index acd2d986..00000000 --- a/repos/system_upgrade/el7toel8/actors/kernel/checkinstalleddebugkernels/checkinstalleddebugkernels/actor.py +++ /dev/null @@ -1,23 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import checkinstalleddebugkernels -from leapp.models import DistributionSignedRPM -from leapp.reporting import Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class CheckInstalledDebugKernels(Actor): - """ - Inhibit IPU (in-place upgrade) when 
multiple debug kernels are installed. - - Because of an issue in DNF, the transaction can't be validated if there's - more than one package named kernel-debug. Therefore, in this case, we - inhibit the upgrade with a clearer remediation. - """ - - name = 'check_installed_debug_kernels' - consumes = (DistributionSignedRPM,) - produces = (Report,) - tags = (IPUWorkflowTag, ChecksPhaseTag) - - def process(self): - checkinstalleddebugkernels.process() diff --git a/repos/system_upgrade/el7toel8/actors/kernel/checkinstalleddebugkernels/checkinstalleddebugkernels/libraries/checkinstalleddebugkernels.py b/repos/system_upgrade/el7toel8/actors/kernel/checkinstalleddebugkernels/checkinstalleddebugkernels/libraries/checkinstalleddebugkernels.py deleted file mode 100644 index 15b7b79e..00000000 --- a/repos/system_upgrade/el7toel8/actors/kernel/checkinstalleddebugkernels/checkinstalleddebugkernels/libraries/checkinstalleddebugkernels.py +++ /dev/null @@ -1,42 +0,0 @@ -from leapp import reporting -from leapp.libraries.stdlib import api -from leapp.models import DistributionSignedRPM - - -def get_kernel_rpm_release(rpm): - """ - Get the release of a kernel RPM as an integer. - - :param rpm: An instance of an RPM derived model. - """ - return int(rpm.release.split('.')[0]) - - -def get_kernel_debug_rpms(): - """ - Get all installed kernel-debug packages ordered by release number (ascending). - """ - rpms = next(api.consume(DistributionSignedRPM), DistributionSignedRPM()) - return sorted([pkg for pkg in rpms.items if pkg.name == 'kernel-debug'], key=get_kernel_rpm_release) - - -def process(): - pkgs = get_kernel_debug_rpms() - if len(pkgs) > 1: - title = 'Multiple debug kernels installed' - summary = ('DNF cannot produce a valid upgrade transaction when' - ' multiple kernel-debug packages are installed.') - hint = 'Remove all but one kernel-debug packages before running Leapp again.' 
- all_but_latest_kernel_debug = pkgs[:-1] - packages = ['{n}-{v}-{r}'.format(n=pkg.name, v=pkg.version, r=pkg.release) - for pkg in all_but_latest_kernel_debug] - commands = [['yum', '-y', 'remove'] + packages] - reporting.create_report([ - reporting.Title(title), - reporting.Summary(summary), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([reporting.Groups.KERNEL]), - reporting.Groups([reporting.Groups.INHIBITOR]), - reporting.Remediation(hint=hint, commands=commands), - reporting.RelatedResource('package', 'kernel-debug') - ]) diff --git a/repos/system_upgrade/el7toel8/actors/kernel/checkinstalleddebugkernels/checkinstalleddebugkernels/tests/unit_test_checkinstalleddebugkernels.py b/repos/system_upgrade/el7toel8/actors/kernel/checkinstalleddebugkernels/checkinstalleddebugkernels/tests/unit_test_checkinstalleddebugkernels.py deleted file mode 100644 index 86ec4c89..00000000 --- a/repos/system_upgrade/el7toel8/actors/kernel/checkinstalleddebugkernels/checkinstalleddebugkernels/tests/unit_test_checkinstalleddebugkernels.py +++ /dev/null @@ -1,35 +0,0 @@ -import pytest - -from leapp.models import DistributionSignedRPM, Report, RPM -from leapp.snactor.fixture import current_actor_context - -RH_PACKAGER = 'Red Hat, Inc. 
' - -ballast1 = [ - RPM(name='b1', version='1', release='1', epoch='1', packager=RH_PACKAGER, arch='noarch', pgpsig='s'), - RPM(name='kernel', version='1', release='1', epoch='1', packager=RH_PACKAGER, arch='noarch', pgpsig='s'), - RPM(name='b2', version='1', release='1', epoch='1', packager=RH_PACKAGER, arch='noarch', pgpsig='s') -] -ballast2 = [ - RPM(name='b3', version='1', release='1', epoch='1', packager=RH_PACKAGER, arch='noarch', pgpsig='s'), - RPM(name='kernel', version='1', release='1', epoch='1', packager=RH_PACKAGER, arch='noarch', pgpsig='s'), - RPM(name='b4', version='1', release='1', epoch='1', packager=RH_PACKAGER, arch='noarch', pgpsig='s') -] -debug_kernels = [ - RPM(name='kernel-debug', version='3.10.0', release='957.27.4.el7', - epoch='1', packager=RH_PACKAGER, arch='noarch', pgpsig='s'), - RPM(name='kernel-debug', version='3.10.0', release='957.35.1.el7', - epoch='1', packager=RH_PACKAGER, arch='noarch', pgpsig='s'), - RPM(name='kernel-debug', version='3.10.0', release='957.43.1.el7', - epoch='1', packager=RH_PACKAGER, arch='noarch', pgpsig='s') -] - - -@pytest.mark.parametrize('n', [0, 1, 2, 3]) -def test_process_debug_kernels(current_actor_context, n): - current_actor_context.feed(DistributionSignedRPM(items=ballast1+debug_kernels[:n]+ballast2)) - current_actor_context.run() - if n < 2: - assert not current_actor_context.consume(Report) - else: - assert current_actor_context.consume(Report) diff --git a/repos/system_upgrade/el7toel8/actors/kernel/checkinstalleddevelkernels/checkinstalleddevelkernels/actor.py b/repos/system_upgrade/el7toel8/actors/kernel/checkinstalleddevelkernels/checkinstalleddevelkernels/actor.py deleted file mode 100644 index 4266323c..00000000 --- a/repos/system_upgrade/el7toel8/actors/kernel/checkinstalleddevelkernels/checkinstalleddevelkernels/actor.py +++ /dev/null @@ -1,23 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import checkinstalleddevelkernels -from leapp.models import 
DistributionSignedRPM -from leapp.reporting import Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class CheckInstalledDevelKernels(Actor): - """ - Inhibit IPU (in-place upgrade) when multiple devel kernels are installed. - - Because of an issue in DNF, the transaction can't be validated if there's - more than one package named kernel-devel. Therefore, in this case, we - inhibit the upgrade with a clearer remediation. - """ - - name = 'check_installed_devel_kernels' - consumes = (DistributionSignedRPM,) - produces = (Report,) - tags = (IPUWorkflowTag, ChecksPhaseTag) - - def process(self): - checkinstalleddevelkernels.process() diff --git a/repos/system_upgrade/el7toel8/actors/kernel/checkinstalleddevelkernels/checkinstalleddevelkernels/libraries/checkinstalleddevelkernels.py b/repos/system_upgrade/el7toel8/actors/kernel/checkinstalleddevelkernels/checkinstalleddevelkernels/libraries/checkinstalleddevelkernels.py deleted file mode 100644 index fa49092c..00000000 --- a/repos/system_upgrade/el7toel8/actors/kernel/checkinstalleddevelkernels/checkinstalleddevelkernels/libraries/checkinstalleddevelkernels.py +++ /dev/null @@ -1,46 +0,0 @@ -from leapp import reporting -from leapp.libraries.stdlib import api -from leapp.models import DistributionSignedRPM - - -def get_kernel_rpm_release(rpm): - """ - Get the release of a kernel RPM as an integer. - - :param rpm: An instance of an RPM derived model. - """ - return int(rpm.release.split('.')[0]) - - -def get_kernel_devel_rpms(): - """ - Get all installed kernel-devel packages ordered by release number (ascending). 
- """ - rpms = next(api.consume(DistributionSignedRPM), DistributionSignedRPM()) - return sorted([pkg for pkg in rpms.items if pkg.name == 'kernel-devel'], key=get_kernel_rpm_release) - - -def process(): - pkgs = get_kernel_devel_rpms() - if len(pkgs) > 1: - title = 'Multiple devel kernels installed' - summary = ('DNF cannot produce a valid upgrade transaction when' - ' multiple kernel-devel packages are installed.') - hint = ('Remove all but one kernel-devel packages before running Leapp again.') - all_but_latest_kernel_devel = pkgs[:-1] - packages = ['{n}-{v}-{r}'.format(n=pkg.name, v=pkg.version, r=pkg.release) - for pkg in all_but_latest_kernel_devel] - commands = [['yum', '-y', 'remove'] + packages] - reporting.create_report([ - reporting.Title(title), - reporting.Summary(summary), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([reporting.Groups.KERNEL]), - reporting.Groups([reporting.Groups.INHIBITOR]), - reporting.Remediation(hint=hint, commands=commands), - reporting.ExternalLink( - url='https://access.redhat.com/solutions/4723671', - title='leapp upgrade fails on kernel-devel packages' - ), - reporting.RelatedResource('package', 'kernel-devel') - ]) diff --git a/repos/system_upgrade/el7toel8/actors/kernel/checkinstalleddevelkernels/checkinstalleddevelkernels/tests/unit_test_checkinstalleddevelkernels.py b/repos/system_upgrade/el7toel8/actors/kernel/checkinstalleddevelkernels/checkinstalleddevelkernels/tests/unit_test_checkinstalleddevelkernels.py deleted file mode 100644 index d4f6b380..00000000 --- a/repos/system_upgrade/el7toel8/actors/kernel/checkinstalleddevelkernels/checkinstalleddevelkernels/tests/unit_test_checkinstalleddevelkernels.py +++ /dev/null @@ -1,35 +0,0 @@ -import pytest - -from leapp.models import DistributionSignedRPM, Report, RPM -from leapp.snactor.fixture import current_actor_context - -RH_PACKAGER = 'Red Hat, Inc. 
' - -ballast1 = [ - RPM(name='b1', version='1', release='1', epoch='1', packager=RH_PACKAGER, arch='noarch', pgpsig='s'), - RPM(name='kernel', version='1', release='1', epoch='1', packager=RH_PACKAGER, arch='noarch', pgpsig='s'), - RPM(name='b2', version='1', release='1', epoch='1', packager=RH_PACKAGER, arch='noarch', pgpsig='s') -] -ballast2 = [ - RPM(name='b3', version='1', release='1', epoch='1', packager=RH_PACKAGER, arch='noarch', pgpsig='s'), - RPM(name='kernel', version='1', release='1', epoch='1', packager=RH_PACKAGER, arch='noarch', pgpsig='s'), - RPM(name='b4', version='1', release='1', epoch='1', packager=RH_PACKAGER, arch='noarch', pgpsig='s') -] -devel_kernels = [ - RPM(name='kernel-devel', version='3.10.0', release='957.27.4.el7', - epoch='1', packager=RH_PACKAGER, arch='noarch', pgpsig='s'), - RPM(name='kernel-devel', version='3.10.0', release='957.35.1.el7', - epoch='1', packager=RH_PACKAGER, arch='noarch', pgpsig='s'), - RPM(name='kernel-devel', version='3.10.0', release='957.43.1.el7', - epoch='1', packager=RH_PACKAGER, arch='noarch', pgpsig='s') -] - - -@pytest.mark.parametrize('n', [0, 1, 2, 3]) -def test_process_devel_kernels(current_actor_context, n): - current_actor_context.feed(DistributionSignedRPM(items=ballast1+devel_kernels[:n]+ballast2)) - current_actor_context.run() - if n < 2: - assert not current_actor_context.consume(Report) - else: - assert current_actor_context.consume(Report) diff --git a/repos/system_upgrade/el7toel8/actors/migratebrltty/actor.py b/repos/system_upgrade/el7toel8/actors/migratebrltty/actor.py deleted file mode 100644 index d97e5382..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratebrltty/actor.py +++ /dev/null @@ -1,39 +0,0 @@ -from leapp import reporting -from leapp.actors import Actor -from leapp.libraries.actor import migratebrltty -from leapp.models import BrlttyMigrationDecision -from leapp.reporting import create_report, Report -from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag - - 
-class MigrateBrltty(Actor): - """ - Migrate brltty configuration files. - """ - - name = 'migrate_brltty' - consumes = (BrlttyMigrationDecision,) - produces = (Report,) - tags = (ApplicationsPhaseTag, IPUWorkflowTag) - - def process(self): - for decision in self.consume(BrlttyMigrationDecision): - report_summary = '' - migratebrltty.migrate_file(decision.migrate_file, decision.migrate_bt, decision.migrate_espeak) - if decision.migrate_bt: - report_summary = 'Unsupported aliases for bluetooth devices (\'bth:\' and \'bluez:\') was ' - report_summary += 'renamed to \'bluetooth:\' in {}' - report_summary = report_summary.format(', '.join(decision.migrate_file)) - if decision.migrate_espeak: - if report_summary: - report_summary += '\n' - report_summary += 'eSpeak speech driver was switched to eSpeak-NG in {}' - report_summary = report_summary.format(', '.join(decision.migrate_file)) - if decision.migrate_bt or decision.migrate_espeak: - create_report([ - reporting.Title('brltty configuration files migrated'), - reporting.Summary(report_summary), - reporting.Severity(reporting.Severity.LOW), - reporting.Groups([reporting.Groups.TOOLS, reporting.Groups.ACCESSIBILITY]), - reporting.RelatedResource('package', 'brltty') - ]) diff --git a/repos/system_upgrade/el7toel8/actors/migratebrltty/libraries/migratebrltty.py b/repos/system_upgrade/el7toel8/actors/migratebrltty/libraries/migratebrltty.py deleted file mode 100644 index 3c18a551..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratebrltty/libraries/migratebrltty.py +++ /dev/null @@ -1,25 +0,0 @@ -import re -import shutil - -BackupSuffix = '.bak' - - -def migrate_file(fn, migrate_bt, migrate_espeak): - # nothing to migrate - if not fn or (not migrate_bt and not migrate_espeak): - return - - # make backup - shutil.copy2(fn, fn + BackupSuffix) - - regex_bt = re.compile(r'\b(?:(?:bth)|(?:bluez))((?:[:\-][0-9a-fA-F]{2}){6})\b') - regex_espeak = re.compile(r'^(\s*speech-driver\s+)es\b') - - with open(fn, 'w') as 
file_out: - with open(fn + BackupSuffix) as file_in: - for line in file_in: - if migrate_bt and regex_bt.search(line): - line = regex_bt.sub(r'bluetooth\1', line) - elif migrate_espeak and regex_espeak.search(line): - line = regex_espeak.sub(r'\1en', line) - file_out.write(line) diff --git a/repos/system_upgrade/el7toel8/actors/migratebrltty/tests/component_test_migratebrltty.py b/repos/system_upgrade/el7toel8/actors/migratebrltty/tests/component_test_migratebrltty.py deleted file mode 100644 index 80532da1..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratebrltty/tests/component_test_migratebrltty.py +++ /dev/null @@ -1,29 +0,0 @@ -import pytest -from six import text_type - -from leapp.models import BrlttyMigrationDecision - - -@pytest.mark.parametrize('test_input,expected,migrate_bt,migrate_espeak', [ - ('braille-device bth:AB-cd:ef:01:23:45\n', 'braille-device bluetooth:AB-cd:ef:01:23:45', True, False), - ('braille-device bluez:AB-cd:ef:01:23:45\n', 'braille-device bluetooth:AB-cd:ef:01:23:45', True, False), - ('speech-driver es\n', 'speech-driver en', False, True), - ('braille-device bth:AB-cd:ef:01:23:45\nbraille-device bluez:AB-cd:ef:01:23:45\nspeech-driver es\n', - 'braille-device bluetooth:AB-cd:ef:01:23:45\nbraille-device bluetooth:AB-cd:ef:01:23:45\nspeech-driver es', - True, False), - ('braille-device bth:AB-cd:ef:01:23:45\nbraille-device bluez:AB-cd:ef:01:23:45\nspeech-driver es\n', - 'braille-device bth:AB-cd:ef:01:23:45\nbraille-device bluez:AB-cd:ef:01:23:45\nspeech-driver en', False, True), - ('braille-device bth:AB-cd:ef:01:23:45\nbraille-device bluez:AB-cd:ef:01:23:45\nspeech-driver es\n', - 'braille-device bluetooth:AB-cd:ef:01:23:45\nbraille-device bluetooth:AB-cd:ef:01:23:45\nspeech-driver en', - True, True), -]) -def test_actor_migrate(tmpdir, test_input, expected, migrate_bt, migrate_espeak, current_actor_context): - brltty_conf = text_type(tmpdir.join('brltty.conf')) - with open(brltty_conf, 'w') as file_out: - 
file_out.write(test_input) - current_actor_context.feed(BrlttyMigrationDecision(migrate_file=brltty_conf, migrate_bt=migrate_bt, - migrate_espeak=migrate_espeak)) - current_actor_context.run() - with open(brltty_conf, 'r') as file_in: - data = file_in.read().strip() - assert expected == data diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/Makefile b/repos/system_upgrade/el7toel8/actors/migratentp/Makefile deleted file mode 100644 index 41c04a4c..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -install-deps: - -yum install -y python-ipaddress diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/actor.py b/repos/system_upgrade/el7toel8/actors/migratentp/actor.py deleted file mode 100644 index effd51d7..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/actor.py +++ /dev/null @@ -1,19 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor.migratentp import migrate_ntp -from leapp.models import NtpMigrationDecision, Report -from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag - - -class MigrateNtp(Actor): - """ - Migrate ntp and/or ntpdate configuration to chrony. 
- """ - - name = 'migrate_ntp' - consumes = (NtpMigrationDecision,) - produces = (Report,) - tags = (ApplicationsPhaseTag, IPUWorkflowTag) - - def process(self): - for decision in self.consume(NtpMigrationDecision): - migrate_ntp(decision.migrate_services, decision.config_tgz64) diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py b/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py deleted file mode 100644 index 306ce09e..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py +++ /dev/null @@ -1,87 +0,0 @@ -import base64 -import io -import tarfile - -from leapp.libraries.stdlib import api, CalledProcessError, run - - -def extract_tgz64(s): - stream = io.BytesIO(base64.b64decode(s)) - tar = tarfile.open(fileobj=stream, mode='r:gz') - tar.extractall('/') - tar.close() - - -def enable_service(name): - try: - run(['systemctl', 'enable', '{}.service'.format(name)]) - except CalledProcessError: - api.current_logger().error('Could not enable {} service'.format(name)) - - -def write_file(name, content): - with open(name, 'w') as f: - f.write(content) - - -def ntp2chrony(root, ntp_conf, step_tickers): - # need to skip these on pylint to avoid "function already defined" if we move to the top of file - from leapp.libraries.actor import ntp2chrony # pylint: disable=import-outside-toplevel - - try: - ntp_configuration = ntp2chrony.NtpConfiguration(root, ntp_conf, step_tickers) - ntp_configuration.write_chrony_configuration('/etc/chrony.conf', '/etc/chrony.keys', - False, True) - except OSError as e: - api.current_logger().error('ntp2chrony failed: {}'.format(e)) - return False, set() - - # Return ignored lines from ntp.conf, except 'disable monitor' from - # the default ntp.conf - return True, set(ntp_configuration.ignored_lines) - set(['disable monitor']) - - -def migrate_ntp(migrate_services, config_tgz64): - # Map of ntp->chrony services and flag if using configuration - service_map = 
{'ntpd': ('chronyd', True), - 'ntpdate': ('chronyd', True), - 'ntp-wait': ('chrony-wait', False)} - - # Minimal secure ntp.conf with no sources to migrate ntpdate only - no_sources_directives = ( - '# This file was created to migrate ntpdate configuration to chrony\n' - '# without ntp configuration (ntpd service was disabled)\n' - 'driftfile /var/lib/ntp/drift\n' - 'restrict default ignore nomodify notrap nopeer noquery\n') - - if not migrate_services: - # Nothing to migrate - return - - migrate_configs = [] - for service in migrate_services: - if service not in service_map: - api.current_logger().error('Unknown service {}'.format(service)) - continue - enable_service(service_map[service][0]) - if service_map[service][1]: - migrate_configs.append(service) - - # Unpack archive with configuration files - extract_tgz64(config_tgz64) - - if 'ntpd' in migrate_configs: - ntp_conf = '/etc/ntp.conf' - else: - ntp_conf = '/etc/ntp.conf.nosources' - write_file(ntp_conf, no_sources_directives) - - step_tickers = '/etc/ntp/step-tickers' if 'ntpdate' in migrate_configs else '' - - conf_migrated, ignored_lines = ntp2chrony('/', ntp_conf, step_tickers) - - if conf_migrated: - api.current_logger().info('Configuration files migrated to chrony: {}'.format(' '.join(migrate_configs))) - if ignored_lines: - api.current_logger().warning('Some lines in /etc/ntp.conf were ignored in migration' - ' (check /etc/chrony.conf)') diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/libraries/ntp2chrony.py b/repos/system_upgrade/el7toel8/actors/migratentp/libraries/ntp2chrony.py deleted file mode 100644 index 45ef5def..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/libraries/ntp2chrony.py +++ /dev/null @@ -1,684 +0,0 @@ -#!/usr/bin/python -# -# Convert ntp configuration to chrony -# -# Copyright (C) 2018-2019 Miroslav Lichvar -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# 
"Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -from __future__ import print_function - -import argparse -import ipaddress -import logging -import os -import re -import subprocess -import sys - -import six - -# python2 compatibility hacks -if six.PY2: - from io import open - reload(sys) # pylint: disable=undefined-variable # noqa: F821 - sys.setdefaultencoding("utf-8") - - -class NtpConfiguration(object): - def __init__(self, root_dir, ntp_conf, step_tickers): - self.root_dir = root_dir if root_dir != "/" else "" - self.ntp_conf_path = ntp_conf - self.step_tickers_path = step_tickers - - # Read and write files using an 8-bit transparent encoding - self.file_encoding = "latin-1" - self.enabled_services = set() - self.step_tickers = [] - self.time_sources = [] - self.fudges = {} - self.restrictions = { - # Built-in defaults - ipaddress.ip_network(u"0.0.0.0/0"): set(), - ipaddress.ip_network(u"::/0"): set(), - } - self.keyfile = "" - self.keys = [] - self.trusted_keys = [] - self.driftfile = "" - self.statistics = [] - self.leapfile = "" - self.tos_options = {} - self.ignored_directives = set() - 
self.ignored_lines = [] - - # self.detect_enabled_services() - self.parse_step_tickers() - self.parse_ntp_conf() - - def detect_enabled_services(self): - for service in ["ntpdate", "ntpd", "ntp-wait"]: - service_path = os.path.join(self.root_dir, - "etc/systemd/system/multi-user.target.wants/{}.service".format(service)) - if os.path.islink(service_path): - self.enabled_services.add(service) - logging.info("Enabled services found in /etc/systemd/system: %s", - " ".join(self.enabled_services)) - - def parse_step_tickers(self): - if not self.step_tickers_path: - return - - path = os.path.join(self.root_dir, self.step_tickers_path) - if not os.path.isfile(path): - logging.info("Missing %s", path) - return - - with open(path, encoding=self.file_encoding) as f: - for line in f: - line = line[:line.find('#')] - - words = line.split() - - if not words: - continue - - self.step_tickers.extend(words) - - def parse_ntp_conf(self, path=None): - if path is None: - path = os.path.join(self.root_dir, self.ntp_conf_path) - - with open(path, encoding=self.file_encoding) as f: - logging.info("Reading %s", path) - - for line in f: - line = line[:line.find('#')] - - words = line.split() - - if not words: - continue - - if not self.parse_directive(words): - self.ignored_lines.append(line) - - def parse_directive(self, words): - res = True - name = words.pop(0) - if name.startswith("logconfig"): - name = "logconfig" - - if words: - if name in ["server", "peer", "pool"]: - res = self.parse_source(name, words) - elif name == "fudge": - res = self.parse_fudge(words) - elif name == "restrict": - res = self.parse_restrict(words) - elif name == "tos": - res = self.parse_tos(words) - elif name == "includefile": - res = self.parse_includefile(words) - elif name == "keys": - res = self.parse_keys(words) - elif name == "trustedkey": - res = self.parse_trustedkey(words) - elif name == "driftfile": - self.driftfile = words[0] - elif name == "statistics": - self.statistics = words - elif name == 
"leapfile": - self.leapfile = words[0] - else: - self.ignored_directives.add(name) - res = False - else: - self.ignored_directives.add(name) - res = False - - return res - - def parse_source(self, source_type, words): - ipv4_only = False - ipv6_only = False - source = { - "type": source_type, - "options": [] - } - - if words[0] == "-4": - ipv4_only = True - words.pop(0) - elif words[0] == "-6": - ipv6_only = True - words.pop(0) - - if not words: - return False - - source["address"] = words.pop(0) - - # Check if -4/-6 corresponds to the address and ignore hostnames - if ipv4_only or ipv6_only: - try: - version = ipaddress.ip_address(source["address"]).version - if (ipv4_only and version != 4) or (ipv6_only and version != 6): - return False - except ValueError: - return False - - if source["address"].startswith("127.127."): - if not source["address"].startswith("127.127.1."): - # Ignore non-LOCAL refclocks - return False - - while words: - if len(words) >= 2 and words[0] in ["minpoll", "maxpoll", "version", "key"]: - source["options"].append((words[0], words[1])) - words = words[2:] - elif words[0] in ["burst", "iburst", "noselect", "prefer", "true", "xleave"]: - source["options"].append((words[0],)) - words.pop(0) - else: - return False - - self.time_sources.append(source) - return True - - def parse_fudge(self, words): - address = words.pop(0) - options = {} - - while words: - if len(words) >= 2 and words[0] in ["stratum"]: - if not words[1].isdigit(): - return False - options[words[0]] = int(words[1]) - words = words[2:] - elif len(words) >= 2: - words = words[2:] - else: - return False - - self.fudges[address] = options - return True - - def parse_restrict(self, words): - ipv4_only = False - ipv6_only = False - flags = set() - mask = "" - - if words[0] == "-4": - ipv4_only = True - words.pop(0) - elif words[0] == "-6": - ipv6_only = True - words.pop(0) - - if not words: - return False - - address = words.pop(0) - - while words: - if len(words) >= 2 and words[0] 
== "mask": - mask = words[1] - words = words[2:] - else: - if words[0] not in ["kod", "nomodify", "notrap", "nopeer", "noquery", - "limited", "ignore", "noserve"]: - return False - flags.add(words[0]) - words.pop(0) - - # Convert to IP network(s), ignoring restrictions with hostnames - networks = [] - if address == "default" and not mask: - if not ipv6_only: - networks.append(ipaddress.ip_network(u"0.0.0.0/0")) - if not ipv4_only: - networks.append(ipaddress.ip_network(u"::/0")) - else: - try: - if mask: - # Count bits in the mask (ipaddress does not support - # expanded IPv6 netmasks) - mask_ip = ipaddress.ip_address(mask) - mask_str = "{0:0{1}b}".format(int(mask_ip), mask_ip.max_prefixlen) - networks.append(ipaddress.ip_network( - u"{}/{}".format(address, len(mask_str.rstrip('0'))))) - else: - networks.append(ipaddress.ip_network(address)) - except ValueError: - return False - - if (ipv4_only and networks[-1].version != 4) or \ - (ipv6_only and networks[-1].version != 6): - return False - - for network in networks: - self.restrictions[network] = flags - - return True - - def parse_tos(self, words): - options = {} - while words: - if len(words) >= 2 and words[0] in ["minsane", "orphan"]: - if not words[1].isdigit(): - return False - options[words[0]] = int(words[1]) - words = words[2:] - elif len(words) >= 2 and words[0] in ["maxdist"]: - # Check if it is a float value - if not words[1].replace('.', '', 1).isdigit(): - return False - options[words[0]] = float(words[1]) - words = words[2:] - else: - return False - - self.tos_options.update(options) - - return True - - def parse_includefile(self, words): - path = os.path.join(self.root_dir, words[0]) - if not os.path.isfile(path): - return False - - self.parse_ntp_conf(path) - return True - - def parse_keys(self, words): - keyfile = words[0] - path = os.path.join(self.root_dir, keyfile) - if not os.path.isfile(path): - logging.info("Missing %s", path) - return False - - with open(path, encoding=self.file_encoding) 
as f: - logging.info("Reading %s", path) - keys = [] - for line in f: - words = line.split() - if len(words) < 3 or not words[0].isdigit(): - continue - keys.append((int(words[0]), words[1], words[2])) - - self.keyfile = keyfile - self.keys = keys - - return True - - def parse_trustedkey(self, words): - key_ranges = [] - for word in words: - if word.isdigit(): - key_ranges.append((int(word), int(word))) - elif re.match("^[0-9]+-[0-9]+$", word): - first, last = word.split("-") - key_ranges.append((int(first), int(last))) - else: - return False - - self.trusted_keys = key_ranges - return True - - def write_chrony_configuration(self, chrony_conf_path, chrony_keys_path, - dry_run=False, backup=False): - chrony_conf = self.get_chrony_conf(chrony_keys_path) - logging.debug("Generated %s:\n%s", chrony_conf_path, chrony_conf) - - if not dry_run: - self.write_file(chrony_conf_path, 0o644, chrony_conf, backup) - - chrony_keys = self.get_chrony_keys() - if chrony_keys: - logging.debug("Generated %s:\n%s", chrony_keys_path, chrony_keys) - - if not dry_run: - self.write_file(chrony_keys_path, 0o640, chrony_keys, backup) - - def get_processed_time_sources(self): - # Convert {0,1,2,3}.*pool.ntp.org servers to 2.*pool.ntp.org pools - - # Make shallow copies of all sources (only type will be modified) - time_sources = [s.copy() for s in self.time_sources] - - pools = {} - for source in time_sources: - if source["type"] != "server": - continue - m = re.match("^([0123])(\\.\\w+)?\\.pool\\.ntp\\.org$", source["address"]) - if m is None: - continue - number = m.group(1) - zone = m.group(2) - if zone not in pools: - pools[zone] = [] - pools[zone].append((int(number), source)) - - remove_servers = set() - for zone, pool in pools.items(): - # sort and skip all pools not in [0, 3] range - pool.sort() - if [num for num, source in pool] != [0, 1, 2, 3]: - # only exact group of 4 servers can be converted, nothing to do here - continue - # verify that parameters are the same for all servers in 
the pool - if not all([p[1]["options"] == pool[0][1]["options"] for p in pool]): - break - remove_servers.update([pool[i][1]["address"] for i in [0, 1, 3]]) - pool[2][1]["type"] = "pool" - - processed_sources = [] - for source in time_sources: - if source["type"] == "server" and source["address"] in remove_servers: - continue - processed_sources.append(source) - return processed_sources - - def get_chrony_conf_sources(self): - conf = "" - - if self.step_tickers: - conf += "# Specify NTP servers used for initial correction.\n" - conf += "initstepslew 0.1 {}\n".format(" ".join(self.step_tickers)) - conf += "\n" - - conf += "# Specify time sources.\n" - - for source in self.get_processed_time_sources(): - address = source["address"] - if address.startswith("127.127."): - if address.startswith("127.127.1."): - continue - # No other refclocks are expected from the parser - assert False - else: - conf += "{} {}".format(source["type"], address) - for option in source["options"]: - if option[0] in ["minpoll", "maxpoll", "version", "key", - "iburst", "noselect", "prefer", "xleave"]: - conf += " {}".format(" ".join(option)) - elif option[0] == "burst": - conf += " presend 6" - elif option[0] == "true": - conf += " trust" - else: - # No other options are expected from the parser - assert False - conf += "\n" - conf += "\n" - - return conf - - def get_chrony_conf_allows(self): - allowed_networks = [n for n in self.restrictions - if "ignore" not in self.restrictions[n] and "noserve" not in self.restrictions[n]] - - conf = "" - for network in sorted(allowed_networks, key=lambda n: (n.version, n)): - if network.num_addresses > 1: - conf += "allow {}\n".format(network) - else: - conf += "allow {}\n".format(network.network_address) - - if conf: - conf = "# Allow NTP client access.\n" + conf - conf += "\n" - - return conf - - def get_chrony_conf_cmdallows(self): - - def _is_network_allowed(network): - return ("ignore" not in self.restrictions[network] and "noquery" not in 
self.restrictions[network] and - network != ipaddress.ip_network(u"127.0.0.1/32") and network != ipaddress.ip_network(u"::1/128")) - - allowed_networks = [n for n in self.restrictions if _is_network_allowed(n)] - - ip_versions = set() - conf = "" - for network in sorted(allowed_networks, key=lambda n: (n.version, n)): - ip_versions.add(network.version) - if network.num_addresses > 1: - conf += "cmdallow {}\n".format(network) - else: - conf += "cmdallow {}\n".format(network.network_address) - - if conf: - conf = "# Allow remote monitoring.\n" + conf - if 4 in ip_versions: - conf += "bindcmdaddress 0.0.0.0\n" - if 6 in ip_versions: - conf += "bindcmdaddress ::\n" - conf += "\n" - - return conf - - def get_chrony_conf(self, chrony_keys_path): - local_stratum = 0 - maxdistance = 0.0 - minsources = 1 - orphan_stratum = 0 - logs = [] - - for source in self.time_sources: - address = source["address"] - if address.startswith("127.127.1."): - if address in self.fudges and "stratum" in self.fudges[address]: - local_stratum = self.fudges[address]["stratum"] - else: - local_stratum = 5 - - if "maxdist" in self.tos_options: - maxdistance = self.tos_options["maxdist"] - if "minsane" in self.tos_options: - minsources = self.tos_options["minsane"] - if "orphan" in self.tos_options: - orphan_stratum = self.tos_options["orphan"] - - if "clockstats" in self.statistics: - logs.append("refclocks") - if "loopstats" in self.statistics: - logs.append("tracking") - if "peerstats" in self.statistics: - logs.append("statistics") - if "rawstats" in self.statistics: - logs.append("measurements") - - conf = "# This file was converted from {}{}.\n".format( - self.ntp_conf_path, - " and " + self.step_tickers_path if self.step_tickers_path else "") - conf += "\n" - - if self.ignored_lines: - conf += "# The following directives were ignored in the conversion:\n" - - for line in self.ignored_lines: - # Remove sensitive information - line = re.sub(r"\s+pw\s+\S+", " pw XXX", line.rstrip()) - conf += 
"# " + line + "\n" - conf += "\n" - - conf += self.get_chrony_conf_sources() - - conf += "# Record the rate at which the system clock gains/losses time.\n" - if not self.driftfile: - conf += "#" - conf += "driftfile /var/lib/chrony/drift\n" - conf += "\n" - - conf += "# Allow the system clock to be stepped in the first three updates\n" - conf += "# if its offset is larger than 1 second.\n" - conf += "makestep 1.0 3\n" - conf += "\n" - - conf += "# Enable kernel synchronization of the real-time clock (RTC).\n" - conf += "rtcsync\n" - conf += "\n" - - conf += "# Enable hardware timestamping on all interfaces that support it.\n" - conf += "#hwtimestamp *\n" - conf += "\n" - - if maxdistance > 0.0: - conf += "# Specify the maximum distance of sources to be selectable.\n" - conf += "maxdistance {}\n".format(maxdistance) - conf += "\n" - - conf += "# Increase the minimum number of selectable sources required to adjust\n" - conf += "# the system clock.\n" - if minsources > 1: - conf += "minsources {}\n".format(minsources) - else: - conf += "#minsources 2\n" - conf += "\n" - - conf += self.get_chrony_conf_allows() - - conf += self.get_chrony_conf_cmdallows() - - conf += "# Serve time even if not synchronized to a time source.\n" - if 0 < orphan_stratum < 16: - conf += "local stratum {} orphan\n".format(orphan_stratum) - elif 0 < local_stratum < 16: - conf += "local stratum {}\n".format(local_stratum) - else: - conf += "#local stratum 10\n" - conf += "\n" - - conf += "# Specify file containing keys for NTP authentication.\n" - conf += "keyfile {}\n".format(chrony_keys_path) - conf += "\n" - - conf += "# Get TAI-UTC offset and leap seconds from the system tz database.\n" - conf += "leapsectz right/UTC\n" - conf += "\n" - - conf += "# Specify directory for log files.\n" - conf += "logdir /var/log/chrony\n" - conf += "\n" - - conf += "# Select which information is logged.\n" - if logs: - conf += "log {}\n".format(" ".join(logs)) - else: - conf += "#log measurements statistics 
tracking\n" - - return conf - - def get_chrony_keys(self): - if not self.keyfile: - return "" - - keys = "# This file was converted from {}.\n".format(self.keyfile) - keys += "\n" - - for key in self.keys: - key_id = key[0] - key_type = key[1] - password = key[2] - - if key_type in ["m", "M"]: - key_type = "MD5" - elif key_type not in ["MD5", "SHA1", "SHA256", "SHA384", "SHA512"]: - continue - - prefix = "ASCII" if len(password) <= 20 else "HEX" - - for first, last in self.trusted_keys: - if first <= key_id <= last: - trusted = True - break - else: - trusted = False - - # Disable keys that were not marked as trusted - if not trusted: - keys += "#" - - keys += "{key_id} {key_type} {prefix}:{password}\n".format( - key_id=key_id, key_type=key_type, prefix=prefix, password=password) - - return keys - - def write_file(self, path, mode, content, backup): - path = self.root_dir + path - if backup and os.path.isfile(path): - os.rename(path, path + ".old") - - with open(os.open(path, os.O_CREAT | os.O_WRONLY | os.O_EXCL, mode), "w", - encoding=self.file_encoding) as f: - logging.info("Writing %s", path) - f.write(u"" + content) - - # Fix SELinux context if restorecon is installed - try: - subprocess.call(["restorecon", path]) - except OSError: - pass - - -def main(): - parser = argparse.ArgumentParser(description="Convert ntp configuration to chrony.") - parser.add_argument("-r", "--root", dest="roots", default=["/"], nargs="+", - metavar="DIR", help="specify root directory (default /)") - parser.add_argument("--ntp-conf", action="store", default="/etc/ntp.conf", - metavar="FILE", help="specify ntp config (default /etc/ntp.conf)") - parser.add_argument("--step-tickers", action="store", default="", - metavar="FILE", help="specify ntpdate step-tickers config (no default)") - parser.add_argument("--chrony-conf", action="store", default="/etc/chrony.conf", - metavar="FILE", help="specify chrony config (default /etc/chrony.conf)") - parser.add_argument("--chrony-keys", 
action="store", default="/etc/chrony.keys", - metavar="FILE", help="specify chrony keyfile (default /etc/chrony.keys)") - parser.add_argument("-b", "--backup", action="store_true", help="backup existing configs before writing") - parser.add_argument("-L", "--ignored-lines", action="store_true", help="print ignored lines") - parser.add_argument("-D", "--ignored-directives", action="store_true", - help="print names of ignored directives") - parser.add_argument("-n", "--dry-run", action="store_true", help="don't make any changes") - parser.add_argument("-v", "--verbose", action="count", default=0, help="increase verbosity") - - args = parser.parse_args() - - logging.basicConfig(format="%(message)s", - level=[logging.ERROR, logging.INFO, logging.DEBUG][min(args.verbose, 2)]) - - for root in args.roots: - conf = NtpConfiguration(root, args.ntp_conf, args.step_tickers) - - if args.ignored_lines: - for line in conf.ignored_lines: - print(line) - - if args.ignored_directives: - for directive in conf.ignored_directives: - print(directive) - - conf.write_chrony_configuration(args.chrony_conf, args.chrony_keys, args.dry_run, args.backup) - - -if __name__ == "__main__": - main() diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/1_chrony.conf b/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/1_chrony.conf deleted file mode 100644 index e60ba6fb..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/1_chrony.conf +++ /dev/null @@ -1,47 +0,0 @@ -# This file was converted from tests/data/ntpconfs/1_ntp.conf. - -# Specify time sources. -server ntpserver - -# Record the rate at which the system clock gains/losses time. -driftfile /var/lib/chrony/drift - -# Allow the system clock to be stepped in the first three updates -# if its offset is larger than 1 second. -makestep 1.0 3 - -# Enable kernel synchronization of the real-time clock (RTC). 
-rtcsync - -# Enable hardware timestamping on all interfaces that support it. -#hwtimestamp * - -# Increase the minimum number of selectable sources required to adjust -# the system clock. -#minsources 2 - -# Allow NTP client access. -allow 0.0.0.0/0 -allow 127.0.0.1 -allow ::/0 - -# Allow remote monitoring. -cmdallow 0.0.0.0/0 -cmdallow ::/0 -bindcmdaddress 0.0.0.0 -bindcmdaddress :: - -# Serve time even if not synchronized to a time source. -#local stratum 10 - -# Specify file containing keys for NTP authentication. -keyfile data/chronyconfs/1_chrony.keys - -# Get TAI-UTC offset and leap seconds from the system tz database. -leapsectz right/UTC - -# Specify directory for log files. -logdir /var/log/chrony - -# Select which information is logged. -#log measurements statistics tracking diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/2_chrony.conf b/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/2_chrony.conf deleted file mode 100644 index 577ffcfe..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/2_chrony.conf +++ /dev/null @@ -1,46 +0,0 @@ -# This file was converted from tests/data/ntpconfs/2_ntp.conf. - -# Specify time sources. -server 0.rhel.pool.ntp.org -server 1.rhel.pool.ntp.org -server 2.rhel.pool.ntp.org -server 172.18.242.69 prefer -server 172.18.242.71 prefer - -# Record the rate at which the system clock gains/losses time. -driftfile /var/lib/chrony/drift - -# Allow the system clock to be stepped in the first three updates -# if its offset is larger than 1 second. -makestep 1.0 3 - -# Enable kernel synchronization of the real-time clock (RTC). -rtcsync - -# Enable hardware timestamping on all interfaces that support it. -#hwtimestamp * - -# Increase the minimum number of selectable sources required to adjust -# the system clock. -#minsources 2 - -# Allow NTP client access. 
-allow 0.0.0.0/0 -allow 127.0.0.1 -allow ::/0 -allow ::1 - -# Serve time even if not synchronized to a time source. -#local stratum 10 - -# Specify file containing keys for NTP authentication. -keyfile data/chronyconfs/2_chrony.keys - -# Get TAI-UTC offset and leap seconds from the system tz database. -leapsectz right/UTC - -# Specify directory for log files. -logdir /var/log/chrony - -# Select which information is logged. -#log measurements statistics tracking diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/2_chrony.keys b/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/2_chrony.keys deleted file mode 100644 index 52c6ac2d..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/2_chrony.keys +++ /dev/null @@ -1,3 +0,0 @@ -#42 MD5 HEX:SorryForInconvenience -#22 MD5 ASCII:Catch -#2702 MD5 HEX:LavenderRose diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/3_chrony.conf b/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/3_chrony.conf deleted file mode 100644 index 232bd886..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/3_chrony.conf +++ /dev/null @@ -1,49 +0,0 @@ -# This file was converted from tests/data/ntpconfs/3_ntp.conf. - -# Specify time sources. -server 0.rhel.pool.ntp.org -server 1.rhel.pool.ntp.org -server 2.rhel.pool.ntp.org -server 0.rhel.pool.ntp.org -server 1.rhel.pool.ntp.org -server 2.rhel.pool.ntp.org -server 172.18.242.69 prefer -server 172.18.242.71 prefer - -# Record the rate at which the system clock gains/losses time. -driftfile /var/lib/chrony/drift - -# Allow the system clock to be stepped in the first three updates -# if its offset is larger than 1 second. -makestep 1.0 3 - -# Enable kernel synchronization of the real-time clock (RTC). -rtcsync - -# Enable hardware timestamping on all interfaces that support it. 
-#hwtimestamp * - -# Increase the minimum number of selectable sources required to adjust -# the system clock. -#minsources 2 - -# Allow NTP client access. -allow 0.0.0.0/0 -allow 127.0.0.1 -allow ::/0 -allow ::1 - -# Serve time even if not synchronized to a time source. -local stratum 10 - -# Specify file containing keys for NTP authentication. -keyfile data/chronyconfs/3_chrony.keys - -# Get TAI-UTC offset and leap seconds from the system tz database. -leapsectz right/UTC - -# Specify directory for log files. -logdir /var/log/chrony - -# Select which information is logged. -#log measurements statistics tracking diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/3_chrony.keys b/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/3_chrony.keys deleted file mode 100644 index 26c4c168..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/3_chrony.keys +++ /dev/null @@ -1 +0,0 @@ -42 MD5 HEX:MarvinTheDepressiveAndroid diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/4_chrony.conf b/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/4_chrony.conf deleted file mode 100644 index 34c79b9b..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/4_chrony.conf +++ /dev/null @@ -1,49 +0,0 @@ -# This file was converted from tests/data/ntpconfs/4_ntp.conf. - -# Specify time sources. -pool 2.pool.ntp.org -pool 2.rhel.pool.ntp.org -server 42.rhel.pool.ntp.org - -# Record the rate at which the system clock gains/losses time. -driftfile /var/lib/chrony/drift - -# Allow the system clock to be stepped in the first three updates -# if its offset is larger than 1 second. -makestep 1.0 3 - -# Enable kernel synchronization of the real-time clock (RTC). -rtcsync - -# Enable hardware timestamping on all interfaces that support it. 
-#hwtimestamp * - -# Increase the minimum number of selectable sources required to adjust -# the system clock. -#minsources 2 - -# Allow NTP client access. -allow 0.0.0.0/0 -allow 127.0.0.1 -allow 2001:db8:1234::1234 -allow 2001:db8:5670::/44 - -# Allow remote monitoring. -cmdallow 192.168.8.5 -cmdallow 192.168.10.0/24 -bindcmdaddress 0.0.0.0 - -# Serve time even if not synchronized to a time source. -#local stratum 10 - -# Specify file containing keys for NTP authentication. -keyfile data/chronyconfs/4_chrony.keys - -# Get TAI-UTC offset and leap seconds from the system tz database. -leapsectz right/UTC - -# Specify directory for log files. -logdir /var/log/chrony - -# Select which information is logged. -#log measurements statistics tracking diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/5_chrony.conf b/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/5_chrony.conf deleted file mode 100644 index 25d1caf2..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/5_chrony.conf +++ /dev/null @@ -1,48 +0,0 @@ -# This file was converted from tests/data/ntpconfs/5_ntp.conf. - -# Specify time sources. -pool 2.pool.ntp.org -peer 0.pool.ntp.org - -# Record the rate at which the system clock gains/losses time. -driftfile /var/lib/chrony/drift - -# Allow the system clock to be stepped in the first three updates -# if its offset is larger than 1 second. -makestep 1.0 3 - -# Enable kernel synchronization of the real-time clock (RTC). -rtcsync - -# Enable hardware timestamping on all interfaces that support it. -#hwtimestamp * - -# Increase the minimum number of selectable sources required to adjust -# the system clock. -#minsources 2 - -# Allow NTP client access. -allow 0.0.0.0/0 -allow 127.0.0.1 -allow ::/0 - -# Allow remote monitoring. -cmdallow 0.0.0.0/0 -cmdallow ::/0 -bindcmdaddress 0.0.0.0 -bindcmdaddress :: - -# Serve time even if not synchronized to a time source. 
-#local stratum 10 - -# Specify file containing keys for NTP authentication. -keyfile data/chronyconfs/5_chrony.keys - -# Get TAI-UTC offset and leap seconds from the system tz database. -leapsectz right/UTC - -# Specify directory for log files. -logdir /var/log/chrony - -# Select which information is logged. -#log measurements statistics tracking diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/6_chrony.conf b/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/6_chrony.conf deleted file mode 100644 index 440ee33d..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/6_chrony.conf +++ /dev/null @@ -1,51 +0,0 @@ -# This file was converted from tests/data/ntpconfs/6_ntp.conf. - -# The following directives were ignored in the conversion: -# server 127.127.8.1 mode 2 minpoll 3 maxpoll 3 noselect -# server 127.127.8.0 mode 5 minpoll 6 maxpoll 6 noselect -# server 127.127.20.0 mode 80 minpoll 3 maxpoll 3 prefer -# server 127.127.28.2 mode 1 - -# Specify time sources. - -# Record the rate at which the system clock gains/losses time. -#driftfile /var/lib/chrony/drift - -# Allow the system clock to be stepped in the first three updates -# if its offset is larger than 1 second. -makestep 1.0 3 - -# Enable kernel synchronization of the real-time clock (RTC). -rtcsync - -# Enable hardware timestamping on all interfaces that support it. -#hwtimestamp * - -# Increase the minimum number of selectable sources required to adjust -# the system clock. -#minsources 2 - -# Allow NTP client access. -allow 0.0.0.0/0 -allow ::/0 - -# Allow remote monitoring. -cmdallow 0.0.0.0/0 -cmdallow ::/0 -bindcmdaddress 0.0.0.0 -bindcmdaddress :: - -# Serve time even if not synchronized to a time source. -local stratum 5 - -# Specify file containing keys for NTP authentication. -keyfile data/chronyconfs/6_chrony.keys - -# Get TAI-UTC offset and leap seconds from the system tz database. 
-leapsectz right/UTC - -# Specify directory for log files. -logdir /var/log/chrony - -# Select which information is logged. -#log measurements statistics tracking diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/7_chrony.conf b/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/7_chrony.conf deleted file mode 100644 index a0aebaa7..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/chronyconfs/7_chrony.conf +++ /dev/null @@ -1,50 +0,0 @@ -# This file was converted from tests/data/ntpconfs/7_ntp.conf. - -# The following directives were ignored in the conversion: -# server 192.168.1.3 nosuchoption - -# Specify time sources. -server 192.168.1.1 minpoll 3 maxpoll 12 iburst presend 6 -server 192.168.1.2 noselect prefer trust xleave - -# Record the rate at which the system clock gains/losses time. -#driftfile /var/lib/chrony/drift - -# Allow the system clock to be stepped in the first three updates -# if its offset is larger than 1 second. -makestep 1.0 3 - -# Enable kernel synchronization of the real-time clock (RTC). -rtcsync - -# Enable hardware timestamping on all interfaces that support it. -#hwtimestamp * - -# Increase the minimum number of selectable sources required to adjust -# the system clock. -#minsources 2 - -# Allow NTP client access. -allow 0.0.0.0/0 -allow ::/0 - -# Allow remote monitoring. -cmdallow 0.0.0.0/0 -cmdallow ::/0 -bindcmdaddress 0.0.0.0 -bindcmdaddress :: - -# Serve time even if not synchronized to a time source. -#local stratum 10 - -# Specify file containing keys for NTP authentication. -keyfile data/chronyconfs/7_chrony.keys - -# Get TAI-UTC offset and leap seconds from the system tz database. -leapsectz right/UTC - -# Specify directory for log files. -logdir /var/log/chrony - -# Select which information is logged. 
-#log measurements statistics tracking diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntp.conf b/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntp.conf deleted file mode 100644 index 982ab516..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntp.conf +++ /dev/null @@ -1,45 +0,0 @@ -################################################################################ -## /etc/ntp.conf -## -## Sample NTP configuration file for basic unit tests. -## It's main purpose is to check ntp config parsing and minimal conversion. -## For real-world like scenarios another set of configs will be used. -## -## -################################################################################ - - -# By default, exchange time with everybody, but don't allow configuration. -restrict -4 default notrap nomodify nopeer noquery -restrict -6 default notrap nomodify nopeer noquery - -# Local users may interrogate the ntp server more closely. -restrict 127.0.0.1 -restrict ::1 - -# Clients from this (example!) subnet have unlimited access, but only if -# cryptographically authenticated. 
-#restrict 192.168.123.0 mask 255.255.255.0 notrust - -## -## Miscellaneous stuff -## - -driftfile /var/lib/ntp/drift/ntp.drift # path for drift file - -logfile /var/log/ntp # alternate log file -# logconfig =syncstatus + sysevents -# logconfig =all - -# statsdir /var/log/ntpstats/ # directory for statistics files -# filegen peerstats file peerstats type day enable -# filegen loopstats file loopstats type day enable -# filegen clockstats file clockstats type day enable - -# -# Authentication stuff -# -keys data/ntp.keys # path for keys file -trustedkey 42 # define trusted keys -requestkey 22 # key (7) for accessing server variables -controlkey 22 # key (6) for accessing server variables diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntp.keys b/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntp.keys deleted file mode 100644 index 4eec33d9..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntp.keys +++ /dev/null @@ -1,3 +0,0 @@ -22 M Catch -42 M SorryForInconvenience -2702 M LavenderRose diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/1_ntp.conf b/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/1_ntp.conf deleted file mode 100644 index 22161782..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/1_ntp.conf +++ /dev/null @@ -1,4 +0,0 @@ -restrict 127.0.0.1 -restrict default kod nomodify notrap -driftfile /var/lib/ntp/drift -server ntpserver diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/2_ntp.conf b/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/2_ntp.conf deleted file mode 100644 index 38793960..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/2_ntp.conf +++ /dev/null @@ -1,54 +0,0 @@ -# For more information about this file, see the man pages -# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5). 
- -driftfile /var/lib/ntp/drift - -# Permit time synchronization with our time source, but do not -# permit the source to query or modify the service on this system. -restrict default kod nomodify notrap nopeer noquery -restrict -6 default kod nomodify notrap nopeer noquery - -# Permit all access over the loopback interface. This could -# be tightened as well, but to do so would effect some of -# the administrative functions. -restrict 127.0.0.1 -restrict -6 ::1 - -# Hosts on local network are less restricted. -#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap - -# Use public servers from the pool.ntp.org project. -# Please consider joining the pool (http://www.pool.ntp.org/join.html). -server 0.rhel.pool.ntp.org -server 1.rhel.pool.ntp.org -server 2.rhel.pool.ntp.org - -#broadcast 192.168.1.255 autokey # broadcast server -#broadcastclient # broadcast client -#broadcast 224.0.1.1 autokey # multicast server -#multicastclient 224.0.1.1 # multicast client -#manycastserver 239.255.254.254 # manycast server -#manycastclient 239.255.254.254 autokey # manycast client - -# Undisciplined Local Clock. This is a fake driver intended for backup -# and when no outside source of synchronized time is available. -#server 127.127.1.0 # local clock -#fudge 127.127.1.0 stratum 10 - -# Key file containing the keys and key identifiers used when operating -# with symmetric key cryptography. -keys data/ntpconfs/2_ntp.keys - -# Specify the key identifiers which are trusted. -#trustedkey 4 8 42 - -# Specify the key identifier to use with the ntpdc utility. -#requestkey 8 - -# Specify the key identifier to use with the ntpq utility. -#controlkey 8 - -# Enable writing of statistics records. 
-#statistics clockstats cryptostats loopstats peerstats -server 172.18.242.69 prefer -server 172.18.242.71 prefer diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/2_ntp.keys b/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/2_ntp.keys deleted file mode 100644 index ad9058bb..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/2_ntp.keys +++ /dev/null @@ -1,3 +0,0 @@ -42 M SorryForInconvenience -22 M Catch -2702 M LavenderRose diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/3_ntp.conf b/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/3_ntp.conf deleted file mode 100755 index 3c9bbcc8..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/3_ntp.conf +++ /dev/null @@ -1,59 +0,0 @@ -# For more information about this file, see the man pages -# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5). - -driftfile /var/lib/ntp/drift - -# Permit time synchronization with our time source, but do not -# permit the source to query or modify the service on this system. -restrict default kod nomodify notrap nopeer noquery -restrict -6 default kod nomodify notrap nopeer noquery - -# Permit all access over the loopback interface. This could -# be tightened as well, but to do so would effect some of -# the administrative functions. -restrict 127.0.0.1 -restrict -6 ::1 - -# Hosts on local network are less restricted. -#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap - -# Use public servers from the pool.ntp.org project. -# Please consider joining the pool (http://www.pool.ntp.org/join.html). 
-server 0.rhel.pool.ntp.org -server 1.rhel.pool.ntp.org -server 2.rhel.pool.ntp.org - -#broadcast 192.168.1.255 autokey # broadcast server -#broadcastclient # broadcast client -#broadcast 224.0.1.1 autokey # multicast server -#multicastclient 224.0.1.1 # multicast client -#manycastserver 239.255.254.254 # manycast server -#manycastclient 239.255.254.254 autokey # manycast client - -# Undisciplined Local Clock. This is a fake driver intended for backup -# and when no outside source of synchronized time is available. -#server 127.127.1.0 # local clock -#fudge 127.127.1.0 stratum 10 - -# Enable public key cryptography. -#crypto - -includefile data/ntpconfs/3_ntp.includefile - -# Key file containing the keys and key identifiers used when operating -# with symmetric key cryptography. -# keys tests/data/ntpconfs/3_ntp.keys - -# Specify the key identifiers which are trusted. -# trustedkey 42 - -# Specify the key identifier to use with the ntpdc utility. -#requestkey 8 - -# Specify the key identifier to use with the ntpq utility. -#controlkey 8 - -# Enable writing of statistics records. -#statistics clockstats cryptostats loopstats peerstats -server 172.18.242.69 prefer -server 172.18.242.71 prefer diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/3_ntp.includefile b/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/3_ntp.includefile deleted file mode 100644 index 07c57f61..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/3_ntp.includefile +++ /dev/null @@ -1,50 +0,0 @@ -# Permit time synchronization with our time source, but do not -# permit the source to query or modify the service on this system. -restrict default kod nomodify notrap nopeer noquery -restrict -6 default kod nomodify notrap nopeer noquery - -# Permit all access over the loopback interface. This could -# be tightened as well, but to do so would effect some of -# the administrative functions. 
-restrict 127.0.0.1 -restrict -6 ::1 - -# Hosts on local network are less restricted. -#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap - -# Use public servers from the pool.ntp.org project. -# Please consider joining the pool (http://www.pool.ntp.org/join.html). -server 0.rhel.pool.ntp.org -server 1.rhel.pool.ntp.org -server 2.rhel.pool.ntp.org - -#broadcast 192.168.1.255 key 42 # broadcast server -#broadcastclient # broadcast client -#broadcast 224.0.1.1 key 42 # multicast server -#multicastclient 224.0.1.1 # multicast client -#manycastserver 239.255.254.254 # manycast server -#manycastclient 239.255.254.254 key 42 # manycast client - -# Undisciplined Local Clock. This is a fake driver intended for backup -# and when no outside source of synchronized time is available. -server 127.127.1.0 # local clock -fudge 127.127.1.0 stratum 10 - -# Drift file. Put this in a directory which the daemon can write to. -# No symbolic links allowed, either, since the daemon updates the file -# by creating a temporary in the same directory and then rename()'ing -# it to the file. -driftfile /var/lib/ntp/drift - -# Key file containing the keys and key identifiers used when operating -# with symmetric key cryptography. -keys data/ntpconfs/3_ntp.keys - -# Specify the key identifiers which are trusted. -trustedkey 42 - -# Specify the key identifier to use with the ntpdc utility. -#requestkey 8 - -# Specify the key identifier to use with the ntpq utility. 
-#controlkey 8 diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/3_ntp.keys b/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/3_ntp.keys deleted file mode 100644 index 4db44e2a..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/3_ntp.keys +++ /dev/null @@ -1 +0,0 @@ -42 M MarvinTheDepressiveAndroid diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/4_ntp.conf b/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/4_ntp.conf deleted file mode 100644 index 9fed82f3..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/4_ntp.conf +++ /dev/null @@ -1,20 +0,0 @@ -restrict 127.0.0.1 -restrict default kod nomodify notrap noserve noquery -restrict 192.168.8.5 noserve -restrict 192.168.10.0 mask 255.255.255.0 noserve -restrict 0.0.0.0 mask 0.0.0.0 noquery -restrict 2001:db8:1234::1234 noquery -restrict 2001:db8:5670:: mask ffff:ffff:fff0:: noquery -driftfile /var/lib/ntp/drift - -# Use public servers from the pool.ntp.org project. -# Please consider joining the pool (http://www.pool.ntp.org/join.html). 
-server 0.pool.ntp.org -server 1.pool.ntp.org -server 2.pool.ntp.org -server 3.pool.ntp.org -server 0.rhel.pool.ntp.org -server 1.rhel.pool.ntp.org -server 2.rhel.pool.ntp.org -server 3.rhel.pool.ntp.org -server 42.rhel.pool.ntp.org diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/5_ntp.conf b/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/5_ntp.conf deleted file mode 100644 index dc9d47ef..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/5_ntp.conf +++ /dev/null @@ -1,9 +0,0 @@ -restrict 127.0.0.1 -restrict default kod nomodify notrap -driftfile /var/lib/ntp/drift - -server 0.pool.ntp.org -server 1.pool.ntp.org -server 2.pool.ntp.org -server 3.pool.ntp.org -peer 0.pool.ntp.org diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/6_ntp.conf b/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/6_ntp.conf deleted file mode 100644 index 8ce28d7a..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/6_ntp.conf +++ /dev/null @@ -1,8 +0,0 @@ -server 127.127.1.0 minpoll 4 maxpoll 4 -fudge 127.127.1.0 stratum 5 -server 127.127.8.1 mode 2 minpoll 3 maxpoll 3 noselect -server 127.127.8.0 mode 5 minpoll 6 maxpoll 6 noselect -fudge 127.127.8.0 time1 0.03 -server 127.127.20.0 mode 80 minpoll 3 maxpoll 3 prefer -fudge 127.127.20.0 flag1 1 time2 0.5 -server 127.127.28.2 mode 1 diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/7_ntp.conf b/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/7_ntp.conf deleted file mode 100644 index 5ecd8776..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/ntpconfs/7_ntp.conf +++ /dev/null @@ -1,3 +0,0 @@ -server 192.168.1.1 minpoll 3 maxpoll 12 iburst burst -server 192.168.1.2 noselect prefer true xleave -server 192.168.1.3 nosuchoption diff --git 
a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/step_tickers b/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/step_tickers deleted file mode 100644 index df9498e4..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/data/step_tickers +++ /dev/null @@ -1,3 +0,0 @@ -0.sample.pool.ntp.org -1.sample.pool.ntp.org -2.sample.pool.ntp.org diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/test_converter_migratentp.py b/repos/system_upgrade/el7toel8/actors/migratentp/tests/test_converter_migratentp.py deleted file mode 100644 index 89748880..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/test_converter_migratentp.py +++ /dev/null @@ -1,88 +0,0 @@ -import os - -import pytest - -from leapp.libraries.actor import ntp2chrony - -CUR_DIR = os.path.dirname(os.path.abspath(__file__)) - -NTP_CONF = os.path.join(CUR_DIR, "data/ntp.conf") -STEP_TICKERS = os.path.join(CUR_DIR, "data/step_tickers") - -# TODO [Artem] the following consts should use abs path as well. -# reader of [[:digit:]]chrony.conf files does not support wildcards, so we -# have to change the working directory here for now. -NTP_MATCH_DIR = "data/ntpconfs/" -CHRONY_MATCH_DIR = "data/chronyconfs/" - - -@pytest.fixture -def adjust_cwd(): - previous_cwd = os.getcwd() - os.chdir(CUR_DIR) - yield - os.chdir(previous_cwd) - - -class TestConverter(object): - def test_basic(self): - config = ntp2chrony.NtpConfiguration(CUR_DIR, NTP_CONF, step_tickers=STEP_TICKERS) - present = [config.restrictions, config.driftfile, config.trusted_keys, config.keys, - config.step_tickers, config.restrictions] - for section in present: - assert section - chrony_conf = config.get_chrony_conf('/etc/chrony.keys') - # additional verification section by section for each param in present? 
- - # verify step_tickers -> initstepslew - initstepslew_line = next((l for l in chrony_conf.split('\n') - if l.startswith('initstepslew')), None) - assert initstepslew_line and initstepslew_line.endswith(' '.join(config.step_tickers)) - chrony_keys = config.get_chrony_keys() - # verify keys generation - for num, _, key in config.keys: - expected = ('%(num)s MD5 %(key)s' % - {'key': 'HEX:' if len(key) > 20 else 'ASCII:' + key, 'num': num}) - # keys not from trusted keys are commented out by default - if not any(num in range(x, y + 1) for (x, y) in config.trusted_keys): - expected = '#' + expected - assert expected in chrony_keys - - -class TestConfigConversion(object): - def _do_match(self, expected_file, actual): - expected_lines = [] - actual_lines = [] - with open(expected_file) as f: - expected_lines = [l.strip() for l in f.readlines() - if l.strip() and not l.strip().startswith('#')] - actual_lines = [l.strip() for l in actual.split('\n') - if l.strip() and not l.strip().startswith('#')] - assert expected_lines == actual_lines - - def _check_existance(self, fname, default=''): - if os.path.exists(fname): - return fname - return default - - def test_match(self, adjust_cwd): - - for f in [fe for fe in os.listdir(NTP_MATCH_DIR) if fe.endswith('conf')]: - # get recorded actual result - num = f.split('.')[0].split('_')[0] - ntp_conf = os.path.join(NTP_MATCH_DIR, f) - step_tickers = self._check_existance( - os.path.join(NTP_MATCH_DIR, '%s_step_tickers' % num)) - config = ntp2chrony.NtpConfiguration('', - ntp_conf, - step_tickers=step_tickers) - potential_chrony_keys = os.path.join(CHRONY_MATCH_DIR, "%s_chrony.keys" % num) - actual_data = config.get_chrony_conf(chrony_keys_path=potential_chrony_keys) - expected_fname = os.path.join(CHRONY_MATCH_DIR, "%s_chrony.conf" % num) - # make sure recorded and generated configs match - self._do_match(expected_fname, actual_data) - actual_keys = config.get_chrony_keys() - expected_keys_file = 
self._check_existance(potential_chrony_keys) - # if keys are recorded or generated make sure they match - if actual_keys and expected_keys_file != '': - self._do_match(expected_keys_file, actual_keys) diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/unit_test_migratentp.py b/repos/system_upgrade/el7toel8/actors/migratentp/tests/unit_test_migratentp.py deleted file mode 100644 index 5350029c..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/unit_test_migratentp.py +++ /dev/null @@ -1,83 +0,0 @@ -from leapp import reporting -from leapp.libraries.actor import migratentp -from leapp.libraries.common.testutils import create_report_mocked - - -class extract_tgz64_mocked(object): - def __init__(self): - self.called = 0 - self.s = None - - def __call__(self, s): - self.called += 1 - self.s = s - - -class enable_service_mocked(object): - def __init__(self): - self.called = 0 - self.names = [] - - def __call__(self, name): - self.called += 1 - self.names.append(name) - - -class write_file_mocked(object): - def __init__(self): - self.called = 0 - self.name = None - self.content = None - - def __call__(self, name, content): - self.called += 1 - self.name = name - self.content = content - - -class ntp2chrony_mocked(object): - def __init__(self, lines): - self.called = 0 - self.ignored_lines = lines - self.args = None - - def __call__(self, *args): - self.called += 1 - self.args = args - return True, self.ignored_lines * ['a line'] - - -def test_migration(monkeypatch): - for ntp_services, chrony_services, ignored_lines in [ - ([], [], 0), - (['ntpd'], ['chronyd'], 0), - (['ntpdate'], ['chronyd'], 1), - (['ntp-wait'], ['chrony-wait'], 0), - (['ntpd', 'ntpdate', 'ntp-wait'], ['chronyd', 'chronyd', 'chrony-wait'], 1), - ]: - monkeypatch.setattr(migratentp, 'extract_tgz64', extract_tgz64_mocked()) - monkeypatch.setattr(migratentp, 'enable_service', enable_service_mocked()) - monkeypatch.setattr(migratentp, 'write_file', write_file_mocked()) - 
monkeypatch.setattr(migratentp, 'ntp2chrony', ntp2chrony_mocked(ignored_lines)) - - migratentp.migrate_ntp(ntp_services, 'abcdef') - - if ntp_services: - assert migratentp.extract_tgz64.called == 1 - assert migratentp.extract_tgz64.s == 'abcdef' - assert migratentp.enable_service.called == len(chrony_services) - assert migratentp.enable_service.names == chrony_services - assert migratentp.write_file.called == (0 if 'ntpd' in ntp_services else 1) - if migratentp.write_file.called: - assert migratentp.write_file.name == '/etc/ntp.conf.nosources' - assert 'without ntp configuration' in migratentp.write_file.content - assert migratentp.ntp2chrony.called == 1 - assert migratentp.ntp2chrony.args == ( - '/', - '/etc/ntp.conf' if 'ntpd' in ntp_services else '/etc/ntp.conf.nosources', - '/etc/ntp/step-tickers' if 'ntpdate' in ntp_services else '') - else: - assert migratentp.extract_tgz64.called == 0 - assert migratentp.enable_service.called == 0 - assert migratentp.write_file.called == 0 - assert migratentp.ntp2chrony.called == 0 diff --git a/repos/system_upgrade/el7toel8/actors/migratesendmail/Makefile b/repos/system_upgrade/el7toel8/actors/migratesendmail/Makefile deleted file mode 100644 index 41c04a4c..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratesendmail/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -install-deps: - -yum install -y python-ipaddress diff --git a/repos/system_upgrade/el7toel8/actors/migratesendmail/actor.py b/repos/system_upgrade/el7toel8/actors/migratesendmail/actor.py deleted file mode 100644 index f709b588..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratesendmail/actor.py +++ /dev/null @@ -1,54 +0,0 @@ -import os - -from leapp import reporting -from leapp.actors import Actor -from leapp.libraries.actor import migratesendmail -from leapp.libraries.stdlib import api -from leapp.models import SendmailMigrationDecision -from leapp.reporting import create_report, Report -from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag - - 
-class MigrateSendmail(Actor): - """ - Migrate sendmail configuration files. - """ - - name = 'migrate_sendmail' - consumes = (SendmailMigrationDecision,) - produces = (Report,) - tags = (ApplicationsPhaseTag, IPUWorkflowTag) - - def process(self): - decision = next(self.consume(SendmailMigrationDecision), None) - if not decision or not decision.migrate_files: - return - - not_migrated = [] - for f in decision.migrate_files: - if not os.path.exists(f): - api.current_logger().error('Cound not migrate file {}, because it does not exist.'.format(f)) - not_migrated.append(f) - else: - migratesendmail.migrate_file(f) - - list_separator_fmt = '\n - ' - title = 'sendmail configuration files migrated' - summary = 'Uncompressed IPv6 addresses in: {}{}'.format(list_separator_fmt, - list_separator_fmt.join(decision.migrate_files)) - severity = reporting.Severity.INFO - - if not_migrated: - title = 'sendmail configuration files not migrated' - summary = ('Could not migrate the configuration files, which might be caused ' - 'by removal of sendmail package during the upgrade. 
' - 'Following files could not be migrated:{}{}').format(list_separator_fmt, - list_separator_fmt.join(not_migrated)) - severity = reporting.Severity.MEDIUM - - create_report([ - reporting.Title(title), - reporting.Summary(summary), - reporting.Severity(severity), - reporting.Groups([reporting.Groups.SERVICES, reporting.Groups.EMAIL]) - ]) diff --git a/repos/system_upgrade/el7toel8/actors/migratesendmail/libraries/migratesendmail.py b/repos/system_upgrade/el7toel8/actors/migratesendmail/libraries/migratesendmail.py deleted file mode 100644 index b2665f87..00000000 --- a/repos/system_upgrade/el7toel8/actors/migratesendmail/libraries/migratesendmail.py +++ /dev/null @@ -1,41 +0,0 @@ -import ipaddress -import os -import re -import shutil - -from six import text_type - -BackupSuffix = '.bak' - -# false positives blacklist -rfp = re.compile(r'(^\s*RIPv6:::1\b)|(@\s+\[IPv6:::1\]\s+>)') - -rs = re.compile(r'IPv6:[0-9a-fA-F:]*::[0-9a-fA-F:]*') - - -def uncompress_ipv6(ipv6): - addr = text_type(ipv6.replace('IPv6:', '')) - try: - addr = 'IPv6:' + ipaddress.ip_address(addr).exploded - except ValueError: - addr = ipv6 - return re.sub(r':0([^:])', r':\1', re.sub(r'0+', r'0', addr)) - - -def check_false_positives(f, l): - return f in ['sendmail.cf', 'submit.cf'] and rfp.search(l) is not None - - -def sub_ipv6(m): - return uncompress_ipv6(m.group(0)) - - -def migrate_file(fn): - # make backup - shutil.copy2(fn, fn + BackupSuffix) - with open(fn, 'w') as file_out: - with open(fn + BackupSuffix) as file_in: - for line in file_in: - if rs.search(line) and not check_false_positives(os.path.basename(fn), line): - line = rs.sub(sub_ipv6, line) - file_out.write(line) diff --git a/repos/system_upgrade/el7toel8/actors/migratesendmail/tests/component_test_migratesendmail.py b/repos/system_upgrade/el7toel8/actors/migratesendmail/tests/component_test_migratesendmail.py deleted file mode 100644 index 834841e4..00000000 --- 
a/repos/system_upgrade/el7toel8/actors/migratesendmail/tests/component_test_migratesendmail.py +++ /dev/null @@ -1,17 +0,0 @@ -import os - -from six import text_type - -from leapp.models import SendmailMigrationDecision -from leapp.reporting import Report - - -def test_actor_migration(tmpdir, current_actor_context): - test_cfg_file = text_type(tmpdir.join('sendmail.cf')) - with open(test_cfg_file, 'w') as file_out: - file_out.write("IPv6:::1") - current_actor_context.feed(SendmailMigrationDecision(migrate_files=[test_cfg_file])) - current_actor_context.run() - with open(test_cfg_file, 'r') as file_in: - data = file_in.read() - assert data == 'IPv6:0:0:0:0:0:0:0:1' diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfcheck/actor.py b/repos/system_upgrade/el7toel8/actors/multipathconfcheck/actor.py deleted file mode 100644 index 633ab540..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfcheck/actor.py +++ /dev/null @@ -1,34 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import multipathconfcheck -from leapp.models import MultipathConfFacts -from leapp.reporting import Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class MultipathConfCheck(Actor): - """ - Checks whether the multipath configuration can be updated to RHEL-8 and - plan necessary tasks. - - Specifically, it checks if the path_checker/checker option is set to - something other than tur in the defaults section. If so, non-trivial - changes may be required in the multipath.conf file, and it is not - possible to auto-update it - in such a case inhibit upgrade. - - In addition create a task to ensure that configuration files are copied - into the target container (they are necessary for correct creation of the - upgrade initramfs. 
- """ - - name = 'multipath_conf_check' - consumes = (MultipathConfFacts,) - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - facts = next(self.consume(MultipathConfFacts), None) - if facts is None: - self.log.debug('Skipping execution. No MultipathConfFacts has ' - 'been produced') - return - multipathconfcheck.check_configs(facts) diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfcheck/libraries/multipathconfcheck.py b/repos/system_upgrade/el7toel8/actors/multipathconfcheck/libraries/multipathconfcheck.py deleted file mode 100644 index bd4ac763..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfcheck/libraries/multipathconfcheck.py +++ /dev/null @@ -1,126 +0,0 @@ -from leapp import reporting -from leapp.reporting import create_report - - -def _merge_configs(configs): - options = {'default_path_checker': None, 'detect_prio': None, - 'detect_path_checker': None, 'reassign_maps': None, - 'retain_attached_hw_handler': None} - for config in configs: - if config.default_path_checker is not None: - options['default_path_checker'] = (config.default_path_checker, - config.pathname) - - if config.reassign_maps is not None: - options['reassign_maps'] = (config.reassign_maps, config.pathname) - - if config.default_detect_checker is not None: - options['detect_path_checker'] = (config.default_detect_checker, - config.pathname) - - if config.default_detect_prio is not None: - options['detect_prio'] = (config.default_detect_prio, - config.pathname) - - if config.default_retain_hwhandler is not None: - options['retain_attached_hw_handler'] = (config.default_retain_hwhandler, config.pathname) - return options - - -def _check_default_path_checker(options): - if not options['default_path_checker']: - return - value, pathname = options['default_path_checker'] - if value == 'tur': - return - create_report([ - reporting.Title( - 'Unsupported device-mapper-multipath configuration' - ), - reporting.Summary( - 
'device-mapper-multipath has changed the default path_checker ' - 'from "directio" to "tur" in RHEL-8. Further, changing the ' - 'default path_checker can cause issues with built-in device ' - 'configurations in RHEL-8. Please remove the "path_checker" ' - 'option from the defaults section of {}, and add it to the ' - 'device configuration of any devices that need it.'. - format(pathname) - ), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([reporting.Groups.SERVICES]), - reporting.Groups([reporting.Groups.INHIBITOR]), - reporting.RelatedResource('package', 'device-mapper-multipath'), - reporting.RelatedResource('file', pathname), - reporting.Remediation( - hint='Please remove the "path_checker {}" option from the ' - 'defaults section of {}, and add it to the device configuration ' - 'of any devices that need it.'.format(value, pathname) - ) - ]) - - -def _create_paths_str(paths): - if len(paths) < 2: - return paths[0] - return '{} and {}'.format(', '.join(paths[0:-1]), paths[-1]) - - -def _check_default_detection(options): - bad = [] - for keyword in ('detect_path_checker', 'detect_prio', - 'retain_attached_hw_handler'): - if options[keyword] and not options[keyword][0] and \ - options[keyword][1] not in bad: - bad.append(options[keyword][1]) - if not bad: - return - paths = _create_paths_str(bad) - create_report([ - reporting.Title( - 'device-mapper-multipath now defaults to detecting settings' - ), - reporting.Summary( - 'In RHEL-8, the default value for the "detect_path_checker", ' - '"detect_prio" and "retain_attached_hw_handler" options has ' - 'changed to "yes". Further, changing these default values can ' - 'cause issues with the built-in device configurations in RHEL-8. ' - 'They will be commented out in the defaults section of all ' - 'multipath config files. This is unlikely to cause any issues ' - 'with existing configurations. 
If it does, please move these ' - 'options from the defaults sections of {} to the device ' - 'configuration sections of any devices that need them.'. - format(paths) - ), - reporting.Severity(reporting.Severity.MEDIUM), - reporting.Groups([reporting.Groups.SERVICES]), - reporting.RelatedResource('package', 'device-mapper-multipath') - ]) - - -def _check_reassign_maps(options): - if not options['reassign_maps']: - return - value, pathname = options['reassign_maps'] - if not value: - return - create_report([ - reporting.Title( - 'device-mapper-multipath now disables reassign_maps by default' - ), - reporting.Summary( - 'In RHEL-8, the default value for "reassign_maps" has been ' - 'changed to "no", and it is not recommended to enable it in any ' - 'configuration going forward. This option will be commented out ' - 'in {}.'.format(pathname) - ), - reporting.Severity(reporting.Severity.MEDIUM), - reporting.Groups([reporting.Groups.SERVICES]), - reporting.RelatedResource('package', 'device-mapper-multipath') - ]) - - -def check_configs(facts): - options = _merge_configs(facts.configs) - _check_default_path_checker(options) - _check_default_detection(options) - _check_reassign_maps(options) diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfcheck/tests/test_actor_multipathconfcheck.py b/repos/system_upgrade/el7toel8/actors/multipathconfcheck/tests/test_actor_multipathconfcheck.py deleted file mode 100644 index b5ea1aeb..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfcheck/tests/test_actor_multipathconfcheck.py +++ /dev/null @@ -1,200 +0,0 @@ -from leapp.models import MultipathConfFacts, MultipathConfig, MultipathConfigOption -from leapp.reporting import Report -from leapp.snactor.fixture import current_actor_context -from leapp.utils.report import is_inhibitor - - -def _assert_default_checker_report(report, pathname): - assert report['title'] == \ - 'Unsupported device-mapper-multipath configuration' - assert report['severity'] == 'high' - 
assert is_inhibitor(report) - assert pathname in report['summary'] - - -def _assert_default_detect_report(report, pathname): - assert report['title'] == \ - 'device-mapper-multipath now defaults to detecting settings' - assert report['severity'] == 'medium' - assert pathname in report['summary'] - - -def _assert_reassign_maps(report, pathname): - assert report['title'] == \ - 'device-mapper-multipath now disables reassign_maps by default' - assert report['severity'] == 'medium' - assert pathname in report['summary'] - - -def test_config_all_bad(current_actor_context): - config = MultipathConfig( - pathname='all_bad.conf', default_path_checker='directio', - reassign_maps=True, default_detect_checker=False, - default_detect_prio=False, default_retain_hwhandler=False) - facts = MultipathConfFacts(configs=[config]) - - current_actor_context.feed(facts) - current_actor_context.run() - reports = list(current_actor_context.consume(Report)) - assert reports and len(reports) == 3 - _assert_default_checker_report(reports[0].report, 'all_bad.conf') - _assert_default_detect_report(reports[1].report, 'all_bad.conf') - _assert_reassign_maps(reports[2].report, 'all_bad.conf') - - -def test_config_all_good(current_actor_context): - config = MultipathConfig( - pathname='all_good.conf', default_path_checker='tur', - reassign_maps=False, default_detect_checker=True, - default_detect_prio=True, default_retain_hwhandler=True) - facts = MultipathConfFacts(configs=[config]) - - current_actor_context.feed(facts) - current_actor_context.run() - assert not current_actor_context.consume(Report) - - -def test_config_unimportant(current_actor_context): - option = MultipathConfigOption(name='path_checker', value='rdac') - config = MultipathConfig( - pathname='unimportant.conf', hw_str_match_exists=True, - ignore_new_boot_devs_exists=True, new_bindings_in_boot_exists=True, - unpriv_sgio_exists=True, detect_path_checker_exists=True, - overrides_hwhandler_exists=True, 
overrides_pg_timeout_exists=True, - queue_if_no_path_exists=True, all_devs_section_exists=True, - all_devs_options=[option]) - facts = MultipathConfFacts(configs=[config]) - - current_actor_context.feed(facts) - current_actor_context.run() - assert not current_actor_context.consume(Report) - - -def test_bad_then_good(current_actor_context): - bad_config = MultipathConfig( - pathname='all_bad.conf', default_path_checker='directio', - reassign_maps=True, default_detect_checker=False, - default_detect_prio=False, default_retain_hwhandler=False) - good_config = MultipathConfig( - pathname='all_good.conf', default_path_checker='tur', - reassign_maps=False, default_detect_checker=True, - default_detect_prio=True, default_retain_hwhandler=True) - facts = MultipathConfFacts(configs=[bad_config, good_config]) - - current_actor_context.feed(facts) - current_actor_context.run() - assert not current_actor_context.consume(Report) - - -def test_good_then_bad(current_actor_context): - good_config = MultipathConfig( - pathname='all_good.conf', default_path_checker='tur', - reassign_maps=False, default_detect_checker=True, - default_detect_prio=True, default_retain_hwhandler=True) - bad_config = MultipathConfig( - pathname='all_bad.conf', default_path_checker='directio', - reassign_maps=True, default_detect_checker=False, - default_detect_prio=False, default_retain_hwhandler=False) - facts = MultipathConfFacts(configs=[good_config, bad_config]) - - current_actor_context.feed(facts) - current_actor_context.run() - reports = list(current_actor_context.consume(Report)) - assert reports and len(reports) == 3 - _assert_default_checker_report(reports[0].report, 'all_bad.conf') - _assert_default_detect_report(reports[1].report, 'all_bad.conf') - _assert_reassign_maps(reports[2].report, 'all_bad.conf') - - -def test_bad_then_nothing(current_actor_context): - bad_config = MultipathConfig( - pathname='all_bad.conf', default_path_checker='directio', - reassign_maps=True, 
default_detect_checker=False, - default_detect_prio=False, default_retain_hwhandler=False) - none_config = MultipathConfig(pathname='none.conf') - facts = MultipathConfFacts(configs=[bad_config, none_config]) - - current_actor_context.feed(facts) - current_actor_context.run() - reports = list(current_actor_context.consume(Report)) - assert reports and len(reports) == 3 - _assert_default_checker_report(reports[0].report, 'all_bad.conf') - _assert_default_detect_report(reports[1].report, 'all_bad.conf') - _assert_reassign_maps(reports[2].report, 'all_bad.conf') - - -def test_nothing_then_bad(current_actor_context): - bad_config = MultipathConfig( - pathname='all_bad.conf', default_path_checker='directio', - reassign_maps=True, default_detect_checker=False, - default_detect_prio=False, default_retain_hwhandler=False) - none_config = MultipathConfig(pathname='none.conf') - facts = MultipathConfFacts(configs=[none_config, bad_config]) - - current_actor_context.feed(facts) - current_actor_context.run() - reports = list(current_actor_context.consume(Report)) - assert reports and len(reports) == 3 - _assert_default_checker_report(reports[0].report, 'all_bad.conf') - _assert_default_detect_report(reports[1].report, 'all_bad.conf') - _assert_reassign_maps(reports[2].report, 'all_bad.conf') - - -def test_only_bad_checker(current_actor_context): - bad_checker_config = MultipathConfig( - pathname='bad_checker.conf', default_path_checker='rdac', - default_retain_hwhandler=True) - facts = MultipathConfFacts(configs=[bad_checker_config]) - - current_actor_context.feed(facts) - current_actor_context.run() - reports = list(current_actor_context.consume(Report)) - assert reports and len(reports) == 1 - _assert_default_checker_report(reports[0].report, 'bad_checker.conf') - - -def test_only_bad_detect(current_actor_context): - bad_detect_config = MultipathConfig( - pathname='bad_detect.conf', default_detect_prio=True, - default_detect_checker=False) - facts = 
MultipathConfFacts(configs=[bad_detect_config]) - - current_actor_context.feed(facts) - current_actor_context.run() - reports = list(current_actor_context.consume(Report)) - assert reports and len(reports) == 1 - _assert_default_detect_report(reports[0].report, 'bad_detect.conf') - - -def test_only_bad_reassign(current_actor_context): - bad_reassign_config = MultipathConfig( - pathname='bad_reassign.conf', reassign_maps=True) - facts = MultipathConfFacts(configs=[bad_reassign_config]) - - current_actor_context.feed(facts) - current_actor_context.run() - reports = list(current_actor_context.consume(Report)) - assert reports and len(reports) == 1 - _assert_reassign_maps(reports[0].report, 'bad_reassign.conf') - - -def test_different_files(current_actor_context): - bad_detect_checker_config = MultipathConfig( - pathname='bad_detect_checker.conf', default_detect_checker=False) - bad_detect_prio_config = MultipathConfig( - pathname='bad_detect_prio.conf', default_detect_prio=False) - bad_retain_hwhandler_config = MultipathConfig( - pathname='bad_retain_hwhandler.conf', - default_retain_hwhandler=False) - facts = MultipathConfFacts( - configs=[bad_detect_checker_config, bad_detect_prio_config, - bad_retain_hwhandler_config]) - - current_actor_context.feed(facts) - current_actor_context.run() - reports = list(current_actor_context.consume(Report)) - assert reports and len(reports) == 1 - _assert_default_detect_report( - reports[0].report, - 'bad_detect_checker.conf, bad_detect_prio.conf and ' - 'bad_retain_hwhandler.conf') diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfread/actor.py b/repos/system_upgrade/el7toel8/actors/multipathconfread/actor.py deleted file mode 100644 index 66b1f431..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfread/actor.py +++ /dev/null @@ -1,33 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import multipathconfread -from leapp.models import DistributionSignedRPM, MultipathConfFacts, 
TargetUserSpaceUpgradeTasks -from leapp.tags import FactsPhaseTag, IPUWorkflowTag - - -class MultipathConfRead(Actor): - """ - Read multipath configuration files and extract the necessary information - - Related files: - - /etc/multipath.conf - - /etc/multipath/ - any files inside the directory - - /etc/xdrdevices.conf - - As well, create task (msg) to copy all needed multipath files into - the target container as the files are needed to create proper initramfs. - This covers the files mentioned above. - """ - - name = 'multipath_conf_read' - consumes = (DistributionSignedRPM,) - produces = (MultipathConfFacts, TargetUserSpaceUpgradeTasks) - tags = (FactsPhaseTag, IPUWorkflowTag) - - def process(self): - if multipathconfread.is_processable(): - res = multipathconfread.get_multipath_conf_facts() - if res: - self.produce(res) - # Create task to copy multipath config files Iff facts - # are generated - multipathconfread.produce_copy_to_target_task() diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfread/libraries/multipathconfread.py b/repos/system_upgrade/el7toel8/actors/multipathconfread/libraries/multipathconfread.py deleted file mode 100644 index 6e6ab540..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfread/libraries/multipathconfread.py +++ /dev/null @@ -1,242 +0,0 @@ -import errno -import os - -from leapp.libraries.common import multipathutil -from leapp.libraries.common.rpms import has_package -from leapp.libraries.stdlib import api -from leapp.models import ( - CopyFile, - DistributionSignedRPM, - MultipathConfFacts, - MultipathConfig, - MultipathConfigOption, - TargetUserSpaceUpgradeTasks -) - - -def _change_existing_option(curr_options, opt_name, opt_value): - for option in curr_options: - if option.name == opt_name: - option.value = opt_value # latest value is used - return True - return False - - -def _add_options(curr_options, new_options): - ignore = ['hardware_handler', 'pg_timeout', 'product', 'unpriv_sgio', - 
'product_blacklist', 'revision', 'vendor'] - for opt_name, opt_value in new_options: - if opt_name in ignore: - continue - if opt_name == 'detect_path_checker': - opt_name = 'detect_checker' - if not _change_existing_option(curr_options, opt_name, opt_value): - curr_options.append(MultipathConfigOption(name=opt_name, - value=opt_value)) - - -def _remove_qinp(value): - items = value.split() - if items == [] or not items[0].isdigit(): - return value - nr_features = int(items[0]) - if nr_features != len(items) - 1: - return value - try: - items.remove('queue_if_no_path') - except ValueError: - return value - items[0] = str(nr_features - 1) - return ' '.join(items) - - -def _fix_qinp_options(options): - have_npr = False - need_npr = False - for option in options: - if option.name == 'features' and 'queue_if_no_path' in option.value: - option.value = _remove_qinp(option.value) - need_npr = True - if option.name == 'no_path_retry': - have_npr = True - if need_npr and not have_npr: - options.append(MultipathConfigOption(name='no_path_retry', - value='queue')) - - -def _options_match(overrides, all_devs): - if overrides == 'detect_path_checker' and all_devs == 'detect_checker': - return True - if overrides in ('path_checker', 'checker') and \ - all_devs in ('path_checker', 'checker'): - return True - if overrides == all_devs: - return True - return False - - -def _filter_options(all_dev_options, overrides_options): - for name, value in overrides_options: - if name == 'features' and 'queue_if_no_path' in value: - overrides_options.append(('no_path_retry', 'queue')) - break - for name, _value in overrides_options: - for option in all_dev_options: - if _options_match(name, option.name): - all_dev_options.remove(option) - break - - -def _parse_config(path): - contents = multipathutil.read_config(path) - if contents is None: - return None - conf = MultipathConfig(pathname=path) - conf.all_devs_options = [] - section = None - in_subsection = False - device_options = [] - 
overrides_options = [] - in_all_devs = False - for line in contents.split('\n'): - try: - data = multipathutil.LineData(line, section, in_subsection) - except ValueError: - continue - if data.type == data.TYPE_BLANK: - continue - if data.type == data.TYPE_SECTION_END: - if in_subsection: - in_subsection = False - if in_all_devs: - _add_options(conf.all_devs_options, device_options) - in_all_devs = False - device_options = [] - elif section: - section = None - continue - if data.type == data.TYPE_SECTION_START: - if not section: - section = data.section - elif not in_subsection: - in_subsection = True - continue - if data.type != data.TYPE_OPTION: - continue - if section == 'defaults': - if data.option in ('path_checker', 'checker'): - conf.default_path_checker = data.value - elif data.option == 'config_dir': - conf.config_dir = data.value - elif data.option == 'retain_attached_hw_handler': - conf.default_retain_hwhandler = data.is_enabled() - elif data.option == 'detect_prio': - conf.default_detect_prio = data.is_enabled() - elif data.option == 'detect_path_checker': - conf.default_detect_checker = data.is_enabled() - elif data.option == 'reassign_maps': - conf.reassign_maps = data.is_enabled() - elif data.option == 'hw_str_match': - conf.hw_str_match_exists = True - elif data.option == 'ignore_new_boot_devs': - conf.ignore_new_boot_devs_exists = True - elif data.option == 'new_bindings_in_boot': - conf.new_bindings_in_boot_exists = True - if section == 'devices' and in_subsection: - if data.option == 'all_devs' and data.is_enabled(): - conf.all_devs_section_exists = True - in_all_devs = True - else: - device_options.append((data.option, data.value)) - if section == 'overrides': - if data.option == 'hardware_handler': - conf.overrides_hwhandler_exists = True - elif data.option == 'pg_timeout': - conf.overrides_pg_timeout_exists = True - else: - overrides_options.append((data.option, data.value)) - if data.option == 'unpriv_sgio': - conf.unpriv_sgio_exists = True - 
if data.option == 'detect_path_checker': - conf.detect_path_checker_exists = True - if data.option == 'features' and 'queue_if_no_path' in data.value: - conf.queue_if_no_path_exists = True - - if in_subsection and in_all_devs: - _add_options(conf.all_devs_options, device_options) - _fix_qinp_options(conf.all_devs_options) - _filter_options(conf.all_devs_options, overrides_options) - return conf - - -def _parse_config_dir(config_dir): - res = [] - try: - for config_file in sorted(os.listdir(config_dir)): - path = os.path.join(config_dir, config_file) - if not path.endswith('.conf'): - continue - conf = _parse_config(path) - if conf: - res.append(conf) - except OSError as e: - if e.errno == errno.ENOENT: - api.current_logger().debug('Multipath conf directory ' + - '"{}" doesn\'t exist'.format(config_dir)) - else: - api.current_logger().warning('Failed to read multipath config ' + - 'directory ' + - '"{}": {}'.format(config_dir, e)) - return res - - -def is_processable(): - res = has_package(DistributionSignedRPM, 'device-mapper-multipath') - if not res: - api.current_logger().debug('device-mapper-multipath is not installed.') - return res - - -def get_multipath_conf_facts(config_file='/etc/multipath.conf'): - res_configs = [] - conf = _parse_config(config_file) - if not conf: - return None - res_configs.append(conf) - if conf.config_dir: - res_configs.extend(_parse_config_dir(conf.config_dir)) - else: - res_configs.extend(_parse_config_dir('/etc/multipath/conf.d')) - return MultipathConfFacts(configs=res_configs) - - -def produce_copy_to_target_task(): - """ - Produce task to copy files into the target userspace - - The multipath configuration files are needed when the upgrade init ramdisk - is generated to ensure we are able to boot into the upgrade environment - and start the upgrade process itself. By this msg it's told that these - files/dirs will be available when the upgrade init ramdisk is generated. 
- - See TargetUserSpaceUpgradeTasks and UpgradeInitramfsTasks for more info. - """ - # TODO(pstodulk): move the function to the multipathconfcheck actor - # and get rid of the hardcoded stuff. - # - The current behaviour looks from the user POV same as before this - # * commit. I am going to keep the proper fix for additional PR as we do - # * not want to make the current PR even more complex than now and the solution - # * is not so trivial. - # - As well, I am missing some information around xDR devices, which are - # * possibly not handled correctly (maybe missing some executables?..) - # * Update: practically we do not have enough info about xDR drivers, but - # * discussed with Ben Marzinski, as the multipath dracut module includes - # * the xDR utils stuff, we should handle it in the same way. - # * See xdrgetuid, xdrgetinfo (these two utils are now missing in our initramfs) - copy_files = [] - for fname in ['/etc/multipath.conf', '/etc/multipath', '/etc/xdrdevices.conf']: - if os.path.exists(fname): - copy_files.append(CopyFile(src=fname)) - - if copy_files: - api.produce(TargetUserSpaceUpgradeTasks(copy_files=copy_files)) diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/all_the_things.conf b/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/all_the_things.conf deleted file mode 100644 index 48ade1c6..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/all_the_things.conf +++ /dev/null @@ -1,1052 +0,0 @@ -defaults { - verbosity 2 - polling_interval 5 - max_polling_interval 20 - reassign_maps "yes" - multipath_dir "/lib64/multipath" - path_selector "service-time 0" - path_grouping_policy "failover" - uid_attribute "ID_SERIAL" - prio "const" - prio_args "" - features "0" - path_checker "directio" - alias_prefix "mpath" - failback "manual" - rr_min_io 1000 - rr_min_io_rq 1 - max_fds 1048576 - rr_weight "uniform" - queue_without_daemon "no" - flush_on_last_del "no" - 
user_friendly_names "yes" - fast_io_fail_tmo 5 - bindings_file "/etc/multipath/bindings" - wwids_file /etc/multipath/wwids - prkeys_file /etc/multipath/prkeys - log_checker_err always - find_multipaths yes - retain_attached_hw_handler no - detect_prio no - detect_path_checker no - hw_str_match no - force_sync no - deferred_remove no - ignore_new_boot_devs no - skip_kpartx no - config_dir "files/conf.d" - delay_watch_checks no - delay_wait_checks no - retrigger_tries 3 - retrigger_delay 10 - missing_uev_wait_timeout 30 - new_bindings_in_boot no - remove_retries 0 - disable_changed_wwids no - unpriv_sgio no - ghost_delay no - all_tg_pt no - marginal_path_err_sample_time no - marginal_path_err_rate_threshold no - marginal_path_err_recheck_gap_time no - marginal_path_double_failed_time no -} -blacklist { - devnode "sdb" - devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" - devnode "^(td|hd|vd)[a-z]" - devnode "^dcssblk[0-9]*" - device { - vendor "DGC" - product "LUNZ" - } - device { - vendor "EMC" - product "LUNZ" - } - device { - vendor "IBM" - product "Universal Xport" - } - device { - vendor "IBM" - product "S/390.*" - } - device { - vendor "DELL" - product "Universal Xport" - } - device { - vendor "LENOVO" - product "Universal Xport" - } - device { - vendor "SGI" - product "Universal Xport" - } - device { - vendor "STK" - product "Universal Xport" - } - device { - vendor "SUN" - product "Universal Xport" - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "Universal Xport" - } -} -blacklist_exceptions { - devnode "sda" - wwid "123456789" - device { - vendor "IBM" - product "S/390x" - } -} - -devices { - device { - vendor "COMPELNT" - product "Compellent Vol" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "APPLE*" - product "Xserve RAID " - path_grouping_policy "multibus" - path_checker "directio" - features 
"0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "3PARdata" - product "VV" - path_grouping_policy "group_by_prio" - path_selector "service-time 0" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - fast_io_fail_tmo 10 - dev_loss_tmo "infinity" - } - device { - vendor "DEC" - product "HSG80" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - features "1 queue_if_no_path" - hardware_handler "1 hp_sw" - prio "hp_sw" - rr_weight "uniform" - } - device { - vendor "HP" - product "A6189A" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "(MSA|HSV)1.0.*" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - features "1 queue_if_no_path" - hardware_handler "1 hp_sw" - prio "hp_sw" - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "(COMPAQ|HP)" - product "MSA VOLUME" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "(COMPAQ|HP)" - product "HSV1[01]1|HSV2[01]0|HSV3[046]0|HSV4[05]0" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA2[02]12fc|MSA2012i" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA2012sa|MSA23(12|24)(fc|i|sa)|MSA2000s VOLUME" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - 
hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA (1|2)040 SA(N|S)" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "HSVX700" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "HP" - product "LOGICAL VOLUME.*" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 12 - } - device { - vendor "HP" - product "P2000 G3 FC|P2000G3 FC/iSCSI|P2000 G3 SAS|P2000 G3 iSCSI" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "DDN" - product "SAN DataDirector" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "EMC" - product "SYMMETRIX" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 6 - } - device { - vendor "DGC" - product ".*" - product_blacklist "LUNZ" - path_grouping_policy "group_by_prio" - path_checker "emc_clariion" - features "1 queue_if_no_path" - hardware_handler "1 emc" - prio "emc" - failback immediate - rr_weight "uniform" - no_path_retry 60 - retain_attached_hw_handler yes - detect_prio yes - detect_path_checker yes - } - device { - vendor "EMC" - product "Invista" - product_blacklist "LUNZ" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - 
hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 5 - } - device { - vendor "FSC" - product "CentricStor" - path_grouping_policy "group_by_serial" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "FUJITSU" - product "ETERNUS_DX(H|L|M|400|8000)" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 10 - } - device { - vendor "(HITACHI|HP)" - product "OPEN-.*" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "HITACHI" - product "DF.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "hds" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "ProFibre 4000R" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^1722-600" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "1 queue_if_no_path" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1724" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "1 queue_if_no_path" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1726" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "1 queue_if_no_path" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } 
- device { - vendor "IBM" - product "^1742" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1745|^1746" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 15 - } - device { - vendor "IBM" - product "^1814" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1815" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1818" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^3526" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^3542" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2105800" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - 
prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2105F20" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^1750500" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2107900" - path_grouping_policy "multibus" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2145" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "S/390 DASD ECKD" - product_blacklist "S/390.*" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "S/390 DASD FBA" - product_blacklist "S/390.*" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^IPR.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "1820N00" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - rr_min_io 100 - } - device { - vendor "IBM" - product "2810XIV" - path_grouping_policy "multibus" - path_checker "tur" - features "1 
queue_if_no_path" - hardware_handler "0" - prio "const" - failback 15 - rr_weight "uniform" - rr_min_io 15 - } - device { - vendor "AIX" - product "VDASD" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "IBM" - product "3303 NVDISK" - path_grouping_policy "failover" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "AIX" - product "NVDISK" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "DELL" - product "^MD3" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 30 - } - device { - vendor "LENOVO" - product "DE_Series" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - no_path_retry 30 - } - device { - vendor "NETAPP" - product "LUN.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "3 queue_if_no_path pg_init_retries 50" - hardware_handler "0" - prio "ontap" - failback immediate - rr_weight "uniform" - rr_min_io 128 - flush_on_last_del "yes" - dev_loss_tmo "infinity" - user_friendly_names no - retain_attached_hw_handler yes - detect_prio yes - } - device { - vendor "NEXENTA" - product "COMSTAR" - path_grouping_policy "group_by_serial" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 30 - rr_min_io 128 - } - 
device { - vendor "IBM" - product "Nseries.*" - path_grouping_policy "group_by_prio" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "ontap" - failback immediate - rr_weight "uniform" - rr_min_io 128 - } - device { - vendor "Pillar" - product "Axiom.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - rr_weight "uniform" - } - device { - vendor "SGI" - product "TP9[13]00" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "SGI" - product "TP9[45]00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SGI" - product "IS.*" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 15 - } - device { - vendor "NEC" - product "DISK ARRAY" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "STK" - product "OPENstorage D280" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - } - device { - vendor "SUN" - product "(StorEdge 3510|T4)" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "SUN" - product "STK6580_6780" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 rdac" - 
prio "rdac" - failback immediate - rr_weight "uniform" - } - device { - vendor "EUROLOGC" - product "FC2502" - path_grouping_policy "group_by_prio" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "PIVOT3" - product "RAIGE VOLUME" - path_grouping_policy "multibus" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - rr_min_io 100 - } - device { - vendor "SUN" - product "CSM200_R" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SUN" - product "LCSM100_[IEFS]" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SUN" - product "SUN_6180" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - rr_min_io 1000 - rr_min_io_rq 1 - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "INF-01-00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 30 - retain_attached_hw_handler yes - detect_prio yes - } - device { - vendor "STK" - product "FLEXLINE 380" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "Intel" - product "Multi-Flex" - 
path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "SANmelody" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "Virtual Disk" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "NFINIDAT" - product "InfiniBox.*" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback 30 - rr_weight "priorities" - no_path_retry "fail" - flush_on_last_del "yes" - dev_loss_tmo 30 - } - device { - vendor "Nimble" - product "Server" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "1 alua" - prio "alua" - failback immediate - dev_loss_tmo "infinity" - } - device { - vendor "XtremIO" - product "XtremApp" - path_grouping_policy "multibus" - path_selector "queue-length 0" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - failback immediate - fast_io_fail_tmo 15 - } - device { - vendor "PURE" - product "FlashArray" - path_grouping_policy "multibus" - path_selector "queue-length 0" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - fast_io_fail_tmo 10 - dev_loss_tmo 60 - user_friendly_names no - } - device { - vendor "HUAWEI" - product "XSG1" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - dev_loss_tmo 30 - } - device { - vendor "NVME" - product 
"^EMC PowerMax_" - path_grouping_policy "multibus" - uid_attribute "ID_WWN" - path_checker "none" - prio "const" - } - device { - vendor "NVME" - product ".*" - path_grouping_policy "multibus" - uid_attribute "ID_WWN" - path_checker "none" - detect_prio yes - } - device { - vendor "IBM" - product "^2145" - path_selector "service-time 0" - } - device { - fast_io_fail_tmo 5 - all_devs yes - no_path_retry fail - detect_path_checker yes - } - device { - features "1 queue_if_no_path" - path_checker "tur" - all_devs yes - } -} -multipaths { - multipath { - wwid "123456789" - alias "foo" - } -} - -overrides { - checker "rdac" - detect_path_checker no - hardware_handler "1 alua" - pg_timeout no - fast_io_fail_tmo 10 - unpriv_sgio no -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/already_updated.conf b/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/already_updated.conf deleted file mode 100644 index 81a6944d..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/already_updated.conf +++ /dev/null @@ -1,1069 +0,0 @@ -defaults { - verbosity 2 - polling_interval 5 - max_polling_interval 20 -# reassign_maps "yes" # Commented out by Leapp - multipath_dir "/lib64/multipath" - path_selector "service-time 0" - path_grouping_policy "failover" - uid_attribute "ID_SERIAL" - prio "const" - prio_args "" - features "0" -# path_checker "directio" # Commented out by Leapp - alias_prefix "mpath" - failback "manual" - rr_min_io 1000 - rr_min_io_rq 1 - max_fds 1048576 - rr_weight "uniform" - queue_without_daemon "no" - flush_on_last_del "no" - user_friendly_names "yes" - fast_io_fail_tmo 5 - bindings_file "/etc/multipath/bindings" - wwids_file /etc/multipath/wwids - prkeys_file /etc/multipath/prkeys - log_checker_err always - find_multipaths yes -# retain_attached_hw_handler no # Commented out by Leapp -# detect_prio no # Commented out by Leapp -# detect_path_checker no # Commented out by Leapp -# hw_str_match 
no # Commented out by Leapp - force_sync no - deferred_remove no -# ignore_new_boot_devs no # Commented out by Leapp - skip_kpartx no - config_dir "files/conf.d" - delay_watch_checks no - delay_wait_checks no - retrigger_tries 3 - retrigger_delay 10 - missing_uev_wait_timeout 30 -# new_bindings_in_boot no # Commented out by Leapp - remove_retries 0 - disable_changed_wwids no -# unpriv_sgio no # Commented out by Leapp - ghost_delay no - all_tg_pt no - marginal_path_err_sample_time no - marginal_path_err_rate_threshold no - marginal_path_err_recheck_gap_time no - marginal_path_double_failed_time no -} -blacklist { - devnode "sdb" - devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" - devnode "^(td|hd|vd)[a-z]" - devnode "^dcssblk[0-9]*" - device { - vendor "DGC" - product "LUNZ" - } - device { - vendor "EMC" - product "LUNZ" - } - device { - vendor "IBM" - product "Universal Xport" - } - device { - vendor "IBM" - product "S/390.*" - } - device { - vendor "DELL" - product "Universal Xport" - } - device { - vendor "LENOVO" - product "Universal Xport" - } - device { - vendor "SGI" - product "Universal Xport" - } - device { - vendor "STK" - product "Universal Xport" - } - device { - vendor "SUN" - product "Universal Xport" - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "Universal Xport" - } -} -blacklist_exceptions { - devnode "sda" - wwid "123456789" - device { - vendor "IBM" - product "S/390x" - } -} - -devices { - device { - vendor "COMPELNT" - product "Compellent Vol" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "APPLE*" - product "Xserve RAID " - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "3PARdata" - product "VV" - path_grouping_policy "group_by_prio" - path_selector "service-time 0" - 
path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - fast_io_fail_tmo 10 - dev_loss_tmo "infinity" - } - device { - vendor "DEC" - product "HSG80" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "1 hp_sw" - prio "hp_sw" - rr_weight "uniform" - } - device { - vendor "HP" - product "A6189A" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "(MSA|HSV)1.0.*" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - features "0" # Line modified by Leapp - hardware_handler "1 hp_sw" - prio "hp_sw" - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "(COMPAQ|HP)" - product "MSA VOLUME" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "(COMPAQ|HP)" - product "HSV1[01]1|HSV2[01]0|HSV3[046]0|HSV4[05]0" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA2[02]12fc|MSA2012i" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA2012sa|MSA23(12|24)(fc|i|sa)|MSA2000s VOLUME" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor 
"HP" - product "MSA (1|2)040 SA(N|S)" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "HSVX700" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "HP" - product "LOGICAL VOLUME.*" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 12 - } - device { - vendor "HP" - product "P2000 G3 FC|P2000G3 FC/iSCSI|P2000 G3 SAS|P2000 G3 iSCSI" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "DDN" - product "SAN DataDirector" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "EMC" - product "SYMMETRIX" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 6 - } - device { - vendor "DGC" - product ".*" - product_blacklist "LUNZ" - path_grouping_policy "group_by_prio" - path_checker "emc_clariion" - features "0" # Line modified by Leapp - hardware_handler "1 emc" - prio "emc" - failback immediate - rr_weight "uniform" - no_path_retry 60 - retain_attached_hw_handler yes - detect_prio yes - detect_checker yes # Line modified by Leapp - } - device { - vendor "EMC" - product "Invista" - product_blacklist "LUNZ" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 5 - } - device { - vendor "FSC" - 
product "CentricStor" - path_grouping_policy "group_by_serial" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "FUJITSU" - product "ETERNUS_DX(H|L|M|400|8000)" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 10 - } - device { - vendor "(HITACHI|HP)" - product "OPEN-.*" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "HITACHI" - product "DF.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "hds" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "ProFibre 4000R" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^1722-600" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" # Line modified by Leapp - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1724" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" # Line modified by Leapp - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1726" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" # Line modified by Leapp - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor 
"IBM" - product "^1742" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1745|^1746" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 15 - } - device { - vendor "IBM" - product "^1814" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1815" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1818" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^3526" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^3542" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2105800" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by 
Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2105F20" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^1750500" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2107900" - path_grouping_policy "multibus" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2145" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "S/390 DASD ECKD" - product_blacklist "S/390.*" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "S/390 DASD FBA" - product_blacklist "S/390.*" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^IPR.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "1 alua" - prio "alua" - failback 
immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "1820N00" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - rr_min_io 100 - } - device { - vendor "IBM" - product "2810XIV" - path_grouping_policy "multibus" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - failback 15 - rr_weight "uniform" - rr_min_io 15 - } - device { - vendor "AIX" - product "VDASD" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "IBM" - product "3303 NVDISK" - path_grouping_policy "failover" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "AIX" - product "NVDISK" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "DELL" - product "^MD3" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 30 - } - device { - vendor "LENOVO" - product "DE_Series" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - no_path_retry 30 - } - device { - vendor "NETAPP" - product "LUN.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "2 pg_init_retries 50" # Line modified by Leapp - no_path_retry queue # Line added by Leapp 
- hardware_handler "0" - prio "ontap" - failback immediate - rr_weight "uniform" - rr_min_io 128 - flush_on_last_del "yes" - dev_loss_tmo "infinity" - user_friendly_names no - retain_attached_hw_handler yes - detect_prio yes - } - device { - vendor "NEXENTA" - product "COMSTAR" - path_grouping_policy "group_by_serial" - path_checker "directio" - features "0" # Line modified by Leapp - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 30 - rr_min_io 128 - } - device { - vendor "IBM" - product "Nseries.*" - path_grouping_policy "group_by_prio" - path_checker "directio" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "ontap" - failback immediate - rr_weight "uniform" - rr_min_io 128 - } - device { - vendor "Pillar" - product "Axiom.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - rr_weight "uniform" - } - device { - vendor "SGI" - product "TP9[13]00" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "SGI" - product "TP9[45]00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SGI" - product "IS.*" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 15 - } - device { - vendor "NEC" - product "DISK ARRAY" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "STK" - product "OPENstorage 
D280" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - } - device { - vendor "SUN" - product "(StorEdge 3510|T4)" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "SUN" - product "STK6580_6780" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - } - device { - vendor "EUROLOGC" - product "FC2502" - path_grouping_policy "group_by_prio" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "PIVOT3" - product "RAIGE VOLUME" - path_grouping_policy "multibus" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - rr_min_io 100 - } - device { - vendor "SUN" - product "CSM200_R" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SUN" - product "LCSM100_[IEFS]" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SUN" - product "SUN_6180" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - rr_min_io 1000 - rr_min_io_rq 1 - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "INF-01-00" - product_blacklist 
"Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 30 - retain_attached_hw_handler yes - detect_prio yes - } - device { - vendor "STK" - product "FLEXLINE 380" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "Intel" - product "Multi-Flex" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "SANmelody" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "Virtual Disk" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "NFINIDAT" - product "InfiniBox.*" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback 30 - rr_weight "priorities" - no_path_retry "fail" - flush_on_last_del "yes" - dev_loss_tmo 30 - } - device { - vendor "Nimble" - product "Server" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "1 alua" - prio "alua" - failback immediate - dev_loss_tmo "infinity" - } - device { - vendor "XtremIO" - product "XtremApp" - path_grouping_policy "multibus" - path_selector 
"queue-length 0" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - failback immediate - fast_io_fail_tmo 15 - } - device { - vendor "PURE" - product "FlashArray" - path_grouping_policy "multibus" - path_selector "queue-length 0" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - fast_io_fail_tmo 10 - dev_loss_tmo 60 - user_friendly_names no - } - device { - vendor "HUAWEI" - product "XSG1" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - dev_loss_tmo 30 - } - device { - vendor "NVME" - product "^EMC PowerMax_" - path_grouping_policy "multibus" - uid_attribute "ID_WWN" - path_checker "none" - prio "const" - } - device { - vendor "NVME" - product ".*" - path_grouping_policy "multibus" - uid_attribute "ID_WWN" - path_checker "none" - detect_prio yes - } - device { - vendor "IBM" - product "^2145" - path_selector "service-time 0" - } -# device { # Section commented out by Leapp -# fast_io_fail_tmo 5 -# all_devs yes -# no_path_retry fail -# detect_checker yes # Line modified by Leapp -# } -# device { # Section commented out by Leapp -# features "1 queue_if_no_path" -# path_checker "tur" -# all_devs yes -# } -} -multipaths { - multipath { - wwid "123456789" - alias "foo" - } -} - -overrides { - no_path_retry fail # Line added by Leapp - features 0 # Line added by Leapp - checker "rdac" - detect_checker no # Line modified by Leapp -# hardware_handler "1 alua" # Commented out by Leapp -# pg_timeout no # Commented out by Leapp - fast_io_fail_tmo 10 -# unpriv_sgio no # Commented out by Leapp -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/conf.d/all_devs.conf b/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/conf.d/all_devs.conf deleted file mode 100644 index fa52de4b..00000000 --- 
a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/conf.d/all_devs.conf +++ /dev/null @@ -1,136 +0,0 @@ -# This is a basic configuration file with some examples, for device mapper -# multipath. -# -# For a complete list of the default configuration values, run either -# multipath -t -# or -# multipathd show config -# -# For a list of configuration options with descriptions, see the multipath.conf -# man page - -## By default, devices with vendor = "IBM" and product = "S/390.*" are -## blacklisted. To enable mulitpathing on these devies, uncomment the -## following lines. -#blacklist_exceptions { -# device { -# vendor "IBM" -# product "S/390.*" -# } -#} - -## Use user friendly names, instead of using WWIDs as names. -defaults { - user_friendly_names yes - find_multipaths yes -} - -devices { - device { - vendor "NVME" - product ".*" - path_grouping_policy multibus - - } - device { - all_devs yes - path_checker tur - pg_timeout no - detect_path_checker yes - } - - device { - features "3 queue_if_no_path pg_init_retries 50" - path_selector "service-time 0" - all_devs yes - unpriv_sgio no - } - - device { - hardware_handler "1 alua" - vendor "test_vendor" - product "test_product" - revision 1 - product_blacklist "test.*" - all_devs yes - fast_io_fail_tmo 5 - path_checker rdac - } - device { - vendor "IBM" - product "^2145" - path_selector "service-time 0" - features "1 queue_if_no_path" - } - -} - - - -## -## Here is an example of how to configure some standard options. -## -# -#defaults { -# polling_interval 10 -# path_selector "round-robin 0" -# path_grouping_policy multibus -# uid_attribute ID_SERIAL -# prio alua -# path_checker readsector0 -# rr_min_io 100 -# max_fds 8192 -# rr_weight priorities -# failback immediate -# no_path_retry fail -# user_friendly_names yes -#} -## -## The wwid line in the following blacklist section is shown as an example -## of how to blacklist devices by wwid. 
The 2 devnode lines are the -## compiled in default blacklist. If you want to blacklist entire types -## of devices, such as all scsi devices, you should use a devnode line. -## However, if you want to blacklist specific devices, you should use -## a wwid line. Since there is no guarantee that a specific device will -## not change names on reboot (from /dev/sda to /dev/sdb for example) -## devnode lines are not recommended for blacklisting specific devices. -## -blacklist { - devnode "sdb" -# wwid 26353900f02796769 -# devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" -# devnode "^hd[a-z]" -} -#multipaths { -# multipath { -# wwid 3600508b4000156d700012000000b0000 -# alias yellow -# path_grouping_policy multibus -# path_selector "round-robin 0" -# failback manual -# rr_weight priorities -# no_path_retry 5 -# } -# multipath { -# wwid 1DEC_____321816758474 -# alias red -# } -#} -#devices { -# device { -# vendor "COMPAQ " -# product "HSV110 (C)COMPAQ" -# path_grouping_policy multibus -# path_checker readsector0 -# path_selector "round-robin 0" -# hardware_handler "0" -# failback 15 -# rr_weight priorities -# no_path_retry queue -# } -# device { -# vendor "COMPAQ " -# product "MSA1000 " -# path_grouping_policy multibus -# } -#} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/conf.d/empty.conf b/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/conf.d/empty.conf deleted file mode 100644 index e69de29b..00000000 diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/default_rhel7.conf b/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/default_rhel7.conf deleted file mode 100644 index de91e5dd..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/default_rhel7.conf +++ /dev/null @@ -1,1021 +0,0 @@ -defaults { - verbosity 2 - polling_interval 5 - max_polling_interval 20 - reassign_maps "yes" - multipath_dir "/lib64/multipath" - path_selector 
"service-time 0" - path_grouping_policy "failover" - uid_attribute "ID_SERIAL" - prio "const" - prio_args "" - features "0" - path_checker "directio" - alias_prefix "mpath" - failback "manual" - rr_min_io 1000 - rr_min_io_rq 1 - max_fds 1048576 - rr_weight "uniform" - queue_without_daemon "no" - flush_on_last_del "no" - user_friendly_names "yes" - fast_io_fail_tmo 5 - bindings_file "/etc/multipath/bindings" - wwids_file /etc/multipath/wwids - prkeys_file /etc/multipath/prkeys - log_checker_err always - find_multipaths yes - retain_attached_hw_handler no - detect_prio no - detect_path_checker no - hw_str_match no - force_sync no - deferred_remove no - ignore_new_boot_devs no - skip_kpartx no - config_dir "files/conf.d" - delay_watch_checks no - delay_wait_checks no - retrigger_tries 3 - retrigger_delay 10 - missing_uev_wait_timeout 30 - new_bindings_in_boot no - remove_retries 0 - disable_changed_wwids no - unpriv_sgio no - ghost_delay no - all_tg_pt no - marginal_path_err_sample_time no - marginal_path_err_rate_threshold no - marginal_path_err_recheck_gap_time no - marginal_path_double_failed_time no -} -blacklist { - devnode "sdb" - devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" - devnode "^(td|hd|vd)[a-z]" - devnode "^dcssblk[0-9]*" - device { - vendor "DGC" - product "LUNZ" - } - device { - vendor "EMC" - product "LUNZ" - } - device { - vendor "IBM" - product "Universal Xport" - } - device { - vendor "IBM" - product "S/390.*" - } - device { - vendor "DELL" - product "Universal Xport" - } - device { - vendor "LENOVO" - product "Universal Xport" - } - device { - vendor "SGI" - product "Universal Xport" - } - device { - vendor "STK" - product "Universal Xport" - } - device { - vendor "SUN" - product "Universal Xport" - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "Universal Xport" - } -} -blacklist_exceptions { -} -devices { - device { - vendor "COMPELNT" - product "Compellent Vol" - path_grouping_policy "multibus" - path_checker "tur" - features "0" 
- hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "APPLE*" - product "Xserve RAID " - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "3PARdata" - product "VV" - path_grouping_policy "group_by_prio" - path_selector "service-time 0" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - fast_io_fail_tmo 10 - dev_loss_tmo "infinity" - } - device { - vendor "DEC" - product "HSG80" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - features "1 queue_if_no_path" - hardware_handler "1 hp_sw" - prio "hp_sw" - rr_weight "uniform" - } - device { - vendor "HP" - product "A6189A" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "(MSA|HSV)1.0.*" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - features "1 queue_if_no_path" - hardware_handler "1 hp_sw" - prio "hp_sw" - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "(COMPAQ|HP)" - product "MSA VOLUME" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "(COMPAQ|HP)" - product "HSV1[01]1|HSV2[01]0|HSV3[046]0|HSV4[05]0" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA2[02]12fc|MSA2012i" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback 
immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA2012sa|MSA23(12|24)(fc|i|sa)|MSA2000s VOLUME" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA (1|2)040 SA(N|S)" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "HSVX700" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "HP" - product "LOGICAL VOLUME.*" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 12 - } - device { - vendor "HP" - product "P2000 G3 FC|P2000G3 FC/iSCSI|P2000 G3 SAS|P2000 G3 iSCSI" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "DDN" - product "SAN DataDirector" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "EMC" - product "SYMMETRIX" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 6 - } - device { - vendor "DGC" - product ".*" - product_blacklist "LUNZ" - path_grouping_policy "group_by_prio" - path_checker "emc_clariion" - features "1 queue_if_no_path" - hardware_handler "1 emc" - prio "emc" - failback immediate - rr_weight "uniform" - 
no_path_retry 60 - retain_attached_hw_handler yes - detect_prio yes - detect_path_checker yes - } - device { - vendor "EMC" - product "Invista" - product_blacklist "LUNZ" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 5 - } - device { - vendor "FSC" - product "CentricStor" - path_grouping_policy "group_by_serial" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "FUJITSU" - product "ETERNUS_DX(H|L|M|400|8000)" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 10 - } - device { - vendor "(HITACHI|HP)" - product "OPEN-.*" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "HITACHI" - product "DF.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "hds" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "ProFibre 4000R" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^1722-600" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "1 queue_if_no_path" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1724" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "1 queue_if_no_path" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product 
"^1726" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "1 queue_if_no_path" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1742" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1745|^1746" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 15 - } - device { - vendor "IBM" - product "^1814" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1815" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1818" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^3526" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^3542" - path_grouping_policy "group_by_serial" - path_checker "tur" 
- features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2105800" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2105F20" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^1750500" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2107900" - path_grouping_policy "multibus" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2145" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "S/390 DASD ECKD" - product_blacklist "S/390.*" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "S/390 DASD FBA" - product_blacklist "S/390.*" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^IPR.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "1820N00" - path_grouping_policy "group_by_prio" - path_checker "tur" - 
features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - rr_min_io 100 - } - device { - vendor "IBM" - product "2810XIV" - path_grouping_policy "multibus" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - failback 15 - rr_weight "uniform" - rr_min_io 15 - } - device { - vendor "AIX" - product "VDASD" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "IBM" - product "3303 NVDISK" - path_grouping_policy "failover" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "AIX" - product "NVDISK" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "DELL" - product "^MD3" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 30 - } - device { - vendor "LENOVO" - product "DE_Series" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - no_path_retry 30 - } - device { - vendor "NETAPP" - product "LUN.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "3 queue_if_no_path pg_init_retries 50" - hardware_handler "0" - prio "ontap" - failback immediate - rr_weight "uniform" - rr_min_io 128 - flush_on_last_del "yes" - dev_loss_tmo "infinity" - user_friendly_names no - retain_attached_hw_handler yes - detect_prio yes - } - device { - vendor 
"NEXENTA" - product "COMSTAR" - path_grouping_policy "group_by_serial" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 30 - rr_min_io 128 - } - device { - vendor "IBM" - product "Nseries.*" - path_grouping_policy "group_by_prio" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "ontap" - failback immediate - rr_weight "uniform" - rr_min_io 128 - } - device { - vendor "Pillar" - product "Axiom.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - rr_weight "uniform" - } - device { - vendor "SGI" - product "TP9[13]00" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "SGI" - product "TP9[45]00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SGI" - product "IS.*" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 15 - } - device { - vendor "NEC" - product "DISK ARRAY" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "STK" - product "OPENstorage D280" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - } - device { - vendor "SUN" - product "(StorEdge 3510|T4)" - path_grouping_policy "multibus" - 
path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "SUN" - product "STK6580_6780" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - } - device { - vendor "EUROLOGC" - product "FC2502" - path_grouping_policy "group_by_prio" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "PIVOT3" - product "RAIGE VOLUME" - path_grouping_policy "multibus" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - rr_min_io 100 - } - device { - vendor "SUN" - product "CSM200_R" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SUN" - product "LCSM100_[IEFS]" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SUN" - product "SUN_6180" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - rr_min_io 1000 - rr_min_io_rq 1 - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "INF-01-00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 30 - retain_attached_hw_handler yes - detect_prio yes - } - device { - vendor "STK" - product "FLEXLINE 380" - product_blacklist "Universal 
Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "Intel" - product "Multi-Flex" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "SANmelody" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "Virtual Disk" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "NFINIDAT" - product "InfiniBox.*" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback 30 - rr_weight "priorities" - no_path_retry "fail" - flush_on_last_del "yes" - dev_loss_tmo 30 - } - device { - vendor "Nimble" - product "Server" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "1 alua" - prio "alua" - failback immediate - dev_loss_tmo "infinity" - } - device { - vendor "XtremIO" - product "XtremApp" - path_grouping_policy "multibus" - path_selector "queue-length 0" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - failback immediate - fast_io_fail_tmo 15 - } - device { - vendor "PURE" - product "FlashArray" - path_grouping_policy "multibus" - path_selector "queue-length 0" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - fast_io_fail_tmo 10 - dev_loss_tmo 60 - 
user_friendly_names no - } - device { - vendor "HUAWEI" - product "XSG1" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - dev_loss_tmo 30 - } - device { - vendor "NVME" - product "^EMC PowerMax_" - path_grouping_policy "multibus" - uid_attribute "ID_WWN" - path_checker "none" - prio "const" - } - device { - vendor "NVME" - product ".*" - path_grouping_policy "multibus" - uid_attribute "ID_WWN" - path_checker "none" - detect_prio yes - } - device { - vendor "IBM" - product "^2145" - path_selector "service-time 0" - } -} -multipaths { -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/default_rhel8.conf b/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/default_rhel8.conf deleted file mode 100644 index 62f889dc..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/default_rhel8.conf +++ /dev/null @@ -1,1049 +0,0 @@ -defaults { - verbosity 2 - polling_interval 5 - max_polling_interval 20 - reassign_maps "no" - multipath_dir "/lib64/multipath" - path_selector "service-time 0" - path_grouping_policy "failover" - uid_attribute "ID_SERIAL" - prio "const" - prio_args "" - features "0" - path_checker "tur" - alias_prefix "mpath" - failback "manual" - rr_min_io 1000 - rr_min_io_rq 1 - max_fds "max" - rr_weight "uniform" - queue_without_daemon "no" - flush_on_last_del "no" - user_friendly_names "yes" - fast_io_fail_tmo 5 - bindings_file "/etc/multipath/bindings" - wwids_file "/etc/multipath/wwids" - prkeys_file "/etc/multipath/prkeys" - log_checker_err always - all_tg_pt "no" - retain_attached_hw_handler "yes" - detect_prio "yes" - detect_checker "yes" - force_sync "yes" - strict_timing "no" - deferred_remove "no" - config_dir "/etc/multipath/conf.d" - delay_watch_checks "no" - delay_wait_checks "no" - san_path_err_threshold "no" - san_path_err_forget_rate "no" - san_path_err_recovery_time "no" - 
marginal_path_err_sample_time "no" - marginal_path_err_rate_threshold "no" - marginal_path_err_recheck_gap_time "no" - marginal_path_double_failed_time "no" - find_multipaths "on" - uxsock_timeout 4000 - retrigger_tries 0 - retrigger_delay 10 - missing_uev_wait_timeout 30 - skip_kpartx "no" - disable_changed_wwids ignored - remove_retries 0 - ghost_delay "no" - find_multipaths_timeout -10 - enable_foreign "^$" - marginal_pathgroups "no" -} -blacklist { - devnode "^(ram|zram|raw|loop|fd|md|dm-|sr|scd|st|dcssblk)[0-9]" - devnode "^(td|hd|vd)[a-z]" - device { - vendor "SGI" - product "Universal Xport" - } - device { - vendor "^DGC" - product "LUNZ" - } - device { - vendor "EMC" - product "LUNZ" - } - device { - vendor "DELL" - product "Universal Xport" - } - device { - vendor "IBM" - product "Universal Xport" - } - device { - vendor "IBM" - product "S/390" - } - device { - vendor "LENOVO" - product "Universal Xport" - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "Universal Xport" - } - device { - vendor "STK" - product "Universal Xport" - } - device { - vendor "SUN" - product "Universal Xport" - } - device { - vendor "(Intel|INTEL)" - product "VTrak V-LUN" - } - device { - vendor "Promise" - product "VTrak V-LUN" - } - device { - vendor "Promise" - product "Vess V-LUN" - } -} -blacklist_exceptions { - property "(SCSI_IDENT_|ID_WWN)" -} -devices { - device { - vendor "NVME" - product ".*" - uid_attribute "ID_WWN" - path_checker "none" - retain_attached_hw_handler "no" - } - device { - vendor "APPLE" - product "Xserve RAID" - path_grouping_policy "multibus" - } - device { - vendor "3PARdata" - product "VV" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 18 - fast_io_fail_tmo 10 - dev_loss_tmo "infinity" - vpd_vendor hp3par - } - device { - vendor "DEC" - product "HSG80" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - hardware_handler "1 hp_sw" - prio "hp_sw" - no_path_retry 
"queue" - } - device { - vendor "HP" - product "A6189A" - path_grouping_policy "multibus" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "(MSA|HSV)1[01]0" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - hardware_handler "1 hp_sw" - prio "hp_sw" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "MSA VOLUME" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "(HSV1[01]1|HSV2[01]0|HSV3[046]0|HSV4[05]0)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 12 - } - device { - vendor "HP" - product "(MSA2[02]12fc|MSA2012i)" - path_grouping_policy "multibus" - no_path_retry 18 - } - device { - vendor "HP" - product "(MSA2012sa|MSA23(12|24)(fc|i|sa)|MSA2000s VOLUME)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 18 - } - device { - vendor "HP" - product "MSA [12]0[45]0 SA[NS]" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 18 - } - device { - vendor "HP" - product "HSVX700" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 12 - } - device { - vendor "HP" - product "LOGICAL VOLUME" - path_grouping_policy "multibus" - no_path_retry 12 - } - device { - vendor "HP" - product "(P2000 G3 FC|P2000G3 FC/iSCSI|P2000 G3 SAS|P2000 G3 iSCSI)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 18 - } - device { - vendor "LEFTHAND" - product "(P4000|iSCSIDisk|FCDISK)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 18 - } - device { - vendor "Nimble" - product "Server" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "SGI" - product "TP9100" - 
path_grouping_policy "multibus" - } - device { - vendor "SGI" - product "TP9[3457]00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SGI" - product "IS" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SGI" - product "^DD[46]A-" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "DDN" - product "SAN DataDirector" - path_grouping_policy "multibus" - } - device { - vendor "DDN" - product "^EF3010" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "DDN" - product "^(EF3015|S2A|SFA)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "NEXENTA" - product "COMSTAR" - path_grouping_policy "group_by_serial" - no_path_retry 30 - } - device { - vendor "TEGILE" - product "(ZEBI-(FC|ISCSI)|INTELLIFLASH)" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 10 - } - device { - vendor "EMC" - product "SYMMETRIX" - path_grouping_policy "multibus" - no_path_retry 6 - } - device { - vendor "^DGC" - product "^(RAID|DISK|VRAID)" - product_blacklist "LUNZ" - path_grouping_policy "group_by_prio" - path_checker "emc_clariion" - hardware_handler "1 emc" - prio "emc" - failback "immediate" - no_path_retry 60 - } - device { - vendor "EMC" - product "Invista" - product_blacklist "LUNZ" - path_grouping_policy "multibus" - no_path_retry 5 - } - device { - vendor "XtremIO" - product "XtremApp" - path_grouping_policy "multibus" - } - device { - vendor "COMPELNT" - product "Compellent Vol" 
- path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "DELL" - product "^MD3" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "NVME" - product "^EMC PowerMax_" - path_grouping_policy "multibus" - } - device { - vendor "FSC" - product "CentricStor" - path_grouping_policy "group_by_serial" - } - device { - vendor "FUJITSU" - product "ETERNUS_DX(H|L|M|400|8000)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 10 - } - device { - vendor "(EUROLOGC|EuroLogc)" - product "FC2502" - path_grouping_policy "multibus" - } - device { - vendor "FUJITSU" - product "E[234]000" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 10 - } - device { - vendor "FUJITSU" - product "E[68]000" - path_grouping_policy "multibus" - no_path_retry 10 - } - device { - vendor "(HITACHI|HP)" - product "^OPEN-" - path_grouping_policy "multibus" - } - device { - vendor "HITACHI" - product "^DF" - path_grouping_policy "group_by_prio" - prio "hds" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "HITACHI" - product "^DF600F" - path_grouping_policy "multibus" - } - device { - vendor "IBM" - product "ProFibre 4000R" - path_grouping_policy "multibus" - } - device { - vendor "IBM" - product "^1722-600" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1724" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - 
device { - vendor "IBM" - product "^1726" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1742" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1746" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1813" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1814" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1815" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1818" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^3526" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - 
features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^(3542|3552)" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^2105" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1750500" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^2107900" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^2145" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "IBM" - product "S/390 DASD ECKD" - product_blacklist "S/390" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - no_path_retry "queue" - } - device { - vendor "IBM" - product "S/390 DASD FBA" - product_blacklist "S/390" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^IPR" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "IBM" - product "1820N00" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "(XIV|IBM)" - product "(NEXTRA|2810XIV)" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "(TMS|IBM)" - product "(RamSan|FlashSystem)" - path_grouping_policy "multibus" - } - device { - vendor "IBM" - product "^(DCS9900|2851)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } 
- device { - vendor "AIX" - product "VDASD" - path_grouping_policy "multibus" - no_path_retry 60 - } - device { - vendor "IBM" - product "3303[ ]+NVDISK" - no_path_retry 60 - } - device { - vendor "AIX" - product "NVDISK" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 60 - } - device { - vendor "LENOVO" - product "DE_Series" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "NETAPP" - product "LUN" - path_grouping_policy "group_by_prio" - features "2 pg_init_retries 50" - prio "ontap" - failback "immediate" - no_path_retry "queue" - flush_on_last_del "yes" - dev_loss_tmo "infinity" - user_friendly_names "no" - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "INF-01-00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SolidFir" - product "SSD SAN" - path_grouping_policy "multibus" - no_path_retry 24 - } - device { - vendor "NVME" - product "^NetApp ONTAP Controller" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "NEC" - product "DISK ARRAY" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - } - device { - vendor "^Pillar" - product "^Axiom" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - } - device { - vendor "^Oracle" - product "^Oracle FS" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - } - device { - vendor "STK" - product "BladeCtlr" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 
50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "STK" - product "OPENstorage" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "STK" - product "FLEXLINE 380" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "StorEdge 3" - path_grouping_policy "multibus" - } - device { - vendor "SUN" - product "STK6580_6780" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "CSM[12]00_R" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "LCSM100_[IEFS]" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "SUN_6180" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "ArrayStorage" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - 
hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "(Sun Storage|ZFS Storage|COMSTAR)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "PIVOT3" - product "RAIGE VOLUME" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "(NexGen|Pivot3)" - product "(TierStore|vSTAC)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "(Intel|INTEL)" - product "Multi-Flex" - product_blacklist "VTrak V-LUN" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "(LIO-ORG|SUSE)" - product "RBD" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 12 - } - device { - vendor "DataCore" - product "SANmelody" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "Virtual Disk" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "PURE" - product "FlashArray" - path_grouping_policy "multibus" - } - device { - vendor "HUAWEI" - product "XSG1" - path_grouping_policy "group_by_prio" - prio "alua" - } - device { - vendor "KOVE" - product "XPD" - path_grouping_policy "multibus" - } - device { - vendor "NFINIDAT" - product "InfiniBox" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - prio "alua" - failback 30 - rr_weight "priorities" - no_path_retry "fail" - rr_min_io 1 - rr_min_io_rq 1 - flush_on_last_del "yes" - fast_io_fail_tmo 15 - dev_loss_tmo 15 - } - device { - vendor "KMNRIO" - product "K2" - path_grouping_policy "multibus" - } - device { - vendor "NEXSAN" - product "NXS-B0" - path_grouping_policy 
"group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 15 - } - device { - vendor "NEXSAN" - product "SATAB" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 15 - } - device { - vendor "Nexsan" - product "(NestOS|NST5000)" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "VIOLIN" - product "SAN ARRAY$" - path_grouping_policy "group_by_serial" - no_path_retry 30 - } - device { - vendor "VIOLIN" - product "SAN ARRAY ALUA" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "VIOLIN" - product "CONCERTO ARRAY" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "(XIOTECH|XIOtech)" - product "ISE" - path_grouping_policy "multibus" - no_path_retry 12 - } - device { - vendor "(XIOTECH|XIOtech)" - product "IGLU DISK" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "(XIOTECH|XIOtech)" - product "Magnitude" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "Promise" - product "VTrak" - product_blacklist "VTrak V-LUN" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "Promise" - product "Vess" - product_blacklist "Vess V-LUN" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "^IFT" - product ".*" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "DotHill" - product "SANnet" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "DotHill" - product "R/Evo" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor 
"DotHill" - product "^DH" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "AStor" - product "NeoSapphire" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "INSPUR" - product "MCS" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - } -} -overrides { -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/just_all_devs.conf b/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/just_all_devs.conf deleted file mode 100644 index 4a34b7bf..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/just_all_devs.conf +++ /dev/null @@ -1,5 +0,0 @@ -devices { - device { - all_devs yes - } -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/just_checker.conf b/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/just_checker.conf deleted file mode 100644 index 0b3462e4..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/just_checker.conf +++ /dev/null @@ -1,1049 +0,0 @@ -defaults { - verbosity 2 - polling_interval 5 - max_polling_interval 20 - reassign_maps "no" - multipath_dir "/lib64/multipath" - path_selector "service-time 0" - path_grouping_policy "failover" - uid_attribute "ID_SERIAL" - prio "const" - prio_args "" - features "0" - checker "rdac" - alias_prefix "mpath" - failback "manual" - rr_min_io 1000 - rr_min_io_rq 1 - max_fds "max" - rr_weight "uniform" - queue_without_daemon "no" - flush_on_last_del "no" - user_friendly_names "yes" - fast_io_fail_tmo 5 - bindings_file "/etc/multipath/bindings" - wwids_file "/etc/multipath/wwids" - prkeys_file "/etc/multipath/prkeys" - log_checker_err always - all_tg_pt "no" - retain_attached_hw_handler "yes" - detect_prio "yes" - detect_checker "yes" - force_sync "yes" - strict_timing "no" - deferred_remove "no" - config_dir "/etc/multipath/conf.d" - delay_watch_checks 
"no" - delay_wait_checks "no" - san_path_err_threshold "no" - san_path_err_forget_rate "no" - san_path_err_recovery_time "no" - marginal_path_err_sample_time "no" - marginal_path_err_rate_threshold "no" - marginal_path_err_recheck_gap_time "no" - marginal_path_double_failed_time "no" - find_multipaths "on" - uxsock_timeout 4000 - retrigger_tries 0 - retrigger_delay 10 - missing_uev_wait_timeout 30 - skip_kpartx "no" - disable_changed_wwids ignored - remove_retries 0 - ghost_delay "no" - find_multipaths_timeout -10 - enable_foreign "^$" - marginal_pathgroups "no" -} -blacklist { - devnode "^(ram|zram|raw|loop|fd|md|dm-|sr|scd|st|dcssblk)[0-9]" - devnode "^(td|hd|vd)[a-z]" - device { - vendor "SGI" - product "Universal Xport" - } - device { - vendor "^DGC" - product "LUNZ" - } - device { - vendor "EMC" - product "LUNZ" - } - device { - vendor "DELL" - product "Universal Xport" - } - device { - vendor "IBM" - product "Universal Xport" - } - device { - vendor "IBM" - product "S/390" - } - device { - vendor "LENOVO" - product "Universal Xport" - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "Universal Xport" - } - device { - vendor "STK" - product "Universal Xport" - } - device { - vendor "SUN" - product "Universal Xport" - } - device { - vendor "(Intel|INTEL)" - product "VTrak V-LUN" - } - device { - vendor "Promise" - product "VTrak V-LUN" - } - device { - vendor "Promise" - product "Vess V-LUN" - } -} -blacklist_exceptions { - property "(SCSI_IDENT_|ID_WWN)" -} -devices { - device { - vendor "NVME" - product ".*" - uid_attribute "ID_WWN" - path_checker "none" - retain_attached_hw_handler "no" - } - device { - vendor "APPLE" - product "Xserve RAID" - path_grouping_policy "multibus" - } - device { - vendor "3PARdata" - product "VV" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 18 - fast_io_fail_tmo 10 - dev_loss_tmo "infinity" - vpd_vendor hp3par - } - device { - vendor "DEC" - product 
"HSG80" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - hardware_handler "1 hp_sw" - prio "hp_sw" - no_path_retry "queue" - } - device { - vendor "HP" - product "A6189A" - path_grouping_policy "multibus" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "(MSA|HSV)1[01]0" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - hardware_handler "1 hp_sw" - prio "hp_sw" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "MSA VOLUME" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "(HSV1[01]1|HSV2[01]0|HSV3[046]0|HSV4[05]0)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 12 - } - device { - vendor "HP" - product "(MSA2[02]12fc|MSA2012i)" - path_grouping_policy "multibus" - no_path_retry 18 - } - device { - vendor "HP" - product "(MSA2012sa|MSA23(12|24)(fc|i|sa)|MSA2000s VOLUME)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 18 - } - device { - vendor "HP" - product "MSA [12]0[45]0 SA[NS]" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 18 - } - device { - vendor "HP" - product "HSVX700" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 12 - } - device { - vendor "HP" - product "LOGICAL VOLUME" - path_grouping_policy "multibus" - no_path_retry 12 - } - device { - vendor "HP" - product "(P2000 G3 FC|P2000G3 FC/iSCSI|P2000 G3 SAS|P2000 G3 iSCSI)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 18 - } - device { - vendor "LEFTHAND" - product "(P4000|iSCSIDisk|FCDISK)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 18 - } - device { - vendor "Nimble" - product "Server" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - 
prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "SGI" - product "TP9100" - path_grouping_policy "multibus" - } - device { - vendor "SGI" - product "TP9[3457]00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SGI" - product "IS" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SGI" - product "^DD[46]A-" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "DDN" - product "SAN DataDirector" - path_grouping_policy "multibus" - } - device { - vendor "DDN" - product "^EF3010" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "DDN" - product "^(EF3015|S2A|SFA)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "NEXENTA" - product "COMSTAR" - path_grouping_policy "group_by_serial" - no_path_retry 30 - } - device { - vendor "TEGILE" - product "(ZEBI-(FC|ISCSI)|INTELLIFLASH)" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 10 - } - device { - vendor "EMC" - product "SYMMETRIX" - path_grouping_policy "multibus" - no_path_retry 6 - } - device { - vendor "^DGC" - product "^(RAID|DISK|VRAID)" - product_blacklist "LUNZ" - path_grouping_policy "group_by_prio" - path_checker "emc_clariion" - hardware_handler "1 emc" - prio "emc" - failback "immediate" - no_path_retry 60 - } - device { - vendor "EMC" - product "Invista" - product_blacklist "LUNZ" - path_grouping_policy "multibus" - no_path_retry 5 - } - device { - vendor "XtremIO" - 
product "XtremApp" - path_grouping_policy "multibus" - } - device { - vendor "COMPELNT" - product "Compellent Vol" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "DELL" - product "^MD3" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "NVME" - product "^EMC PowerMax_" - path_grouping_policy "multibus" - } - device { - vendor "FSC" - product "CentricStor" - path_grouping_policy "group_by_serial" - } - device { - vendor "FUJITSU" - product "ETERNUS_DX(H|L|M|400|8000)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 10 - } - device { - vendor "(EUROLOGC|EuroLogc)" - product "FC2502" - path_grouping_policy "multibus" - } - device { - vendor "FUJITSU" - product "E[234]000" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 10 - } - device { - vendor "FUJITSU" - product "E[68]000" - path_grouping_policy "multibus" - no_path_retry 10 - } - device { - vendor "(HITACHI|HP)" - product "^OPEN-" - path_grouping_policy "multibus" - } - device { - vendor "HITACHI" - product "^DF" - path_grouping_policy "group_by_prio" - prio "hds" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "HITACHI" - product "^DF600F" - path_grouping_policy "multibus" - } - device { - vendor "IBM" - product "ProFibre 4000R" - path_grouping_policy "multibus" - } - device { - vendor "IBM" - product "^1722-600" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1724" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features 
"2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1726" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1742" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1746" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1813" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1814" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1815" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1818" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - 
product "^3526" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^(3542|3552)" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^2105" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1750500" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^2107900" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^2145" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "IBM" - product "S/390 DASD ECKD" - product_blacklist "S/390" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - no_path_retry "queue" - } - device { - vendor "IBM" - product "S/390 DASD FBA" - product_blacklist "S/390" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^IPR" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "IBM" - product "1820N00" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "(XIV|IBM)" - product "(NEXTRA|2810XIV)" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "(TMS|IBM)" - product "(RamSan|FlashSystem)" - path_grouping_policy "multibus" - } - device { - vendor "IBM" - product 
"^(DCS9900|2851)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "AIX" - product "VDASD" - path_grouping_policy "multibus" - no_path_retry 60 - } - device { - vendor "IBM" - product "3303[ ]+NVDISK" - no_path_retry 60 - } - device { - vendor "AIX" - product "NVDISK" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 60 - } - device { - vendor "LENOVO" - product "DE_Series" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "NETAPP" - product "LUN" - path_grouping_policy "group_by_prio" - features "2 pg_init_retries 50" - prio "ontap" - failback "immediate" - no_path_retry "queue" - flush_on_last_del "yes" - dev_loss_tmo "infinity" - user_friendly_names "no" - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "INF-01-00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SolidFir" - product "SSD SAN" - path_grouping_policy "multibus" - no_path_retry 24 - } - device { - vendor "NVME" - product "^NetApp ONTAP Controller" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "NEC" - product "DISK ARRAY" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - } - device { - vendor "^Pillar" - product "^Axiom" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - } - device { - vendor "^Oracle" - product "^Oracle FS" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - } - device { - vendor "STK" - product "BladeCtlr" - 
product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "STK" - product "OPENstorage" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "STK" - product "FLEXLINE 380" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "StorEdge 3" - path_grouping_policy "multibus" - } - device { - vendor "SUN" - product "STK6580_6780" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "CSM[12]00_R" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "LCSM100_[IEFS]" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "SUN_6180" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "ArrayStorage" - 
product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "(Sun Storage|ZFS Storage|COMSTAR)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "PIVOT3" - product "RAIGE VOLUME" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "(NexGen|Pivot3)" - product "(TierStore|vSTAC)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "(Intel|INTEL)" - product "Multi-Flex" - product_blacklist "VTrak V-LUN" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "(LIO-ORG|SUSE)" - product "RBD" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 12 - } - device { - vendor "DataCore" - product "SANmelody" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "Virtual Disk" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "PURE" - product "FlashArray" - path_grouping_policy "multibus" - } - device { - vendor "HUAWEI" - product "XSG1" - path_grouping_policy "group_by_prio" - prio "alua" - } - device { - vendor "KOVE" - product "XPD" - path_grouping_policy "multibus" - } - device { - vendor "NFINIDAT" - product "InfiniBox" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - prio "alua" - failback 30 - rr_weight "priorities" - no_path_retry "fail" - rr_min_io 1 - rr_min_io_rq 1 - flush_on_last_del "yes" - fast_io_fail_tmo 15 - dev_loss_tmo 15 - } - device { - vendor 
"KMNRIO" - product "K2" - path_grouping_policy "multibus" - } - device { - vendor "NEXSAN" - product "NXS-B0" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 15 - } - device { - vendor "NEXSAN" - product "SATAB" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 15 - } - device { - vendor "Nexsan" - product "(NestOS|NST5000)" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "VIOLIN" - product "SAN ARRAY$" - path_grouping_policy "group_by_serial" - no_path_retry 30 - } - device { - vendor "VIOLIN" - product "SAN ARRAY ALUA" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "VIOLIN" - product "CONCERTO ARRAY" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "(XIOTECH|XIOtech)" - product "ISE" - path_grouping_policy "multibus" - no_path_retry 12 - } - device { - vendor "(XIOTECH|XIOtech)" - product "IGLU DISK" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "(XIOTECH|XIOtech)" - product "Magnitude" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "Promise" - product "VTrak" - product_blacklist "VTrak V-LUN" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "Promise" - product "Vess" - product_blacklist "Vess V-LUN" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "^IFT" - product ".*" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "DotHill" - product "SANnet" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "DotHill" - 
product "R/Evo" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "DotHill" - product "^DH" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "AStor" - product "NeoSapphire" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "INSPUR" - product "MCS" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - } -} -overrides { -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/just_detect.conf b/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/just_detect.conf deleted file mode 100644 index b68733c5..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/just_detect.conf +++ /dev/null @@ -1,3 +0,0 @@ -defaults { - detect_prio 0 -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/just_exists.conf b/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/just_exists.conf deleted file mode 100644 index ac84ba87..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/just_exists.conf +++ /dev/null @@ -1,32 +0,0 @@ -# device-mapper-multipath configuration file - -# For a complete list of the default configuration values, run either: -# # multipath -t -# or -# # multipathd show config - -# For a list of configuration options with descriptions, see the -# multipath.conf man page. 
- -defaults { - user_friendly_names yes - find_multipaths yes -} - -devices { - device { - vendor "Foo" - product "Bar" - features "1 queue_if_no_path" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - } -} - -blacklist_exceptions { - property "(SCSI_IDENT_|ID_WWN)" -} - -blacklist { -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/just_reassign.conf b/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/just_reassign.conf deleted file mode 100644 index cbd4399e..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/just_reassign.conf +++ /dev/null @@ -1,93 +0,0 @@ -# This is a basic configuration file with some examples, for device mapper -# multipath. -# -# For a complete list of the default configuration values, run either -# multipath -t -# or -# multipathd show config -# -# For a list of configuration options with descriptions, see the multipath.conf -# man page - -## By default, devices with vendor = "IBM" and product = "S/390.*" are -## blacklisted. To enable mulitpathing on these devies, uncomment the -## following lines. -#blacklist_exceptions { -# device { -# vendor "IBM" -# product "S/390.*" -# } -#} - -## Use user friendly names, instead of using WWIDs as names. -defaults { - user_friendly_names yes - find_multipaths yes - reassign_maps "yes" -} -## -## Here is an example of how to configure some standard options. -## -# -#defaults { -# polling_interval 10 -# path_selector "round-robin 0" -# path_grouping_policy multibus -# uid_attribute ID_SERIAL -# prio alua -# path_checker readsector0 -# rr_min_io 100 -# max_fds 8192 -# rr_weight priorities -# failback immediate -# no_path_retry fail -# user_friendly_names yes -#} -## -## The wwid line in the following blacklist section is shown as an example -## of how to blacklist devices by wwid. The 2 devnode lines are the -## compiled in default blacklist. 
If you want to blacklist entire types -## of devices, such as all scsi devices, you should use a devnode line. -## However, if you want to blacklist specific devices, you should use -## a wwid line. Since there is no guarantee that a specific device will -## not change names on reboot (from /dev/sda to /dev/sdb for example) -## devnode lines are not recommended for blacklisting specific devices. -## -#blacklist { -# wwid 26353900f02796769 -# devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" -# devnode "^hd[a-z]" -#} -#multipaths { -# multipath { -# wwid 3600508b4000156d700012000000b0000 -# alias yellow -# path_grouping_policy multibus -# path_selector "round-robin 0" -# failback manual -# rr_weight priorities -# no_path_retry 5 -# } -# multipath { -# wwid 1DEC_____321816758474 -# alias red -# } -#} -#devices { -# device { -# vendor "COMPAQ " -# product "HSV110 (C)COMPAQ" -# path_grouping_policy multibus -# path_checker readsector0 -# path_selector "round-robin 0" -# hardware_handler "0" -# failback 15 -# rr_weight priorities -# no_path_retry queue -# } -# device { -# vendor "COMPAQ " -# product "MSA1000 " -# path_grouping_policy multibus -# } -#} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/ugly1.conf b/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/ugly1.conf deleted file mode 100644 index 87c8dcf4..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/ugly1.conf +++ /dev/null @@ -1,1055 +0,0 @@ -defaults THIS SHOULDN'T BE HERE - verbosity 2 - polling_interval 5 - max_polling_interval 20 - reassign_maps "yes" - multipath_dir "/lib64/multipath" - path_selector "service-time 0" - path_grouping_policy "failover" - uid_attribute "ID_SERIAL" - prio "const" - prio_args "" - features "0" - path_checker "directio" - alias_prefix "mpath" - failback "manual" - rr_min_io 1000 - rr_min_io_rq 1 - max_fds 1048576 - rr_weight "uniform" - queue_without_daemon "no" - flush_on_last_del "no" - 
user_friendly_names "yes" - fast_io_fail_tmo 5 - bindings_file "/etc/multipath/bindings" - wwids_file /etc/multipath/wwids - prkeys_file /etc/multipath/prkeys - log_checker_err always - find_multipaths yes - retain_attached_hw_handler no - detect_prio no - detect_path_checker no - hw_str_match no - force_sync no - deferred_remove no - ignore_new_boot_devs no - skip_kpartx no - config_dir "files/conf.d" - delay_watch_checks no - delay_wait_checks no - retrigger_tries 3 - retrigger_delay 10 - missing_uev_wait_timeout 30 - new_bindings_in_boot no - remove_retries 0 - disable_changed_wwids no - unpriv_sgio no - ghost_delay no - all_tg_pt no - marginal_path_err_sample_time no - marginal_path_err_rate_threshold no - marginal_path_err_recheck_gap_time no - marginal_path_double_failed_time no -} -blacklist { - devnode "sdb" - devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" - devnode "^(td|hd|vd)[a-z]" - devnode "^dcssblk[0-9]*" - device - vendor "DGC" - product "LUNZ" - } - device { - vendor "EMC" - product "LUNZ" - } - device { - vendor "IBM" - product "Universal Xport" - } - device { - vendor "IBM" - product "S/390.*" - } - device { - vendor "DELL" - product "Universal Xport" - } - device { - vendor "LENOVO" - product "Universal Xport" - } - device { - vendor "SGI" - product "Universal Xport" - } - device { - vendor "STK" - product "Universal Xport" - } - device { - vendor "SUN" - product "Universal Xport" - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "Universal Xport" - } -} -blacklist_exceptions { - devnode "sda" - wwid "123456789" - device { - vendor "IBM" - product "S/390x" - } -} - -devices { BAD DATA - device { - vendor "COMPELNT" - product "Compellent Vol" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "APPLE*" - product "Xserve RAID " - path_grouping_policy "multibus" - path_checker "directio" - 
features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "3PARdata" - product "VV" - path_grouping_policy "group_by_prio" - path_selector "service-time 0" - path_checker "tur" - features "0" - hardware_handler "1 alua" EXTRA DATA - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - fast_io_fail_tmo 10 - dev_loss_tmo "infinity" - } - device { - vendor "DEC" - product "HSG80" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - features "1 queue_if_no_path" - hardware_handler "1 hp_sw" - prio "hp_sw" - rr_weight "uniform" - } - device { - vendor "HP" - product "A6189A" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "(MSA|HSV)1.0.*" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - features "1 queue_if_no_path" - hardware_handler "1 hp_sw" - prio "hp_sw" - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "(COMPAQ|HP)" - product "MSA VOLUME" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "(COMPAQ|HP)" - product "HSV1[01]1|HSV2[01]0|HSV3[046]0|HSV4[05]0" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA2[02]12fc|MSA2012i" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA2012sa|MSA23(12|24)(fc|i|sa)|MSA2000s VOLUME" - path_grouping_policy "group_by_prio" - path_checker "tur" - 
features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA (1|2)040 SA(N|S)" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "HSVX700" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "HP" - product "LOGICAL VOLUME.*" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 12 - } - device { - vendor "HP" - product "P2000 G3 FC|P2000G3 FC/iSCSI|P2000 G3 SAS|P2000 G3 iSCSI" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "DDN" - product "SAN DataDirector" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "EMC" - product "SYMMETRIX" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 6 - } - device { - vendor "DGC" - product ".*" - product_blacklist "LUNZ" - path_grouping_policy "group_by_prio" - path_checker "emc_clariion" - features "1 queue_if_no_path" - hardware_handler "1 emc" - prio "emc" - failback immediate - rr_weight "uniform" - no_path_retry 60 - retain_attached_hw_handler yes - detect_prio yes - detect_path_checker yes - } - device { - vendor "EMC" - product "Invista" - product_blacklist "LUNZ" - path_grouping_policy "multibus" - path_checker "tur" - 
features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 5 - } - device { - vendor "FSC" - product "CentricStor" - path_grouping_policy "group_by_serial" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "FUJITSU" - product "ETERNUS_DX(H|L|M|400|8000)" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 10 - } - device { - vendor "(HITACHI|HP)" - product "OPEN-.*" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "HITACHI" - product "DF.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "hds" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "ProFibre 4000R" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^1722-600" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "1 queue_if_no_path" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1724" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "1 queue_if_no_path" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1726" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "1 queue_if_no_path" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - 
no_path_retry 300 - } - device { - vendor "IBM" - product "^1742" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1745|^1746" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 15 - } - device { - vendor "IBM" - product "^1814" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1815" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1818" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^3526" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^3542" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2105800" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "1 queue_if_no_path" - 
hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2105F20" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^1750500" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2107900" - path_grouping_policy "multibus" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2145" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "S/390 DASD ECKD" - product_blacklist "S/390.*" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "S/390 DASD FBA" - product_blacklist "S/390.*" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^IPR.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "1820N00" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - rr_min_io 100 - } - device { - vendor "IBM" - product "2810XIV" - path_grouping_policy "multibus" - 
path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - failback 15 - rr_weight "uniform" - rr_min_io 15 - } - device { - vendor "AIX" - product "VDASD" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "IBM" - product "3303 NVDISK" - path_grouping_policy "failover" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "AIX" - product "NVDISK" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "DELL" - product "^MD3" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 30 - } - device { - vendor "LENOVO" - product "DE_Series" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - no_path_retry 30 - } - device { - vendor "NETAPP" - product "LUN.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "3 queue_if_no_path pg_init_retries 50" - hardware_handler "0" - prio "ontap" - failback immediate - rr_weight "uniform" - rr_min_io 128 - flush_on_last_del "yes" - dev_loss_tmo "infinity" - user_friendly_names no - retain_attached_hw_handler yes - detect_prio yes - } - device { - vendor "NEXENTA" - product "COMSTAR" - path_grouping_policy "group_by_serial" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - 
no_path_retry 30 - rr_min_io 128 - } - device { - vendor "IBM" - product "Nseries.*" - path_grouping_policy "group_by_prio" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "ontap" - failback immediate - rr_weight "uniform" - rr_min_io 128 - } - device { - vendor "Pillar" - product "Axiom.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - rr_weight "uniform" - } - device { - vendor "SGI" - product "TP9[13]00" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "SGI" - product "TP9[45]00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SGI" - product "IS.*" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 15 - } - device { - vendor "NEC" - product "DISK ARRAY" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "STK" - product "OPENstorage D280" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - } - device { - vendor "SUN" - product "(StorEdge 3510|T4)" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "SUN" - product "STK6580_6780" - path_grouping_policy "group_by_prio" - path_checker "tur" - 
features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - } - device { - vendor "EUROLOGC" - product "FC2502" - path_grouping_policy "group_by_prio" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "PIVOT3" - product "RAIGE VOLUME" - path_grouping_policy "multibus" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - rr_min_io 100 - } - device { - vendor "SUN" - product "CSM200_R" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SUN" - product "LCSM100_[IEFS]" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SUN" - product "SUN_6180" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - rr_min_io 1000 - rr_min_io_rq 1 - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "INF-01-00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 30 - retain_attached_hw_handler yes - detect_prio yes - } - device { - vendor "STK" - product "FLEXLINE 380" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor 
"Intel" - product "Multi-Flex" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "SANmelody" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "Virtual Disk" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "NFINIDAT" - product "InfiniBox.*" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback 30 - rr_weight "priorities" - no_path_retry "fail" - flush_on_last_del "yes" - dev_loss_tmo 30 - } - device { - vendor "Nimble" - product "Server" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "1 alua" - prio "alua" - failback immediate - dev_loss_tmo "infinity" - } - device { - vendor "XtremIO" - product "XtremApp" - path_grouping_policy "multibus" - path_selector "queue-length 0" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - failback immediate - fast_io_fail_tmo 15 - } - device { - vendor "PURE" - product "FlashArray" - path_grouping_policy "multibus" - path_selector "queue-length 0" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - fast_io_fail_tmo 10 - dev_loss_tmo 60 - user_friendly_names no - } - device { - vendor "HUAWEI" - product "XSG1" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - dev_loss_tmo 30 - } - 
device { - vendor "NVME" - product "^EMC PowerMax_" - path_grouping_policy "multibus" - uid_attribute "ID_WWN" - path_checker "none" - prio "const" - } - device { - vendor "NVME" - product ".*" - path_grouping_policy "multibus" - uid_attribute "ID_WWN" - path_checker "none" - detect_prio yes - } - device { - vendor "IBM" - product "^2145" - path_selector "service-time 0" - } - device { - fast_io_fail_tmo 5 - dev_loss_tmo 60 - all_devs yes - no_path_retry fail - detect_path_checker yes - } - device { - path_selector "service-time 0" JUNK IN LINE - features "1 queue_if_no_path" - path_checker "tur" - all_devs yes - } -} -multipaths { - multipath { - wwid "123456789" - alias "foo" - } -} - -overrides - checker "rdac" - detect_path_checker no - hardware_handler "1 alua" - pg_timeout no - fast_io_fail_tmo 10 - unpriv_sgio no - features "3 queue_if_no_path pg_init_retries 50" -# Missing closing brace diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/ugly2.conf b/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/ugly2.conf deleted file mode 100644 index d9b5038d..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/files/ugly2.conf +++ /dev/null @@ -1,123 +0,0 @@ -# This is a basic configuration file with some examples, for device mapper -# multipath. -# -# For a complete list of the default configuration values, run either -# multipath -t -# or -# multipathd show config -# -# For a list of configuration options with descriptions, see the multipath.conf -# man page - -## By default, devices with vendor = "IBM" and product = "S/390.*" are -## blacklisted. To enable mulitpathing on these devies, uncomment the -## following lines. -#blacklist_exceptions { -# device { -# vendor "IBM" -# product "S/390.*" -# } -#} - -## Use user friendly names, instead of using WWIDs as names. 
-defaults { - user_friendly_names yes - find_multipaths yes -} - -devices { - device { - vendor "NVME" - product ".*" - path_grouping_policy multibus - - } - device { - all_devs yes - path_checker tur - pg_timeout no - detect_path_checker yes - } - - device { - features "3 queue_if_no_path pg_init_retries 50" - path_selector "service-time 0" - all_devs yes - unpriv_sgio no - } - - device { - hardware_handler "1 alua" - vendor "test_vendor" - product "test_product" - revision 1 - product_blacklist "test.*" - all_devs yes - fast_io_fail_tmo 5 - path_checker rdac -# no closing braces - -## -## Here is an example of how to configure some standard options. -## -# -#defaults { -# polling_interval 10 -# path_selector "round-robin 0" -# path_grouping_policy multibus -# uid_attribute ID_SERIAL -# prio alua -# path_checker readsector0 -# rr_min_io 100 -# max_fds 8192 -# rr_weight priorities -# failback immediate -# no_path_retry fail -# user_friendly_names yes -#} -## -## The wwid line in the following blacklist section is shown as an example -## of how to blacklist devices by wwid. The 2 devnode lines are the -## compiled in default blacklist. If you want to blacklist entire types -## of devices, such as all scsi devices, you should use a devnode line. -## However, if you want to blacklist specific devices, you should use -## a wwid line. Since there is no guarantee that a specific device will -## not change names on reboot (from /dev/sda to /dev/sdb for example) -## devnode lines are not recommended for blacklisting specific devices. 
-## -# wwid 26353900f02796769 -# devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" -# devnode "^hd[a-z]" -#multipaths { -# multipath { -# wwid 3600508b4000156d700012000000b0000 -# alias yellow -# path_grouping_policy multibus -# path_selector "round-robin 0" -# failback manual -# rr_weight priorities -# no_path_retry 5 -# } -# multipath { -# wwid 1DEC_____321816758474 -# alias red -# } -#} -#devices { -# device { -# vendor "COMPAQ " -# product "HSV110 (C)COMPAQ" -# path_grouping_policy multibus -# path_checker readsector0 -# path_selector "round-robin 0" -# hardware_handler "0" -# failback 15 -# rr_weight priorities -# no_path_retry queue -# } -# device { -# vendor "COMPAQ " -# product "MSA1000 " -# path_grouping_policy multibus -# } -#} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/test_library_multipathconfread.py b/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/test_library_multipathconfread.py deleted file mode 100644 index 7399aa42..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfread/tests/test_library_multipathconfread.py +++ /dev/null @@ -1,199 +0,0 @@ -import os - -import pytest - -from leapp.libraries.actor import multipathconfread -from leapp.models import MultipathConfFacts, MultipathConfig, MultipathConfigOption - -# TODO [Artem] We shouldn't chdir in tests -TEST_DIR = os.path.dirname(os.path.abspath(__file__)) - - -@pytest.fixture -def adjust_cwd(): - previous_cwd = os.getcwd() - os.chdir(TEST_DIR) - yield - os.chdir(previous_cwd) - - -CUR_DIR = "" - - -def build_config(val): - all_devs_options_val = [] - for name_val, value_val in val[16]: - option = MultipathConfigOption(name=name_val, value=value_val) - all_devs_options_val.append(option) - return MultipathConfig( - pathname=val[0], - default_path_checker=val[1], - config_dir=val[2], - default_retain_hwhandler=val[3], - default_detect_prio=val[4], - default_detect_checker=val[5], - reassign_maps=val[6], - hw_str_match_exists=val[7], - 
ignore_new_boot_devs_exists=val[8], - new_bindings_in_boot_exists=val[9], - unpriv_sgio_exists=val[10], - detect_path_checker_exists=val[11], - overrides_hwhandler_exists=val[12], - overrides_pg_timeout_exists=val[13], - queue_if_no_path_exists=val[14], - all_devs_section_exists=val[15], - all_devs_options=all_devs_options_val) - - -default_rhel7_conf = build_config( - [os.path.join(CUR_DIR, 'files/default_rhel7.conf'), 'directio', os.path.join(CUR_DIR, 'files/conf.d'), False, - False, False, True, True, True, True, True, True, False, False, True, False, [], ]) - -all_devs_conf = build_config( - [os.path.join(CUR_DIR, 'files/conf.d/all_devs.conf'), None, None, None, None, None, None, False, False, False, - True, True, False, False, True, True, - [('path_checker', 'rdac'), ('detect_checker', 'yes'), ('features', '2 pg_init_retries 50'), - ('path_selector', 'service-time 0'), ('fast_io_fail_tmo', '5'), ('no_path_retry', 'queue'), ], ]) - -empty_conf = build_config( - [os.path.join(CUR_DIR, 'files/conf.d/empty.conf'), None, None, None, None, None, None, False, False, False, False, - False, False, False, False, False, [], ]) - -default_rhel8_conf = build_config( - [os.path.join(CUR_DIR, 'files/default_rhel8.conf'), 'tur', '/etc/multipath/conf.d', True, True, None, False, False, - False, False, False, False, False, False, False, False, [], ]) - -all_the_things_conf = build_config( - [os.path.join(CUR_DIR, 'files/all_the_things.conf'), 'directio', os.path.join(CUR_DIR, 'files/conf.d'), False, - False, False, True, True, True, True, True, True, True, True, True, True, - [('no_path_retry', 'fail'), ('features', '0')], ]) - -already_updated_conf = build_config( - [os.path.join(CUR_DIR, 'files/already_updated.conf'), None, os.path.join(CUR_DIR, 'files/conf.d'), None, None, - None, None, False, False, False, False, False, False, False, False, False, [], ]) - -ugly1_conf = build_config( - [os.path.join(CUR_DIR, 'files/ugly1.conf'), 'directio', os.path.join(CUR_DIR, 
'files/conf.d'), False, False, False, - True, True, True, True, True, True, True, True, True, True, - [('dev_loss_tmo', '60'), ('path_selector', 'service-time 0')], ]) - -# same results as all_devs_conf -ugly2_conf = build_config( - [os.path.join(CUR_DIR, 'files/ugly2.conf'), None, None, None, None, None, None, False, False, False, True, True, - False, False, True, True, - [('path_checker', 'rdac'), ('detect_checker', 'yes'), ('features', '2 pg_init_retries 50'), - ('path_selector', 'service-time 0'), ('fast_io_fail_tmo', '5'), ('no_path_retry', 'queue'), ], ]) - -just_checker_conf = build_config( - [os.path.join(CUR_DIR, 'files/just_checker.conf'), 'rdac', '/etc/multipath/conf.d', True, True, None, False, False, - False, False, False, False, False, False, False, False, [], ]) - -just_detect_conf = build_config( - [os.path.join(CUR_DIR, 'files/just_detect.conf'), None, None, None, False, None, None, False, False, False, False, - False, False, False, False, False, [], ]) - -just_reassign_conf = build_config( - [os.path.join(CUR_DIR, 'files/just_reassign.conf'), None, None, None, None, None, True, False, False, False, False, - False, False, False, False, False, [], ]) - -just_exists_conf = build_config( - [os.path.join(CUR_DIR, 'files/just_exists.conf'), None, None, None, None, None, None, False, False, False, False, - False, False, False, True, False, [], ]) - -just_all_devs_conf = build_config( - [os.path.join(CUR_DIR, 'files/just_all_devs.conf'), None, None, None, None, None, None, False, False, False, False, - False, False, False, False, True, [], ]) - - -def assert_config(config, expected): - assert config.pathname == expected.pathname - assert config.default_path_checker == expected.default_path_checker - assert config.config_dir == expected.config_dir - assert config.default_retain_hwhandler == expected.default_retain_hwhandler - assert config.default_detect_prio == expected.default_detect_prio - assert config.default_detect_checker == 
expected.default_detect_checker - assert config.reassign_maps == expected.reassign_maps - assert config.hw_str_match_exists == expected.hw_str_match_exists - assert config.ignore_new_boot_devs_exists == expected.ignore_new_boot_devs_exists - assert config.new_bindings_in_boot_exists == expected.new_bindings_in_boot_exists - assert config.unpriv_sgio_exists == expected.unpriv_sgio_exists - assert config.detect_path_checker_exists == expected.detect_path_checker_exists - assert config.overrides_hwhandler_exists == expected.overrides_hwhandler_exists - assert config.overrides_pg_timeout_exists == expected.overrides_pg_timeout_exists - assert config.queue_if_no_path_exists == expected.queue_if_no_path_exists - assert config.all_devs_section_exists == expected.all_devs_section_exists - assert len(config.all_devs_options) == len(expected.all_devs_options) - for i in range(len(config.all_devs_options)): - conf_opt = config.all_devs_options[i] - expt_opt = expected.all_devs_options[i] - assert conf_opt.name == expt_opt.name - assert conf_opt.value == expt_opt.value - - -def test_config_dir(adjust_cwd): - expected_configs = (default_rhel7_conf, all_devs_conf, empty_conf) - facts = multipathconfread.get_multipath_conf_facts(config_file=os.path.join(CUR_DIR, 'files/default_rhel7.conf')) - assert facts - assert len(facts.configs) == 3 - for i in range(len(facts.configs)): - assert_config(facts.configs[i], expected_configs[i]) - - -def test_already_rhel8(adjust_cwd): - config = multipathconfread._parse_config(os.path.join(CUR_DIR, 'files/default_rhel8.conf')) - assert config - assert_config(config, default_rhel8_conf) - - -def test_all_the_things(adjust_cwd): - config = multipathconfread._parse_config(os.path.join(CUR_DIR, 'files/all_the_things.conf')) - assert config - assert_config(config, all_the_things_conf) - - -def test_already_updated(adjust_cwd): - config = multipathconfread._parse_config(os.path.join(CUR_DIR, 'files/already_updated.conf')) - assert config - 
assert_config(config, already_updated_conf) - - -def tests_ugly1(adjust_cwd): - config = multipathconfread._parse_config(os.path.join(CUR_DIR, 'files/ugly1.conf')) - assert config - assert_config(config, ugly1_conf) - - -def tests_ugly2(adjust_cwd): - config = multipathconfread._parse_config(os.path.join(CUR_DIR, 'files/ugly2.conf')) - assert config - assert_config(config, ugly2_conf) - - -def tests_just_checker(adjust_cwd): - config = multipathconfread._parse_config(os.path.join(CUR_DIR, 'files/just_checker.conf')) - assert config - assert_config(config, just_checker_conf) - - -def tests_just_detect(adjust_cwd): - config = multipathconfread._parse_config(os.path.join(CUR_DIR, 'files/just_detect.conf')) - assert config - assert_config(config, just_detect_conf) - - -def tests_just_reassign(adjust_cwd): - config = multipathconfread._parse_config(os.path.join(CUR_DIR, 'files/just_reassign.conf')) - assert config - assert_config(config, just_reassign_conf) - - -def tests_just_exists(adjust_cwd): - config = multipathconfread._parse_config(os.path.join(CUR_DIR, 'files/just_exists.conf')) - assert config - assert_config(config, just_exists_conf) - - -def tests_just_all_devs(adjust_cwd): - config = multipathconfread._parse_config(os.path.join(CUR_DIR, 'files/just_all_devs.conf')) - assert config - assert_config(config, just_all_devs_conf) diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/actor.py b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/actor.py deleted file mode 100644 index 221285e1..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/actor.py +++ /dev/null @@ -1,29 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import multipathconfupdate -from leapp.models import MultipathConfFacts -from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag - - -class MultipathConfUpdate(Actor): - """ - Modifies multipath configuration files on the target RHEL-8 system so that - they will run properly. 
This is done in three ways - 1. commenting out lines for options that no longer exist, or whose value - is no longer current in RHEL-8 - 2. Migrating any options in an devices section with all_devs to an - overrides sections - 3. Rename options that have changed names - """ - - name = 'multipath_conf_update' - consumes = (MultipathConfFacts,) - produces = () - tags = (ApplicationsPhaseTag, IPUWorkflowTag) - - def process(self): - facts = next(self.consume(MultipathConfFacts), None) - if facts is None: - self.log.debug('Skipping execution. No MultipathConfFacts has ' - 'been produced') - return - multipathconfupdate.update_configs(facts) diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/libraries/multipathconfupdate.py b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/libraries/multipathconfupdate.py deleted file mode 100644 index 56f06f3a..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/libraries/multipathconfupdate.py +++ /dev/null @@ -1,250 +0,0 @@ -import re - -from leapp.libraries.common import multipathutil - -_bool_options = {'retain_attached_hw_handler': True, 'detect_prio': True, - 'detect_path_checker': True, 'reassign_maps': False} - -_exist_options = ('hw_str_match', 'ignore_new_boot_devs', - 'new_bindings_in_boot', 'unpriv_sgio') - -_ovr_options = ('hardware_handler', 'pg_timeout') - - -class _QueueIfNoPathInfo(object): - def __init__(self, line, value): - self.line = line - self.value = value - self.has_no_path_retry = False - - -def _nothing_to_do(config): - if config.default_path_checker and config.default_path_checker != 'tur': - return False - - config_checks = ( - (config.default_retain_hwhandler, False), - (config.default_detect_prio, False), - (config.default_detect_checker, False), - (config.reassign_maps, True), - (config.hw_str_match_exists, True), - (config.ignore_new_boot_devs_exists, True), - (config.new_bindings_in_boot_exists, True), - (config.unpriv_sgio_exists, True), - 
(config.detect_path_checker_exists, True), - (config.overrides_hwhandler_exists, True), - (config.overrides_pg_timeout_exists, True), - (config.queue_if_no_path_exists, True), - (config.all_devs_section_exists, True) - ) - for option, value in config_checks: - if option is value: - return False - - return config.all_devs_options == [] - - -def _comment_out_line(line): - return '# ' + line + ' # Commented out by Leapp' - - -def _comment_out_ranges(lines, ranges): - for start, end in ranges: - line = lines[start] - lines[start] = '# ' + line + ' # Section commented out by Leapp' - for i in range(start + 1, end): - line = lines[i] - if line == '': - lines[i] = '#' - elif line[0] != '#': - lines[i] = '# ' + line - - -def _setup_value(value): - if re.search(r'\s', value): - return '"' + value + '"' - return value - - -def _add_overrides(lines, options): - lines.append('overrides { # Section added by Leapp') - for option in options: - lines.append('\t{} {}'.format(option.name, _setup_value(option.value))) - lines.append('}') - lines.append('') - - -def _update_overrides(lines, ovr_line, options): - new_lines = [] - start = None - for i, line in enumerate(lines): - if line is ovr_line: - start = i + 1 - break - if not start: - return - for option in options: - new_lines.append('\t{} {} # Line added by Leapp'. 
- format(option.name, _setup_value(option.value))) - lines[start:start] = new_lines # insert new_lines - - -def _convert_checker_line(line): - return line.replace('detect_path_checker', 'detect_checker') + \ - ' # Line modified by Leapp' - - -def _modify_features_line(line, value): - items = value.split() - if items == [] or not items[0].isdigit(): - return _comment_out_line(line) - nr_features = int(items[0]) - if nr_features != len(items) - 1: - return _comment_out_line(line) - r = re.match('^(.*)features', line) - if not r: - return _comment_out_line(line) - line_start = r.group(1) - try: - items.remove('queue_if_no_path') - except ValueError: - return _comment_out_line(line) - items[0] = str(nr_features - 1) - return line_start + 'features "' + ' '.join(items) + \ - '" # Line modified by Leapp' - - -def _add_npr(lines, line, i): - r = re.match('^(.*)features', line) - if not r: - return - line_start = r.group(1) - lines.insert(i, - line_start + 'no_path_retry queue # Line added by Leapp') - - -def _remove_qinp(lines, qinp_infos): - infos_iter = iter(qinp_infos) - info = next(infos_iter, None) - if not info: - return - i = 0 - while i < len(lines): - if lines[i] is info.line: - lines[i] = _modify_features_line(info.line, info.value) - if not info.has_no_path_retry: - _add_npr(lines, info.line, i + 1) - info = next(infos_iter, None) - if not info: - return - i += 1 - - -def _valid_npr(value): - if value.isdigit() and int(value) >= 0: - return True - if value in ('fail', 'queue'): - return True - return False - - -def _update_config(config): - if _nothing_to_do(config): - return None - contents = multipathutil.read_config(config.pathname) - if contents is None: - return None - lines = contents.split('\n') - section = None - in_subsection = False - in_all_devs = False - subsection_start = None - all_devs_ranges = [] - overrides_line = None - qinp_info = None - has_no_path_retry = False - qinp_infos = [] - for i, line in enumerate(lines): - try: - data = 
multipathutil.LineData(line, section, in_subsection) - except ValueError: - continue - if data.type == data.TYPE_SECTION_END: - if qinp_info and not in_all_devs: - qinp_info.has_no_path_retry = has_no_path_retry - qinp_infos.append(qinp_info) - qinp_info = None - has_no_path_retry = False - if in_subsection: - in_subsection = False - if in_all_devs: - all_devs_ranges.append((subsection_start, i + 1)) - in_all_devs = False - subsection_start = None - elif section is not None: - section = None - elif data.type == data.TYPE_SECTION_START: - if section is None: - section = data.section - if section == 'overrides': - overrides_line = line - elif not in_subsection: - in_subsection = True - subsection_start = i - if data.type != data.TYPE_OPTION: - continue - if section == 'defaults': - if (data.option in ('path_checker', 'checker')) and data.value != 'tur': - lines[i] = _comment_out_line(line) - continue - if data.option in _bool_options and \ - _bool_options[data.option] != data.is_enabled(): - lines[i] = _comment_out_line(line) - continue - elif section == 'overrides' and data.option in _ovr_options: - lines[i] = _comment_out_line(line) - continue - elif section == 'devices' and in_subsection and \ - data.option == 'all_devs' and data.is_enabled(): - in_all_devs = True - continue - if data.option in _exist_options: - lines[i] = _comment_out_line(line) - elif data.option == 'detect_path_checker': - lines[i] = _convert_checker_line(line) - elif data.option == 'no_path_retry' and _valid_npr(data.value): - has_no_path_retry = True - elif data.option == 'features' and 'queue_if_no_path' in data.value: - qinp_info = _QueueIfNoPathInfo(line, data.value) - - if in_subsection: - lines.append('\t} # line added by Leapp') - if in_all_devs: - all_devs_ranges.append((subsection_start, len(lines))) - elif qinp_info: - qinp_info.has_no_path_retry = has_no_path_retry - qinp_infos.append(qinp_info) - qinp_info = None - if section is not None: - lines.append('} # line added by Leapp') - 
lines.append('') - if qinp_info: - qinp_info.has_no_path_retry = has_no_path_retry - qinp_infos.append(qinp_info) - _comment_out_ranges(lines, all_devs_ranges) - if qinp_infos: - _remove_qinp(lines, qinp_infos) - if config.all_devs_options != []: - if overrides_line: - _update_overrides(lines, overrides_line, config.all_devs_options) - else: - _add_overrides(lines, config.all_devs_options) - contents = '\n'.join(lines) - return contents - - -def update_configs(facts): - for config in facts.configs: - contents = _update_config(config) - if contents: - multipathutil.write_config(config.pathname, contents) diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/all_devs.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/all_devs.conf deleted file mode 100644 index 645b3196..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/all_devs.conf +++ /dev/null @@ -1,146 +0,0 @@ -# This is a basic configuration file with some examples, for device mapper -# multipath. -# -# For a complete list of the default configuration values, run either -# multipath -t -# or -# multipathd show config -# -# For a list of configuration options with descriptions, see the multipath.conf -# man page - -## By default, devices with vendor = "IBM" and product = "S/390.*" are -## blacklisted. To enable mulitpathing on these devies, uncomment the -## following lines. -#blacklist_exceptions { -# device { -# vendor "IBM" -# product "S/390.*" -# } -#} - -## Use user friendly names, instead of using WWIDs as names. 
-defaults { - user_friendly_names yes - find_multipaths yes -} - -devices { - device { - vendor "NVME" - product ".*" - path_grouping_policy multibus - - } -# device { # Section commented out by Leapp -# all_devs yes -# path_checker tur -# pg_timeout no -# detect_checker yes # Line modified by Leapp -# } - -# device { # Section commented out by Leapp -# features "3 queue_if_no_path pg_init_retries 50" -# path_selector "service-time 0" -# all_devs yes -# unpriv_sgio no # Commented out by Leapp -# } - -# device { # Section commented out by Leapp -# hardware_handler "1 alua" -# vendor "test_vendor" -# product "test_product" -# revision 1 -# product_blacklist "test.*" -# all_devs yes -# fast_io_fail_tmo 5 -# path_checker rdac -# } - device { - vendor "IBM" - product "^2145" - path_selector "service-time 0" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - } - -} - - - -## -## Here is an example of how to configure some standard options. -## -# -#defaults { -# polling_interval 10 -# path_selector "round-robin 0" -# path_grouping_policy multibus -# uid_attribute ID_SERIAL -# prio alua -# path_checker readsector0 -# rr_min_io 100 -# max_fds 8192 -# rr_weight priorities -# failback immediate -# no_path_retry fail -# user_friendly_names yes -#} -## -## The wwid line in the following blacklist section is shown as an example -## of how to blacklist devices by wwid. The 2 devnode lines are the -## compiled in default blacklist. If you want to blacklist entire types -## of devices, such as all scsi devices, you should use a devnode line. -## However, if you want to blacklist specific devices, you should use -## a wwid line. Since there is no guarantee that a specific device will -## not change names on reboot (from /dev/sda to /dev/sdb for example) -## devnode lines are not recommended for blacklisting specific devices. 
-## -blacklist { - devnode "sdb" -# wwid 26353900f02796769 -# devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" -# devnode "^hd[a-z]" -} -#multipaths { -# multipath { -# wwid 3600508b4000156d700012000000b0000 -# alias yellow -# path_grouping_policy multibus -# path_selector "round-robin 0" -# failback manual -# rr_weight priorities -# no_path_retry 5 -# } -# multipath { -# wwid 1DEC_____321816758474 -# alias red -# } -#} -#devices { -# device { -# vendor "COMPAQ " -# product "HSV110 (C)COMPAQ" -# path_grouping_policy multibus -# path_checker readsector0 -# path_selector "round-robin 0" -# hardware_handler "0" -# failback 15 -# rr_weight priorities -# no_path_retry queue -# } -# device { -# vendor "COMPAQ " -# product "MSA1000 " -# path_grouping_policy multibus -# } -#} - -overrides { # Section added by Leapp - path_checker rdac - detect_checker yes - features "2 pg_init_retries 50" - path_selector "service-time 0" - fast_io_fail_tmo 5 - no_path_retry queue -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/all_the_things.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/all_the_things.conf deleted file mode 100644 index ee54d939..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/all_the_things.conf +++ /dev/null @@ -1,1069 +0,0 @@ -defaults { - verbosity 2 - polling_interval 5 - max_polling_interval 20 -# reassign_maps "yes" # Commented out by Leapp - multipath_dir "/lib64/multipath" - path_selector "service-time 0" - path_grouping_policy "failover" - uid_attribute "ID_SERIAL" - prio "const" - prio_args "" - features "0" -# path_checker "directio" # Commented out by Leapp - alias_prefix "mpath" - failback "manual" - rr_min_io 1000 - rr_min_io_rq 1 - max_fds 1048576 - rr_weight "uniform" - queue_without_daemon "no" - flush_on_last_del "no" - user_friendly_names "yes" - fast_io_fail_tmo 5 - bindings_file "/etc/multipath/bindings" - wwids_file 
/etc/multipath/wwids - prkeys_file /etc/multipath/prkeys - log_checker_err always - find_multipaths yes -# retain_attached_hw_handler no # Commented out by Leapp -# detect_prio no # Commented out by Leapp -# detect_path_checker no # Commented out by Leapp -# hw_str_match no # Commented out by Leapp - force_sync no - deferred_remove no -# ignore_new_boot_devs no # Commented out by Leapp - skip_kpartx no - config_dir "tests/files/conf.d" - delay_watch_checks no - delay_wait_checks no - retrigger_tries 3 - retrigger_delay 10 - missing_uev_wait_timeout 30 -# new_bindings_in_boot no # Commented out by Leapp - remove_retries 0 - disable_changed_wwids no -# unpriv_sgio no # Commented out by Leapp - ghost_delay no - all_tg_pt no - marginal_path_err_sample_time no - marginal_path_err_rate_threshold no - marginal_path_err_recheck_gap_time no - marginal_path_double_failed_time no -} -blacklist { - devnode "sdb" - devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" - devnode "^(td|hd|vd)[a-z]" - devnode "^dcssblk[0-9]*" - device { - vendor "DGC" - product "LUNZ" - } - device { - vendor "EMC" - product "LUNZ" - } - device { - vendor "IBM" - product "Universal Xport" - } - device { - vendor "IBM" - product "S/390.*" - } - device { - vendor "DELL" - product "Universal Xport" - } - device { - vendor "LENOVO" - product "Universal Xport" - } - device { - vendor "SGI" - product "Universal Xport" - } - device { - vendor "STK" - product "Universal Xport" - } - device { - vendor "SUN" - product "Universal Xport" - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "Universal Xport" - } -} -blacklist_exceptions { - devnode "sda" - wwid "123456789" - device { - vendor "IBM" - product "S/390x" - } -} - -devices { - device { - vendor "COMPELNT" - product "Compellent Vol" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "APPLE*" - product 
"Xserve RAID " - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "3PARdata" - product "VV" - path_grouping_policy "group_by_prio" - path_selector "service-time 0" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - fast_io_fail_tmo 10 - dev_loss_tmo "infinity" - } - device { - vendor "DEC" - product "HSG80" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "1 hp_sw" - prio "hp_sw" - rr_weight "uniform" - } - device { - vendor "HP" - product "A6189A" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "(MSA|HSV)1.0.*" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - features "0" # Line modified by Leapp - hardware_handler "1 hp_sw" - prio "hp_sw" - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "(COMPAQ|HP)" - product "MSA VOLUME" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "(COMPAQ|HP)" - product "HSV1[01]1|HSV2[01]0|HSV3[046]0|HSV4[05]0" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA2[02]12fc|MSA2012i" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" 
- product "MSA2012sa|MSA23(12|24)(fc|i|sa)|MSA2000s VOLUME" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA (1|2)040 SA(N|S)" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "HSVX700" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "HP" - product "LOGICAL VOLUME.*" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 12 - } - device { - vendor "HP" - product "P2000 G3 FC|P2000G3 FC/iSCSI|P2000 G3 SAS|P2000 G3 iSCSI" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "DDN" - product "SAN DataDirector" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "EMC" - product "SYMMETRIX" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 6 - } - device { - vendor "DGC" - product ".*" - product_blacklist "LUNZ" - path_grouping_policy "group_by_prio" - path_checker "emc_clariion" - features "0" # Line modified by Leapp - hardware_handler "1 emc" - prio "emc" - failback immediate - rr_weight "uniform" - no_path_retry 60 - retain_attached_hw_handler yes - detect_prio yes - detect_checker yes # Line 
modified by Leapp - } - device { - vendor "EMC" - product "Invista" - product_blacklist "LUNZ" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 5 - } - device { - vendor "FSC" - product "CentricStor" - path_grouping_policy "group_by_serial" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "FUJITSU" - product "ETERNUS_DX(H|L|M|400|8000)" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 10 - } - device { - vendor "(HITACHI|HP)" - product "OPEN-.*" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "HITACHI" - product "DF.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "hds" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "ProFibre 4000R" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^1722-600" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" # Line modified by Leapp - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1724" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" # Line modified by Leapp - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product 
"^1726" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" # Line modified by Leapp - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1742" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1745|^1746" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 15 - } - device { - vendor "IBM" - product "^1814" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1815" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1818" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^3526" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^3542" - path_grouping_policy "group_by_serial" - 
path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2105800" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2105F20" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^1750500" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2107900" - path_grouping_policy "multibus" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2145" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "S/390 DASD ECKD" - product_blacklist "S/390.*" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "S/390 DASD FBA" - product_blacklist "S/390.*" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler 
"0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^IPR.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "1820N00" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - rr_min_io 100 - } - device { - vendor "IBM" - product "2810XIV" - path_grouping_policy "multibus" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - failback 15 - rr_weight "uniform" - rr_min_io 15 - } - device { - vendor "AIX" - product "VDASD" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "IBM" - product "3303 NVDISK" - path_grouping_policy "failover" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "AIX" - product "NVDISK" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "DELL" - product "^MD3" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 30 - } - device { - vendor "LENOVO" - product "DE_Series" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - 
hardware_handler "1 rdac" - prio "rdac" - failback immediate - no_path_retry 30 - } - device { - vendor "NETAPP" - product "LUN.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "2 pg_init_retries 50" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "ontap" - failback immediate - rr_weight "uniform" - rr_min_io 128 - flush_on_last_del "yes" - dev_loss_tmo "infinity" - user_friendly_names no - retain_attached_hw_handler yes - detect_prio yes - } - device { - vendor "NEXENTA" - product "COMSTAR" - path_grouping_policy "group_by_serial" - path_checker "directio" - features "0" # Line modified by Leapp - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 30 - rr_min_io 128 - } - device { - vendor "IBM" - product "Nseries.*" - path_grouping_policy "group_by_prio" - path_checker "directio" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "ontap" - failback immediate - rr_weight "uniform" - rr_min_io 128 - } - device { - vendor "Pillar" - product "Axiom.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - rr_weight "uniform" - } - device { - vendor "SGI" - product "TP9[13]00" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "SGI" - product "TP9[45]00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SGI" - product "IS.*" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight 
"uniform" - no_path_retry 15 - } - device { - vendor "NEC" - product "DISK ARRAY" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "STK" - product "OPENstorage D280" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - } - device { - vendor "SUN" - product "(StorEdge 3510|T4)" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "SUN" - product "STK6580_6780" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - } - device { - vendor "EUROLOGC" - product "FC2502" - path_grouping_policy "group_by_prio" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "PIVOT3" - product "RAIGE VOLUME" - path_grouping_policy "multibus" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - rr_min_io 100 - } - device { - vendor "SUN" - product "CSM200_R" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SUN" - product "LCSM100_[IEFS]" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SUN" - product "SUN_6180" - 
path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - rr_min_io 1000 - rr_min_io_rq 1 - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "INF-01-00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 30 - retain_attached_hw_handler yes - detect_prio yes - } - device { - vendor "STK" - product "FLEXLINE 380" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "Intel" - product "Multi-Flex" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "SANmelody" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "Virtual Disk" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "NFINIDAT" - product "InfiniBox.*" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback 30 - rr_weight "priorities" - no_path_retry "fail" - flush_on_last_del "yes" - dev_loss_tmo 30 - } - device { - vendor "Nimble" - product "Server" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - 
path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "1 alua" - prio "alua" - failback immediate - dev_loss_tmo "infinity" - } - device { - vendor "XtremIO" - product "XtremApp" - path_grouping_policy "multibus" - path_selector "queue-length 0" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - failback immediate - fast_io_fail_tmo 15 - } - device { - vendor "PURE" - product "FlashArray" - path_grouping_policy "multibus" - path_selector "queue-length 0" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - fast_io_fail_tmo 10 - dev_loss_tmo 60 - user_friendly_names no - } - device { - vendor "HUAWEI" - product "XSG1" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - dev_loss_tmo 30 - } - device { - vendor "NVME" - product "^EMC PowerMax_" - path_grouping_policy "multibus" - uid_attribute "ID_WWN" - path_checker "none" - prio "const" - } - device { - vendor "NVME" - product ".*" - path_grouping_policy "multibus" - uid_attribute "ID_WWN" - path_checker "none" - detect_prio yes - } - device { - vendor "IBM" - product "^2145" - path_selector "service-time 0" - } -# device { # Section commented out by Leapp -# fast_io_fail_tmo 5 -# all_devs yes -# no_path_retry fail -# detect_checker yes # Line modified by Leapp -# } -# device { # Section commented out by Leapp -# features "1 queue_if_no_path" -# path_checker "tur" -# all_devs yes -# } -} -multipaths { - multipath { - wwid "123456789" - alias "foo" - } -} - -overrides { - no_path_retry fail # Line added by Leapp - features 0 # Line added by Leapp - checker "rdac" - detect_checker no # Line modified by Leapp -# hardware_handler "1 alua" # Commented out by Leapp -# pg_timeout no # Commented out by Leapp - fast_io_fail_tmo 10 -# unpriv_sgio no # Commented out by Leapp -} diff --git 
a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/default_rhel7.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/default_rhel7.conf deleted file mode 100644 index fb694f1c..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/default_rhel7.conf +++ /dev/null @@ -1,1036 +0,0 @@ -defaults { - verbosity 2 - polling_interval 5 - max_polling_interval 20 -# reassign_maps "yes" # Commented out by Leapp - multipath_dir "/lib64/multipath" - path_selector "service-time 0" - path_grouping_policy "failover" - uid_attribute "ID_SERIAL" - prio "const" - prio_args "" - features "0" -# path_checker "directio" # Commented out by Leapp - alias_prefix "mpath" - failback "manual" - rr_min_io 1000 - rr_min_io_rq 1 - max_fds 1048576 - rr_weight "uniform" - queue_without_daemon "no" - flush_on_last_del "no" - user_friendly_names "yes" - fast_io_fail_tmo 5 - bindings_file "/etc/multipath/bindings" - wwids_file /etc/multipath/wwids - prkeys_file /etc/multipath/prkeys - log_checker_err always - find_multipaths yes -# retain_attached_hw_handler no # Commented out by Leapp -# detect_prio no # Commented out by Leapp -# detect_path_checker no # Commented out by Leapp -# hw_str_match no # Commented out by Leapp - force_sync no - deferred_remove no -# ignore_new_boot_devs no # Commented out by Leapp - skip_kpartx no - config_dir "tests/files/conf.d" - delay_watch_checks no - delay_wait_checks no - retrigger_tries 3 - retrigger_delay 10 - missing_uev_wait_timeout 30 -# new_bindings_in_boot no # Commented out by Leapp - remove_retries 0 - disable_changed_wwids no -# unpriv_sgio no # Commented out by Leapp - ghost_delay no - all_tg_pt no - marginal_path_err_sample_time no - marginal_path_err_rate_threshold no - marginal_path_err_recheck_gap_time no - marginal_path_double_failed_time no -} -blacklist { - devnode "sdb" - devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" - devnode "^(td|hd|vd)[a-z]" - 
devnode "^dcssblk[0-9]*" - device { - vendor "DGC" - product "LUNZ" - } - device { - vendor "EMC" - product "LUNZ" - } - device { - vendor "IBM" - product "Universal Xport" - } - device { - vendor "IBM" - product "S/390.*" - } - device { - vendor "DELL" - product "Universal Xport" - } - device { - vendor "LENOVO" - product "Universal Xport" - } - device { - vendor "SGI" - product "Universal Xport" - } - device { - vendor "STK" - product "Universal Xport" - } - device { - vendor "SUN" - product "Universal Xport" - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "Universal Xport" - } -} -blacklist_exceptions { -} -devices { - device { - vendor "COMPELNT" - product "Compellent Vol" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "APPLE*" - product "Xserve RAID " - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "3PARdata" - product "VV" - path_grouping_policy "group_by_prio" - path_selector "service-time 0" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - fast_io_fail_tmo 10 - dev_loss_tmo "infinity" - } - device { - vendor "DEC" - product "HSG80" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "1 hp_sw" - prio "hp_sw" - rr_weight "uniform" - } - device { - vendor "HP" - product "A6189A" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "(MSA|HSV)1.0.*" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - features "0" # Line modified by 
Leapp - hardware_handler "1 hp_sw" - prio "hp_sw" - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "(COMPAQ|HP)" - product "MSA VOLUME" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "(COMPAQ|HP)" - product "HSV1[01]1|HSV2[01]0|HSV3[046]0|HSV4[05]0" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA2[02]12fc|MSA2012i" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA2012sa|MSA23(12|24)(fc|i|sa)|MSA2000s VOLUME" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA (1|2)040 SA(N|S)" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "HSVX700" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "HP" - product "LOGICAL VOLUME.*" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 12 - } - device { - vendor "HP" - product "P2000 G3 FC|P2000G3 FC/iSCSI|P2000 G3 SAS|P2000 G3 iSCSI" - path_grouping_policy 
"group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "DDN" - product "SAN DataDirector" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "EMC" - product "SYMMETRIX" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 6 - } - device { - vendor "DGC" - product ".*" - product_blacklist "LUNZ" - path_grouping_policy "group_by_prio" - path_checker "emc_clariion" - features "0" # Line modified by Leapp - hardware_handler "1 emc" - prio "emc" - failback immediate - rr_weight "uniform" - no_path_retry 60 - retain_attached_hw_handler yes - detect_prio yes - detect_checker yes # Line modified by Leapp - } - device { - vendor "EMC" - product "Invista" - product_blacklist "LUNZ" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 5 - } - device { - vendor "FSC" - product "CentricStor" - path_grouping_policy "group_by_serial" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "FUJITSU" - product "ETERNUS_DX(H|L|M|400|8000)" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 10 - } - device { - vendor "(HITACHI|HP)" - product "OPEN-.*" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "HITACHI" - product "DF.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # 
Line added by Leapp - hardware_handler "0" - prio "hds" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "ProFibre 4000R" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^1722-600" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" # Line modified by Leapp - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1724" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" # Line modified by Leapp - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1726" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" # Line modified by Leapp - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1742" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1745|^1746" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 15 - } - device { - vendor "IBM" - product "^1814" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 
"queue" - } - device { - vendor "IBM" - product "^1815" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1818" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^3526" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^3542" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2105800" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2105F20" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^1750500" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2107900" - path_grouping_policy "multibus" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - 
hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2145" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "S/390 DASD ECKD" - product_blacklist "S/390.*" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "S/390 DASD FBA" - product_blacklist "S/390.*" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^IPR.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "1820N00" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - rr_min_io 100 - } - device { - vendor "IBM" - product "2810XIV" - path_grouping_policy "multibus" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - failback 15 - rr_weight "uniform" - rr_min_io 15 - } - device { - vendor "AIX" - product "VDASD" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device 
{ - vendor "IBM" - product "3303 NVDISK" - path_grouping_policy "failover" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "AIX" - product "NVDISK" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "DELL" - product "^MD3" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 30 - } - device { - vendor "LENOVO" - product "DE_Series" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - no_path_retry 30 - } - device { - vendor "NETAPP" - product "LUN.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "2 pg_init_retries 50" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "ontap" - failback immediate - rr_weight "uniform" - rr_min_io 128 - flush_on_last_del "yes" - dev_loss_tmo "infinity" - user_friendly_names no - retain_attached_hw_handler yes - detect_prio yes - } - device { - vendor "NEXENTA" - product "COMSTAR" - path_grouping_policy "group_by_serial" - path_checker "directio" - features "0" # Line modified by Leapp - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 30 - rr_min_io 128 - } - device { - vendor "IBM" - product "Nseries.*" - path_grouping_policy "group_by_prio" - path_checker "directio" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "ontap" - failback immediate - rr_weight "uniform" - 
rr_min_io 128 - } - device { - vendor "Pillar" - product "Axiom.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - rr_weight "uniform" - } - device { - vendor "SGI" - product "TP9[13]00" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "SGI" - product "TP9[45]00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SGI" - product "IS.*" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 15 - } - device { - vendor "NEC" - product "DISK ARRAY" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "STK" - product "OPENstorage D280" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - } - device { - vendor "SUN" - product "(StorEdge 3510|T4)" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "SUN" - product "STK6580_6780" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - } - device { - vendor "EUROLOGC" - product "FC2502" - path_grouping_policy "group_by_prio" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" 
- rr_weight "uniform" - } - device { - vendor "PIVOT3" - product "RAIGE VOLUME" - path_grouping_policy "multibus" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - rr_min_io 100 - } - device { - vendor "SUN" - product "CSM200_R" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SUN" - product "LCSM100_[IEFS]" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SUN" - product "SUN_6180" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - rr_min_io 1000 - rr_min_io_rq 1 - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "INF-01-00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 30 - retain_attached_hw_handler yes - detect_prio yes - } - device { - vendor "STK" - product "FLEXLINE 380" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "Intel" - product "Multi-Flex" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - 
device { - vendor "DataCore" - product "SANmelody" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "Virtual Disk" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "NFINIDAT" - product "InfiniBox.*" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback 30 - rr_weight "priorities" - no_path_retry "fail" - flush_on_last_del "yes" - dev_loss_tmo 30 - } - device { - vendor "Nimble" - product "Server" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "1 alua" - prio "alua" - failback immediate - dev_loss_tmo "infinity" - } - device { - vendor "XtremIO" - product "XtremApp" - path_grouping_policy "multibus" - path_selector "queue-length 0" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - failback immediate - fast_io_fail_tmo 15 - } - device { - vendor "PURE" - product "FlashArray" - path_grouping_policy "multibus" - path_selector "queue-length 0" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - fast_io_fail_tmo 10 - dev_loss_tmo 60 - user_friendly_names no - } - device { - vendor "HUAWEI" - product "XSG1" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - dev_loss_tmo 30 - } - device { - vendor "NVME" - product "^EMC PowerMax_" - path_grouping_policy "multibus" - uid_attribute "ID_WWN" - path_checker "none" - prio "const" - } - device { - vendor 
"NVME" - product ".*" - path_grouping_policy "multibus" - uid_attribute "ID_WWN" - path_checker "none" - detect_prio yes - } - device { - vendor "IBM" - product "^2145" - path_selector "service-time 0" - } -} -multipaths { -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/just_all_devs.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/just_all_devs.conf deleted file mode 100644 index a456ef4c..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/just_all_devs.conf +++ /dev/null @@ -1,5 +0,0 @@ -devices { -# device { # Section commented out by Leapp -# all_devs yes -# } -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/just_checker.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/just_checker.conf deleted file mode 100644 index 615d496f..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/just_checker.conf +++ /dev/null @@ -1,1049 +0,0 @@ -defaults { - verbosity 2 - polling_interval 5 - max_polling_interval 20 - reassign_maps "no" - multipath_dir "/lib64/multipath" - path_selector "service-time 0" - path_grouping_policy "failover" - uid_attribute "ID_SERIAL" - prio "const" - prio_args "" - features "0" -# checker "rdac" # Commented out by Leapp - alias_prefix "mpath" - failback "manual" - rr_min_io 1000 - rr_min_io_rq 1 - max_fds "max" - rr_weight "uniform" - queue_without_daemon "no" - flush_on_last_del "no" - user_friendly_names "yes" - fast_io_fail_tmo 5 - bindings_file "/etc/multipath/bindings" - wwids_file "/etc/multipath/wwids" - prkeys_file "/etc/multipath/prkeys" - log_checker_err always - all_tg_pt "no" - retain_attached_hw_handler "yes" - detect_prio "yes" - detect_checker "yes" - force_sync "yes" - strict_timing "no" - deferred_remove "no" - config_dir "/etc/multipath/conf.d" - delay_watch_checks "no" - delay_wait_checks "no" - 
san_path_err_threshold "no" - san_path_err_forget_rate "no" - san_path_err_recovery_time "no" - marginal_path_err_sample_time "no" - marginal_path_err_rate_threshold "no" - marginal_path_err_recheck_gap_time "no" - marginal_path_double_failed_time "no" - find_multipaths "on" - uxsock_timeout 4000 - retrigger_tries 0 - retrigger_delay 10 - missing_uev_wait_timeout 30 - skip_kpartx "no" - disable_changed_wwids ignored - remove_retries 0 - ghost_delay "no" - find_multipaths_timeout -10 - enable_foreign "^$" - marginal_pathgroups "no" -} -blacklist { - devnode "^(ram|zram|raw|loop|fd|md|dm-|sr|scd|st|dcssblk)[0-9]" - devnode "^(td|hd|vd)[a-z]" - device { - vendor "SGI" - product "Universal Xport" - } - device { - vendor "^DGC" - product "LUNZ" - } - device { - vendor "EMC" - product "LUNZ" - } - device { - vendor "DELL" - product "Universal Xport" - } - device { - vendor "IBM" - product "Universal Xport" - } - device { - vendor "IBM" - product "S/390" - } - device { - vendor "LENOVO" - product "Universal Xport" - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "Universal Xport" - } - device { - vendor "STK" - product "Universal Xport" - } - device { - vendor "SUN" - product "Universal Xport" - } - device { - vendor "(Intel|INTEL)" - product "VTrak V-LUN" - } - device { - vendor "Promise" - product "VTrak V-LUN" - } - device { - vendor "Promise" - product "Vess V-LUN" - } -} -blacklist_exceptions { - property "(SCSI_IDENT_|ID_WWN)" -} -devices { - device { - vendor "NVME" - product ".*" - uid_attribute "ID_WWN" - path_checker "none" - retain_attached_hw_handler "no" - } - device { - vendor "APPLE" - product "Xserve RAID" - path_grouping_policy "multibus" - } - device { - vendor "3PARdata" - product "VV" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 18 - fast_io_fail_tmo 10 - dev_loss_tmo "infinity" - vpd_vendor hp3par - } - device { - vendor "DEC" - product "HSG80" - path_grouping_policy 
"group_by_prio" - path_checker "hp_sw" - hardware_handler "1 hp_sw" - prio "hp_sw" - no_path_retry "queue" - } - device { - vendor "HP" - product "A6189A" - path_grouping_policy "multibus" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "(MSA|HSV)1[01]0" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - hardware_handler "1 hp_sw" - prio "hp_sw" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "MSA VOLUME" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "(HSV1[01]1|HSV2[01]0|HSV3[046]0|HSV4[05]0)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 12 - } - device { - vendor "HP" - product "(MSA2[02]12fc|MSA2012i)" - path_grouping_policy "multibus" - no_path_retry 18 - } - device { - vendor "HP" - product "(MSA2012sa|MSA23(12|24)(fc|i|sa)|MSA2000s VOLUME)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 18 - } - device { - vendor "HP" - product "MSA [12]0[45]0 SA[NS]" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 18 - } - device { - vendor "HP" - product "HSVX700" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 12 - } - device { - vendor "HP" - product "LOGICAL VOLUME" - path_grouping_policy "multibus" - no_path_retry 12 - } - device { - vendor "HP" - product "(P2000 G3 FC|P2000G3 FC/iSCSI|P2000 G3 SAS|P2000 G3 iSCSI)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 18 - } - device { - vendor "LEFTHAND" - product "(P4000|iSCSIDisk|FCDISK)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 18 - } - device { - vendor "Nimble" - product "Server" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback 
"immediate" - no_path_retry "queue" - } - device { - vendor "SGI" - product "TP9100" - path_grouping_policy "multibus" - } - device { - vendor "SGI" - product "TP9[3457]00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SGI" - product "IS" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SGI" - product "^DD[46]A-" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "DDN" - product "SAN DataDirector" - path_grouping_policy "multibus" - } - device { - vendor "DDN" - product "^EF3010" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "DDN" - product "^(EF3015|S2A|SFA)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "NEXENTA" - product "COMSTAR" - path_grouping_policy "group_by_serial" - no_path_retry 30 - } - device { - vendor "TEGILE" - product "(ZEBI-(FC|ISCSI)|INTELLIFLASH)" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 10 - } - device { - vendor "EMC" - product "SYMMETRIX" - path_grouping_policy "multibus" - no_path_retry 6 - } - device { - vendor "^DGC" - product "^(RAID|DISK|VRAID)" - product_blacklist "LUNZ" - path_grouping_policy "group_by_prio" - path_checker "emc_clariion" - hardware_handler "1 emc" - prio "emc" - failback "immediate" - no_path_retry 60 - } - device { - vendor "EMC" - product "Invista" - product_blacklist "LUNZ" - path_grouping_policy "multibus" - no_path_retry 5 - } - device { - vendor "XtremIO" - product "XtremApp" - 
path_grouping_policy "multibus" - } - device { - vendor "COMPELNT" - product "Compellent Vol" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "DELL" - product "^MD3" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "NVME" - product "^EMC PowerMax_" - path_grouping_policy "multibus" - } - device { - vendor "FSC" - product "CentricStor" - path_grouping_policy "group_by_serial" - } - device { - vendor "FUJITSU" - product "ETERNUS_DX(H|L|M|400|8000)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 10 - } - device { - vendor "(EUROLOGC|EuroLogc)" - product "FC2502" - path_grouping_policy "multibus" - } - device { - vendor "FUJITSU" - product "E[234]000" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 10 - } - device { - vendor "FUJITSU" - product "E[68]000" - path_grouping_policy "multibus" - no_path_retry 10 - } - device { - vendor "(HITACHI|HP)" - product "^OPEN-" - path_grouping_policy "multibus" - } - device { - vendor "HITACHI" - product "^DF" - path_grouping_policy "group_by_prio" - prio "hds" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "HITACHI" - product "^DF600F" - path_grouping_policy "multibus" - } - device { - vendor "IBM" - product "ProFibre 4000R" - path_grouping_policy "multibus" - } - device { - vendor "IBM" - product "^1722-600" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1724" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" 
- hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1726" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1742" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1746" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1813" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1814" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1815" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1818" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^3526" - 
product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^(3542|3552)" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^2105" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1750500" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^2107900" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^2145" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "IBM" - product "S/390 DASD ECKD" - product_blacklist "S/390" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - no_path_retry "queue" - } - device { - vendor "IBM" - product "S/390 DASD FBA" - product_blacklist "S/390" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^IPR" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "IBM" - product "1820N00" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "(XIV|IBM)" - product "(NEXTRA|2810XIV)" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "(TMS|IBM)" - product "(RamSan|FlashSystem)" - path_grouping_policy "multibus" - } - device { - vendor "IBM" - product "^(DCS9900|2851)" 
- path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "AIX" - product "VDASD" - path_grouping_policy "multibus" - no_path_retry 60 - } - device { - vendor "IBM" - product "3303[ ]+NVDISK" - no_path_retry 60 - } - device { - vendor "AIX" - product "NVDISK" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 60 - } - device { - vendor "LENOVO" - product "DE_Series" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "NETAPP" - product "LUN" - path_grouping_policy "group_by_prio" - features "2 pg_init_retries 50" - prio "ontap" - failback "immediate" - no_path_retry "queue" - flush_on_last_del "yes" - dev_loss_tmo "infinity" - user_friendly_names "no" - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "INF-01-00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SolidFir" - product "SSD SAN" - path_grouping_policy "multibus" - no_path_retry 24 - } - device { - vendor "NVME" - product "^NetApp ONTAP Controller" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "NEC" - product "DISK ARRAY" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - } - device { - vendor "^Pillar" - product "^Axiom" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - } - device { - vendor "^Oracle" - product "^Oracle FS" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - } - device { - vendor "STK" - product "BladeCtlr" - product_blacklist 
"Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "STK" - product "OPENstorage" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "STK" - product "FLEXLINE 380" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "StorEdge 3" - path_grouping_policy "multibus" - } - device { - vendor "SUN" - product "STK6580_6780" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "CSM[12]00_R" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "LCSM100_[IEFS]" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "SUN_6180" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "ArrayStorage" - product_blacklist "Universal 
Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "(Sun Storage|ZFS Storage|COMSTAR)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "PIVOT3" - product "RAIGE VOLUME" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "(NexGen|Pivot3)" - product "(TierStore|vSTAC)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "(Intel|INTEL)" - product "Multi-Flex" - product_blacklist "VTrak V-LUN" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "(LIO-ORG|SUSE)" - product "RBD" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 12 - } - device { - vendor "DataCore" - product "SANmelody" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "Virtual Disk" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "PURE" - product "FlashArray" - path_grouping_policy "multibus" - } - device { - vendor "HUAWEI" - product "XSG1" - path_grouping_policy "group_by_prio" - prio "alua" - } - device { - vendor "KOVE" - product "XPD" - path_grouping_policy "multibus" - } - device { - vendor "NFINIDAT" - product "InfiniBox" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - prio "alua" - failback 30 - rr_weight "priorities" - no_path_retry "fail" - rr_min_io 1 - rr_min_io_rq 1 - flush_on_last_del "yes" - fast_io_fail_tmo 15 - dev_loss_tmo 15 - } - device { - vendor "KMNRIO" - product "K2" - 
path_grouping_policy "multibus" - } - device { - vendor "NEXSAN" - product "NXS-B0" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 15 - } - device { - vendor "NEXSAN" - product "SATAB" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 15 - } - device { - vendor "Nexsan" - product "(NestOS|NST5000)" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "VIOLIN" - product "SAN ARRAY$" - path_grouping_policy "group_by_serial" - no_path_retry 30 - } - device { - vendor "VIOLIN" - product "SAN ARRAY ALUA" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "VIOLIN" - product "CONCERTO ARRAY" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "(XIOTECH|XIOtech)" - product "ISE" - path_grouping_policy "multibus" - no_path_retry 12 - } - device { - vendor "(XIOTECH|XIOtech)" - product "IGLU DISK" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "(XIOTECH|XIOtech)" - product "Magnitude" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "Promise" - product "VTrak" - product_blacklist "VTrak V-LUN" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "Promise" - product "Vess" - product_blacklist "Vess V-LUN" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "^IFT" - product ".*" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "DotHill" - product "SANnet" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "DotHill" - product "R/Evo" - 
path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "DotHill" - product "^DH" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "AStor" - product "NeoSapphire" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "INSPUR" - product "MCS" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - } -} -overrides { -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/just_detect.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/just_detect.conf deleted file mode 100644 index c2824c3e..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/just_detect.conf +++ /dev/null @@ -1,3 +0,0 @@ -defaults { -# detect_prio 0 # Commented out by Leapp -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/just_exists.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/just_exists.conf deleted file mode 100644 index 778abbea..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/just_exists.conf +++ /dev/null @@ -1,33 +0,0 @@ -# device-mapper-multipath configuration file - -# For a complete list of the default configuration values, run either: -# # multipath -t -# or -# # multipathd show config - -# For a list of configuration options with descriptions, see the -# multipath.conf man page. 
- -defaults { - user_friendly_names yes - find_multipaths yes -} - -devices { - device { - vendor "Foo" - product "Bar" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - } -} - -blacklist_exceptions { - property "(SCSI_IDENT_|ID_WWN)" -} - -blacklist { -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/just_reassign.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/just_reassign.conf deleted file mode 100644 index 2094269a..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/just_reassign.conf +++ /dev/null @@ -1,93 +0,0 @@ -# This is a basic configuration file with some examples, for device mapper -# multipath. -# -# For a complete list of the default configuration values, run either -# multipath -t -# or -# multipathd show config -# -# For a list of configuration options with descriptions, see the multipath.conf -# man page - -## By default, devices with vendor = "IBM" and product = "S/390.*" are -## blacklisted. To enable mulitpathing on these devies, uncomment the -## following lines. -#blacklist_exceptions { -# device { -# vendor "IBM" -# product "S/390.*" -# } -#} - -## Use user friendly names, instead of using WWIDs as names. -defaults { - user_friendly_names yes - find_multipaths yes -# reassign_maps "yes" # Commented out by Leapp -} -## -## Here is an example of how to configure some standard options. -## -# -#defaults { -# polling_interval 10 -# path_selector "round-robin 0" -# path_grouping_policy multibus -# uid_attribute ID_SERIAL -# prio alua -# path_checker readsector0 -# rr_min_io 100 -# max_fds 8192 -# rr_weight priorities -# failback immediate -# no_path_retry fail -# user_friendly_names yes -#} -## -## The wwid line in the following blacklist section is shown as an example -## of how to blacklist devices by wwid. 
The 2 devnode lines are the -## compiled in default blacklist. If you want to blacklist entire types -## of devices, such as all scsi devices, you should use a devnode line. -## However, if you want to blacklist specific devices, you should use -## a wwid line. Since there is no guarantee that a specific device will -## not change names on reboot (from /dev/sda to /dev/sdb for example) -## devnode lines are not recommended for blacklisting specific devices. -## -#blacklist { -# wwid 26353900f02796769 -# devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" -# devnode "^hd[a-z]" -#} -#multipaths { -# multipath { -# wwid 3600508b4000156d700012000000b0000 -# alias yellow -# path_grouping_policy multibus -# path_selector "round-robin 0" -# failback manual -# rr_weight priorities -# no_path_retry 5 -# } -# multipath { -# wwid 1DEC_____321816758474 -# alias red -# } -#} -#devices { -# device { -# vendor "COMPAQ " -# product "HSV110 (C)COMPAQ" -# path_grouping_policy multibus -# path_checker readsector0 -# path_selector "round-robin 0" -# hardware_handler "0" -# failback 15 -# rr_weight priorities -# no_path_retry queue -# } -# device { -# vendor "COMPAQ " -# product "MSA1000 " -# path_grouping_policy multibus -# } -#} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/ugly1.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/ugly1.conf deleted file mode 100644 index 21702219..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/ugly1.conf +++ /dev/null @@ -1,1075 +0,0 @@ -defaults THIS SHOULDN'T BE HERE - verbosity 2 - polling_interval 5 - max_polling_interval 20 -# reassign_maps "yes" # Commented out by Leapp - multipath_dir "/lib64/multipath" - path_selector "service-time 0" - path_grouping_policy "failover" - uid_attribute "ID_SERIAL" - prio "const" - prio_args "" - features "0" -# path_checker "directio" # Commented out by Leapp - alias_prefix "mpath" - failback 
"manual" - rr_min_io 1000 - rr_min_io_rq 1 - max_fds 1048576 - rr_weight "uniform" - queue_without_daemon "no" - flush_on_last_del "no" - user_friendly_names "yes" - fast_io_fail_tmo 5 - bindings_file "/etc/multipath/bindings" - wwids_file /etc/multipath/wwids - prkeys_file /etc/multipath/prkeys - log_checker_err always - find_multipaths yes -# retain_attached_hw_handler no # Commented out by Leapp -# detect_prio no # Commented out by Leapp -# detect_path_checker no # Commented out by Leapp -# hw_str_match no # Commented out by Leapp - force_sync no - deferred_remove no -# ignore_new_boot_devs no # Commented out by Leapp - skip_kpartx no - config_dir "tests/files/conf.d" - delay_watch_checks no - delay_wait_checks no - retrigger_tries 3 - retrigger_delay 10 - missing_uev_wait_timeout 30 -# new_bindings_in_boot no # Commented out by Leapp - remove_retries 0 - disable_changed_wwids no -# unpriv_sgio no # Commented out by Leapp - ghost_delay no - all_tg_pt no - marginal_path_err_sample_time no - marginal_path_err_rate_threshold no - marginal_path_err_recheck_gap_time no - marginal_path_double_failed_time no -} -blacklist { - devnode "sdb" - devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" - devnode "^(td|hd|vd)[a-z]" - devnode "^dcssblk[0-9]*" - device - vendor "DGC" - product "LUNZ" - } - device { - vendor "EMC" - product "LUNZ" - } - device { - vendor "IBM" - product "Universal Xport" - } - device { - vendor "IBM" - product "S/390.*" - } - device { - vendor "DELL" - product "Universal Xport" - } - device { - vendor "LENOVO" - product "Universal Xport" - } - device { - vendor "SGI" - product "Universal Xport" - } - device { - vendor "STK" - product "Universal Xport" - } - device { - vendor "SUN" - product "Universal Xport" - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "Universal Xport" - } -} -blacklist_exceptions { - devnode "sda" - wwid "123456789" - device { - vendor "IBM" - product "S/390x" - } -} - -devices { BAD DATA - device { - vendor "COMPELNT" 
- product "Compellent Vol" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "APPLE*" - product "Xserve RAID " - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "3PARdata" - product "VV" - path_grouping_policy "group_by_prio" - path_selector "service-time 0" - path_checker "tur" - features "0" - hardware_handler "1 alua" EXTRA DATA - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - fast_io_fail_tmo 10 - dev_loss_tmo "infinity" - } - device { - vendor "DEC" - product "HSG80" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "1 hp_sw" - prio "hp_sw" - rr_weight "uniform" - } - device { - vendor "HP" - product "A6189A" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "(MSA|HSV)1.0.*" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - features "0" # Line modified by Leapp - hardware_handler "1 hp_sw" - prio "hp_sw" - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "(COMPAQ|HP)" - product "MSA VOLUME" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "(COMPAQ|HP)" - product "HSV1[01]1|HSV2[01]0|HSV3[046]0|HSV4[05]0" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - 
vendor "HP" - product "MSA2[02]12fc|MSA2012i" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA2012sa|MSA23(12|24)(fc|i|sa)|MSA2000s VOLUME" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA (1|2)040 SA(N|S)" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "HSVX700" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "HP" - product "LOGICAL VOLUME.*" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 12 - } - device { - vendor "HP" - product "P2000 G3 FC|P2000G3 FC/iSCSI|P2000 G3 SAS|P2000 G3 iSCSI" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "DDN" - product "SAN DataDirector" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "EMC" - product "SYMMETRIX" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 6 - } - device { - vendor "DGC" - product ".*" - product_blacklist "LUNZ" - path_grouping_policy 
"group_by_prio" - path_checker "emc_clariion" - features "0" # Line modified by Leapp - hardware_handler "1 emc" - prio "emc" - failback immediate - rr_weight "uniform" - no_path_retry 60 - retain_attached_hw_handler yes - detect_prio yes - detect_checker yes # Line modified by Leapp - } - device { - vendor "EMC" - product "Invista" - product_blacklist "LUNZ" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 5 - } - device { - vendor "FSC" - product "CentricStor" - path_grouping_policy "group_by_serial" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "FUJITSU" - product "ETERNUS_DX(H|L|M|400|8000)" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 10 - } - device { - vendor "(HITACHI|HP)" - product "OPEN-.*" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "HITACHI" - product "DF.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "hds" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "ProFibre 4000R" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^1722-600" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" # Line modified by Leapp - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1724" - product_blacklist 
"Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" # Line modified by Leapp - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1726" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" # Line modified by Leapp - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1742" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1745|^1746" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 15 - } - device { - vendor "IBM" - product "^1814" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1815" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1818" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^3526" - product_blacklist "Universal Xport" - path_grouping_policy 
"group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^3542" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2105800" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2105F20" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^1750500" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2107900" - path_grouping_policy "multibus" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2145" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "S/390 DASD ECKD" - product_blacklist "S/390.*" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor 
"IBM" - product "S/390 DASD FBA" - product_blacklist "S/390.*" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^IPR.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "1820N00" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - rr_min_io 100 - } - device { - vendor "IBM" - product "2810XIV" - path_grouping_policy "multibus" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - failback 15 - rr_weight "uniform" - rr_min_io 15 - } - device { - vendor "AIX" - product "VDASD" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "IBM" - product "3303 NVDISK" - path_grouping_policy "failover" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "AIX" - product "NVDISK" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "DELL" - product "^MD3" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - 
failback immediate - rr_weight "uniform" - no_path_retry 30 - } - device { - vendor "LENOVO" - product "DE_Series" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - no_path_retry 30 - } - device { - vendor "NETAPP" - product "LUN.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "2 pg_init_retries 50" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "ontap" - failback immediate - rr_weight "uniform" - rr_min_io 128 - flush_on_last_del "yes" - dev_loss_tmo "infinity" - user_friendly_names no - retain_attached_hw_handler yes - detect_prio yes - } - device { - vendor "NEXENTA" - product "COMSTAR" - path_grouping_policy "group_by_serial" - path_checker "directio" - features "0" # Line modified by Leapp - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 30 - rr_min_io 128 - } - device { - vendor "IBM" - product "Nseries.*" - path_grouping_policy "group_by_prio" - path_checker "directio" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "ontap" - failback immediate - rr_weight "uniform" - rr_min_io 128 - } - device { - vendor "Pillar" - product "Axiom.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - rr_weight "uniform" - } - device { - vendor "SGI" - product "TP9[13]00" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "SGI" - product "TP9[45]00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - 
device { - vendor "SGI" - product "IS.*" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 15 - } - device { - vendor "NEC" - product "DISK ARRAY" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "STK" - product "OPENstorage D280" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - } - device { - vendor "SUN" - product "(StorEdge 3510|T4)" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "SUN" - product "STK6580_6780" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - } - device { - vendor "EUROLOGC" - product "FC2502" - path_grouping_policy "group_by_prio" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "PIVOT3" - product "RAIGE VOLUME" - path_grouping_policy "multibus" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - rr_min_io 100 - } - device { - vendor "SUN" - product "CSM200_R" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SUN" - product "LCSM100_[IEFS]" - product_blacklist "Universal Xport" - 
path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SUN" - product "SUN_6180" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - rr_min_io 1000 - rr_min_io_rq 1 - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "INF-01-00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 30 - retain_attached_hw_handler yes - detect_prio yes - } - device { - vendor "STK" - product "FLEXLINE 380" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "Intel" - product "Multi-Flex" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "SANmelody" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "Virtual Disk" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "NFINIDAT" - product "InfiniBox.*" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - path_checker "tur" - features "0" - hardware_handler "0" - prio 
"alua" - failback 30 - rr_weight "priorities" - no_path_retry "fail" - flush_on_last_del "yes" - dev_loss_tmo 30 - } - device { - vendor "Nimble" - product "Server" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "1 alua" - prio "alua" - failback immediate - dev_loss_tmo "infinity" - } - device { - vendor "XtremIO" - product "XtremApp" - path_grouping_policy "multibus" - path_selector "queue-length 0" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - failback immediate - fast_io_fail_tmo 15 - } - device { - vendor "PURE" - product "FlashArray" - path_grouping_policy "multibus" - path_selector "queue-length 0" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - fast_io_fail_tmo 10 - dev_loss_tmo 60 - user_friendly_names no - } - device { - vendor "HUAWEI" - product "XSG1" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - dev_loss_tmo 30 - } - device { - vendor "NVME" - product "^EMC PowerMax_" - path_grouping_policy "multibus" - uid_attribute "ID_WWN" - path_checker "none" - prio "const" - } - device { - vendor "NVME" - product ".*" - path_grouping_policy "multibus" - uid_attribute "ID_WWN" - path_checker "none" - detect_prio yes - } - device { - vendor "IBM" - product "^2145" - path_selector "service-time 0" - } -# device { # Section commented out by Leapp -# fast_io_fail_tmo 5 -# dev_loss_tmo 60 -# all_devs yes -# no_path_retry fail -# detect_checker yes # Line modified by Leapp -# } -# device { # Section commented out by Leapp -# path_selector "service-time 0" JUNK IN LINE -# features "1 queue_if_no_path" -# path_checker "tur" -# all_devs yes -# } -} -multipaths { - multipath { - wwid "123456789" - alias "foo" - } -} - -overrides - dev_loss_tmo 60 # Line added 
by Leapp - path_selector "service-time 0" # Line added by Leapp - checker "rdac" - detect_checker no # Line modified by Leapp -# hardware_handler "1 alua" # Commented out by Leapp -# pg_timeout no # Commented out by Leapp - fast_io_fail_tmo 10 -# unpriv_sgio no # Commented out by Leapp - features "2 pg_init_retries 50" # Line modified by Leapp - no_path_retry queue # Line added by Leapp -# Missing closing brace - -} # line added by Leapp diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/ugly2.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/ugly2.conf deleted file mode 100644 index 3508a464..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/after/ugly2.conf +++ /dev/null @@ -1,135 +0,0 @@ -# This is a basic configuration file with some examples, for device mapper -# multipath. -# -# For a complete list of the default configuration values, run either -# multipath -t -# or -# multipathd show config -# -# For a list of configuration options with descriptions, see the multipath.conf -# man page - -## By default, devices with vendor = "IBM" and product = "S/390.*" are -## blacklisted. To enable mulitpathing on these devies, uncomment the -## following lines. -#blacklist_exceptions { -# device { -# vendor "IBM" -# product "S/390.*" -# } -#} - -## Use user friendly names, instead of using WWIDs as names. 
-defaults { - user_friendly_names yes - find_multipaths yes -} - -devices { - device { - vendor "NVME" - product ".*" - path_grouping_policy multibus - - } -# device { # Section commented out by Leapp -# all_devs yes -# path_checker tur -# pg_timeout no -# detect_checker yes # Line modified by Leapp -# } - -# device { # Section commented out by Leapp -# features "3 queue_if_no_path pg_init_retries 50" -# path_selector "service-time 0" -# all_devs yes -# unpriv_sgio no # Commented out by Leapp -# } - -# device { # Section commented out by Leapp -# hardware_handler "1 alua" -# vendor "test_vendor" -# product "test_product" -# revision 1 -# product_blacklist "test.*" -# all_devs yes -# fast_io_fail_tmo 5 -# path_checker rdac -# no closing braces -# -## -## Here is an example of how to configure some standard options. -## -# -#defaults { -# polling_interval 10 -# path_selector "round-robin 0" -# path_grouping_policy multibus -# uid_attribute ID_SERIAL -# prio alua -# path_checker readsector0 -# rr_min_io 100 -# max_fds 8192 -# rr_weight priorities -# failback immediate -# no_path_retry fail -# user_friendly_names yes -#} -## -## The wwid line in the following blacklist section is shown as an example -## of how to blacklist devices by wwid. The 2 devnode lines are the -## compiled in default blacklist. If you want to blacklist entire types -## of devices, such as all scsi devices, you should use a devnode line. -## However, if you want to blacklist specific devices, you should use -## a wwid line. Since there is no guarantee that a specific device will -## not change names on reboot (from /dev/sda to /dev/sdb for example) -## devnode lines are not recommended for blacklisting specific devices. 
-## -# wwid 26353900f02796769 -# devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" -# devnode "^hd[a-z]" -#multipaths { -# multipath { -# wwid 3600508b4000156d700012000000b0000 -# alias yellow -# path_grouping_policy multibus -# path_selector "round-robin 0" -# failback manual -# rr_weight priorities -# no_path_retry 5 -# } -# multipath { -# wwid 1DEC_____321816758474 -# alias red -# } -#} -#devices { -# device { -# vendor "COMPAQ " -# product "HSV110 (C)COMPAQ" -# path_grouping_policy multibus -# path_checker readsector0 -# path_selector "round-robin 0" -# hardware_handler "0" -# failback 15 -# rr_weight priorities -# no_path_retry queue -# } -# device { -# vendor "COMPAQ " -# product "MSA1000 " -# path_grouping_policy multibus -# } -#} -# -# } # line added by Leapp -} # line added by Leapp - -overrides { # Section added by Leapp - path_checker rdac - detect_checker yes - features "2 pg_init_retries 50" - path_selector "service-time 0" - fast_io_fail_tmo 5 - no_path_retry queue -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/all_devs.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/all_devs.conf deleted file mode 100644 index fa52de4b..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/all_devs.conf +++ /dev/null @@ -1,136 +0,0 @@ -# This is a basic configuration file with some examples, for device mapper -# multipath. -# -# For a complete list of the default configuration values, run either -# multipath -t -# or -# multipathd show config -# -# For a list of configuration options with descriptions, see the multipath.conf -# man page - -## By default, devices with vendor = "IBM" and product = "S/390.*" are -## blacklisted. To enable mulitpathing on these devies, uncomment the -## following lines. -#blacklist_exceptions { -# device { -# vendor "IBM" -# product "S/390.*" -# } -#} - -## Use user friendly names, instead of using WWIDs as names. 
-defaults { - user_friendly_names yes - find_multipaths yes -} - -devices { - device { - vendor "NVME" - product ".*" - path_grouping_policy multibus - - } - device { - all_devs yes - path_checker tur - pg_timeout no - detect_path_checker yes - } - - device { - features "3 queue_if_no_path pg_init_retries 50" - path_selector "service-time 0" - all_devs yes - unpriv_sgio no - } - - device { - hardware_handler "1 alua" - vendor "test_vendor" - product "test_product" - revision 1 - product_blacklist "test.*" - all_devs yes - fast_io_fail_tmo 5 - path_checker rdac - } - device { - vendor "IBM" - product "^2145" - path_selector "service-time 0" - features "1 queue_if_no_path" - } - -} - - - -## -## Here is an example of how to configure some standard options. -## -# -#defaults { -# polling_interval 10 -# path_selector "round-robin 0" -# path_grouping_policy multibus -# uid_attribute ID_SERIAL -# prio alua -# path_checker readsector0 -# rr_min_io 100 -# max_fds 8192 -# rr_weight priorities -# failback immediate -# no_path_retry fail -# user_friendly_names yes -#} -## -## The wwid line in the following blacklist section is shown as an example -## of how to blacklist devices by wwid. The 2 devnode lines are the -## compiled in default blacklist. If you want to blacklist entire types -## of devices, such as all scsi devices, you should use a devnode line. -## However, if you want to blacklist specific devices, you should use -## a wwid line. Since there is no guarantee that a specific device will -## not change names on reboot (from /dev/sda to /dev/sdb for example) -## devnode lines are not recommended for blacklisting specific devices. 
-## -blacklist { - devnode "sdb" -# wwid 26353900f02796769 -# devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" -# devnode "^hd[a-z]" -} -#multipaths { -# multipath { -# wwid 3600508b4000156d700012000000b0000 -# alias yellow -# path_grouping_policy multibus -# path_selector "round-robin 0" -# failback manual -# rr_weight priorities -# no_path_retry 5 -# } -# multipath { -# wwid 1DEC_____321816758474 -# alias red -# } -#} -#devices { -# device { -# vendor "COMPAQ " -# product "HSV110 (C)COMPAQ" -# path_grouping_policy multibus -# path_checker readsector0 -# path_selector "round-robin 0" -# hardware_handler "0" -# failback 15 -# rr_weight priorities -# no_path_retry queue -# } -# device { -# vendor "COMPAQ " -# product "MSA1000 " -# path_grouping_policy multibus -# } -#} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/all_the_things.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/all_the_things.conf deleted file mode 100644 index cb710e4f..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/all_the_things.conf +++ /dev/null @@ -1,1052 +0,0 @@ -defaults { - verbosity 2 - polling_interval 5 - max_polling_interval 20 - reassign_maps "yes" - multipath_dir "/lib64/multipath" - path_selector "service-time 0" - path_grouping_policy "failover" - uid_attribute "ID_SERIAL" - prio "const" - prio_args "" - features "0" - path_checker "directio" - alias_prefix "mpath" - failback "manual" - rr_min_io 1000 - rr_min_io_rq 1 - max_fds 1048576 - rr_weight "uniform" - queue_without_daemon "no" - flush_on_last_del "no" - user_friendly_names "yes" - fast_io_fail_tmo 5 - bindings_file "/etc/multipath/bindings" - wwids_file /etc/multipath/wwids - prkeys_file /etc/multipath/prkeys - log_checker_err always - find_multipaths yes - retain_attached_hw_handler no - detect_prio no - detect_path_checker no - hw_str_match no - force_sync no - deferred_remove no - ignore_new_boot_devs 
no - skip_kpartx no - config_dir "tests/files/conf.d" - delay_watch_checks no - delay_wait_checks no - retrigger_tries 3 - retrigger_delay 10 - missing_uev_wait_timeout 30 - new_bindings_in_boot no - remove_retries 0 - disable_changed_wwids no - unpriv_sgio no - ghost_delay no - all_tg_pt no - marginal_path_err_sample_time no - marginal_path_err_rate_threshold no - marginal_path_err_recheck_gap_time no - marginal_path_double_failed_time no -} -blacklist { - devnode "sdb" - devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" - devnode "^(td|hd|vd)[a-z]" - devnode "^dcssblk[0-9]*" - device { - vendor "DGC" - product "LUNZ" - } - device { - vendor "EMC" - product "LUNZ" - } - device { - vendor "IBM" - product "Universal Xport" - } - device { - vendor "IBM" - product "S/390.*" - } - device { - vendor "DELL" - product "Universal Xport" - } - device { - vendor "LENOVO" - product "Universal Xport" - } - device { - vendor "SGI" - product "Universal Xport" - } - device { - vendor "STK" - product "Universal Xport" - } - device { - vendor "SUN" - product "Universal Xport" - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "Universal Xport" - } -} -blacklist_exceptions { - devnode "sda" - wwid "123456789" - device { - vendor "IBM" - product "S/390x" - } -} - -devices { - device { - vendor "COMPELNT" - product "Compellent Vol" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "APPLE*" - product "Xserve RAID " - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "3PARdata" - product "VV" - path_grouping_policy "group_by_prio" - path_selector "service-time 0" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - fast_io_fail_tmo 10 - 
dev_loss_tmo "infinity" - } - device { - vendor "DEC" - product "HSG80" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - features "1 queue_if_no_path" - hardware_handler "1 hp_sw" - prio "hp_sw" - rr_weight "uniform" - } - device { - vendor "HP" - product "A6189A" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "(MSA|HSV)1.0.*" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - features "1 queue_if_no_path" - hardware_handler "1 hp_sw" - prio "hp_sw" - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "(COMPAQ|HP)" - product "MSA VOLUME" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "(COMPAQ|HP)" - product "HSV1[01]1|HSV2[01]0|HSV3[046]0|HSV4[05]0" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA2[02]12fc|MSA2012i" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA2012sa|MSA23(12|24)(fc|i|sa)|MSA2000s VOLUME" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA (1|2)040 SA(N|S)" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - 
rr_min_io 100 - } - device { - vendor "HP" - product "HSVX700" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "HP" - product "LOGICAL VOLUME.*" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 12 - } - device { - vendor "HP" - product "P2000 G3 FC|P2000G3 FC/iSCSI|P2000 G3 SAS|P2000 G3 iSCSI" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "DDN" - product "SAN DataDirector" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "EMC" - product "SYMMETRIX" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 6 - } - device { - vendor "DGC" - product ".*" - product_blacklist "LUNZ" - path_grouping_policy "group_by_prio" - path_checker "emc_clariion" - features "1 queue_if_no_path" - hardware_handler "1 emc" - prio "emc" - failback immediate - rr_weight "uniform" - no_path_retry 60 - retain_attached_hw_handler yes - detect_prio yes - detect_path_checker yes - } - device { - vendor "EMC" - product "Invista" - product_blacklist "LUNZ" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 5 - } - device { - vendor "FSC" - product "CentricStor" - path_grouping_policy "group_by_serial" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "FUJITSU" - product "ETERNUS_DX(H|L|M|400|8000)" - 
path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 10 - } - device { - vendor "(HITACHI|HP)" - product "OPEN-.*" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "HITACHI" - product "DF.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "hds" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "ProFibre 4000R" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^1722-600" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "1 queue_if_no_path" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1724" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "1 queue_if_no_path" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1726" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "1 queue_if_no_path" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1742" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1745|^1746" - product_blacklist 
"Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 15 - } - device { - vendor "IBM" - product "^1814" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1815" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1818" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^3526" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^3542" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2105800" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2105F20" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^1750500" - path_grouping_policy "group_by_prio" - 
path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2107900" - path_grouping_policy "multibus" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2145" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "S/390 DASD ECKD" - product_blacklist "S/390.*" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "S/390 DASD FBA" - product_blacklist "S/390.*" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^IPR.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "1820N00" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - rr_min_io 100 - } - device { - vendor "IBM" - product "2810XIV" - path_grouping_policy "multibus" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - failback 15 - rr_weight "uniform" - rr_min_io 15 - } - device { - vendor "AIX" - product "VDASD" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 60 - 
} - device { - vendor "IBM" - product "3303 NVDISK" - path_grouping_policy "failover" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "AIX" - product "NVDISK" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "DELL" - product "^MD3" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 30 - } - device { - vendor "LENOVO" - product "DE_Series" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - no_path_retry 30 - } - device { - vendor "NETAPP" - product "LUN.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "3 queue_if_no_path pg_init_retries 50" - hardware_handler "0" - prio "ontap" - failback immediate - rr_weight "uniform" - rr_min_io 128 - flush_on_last_del "yes" - dev_loss_tmo "infinity" - user_friendly_names no - retain_attached_hw_handler yes - detect_prio yes - } - device { - vendor "NEXENTA" - product "COMSTAR" - path_grouping_policy "group_by_serial" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 30 - rr_min_io 128 - } - device { - vendor "IBM" - product "Nseries.*" - path_grouping_policy "group_by_prio" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "ontap" - failback immediate - rr_weight "uniform" - rr_min_io 128 - } - device { - vendor "Pillar" - product "Axiom.*" - path_grouping_policy "group_by_prio" 
- path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - rr_weight "uniform" - } - device { - vendor "SGI" - product "TP9[13]00" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "SGI" - product "TP9[45]00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SGI" - product "IS.*" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 15 - } - device { - vendor "NEC" - product "DISK ARRAY" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "STK" - product "OPENstorage D280" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - } - device { - vendor "SUN" - product "(StorEdge 3510|T4)" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "SUN" - product "STK6580_6780" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - } - device { - vendor "EUROLOGC" - product "FC2502" - path_grouping_policy "group_by_prio" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "PIVOT3" - product "RAIGE VOLUME" - path_grouping_policy 
"multibus" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - rr_min_io 100 - } - device { - vendor "SUN" - product "CSM200_R" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SUN" - product "LCSM100_[IEFS]" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SUN" - product "SUN_6180" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - rr_min_io 1000 - rr_min_io_rq 1 - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "INF-01-00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 30 - retain_attached_hw_handler yes - detect_prio yes - } - device { - vendor "STK" - product "FLEXLINE 380" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "Intel" - product "Multi-Flex" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "SANmelody" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio 
"alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "Virtual Disk" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "NFINIDAT" - product "InfiniBox.*" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback 30 - rr_weight "priorities" - no_path_retry "fail" - flush_on_last_del "yes" - dev_loss_tmo 30 - } - device { - vendor "Nimble" - product "Server" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "1 alua" - prio "alua" - failback immediate - dev_loss_tmo "infinity" - } - device { - vendor "XtremIO" - product "XtremApp" - path_grouping_policy "multibus" - path_selector "queue-length 0" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - failback immediate - fast_io_fail_tmo 15 - } - device { - vendor "PURE" - product "FlashArray" - path_grouping_policy "multibus" - path_selector "queue-length 0" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - fast_io_fail_tmo 10 - dev_loss_tmo 60 - user_friendly_names no - } - device { - vendor "HUAWEI" - product "XSG1" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - dev_loss_tmo 30 - } - device { - vendor "NVME" - product "^EMC PowerMax_" - path_grouping_policy "multibus" - uid_attribute "ID_WWN" - path_checker "none" - prio "const" - } - device { - vendor "NVME" - product ".*" - path_grouping_policy "multibus" - uid_attribute "ID_WWN" - path_checker "none" - detect_prio yes - } - device { - vendor "IBM" - product "^2145" - path_selector "service-time 0" - } - 
device { - fast_io_fail_tmo 5 - all_devs yes - no_path_retry fail - detect_path_checker yes - } - device { - features "1 queue_if_no_path" - path_checker "tur" - all_devs yes - } -} -multipaths { - multipath { - wwid "123456789" - alias "foo" - } -} - -overrides { - checker "rdac" - detect_path_checker no - hardware_handler "1 alua" - pg_timeout no - fast_io_fail_tmo 10 - unpriv_sgio no -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/already_updated.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/already_updated.conf deleted file mode 100644 index ee54d939..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/already_updated.conf +++ /dev/null @@ -1,1069 +0,0 @@ -defaults { - verbosity 2 - polling_interval 5 - max_polling_interval 20 -# reassign_maps "yes" # Commented out by Leapp - multipath_dir "/lib64/multipath" - path_selector "service-time 0" - path_grouping_policy "failover" - uid_attribute "ID_SERIAL" - prio "const" - prio_args "" - features "0" -# path_checker "directio" # Commented out by Leapp - alias_prefix "mpath" - failback "manual" - rr_min_io 1000 - rr_min_io_rq 1 - max_fds 1048576 - rr_weight "uniform" - queue_without_daemon "no" - flush_on_last_del "no" - user_friendly_names "yes" - fast_io_fail_tmo 5 - bindings_file "/etc/multipath/bindings" - wwids_file /etc/multipath/wwids - prkeys_file /etc/multipath/prkeys - log_checker_err always - find_multipaths yes -# retain_attached_hw_handler no # Commented out by Leapp -# detect_prio no # Commented out by Leapp -# detect_path_checker no # Commented out by Leapp -# hw_str_match no # Commented out by Leapp - force_sync no - deferred_remove no -# ignore_new_boot_devs no # Commented out by Leapp - skip_kpartx no - config_dir "tests/files/conf.d" - delay_watch_checks no - delay_wait_checks no - retrigger_tries 3 - retrigger_delay 10 - missing_uev_wait_timeout 30 -# new_bindings_in_boot no # 
Commented out by Leapp - remove_retries 0 - disable_changed_wwids no -# unpriv_sgio no # Commented out by Leapp - ghost_delay no - all_tg_pt no - marginal_path_err_sample_time no - marginal_path_err_rate_threshold no - marginal_path_err_recheck_gap_time no - marginal_path_double_failed_time no -} -blacklist { - devnode "sdb" - devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" - devnode "^(td|hd|vd)[a-z]" - devnode "^dcssblk[0-9]*" - device { - vendor "DGC" - product "LUNZ" - } - device { - vendor "EMC" - product "LUNZ" - } - device { - vendor "IBM" - product "Universal Xport" - } - device { - vendor "IBM" - product "S/390.*" - } - device { - vendor "DELL" - product "Universal Xport" - } - device { - vendor "LENOVO" - product "Universal Xport" - } - device { - vendor "SGI" - product "Universal Xport" - } - device { - vendor "STK" - product "Universal Xport" - } - device { - vendor "SUN" - product "Universal Xport" - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "Universal Xport" - } -} -blacklist_exceptions { - devnode "sda" - wwid "123456789" - device { - vendor "IBM" - product "S/390x" - } -} - -devices { - device { - vendor "COMPELNT" - product "Compellent Vol" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "APPLE*" - product "Xserve RAID " - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "3PARdata" - product "VV" - path_grouping_policy "group_by_prio" - path_selector "service-time 0" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - fast_io_fail_tmo 10 - dev_loss_tmo "infinity" - } - device { - vendor "DEC" - product "HSG80" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - features "0" # 
Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "1 hp_sw" - prio "hp_sw" - rr_weight "uniform" - } - device { - vendor "HP" - product "A6189A" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "(MSA|HSV)1.0.*" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - features "0" # Line modified by Leapp - hardware_handler "1 hp_sw" - prio "hp_sw" - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "(COMPAQ|HP)" - product "MSA VOLUME" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "(COMPAQ|HP)" - product "HSV1[01]1|HSV2[01]0|HSV3[046]0|HSV4[05]0" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA2[02]12fc|MSA2012i" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA2012sa|MSA23(12|24)(fc|i|sa)|MSA2000s VOLUME" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA (1|2)040 SA(N|S)" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "HSVX700" - path_grouping_policy 
"group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "HP" - product "LOGICAL VOLUME.*" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 12 - } - device { - vendor "HP" - product "P2000 G3 FC|P2000G3 FC/iSCSI|P2000 G3 SAS|P2000 G3 iSCSI" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "DDN" - product "SAN DataDirector" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "EMC" - product "SYMMETRIX" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 6 - } - device { - vendor "DGC" - product ".*" - product_blacklist "LUNZ" - path_grouping_policy "group_by_prio" - path_checker "emc_clariion" - features "0" # Line modified by Leapp - hardware_handler "1 emc" - prio "emc" - failback immediate - rr_weight "uniform" - no_path_retry 60 - retain_attached_hw_handler yes - detect_prio yes - detect_checker yes # Line modified by Leapp - } - device { - vendor "EMC" - product "Invista" - product_blacklist "LUNZ" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 5 - } - device { - vendor "FSC" - product "CentricStor" - path_grouping_policy "group_by_serial" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "FUJITSU" - product "ETERNUS_DX(H|L|M|400|8000)" - path_grouping_policy "group_by_prio" - path_checker "tur" - 
features "0" # Line modified by Leapp - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 10 - } - device { - vendor "(HITACHI|HP)" - product "OPEN-.*" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "HITACHI" - product "DF.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "hds" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "ProFibre 4000R" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^1722-600" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" # Line modified by Leapp - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1724" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" # Line modified by Leapp - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1726" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" # Line modified by Leapp - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1742" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1745|^1746" - 
product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 15 - } - device { - vendor "IBM" - product "^1814" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1815" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1818" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^3526" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^3542" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2105800" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2105F20" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - 
rr_weight "uniform" - } - device { - vendor "IBM" - product "^1750500" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2107900" - path_grouping_policy "multibus" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2145" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "S/390 DASD ECKD" - product_blacklist "S/390.*" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "S/390 DASD FBA" - product_blacklist "S/390.*" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^IPR.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "1820N00" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - rr_min_io 100 - } - device { - vendor 
"IBM" - product "2810XIV" - path_grouping_policy "multibus" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - failback 15 - rr_weight "uniform" - rr_min_io 15 - } - device { - vendor "AIX" - product "VDASD" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "IBM" - product "3303 NVDISK" - path_grouping_policy "failover" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "AIX" - product "NVDISK" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "DELL" - product "^MD3" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 30 - } - device { - vendor "LENOVO" - product "DE_Series" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - no_path_retry 30 - } - device { - vendor "NETAPP" - product "LUN.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "2 pg_init_retries 50" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "ontap" - failback immediate - rr_weight "uniform" - rr_min_io 128 - flush_on_last_del "yes" - dev_loss_tmo "infinity" - user_friendly_names no - retain_attached_hw_handler yes - detect_prio yes - } - device { - vendor "NEXENTA" - product "COMSTAR" - path_grouping_policy 
"group_by_serial" - path_checker "directio" - features "0" # Line modified by Leapp - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 30 - rr_min_io 128 - } - device { - vendor "IBM" - product "Nseries.*" - path_grouping_policy "group_by_prio" - path_checker "directio" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "ontap" - failback immediate - rr_weight "uniform" - rr_min_io 128 - } - device { - vendor "Pillar" - product "Axiom.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - rr_weight "uniform" - } - device { - vendor "SGI" - product "TP9[13]00" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "SGI" - product "TP9[45]00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SGI" - product "IS.*" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 15 - } - device { - vendor "NEC" - product "DISK ARRAY" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "STK" - product "OPENstorage D280" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - } - device { - vendor "SUN" - product "(StorEdge 3510|T4)" - path_grouping_policy "multibus" - 
path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "SUN" - product "STK6580_6780" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - } - device { - vendor "EUROLOGC" - product "FC2502" - path_grouping_policy "group_by_prio" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "PIVOT3" - product "RAIGE VOLUME" - path_grouping_policy "multibus" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "0" - prio "const" - rr_weight "uniform" - rr_min_io 100 - } - device { - vendor "SUN" - product "CSM200_R" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SUN" - product "LCSM100_[IEFS]" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SUN" - product "SUN_6180" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - rr_min_io 1000 - rr_min_io_rq 1 - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "INF-01-00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 30 - retain_attached_hw_handler yes - detect_prio yes - } - device { - vendor "STK" - 
product "FLEXLINE 380" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "Intel" - product "Multi-Flex" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "SANmelody" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "Virtual Disk" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "NFINIDAT" - product "InfiniBox.*" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback 30 - rr_weight "priorities" - no_path_retry "fail" - flush_on_last_del "yes" - dev_loss_tmo 30 - } - device { - vendor "Nimble" - product "Server" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - path_checker "tur" - features "0" # Line modified by Leapp - no_path_retry queue # Line added by Leapp - hardware_handler "1 alua" - prio "alua" - failback immediate - dev_loss_tmo "infinity" - } - device { - vendor "XtremIO" - product "XtremApp" - path_grouping_policy "multibus" - path_selector "queue-length 0" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - failback immediate - fast_io_fail_tmo 15 - } - device { - vendor "PURE" - product "FlashArray" - path_grouping_policy "multibus" - path_selector "queue-length 0" - path_checker "tur" - features "0" - 
hardware_handler "0" - prio "const" - failback immediate - fast_io_fail_tmo 10 - dev_loss_tmo 60 - user_friendly_names no - } - device { - vendor "HUAWEI" - product "XSG1" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - dev_loss_tmo 30 - } - device { - vendor "NVME" - product "^EMC PowerMax_" - path_grouping_policy "multibus" - uid_attribute "ID_WWN" - path_checker "none" - prio "const" - } - device { - vendor "NVME" - product ".*" - path_grouping_policy "multibus" - uid_attribute "ID_WWN" - path_checker "none" - detect_prio yes - } - device { - vendor "IBM" - product "^2145" - path_selector "service-time 0" - } -# device { # Section commented out by Leapp -# fast_io_fail_tmo 5 -# all_devs yes -# no_path_retry fail -# detect_checker yes # Line modified by Leapp -# } -# device { # Section commented out by Leapp -# features "1 queue_if_no_path" -# path_checker "tur" -# all_devs yes -# } -} -multipaths { - multipath { - wwid "123456789" - alias "foo" - } -} - -overrides { - no_path_retry fail # Line added by Leapp - features 0 # Line added by Leapp - checker "rdac" - detect_checker no # Line modified by Leapp -# hardware_handler "1 alua" # Commented out by Leapp -# pg_timeout no # Commented out by Leapp - fast_io_fail_tmo 10 -# unpriv_sgio no # Commented out by Leapp -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/default_rhel7.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/default_rhel7.conf deleted file mode 100644 index 6afc7edc..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/default_rhel7.conf +++ /dev/null @@ -1,1021 +0,0 @@ -defaults { - verbosity 2 - polling_interval 5 - max_polling_interval 20 - reassign_maps "yes" - multipath_dir "/lib64/multipath" - path_selector "service-time 0" - path_grouping_policy "failover" - uid_attribute "ID_SERIAL" - prio 
"const" - prio_args "" - features "0" - path_checker "directio" - alias_prefix "mpath" - failback "manual" - rr_min_io 1000 - rr_min_io_rq 1 - max_fds 1048576 - rr_weight "uniform" - queue_without_daemon "no" - flush_on_last_del "no" - user_friendly_names "yes" - fast_io_fail_tmo 5 - bindings_file "/etc/multipath/bindings" - wwids_file /etc/multipath/wwids - prkeys_file /etc/multipath/prkeys - log_checker_err always - find_multipaths yes - retain_attached_hw_handler no - detect_prio no - detect_path_checker no - hw_str_match no - force_sync no - deferred_remove no - ignore_new_boot_devs no - skip_kpartx no - config_dir "tests/files/conf.d" - delay_watch_checks no - delay_wait_checks no - retrigger_tries 3 - retrigger_delay 10 - missing_uev_wait_timeout 30 - new_bindings_in_boot no - remove_retries 0 - disable_changed_wwids no - unpriv_sgio no - ghost_delay no - all_tg_pt no - marginal_path_err_sample_time no - marginal_path_err_rate_threshold no - marginal_path_err_recheck_gap_time no - marginal_path_double_failed_time no -} -blacklist { - devnode "sdb" - devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" - devnode "^(td|hd|vd)[a-z]" - devnode "^dcssblk[0-9]*" - device { - vendor "DGC" - product "LUNZ" - } - device { - vendor "EMC" - product "LUNZ" - } - device { - vendor "IBM" - product "Universal Xport" - } - device { - vendor "IBM" - product "S/390.*" - } - device { - vendor "DELL" - product "Universal Xport" - } - device { - vendor "LENOVO" - product "Universal Xport" - } - device { - vendor "SGI" - product "Universal Xport" - } - device { - vendor "STK" - product "Universal Xport" - } - device { - vendor "SUN" - product "Universal Xport" - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "Universal Xport" - } -} -blacklist_exceptions { -} -devices { - device { - vendor "COMPELNT" - product "Compellent Vol" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight 
"uniform" - no_path_retry "queue" - } - device { - vendor "APPLE*" - product "Xserve RAID " - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "3PARdata" - product "VV" - path_grouping_policy "group_by_prio" - path_selector "service-time 0" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - fast_io_fail_tmo 10 - dev_loss_tmo "infinity" - } - device { - vendor "DEC" - product "HSG80" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - features "1 queue_if_no_path" - hardware_handler "1 hp_sw" - prio "hp_sw" - rr_weight "uniform" - } - device { - vendor "HP" - product "A6189A" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "(MSA|HSV)1.0.*" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - features "1 queue_if_no_path" - hardware_handler "1 hp_sw" - prio "hp_sw" - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "(COMPAQ|HP)" - product "MSA VOLUME" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "(COMPAQ|HP)" - product "HSV1[01]1|HSV2[01]0|HSV3[046]0|HSV4[05]0" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA2[02]12fc|MSA2012i" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - 
device { - vendor "HP" - product "MSA2012sa|MSA23(12|24)(fc|i|sa)|MSA2000s VOLUME" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA (1|2)040 SA(N|S)" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "HSVX700" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "HP" - product "LOGICAL VOLUME.*" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 12 - } - device { - vendor "HP" - product "P2000 G3 FC|P2000G3 FC/iSCSI|P2000 G3 SAS|P2000 G3 iSCSI" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "DDN" - product "SAN DataDirector" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "EMC" - product "SYMMETRIX" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 6 - } - device { - vendor "DGC" - product ".*" - product_blacklist "LUNZ" - path_grouping_policy "group_by_prio" - path_checker "emc_clariion" - features "1 queue_if_no_path" - hardware_handler "1 emc" - prio "emc" - failback immediate - rr_weight "uniform" - no_path_retry 60 - retain_attached_hw_handler yes - detect_prio yes - 
detect_path_checker yes - } - device { - vendor "EMC" - product "Invista" - product_blacklist "LUNZ" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 5 - } - device { - vendor "FSC" - product "CentricStor" - path_grouping_policy "group_by_serial" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "FUJITSU" - product "ETERNUS_DX(H|L|M|400|8000)" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 10 - } - device { - vendor "(HITACHI|HP)" - product "OPEN-.*" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "HITACHI" - product "DF.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "hds" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "ProFibre 4000R" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^1722-600" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "1 queue_if_no_path" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1724" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "1 queue_if_no_path" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1726" - product_blacklist "Universal Xport" - path_grouping_policy 
"group_by_prio" - path_checker "rdac" - features "1 queue_if_no_path" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1742" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1745|^1746" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 15 - } - device { - vendor "IBM" - product "^1814" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1815" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1818" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^3526" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^3542" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight 
"uniform" - } - device { - vendor "IBM" - product "^2105800" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2105F20" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^1750500" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2107900" - path_grouping_policy "multibus" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2145" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "S/390 DASD ECKD" - product_blacklist "S/390.*" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "S/390 DASD FBA" - product_blacklist "S/390.*" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^IPR.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "1820N00" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate 
- rr_weight "uniform" - no_path_retry "queue" - rr_min_io 100 - } - device { - vendor "IBM" - product "2810XIV" - path_grouping_policy "multibus" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - failback 15 - rr_weight "uniform" - rr_min_io 15 - } - device { - vendor "AIX" - product "VDASD" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "IBM" - product "3303 NVDISK" - path_grouping_policy "failover" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "AIX" - product "NVDISK" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "DELL" - product "^MD3" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 30 - } - device { - vendor "LENOVO" - product "DE_Series" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - no_path_retry 30 - } - device { - vendor "NETAPP" - product "LUN.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "3 queue_if_no_path pg_init_retries 50" - hardware_handler "0" - prio "ontap" - failback immediate - rr_weight "uniform" - rr_min_io 128 - flush_on_last_del "yes" - dev_loss_tmo "infinity" - user_friendly_names no - retain_attached_hw_handler yes - detect_prio yes - } - device { - vendor "NEXENTA" - product "COMSTAR" - path_grouping_policy "group_by_serial" 
- path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 30 - rr_min_io 128 - } - device { - vendor "IBM" - product "Nseries.*" - path_grouping_policy "group_by_prio" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "ontap" - failback immediate - rr_weight "uniform" - rr_min_io 128 - } - device { - vendor "Pillar" - product "Axiom.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - rr_weight "uniform" - } - device { - vendor "SGI" - product "TP9[13]00" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "SGI" - product "TP9[45]00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SGI" - product "IS.*" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 15 - } - device { - vendor "NEC" - product "DISK ARRAY" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "STK" - product "OPENstorage D280" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - } - device { - vendor "SUN" - product "(StorEdge 3510|T4)" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - 
rr_weight "uniform" - } - device { - vendor "SUN" - product "STK6580_6780" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - } - device { - vendor "EUROLOGC" - product "FC2502" - path_grouping_policy "group_by_prio" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "PIVOT3" - product "RAIGE VOLUME" - path_grouping_policy "multibus" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - rr_min_io 100 - } - device { - vendor "SUN" - product "CSM200_R" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SUN" - product "LCSM100_[IEFS]" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SUN" - product "SUN_6180" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - rr_min_io 1000 - rr_min_io_rq 1 - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "INF-01-00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 30 - retain_attached_hw_handler yes - detect_prio yes - } - device { - vendor "STK" - product "FLEXLINE 380" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" 
- hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "Intel" - product "Multi-Flex" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "SANmelody" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "Virtual Disk" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "NFINIDAT" - product "InfiniBox.*" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback 30 - rr_weight "priorities" - no_path_retry "fail" - flush_on_last_del "yes" - dev_loss_tmo 30 - } - device { - vendor "Nimble" - product "Server" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "1 alua" - prio "alua" - failback immediate - dev_loss_tmo "infinity" - } - device { - vendor "XtremIO" - product "XtremApp" - path_grouping_policy "multibus" - path_selector "queue-length 0" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - failback immediate - fast_io_fail_tmo 15 - } - device { - vendor "PURE" - product "FlashArray" - path_grouping_policy "multibus" - path_selector "queue-length 0" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - fast_io_fail_tmo 10 - dev_loss_tmo 60 - user_friendly_names no - } - device { - vendor "HUAWEI" - product "XSG1" - path_grouping_policy 
"group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - dev_loss_tmo 30 - } - device { - vendor "NVME" - product "^EMC PowerMax_" - path_grouping_policy "multibus" - uid_attribute "ID_WWN" - path_checker "none" - prio "const" - } - device { - vendor "NVME" - product ".*" - path_grouping_policy "multibus" - uid_attribute "ID_WWN" - path_checker "none" - detect_prio yes - } - device { - vendor "IBM" - product "^2145" - path_selector "service-time 0" - } -} -multipaths { -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/default_rhel8.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/default_rhel8.conf deleted file mode 100644 index 62f889dc..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/default_rhel8.conf +++ /dev/null @@ -1,1049 +0,0 @@ -defaults { - verbosity 2 - polling_interval 5 - max_polling_interval 20 - reassign_maps "no" - multipath_dir "/lib64/multipath" - path_selector "service-time 0" - path_grouping_policy "failover" - uid_attribute "ID_SERIAL" - prio "const" - prio_args "" - features "0" - path_checker "tur" - alias_prefix "mpath" - failback "manual" - rr_min_io 1000 - rr_min_io_rq 1 - max_fds "max" - rr_weight "uniform" - queue_without_daemon "no" - flush_on_last_del "no" - user_friendly_names "yes" - fast_io_fail_tmo 5 - bindings_file "/etc/multipath/bindings" - wwids_file "/etc/multipath/wwids" - prkeys_file "/etc/multipath/prkeys" - log_checker_err always - all_tg_pt "no" - retain_attached_hw_handler "yes" - detect_prio "yes" - detect_checker "yes" - force_sync "yes" - strict_timing "no" - deferred_remove "no" - config_dir "/etc/multipath/conf.d" - delay_watch_checks "no" - delay_wait_checks "no" - san_path_err_threshold "no" - san_path_err_forget_rate "no" - san_path_err_recovery_time "no" - marginal_path_err_sample_time "no" - marginal_path_err_rate_threshold "no" - 
marginal_path_err_recheck_gap_time "no" - marginal_path_double_failed_time "no" - find_multipaths "on" - uxsock_timeout 4000 - retrigger_tries 0 - retrigger_delay 10 - missing_uev_wait_timeout 30 - skip_kpartx "no" - disable_changed_wwids ignored - remove_retries 0 - ghost_delay "no" - find_multipaths_timeout -10 - enable_foreign "^$" - marginal_pathgroups "no" -} -blacklist { - devnode "^(ram|zram|raw|loop|fd|md|dm-|sr|scd|st|dcssblk)[0-9]" - devnode "^(td|hd|vd)[a-z]" - device { - vendor "SGI" - product "Universal Xport" - } - device { - vendor "^DGC" - product "LUNZ" - } - device { - vendor "EMC" - product "LUNZ" - } - device { - vendor "DELL" - product "Universal Xport" - } - device { - vendor "IBM" - product "Universal Xport" - } - device { - vendor "IBM" - product "S/390" - } - device { - vendor "LENOVO" - product "Universal Xport" - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "Universal Xport" - } - device { - vendor "STK" - product "Universal Xport" - } - device { - vendor "SUN" - product "Universal Xport" - } - device { - vendor "(Intel|INTEL)" - product "VTrak V-LUN" - } - device { - vendor "Promise" - product "VTrak V-LUN" - } - device { - vendor "Promise" - product "Vess V-LUN" - } -} -blacklist_exceptions { - property "(SCSI_IDENT_|ID_WWN)" -} -devices { - device { - vendor "NVME" - product ".*" - uid_attribute "ID_WWN" - path_checker "none" - retain_attached_hw_handler "no" - } - device { - vendor "APPLE" - product "Xserve RAID" - path_grouping_policy "multibus" - } - device { - vendor "3PARdata" - product "VV" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 18 - fast_io_fail_tmo 10 - dev_loss_tmo "infinity" - vpd_vendor hp3par - } - device { - vendor "DEC" - product "HSG80" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - hardware_handler "1 hp_sw" - prio "hp_sw" - no_path_retry "queue" - } - device { - vendor "HP" - product "A6189A" - 
path_grouping_policy "multibus" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "(MSA|HSV)1[01]0" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - hardware_handler "1 hp_sw" - prio "hp_sw" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "MSA VOLUME" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "(HSV1[01]1|HSV2[01]0|HSV3[046]0|HSV4[05]0)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 12 - } - device { - vendor "HP" - product "(MSA2[02]12fc|MSA2012i)" - path_grouping_policy "multibus" - no_path_retry 18 - } - device { - vendor "HP" - product "(MSA2012sa|MSA23(12|24)(fc|i|sa)|MSA2000s VOLUME)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 18 - } - device { - vendor "HP" - product "MSA [12]0[45]0 SA[NS]" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 18 - } - device { - vendor "HP" - product "HSVX700" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 12 - } - device { - vendor "HP" - product "LOGICAL VOLUME" - path_grouping_policy "multibus" - no_path_retry 12 - } - device { - vendor "HP" - product "(P2000 G3 FC|P2000G3 FC/iSCSI|P2000 G3 SAS|P2000 G3 iSCSI)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 18 - } - device { - vendor "LEFTHAND" - product "(P4000|iSCSIDisk|FCDISK)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 18 - } - device { - vendor "Nimble" - product "Server" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "SGI" - product "TP9100" - path_grouping_policy "multibus" - } - device { - vendor "SGI" - product 
"TP9[3457]00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SGI" - product "IS" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SGI" - product "^DD[46]A-" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "DDN" - product "SAN DataDirector" - path_grouping_policy "multibus" - } - device { - vendor "DDN" - product "^EF3010" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "DDN" - product "^(EF3015|S2A|SFA)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "NEXENTA" - product "COMSTAR" - path_grouping_policy "group_by_serial" - no_path_retry 30 - } - device { - vendor "TEGILE" - product "(ZEBI-(FC|ISCSI)|INTELLIFLASH)" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 10 - } - device { - vendor "EMC" - product "SYMMETRIX" - path_grouping_policy "multibus" - no_path_retry 6 - } - device { - vendor "^DGC" - product "^(RAID|DISK|VRAID)" - product_blacklist "LUNZ" - path_grouping_policy "group_by_prio" - path_checker "emc_clariion" - hardware_handler "1 emc" - prio "emc" - failback "immediate" - no_path_retry 60 - } - device { - vendor "EMC" - product "Invista" - product_blacklist "LUNZ" - path_grouping_policy "multibus" - no_path_retry 5 - } - device { - vendor "XtremIO" - product "XtremApp" - path_grouping_policy "multibus" - } - device { - vendor "COMPELNT" - product "Compellent Vol" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device 
{ - vendor "DELL" - product "^MD3" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "NVME" - product "^EMC PowerMax_" - path_grouping_policy "multibus" - } - device { - vendor "FSC" - product "CentricStor" - path_grouping_policy "group_by_serial" - } - device { - vendor "FUJITSU" - product "ETERNUS_DX(H|L|M|400|8000)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 10 - } - device { - vendor "(EUROLOGC|EuroLogc)" - product "FC2502" - path_grouping_policy "multibus" - } - device { - vendor "FUJITSU" - product "E[234]000" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 10 - } - device { - vendor "FUJITSU" - product "E[68]000" - path_grouping_policy "multibus" - no_path_retry 10 - } - device { - vendor "(HITACHI|HP)" - product "^OPEN-" - path_grouping_policy "multibus" - } - device { - vendor "HITACHI" - product "^DF" - path_grouping_policy "group_by_prio" - prio "hds" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "HITACHI" - product "^DF600F" - path_grouping_policy "multibus" - } - device { - vendor "IBM" - product "ProFibre 4000R" - path_grouping_policy "multibus" - } - device { - vendor "IBM" - product "^1722-600" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1724" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1726" - product_blacklist "Universal 
Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1742" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1746" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1813" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1814" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1815" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1818" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^3526" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" 
- failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^(3542|3552)" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^2105" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1750500" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^2107900" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^2145" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "IBM" - product "S/390 DASD ECKD" - product_blacklist "S/390" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - no_path_retry "queue" - } - device { - vendor "IBM" - product "S/390 DASD FBA" - product_blacklist "S/390" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^IPR" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "IBM" - product "1820N00" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "(XIV|IBM)" - product "(NEXTRA|2810XIV)" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "(TMS|IBM)" - product "(RamSan|FlashSystem)" - path_grouping_policy "multibus" - } - device { - vendor "IBM" - product "^(DCS9900|2851)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "AIX" - product "VDASD" - path_grouping_policy 
"multibus" - no_path_retry 60 - } - device { - vendor "IBM" - product "3303[ ]+NVDISK" - no_path_retry 60 - } - device { - vendor "AIX" - product "NVDISK" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 60 - } - device { - vendor "LENOVO" - product "DE_Series" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "NETAPP" - product "LUN" - path_grouping_policy "group_by_prio" - features "2 pg_init_retries 50" - prio "ontap" - failback "immediate" - no_path_retry "queue" - flush_on_last_del "yes" - dev_loss_tmo "infinity" - user_friendly_names "no" - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "INF-01-00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SolidFir" - product "SSD SAN" - path_grouping_policy "multibus" - no_path_retry 24 - } - device { - vendor "NVME" - product "^NetApp ONTAP Controller" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "NEC" - product "DISK ARRAY" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - } - device { - vendor "^Pillar" - product "^Axiom" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - } - device { - vendor "^Oracle" - product "^Oracle FS" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - } - device { - vendor "STK" - product "BladeCtlr" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback 
"immediate" - no_path_retry 30 - } - device { - vendor "STK" - product "OPENstorage" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "STK" - product "FLEXLINE 380" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "StorEdge 3" - path_grouping_policy "multibus" - } - device { - vendor "SUN" - product "STK6580_6780" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "CSM[12]00_R" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "LCSM100_[IEFS]" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "SUN_6180" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "ArrayStorage" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" 
- no_path_retry 30 - } - device { - vendor "SUN" - product "(Sun Storage|ZFS Storage|COMSTAR)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "PIVOT3" - product "RAIGE VOLUME" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "(NexGen|Pivot3)" - product "(TierStore|vSTAC)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "(Intel|INTEL)" - product "Multi-Flex" - product_blacklist "VTrak V-LUN" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "(LIO-ORG|SUSE)" - product "RBD" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 12 - } - device { - vendor "DataCore" - product "SANmelody" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "Virtual Disk" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "PURE" - product "FlashArray" - path_grouping_policy "multibus" - } - device { - vendor "HUAWEI" - product "XSG1" - path_grouping_policy "group_by_prio" - prio "alua" - } - device { - vendor "KOVE" - product "XPD" - path_grouping_policy "multibus" - } - device { - vendor "NFINIDAT" - product "InfiniBox" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - prio "alua" - failback 30 - rr_weight "priorities" - no_path_retry "fail" - rr_min_io 1 - rr_min_io_rq 1 - flush_on_last_del "yes" - fast_io_fail_tmo 15 - dev_loss_tmo 15 - } - device { - vendor "KMNRIO" - product "K2" - path_grouping_policy "multibus" - } - device { - vendor "NEXSAN" - product "NXS-B0" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 
15 - } - device { - vendor "NEXSAN" - product "SATAB" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 15 - } - device { - vendor "Nexsan" - product "(NestOS|NST5000)" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "VIOLIN" - product "SAN ARRAY$" - path_grouping_policy "group_by_serial" - no_path_retry 30 - } - device { - vendor "VIOLIN" - product "SAN ARRAY ALUA" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "VIOLIN" - product "CONCERTO ARRAY" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "(XIOTECH|XIOtech)" - product "ISE" - path_grouping_policy "multibus" - no_path_retry 12 - } - device { - vendor "(XIOTECH|XIOtech)" - product "IGLU DISK" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "(XIOTECH|XIOtech)" - product "Magnitude" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "Promise" - product "VTrak" - product_blacklist "VTrak V-LUN" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "Promise" - product "Vess" - product_blacklist "Vess V-LUN" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "^IFT" - product ".*" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "DotHill" - product "SANnet" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "DotHill" - product "R/Evo" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "DotHill" - product "^DH" - path_grouping_policy "group_by_prio" - 
prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "AStor" - product "NeoSapphire" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "INSPUR" - product "MCS" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - } -} -overrides { -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/empty.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/empty.conf deleted file mode 100644 index e69de29b..00000000 diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/just_all_devs.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/just_all_devs.conf deleted file mode 100644 index 4a34b7bf..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/just_all_devs.conf +++ /dev/null @@ -1,5 +0,0 @@ -devices { - device { - all_devs yes - } -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/just_checker.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/just_checker.conf deleted file mode 100644 index 0b3462e4..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/just_checker.conf +++ /dev/null @@ -1,1049 +0,0 @@ -defaults { - verbosity 2 - polling_interval 5 - max_polling_interval 20 - reassign_maps "no" - multipath_dir "/lib64/multipath" - path_selector "service-time 0" - path_grouping_policy "failover" - uid_attribute "ID_SERIAL" - prio "const" - prio_args "" - features "0" - checker "rdac" - alias_prefix "mpath" - failback "manual" - rr_min_io 1000 - rr_min_io_rq 1 - max_fds "max" - rr_weight "uniform" - queue_without_daemon "no" - flush_on_last_del "no" - user_friendly_names "yes" - fast_io_fail_tmo 5 - bindings_file "/etc/multipath/bindings" - wwids_file "/etc/multipath/wwids" - prkeys_file "/etc/multipath/prkeys" - 
log_checker_err always - all_tg_pt "no" - retain_attached_hw_handler "yes" - detect_prio "yes" - detect_checker "yes" - force_sync "yes" - strict_timing "no" - deferred_remove "no" - config_dir "/etc/multipath/conf.d" - delay_watch_checks "no" - delay_wait_checks "no" - san_path_err_threshold "no" - san_path_err_forget_rate "no" - san_path_err_recovery_time "no" - marginal_path_err_sample_time "no" - marginal_path_err_rate_threshold "no" - marginal_path_err_recheck_gap_time "no" - marginal_path_double_failed_time "no" - find_multipaths "on" - uxsock_timeout 4000 - retrigger_tries 0 - retrigger_delay 10 - missing_uev_wait_timeout 30 - skip_kpartx "no" - disable_changed_wwids ignored - remove_retries 0 - ghost_delay "no" - find_multipaths_timeout -10 - enable_foreign "^$" - marginal_pathgroups "no" -} -blacklist { - devnode "^(ram|zram|raw|loop|fd|md|dm-|sr|scd|st|dcssblk)[0-9]" - devnode "^(td|hd|vd)[a-z]" - device { - vendor "SGI" - product "Universal Xport" - } - device { - vendor "^DGC" - product "LUNZ" - } - device { - vendor "EMC" - product "LUNZ" - } - device { - vendor "DELL" - product "Universal Xport" - } - device { - vendor "IBM" - product "Universal Xport" - } - device { - vendor "IBM" - product "S/390" - } - device { - vendor "LENOVO" - product "Universal Xport" - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "Universal Xport" - } - device { - vendor "STK" - product "Universal Xport" - } - device { - vendor "SUN" - product "Universal Xport" - } - device { - vendor "(Intel|INTEL)" - product "VTrak V-LUN" - } - device { - vendor "Promise" - product "VTrak V-LUN" - } - device { - vendor "Promise" - product "Vess V-LUN" - } -} -blacklist_exceptions { - property "(SCSI_IDENT_|ID_WWN)" -} -devices { - device { - vendor "NVME" - product ".*" - uid_attribute "ID_WWN" - path_checker "none" - retain_attached_hw_handler "no" - } - device { - vendor "APPLE" - product "Xserve RAID" - path_grouping_policy "multibus" - } - device { - vendor "3PARdata" - 
product "VV" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 18 - fast_io_fail_tmo 10 - dev_loss_tmo "infinity" - vpd_vendor hp3par - } - device { - vendor "DEC" - product "HSG80" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - hardware_handler "1 hp_sw" - prio "hp_sw" - no_path_retry "queue" - } - device { - vendor "HP" - product "A6189A" - path_grouping_policy "multibus" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "(MSA|HSV)1[01]0" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - hardware_handler "1 hp_sw" - prio "hp_sw" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "MSA VOLUME" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "(HSV1[01]1|HSV2[01]0|HSV3[046]0|HSV4[05]0)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 12 - } - device { - vendor "HP" - product "(MSA2[02]12fc|MSA2012i)" - path_grouping_policy "multibus" - no_path_retry 18 - } - device { - vendor "HP" - product "(MSA2012sa|MSA23(12|24)(fc|i|sa)|MSA2000s VOLUME)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 18 - } - device { - vendor "HP" - product "MSA [12]0[45]0 SA[NS]" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 18 - } - device { - vendor "HP" - product "HSVX700" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 12 - } - device { - vendor "HP" - product "LOGICAL VOLUME" - path_grouping_policy "multibus" - no_path_retry 12 - } - device { - vendor "HP" - product "(P2000 G3 FC|P2000G3 FC/iSCSI|P2000 G3 SAS|P2000 G3 iSCSI)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 18 - } - device { - vendor "LEFTHAND" - 
product "(P4000|iSCSIDisk|FCDISK)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 18 - } - device { - vendor "Nimble" - product "Server" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "SGI" - product "TP9100" - path_grouping_policy "multibus" - } - device { - vendor "SGI" - product "TP9[3457]00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SGI" - product "IS" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SGI" - product "^DD[46]A-" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "DDN" - product "SAN DataDirector" - path_grouping_policy "multibus" - } - device { - vendor "DDN" - product "^EF3010" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "DDN" - product "^(EF3015|S2A|SFA)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "NEXENTA" - product "COMSTAR" - path_grouping_policy "group_by_serial" - no_path_retry 30 - } - device { - vendor "TEGILE" - product "(ZEBI-(FC|ISCSI)|INTELLIFLASH)" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 10 - } - device { - vendor "EMC" - product "SYMMETRIX" - path_grouping_policy "multibus" - no_path_retry 6 - } - device { - vendor "^DGC" - product "^(RAID|DISK|VRAID)" - product_blacklist "LUNZ" - path_grouping_policy "group_by_prio" - path_checker "emc_clariion" 
- hardware_handler "1 emc" - prio "emc" - failback "immediate" - no_path_retry 60 - } - device { - vendor "EMC" - product "Invista" - product_blacklist "LUNZ" - path_grouping_policy "multibus" - no_path_retry 5 - } - device { - vendor "XtremIO" - product "XtremApp" - path_grouping_policy "multibus" - } - device { - vendor "COMPELNT" - product "Compellent Vol" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "DELL" - product "^MD3" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "NVME" - product "^EMC PowerMax_" - path_grouping_policy "multibus" - } - device { - vendor "FSC" - product "CentricStor" - path_grouping_policy "group_by_serial" - } - device { - vendor "FUJITSU" - product "ETERNUS_DX(H|L|M|400|8000)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 10 - } - device { - vendor "(EUROLOGC|EuroLogc)" - product "FC2502" - path_grouping_policy "multibus" - } - device { - vendor "FUJITSU" - product "E[234]000" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 10 - } - device { - vendor "FUJITSU" - product "E[68]000" - path_grouping_policy "multibus" - no_path_retry 10 - } - device { - vendor "(HITACHI|HP)" - product "^OPEN-" - path_grouping_policy "multibus" - } - device { - vendor "HITACHI" - product "^DF" - path_grouping_policy "group_by_prio" - prio "hds" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "HITACHI" - product "^DF600F" - path_grouping_policy "multibus" - } - device { - vendor "IBM" - product "ProFibre 4000R" - path_grouping_policy "multibus" - } - device { - vendor "IBM" - product "^1722-600" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 
50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1724" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1726" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1742" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1746" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1813" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1814" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1815" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^1818" - 
product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^3526" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^(3542|3552)" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "IBM" - product "^2105" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1750500" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^2107900" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^2145" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "IBM" - product "S/390 DASD ECKD" - product_blacklist "S/390" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - no_path_retry "queue" - } - device { - vendor "IBM" - product "S/390 DASD FBA" - product_blacklist "S/390" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^IPR" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "IBM" - product "1820N00" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - 
vendor "(XIV|IBM)" - product "(NEXTRA|2810XIV)" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "(TMS|IBM)" - product "(RamSan|FlashSystem)" - path_grouping_policy "multibus" - } - device { - vendor "IBM" - product "^(DCS9900|2851)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "AIX" - product "VDASD" - path_grouping_policy "multibus" - no_path_retry 60 - } - device { - vendor "IBM" - product "3303[ ]+NVDISK" - no_path_retry 60 - } - device { - vendor "AIX" - product "NVDISK" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 60 - } - device { - vendor "LENOVO" - product "DE_Series" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "NETAPP" - product "LUN" - path_grouping_policy "group_by_prio" - features "2 pg_init_retries 50" - prio "ontap" - failback "immediate" - no_path_retry "queue" - flush_on_last_del "yes" - dev_loss_tmo "infinity" - user_friendly_names "no" - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "INF-01-00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SolidFir" - product "SSD SAN" - path_grouping_policy "multibus" - no_path_retry 24 - } - device { - vendor "NVME" - product "^NetApp ONTAP Controller" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "NEC" - product "DISK ARRAY" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - } - device { - vendor "^Pillar" - product "^Axiom" - path_grouping_policy 
"group_by_prio" - prio "alua" - failback "immediate" - } - device { - vendor "^Oracle" - product "^Oracle FS" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - } - device { - vendor "STK" - product "BladeCtlr" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "STK" - product "OPENstorage" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "STK" - product "FLEXLINE 380" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "StorEdge 3" - path_grouping_policy "multibus" - } - device { - vendor "SUN" - product "STK6580_6780" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "CSM[12]00_R" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "LCSM100_[IEFS]" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "SUN_6180" - product_blacklist "Universal Xport" - 
path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "ArrayStorage" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback "immediate" - no_path_retry 30 - } - device { - vendor "SUN" - product "(Sun Storage|ZFS Storage|COMSTAR)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "PIVOT3" - product "RAIGE VOLUME" - path_grouping_policy "multibus" - no_path_retry "queue" - } - device { - vendor "(NexGen|Pivot3)" - product "(TierStore|vSTAC)" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "(Intel|INTEL)" - product "Multi-Flex" - product_blacklist "VTrak V-LUN" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "(LIO-ORG|SUSE)" - product "RBD" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 12 - } - device { - vendor "DataCore" - product "SANmelody" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "Virtual Disk" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry "queue" - } - device { - vendor "PURE" - product "FlashArray" - path_grouping_policy "multibus" - } - device { - vendor "HUAWEI" - product "XSG1" - path_grouping_policy "group_by_prio" - prio "alua" - } - device { - vendor "KOVE" - product "XPD" - path_grouping_policy "multibus" - } - device { - vendor "NFINIDAT" - product "InfiniBox" - path_grouping_policy "group_by_prio" - 
path_selector "round-robin 0" - prio "alua" - failback 30 - rr_weight "priorities" - no_path_retry "fail" - rr_min_io 1 - rr_min_io_rq 1 - flush_on_last_del "yes" - fast_io_fail_tmo 15 - dev_loss_tmo 15 - } - device { - vendor "KMNRIO" - product "K2" - path_grouping_policy "multibus" - } - device { - vendor "NEXSAN" - product "NXS-B0" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 15 - } - device { - vendor "NEXSAN" - product "SATAB" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 15 - } - device { - vendor "Nexsan" - product "(NestOS|NST5000)" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "VIOLIN" - product "SAN ARRAY$" - path_grouping_policy "group_by_serial" - no_path_retry 30 - } - device { - vendor "VIOLIN" - product "SAN ARRAY ALUA" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "VIOLIN" - product "CONCERTO ARRAY" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "(XIOTECH|XIOtech)" - product "ISE" - path_grouping_policy "multibus" - no_path_retry 12 - } - device { - vendor "(XIOTECH|XIOtech)" - product "IGLU DISK" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "(XIOTECH|XIOtech)" - product "Magnitude" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "Promise" - product "VTrak" - product_blacklist "VTrak V-LUN" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "Promise" - product "Vess" - product_blacklist "Vess V-LUN" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "^IFT" - product ".*" - 
path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "DotHill" - product "SANnet" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "DotHill" - product "R/Evo" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "DotHill" - product "^DH" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - no_path_retry 30 - } - device { - vendor "AStor" - product "NeoSapphire" - path_grouping_policy "multibus" - no_path_retry 30 - } - device { - vendor "INSPUR" - product "MCS" - path_grouping_policy "group_by_prio" - prio "alua" - failback "immediate" - } -} -overrides { -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/just_detect.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/just_detect.conf deleted file mode 100644 index b68733c5..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/just_detect.conf +++ /dev/null @@ -1,3 +0,0 @@ -defaults { - detect_prio 0 -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/just_exists.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/just_exists.conf deleted file mode 100644 index ac84ba87..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/just_exists.conf +++ /dev/null @@ -1,32 +0,0 @@ -# device-mapper-multipath configuration file - -# For a complete list of the default configuration values, run either: -# # multipath -t -# or -# # multipathd show config - -# For a list of configuration options with descriptions, see the -# multipath.conf man page. 
- -defaults { - user_friendly_names yes - find_multipaths yes -} - -devices { - device { - vendor "Foo" - product "Bar" - features "1 queue_if_no_path" - path_grouping_policy "group_by_prio" - hardware_handler "1 alua" - prio "alua" - } -} - -blacklist_exceptions { - property "(SCSI_IDENT_|ID_WWN)" -} - -blacklist { -} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/just_reassign.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/just_reassign.conf deleted file mode 100644 index cbd4399e..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/just_reassign.conf +++ /dev/null @@ -1,93 +0,0 @@ -# This is a basic configuration file with some examples, for device mapper -# multipath. -# -# For a complete list of the default configuration values, run either -# multipath -t -# or -# multipathd show config -# -# For a list of configuration options with descriptions, see the multipath.conf -# man page - -## By default, devices with vendor = "IBM" and product = "S/390.*" are -## blacklisted. To enable mulitpathing on these devies, uncomment the -## following lines. -#blacklist_exceptions { -# device { -# vendor "IBM" -# product "S/390.*" -# } -#} - -## Use user friendly names, instead of using WWIDs as names. -defaults { - user_friendly_names yes - find_multipaths yes - reassign_maps "yes" -} -## -## Here is an example of how to configure some standard options. -## -# -#defaults { -# polling_interval 10 -# path_selector "round-robin 0" -# path_grouping_policy multibus -# uid_attribute ID_SERIAL -# prio alua -# path_checker readsector0 -# rr_min_io 100 -# max_fds 8192 -# rr_weight priorities -# failback immediate -# no_path_retry fail -# user_friendly_names yes -#} -## -## The wwid line in the following blacklist section is shown as an example -## of how to blacklist devices by wwid. The 2 devnode lines are the -## compiled in default blacklist. 
If you want to blacklist entire types -## of devices, such as all scsi devices, you should use a devnode line. -## However, if you want to blacklist specific devices, you should use -## a wwid line. Since there is no guarantee that a specific device will -## not change names on reboot (from /dev/sda to /dev/sdb for example) -## devnode lines are not recommended for blacklisting specific devices. -## -#blacklist { -# wwid 26353900f02796769 -# devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" -# devnode "^hd[a-z]" -#} -#multipaths { -# multipath { -# wwid 3600508b4000156d700012000000b0000 -# alias yellow -# path_grouping_policy multibus -# path_selector "round-robin 0" -# failback manual -# rr_weight priorities -# no_path_retry 5 -# } -# multipath { -# wwid 1DEC_____321816758474 -# alias red -# } -#} -#devices { -# device { -# vendor "COMPAQ " -# product "HSV110 (C)COMPAQ" -# path_grouping_policy multibus -# path_checker readsector0 -# path_selector "round-robin 0" -# hardware_handler "0" -# failback 15 -# rr_weight priorities -# no_path_retry queue -# } -# device { -# vendor "COMPAQ " -# product "MSA1000 " -# path_grouping_policy multibus -# } -#} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/ugly1.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/ugly1.conf deleted file mode 100644 index ac0d12b4..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/ugly1.conf +++ /dev/null @@ -1,1055 +0,0 @@ -defaults THIS SHOULDN'T BE HERE - verbosity 2 - polling_interval 5 - max_polling_interval 20 - reassign_maps "yes" - multipath_dir "/lib64/multipath" - path_selector "service-time 0" - path_grouping_policy "failover" - uid_attribute "ID_SERIAL" - prio "const" - prio_args "" - features "0" - path_checker "directio" - alias_prefix "mpath" - failback "manual" - rr_min_io 1000 - rr_min_io_rq 1 - max_fds 1048576 - rr_weight "uniform" - queue_without_daemon "no" - 
flush_on_last_del "no" - user_friendly_names "yes" - fast_io_fail_tmo 5 - bindings_file "/etc/multipath/bindings" - wwids_file /etc/multipath/wwids - prkeys_file /etc/multipath/prkeys - log_checker_err always - find_multipaths yes - retain_attached_hw_handler no - detect_prio no - detect_path_checker no - hw_str_match no - force_sync no - deferred_remove no - ignore_new_boot_devs no - skip_kpartx no - config_dir "tests/files/conf.d" - delay_watch_checks no - delay_wait_checks no - retrigger_tries 3 - retrigger_delay 10 - missing_uev_wait_timeout 30 - new_bindings_in_boot no - remove_retries 0 - disable_changed_wwids no - unpriv_sgio no - ghost_delay no - all_tg_pt no - marginal_path_err_sample_time no - marginal_path_err_rate_threshold no - marginal_path_err_recheck_gap_time no - marginal_path_double_failed_time no -} -blacklist { - devnode "sdb" - devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" - devnode "^(td|hd|vd)[a-z]" - devnode "^dcssblk[0-9]*" - device - vendor "DGC" - product "LUNZ" - } - device { - vendor "EMC" - product "LUNZ" - } - device { - vendor "IBM" - product "Universal Xport" - } - device { - vendor "IBM" - product "S/390.*" - } - device { - vendor "DELL" - product "Universal Xport" - } - device { - vendor "LENOVO" - product "Universal Xport" - } - device { - vendor "SGI" - product "Universal Xport" - } - device { - vendor "STK" - product "Universal Xport" - } - device { - vendor "SUN" - product "Universal Xport" - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "Universal Xport" - } -} -blacklist_exceptions { - devnode "sda" - wwid "123456789" - device { - vendor "IBM" - product "S/390x" - } -} - -devices { BAD DATA - device { - vendor "COMPELNT" - product "Compellent Vol" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "APPLE*" - product "Xserve RAID " - path_grouping_policy "multibus" 
- path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "3PARdata" - product "VV" - path_grouping_policy "group_by_prio" - path_selector "service-time 0" - path_checker "tur" - features "0" - hardware_handler "1 alua" EXTRA DATA - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - fast_io_fail_tmo 10 - dev_loss_tmo "infinity" - } - device { - vendor "DEC" - product "HSG80" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - features "1 queue_if_no_path" - hardware_handler "1 hp_sw" - prio "hp_sw" - rr_weight "uniform" - } - device { - vendor "HP" - product "A6189A" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 12 - } - device { - vendor "(COMPAQ|HP)" - product "(MSA|HSV)1.0.*" - path_grouping_policy "group_by_prio" - path_checker "hp_sw" - features "1 queue_if_no_path" - hardware_handler "1 hp_sw" - prio "hp_sw" - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "(COMPAQ|HP)" - product "MSA VOLUME" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "(COMPAQ|HP)" - product "HSV1[01]1|HSV2[01]0|HSV3[046]0|HSV4[05]0" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA2[02]12fc|MSA2012i" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA2012sa|MSA23(12|24)(fc|i|sa)|MSA2000s VOLUME" - path_grouping_policy 
"group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "MSA (1|2)040 SA(N|S)" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "HP" - product "HSVX700" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 12 - rr_min_io 100 - } - device { - vendor "HP" - product "LOGICAL VOLUME.*" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 12 - } - device { - vendor "HP" - product "P2000 G3 FC|P2000G3 FC/iSCSI|P2000 G3 SAS|P2000 G3 iSCSI" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 18 - rr_min_io 100 - } - device { - vendor "DDN" - product "SAN DataDirector" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "EMC" - product "SYMMETRIX" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 6 - } - device { - vendor "DGC" - product ".*" - product_blacklist "LUNZ" - path_grouping_policy "group_by_prio" - path_checker "emc_clariion" - features "1 queue_if_no_path" - hardware_handler "1 emc" - prio "emc" - failback immediate - rr_weight "uniform" - no_path_retry 60 - retain_attached_hw_handler yes - detect_prio yes - detect_path_checker yes - } - device { - vendor "EMC" - product "Invista" - product_blacklist "LUNZ" - 
path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - no_path_retry 5 - } - device { - vendor "FSC" - product "CentricStor" - path_grouping_policy "group_by_serial" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "FUJITSU" - product "ETERNUS_DX(H|L|M|400|8000)" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 10 - } - device { - vendor "(HITACHI|HP)" - product "OPEN-.*" - path_grouping_policy "multibus" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "HITACHI" - product "DF.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "hds" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "ProFibre 4000R" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^1722-600" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "1 queue_if_no_path" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1724" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "1 queue_if_no_path" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1726" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "1 queue_if_no_path" - hardware_handler "1 rdac" - prio 
"rdac" - failback immediate - rr_weight "uniform" - no_path_retry 300 - } - device { - vendor "IBM" - product "^1742" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1745|^1746" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 15 - } - device { - vendor "IBM" - product "^1814" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1815" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^1818" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^3526" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "IBM" - product "^3542" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2105800" - path_grouping_policy "group_by_serial" - 
path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2105F20" - path_grouping_policy "group_by_serial" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^1750500" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2107900" - path_grouping_policy "multibus" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^2145" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "S/390 DASD ECKD" - product_blacklist "S/390.*" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "S/390 DASD FBA" - product_blacklist "S/390.*" - path_grouping_policy "multibus" - uid_attribute "ID_UID" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "IBM" - product "^IPR.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "IBM" - product "1820N00" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - rr_min_io 100 - } - device { - vendor "IBM" - product 
"2810XIV" - path_grouping_policy "multibus" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - failback 15 - rr_weight "uniform" - rr_min_io 15 - } - device { - vendor "AIX" - product "VDASD" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "IBM" - product "3303 NVDISK" - path_grouping_policy "failover" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "AIX" - product "NVDISK" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry 60 - } - device { - vendor "DELL" - product "^MD3" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 30 - } - device { - vendor "LENOVO" - product "DE_Series" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - no_path_retry 30 - } - device { - vendor "NETAPP" - product "LUN.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "3 queue_if_no_path pg_init_retries 50" - hardware_handler "0" - prio "ontap" - failback immediate - rr_weight "uniform" - rr_min_io 128 - flush_on_last_del "yes" - dev_loss_tmo "infinity" - user_friendly_names no - retain_attached_hw_handler yes - detect_prio yes - } - device { - vendor "NEXENTA" - product "COMSTAR" - path_grouping_policy "group_by_serial" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - 
failback immediate - rr_weight "uniform" - no_path_retry 30 - rr_min_io 128 - } - device { - vendor "IBM" - product "Nseries.*" - path_grouping_policy "group_by_prio" - path_checker "directio" - features "1 queue_if_no_path" - hardware_handler "0" - prio "ontap" - failback immediate - rr_weight "uniform" - rr_min_io 128 - } - device { - vendor "Pillar" - product "Axiom.*" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - rr_weight "uniform" - } - device { - vendor "SGI" - product "TP9[13]00" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "SGI" - product "TP9[45]00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SGI" - product "IS.*" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 15 - } - device { - vendor "NEC" - product "DISK ARRAY" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - } - device { - vendor "STK" - product "OPENstorage D280" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - } - device { - vendor "SUN" - product "(StorEdge 3510|T4)" - path_grouping_policy "multibus" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "SUN" - product "STK6580_6780" - path_grouping_policy 
"group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - } - device { - vendor "EUROLOGC" - product "FC2502" - path_grouping_policy "group_by_prio" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - rr_weight "uniform" - } - device { - vendor "PIVOT3" - product "RAIGE VOLUME" - path_grouping_policy "multibus" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "0" - prio "const" - rr_weight "uniform" - rr_min_io 100 - } - device { - vendor "SUN" - product "CSM200_R" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SUN" - product "LCSM100_[IEFS]" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "SUN" - product "SUN_6180" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - rr_min_io 1000 - rr_min_io_rq 1 - } - device { - vendor "(NETAPP|LSI|ENGENIO)" - product "INF-01-00" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "2 pg_init_retries 50" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - no_path_retry 30 - retain_attached_hw_handler yes - detect_prio yes - } - device { - vendor "STK" - product "FLEXLINE 380" - product_blacklist "Universal Xport" - path_grouping_policy "group_by_prio" - path_checker "rdac" - features "0" - hardware_handler "1 rdac" - prio "rdac" - failback immediate - rr_weight "uniform" - 
no_path_retry "queue" - } - device { - vendor "Intel" - product "Multi-Flex" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "1 alua" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "SANmelody" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "DataCore" - product "Virtual Disk" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback immediate - rr_weight "uniform" - no_path_retry "queue" - } - device { - vendor "NFINIDAT" - product "InfiniBox.*" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - failback 30 - rr_weight "priorities" - no_path_retry "fail" - flush_on_last_del "yes" - dev_loss_tmo 30 - } - device { - vendor "Nimble" - product "Server" - path_grouping_policy "group_by_prio" - path_selector "round-robin 0" - path_checker "tur" - features "1 queue_if_no_path" - hardware_handler "1 alua" - prio "alua" - failback immediate - dev_loss_tmo "infinity" - } - device { - vendor "XtremIO" - product "XtremApp" - path_grouping_policy "multibus" - path_selector "queue-length 0" - path_checker "directio" - features "0" - hardware_handler "0" - prio "const" - failback immediate - fast_io_fail_tmo 15 - } - device { - vendor "PURE" - product "FlashArray" - path_grouping_policy "multibus" - path_selector "queue-length 0" - path_checker "tur" - features "0" - hardware_handler "0" - prio "const" - failback immediate - fast_io_fail_tmo 10 - dev_loss_tmo 60 - user_friendly_names no - } - device { - vendor "HUAWEI" - product "XSG1" - path_grouping_policy "group_by_prio" - path_checker "tur" - features "0" - hardware_handler "0" - prio "alua" - 
failback immediate - dev_loss_tmo 30 - } - device { - vendor "NVME" - product "^EMC PowerMax_" - path_grouping_policy "multibus" - uid_attribute "ID_WWN" - path_checker "none" - prio "const" - } - device { - vendor "NVME" - product ".*" - path_grouping_policy "multibus" - uid_attribute "ID_WWN" - path_checker "none" - detect_prio yes - } - device { - vendor "IBM" - product "^2145" - path_selector "service-time 0" - } - device { - fast_io_fail_tmo 5 - dev_loss_tmo 60 - all_devs yes - no_path_retry fail - detect_path_checker yes - } - device { - path_selector "service-time 0" JUNK IN LINE - features "1 queue_if_no_path" - path_checker "tur" - all_devs yes - } -} -multipaths { - multipath { - wwid "123456789" - alias "foo" - } -} - -overrides - checker "rdac" - detect_path_checker no - hardware_handler "1 alua" - pg_timeout no - fast_io_fail_tmo 10 - unpriv_sgio no - features "3 queue_if_no_path pg_init_retries 50" -# Missing closing brace diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/ugly2.conf b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/ugly2.conf deleted file mode 100644 index d9b5038d..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/files/before/ugly2.conf +++ /dev/null @@ -1,123 +0,0 @@ -# This is a basic configuration file with some examples, for device mapper -# multipath. -# -# For a complete list of the default configuration values, run either -# multipath -t -# or -# multipathd show config -# -# For a list of configuration options with descriptions, see the multipath.conf -# man page - -## By default, devices with vendor = "IBM" and product = "S/390.*" are -## blacklisted. To enable mulitpathing on these devies, uncomment the -## following lines. -#blacklist_exceptions { -# device { -# vendor "IBM" -# product "S/390.*" -# } -#} - -## Use user friendly names, instead of using WWIDs as names. 
-defaults { - user_friendly_names yes - find_multipaths yes -} - -devices { - device { - vendor "NVME" - product ".*" - path_grouping_policy multibus - - } - device { - all_devs yes - path_checker tur - pg_timeout no - detect_path_checker yes - } - - device { - features "3 queue_if_no_path pg_init_retries 50" - path_selector "service-time 0" - all_devs yes - unpriv_sgio no - } - - device { - hardware_handler "1 alua" - vendor "test_vendor" - product "test_product" - revision 1 - product_blacklist "test.*" - all_devs yes - fast_io_fail_tmo 5 - path_checker rdac -# no closing braces - -## -## Here is an example of how to configure some standard options. -## -# -#defaults { -# polling_interval 10 -# path_selector "round-robin 0" -# path_grouping_policy multibus -# uid_attribute ID_SERIAL -# prio alua -# path_checker readsector0 -# rr_min_io 100 -# max_fds 8192 -# rr_weight priorities -# failback immediate -# no_path_retry fail -# user_friendly_names yes -#} -## -## The wwid line in the following blacklist section is shown as an example -## of how to blacklist devices by wwid. The 2 devnode lines are the -## compiled in default blacklist. If you want to blacklist entire types -## of devices, such as all scsi devices, you should use a devnode line. -## However, if you want to blacklist specific devices, you should use -## a wwid line. Since there is no guarantee that a specific device will -## not change names on reboot (from /dev/sda to /dev/sdb for example) -## devnode lines are not recommended for blacklisting specific devices. 
-## -# wwid 26353900f02796769 -# devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" -# devnode "^hd[a-z]" -#multipaths { -# multipath { -# wwid 3600508b4000156d700012000000b0000 -# alias yellow -# path_grouping_policy multibus -# path_selector "round-robin 0" -# failback manual -# rr_weight priorities -# no_path_retry 5 -# } -# multipath { -# wwid 1DEC_____321816758474 -# alias red -# } -#} -#devices { -# device { -# vendor "COMPAQ " -# product "HSV110 (C)COMPAQ" -# path_grouping_policy multibus -# path_checker readsector0 -# path_selector "round-robin 0" -# hardware_handler "0" -# failback 15 -# rr_weight priorities -# no_path_retry queue -# } -# device { -# vendor "COMPAQ " -# product "MSA1000 " -# path_grouping_policy multibus -# } -#} diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/test_library_multipathconfupdate.py b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/test_library_multipathconfupdate.py deleted file mode 100644 index d76eb661..00000000 --- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/tests/test_library_multipathconfupdate.py +++ /dev/null @@ -1,116 +0,0 @@ -import os - -from leapp.libraries.actor import multipathconfupdate -from leapp.libraries.common import multipathutil -from leapp.models import MultipathConfig, MultipathConfigOption - -CUR_DIR = os.path.dirname(os.path.abspath(__file__)) - - -def build_config(val): - all_devs_options_val = [] - for name_val, value_val in val[16]: - option = MultipathConfigOption(name=name_val, value=value_val) - all_devs_options_val.append(option) - return MultipathConfig( - pathname=val[0], - default_path_checker=val[1], - config_dir=val[2], - default_retain_hwhandler=val[3], - default_detect_prio=val[4], - default_detect_checker=val[5], - reassign_maps=val[6], - hw_str_match_exists=val[7], - ignore_new_boot_devs_exists=val[8], - new_bindings_in_boot_exists=val[9], - unpriv_sgio_exists=val[10], - detect_path_checker_exists=val[11], - 
overrides_hwhandler_exists=val[12], - overrides_pg_timeout_exists=val[13], - queue_if_no_path_exists=val[14], - all_devs_section_exists=val[15], - all_devs_options=all_devs_options_val) - - -default_rhel7_conf = build_config( - [os.path.join(CUR_DIR, 'files/before/default_rhel7.conf'), 'directio', os.path.join(CUR_DIR, 'files/conf.d'), - False, False, False, True, True, True, True, True, True, False, False, True, False, [], ]) - -all_devs_conf = build_config( - [os.path.join(CUR_DIR, 'files/before/all_devs.conf'), None, None, None, None, None, None, False, False, False, - True, True, False, False, True, True, - [('path_checker', 'rdac'), ('detect_checker', 'yes'), ('features', '2 pg_init_retries 50'), - ('path_selector', 'service-time 0'), ('fast_io_fail_tmo', '5'), ('no_path_retry', 'queue'), ], ]) - -empty_conf = build_config( - [os.path.join(CUR_DIR, 'files/before/empty.conf'), None, None, None, None, None, None, False, False, False, False, - False, False, False, False, False, [], ]) - -default_rhel8_conf = build_config( - [os.path.join(CUR_DIR, 'files/before/default_rhel8.conf'), 'tur', '/etc/multipath/conf.d', True, True, None, False, - False, False, False, False, False, False, False, False, False, [], ]) - -all_the_things_conf = build_config( - [os.path.join(CUR_DIR, 'files/before/all_the_things.conf'), 'directio', os.path.join(CUR_DIR, 'files/conf.d'), - False, False, False, True, True, True, True, True, True, True, True, True, True, - [('no_path_retry', 'fail'), ('features', '0')], ]) - -already_updated_conf = build_config( - [os.path.join(CUR_DIR, 'files/before/already_updated.conf'), None, os.path.join(CUR_DIR, 'files/conf.d'), None, - None, None, None, False, False, False, False, False, False, False, False, False, [], ]) - -ugly1_conf = build_config( - [os.path.join(CUR_DIR, 'files/before/ugly1.conf'), 'directio', os.path.join(CUR_DIR, 'files/conf.d'), False, False, - False, True, True, True, True, True, True, True, True, True, True, - [('dev_loss_tmo', 
'60'), ('path_selector', 'service-time 0')], ]) - -# same results as all_devs_conf -ugly2_conf = build_config( - [os.path.join(CUR_DIR, 'files/before/ugly2.conf'), None, None, None, None, None, None, False, False, False, True, - True, False, False, True, True, - [('path_checker', 'rdac'), ('detect_checker', 'yes'), ('features', '2 pg_init_retries 50'), - ('path_selector', 'service-time 0'), ('fast_io_fail_tmo', '5'), ('no_path_retry', 'queue'), ], ]) - -just_checker_conf = build_config( - [os.path.join(CUR_DIR, 'files/before/just_checker.conf'), 'rdac', '/etc/multipath/conf.d', True, True, None, False, - False, False, False, False, False, False, False, False, False, [], ]) - -just_detect_conf = build_config( - [os.path.join(CUR_DIR, 'files/before/just_detect.conf'), None, None, None, False, None, None, False, False, False, - False, False, False, False, False, False, [], ]) - -just_reassign_conf = build_config( - [os.path.join(CUR_DIR, 'files/before/just_reassign.conf'), None, None, None, None, None, True, False, False, False, - False, False, False, False, False, False, [], ]) - -just_exists_conf = build_config( - [os.path.join(CUR_DIR, 'files/before/just_exists.conf'), None, None, None, None, None, None, False, False, False, - False, False, False, False, True, False, [], ]) - -just_all_devs_conf = build_config( - [os.path.join(CUR_DIR, 'files/before/just_all_devs.conf'), None, None, None, None, None, None, False, False, False, - False, False, False, False, False, True, [], ]) - - -def test_configs(): - tests = [(default_rhel7_conf, os.path.join(CUR_DIR, 'files/after/default_rhel7.conf'),), - (all_devs_conf, os.path.join(CUR_DIR, 'files/after/all_devs.conf')), (empty_conf, None), - (default_rhel8_conf, None), - (all_the_things_conf, os.path.join(CUR_DIR, 'files/after/all_the_things.conf'),), - (already_updated_conf, None), (ugly1_conf, os.path.join(CUR_DIR, 'files/after/ugly1.conf')), - (ugly2_conf, os.path.join(CUR_DIR, 'files/after/ugly2.conf')), - 
(just_checker_conf, os.path.join(CUR_DIR, 'files/after/just_checker.conf'),), - (just_detect_conf, os.path.join(CUR_DIR, 'files/after/just_detect.conf'),), - (just_reassign_conf, os.path.join(CUR_DIR, 'files/after/just_reassign.conf'),), - (just_exists_conf, os.path.join(CUR_DIR, 'files/after/just_exists.conf'),), - (just_all_devs_conf, os.path.join(CUR_DIR, 'files/after/just_all_devs.conf'),), ] - for config, expected_config in tests: - config_lines = multipathconfupdate._update_config(config) - if config_lines is None: - assert expected_config is None - continue - expected_lines = multipathutil.read_config(expected_config) - assert expected_lines is not None - assert len(config_lines) == len(expected_lines) - for config_line, expected_line in zip(config_lines, expected_lines): - assert config_line == expected_line diff --git a/repos/system_upgrade/el7toel8/actors/networkmanagerupdateconfig/actor.py b/repos/system_upgrade/el7toel8/actors/networkmanagerupdateconfig/actor.py deleted file mode 100644 index 85608296..00000000 --- a/repos/system_upgrade/el7toel8/actors/networkmanagerupdateconfig/actor.py +++ /dev/null @@ -1,36 +0,0 @@ -from leapp.actors import Actor -from leapp.models import NetworkManagerConfig -from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag - -snippet_path = '/usr/lib/NetworkManager/conf.d/10-dhcp-dhclient.conf' -snippet_data = ("# Generated by leapp when upgrading from RHEL7 to RHEL8\n" - "[main]\n" - "dhcp=dhclient\n") - - -class NetworkManagerUpdateConfig(Actor): - """ - Updates NetworkManager configuration for Red Hat Enterprise Linux 8. - - On Red Hat Enterprise Linux 7 NetworkManager uses the "dhclient" DHCP backend by default, while - the default is "internal" on Red Hat Enterprise Linux 8. We want to keep "dhclient" enabled on - upgrade, unless the user explicitly chose another backend in the configuration. To do so, we - drop a configuration snippet in /usr/lib. 
- """ - - name = 'network_manager_update_config' - consumes = (NetworkManagerConfig,) - produces = () - tags = (ApplicationsPhaseTag, IPUWorkflowTag) - - def process(self): - for nm_config in self.consume(NetworkManagerConfig): - self.log.info('Consuming dhcp={}'.format(nm_config.dhcp)) - if nm_config.dhcp == '': - try: - with open(snippet_path, 'w') as f: - f.write(snippet_data) - self.log.info('Written the following to {}:\n{}\n'.format(snippet_path, snippet_data)) - except IOError as e: - self.log.warning('Write error: {}'.format(e)) - break diff --git a/repos/system_upgrade/el7toel8/actors/networkmanagerupdateconnections/actor.py b/repos/system_upgrade/el7toel8/actors/networkmanagerupdateconnections/actor.py deleted file mode 100644 index 69ca0f03..00000000 --- a/repos/system_upgrade/el7toel8/actors/networkmanagerupdateconnections/actor.py +++ /dev/null @@ -1,34 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.stdlib import CalledProcessError, run -from leapp.models import NetworkManagerConfig -from leapp.tags import FirstBootPhaseTag, IPUWorkflowTag - - -class NetworkManagerUpdateConnections(Actor): - """ - Update NetworkManager connections. - - When using dhcp=dhclient on Red Hat Enterprise Linux 7, a non-hexadecimal client-id (a string) - is sent on the wire as is (i.e. the first character is the 'type' as per RFC 2132 section - 9.14). On Red Hat Enterprise Linux 8, a zero byte is prepended to string-only client-ids. To - preserve behavior on upgrade, we convert client-ids to the hexadecimal form. 
- """ - - name = 'network_manager_update_connections' - consumes = (NetworkManagerConfig,) - produces = () - tags = (FirstBootPhaseTag, IPUWorkflowTag) - - def process(self): - for nm_config in self.consume(NetworkManagerConfig): - if nm_config.dhcp not in ('', 'dhclient'): - self.log.info('DHCP client is "{}", nothing to do'.format(nm_config.dhcp)) - return - - try: - r = run(['/usr/bin/python3', 'tools/nm-update-client-ids.py'])['stdout'] - self.log.info('Updated client-ids: {}'.format(r)) - except (OSError, CalledProcessError) as e: - self.log.warning('Error calling nm-update-client-ids script: {}'.format(e)) - - break diff --git a/repos/system_upgrade/el7toel8/actors/networkmanagerupdateconnections/tools/nm-update-client-ids.py b/repos/system_upgrade/el7toel8/actors/networkmanagerupdateconnections/tools/nm-update-client-ids.py deleted file mode 100755 index 1c8d70cc..00000000 --- a/repos/system_upgrade/el7toel8/actors/networkmanagerupdateconnections/tools/nm-update-client-ids.py +++ /dev/null @@ -1,50 +0,0 @@ -from __future__ import print_function - -import sys - -import gi - -gi.require_version('NM', '1.0') -from gi.repository import NM # noqa: E402; pylint: disable=wrong-import-position - - -def is_hexstring(s): - arr = s.split(':') - for a in arr: - if len(a) != 1 and len(a) != 2: - return False - try: - int(a, 16) - except ValueError: - return False - return True - - -client = NM.Client.new(None) -if not client: - print('Cannot create NM client instance') - sys.exit(0) - -processed = 0 -changed = 0 -errors = 0 - -for c in client.get_connections(): - s_ip4 = c.get_setting_ip4_config() - processed += 1 - if s_ip4 is not None: - client_id = s_ip4.get_dhcp_client_id() - if client_id is not None: - if not is_hexstring(client_id): - new_client_id = ':'.join(hex(ord(x))[2:] for x in client_id) - s_ip4.set_property(NM.SETTING_IP4_CONFIG_DHCP_CLIENT_ID, new_client_id) - success = c.commit_changes(True, None) - if success: - changed += 1 - else: - errors += 1 - 
print('Connection {}: \'{}\' -> \'{}\' ({})'.format(c.get_uuid(), - client_id, new_client_id, - 'OK' if success else 'FAIL')) - -print("{} processed, {} changed, {} errors".format(processed, changed, errors)) diff --git a/repos/system_upgrade/el7toel8/actors/networkmanagerupdateservice/actor.py b/repos/system_upgrade/el7toel8/actors/networkmanagerupdateservice/actor.py deleted file mode 100644 index cb581df8..00000000 --- a/repos/system_upgrade/el7toel8/actors/networkmanagerupdateservice/actor.py +++ /dev/null @@ -1,56 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.stdlib import CalledProcessError, run -from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag - - -class NetworkManagerUpdateService(Actor): - """ - Updates NetworkManager services status. - - On Red Hat Enterprise Linux 7 if the NetworkManager service was disabled and - NetworkManager-wait-online enabled, the former would not be started. This changed on Red Hat - Enterprise Linux 8, where NM-w-o 'Requires' NM and so NM can be started even if disabled. Upon - upgrade, to keep the previous behavior we must disable NM-w-o when NM is disabled. 
- - See also: - https://bugzilla.redhat.com/show_bug.cgi?id=1520865 - """ - - name = 'network_manager_update_service' - consumes = () - produces = () - tags = (ApplicationsPhaseTag, IPUWorkflowTag) - - def process(self): - nm_enabled = self.unit_enabled('NetworkManager.service') - nmwo_enabled = self.unit_enabled('NetworkManager-wait-online.service') - self.log_services_state('initial', nm_enabled, nmwo_enabled) - - if not nm_enabled and nmwo_enabled: - self.log.info('Disabling NetworkManager-wait-online.service') - - try: - run(['systemctl', 'disable', 'NetworkManager-wait-online.service']) - except (OSError, CalledProcessError) as e: - self.log.warning('Error disabling NetworkManager-wait-online.service: {}'.format(e)) - return - - nm_enabled = self.unit_enabled('NetworkManager.service') - nmwo_enabled = self.unit_enabled('NetworkManager-wait-online.service') - self.log_services_state('after upgrade', nm_enabled, nmwo_enabled) - - def log_services_state(self, detail, nm, nmwo): - self.log.info('Services state ({}):'.format(detail)) - self.log.info(' - NetworkManager : {}'.format('enabled' if nm else 'disabled')) - self.log.info(' - NetworkManager-wait-online: {}'.format('enabled' if nmwo else 'disabled')) - - def unit_enabled(self, name): - try: - ret = run(['systemctl', 'is-enabled', name], split=True)['stdout'] - if ret: - enabled = ret[0] == 'enabled' - else: - enabled = False - except (OSError, CalledProcessError): - enabled = False - return enabled diff --git a/repos/system_upgrade/el7toel8/actors/opensshalgorithmscheck/actor.py b/repos/system_upgrade/el7toel8/actors/opensshalgorithmscheck/actor.py deleted file mode 100644 index 22f372b8..00000000 --- a/repos/system_upgrade/el7toel8/actors/opensshalgorithmscheck/actor.py +++ /dev/null @@ -1,20 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import opensshalgorithmscheck -from leapp.models import OpenSshConfig, Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class 
OpenSshAlgorithmsCheck(Actor): - """ - OpenSSH configuration does not contain any unsupported cryptographic algorithms. - - Check the values of Ciphers and MACs in OpenSSH server config file and warn - about removed algorithms which might cause the server to fail to start. - """ - name = 'open_ssh_algorithms' - consumes = (OpenSshConfig,) - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - opensshalgorithmscheck.process(self.consume(OpenSshConfig)) diff --git a/repos/system_upgrade/el7toel8/actors/opensshalgorithmscheck/libraries/opensshalgorithmscheck.py b/repos/system_upgrade/el7toel8/actors/opensshalgorithmscheck/libraries/opensshalgorithmscheck.py deleted file mode 100644 index 13cd3b0b..00000000 --- a/repos/system_upgrade/el7toel8/actors/opensshalgorithmscheck/libraries/opensshalgorithmscheck.py +++ /dev/null @@ -1,82 +0,0 @@ -from leapp import reporting -from leapp.exceptions import StopActorExecutionError -from leapp.libraries.stdlib import api - - -def process(openssh_messages): - removed_ciphers = [ - "blowfish-cbc", - "cast128-cbc", - "arcfour", - "arcfour128", - "arcfour256", - ] - removed_macs = [ - "hmac-ripemd160", - ] - found_ciphers = [] - found_macs = [] - - config = next(openssh_messages, None) - if list(openssh_messages): - api.current_logger().warning('Unexpectedly received more than one OpenSshConfig message.') - if not config: - raise StopActorExecutionError( - 'Could not check openssh configuration', details={'details': 'No OpenSshConfig facts found.'} - ) - - for cipher in removed_ciphers: - if config.ciphers and cipher in config.ciphers: - found_ciphers.append(cipher) - for mac in removed_macs: - if config.macs and mac in config.macs: - found_macs.append(mac) - - resources = [ - reporting.RelatedResource('package', 'openssh-server'), - reporting.RelatedResource('file', '/etc/ssh/sshd_config') - ] - if found_ciphers: - reporting.create_report([ - reporting.Title('OpenSSH configured to use removed 
ciphers'), - reporting.Summary( - 'OpenSSH is configured to use removed ciphers {}. ' - 'These ciphers were removed from OpenSSH and if ' - 'present the sshd daemon will not start in RHEL 8' - ''.format(','.join(found_ciphers)) - ), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([ - reporting.Groups.AUTHENTICATION, - reporting.Groups.SECURITY, - reporting.Groups.NETWORK, - reporting.Groups.SERVICES - ]), - reporting.Remediation( - hint='Remove the following ciphers from sshd_config: ' - '{}'.format(','.join(found_ciphers)) - ), - reporting.Groups([reporting.Groups.INHIBITOR]) - ] + resources) - - if found_macs: - reporting.create_report([ - reporting.Title('OpenSSH configured to use removed mac'), - reporting.Summary( - 'OpenSSH is configured to use removed mac {}. ' - 'This MAC was removed from OpenSSH and if present ' - 'the sshd daemon will not start in RHEL 8' - ''.format(','.join(found_macs)) - ), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([ - reporting.Groups.AUTHENTICATION, - reporting.Groups.SECURITY, - reporting.Groups.NETWORK, - reporting.Groups.SERVICES - ]), - reporting.Remediation( - hint='Remove the following MACs from sshd_config: {}'.format(','.join(found_macs)) - ), - reporting.Groups([reporting.Groups.INHIBITOR]) - ] + resources) diff --git a/repos/system_upgrade/el7toel8/actors/opensshalgorithmscheck/tests/unit_test_opensshalgorithmscheck.py b/repos/system_upgrade/el7toel8/actors/opensshalgorithmscheck/tests/unit_test_opensshalgorithmscheck.py deleted file mode 100644 index f606583a..00000000 --- a/repos/system_upgrade/el7toel8/actors/opensshalgorithmscheck/tests/unit_test_opensshalgorithmscheck.py +++ /dev/null @@ -1,35 +0,0 @@ -import pytest - -from leapp.exceptions import StopActorExecutionError -from leapp.libraries.actor import opensshalgorithmscheck -from leapp.models import OpenSshConfig, OpenSshPermitRootLogin, Report -from leapp.snactor.fixture import current_actor_context - - -def 
test_no_config(current_actor_context): - with pytest.raises(StopActorExecutionError): - opensshalgorithmscheck.process(iter([])) - - -osprl = OpenSshPermitRootLogin(value='no') - - -@pytest.mark.parametrize('ciphers,expected_report', [ - (None, False), - ('aes128-ctr', False), - ('aes128-ctr,aes192-ctr,aes256-ctr', False), - ('arcfour', True), - ('arcfour,arcfour128,arcfour256', True), - ('arcfour,aes128-ctr', True), - ('aes128-ctr,arcfour', True)]) -def test_ciphers(current_actor_context, ciphers, expected_report): - current_actor_context.feed(OpenSshConfig( - permit_root_login=[osprl], - deprecated_directives=[], - ciphers=ciphers - )) - current_actor_context.run() - if expected_report: - assert current_actor_context.consume(Report) - else: - assert not current_actor_context.consume(Report) diff --git a/repos/system_upgrade/el7toel8/actors/opensshdeprecateddirectivescheck/actor.py b/repos/system_upgrade/el7toel8/actors/opensshdeprecateddirectivescheck/actor.py deleted file mode 100644 index 69f9ee79..00000000 --- a/repos/system_upgrade/el7toel8/actors/opensshdeprecateddirectivescheck/actor.py +++ /dev/null @@ -1,23 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor.opensshdeprecateddirectivescheck import inhibit_if_deprecated_directives_used -from leapp.models import OpenSshConfig, Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class OpenSshDeprecatedDirectivesCheck(Actor): - """ - Check for any deprecated directives in the OpenSSH configuration. - - Checks the directives used in the OpenSSH configuration for ones that have - been deprecated and their usage in newer versions would result in the sshd - service failing to start after the upgrade. 
- """ - - name = 'open_ssh_deprecated_directives_check' - consumes = (OpenSshConfig,) - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - ssh_config = next(self.consume(OpenSshConfig)) - inhibit_if_deprecated_directives_used(ssh_config) diff --git a/repos/system_upgrade/el7toel8/actors/opensshdeprecateddirectivescheck/libraries/opensshdeprecateddirectivescheck.py b/repos/system_upgrade/el7toel8/actors/opensshdeprecateddirectivescheck/libraries/opensshdeprecateddirectivescheck.py deleted file mode 100644 index db88f869..00000000 --- a/repos/system_upgrade/el7toel8/actors/opensshdeprecateddirectivescheck/libraries/opensshdeprecateddirectivescheck.py +++ /dev/null @@ -1,30 +0,0 @@ -from leapp import reporting - -LIST_SEPARATOR_FMT = '\n - ' - - -def inhibit_if_deprecated_directives_used(ssh_config_msg): - """ Inhibits the upgrade if any deprecated directives were found in the sshd configuration. """ - - if ssh_config_msg.deprecated_directives: - # Prepare the output of the deprecated directives for the user - deprecated_directives_report_text = '' - for deprecated_directive in ssh_config_msg.deprecated_directives: - deprecated_directives_report_text += '{0}{1}'.format(LIST_SEPARATOR_FMT, deprecated_directive) - - sshd_config_path = '/etc/ssh/sshd_config' - reporting.create_report([ - reporting.Title('A deprecated directive in the sshd configuration'), - reporting.Summary( - 'The following deprecated directives were found in the sshd configuration file {0}:{1}' - .format(sshd_config_path, deprecated_directives_report_text) - ), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([reporting.Groups.SERVICES]), - reporting.Groups([reporting.Groups.INHIBITOR]), - reporting.RelatedResource('file', sshd_config_path), - reporting.Remediation( - hint='Remove the deprecated directives from the sshd configuration.', - commands=[["sed", "-i", "/^\\s*ShowPatchLevel.*$/Id", sshd_config_path]] - ) - ]) diff --git 
a/repos/system_upgrade/el7toel8/actors/opensshdeprecateddirectivescheck/tests/test_opensshdeprecateddirectivescheck.py b/repos/system_upgrade/el7toel8/actors/opensshdeprecateddirectivescheck/tests/test_opensshdeprecateddirectivescheck.py deleted file mode 100644 index 8bb3fc32..00000000 --- a/repos/system_upgrade/el7toel8/actors/opensshdeprecateddirectivescheck/tests/test_opensshdeprecateddirectivescheck.py +++ /dev/null @@ -1,51 +0,0 @@ -import pytest - -from leapp import reporting -from leapp.libraries.actor.opensshdeprecateddirectivescheck import inhibit_if_deprecated_directives_used -from leapp.libraries.common.testutils import create_report_mocked, logger_mocked -from leapp.models import OpenSshConfig -from leapp.utils.report import is_inhibitor - - -def test_inhibit_if_deprecated_directives_used(monkeypatch): - """Tests whether the upgrade is inhibited when deprecated directives are used in config.""" - created_report = create_report_mocked() - monkeypatch.setattr(reporting, 'create_report', created_report) - - ssh_config = OpenSshConfig( - permit_root_login=[], - deprecated_directives=['ShowPatchLevel'] - ) - - inhibit_if_deprecated_directives_used(ssh_config) - - fail_description = 'Report entry was not created when deprecated directive found in the ssh config.' - assert created_report.called == 1, fail_description - - fail_description = 'Report doesn\'t have information about deprecated directive in the title.' - assert 'deprecated directive' in created_report.report_fields['title'].lower(), fail_description - - fail_description = 'Report doesn\'t contain the (mocked) deprecated directive present in the config.' - # The report should have the directive in a preserved form (same as found in configuration) - assert 'ShowPatchLevel' in created_report.report_fields['summary'], fail_description - - assert created_report.report_fields['severity'] == 'high', 'Report has incorrect severity.' 
- - fail_description = 'Report should have the inhibition flag set when deprecated directive is present.' - assert is_inhibitor(created_report.report_fields), fail_description - - assert created_report.report_fields['detail']['remediations'], 'Report should carry some remediation information.' - - -def test_inhibit_if_deprecated_directives_used_no_deprecated_directives(monkeypatch): - """Tests whether the upgrade is not inhibited when no deprecated directives are used in config.""" - created_report = create_report_mocked() - monkeypatch.setattr(reporting, 'create_report', created_report) - - ssh_config = OpenSshConfig( - permit_root_login=[], - deprecated_directives=[] - ) - - inhibit_if_deprecated_directives_used(ssh_config) - assert created_report.called == 0, 'No report should be created if no deprecated directive present in the config.' diff --git a/repos/system_upgrade/el7toel8/actors/opensshprotocolcheck/actor.py b/repos/system_upgrade/el7toel8/actors/opensshprotocolcheck/actor.py deleted file mode 100644 index 1c9bdebd..00000000 --- a/repos/system_upgrade/el7toel8/actors/opensshprotocolcheck/actor.py +++ /dev/null @@ -1,22 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import opensshprotocolcheck -from leapp.models import OpenSshConfig, Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class OpenSshProtocolCheck(Actor): - """ - Protocol configuration option was removed. - - Check the value of Protocol in OpenSSH server config file - and warn about its deprecation if it is set. This option was removed - in RHEL 7.4, but it might still be hanging around. 
- """ - - name = 'open_ssh_protocol' - consumes = (OpenSshConfig, ) - produces = (Report, ) - tags = (ChecksPhaseTag, IPUWorkflowTag, ) - - def process(self): - opensshprotocolcheck.process(self.consume(OpenSshConfig)) diff --git a/repos/system_upgrade/el7toel8/actors/opensshprotocolcheck/libraries/opensshprotocolcheck.py b/repos/system_upgrade/el7toel8/actors/opensshprotocolcheck/libraries/opensshprotocolcheck.py deleted file mode 100644 index 936c3dd0..00000000 --- a/repos/system_upgrade/el7toel8/actors/opensshprotocolcheck/libraries/opensshprotocolcheck.py +++ /dev/null @@ -1,31 +0,0 @@ -from leapp import reporting -from leapp.exceptions import StopActorExecutionError -from leapp.libraries.stdlib import api - - -def process(openssh_messages): - config = next(openssh_messages, None) - if list(openssh_messages): - api.current_logger().warning('Unexpectedly received more than one OpenSshConfig message.') - if not config: - raise StopActorExecutionError( - 'Could not check openssh configuration', details={'details': 'No OpenSshConfig facts found.'} - ) - - if config.protocol: - reporting.create_report([ - reporting.Title('OpenSSH configured with removed configuration Protocol'), - reporting.Summary( - 'OpenSSH is configured with removed configuration ' - 'option Protocol. If this used to be for enabling ' - 'SSHv1, this is no longer supported in RHEL 8. ' - 'Otherwise this option can be simply removed.' 
- ), - reporting.Severity(reporting.Severity.LOW), - reporting.Groups([ - reporting.Groups.AUTHENTICATION, - reporting.Groups.SECURITY, - reporting.Groups.NETWORK, - reporting.Groups.SERVICES - ]), - ]) diff --git a/repos/system_upgrade/el7toel8/actors/opensshprotocolcheck/tests/unit_test_opensshprotocolcheck.py b/repos/system_upgrade/el7toel8/actors/opensshprotocolcheck/tests/unit_test_opensshprotocolcheck.py deleted file mode 100644 index f9962884..00000000 --- a/repos/system_upgrade/el7toel8/actors/opensshprotocolcheck/tests/unit_test_opensshprotocolcheck.py +++ /dev/null @@ -1,28 +0,0 @@ -import pytest - -from leapp.exceptions import StopActorExecutionError -from leapp.libraries.actor import opensshprotocolcheck -from leapp.models import OpenSshConfig, OpenSshPermitRootLogin, Report -from leapp.snactor.fixture import current_actor_context - - -def test_no_config(current_actor_context): - with pytest.raises(StopActorExecutionError): - opensshprotocolcheck.process(iter([])) - - -osprl = OpenSshPermitRootLogin(value='no') - - -@pytest.mark.parametrize('protocol', [None, '1', '2', '1,2', '2,1', '7']) -def test_protocol(current_actor_context, protocol): - current_actor_context.feed(OpenSshConfig( - permit_root_login=[osprl], - protocol=protocol, - deprecated_directives=[] - )) - current_actor_context.run() - if protocol: - assert current_actor_context.consume(Report) - else: - assert not current_actor_context.consume(Report) diff --git a/repos/system_upgrade/el7toel8/actors/opensshuseprivilegeseparationcheck/actor.py b/repos/system_upgrade/el7toel8/actors/opensshuseprivilegeseparationcheck/actor.py deleted file mode 100644 index 1a46c089..00000000 --- a/repos/system_upgrade/el7toel8/actors/opensshuseprivilegeseparationcheck/actor.py +++ /dev/null @@ -1,20 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import opensshuseprivilegeseparationcheck -from leapp.models import OpenSshConfig, Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - 
- -class OpenSshUsePrivilegeSeparationCheck(Actor): - """ - UsePrivilegeSeparation configuration option was removed. - - Check the value of UsePrivilegeSeparation in OpenSSH server config file - and warn about its deprecation if it is set to non-default value. - """ - name = 'open_ssh_use_privilege_separation' - consumes = (OpenSshConfig, ) - produces = (Report, ) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - opensshuseprivilegeseparationcheck.process(self.consume(OpenSshConfig)) diff --git a/repos/system_upgrade/el7toel8/actors/opensshuseprivilegeseparationcheck/libraries/opensshuseprivilegeseparationcheck.py b/repos/system_upgrade/el7toel8/actors/opensshuseprivilegeseparationcheck/libraries/opensshuseprivilegeseparationcheck.py deleted file mode 100644 index db82a395..00000000 --- a/repos/system_upgrade/el7toel8/actors/opensshuseprivilegeseparationcheck/libraries/opensshuseprivilegeseparationcheck.py +++ /dev/null @@ -1,31 +0,0 @@ -from leapp import reporting -from leapp.exceptions import StopActorExecutionError -from leapp.libraries.stdlib import api - - -def process(openssh_messages): - config = next(openssh_messages, None) - if list(openssh_messages): - api.current_logger().warning('Unexpectedly received more than one OpenSshConfig message.') - if not config: - raise StopActorExecutionError( - 'Could not check openssh configuration', details={'details': 'No OpenSshConfig facts found.'} - ) - - if config.use_privilege_separation is not None and \ - config.use_privilege_separation != "sandbox": - reporting.create_report([ - reporting.Title('OpenSSH configured not to use privilege separation sandbox'), - reporting.Summary( - 'OpenSSH is configured to disable privilege ' - 'separation sandbox, which is decreasing security ' - 'and is no longer supported in RHEL 8' - ), - reporting.Severity(reporting.Severity.LOW), - reporting.Groups([ - reporting.Groups.AUTHENTICATION, - reporting.Groups.SECURITY, - reporting.Groups.NETWORK, - 
reporting.Groups.SERVICES - ]), - ]) diff --git a/repos/system_upgrade/el7toel8/actors/opensshuseprivilegeseparationcheck/tests/unit_test_opensshuseprivilegeseparationcheck.py b/repos/system_upgrade/el7toel8/actors/opensshuseprivilegeseparationcheck/tests/unit_test_opensshuseprivilegeseparationcheck.py deleted file mode 100644 index 8a35c7a4..00000000 --- a/repos/system_upgrade/el7toel8/actors/opensshuseprivilegeseparationcheck/tests/unit_test_opensshuseprivilegeseparationcheck.py +++ /dev/null @@ -1,41 +0,0 @@ -import pytest - -from leapp.exceptions import StopActorExecutionError -from leapp.libraries.actor import opensshuseprivilegeseparationcheck -from leapp.models import OpenSshConfig, OpenSshPermitRootLogin, Report -from leapp.snactor.fixture import current_actor_context - - -def test_no_config(current_actor_context): - with pytest.raises(StopActorExecutionError): - opensshuseprivilegeseparationcheck.process(iter([])) - - -osprl = OpenSshPermitRootLogin(value='no') - - -@pytest.mark.parametrize('values,expected_report', [ - ([''], False), - (['sandbox'], False), - (['yes'], True), - (['no'], True), - (['sandbox', 'yes'], False), - (['yes', 'sandbox'], True)]) -def test_separation(current_actor_context, values, expected_report): - for value in values: - if value: - current_actor_context.feed(OpenSshConfig( - permit_root_login=[osprl], - use_privilege_separation=value, - deprecated_directives=[] - )) - else: - current_actor_context.feed(OpenSshConfig( - permit_root_login=[osprl], - deprecated_directives=[] - )) - current_actor_context.run() - if expected_report: - assert current_actor_context.consume(Report) - else: - assert not current_actor_context.consume(Report) diff --git a/repos/system_upgrade/el7toel8/actors/pammodulesscanner/actor.py b/repos/system_upgrade/el7toel8/actors/pammodulesscanner/actor.py deleted file mode 100644 index 4243cd3b..00000000 --- a/repos/system_upgrade/el7toel8/actors/pammodulesscanner/actor.py +++ /dev/null @@ -1,43 +0,0 @@ -import 
os - -from leapp.actors import Actor -from leapp.libraries.common.pam import PAM -from leapp.libraries.stdlib import api -from leapp.models import PamConfiguration, PamService -from leapp.tags import FactsPhaseTag, IPUWorkflowTag - - -class PamModulesScanner(Actor): - """ - Scan the pam directory for services and modules used in them - - This produces a PAMConfiguration message containing the whole - list of configured PAM services and what modules they contain. - """ - - name = 'pam_modules_scanner' - consumes = () - produces = (PamConfiguration, ) - tags = (FactsPhaseTag, IPUWorkflowTag, ) - - def process(self): - conf = [] - path = "/etc/pam.d/" - for f in os.listdir(path): - pam_file = os.path.join(path, f) - # Ignore symlinks (usually handled by authconfig) - if not os.path.isfile(pam_file) or os.path.islink(pam_file): - continue - - # Use the existing PAM library to parse the files, but unpack it to our model - try: - content = PAM.read_file(pam_file) - modules = PAM(content) - service = PamService(service=f, modules=modules.modules) - conf.append(service) - except OSError as err: - # if leapp can not read that file it will probably not be important - api.current_logger().warning('Failed to read file {}: {}'.format(pam_file, err.strerror)) - - pam = PamConfiguration(services=conf) - self.produce(pam) diff --git a/repos/system_upgrade/el7toel8/actors/postgresqlcheck/libraries/postgresqlcheck.py b/repos/system_upgrade/el7toel8/actors/postgresqlcheck/libraries/postgresqlcheck.py deleted file mode 100644 index 575a2798..00000000 --- a/repos/system_upgrade/el7toel8/actors/postgresqlcheck/libraries/postgresqlcheck.py +++ /dev/null @@ -1,88 +0,0 @@ -from leapp import reporting -from leapp.libraries.common.rpms import has_package -from leapp.libraries.stdlib import api -from leapp.models import DistributionSignedRPM - -# Summary for postgresql-server report -report_server_inst_summary = ( - 'PostgreSQL server component will be upgraded. 
Since RHEL-8 includes' - ' PostgreSQL server 10 by default, which is incompatible with 9.2' - ' included in RHEL-7, it is necessary to proceed with additional steps' - ' for the complete upgrade of the PostgreSQL data.' -) - -report_server_inst_hint = ( - 'Back up your data before proceeding with the upgrade' - ' and follow steps in the documentation section "Migrating to a RHEL 8 version of PostgreSQL"' - ' after the upgrade.' -) - -# Link URL for postgresql-server report -report_server_inst_link_url = 'https://red.ht/rhel-8-migrate-postgresql-server' - -# List of dropped extensions from postgresql-contrib package -report_contrib_inst_dropext = ['dummy_seclabel', 'test_parser', 'tsearch2'] - -# Summary for postgresql-contrib report -report_contrib_inst_summary = ( - 'Please note that some extensions have been dropped from the' - ' postgresql-contrib package and might not be available after' - ' the upgrade:{}' - .format(''.join(['\n - {}'.format(i) for i in report_contrib_inst_dropext])) -) - - -def _report_server_installed(): - """ - Create report on postgresql-server package installation detection. - - Should remind user about present PostgreSQL server package - installation, warn them about necessary additional steps, and - redirect them to online documentation for the upgrade process. - """ - reporting.create_report([ - reporting.Title('PostgreSQL (postgresql-server) has been detected on your system'), - reporting.Summary(report_server_inst_summary), - reporting.Severity(reporting.Severity.MEDIUM), - reporting.Groups([reporting.Groups.SERVICES]), - reporting.ExternalLink(title='Migrating to a RHEL 8 version of PostgreSQL', - url=report_server_inst_link_url), - reporting.RelatedResource('package', 'postgresql-server'), - reporting.Remediation(hint=report_server_inst_hint), - ]) - - -def _report_contrib_installed(): - """ - Create report on postgresql-contrib package installation detection. 
- - Should remind user about present PostgreSQL contrib package - installation and provide them with a list of extensions no longer - shipped with this package. - """ - reporting.create_report([ - reporting.Title('PostgreSQL (postgresql-contrib) has been detected on your system'), - reporting.Summary(report_contrib_inst_summary), - reporting.Severity(reporting.Severity.MEDIUM), - reporting.Groups([reporting.Groups.SERVICES]), - reporting.RelatedResource('package', 'postgresql-contrib') - ]) - - -def report_installed_packages(_context=api): - """ - Create reports according to detected PostgreSQL packages. - - Create the report if the postgresql-server rpm (RH signed) is installed. - Additionally, create another report if the postgresql-contrib rpm - is installed. - """ - has_server = has_package(DistributionSignedRPM, 'postgresql-server', context=_context) - has_contrib = has_package(DistributionSignedRPM, 'postgresql-contrib', context=_context) - - if has_server: - # postgresql-server - _report_server_installed() - if has_contrib: - # postgresql-contrib - _report_contrib_installed() diff --git a/repos/system_upgrade/el7toel8/actors/powertop/actor.py b/repos/system_upgrade/el7toel8/actors/powertop/actor.py deleted file mode 100644 index 905dd729..00000000 --- a/repos/system_upgrade/el7toel8/actors/powertop/actor.py +++ /dev/null @@ -1,35 +0,0 @@ -from leapp import reporting -from leapp.actors import Actor -from leapp.libraries.common.rpms import has_package -from leapp.models import DistributionSignedRPM -from leapp.reporting import create_report, Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class PowerTop(Actor): - """ - Check if PowerTOP is installed. If yes, write information about non-compatible changes. 
- """ - - name = 'powertop' - consumes = (DistributionSignedRPM,) - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - if has_package(DistributionSignedRPM, 'powertop'): - create_report([ - reporting.Title('PowerTOP compatibility options removed in the next major version'), - reporting.Summary( - 'The -d (dump) option which has been kept for RHEL backward compatibility has been ' - 'dropped.\n' - 'The -h option which has been used for RHEL backward compatibility is no longer ' - 'alias for --html, but it\'s now an alias for --help to follow the upstream.\n' - 'The -u option which has been used for RHEL backward compatibility as an alias for ' - '--help has been dropped.\n' - ), - reporting.Severity(reporting.Severity.LOW), - reporting.Groups([reporting.Groups.TOOLS, reporting.Groups.MONITORING]), - reporting.Remediation(hint='Please remove the dropped options from your scripts.'), - reporting.RelatedResource('package', 'powertop') - ]) diff --git a/repos/system_upgrade/el7toel8/actors/powertop/tests/component_test_powertop.py b/repos/system_upgrade/el7toel8/actors/powertop/tests/component_test_powertop.py deleted file mode 100644 index 0e45d19d..00000000 --- a/repos/system_upgrade/el7toel8/actors/powertop/tests/component_test_powertop.py +++ /dev/null @@ -1,33 +0,0 @@ -from leapp.models import DistributionSignedRPM, RPM -from leapp.reporting import Report -from leapp.snactor.fixture import current_actor_context - -RH_PACKAGER = 'Red Hat, Inc. 
' - - -def create_modulesfacts(installed_rpm): - return DistributionSignedRPM(items=installed_rpm) - - -def test_actor_with_powertop_package(current_actor_context): - with_powertop = [ - RPM(name='grep', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), - RPM(name='powertop', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51')] - - current_actor_context.feed(create_modulesfacts(installed_rpm=with_powertop)) - current_actor_context.run() - assert current_actor_context.consume(Report) - - -def test_actor_without_powertop_package(current_actor_context): - without_powertop = [ - RPM(name='grep', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), - RPM(name='sed', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51')] - - current_actor_context.feed(create_modulesfacts(installed_rpm=without_powertop)) - current_actor_context.run() - assert not current_actor_context.consume(Report) diff --git a/repos/system_upgrade/el7toel8/actors/pythoninformuser/actor.py b/repos/system_upgrade/el7toel8/actors/pythoninformuser/actor.py deleted file mode 100644 index b7204750..00000000 --- a/repos/system_upgrade/el7toel8/actors/pythoninformuser/actor.py +++ /dev/null @@ -1,33 +0,0 @@ -from leapp import reporting -from leapp.actors import Actor -from leapp.reporting import create_report, Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class PythonInformUser(Actor): - name = "python_inform_user" - description = "This actor informs the user of differences in Python version and support in RHEL 8." 
- consumes = () - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - url = "https://red.ht/rhel-8-python" - title = "Difference in Python versions and support in RHEL 8" - summary = ("In RHEL 8, there is no 'python' command." - " Python 3 (backward incompatible) is the primary Python version" - " and Python 2 is available with limited support and limited set of packages." - " If you no longer require Python 2 packages following the upgrade, please remove them." - " Read more here: {}".format(url)) - create_report([ - reporting.Title(title), - reporting.Summary(summary), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([reporting.Groups.PYTHON]), - reporting.Audience('developer'), - reporting.ExternalLink(url, title), - reporting.Remediation(hint='Please run "alternatives --set python /usr/bin/python3" after upgrade'), - reporting.RelatedResource('package', 'python'), - reporting.RelatedResource('package', 'python2'), - reporting.RelatedResource('package', 'python3') - ]) diff --git a/repos/system_upgrade/el7toel8/actors/quaggadaemons/actor.py b/repos/system_upgrade/el7toel8/actors/quaggadaemons/actor.py deleted file mode 100644 index b623017c..00000000 --- a/repos/system_upgrade/el7toel8/actors/quaggadaemons/actor.py +++ /dev/null @@ -1,24 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor.quaggadaemons import process_daemons -from leapp.libraries.common.rpms import has_package -from leapp.models import DistributionSignedRPM, QuaggaToFrrFacts -from leapp.tags import FactsPhaseTag, IPUWorkflowTag - - -class QuaggaDaemons(Actor): - """ - Active quagga daemons check. - - Checking for daemons that are currently running in the system. - These should be enabled in /etc/frr/daemons later in the process. - The tools will check for config files later on since these should stay in the system. 
- """ - - name = 'quagga_daemons' - consumes = (DistributionSignedRPM,) - produces = (QuaggaToFrrFacts,) - tags = (FactsPhaseTag, IPUWorkflowTag) - - def process(self): - if has_package(DistributionSignedRPM, 'quagga'): - self.produce(process_daemons()) diff --git a/repos/system_upgrade/el7toel8/actors/quaggadaemons/libraries/quaggadaemons.py b/repos/system_upgrade/el7toel8/actors/quaggadaemons/libraries/quaggadaemons.py deleted file mode 100644 index 6340efcc..00000000 --- a/repos/system_upgrade/el7toel8/actors/quaggadaemons/libraries/quaggadaemons.py +++ /dev/null @@ -1,37 +0,0 @@ -from leapp.libraries.stdlib import api, CalledProcessError, run -from leapp.models import QuaggaToFrrFacts - -QUAGGA_DAEMONS = [ - 'babeld', - 'bgpd', - 'isisd', - 'ospf6d', - 'ospfd', - 'ripd', - 'ripngd', - 'zebra' -] - - -def _check_service(name, state): - try: - run(['systemctl', 'is-{}'.format(state), name]) - api.current_logger().debug('%s is %s', name, state) - except CalledProcessError: - api.current_logger().debug('%s is not %s', name, state) - return False - - return True - - -def process_daemons(): - active_daemons = [daemon for daemon in QUAGGA_DAEMONS if _check_service(daemon, 'active')] - enabled_daemons = [daemon for daemon in QUAGGA_DAEMONS if _check_service(daemon, 'enabled')] - - if active_daemons: - api.current_logger().debug('active quaggadaemons: %s', ', '.join(active_daemons)) - - if enabled_daemons: - api.current_logger().debug('enabled quaggadaemons: %s', ', '.join(enabled_daemons)) - - return QuaggaToFrrFacts(active_daemons=active_daemons, enabled_daemons=enabled_daemons) diff --git a/repos/system_upgrade/el7toel8/actors/quaggadaemons/tests/test_unit_quaggadaemons.py b/repos/system_upgrade/el7toel8/actors/quaggadaemons/tests/test_unit_quaggadaemons.py deleted file mode 100644 index a25faeac..00000000 --- a/repos/system_upgrade/el7toel8/actors/quaggadaemons/tests/test_unit_quaggadaemons.py +++ /dev/null @@ -1,21 +0,0 @@ -from leapp.libraries.actor import 
quaggadaemons -from leapp.models import QuaggaToFrrFacts - -# daemons for mocked _check_service function -TEST_DAEMONS = ['bgpd', 'ospfd', 'zebra'] - - -def mock_check_service(name, state): - if name in TEST_DAEMONS: - return True - - return False - - -def test_process_daemons(): - quaggadaemons._check_service = mock_check_service - - facts = quaggadaemons.process_daemons() - assert isinstance(facts, QuaggaToFrrFacts) - assert facts.active_daemons == TEST_DAEMONS - assert facts.enabled_daemons == TEST_DAEMONS diff --git a/repos/system_upgrade/el7toel8/actors/quaggareport/actor.py b/repos/system_upgrade/el7toel8/actors/quaggareport/actor.py deleted file mode 100644 index faa55ebc..00000000 --- a/repos/system_upgrade/el7toel8/actors/quaggareport/actor.py +++ /dev/null @@ -1,47 +0,0 @@ -from leapp import reporting -from leapp.actors import Actor -from leapp.models import QuaggaToFrrFacts, Report -from leapp.reporting import create_report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - -COMMON_REPORT_TAGS = [ - reporting.Groups.NETWORK, - reporting.Groups.SERVICES -] - - -class QuaggaReport(Actor): - """ - Checking for babeld on RHEL-7. - - This actor is supposed to report that babeld was used on RHEL-7 - and it is no longer available in RHEL-8. - """ - - name = 'quagga_report' - consumes = (QuaggaToFrrFacts, ) - produces = (Report, ) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - try: - quagga_facts = next(self.consume(QuaggaToFrrFacts)) - except StopIteration: - return - if 'babeld' in quagga_facts.active_daemons or 'babeld' in quagga_facts.enabled_daemons: - create_report([ - reporting.Title('Babeld is not available in FRR'), - reporting.ExternalLink( - url='https://red.ht/rhel-8-configuring-routing-protocols', - title='Setting routing protocols in RHEL8'), - reporting.Summary( - 'babeld daemon which was a part of quagga implementation in RHEL7 ' - 'is not available in RHEL8 in FRR due to licensing issues.' 
- ), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups(COMMON_REPORT_TAGS), - reporting.Groups([reporting.Groups.INHIBITOR]), - reporting.Remediation(hint='Please use RIP, OSPF or EIGRP instead of Babel') - ]) - else: - self.log.debug('babeld not used, moving on.') diff --git a/repos/system_upgrade/el7toel8/actors/quaggareport/tests/test_quaggareport.py b/repos/system_upgrade/el7toel8/actors/quaggareport/tests/test_quaggareport.py deleted file mode 100644 index 4bf7f64a..00000000 --- a/repos/system_upgrade/el7toel8/actors/quaggareport/tests/test_quaggareport.py +++ /dev/null @@ -1,40 +0,0 @@ -import pytest - -from leapp.models import QuaggaToFrrFacts -from leapp.snactor.fixture import ActorContext - - -# TODO We can't use caplog here as logs from other processes is -# hard to capture and caplog not see it. -@pytest.mark.parametrize( - ("quagga_facts", "active_daemons", "has_report", "msg_in_log"), - [ - (True, ["babeld"], True, None), - (True, ["something_else"], False, "babeld not used, moving on"), - (False, [], False, None), - ], -) -def test_quaggareport( - monkeypatch, - current_actor_context, - quagga_facts, - active_daemons, - has_report, - msg_in_log, -): - """Test quaggareport. 
- - :type current_actor_context:ActorContext - """ - if quagga_facts: - current_actor_context.feed( - QuaggaToFrrFacts( - active_daemons=active_daemons, - enabled_daemons=["bgpd", "ospfd", "zebra"], - ) - ) - current_actor_context.run() - if has_report: - assert current_actor_context.messages()[0]["type"] == "Report" - if msg_in_log: - assert not current_actor_context.messages() diff --git a/repos/system_upgrade/el7toel8/actors/quaggatofrr/actor.py b/repos/system_upgrade/el7toel8/actors/quaggatofrr/actor.py deleted file mode 100644 index 5623cd4d..00000000 --- a/repos/system_upgrade/el7toel8/actors/quaggatofrr/actor.py +++ /dev/null @@ -1,23 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor.quaggatofrr import process_facts -from leapp.models import QuaggaToFrrFacts -from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag - - -class QuaggaToFrr(Actor): - """ - Edit frr configuration on the new system. - - Take gathered info about quagga from RHEL 7 and apply these to frr in RHEL 8. 
- """ - - name = 'quagga_to_frr' - consumes = (QuaggaToFrrFacts, ) - produces = () - tags = (ApplicationsPhaseTag, IPUWorkflowTag) - - def process(self): - quagga_facts = next(self.consume(QuaggaToFrrFacts), None) - - if quagga_facts: - process_facts(quagga_facts) diff --git a/repos/system_upgrade/el7toel8/actors/quaggatofrr/libraries/quaggatofrr.py b/repos/system_upgrade/el7toel8/actors/quaggatofrr/libraries/quaggatofrr.py deleted file mode 100644 index 07bccf95..00000000 --- a/repos/system_upgrade/el7toel8/actors/quaggatofrr/libraries/quaggatofrr.py +++ /dev/null @@ -1,101 +0,0 @@ -import os -import re -import shutil - -from leapp.libraries.common.config import version -from leapp.libraries.stdlib import api, CalledProcessError, run - -DAEMON_FILE = '/etc/frr/daemons' -# if this file still exists after the removal of quagga, it has been modified -CONFIG_FILE = '/etc/sysconfig/quagga.rpmsave' -QUAGGA_CONF_FILES = '/etc/quagga/' -FRR_CONF_FILES = '/etc/frr/' -BGPD_CONF_FILE = '/etc/frr/bgpd.conf' - -regex = re.compile(r'\w+(?= 8.4"): - if os.path.isfile(BGPD_CONF_FILE): - with open(BGPD_CONF_FILE, 'r') as f: - data = f.read() - data = re.sub("ip extcommunity-list", "bgp extcommunity-list", data, flags=re.MULTILINE) - with open(BGPD_CONF_FILE, 'w') as f: - f.write(data) - - -def process_facts(quagga_facts): - _change_config(quagga_facts) - _copy_config_files(QUAGGA_CONF_FILES, FRR_CONF_FILES) - _fix_commands() - _enable_frr(quagga_facts) diff --git a/repos/system_upgrade/el7toel8/actors/quaggatofrr/tests/files/bgpd.conf b/repos/system_upgrade/el7toel8/actors/quaggatofrr/tests/files/bgpd.conf deleted file mode 100644 index d37d27df..00000000 --- a/repos/system_upgrade/el7toel8/actors/quaggatofrr/tests/files/bgpd.conf +++ /dev/null @@ -1,33 +0,0 @@ -hostname BGP_Seed -password test -! 
-router bgp 65000 - bgp router-id 127.0.0.1 - network 10.0.0.0/24 - neighbor 127.0.0.1 remote-as 65001 - neighbor 127.0.0.1 route-map RMAPpsklenar in - neighbor 127.0.0.1 route-map RMAPpsklenar out -! -! ACCEPT ECOMMUNITY -ip extcommunity-list standard xuser permit rt 65001:80 -! -route-map RMAPbrno permit 20 - match extcommunity psklenar - set local-preference 80 -! -log file /var/log/quagga/bgpd.log debugging -! -!route-map SetAttr permit 10 -! set community 65000:1 additive -! set extcommunity rt 65000:1 -! set aggregator as 65002 1.2.3.4 -! set as-path prepend 1 2 3 4 -! set atomic-aggregate -! set metric 20 -! set originator-id 1.2.3.4 -! -line vty - no login -! -access-list CONF permit 10.0.0.0/24 -!end diff --git a/repos/system_upgrade/el7toel8/actors/quaggatofrr/tests/files/daemons b/repos/system_upgrade/el7toel8/actors/quaggatofrr/tests/files/daemons deleted file mode 100644 index 6b5ccd4a..00000000 --- a/repos/system_upgrade/el7toel8/actors/quaggatofrr/tests/files/daemons +++ /dev/null @@ -1,82 +0,0 @@ -# This file tells the frr package which daemons to start. -# -# Entries are in the format: =(yes|no|priority) -# 0, "no" = disabled -# 1, "yes" = highest priority -# 2 .. 10 = lower priorities -# -# For daemons which support multiple instances, a 2nd line listing -# the instances can be added. Eg for ospfd: -# ospfd=yes -# ospfd_instances="1,2" -# -# Priorities were suggested by Dancer . -# They're used to start the FRR daemons in more than one step -# (for example start one or two at network initialization and the -# rest later). The number of FRR daemons being small, priorities -# must be between 1 and 9, inclusive (or the initscript has to be -# changed). /etc/init.d/frr then can be started as -# -# /etc/init.d/frr > -# -# where priority 0 is the same as 'stop', priority 10 or 'start' -# means 'start all' -# -# Sample configurations for these daemons can be found in -# /usr/share/doc/frr/examples/. 
-# -# ATTENTION: -# -# When activation a daemon at the first time, a config file, even if it is -# empty, has to be present *and* be owned by the user and group "frr", else -# the daemon will not be started by /etc/init.d/frr. The permissions should -# be u=rw,g=r,o=. -# When using "vtysh" such a config file is also needed. It should be owned by -# group "frrvty" and set to ug=rw,o= though. Check /etc/pam.d/frr, too. -# -watchfrr_enable=yes -watchfrr_options="-r '/usr/lib/frr/frr restart %s' -s '/usr/lib/frr/frr start %s' -k '/usr/lib/frr/frr stop %s'" -# -zebra=no -bgpd=no -ospfd=no -ospf6d=no -ripd=no -ripngd=no -isisd=no -pimd=no -nhrpd=no -eigrpd=no -sharpd=no -pbrd=no -staticd=no -bfdd=no -fabricd=no - -# -# Command line options for the daemons -# -zebra_options=("-A 127.0.0.1") -bgpd_options=("-A 127.0.0.1") -ospfd_options=("-A 127.0.0.1") -ospf6d_options=("-A ::1") -ripd_options=("-A 127.0.0.1") -ripngd_options=("-A ::1") -isisd_options=("-A 127.0.0.1") -pimd_options=("-A 127.0.0.1") -nhrpd_options=("-A 127.0.0.1") -eigrpd_options=("-A 127.0.0.1") -sharpd_options=("-A 127.0.0.1") -pbrd_options=("-A 127.0.0.1") -staticd_options=("-A 127.0.0.1") -bfdd_options=("-A 127.0.0.1") -fabricd_options=("-A 127.0.0.1") - -# -# If the vtysh_enable is yes, then the unified config is read -# and applied if it exists. If no unified frr.conf exists -# then the per-daemon .conf files are used) -# If vtysh_enable is no or non-existent, the frr.conf is ignored. 
-# it is highly suggested to have this set to yes -vtysh_enable=yes - diff --git a/repos/system_upgrade/el7toel8/actors/quaggatofrr/tests/files/quagga b/repos/system_upgrade/el7toel8/actors/quaggatofrr/tests/files/quagga deleted file mode 100644 index 1f9f0cda..00000000 --- a/repos/system_upgrade/el7toel8/actors/quaggatofrr/tests/files/quagga +++ /dev/null @@ -1,24 +0,0 @@ -# -# Default: Bind all daemon vtys to the loopback(s) only -# -BABELD_OPTS="--daemon -A 192.168.100.1" -BGPD_OPTS="--daemon -A 10.10.100.1" -ISISD_OPTS="--daemon -A ::1" -OSPF6D_OPTS="-A ::1" -OSPFD_OPTS="-A 127.0.0.1" -RIPD_OPTS="-A 127.0.0.1" -RIPNGD_OPTS="-A ::1" -ZEBRA_OPTS="-s 90000000 --daemon -A 127.0.0.1" - -# Watchquagga configuration for LSB initscripts -# -# (Not needed with systemd: the service files are configured to automatically -# restart any daemon on failure. If zebra fails, all running daemons will be -# stopped; zebra will be started again; and then the previously running daemons -# will be started again.) 
-# -# Uncomment and edit this line to reflect the daemons you are actually using: -#WATCH_DAEMONS="zebra bgpd ospfd ospf6d ripd ripngd" -# -# Timer values can be adjusting by editing this line: -WATCH_OPTS="-Az -b_ -r/sbin/service_%s_restart -s/sbin/service_%s_start -k/sbin/service_%s_stop" diff --git a/repos/system_upgrade/el7toel8/actors/quaggatofrr/tests/test_unit_quaggatofrr.py b/repos/system_upgrade/el7toel8/actors/quaggatofrr/tests/test_unit_quaggatofrr.py deleted file mode 100644 index 503dbfbc..00000000 --- a/repos/system_upgrade/el7toel8/actors/quaggatofrr/tests/test_unit_quaggatofrr.py +++ /dev/null @@ -1,168 +0,0 @@ -import contextlib -import os -import shutil - -import pytest - -from leapp.libraries.actor import quaggatofrr -from leapp.libraries.common.testutils import CurrentActorMocked - -ACTIVE_DAEMONS = ['bgpd', 'ospfd', 'zebra'] -CUR_DIR = os.path.dirname(os.path.abspath(__file__)) -FROM_DIR = '/tmp/from_dir/' -TO_DIR = '/tmp/to_dir/' -CONFIG_DATA = { - 'bgpd': '--daemon -A 10.10.100.1', - 'isisd': '--daemon -A ::1', - 'ospf6d': '-A ::1', - 'ospfd': '-A 127.0.0.1', - 'ripd': '-A 127.0.0.1', - 'ripngd': '-A ::1', - 'zebra': '-s 90000000 --daemon -A 127.0.0.1' -} - - -@contextlib.contextmanager -def _create_mock_files(): - try: - os.mkdir(FROM_DIR) - os.mkdir(TO_DIR) - - for num in range(1, 10): - full_path = "{}test_file_{}".format(FROM_DIR, num) - with open(full_path, 'w') as fp: - fp.write("test_file_{}".format(num)) - yield - finally: - shutil.rmtree(FROM_DIR) - shutil.rmtree(TO_DIR) - - -class MockedFilePointer(object): - def __init__(self, orig_open, fname, mode='r'): - self._orig_open = orig_open - self.fname = fname - self.mode = mode - # we want always read only.. 
- self._fp = self._orig_open(fname, 'r') - self._read = None - self.written = None - - def __enter__(self): - return self - - def __exit__(self, *args, **kwargs): - self.close() - - def close(self): - if self._fp: - self._fp.close() - self._fp = None - - def read(self): - self._read = self._fp.read() - return self._read - - def write(self, data): - if not self.written: - self.written = data - else: - self.written += data - - -class MockedOpen(object): - """ - This is mock for the open function. When called it creates - the MockedFilePointer object. - """ - - def __init__(self): - # currently we want to actually open the real files, we need - # to mock other stuff related to file pointers / file objects - self._orig_open = open - self._open_called = [] - - def __call__(self, fname, mode='r'): - opened = MockedFilePointer(self._orig_open, fname, mode) - self._open_called.append(opened) - return opened - - def get_mocked_pointers(self, fname, mode=None): - """ - Get list of MockedFilePointer objects with the specified fname. - - if the mode is set (expected 'r', 'rw', 'w' ..) discovered files are - additionally filtered to match the same mode (same string). 
- """ - fnames = [i for i in self._open_called if i.fname == fname] - return fnames if not mode else [i for i in fnames if i.mode == mode] - - -def test_copy_config_files(): - with _create_mock_files(): - quaggatofrr._copy_config_files(FROM_DIR, TO_DIR) - conf_files = os.listdir(TO_DIR) - for file_name in conf_files: - full_path = os.path.join(TO_DIR, file_name) - assert os.path.isfile(full_path) - - -def test_get_config_data(): - conf_data = quaggatofrr._get_config_data( - os.path.join(CUR_DIR, 'files/quagga') - ) - - assert 'babels' not in conf_data - assert conf_data['bgpd'] == CONFIG_DATA['bgpd'] - assert conf_data['isisd'] == CONFIG_DATA['isisd'] - assert conf_data['ospf6d'] == CONFIG_DATA['ospf6d'] - assert conf_data['ospfd'] == CONFIG_DATA['ospfd'] - assert conf_data['ripd'] == CONFIG_DATA['ripd'] - assert conf_data['ripngd'] == CONFIG_DATA['ripngd'] - assert conf_data['zebra'] == CONFIG_DATA['zebra'] - - -def test_edit_new_config(): - # writing the data to the new config file - data = quaggatofrr._edit_new_config( - os.path.join(CUR_DIR, 'files/daemons'), - ACTIVE_DAEMONS, - CONFIG_DATA - ) - - assert 'zebra=yes' in data - assert 'bgpd=yes' in data - assert 'ospfd=yes' in data - assert 'zebra_options=("-s 90000000 --daemon -A 127.0.0.1")' in data - assert 'bgpd_options=("--daemon -A 10.10.100.1")' in data - assert 'ospfd_options=("-A 127.0.0.1")' in data - assert 'ospf6d_options=("-A ::1")' in data - assert 'ripd_options=("-A 127.0.0.1")' in data - assert 'ripngd_options=("-A ::1")' in data - assert 'isisd_options=("--daemon -A ::1")' in data - - -@pytest.mark.parametrize('dst_ver', ['8.4', '8.5']) -def test_fix_commands(monkeypatch, dst_ver): - monkeypatch.setattr(quaggatofrr, "BGPD_CONF_FILE", os.path.join(CUR_DIR, 'files/bgpd.conf')) - monkeypatch.setattr(quaggatofrr.api, 'current_actor', CurrentActorMocked(dst_ver=dst_ver)) - monkeypatch.setattr(quaggatofrr, "open", MockedOpen(), False) - quaggatofrr._fix_commands() - - fp_list = 
quaggatofrr.open.get_mocked_pointers(quaggatofrr.BGPD_CONF_FILE, "w") - assert len(fp_list) == 1 - assert 'bgp extcommunity-list' in fp_list[0].written - - -def test_fix_commands_not_applied(monkeypatch): - is_file_called = False - - def mocked_is_file(dummy): - is_file_called = True - return is_file_called - - monkeypatch.setattr(quaggatofrr.api, 'current_actor', CurrentActorMocked(dst_ver='8.3')) - monkeypatch.setattr(os.path, 'isfile', mocked_is_file) - monkeypatch.setattr(quaggatofrr, "open", MockedOpen(), False) - quaggatofrr._fix_commands() - assert not is_file_called diff --git a/repos/system_upgrade/el7toel8/actors/registeryumadjustment/actor.py b/repos/system_upgrade/el7toel8/actors/registeryumadjustment/actor.py deleted file mode 100644 index 62e48c24..00000000 --- a/repos/system_upgrade/el7toel8/actors/registeryumadjustment/actor.py +++ /dev/null @@ -1,22 +0,0 @@ -from leapp.actors import Actor -from leapp.models import DNFWorkaround -from leapp.tags import FactsPhaseTag, IPUWorkflowTag - - -class RegisterYumAdjustment(Actor): - """ - Registers a workaround which will adjust the yum directories during the upgrade. 
- """ - - name = 'register_yum_adjustment' - consumes = () - produces = (DNFWorkaround,) - tags = (IPUWorkflowTag, FactsPhaseTag) - - def process(self): - self.produce( - DNFWorkaround( - display_name='yum config fix', - script_path=self.get_tool_path('handleyumconfig'), - ) - ) diff --git a/repos/system_upgrade/el7toel8/actors/registeryumadjustment/tests/test_register_yum_adjustments.py b/repos/system_upgrade/el7toel8/actors/registeryumadjustment/tests/test_register_yum_adjustments.py deleted file mode 100644 index f8439aa3..00000000 --- a/repos/system_upgrade/el7toel8/actors/registeryumadjustment/tests/test_register_yum_adjustments.py +++ /dev/null @@ -1,12 +0,0 @@ -import os.path - -from leapp.models import DNFWorkaround -from leapp.snactor.fixture import current_actor_context - - -def test_register_yum_adjustments(current_actor_context): - current_actor_context.run() - assert len(current_actor_context.consume(DNFWorkaround)) == 1 - assert current_actor_context.consume(DNFWorkaround)[0].display_name == 'yum config fix' - assert os.path.basename(current_actor_context.consume(DNFWorkaround)[0].script_path) == 'handleyumconfig' - assert os.path.exists(current_actor_context.consume(DNFWorkaround)[0].script_path) diff --git a/repos/system_upgrade/el7toel8/actors/removeoldpammodulesapply/actor.py b/repos/system_upgrade/el7toel8/actors/removeoldpammodulesapply/actor.py deleted file mode 100644 index cda33e1b..00000000 --- a/repos/system_upgrade/el7toel8/actors/removeoldpammodulesapply/actor.py +++ /dev/null @@ -1,28 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor.removeoldpammodulesapply import comment_modules, read_file -from leapp.libraries.common.pam import PAM -from leapp.models import RemovedPAMModules -from leapp.tags import IPUWorkflowTag, PreparationPhaseTag - - -class RemoveOldPAMModulesApply(Actor): - """ - Remove old PAM modules that are no longer available in RHEL-8 from - PAM configuration to avoid system lock out. 
- """ - - name = 'removed_pam_modules_apply' - consumes = (RemovedPAMModules,) - produces = () - tags = (IPUWorkflowTag, PreparationPhaseTag) - - def process(self): - for model in self.consume(RemovedPAMModules): - for path in PAM.files: - content = read_file(path) - if not content: # Nothing to do if no content? - continue - - with open(path, 'w') as f: - f.write(comment_modules(model.modules, content)) - break diff --git a/repos/system_upgrade/el7toel8/actors/removeoldpammodulesapply/libraries/removeoldpammodulesapply.py b/repos/system_upgrade/el7toel8/actors/removeoldpammodulesapply/libraries/removeoldpammodulesapply.py deleted file mode 100644 index 8fd3e13a..00000000 --- a/repos/system_upgrade/el7toel8/actors/removeoldpammodulesapply/libraries/removeoldpammodulesapply.py +++ /dev/null @@ -1,27 +0,0 @@ -import os -import re - - -def read_file(config): - """ - Read file contents. Return empty string if the file does not exist. - """ - if not os.path.isfile(config): - return "" - with open(config) as f: - return f.read() - - -def comment_modules(modules, content): - """ - Disable modules in file content by commenting them. 
- """ - for module in modules: - content = re.sub( - r'^([ \t]*[^#\s]+.*{0}\.so.*)$'.format(module), - r'#\1', - content, - flags=re.MULTILINE - ) - - return content diff --git a/repos/system_upgrade/el7toel8/actors/removeoldpammodulesapply/tests/unit_test_removeoldpammodulesapply.py b/repos/system_upgrade/el7toel8/actors/removeoldpammodulesapply/tests/unit_test_removeoldpammodulesapply.py deleted file mode 100644 index 169bf0c1..00000000 --- a/repos/system_upgrade/el7toel8/actors/removeoldpammodulesapply/tests/unit_test_removeoldpammodulesapply.py +++ /dev/null @@ -1,70 +0,0 @@ -import textwrap - -from leapp.libraries.actor.removeoldpammodulesapply import comment_modules, read_file - - -def get_config(config): - return textwrap.dedent(config).strip() - - -def test_read_file__non_existent(): - content = read_file('/this/does/not/exist') - assert content == '' - - -def test_read_file__ok(): - content = read_file(__file__) - assert content != '' - assert 'test_read_file__ok' in content - - -def test_comment_modules__none(): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_pkcs11.so - auth sufficient pam_krb5.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - expected = pam - - content = comment_modules([], pam) - assert content == expected - - -def test_comment_modules__replaced_single(): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_pkcs11.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - expected = get_config(''' - auth sufficient pam_unix.so - #auth sufficient pam_pkcs11.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - content = comment_modules(['pam_pkcs11', 'pam_krb5'], pam) - assert content == expected - - -def test_comment_modules__replaced_all(): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_pkcs11.so - auth sufficient pam_krb5.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - expected = 
get_config(''' - auth sufficient pam_unix.so - #auth sufficient pam_pkcs11.so - #auth sufficient pam_krb5.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - content = comment_modules(['pam_pkcs11', 'pam_krb5'], pam) - assert content == expected diff --git a/repos/system_upgrade/el7toel8/actors/removeoldpammodulescheck/actor.py b/repos/system_upgrade/el7toel8/actors/removeoldpammodulescheck/actor.py deleted file mode 100644 index cf4fdc84..00000000 --- a/repos/system_upgrade/el7toel8/actors/removeoldpammodulescheck/actor.py +++ /dev/null @@ -1,119 +0,0 @@ -from leapp import reporting -from leapp.actors import Actor -from leapp.dialogs import Dialog -from leapp.dialogs.components import BooleanComponent -from leapp.models import RemovedPAMModules -from leapp.reporting import create_report, Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class RemoveOldPAMModulesCheck(Actor): - """ - Check if it is all right to disable PAM modules that are not in RHEL-8. - - If admin will refuse to disable these modules (pam_pkcs11 and pam_krb5), - upgrade will be stopped. Otherwise we would risk locking out the system - once these modules are removed. - """ - name = 'removed_pam_modules_check' - consumes = (RemovedPAMModules,) - produces = (Report,) - tags = (IPUWorkflowTag, ChecksPhaseTag) - dialogs = ( - Dialog( - scope='remove_pam_pkcs11_module_check', - reason='Confirmation', - components=( - BooleanComponent( - key='confirm', - label='Disable pam_pkcs11 module in PAM configuration? ' - 'If no, the upgrade process will be interrupted.', - description='PAM module pam_pkcs11 is no longer available ' - 'in RHEL-8 since it was replaced by SSSD.', - reason='Leaving this module in PAM configuration may ' - 'lock out the system.' - ), - ) - ), - Dialog( - scope='remove_pam_krb5_module_check', - reason='Confirmation', - components=( - BooleanComponent( - key='confirm', - label='Disable pam_krb5 module in PAM configuration? 
' - 'If no, the upgrade process will be interrupted.', - description='PAM module pam_krb5 is no longer available ' - 'in RHEL-8 since it was replaced by SSSD.', - reason='Leaving this module in PAM configuration may ' - 'lock out the system.' - ), - ) - ), - ) - - modules = [] - - def process(self): - model = next(self.consume(RemovedPAMModules)) - - for module in model.modules: - result = self.confirm(module) - if result: - self.produce_report(module) - elif result is False: - # user specifically chose to disagree with auto disablement - self.produce_inhibitor(module) - - def confirm(self, module): - questions = { - 'pam_pkcs11': self.dialogs[0], - 'pam_krb5': self.dialogs[1] - } - - return self.get_answers(questions[module]).get('confirm') - - def produce_report(self, module): - create_report([ - reporting.Title('Module {0} will be removed from PAM configuration'.format(module)), - reporting.Summary( - 'Module {0} was surpassed by SSSD and therefore it was ' - 'removed from RHEL-8. Keeping it in PAM configuration may ' - 'lock out the system thus it will be automatically removed ' - 'from PAM configuration before upgrading to RHEL-8. ' - 'Please switch to SSSD to recover the functionality ' - 'of {0}.'.format(module) - ), - reporting.Severity(reporting.Severity.MEDIUM), - reporting.Groups([ - reporting.Groups.AUTHENTICATION, - reporting.Groups.SECURITY, - reporting.Groups.TOOLS - ]), - reporting.Remediation(hint='Configure SSSD to replace {0}'.format(module)), - reporting.RelatedResource('package', 'sssd') - ]) - - def produce_inhibitor(self, module): - create_report([ - reporting.Title( - 'Upgrade process was interrupted because {0} is enabled in ' - 'PAM configuration and SA user refused to disable it ' - 'automatically.'.format(module)), - reporting.Summary( - 'Module {0} was surpassed by SSSD and therefore it was ' - 'removed from RHEL-8. 
Keeping it in PAM configuration may ' - 'lock out the system thus it is necessary to disable it ' - 'before the upgrade process can continue.'.format(module) - ), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([ - reporting.Groups.AUTHENTICATION, - reporting.Groups.SECURITY, - reporting.Groups.TOOLS - ]), - reporting.Groups([reporting.Groups.INHIBITOR]), - reporting.Remediation( - hint='Disable {0} module and switch to SSSD to recover its functionality.'.format(module)), - reporting.RelatedResource('package', 'sssd') - ]) diff --git a/repos/system_upgrade/el7toel8/actors/removeoldpammodulesscanner/actor.py b/repos/system_upgrade/el7toel8/actors/removeoldpammodulesscanner/actor.py deleted file mode 100644 index d22dd314..00000000 --- a/repos/system_upgrade/el7toel8/actors/removeoldpammodulesscanner/actor.py +++ /dev/null @@ -1,24 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor.removeoldpammodulesscanner import RemoveOldPAMModulesScannerLibrary -from leapp.libraries.common.pam import PAM -from leapp.models import RemovedPAMModules -from leapp.tags import FactsPhaseTag, IPUWorkflowTag - - -class RemoveOldPAMModulesScanner(Actor): - """ - Scan PAM configuration for modules that are not available in RHEL-8. - - PAM module pam_krb5 and pam_pkcs11 are no longer present in RHEL-8 - and must be removed from PAM configuration, otherwise it may lock out - the system. 
- """ - name = 'removed_pam_modules_scanner' - consumes = () - produces = (RemovedPAMModules,) - tags = (IPUWorkflowTag, FactsPhaseTag) - - def process(self): - pam = PAM.from_system_configuration() - scanner = RemoveOldPAMModulesScannerLibrary(pam) - self.produce(scanner.process()) diff --git a/repos/system_upgrade/el7toel8/actors/removeoldpammodulesscanner/libraries/removeoldpammodulesscanner.py b/repos/system_upgrade/el7toel8/actors/removeoldpammodulesscanner/libraries/removeoldpammodulesscanner.py deleted file mode 100644 index 8b133f59..00000000 --- a/repos/system_upgrade/el7toel8/actors/removeoldpammodulesscanner/libraries/removeoldpammodulesscanner.py +++ /dev/null @@ -1,27 +0,0 @@ -from leapp.models import RemovedPAMModules - - -class RemoveOldPAMModulesScannerLibrary(object): - """ - Scan PAM configuration for modules that are not available in RHEL-8. - - PAM module pam_krb5 and pam_pkcs11 are no longer present in RHEL-8 - and must be removed from PAM configuration, otherwise it may lock out - the system. - """ - - def __init__(self, pam): - self.pam = pam - - def process(self): - # PAM modules pam_pkcs11 and pam_krb5 are no longer available in - # RHEL8. We must remove them because if they are left in PAM - # configuration it may lock out the system. 
- modules = [] - for module in ['pam_krb5', 'pam_pkcs11']: - if self.pam.has(module): - modules.append(module) - - return RemovedPAMModules( - modules=modules - ) diff --git a/repos/system_upgrade/el7toel8/actors/removeoldpammodulesscanner/tests/unit_test_removeoldpammodulesscanner.py b/repos/system_upgrade/el7toel8/actors/removeoldpammodulesscanner/tests/unit_test_removeoldpammodulesscanner.py deleted file mode 100644 index 4eaa44ea..00000000 --- a/repos/system_upgrade/el7toel8/actors/removeoldpammodulesscanner/tests/unit_test_removeoldpammodulesscanner.py +++ /dev/null @@ -1,62 +0,0 @@ -import textwrap - -from leapp.libraries.actor.removeoldpammodulesscanner import RemoveOldPAMModulesScannerLibrary -from leapp.libraries.common.pam import PAM - - -def get_config(config): - return textwrap.dedent(config).strip() - - -def test_RemoveOldPAMModulesScannerLibrary_process__pkcs11(): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_pkcs11.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - obj = RemoveOldPAMModulesScannerLibrary(PAM(pam)) - model = obj.process() - assert model.modules == ['pam_pkcs11'] - - -def test_RemoveOldPAMModulesScannerLibrary_process__krb5(): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_krb5.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - obj = RemoveOldPAMModulesScannerLibrary(PAM(pam)) - model = obj.process() - assert model.modules == ['pam_krb5'] - - -def test_RemoveOldPAMModulesScannerLibrary_process__all(): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_krb5.so - auth sufficient pam_pkcs11.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - obj = RemoveOldPAMModulesScannerLibrary(PAM(pam)) - model = obj.process() - assert len(model.modules) == 2 - assert 'pam_krb5' in model.modules - assert 'pam_pkcs11' in model.modules - - -def test_RemoveOldPAMModulesScannerLibrary_process__none(): - pam = 
get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - obj = RemoveOldPAMModulesScannerLibrary(PAM(pam)) - model = obj.process() - assert not model.modules diff --git a/repos/system_upgrade/el7toel8/actors/sanebackendsmigrate/actor.py b/repos/system_upgrade/el7toel8/actors/sanebackendsmigrate/actor.py deleted file mode 100644 index a57ff366..00000000 --- a/repos/system_upgrade/el7toel8/actors/sanebackendsmigrate/actor.py +++ /dev/null @@ -1,21 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import sanebackendsmigrate -from leapp.models import DistributionSignedRPM -from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag - - -class SanebackendsMigrate(Actor): - """ - Actor for migrating sane-backends configuration files. - - Adds USB quirks for support specific USB scanners if they - are not added during package manager transaction. - """ - - name = 'sanebackends_migrate' - consumes = (DistributionSignedRPM,) - produces = () - tags = (ApplicationsPhaseTag, IPUWorkflowTag) - - def process(self): - sanebackendsmigrate.update_sane() diff --git a/repos/system_upgrade/el7toel8/actors/sanebackendsmigrate/libraries/sanebackendsmigrate.py b/repos/system_upgrade/el7toel8/actors/sanebackendsmigrate/libraries/sanebackendsmigrate.py deleted file mode 100644 index 4cf15dff..00000000 --- a/repos/system_upgrade/el7toel8/actors/sanebackendsmigrate/libraries/sanebackendsmigrate.py +++ /dev/null @@ -1,318 +0,0 @@ -from leapp.libraries.common.rpms import has_package -from leapp.libraries.stdlib import api -from leapp.models import DistributionSignedRPM - -# Database of changes in configuration files of sane-backends -# between RHELs - -CANON_DR = [ - '# P-150M', - 'usb 0x1083 0x162c', - '# DR-M160', - 'option extra-status 1', - 'option duplex-offset 400', - 'usb 0x1083 0x163e', - '# DR-M140', - 'option extra-status 1', - 'option duplex-offset 400', - 'usb 0x1083 0x163f', - '# DR-C125', - 'option 
duplex-offset 400', - 'usb 0x1083 0x1640', - '# DR-P215', - 'usb 0x1083 0x1641', - '# FSU-201', - 'usb 0x1083 0x1648', - '# DR-C130', - 'usb 0x1083 0x164a', - '# DR-P208', - 'usb 0x1083 0x164b', - '# DR-G1130', - 'option buffer-size 8000000', - 'usb 0x1083 0x164f', - '# DR-G1100', - 'option buffer-size 8000000', - 'usb 0x1083 0x1650', - '# DR-C120', - 'usb 0x1083 0x1651', - '# P-201', - 'usb 0x1083 0x1652', - '# DR-F120', - 'option duplex-offset 1640', - 'usb 0x1083 0x1654', - '# DR-M1060', - 'usb 0x1083 0x1657', - '# DR-C225', - 'usb 0x1083 0x1658', - '# DR-P215II', - 'usb 0x1083 0x1659', - '# P-215II', - 'usb 0x1083 0x165b', - '# DR-P208II', - 'usb 0x1083 0x165d', - '# P-208II', - 'usb 0x1083 0x165f' -] - -CARDSCAN = [ - '# Sanford Cardscan 800c', - 'usb 0x0451 0x6250' -] - -DLL = ['epsonds'] - -EPJITSU = [ - '# Fujitsu fi-65F', - 'firmware /usr/share/sane/epjitsu/65f_0A01.nal', - 'usb 0x04c5 0x11bd', - '# Fujitsu S1100', - 'firmware /usr/share/sane/epjitsu/1100_0B00.nal', - 'usb 0x04c5 0x1200', - '# Fujitsu S1300i', - 'firmware /usr/share/sane/epjitsu/1300i_0D12.nal', - 'usb 0x04c5 0x128d', - '# Fujitsu S1100i', - 'firmware /usr/share/sane/epjitsu/1100i_0A00.nal', - 'usb 0x04c5 0x1447' -] - -FUJITSU = [ - '#fi-6125', - 'usb 0x04c5 0x11ee', - '#fi-6225', - 'usb 0x04c5 0x11ef', - '#ScanSnap SV600', - 'usb 0x04c5 0x128e', - '#fi-7180', - 'usb 0x04c5 0x132c', - '#fi-7280', - 'usb 0x04c5 0x132d', - '#fi-7160', - 'usb 0x04c5 0x132e', - '#fi-7260', - 'usb 0x04c5 0x132f', - '#ScanSnap iX500EE', - 'usb 0x04c5 0x13f3', - '#ScanSnap iX100', - 'usb 0x04c5 0x13f4', - '#ScanPartner SP25', - 'usb 0x04c5 0x1409', - '#ScanPartner SP30', - 'usb 0x04c5 0x140a', - '#ScanPartner SP30F', - 'usb 0x04c5 0x140c', - '#fi-6140ZLA', - 'usb 0x04c5 0x145f', - '#fi-6240ZLA', - 'usb 0x04c5 0x1460', - '#fi-6130ZLA', - 'usb 0x04c5 0x1461', - '#fi-6230ZLA', - 'usb 0x04c5 0x1462', - '#fi-6125ZLA', - 'usb 0x04c5 0x1463', - '#fi-6225ZLA', - 'usb 0x04c5 0x1464', - '#fi-6135ZLA', - 'usb 0x04c5 
0x146b', - '#fi-6235ZLA', - 'usb 0x04c5 0x146c', - '#fi-6120ZLA', - 'usb 0x04c5 0x146d', - '#fi-6220ZLA', - 'usb 0x04c5 0x146e', - '#N7100', - 'usb 0x04c5 0x146f', - '#fi-6400', - 'usb 0x04c5 0x14ac', - '#fi-7480', - 'usb 0x04c5 0x14b8', - '#fi-6420', - 'usb 0x04c5 0x14bd', - '#fi-7460', - 'usb 0x04c5 0x14be', - '#fi-7140', - 'usb 0x04c5 0x14df', - '#fi-7240', - 'usb 0x04c5 0x14e0', - '#fi-7135', - 'usb 0x04c5 0x14e1', - '#fi-7235', - 'usb 0x04c5 0x14e2', - '#fi-7130', - 'usb 0x04c5 0x14e3', - '#fi-7230', - 'usb 0x04c5 0x14e4', - '#fi-7125', - 'usb 0x04c5 0x14e5', - '#fi-7225', - 'usb 0x04c5 0x14e6', - '#fi-7120', - 'usb 0x04c5 0x14e7', - '#fi-7220', - 'usb 0x04c5 0x14e8', - '#fi-400F', - 'usb 0x04c5 0x151e', - '#fi-7030', - 'usb 0x04c5 0x151f', - '#fi-7700', - 'usb 0x04c5 0x1520', - '#fi-7600', - 'usb 0x04c5 0x1521', - '#fi-7700S', - 'usb 0x04c5 0x1522' -] - -CANON = [ - '# Canon LiDE 80', - 'usb 0x04a9 0x2214', - '# Canon LiDE 120', - 'usb 0x04a9 0x190e', - '# Canon LiDE 220', - 'usb 0x04a9 0x190f' -] - -XEROX_MFP = [ - '#Samsung X4300 Series', - 'usb 0x04e8 0x3324', - '#Samsung K4350 Series', - 'usb 0x04e8 0x3325', - '#Samsung X7600 Series', - 'usb 0x04e8 0x3326', - '#Samsung K7600 Series', - 'usb 0x04e8 0x3327', - '#Samsung K703 Series', - 'usb 0x04e8 0x3331', - '#Samsung X703 Series', - 'usb 0x04e8 0x3332', - '#Samsung M458x Series', - 'usb 0x04e8 0x346f', - '#Samsung M4370 5370 Series', - 'usb 0x04e8 0x3471', - '#Samsung X401 Series', - 'usb 0x04e8 0x3477', - '#Samsung K401 Series', - 'usb 0x04e8 0x3478', - '#Samsung K3250 Series', - 'usb 0x04e8 0x3481', - '#Samsung X3220 Series', - 'usb 0x04e8 0x3482' -] - -NEW_QUIRKS = { - '/etc/sane.d/canon_dr.conf': CANON_DR, - '/etc/sane.d/cardscan.conf': CARDSCAN, - '/etc/sane.d/dll.conf': DLL, - '/etc/sane.d/epjitsu.conf': EPJITSU, - '/etc/sane.d/fujitsu.conf': FUJITSU, - '/etc/sane.d/canon.conf': CANON, - '/etc/sane.d/xerox_mfp.conf': XEROX_MFP -} -""" -Dictionary of configuration files which changes in 1.0.27 -""" - 
- -def _macro_exists(path, macro): - """ - Check if macro is in the file. - - :param str path: string representing the full path of the config file - :param str macro: new directive to be added - :return boolean res: macro does/does not exist in the file - """ - with open(path, 'r') as f: - lines = f.readlines() - - for line in lines: - if line.lstrip().startswith(macro): - return True - return False - - -def _append_string(path, content): - """ - Append string at the end of file. - - :param str path: string representing the full path of file - :param str content: preformatted string to be added - """ - with open(path, 'a') as f: - f.write(content) - - -def update_config(path, - quirks, - check_function=_macro_exists, - append_function=_append_string): - """ - Insert expected content into the file on the path if it is not - in the file already. - - :param str path: string representing the full path of the config file - :param func check_function: function to be used to check if string is in the file - :param func append_function: function to be used to append string - """ - - macros = [] - for macro in quirks: - if not check_function(path, macro): - macros.append(macro) - - if not macros: - return - - fmt_input = "\n{comment_line}\n{content}\n".format(comment_line='# content added by Leapp', - content='\n'.join(macros)) - - try: - append_function(path, fmt_input) - except IOError: - raise IOError('Error during writing to file: {}.'.format(path)) - - -def _check_package(pkg_name): - """ - Checks if the package is installed and signed by Red Hat - - :param str pkg_name: name of package - """ - - return has_package(DistributionSignedRPM, pkg_name) - - -def update_sane(debug_log=api.current_logger().debug, - error_log=api.current_logger().error, - is_installed=_check_package, - append_function=_append_string, - check_function=_macro_exists): - """ - Iterate over dictionary and updates each configuration file. 
- - :param func debug_log: function for debug logging - :param func error_log: function for error logging - :param func is_installed: checks if the package is installed - :param func append_function: appends a string into file - :param func check_function: checks if a string exists in file - """ - - error_list = [] - - if not is_installed('sane-backends'): - return - - for path, lines in NEW_QUIRKS.items(): - - debug_log('Updating SANE configuration file {}.'.format(path)) - - try: - update_config(path, lines, check_function, append_function) - except (OSError, IOError) as error: - error_list.append((path, error)) - - if error_list: - error_log('The files below have not been modified ' - '(error message included):' + - ''.join(['\n - {}: {}'.format(err[0], err[1]) - for err in error_list])) - return diff --git a/repos/system_upgrade/el7toel8/actors/sanebackendsmigrate/tests/test_update_config_sanebackendsmigrate.py b/repos/system_upgrade/el7toel8/actors/sanebackendsmigrate/tests/test_update_config_sanebackendsmigrate.py deleted file mode 100644 index 96e58dea..00000000 --- a/repos/system_upgrade/el7toel8/actors/sanebackendsmigrate/tests/test_update_config_sanebackendsmigrate.py +++ /dev/null @@ -1,238 +0,0 @@ -import pytest - -from leapp.libraries.actor.sanebackendsmigrate import ( - CANON, - CANON_DR, - CARDSCAN, - DLL, - EPJITSU, - FUJITSU, - update_config, - XEROX_MFP -) - - -def _pattern_exists(content, macro): - for line in content.split('\n'): - if line.lstrip().startswith(macro): - return True - return False - - -def _create_original_file(file_content): - content = '' - for line in file_content: - fmt_line = '{}\n'.format(line) - content += fmt_line - return content - - -def _create_expected_file(original_content, new_content): - macros = [] - for line in new_content: - if not _pattern_exists(original_content, line): - macros.append(line) - - fmt_input = '' - if macros: - fmt_input = "\n{comment_line}\n{content}\n".format(comment_line='# content added by 
Leapp', - content='\n'.join(macros)) - - return '\n'.join((original_content, fmt_input)) - - -testdata = [ - ( - _create_original_file(['']), - _create_expected_file('', CANON), - CANON - ), - ( - _create_original_file(['']), - _create_expected_file('', CANON_DR), - CANON_DR - ), - ( - _create_original_file(['']), - _create_expected_file('', CARDSCAN), - CARDSCAN - ), - ( - _create_original_file(['']), - _create_expected_file('', DLL), - DLL - ), - ( - _create_original_file(['']), - _create_expected_file('', EPJITSU), - EPJITSU - ), - ( - _create_original_file(['']), - _create_expected_file('', FUJITSU), - FUJITSU - ), - ( - _create_original_file(['']), - _create_expected_file('', XEROX_MFP), - XEROX_MFP - ), - ( - _create_original_file(['fdfdfdr']), - _create_expected_file('fdfdfdr', CANON), - CANON - ), - ( - _create_original_file(['fdfdfdr']), - _create_expected_file('fdfdfdr', CANON_DR), - CANON_DR - ), - ( - _create_original_file(['fdfdfdr']), - _create_expected_file('fdfdfdr', CARDSCAN), - CARDSCAN - ), - ( - _create_original_file(['fdfdfdr']), - _create_expected_file('fdfdfdr', DLL), - DLL - ), - ( - _create_original_file(['fdfdfdr']), - _create_expected_file('fdfdfdr', EPJITSU), - EPJITSU - ), - ( - _create_original_file(['fdfdfdr']), - _create_expected_file('fdfdfdr', FUJITSU), - FUJITSU - ), - ( - _create_original_file(['fdfdfdr']), - _create_expected_file('fdfdfdr', XEROX_MFP), - XEROX_MFP - ), - ( - _create_original_file(['usb 0x04a9 0x2214']), - _create_expected_file('usb 0x04a9 0x2214', CANON), - CANON - ), - ( - _create_original_file(['usb 0x1083 0x162c']), - _create_expected_file('usb 0x1083 0x162c', CANON_DR), - CANON_DR - ), - ( - _create_original_file(['usb 0x0451 0x6250']), - _create_expected_file('usb 0x0451 0x6250', CARDSCAN), - CARDSCAN - ), - ( - _create_original_file(['#usb 0x0451 0x6250']), - _create_expected_file('#usb 0x0451 0x6250', CARDSCAN), - CARDSCAN - ), - ( - _create_original_file(['epsonds']), - _create_expected_file('epsonds', 
DLL), - DLL - ), - ( - _create_original_file(['usb 0x04c5 0x11bd']), - _create_expected_file('usb 0x04c5 0x11bd', EPJITSU), - EPJITSU - ), - ( - _create_original_file(['usb 0x04c5 0x132c']), - _create_expected_file('usb 0x04c5 0x132c', FUJITSU), - FUJITSU - ), - ( - _create_original_file(['usb 0x04e8 0x3471']), - _create_expected_file('usb 0x04e8 0x3471', XEROX_MFP), - XEROX_MFP - ), - ( - _create_original_file(CANON), - _create_original_file(CANON), - CANON - ), - ( - _create_original_file(CANON_DR), - _create_original_file(CANON_DR), - CANON_DR - ), - ( - _create_original_file(CARDSCAN), - _create_original_file(CARDSCAN), - CARDSCAN - ), - ( - _create_original_file(DLL), - _create_original_file(DLL), - DLL - ), - ( - _create_original_file(EPJITSU), - _create_original_file(EPJITSU), - EPJITSU - ), - ( - _create_original_file(FUJITSU), - _create_original_file(FUJITSU), - FUJITSU - ), - ( - _create_original_file(XEROX_MFP), - _create_original_file(XEROX_MFP), - XEROX_MFP - ) -] -""" -3-tuple of original file, file after migration and list of lines which -will be tried to add -""" - - -class MockFile(object): - def __init__(self, path, content=None): - self.path = path - self.content = content - self.error = False - - def append(self, path, content): - if path != self.path: - self.error = True - if not self.error: - self.content += content - return self.content - raise IOError('Error during writing to file: {}.'.format(path)) - - def exists(self, path, macro): - for line in self.content.split('\n'): - if line.lstrip().startswith(macro) and self.path == path: - return True - return False - - -def test_update_config_file_errors(): - path = 'foo' - new_content = ['fdfgdfg', 'gnbfgnf'] - - f = MockFile(path, content='') - - with pytest.raises(IOError): - update_config('bar', new_content, f.exists, f.append) - - assert f.content == '' - - -@pytest.mark.parametrize('orig_content,expected_result,content_to_add', testdata) -def 
test_update_config_append_into_file(orig_content, - expected_result, - content_to_add): - f = MockFile('foo', orig_content) - - update_config('foo', content_to_add, f.exists, f.append) - - assert f.content == expected_result diff --git a/repos/system_upgrade/el7toel8/actors/sanebackendsmigrate/tests/test_update_sane_sanebackendsmigrate.py b/repos/system_upgrade/el7toel8/actors/sanebackendsmigrate/tests/test_update_sane_sanebackendsmigrate.py deleted file mode 100644 index f24f0c7c..00000000 --- a/repos/system_upgrade/el7toel8/actors/sanebackendsmigrate/tests/test_update_sane_sanebackendsmigrate.py +++ /dev/null @@ -1,115 +0,0 @@ -import pytest - -from leapp.libraries.actor.sanebackendsmigrate import NEW_QUIRKS, update_sane - -testdata = [ - {'sane-backends': '/etc/sane.d/canon_dr.conf'}, - {'sane-backends': ''}, - {'ble': ''} -] - - -class MockLogger(object): - def __init__(self): - self.debugmsg = '' - self.errmsg = '' - - def debug(self, message): - self.debugmsg += message - - def error(self, message): - self.errmsg += message - - -class MockPackage(object): - def __init__(self, name, config): - self.name = name - self.config = config - self.config_content = '' - - -class MockPackageSet(object): - def __init__(self): - self.installed_packages = None - - def add_packages(self, pkgs): - if self.installed_packages is None: - self.installed_packages = [] - - for rpm, config in pkgs.items(): - self.installed_packages.append(MockPackage(rpm, config)) - - def is_installed(self, pkg): - for rpm in self.installed_packages: - if pkg == rpm.name: - return True - return False - - def append_content(self, path, content): - found = False - - for rpm in self.installed_packages: - if path == rpm.config: - found = True - rpm.config_content += content - if not found: - raise IOError('Error during writing to file: {}.'.format(path)) - - def check_content(self, path, content): - found = False - - for rpm in self.installed_packages: - if path == rpm.config and content in 
rpm.config_content: - found = True - - return found - - -class ExpectedOutput(object): - def __init__(self): - self.debugmsg = '' - self.errmsg = '' - - def create(self, rpms): - error_list = [] - found = False - - for pkg, config in rpms.items(): - if pkg == 'sane-backends': - found = True - break - - if found: - for sane_config in NEW_QUIRKS.keys(): - self.debugmsg += ('Updating SANE configuration file {}.' - .format(sane_config)) - if config == '' or config != sane_config: - error_list.append((sane_config, - 'Error during writing to file: {}.' - .format(sane_config))) - - if error_list: - self.errmsg = ('The files below have not been modified ' - '(error message included):' + - ''.join(['\n - {}: {}'.format(err[0], err[1]) - for err in error_list])) - - -@pytest.mark.parametrize("rpms", testdata) -def test_actor_check_report(rpms): - logger = MockLogger() - installed_packages = MockPackageSet() - - installed_packages.add_packages(rpms) - - expected = ExpectedOutput() - expected.create(rpms) - - update_sane(logger.debug, - logger.error, - installed_packages.is_installed, - installed_packages.append_content, - installed_packages.check_content) - - assert expected.debugmsg == logger.debugmsg - assert expected.errmsg == logger.errmsg diff --git a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/actor.py b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/actor.py deleted file mode 100644 index 7fc38e10..00000000 --- a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/actor.py +++ /dev/null @@ -1,22 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor.satellite_upgrade_check import satellite_upgrade_check -from leapp.models import Report, SatelliteFacts -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class SatelliteUpgradeCheck(Actor): - """ - Check state of Satellite system before upgrade - """ - - name = 'satellite_upgrade_check' - consumes = (SatelliteFacts,) - produces = (Report,) - tags = 
(IPUWorkflowTag, ChecksPhaseTag) - - def process(self): - facts = next(self.consume(SatelliteFacts), None) - if not facts or not facts.has_foreman: - return - - satellite_upgrade_check(facts) diff --git a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/libraries/satellite_upgrade_check.py b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/libraries/satellite_upgrade_check.py deleted file mode 100644 index 82148ef3..00000000 --- a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/libraries/satellite_upgrade_check.py +++ /dev/null @@ -1,64 +0,0 @@ -import textwrap - -from leapp import reporting - - -def satellite_upgrade_check(facts): - if facts.postgresql.local_postgresql: - if facts.postgresql.old_var_lib_pgsql_data: - title = "Old PostgreSQL data found in /var/lib/pgsql/data" - summary = """ - The upgrade wants to move PostgreSQL data to /var/lib/pgsql/data, - but this directory already exists on your system. - Please make sure /var/lib/pgsql/data doesn't exist prior to the upgrade. - """ - reporting.create_report([ - reporting.Title(title), - reporting.Summary(textwrap.dedent(summary).strip()), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([]), - reporting.Groups([reporting.Groups.INHIBITOR]) - ]) - - title = "Satellite PostgreSQL data migration" - flags = [] - severity = reporting.Severity.MEDIUM - reindex_msg = textwrap.dedent(""" - After the data has been moved to the new location, all databases will require a REINDEX. - This will happen automatically during the first boot of the system. - """).strip() - - if facts.postgresql.same_partition: - migration_msg = "Your PostgreSQL data will be automatically migrated." - else: - scl_psql_path = '/var/opt/rh/rh-postgresql12/lib/pgsql/data/' - if facts.postgresql.space_required > facts.postgresql.space_available: - storage_message = """You currently don't have enough free storage to move the data. 
- Automatic moving cannot be performed.""" - flags = [reporting.Groups.INHIBITOR] - severity = reporting.Severity.HIGH - else: - storage_message = """You currently have enough free storage to move the data. - This operation can be performed by the upgrade process.""" - migration_msg = """ - Your PostgreSQL data in {} is currently on a dedicated volume. - PostgreSQL on RHEL8 expects the data to live in /var/lib/pgsql/data. - {} - However, instead of moving the data over, you might want to consider manually adapting your mounts, - so that the contents of {} are available in /var/lib/pgsql/data. - """.format(scl_psql_path, storage_message, scl_psql_path) - - summary = "{}\n{}".format(textwrap.dedent(migration_msg).strip(), reindex_msg) - - reporting.create_report([ - reporting.Title(title), - reporting.Summary(summary), - reporting.ExternalLink( - url='https://access.redhat.com/solutions/6794671', - title='Leapp preupgrade of Red Hat Satellite 6 fails on ' - 'Old PostgreSQL data found in /var/lib/pgsql/data' - ), - reporting.Severity(severity), - reporting.Groups([]), - reporting.Groups(flags) - ]) diff --git a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/tests/unit_test_satellite_upgrade_check.py b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/tests/unit_test_satellite_upgrade_check.py deleted file mode 100644 index 8b75adf7..00000000 --- a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/tests/unit_test_satellite_upgrade_check.py +++ /dev/null @@ -1,83 +0,0 @@ -from leapp import reporting -from leapp.libraries.actor.satellite_upgrade_check import satellite_upgrade_check -from leapp.libraries.common.testutils import create_report_mocked -from leapp.models import SatelliteFacts, SatellitePostgresqlFacts - - -def test_old_data(monkeypatch): - monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) - - satellite_upgrade_check(SatelliteFacts(has_foreman=True, - 
postgresql=SatellitePostgresqlFacts(local_postgresql=True, old_var_lib_pgsql_data=True))) - - assert reporting.create_report.called == 2 - - expected_title = 'Old PostgreSQL data found in /var/lib/pgsql/data' - assert next((report for report in reporting.create_report.reports if report.get('title') == expected_title), None) - - expected_title = 'Satellite PostgreSQL data migration' - assert next((report for report in reporting.create_report.reports if report.get('title') == expected_title), None) - - -def test_no_old_data(monkeypatch): - monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) - - satellite_upgrade_check(SatelliteFacts(has_foreman=True, - postgresql=SatellitePostgresqlFacts(local_postgresql=True, old_var_lib_pgsql_data=False))) - - assert reporting.create_report.called == 1 - - expected_title = 'Satellite PostgreSQL data migration' - - assert expected_title == reporting.create_report.report_fields['title'] - - -def test_same_disk(monkeypatch): - monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) - - satellite_upgrade_check(SatelliteFacts(has_foreman=True, - postgresql=SatellitePostgresqlFacts(local_postgresql=True, same_partition=True))) - - assert reporting.create_report.called == 1 - - expected_title = 'Satellite PostgreSQL data migration' - expected_summary = 'Your PostgreSQL data will be automatically migrated.' 
- expected_reindex = 'all databases will require a REINDEX' - - assert expected_title == reporting.create_report.report_fields['title'] - assert expected_summary in reporting.create_report.report_fields['summary'] - assert expected_reindex in reporting.create_report.report_fields['summary'] - - -def test_different_disk_sufficient_storage(monkeypatch): - monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) - - satellite_upgrade_check(SatelliteFacts(has_foreman=True, - postgresql=SatellitePostgresqlFacts(local_postgresql=True, same_partition=False, - space_required=5, space_available=10))) - - assert reporting.create_report.called == 1 - - expected_title = 'Satellite PostgreSQL data migration' - expected_summary = 'You currently have enough free storage to move the data' - expected_reindex = 'all databases will require a REINDEX' - - assert expected_title == reporting.create_report.report_fields['title'] - assert expected_summary in reporting.create_report.report_fields['summary'] - assert expected_reindex in reporting.create_report.report_fields['summary'] - - -def test_different_disk_insufficient_storage(monkeypatch): - monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) - - satellite_upgrade_check(SatelliteFacts(has_foreman=True, - postgresql=SatellitePostgresqlFacts(local_postgresql=True, same_partition=False, - space_required=10, space_available=5))) - - assert reporting.create_report.called == 1 - - expected_title = 'Satellite PostgreSQL data migration' - expected_summary = "You currently don't have enough free storage to move the data" - - assert expected_title == reporting.create_report.report_fields['title'] - assert expected_summary in reporting.create_report.report_fields['summary'] diff --git a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_data_migration/actor.py b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_data_migration/actor.py deleted file mode 100644 index 1dd52691..00000000 --- 
a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_data_migration/actor.py +++ /dev/null @@ -1,48 +0,0 @@ -import glob -import os -import shutil - -from leapp.actors import Actor -from leapp.models import SatelliteFacts -from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag - -POSTGRESQL_DATA_PATH = '/var/lib/pgsql/data/' -POSTGRESQL_SCL_DATA_PATH = '/var/opt/rh/rh-postgresql12/lib/pgsql/data/' -POSTGRESQL_USER = 'postgres' -POSTGRESQL_GROUP = 'postgres' - - -class SatelliteUpgradeDataMigration(Actor): - """ - Migrate Satellite PostgreSQL data - """ - - name = 'satellite_upgrade_data_migration' - consumes = (SatelliteFacts,) - produces = () - tags = (IPUWorkflowTag, ApplicationsPhaseTag) - - def process(self): - facts = next(self.consume(SatelliteFacts), None) - if not facts or not facts.has_foreman: - return - - if facts.postgresql.local_postgresql and os.path.exists(POSTGRESQL_SCL_DATA_PATH): - # we can assume POSTGRESQL_DATA_PATH exists and is empty - # move PostgreSQL data to the new home - for item in glob.glob(os.path.join(POSTGRESQL_SCL_DATA_PATH, '*')): - try: - shutil.move(item, POSTGRESQL_DATA_PATH) - except Exception as e: # pylint: disable=broad-except - self.log.warning('Failed moving PostgreSQL data: {}'.format(e)) - return - - if not facts.postgresql.same_partition: - for dirpath, _, filenames in os.walk(POSTGRESQL_DATA_PATH): - try: - shutil.chown(dirpath, POSTGRESQL_USER, POSTGRESQL_GROUP) - for filename in filenames: - shutil.chown(os.path.join(dirpath, filename), POSTGRESQL_USER, POSTGRESQL_GROUP) - except Exception as e: # pylint: disable=broad-except - self.log.warning('Failed fixing ownership of PostgreSQL data: {}'.format(e)) - return diff --git a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/actor.py b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/actor.py deleted file mode 100644 index cfba0503..00000000 --- a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/actor.py +++ /dev/null @@ 
-1,157 +0,0 @@ -import os - -from leapp.actors import Actor -from leapp.libraries.common.config import architecture, version -from leapp.libraries.common.rpms import has_package -from leapp.libraries.stdlib import run -from leapp.models import ( - DNFWorkaround, - InstalledRPM, - Module, - RepositoriesSetupTasks, - RpmTransactionTasks, - SatelliteFacts, - SatellitePostgresqlFacts -) -from leapp.tags import FactsPhaseTag, IPUWorkflowTag - -POSTGRESQL_SCL_DATA_PATH = '/var/opt/rh/rh-postgresql12/lib/pgsql/data/' - - -class SatelliteUpgradeFacts(Actor): - """ - Report which Satellite packages require updates and how to handle PostgreSQL data - """ - - name = 'satellite_upgrade_facts' - consumes = (InstalledRPM, ) - produces = (DNFWorkaround, RepositoriesSetupTasks, RpmTransactionTasks, SatelliteFacts) - tags = (IPUWorkflowTag, FactsPhaseTag) - - def process(self): - if not architecture.matches_architecture(architecture.ARCH_X86_64): - return - - has_foreman = has_package(InstalledRPM, 'foreman') or has_package(InstalledRPM, 'foreman-proxy') - if not has_foreman: - return - - has_katello_installer = has_package(InstalledRPM, 'foreman-installer-katello') - - local_postgresql = has_package(InstalledRPM, 'rh-postgresql12-postgresql-server') - postgresql_contrib = has_package(InstalledRPM, 'rh-postgresql12-postgresql-contrib') - postgresql_evr = has_package(InstalledRPM, 'rh-postgresql12-postgresql-evr') - - # SCL-related packages - to_remove = ['tfm-runtime', 'tfm-pulpcore-runtime', 'rh-redis5-runtime', 'rh-ruby27-runtime', - 'rh-python38-runtime'] - to_install = ['rubygem-foreman_maintain'] - modules_to_enable = [Module(name='ruby', stream='2.7')] - - if has_package(InstalledRPM, 'katello'): - # enable modules that are needed for Candlepin, which is pulled in by Katello - modules_to_enable.append(Module(name='pki-core', stream='10.6')) - modules_to_enable.append(Module(name='pki-deps', stream='10.6')) - # enable modules that are needed for Pulpcore - 
modules_to_enable.append(Module(name='python38', stream='3.8')) - to_install.append('katello') - if version.matches_target_version('8.8', '8.9'): - # Force removal of tomcat - # PES data indicates tomcat.el7 can be upgraded to tomcat.el8 since EL 8.8, - # but we need pki-servlet-engine from the module instead which will be pulled in via normal - # package dependencies - to_remove.extend(['tomcat', 'tomcat-lib']) - - if has_package(InstalledRPM, 'rh-redis5-redis'): - modules_to_enable.append(Module(name='redis', stream='5')) - to_install.append('redis') - - for rpm_pkgs in self.consume(InstalledRPM): - for pkg in rpm_pkgs.items: - if (pkg.name.startswith('tfm-rubygem-hammer') or pkg.name.startswith('tfm-rubygem-foreman') - or pkg.name.startswith('tfm-rubygem-katello') - or pkg.name.startswith('tfm-rubygem-smart_proxy')): - to_install.append(pkg.name.replace('tfm-rubygem-', 'rubygem-')) - elif pkg.name.startswith('tfm-pulpcore-python3-pulp'): - to_install.append(pkg.name.replace('tfm-pulpcore-python3-', 'python38-')) - elif pkg.name.startswith('foreman-installer') or pkg.name.startswith('satellite-installer'): - to_install.append(pkg.name) - - on_same_partition = True - bytes_required = None - bytes_available = None - old_pgsql_data = False - - if local_postgresql: - """ - Handle migration of the PostgreSQL legacy-actions files. - RPM cannot handle replacement of directories by symlinks by default - without the %pretrans scriptlet. As PostgreSQL package is packaged wrong, - we have to workaround that by migration of the PostgreSQL files - before the rpm transaction is processed. 
- """ - self.produce( - DNFWorkaround( - display_name='PostgreSQL symlink fix', - script_path=self.get_tool_path('handle-postgresql-legacy-actions'), - ) - ) - - old_pgsql_data = bool(os.path.exists('/var/lib/pgsql/data/') and os.listdir('/var/lib/pgsql/data/') - and os.path.exists(POSTGRESQL_SCL_DATA_PATH) - and os.listdir(POSTGRESQL_SCL_DATA_PATH)) - scl_psql_stat = os.stat(POSTGRESQL_SCL_DATA_PATH) - for nonscl_path in ['/var/lib/pgsql/data/', '/var/lib/pgsql/', '/var/lib/', '/']: - if os.path.exists(nonscl_path): - nonscl_psql_stat = os.stat(nonscl_path) - break - - if scl_psql_stat.st_dev != nonscl_psql_stat.st_dev: - on_same_partition = False - # get the current disk usage of the PostgreSQL data - scl_du_call = run(['du', '--block-size=1', '--summarize', POSTGRESQL_SCL_DATA_PATH]) - bytes_required = int(scl_du_call['stdout'].split()[0]) - # get the current free space on the target partition - nonscl_stat = os.statvfs(nonscl_path) - bytes_available = nonscl_stat.f_bavail * nonscl_stat.f_frsize - - modules_to_enable.append(Module(name='postgresql', stream='12')) - to_remove.append('rh-postgresql12-runtime') - to_install.extend(['postgresql', 'postgresql-server']) - if postgresql_contrib: - to_remove.append('rh-postgresql12-postgresql-contrib') - to_install.append('postgresql-contrib') - if postgresql_evr: - to_remove.append('rh-postgresql12-postgresql-evr') - to_install.append('postgresql-evr') - - self.produce(SatelliteFacts( - has_foreman=has_foreman, - has_katello_installer=has_katello_installer, - postgresql=SatellitePostgresqlFacts( - local_postgresql=local_postgresql, - old_var_lib_pgsql_data=old_pgsql_data, - same_partition=on_same_partition, - space_required=bytes_required, - space_available=bytes_available, - ), - )) - - repositories_to_enable = ['satellite-maintenance-6.11-for-rhel-8-x86_64-rpms'] - if has_package(InstalledRPM, 'satellite'): - repositories_to_enable.append('satellite-6.11-for-rhel-8-x86_64-rpms') - 
modules_to_enable.append(Module(name='satellite', stream='el8')) - to_install.append('satellite') - elif has_package(InstalledRPM, 'satellite-capsule'): - repositories_to_enable.append('satellite-capsule-6.11-for-rhel-8-x86_64-rpms') - modules_to_enable.append(Module(name='satellite-capsule', stream='el8')) - to_install.append('satellite-capsule') - - self.produce(RpmTransactionTasks( - to_remove=to_remove, - to_install=to_install, - modules_to_enable=modules_to_enable - ) - ) - - self.produce(RepositoriesSetupTasks(to_enable=repositories_to_enable)) diff --git a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/tests/unit_test_satellite_upgrade_facts.py b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/tests/unit_test_satellite_upgrade_facts.py deleted file mode 100644 index 2fb8a3ba..00000000 --- a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/tests/unit_test_satellite_upgrade_facts.py +++ /dev/null @@ -1,176 +0,0 @@ -import os - -from leapp.libraries.common.config import mock_configs -from leapp.models import ( - DNFWorkaround, - InstalledRPM, - Module, - RepositoriesSetupTasks, - RPM, - RpmTransactionTasks, - SatelliteFacts -) -from leapp.snactor.fixture import current_actor_context - -RH_PACKAGER = 'Red Hat, Inc. 
' - - -def fake_package(pkg_name): - return RPM(name=pkg_name, version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', - pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51') - - -FOREMAN_RPM = fake_package('foreman') -FOREMAN_PROXY_RPM = fake_package('foreman-proxy') -KATELLO_INSTALLER_RPM = fake_package('foreman-installer-katello') -KATELLO_RPM = fake_package('katello') -POSTGRESQL_RPM = fake_package('rh-postgresql12-postgresql-server') -SATELLITE_RPM = fake_package('satellite') -SATELLITE_CAPSULE_RPM = fake_package('satellite-capsule') - - -def test_no_satellite_present(current_actor_context): - current_actor_context.feed(InstalledRPM(items=[])) - current_actor_context.run(config_model=mock_configs.CONFIG) - message = current_actor_context.consume(SatelliteFacts) - assert not message - - -def test_satellite_present(current_actor_context): - current_actor_context.feed(InstalledRPM(items=[FOREMAN_RPM])) - current_actor_context.run(config_model=mock_configs.CONFIG) - message = current_actor_context.consume(SatelliteFacts)[0] - assert message.has_foreman - - -def test_wrong_arch(current_actor_context): - current_actor_context.feed(InstalledRPM(items=[FOREMAN_RPM])) - current_actor_context.run(config_model=mock_configs.CONFIG_S390X) - message = current_actor_context.consume(SatelliteFacts) - assert not message - - -def test_satellite_capsule_present(current_actor_context): - current_actor_context.feed(InstalledRPM(items=[FOREMAN_PROXY_RPM])) - current_actor_context.run(config_model=mock_configs.CONFIG) - message = current_actor_context.consume(SatelliteFacts)[0] - assert message.has_foreman - - -def test_no_katello_installer_present(current_actor_context): - current_actor_context.feed(InstalledRPM(items=[FOREMAN_RPM])) - current_actor_context.run(config_model=mock_configs.CONFIG) - message = current_actor_context.consume(SatelliteFacts)[0] - assert not message.has_katello_installer - - -def 
test_katello_installer_present(current_actor_context): - current_actor_context.feed(InstalledRPM(items=[FOREMAN_RPM, KATELLO_INSTALLER_RPM])) - current_actor_context.run(config_model=mock_configs.CONFIG) - message = current_actor_context.consume(SatelliteFacts)[0] - assert message.has_katello_installer - - -def test_enables_ruby_module(current_actor_context): - current_actor_context.feed(InstalledRPM(items=[FOREMAN_RPM])) - current_actor_context.run(config_model=mock_configs.CONFIG) - message = current_actor_context.consume(RpmTransactionTasks)[0] - assert Module(name='ruby', stream='2.7') in message.modules_to_enable - - -def test_enables_pki_modules(current_actor_context): - current_actor_context.feed(InstalledRPM(items=[FOREMAN_RPM, KATELLO_RPM])) - current_actor_context.run(config_model=mock_configs.CONFIG) - message = current_actor_context.consume(RpmTransactionTasks)[0] - assert Module(name='pki-core', stream='10.6') in message.modules_to_enable - assert Module(name='pki-deps', stream='10.6') in message.modules_to_enable - - -def test_enables_satellite_module(current_actor_context): - current_actor_context.feed(InstalledRPM(items=[FOREMAN_RPM, SATELLITE_RPM])) - current_actor_context.run(config_model=mock_configs.CONFIG) - message = current_actor_context.consume(RpmTransactionTasks)[0] - assert Module(name='satellite', stream='el8') in message.modules_to_enable - assert Module(name='satellite-capsule', stream='el8') not in message.modules_to_enable - - -def test_enables_satellite_capsule_module(current_actor_context): - current_actor_context.feed(InstalledRPM(items=[FOREMAN_PROXY_RPM, SATELLITE_CAPSULE_RPM])) - current_actor_context.run(config_model=mock_configs.CONFIG) - message = current_actor_context.consume(RpmTransactionTasks)[0] - assert Module(name='satellite-capsule', stream='el8') in message.modules_to_enable - assert Module(name='satellite', stream='el8') not in message.modules_to_enable - - -def 
test_installs_satellite_package(current_actor_context): - current_actor_context.feed(InstalledRPM(items=[FOREMAN_RPM, SATELLITE_RPM])) - current_actor_context.run(config_model=mock_configs.CONFIG) - message = current_actor_context.consume(RpmTransactionTasks)[0] - assert 'satellite' in message.to_install - assert 'satellite-capsule' not in message.to_install - - -def test_installs_satellite_capsule_package(current_actor_context): - current_actor_context.feed(InstalledRPM(items=[FOREMAN_PROXY_RPM, SATELLITE_CAPSULE_RPM])) - current_actor_context.run(config_model=mock_configs.CONFIG) - message = current_actor_context.consume(RpmTransactionTasks)[0] - assert 'satellite-capsule' in message.to_install - assert 'satellite' not in message.to_install - - -def test_detects_local_postgresql(monkeypatch, current_actor_context): - def mock_stat(): - orig_stat = os.stat - - def mocked_stat(path): - if path == '/var/opt/rh/rh-postgresql12/lib/pgsql/data/': - path = '/' - return orig_stat(path) - return mocked_stat - monkeypatch.setattr("os.stat", mock_stat()) - - current_actor_context.feed(InstalledRPM(items=[FOREMAN_RPM, POSTGRESQL_RPM])) - current_actor_context.run(config_model=mock_configs.CONFIG) - - rpmmessage = current_actor_context.consume(RpmTransactionTasks)[0] - assert Module(name='postgresql', stream='12') in rpmmessage.modules_to_enable - - satellitemsg = current_actor_context.consume(SatelliteFacts)[0] - assert satellitemsg.postgresql.local_postgresql - - assert current_actor_context.consume(DNFWorkaround) - - -def test_detects_remote_postgresql(current_actor_context): - current_actor_context.feed(InstalledRPM(items=[FOREMAN_RPM])) - current_actor_context.run(config_model=mock_configs.CONFIG) - - rpmmessage = current_actor_context.consume(RpmTransactionTasks)[0] - assert Module(name='postgresql', stream='12') not in rpmmessage.modules_to_enable - - satellitemsg = current_actor_context.consume(SatelliteFacts)[0] - assert not satellitemsg.postgresql.local_postgresql - 
- assert not current_actor_context.consume(DNFWorkaround) - - -def test_enables_right_repositories_on_satellite(current_actor_context): - current_actor_context.feed(InstalledRPM(items=[FOREMAN_RPM, SATELLITE_RPM])) - current_actor_context.run(config_model=mock_configs.CONFIG) - - rpmmessage = current_actor_context.consume(RepositoriesSetupTasks)[0] - - assert 'satellite-maintenance-6.11-for-rhel-8-x86_64-rpms' in rpmmessage.to_enable - assert 'satellite-6.11-for-rhel-8-x86_64-rpms' in rpmmessage.to_enable - assert 'satellite-capsule-6.11-for-rhel-8-x86_64-rpms' not in rpmmessage.to_enable - - -def test_enables_right_repositories_on_capsule(current_actor_context): - current_actor_context.feed(InstalledRPM(items=[FOREMAN_PROXY_RPM, SATELLITE_CAPSULE_RPM])) - current_actor_context.run(config_model=mock_configs.CONFIG) - - rpmmessage = current_actor_context.consume(RepositoriesSetupTasks)[0] - - assert 'satellite-maintenance-6.11-for-rhel-8-x86_64-rpms' in rpmmessage.to_enable - assert 'satellite-6.11-for-rhel-8-x86_64-rpms' not in rpmmessage.to_enable - assert 'satellite-capsule-6.11-for-rhel-8-x86_64-rpms' in rpmmessage.to_enable diff --git a/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/actor.py b/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/actor.py deleted file mode 100644 index 0db93aba..00000000 --- a/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/actor.py +++ /dev/null @@ -1,18 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import scan_layout as scan_layout_lib -from leapp.models import GRUBDevicePartitionLayout, GrubInfo -from leapp.tags import FactsPhaseTag, IPUWorkflowTag - - -class ScanGRUBDevicePartitionLayout(Actor): - """ - Scan all identified GRUB devices for their partition layout. 
- """ - - name = 'scan_grub_device_partition_layout' - consumes = (GrubInfo,) - produces = (GRUBDevicePartitionLayout,) - tags = (FactsPhaseTag, IPUWorkflowTag,) - - def process(self): - scan_layout_lib.scan_grub_device_partition_layout() diff --git a/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/libraries/scan_layout.py b/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/libraries/scan_layout.py deleted file mode 100644 index 7f4a2a59..00000000 --- a/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/libraries/scan_layout.py +++ /dev/null @@ -1,101 +0,0 @@ -from leapp.libraries.stdlib import api, CalledProcessError, run -from leapp.models import GRUBDevicePartitionLayout, GrubInfo, PartitionInfo - -SAFE_OFFSET_BYTES = 1024*1024 # 1MiB - - -def split_on_space_segments(line): - fragments = (fragment.strip() for fragment in line.split(' ')) - return [fragment for fragment in fragments if fragment] - - -def get_partition_layout(device): - try: - partition_table = run(['fdisk', '-l', '-u=sectors', device], split=True)['stdout'] - except CalledProcessError as err: - # Unlikely - if the disk has no partition table, `fdisk` terminates with 0 (no err). Fdisk exits with an err - # when the device does not exists, or if it is too small to contain a partition table. - - err_msg = 'Failed to run `fdisk` to obtain the partition table of the device {0}. 
Full error: \'{1}\'' - api.current_logger().error(err_msg.format(device, str(err))) - return None - - table_iter = iter(partition_table) - - for line in table_iter: - if not line.startswith('Units'): - # We are still reading general device information and not the table itself - continue - - unit = line.split('=')[2].strip() # Contains '512 bytes' - unit = int(unit.split(' ')[0].strip()) - break # First line of the partition table header - - # Discover disk label type: dos | gpt - for line in table_iter: - line = line.strip() - if not line.startswith('Disk label type'): - continue - disk_type = line.split(':')[1].strip() - break - - if disk_type == 'gpt': - api.current_logger().info( - 'Detected GPT partition table. Skipping produce of GRUBDevicePartitionLayout message.' - ) - # NOTE(pstodulk): The GPT table has a different output format than - # expected below, example (ignore start/end lines): - # --------------------------- start ---------------------------------- - # # Start End Size Type Name - # 1 2048 4095 1M BIOS boot - # 2 4096 2101247 1G Microsoft basic - # 3 2101248 41940991 19G Linux LVM - # ---------------------------- end ----------------------------------- - # But mainly, in case of GPT, we have nothing to actually check as - # we are gathering this data now mainly to get information about the - # actual size of embedding area (MBR gap). In case of GPT, there is - # bios boot / prep boot partition, which has always 1 MiB and fulfill - # our expectations. So skip in this case another processing and generation - # of the msg. Let's improve it in future if we find a reason for it. 
- return None - - for line in table_iter: - line = line.strip() - if not line.startswith('Device'): - continue - - break - - partitions = [] - for partition_line in table_iter: - if not partition_line.startswith('/'): - # the output can contain warning msg when a partition is not aligned - # on physical sector boundary, like: - # ~~~ - # Partition 4 does not start on physical sector boundary. - # ~~~ - # We know that in case of MBR the line we expect to parse always - # starts with canonical path. So let's use this condition. - # See https://issues.redhat.com/browse/RHEL-50947 - continue - # Fields: Device Boot Start End Sectors Size Id Type - # The line looks like: `/dev/vda1 * 2048 2099199 2097152 1G 83 Linux` - part_info = split_on_space_segments(partition_line) - - # If the partition is not bootable, the Boot column might be empty - part_device = part_info[0] - part_start = int(part_info[2]) if part_info[1] == '*' else int(part_info[1]) - partitions.append(PartitionInfo(part_device=part_device, start_offset=part_start*unit)) - - return GRUBDevicePartitionLayout(device=device, partitions=partitions) - - -def scan_grub_device_partition_layout(): - grub_devices = next(api.consume(GrubInfo), None) - if not grub_devices: - return - - for device in grub_devices.orig_devices: - dev_info = get_partition_layout(device) - if dev_info: - api.produce(dev_info) diff --git a/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/tests/test_scan_partition_layout.py b/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/tests/test_scan_partition_layout.py deleted file mode 100644 index 9c32e16f..00000000 --- a/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/tests/test_scan_partition_layout.py +++ /dev/null @@ -1,87 +0,0 @@ -from collections import namedtuple - -import pytest - -from leapp.libraries.actor import scan_layout as scan_layout_lib -from leapp.libraries.common import grub -from leapp.libraries.common.testutils import 
create_report_mocked, produce_mocked -from leapp.libraries.stdlib import api -from leapp.models import GRUBDevicePartitionLayout, GrubInfo -from leapp.utils.report import is_inhibitor - -Device = namedtuple('Device', ['name', 'partitions', 'sector_size']) -Partition = namedtuple('Partition', ['name', 'start_offset']) - - -@pytest.mark.parametrize( - 'devices', - [ - ( - Device(name='/dev/vda', sector_size=512, - partitions=[Partition(name='/dev/vda1', start_offset=63), - Partition(name='/dev/vda2', start_offset=1000)]), - Device(name='/dev/vdb', sector_size=1024, - partitions=[Partition(name='/dev/vdb1', start_offset=100), - Partition(name='/dev/vdb2', start_offset=20000)]) - ), - ( - Device(name='/dev/vda', sector_size=512, - partitions=[Partition(name='/dev/vda1', start_offset=111), - Partition(name='/dev/vda2', start_offset=1000)]), - ) - ] -) -@pytest.mark.parametrize('fs', ('Linux', 'Linux raid autodetect')) -def test_get_partition_layout(monkeypatch, devices, fs): - device_to_fdisk_output = {} - for device in devices: - fdisk_output = [ - 'Disk {0}: 42.9 GB, 42949672960 bytes, 83886080 sectors'.format(device.name), - 'Units = sectors of 1 * {sector_size} = {sector_size} bytes'.format(sector_size=device.sector_size), - 'Sector size (logical/physical): 512 bytes / 512 bytes', - 'I/O size (minimum/optimal): 512 bytes / 512 bytes', - 'Disk label type: dos', - 'Disk identifier: 0x0000000da', - '', - ' Device Boot Start End Blocks Id System', - ] - for part in device.partitions: - part_line = '{0} * {1} 2099199 1048576 83 {2}'.format(part.name, part.start_offset, fs) - fdisk_output.append(part_line) - - # add a problematic warning msg to test: - # https://issues.redhat.com/browse/RHEL-50947 - fdisk_output.append('Partition 3 does not start on physical sector boundary.') - device_to_fdisk_output[device.name] = fdisk_output - - def mocked_run(cmd, *args, **kwargs): - assert cmd[:3] == ['fdisk', '-l', '-u=sectors'] - device = cmd[3] - output = 
device_to_fdisk_output[device] - return {'stdout': output} - - def consume_mocked(*args, **kwargs): - yield GrubInfo(orig_devices=[device.name for device in devices]) - - monkeypatch.setattr(scan_layout_lib, 'run', mocked_run) - monkeypatch.setattr(api, 'produce', produce_mocked()) - monkeypatch.setattr(api, 'consume', consume_mocked) - - scan_layout_lib.scan_grub_device_partition_layout() - - assert api.produce.called == len(devices) - - dev_name_to_desc = {dev.name: dev for dev in devices} - - for message in api.produce.model_instances: - assert isinstance(message, GRUBDevicePartitionLayout) - dev = dev_name_to_desc[message.device] - - expected_part_name_to_start = {part.name: part.start_offset*dev.sector_size for part in dev.partitions} - actual_part_name_to_start = {part.part_device: part.start_offset for part in message.partitions} - assert expected_part_name_to_start == actual_part_name_to_start - - -def test_get_partition_layout_gpt(monkeypatch): - # TODO(pstodulk): skipping for now, due to time pressure. Testing for now manually. - pass diff --git a/repos/system_upgrade/el7toel8/actors/sctpchecks/actor.py b/repos/system_upgrade/el7toel8/actors/sctpchecks/actor.py deleted file mode 100644 index 73acea56..00000000 --- a/repos/system_upgrade/el7toel8/actors/sctpchecks/actor.py +++ /dev/null @@ -1,24 +0,0 @@ -from leapp.actors import Actor -from leapp.models import RpmTransactionTasks, SCTPConfig -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class SCTPChecks(Actor): - """ - Parses collected SCTP information and take necessary actions. - - The only action performed by this actor is to request the installation of the - kernel-modules-extra rpm package, based on if SCTP is being used or not which - is collected on SCTPConfig message. If yes, it then produces a RpmTransactionTasks - requesting to install the package. 
- """ - name = 'sctp_checks' - consumes = (SCTPConfig,) - produces = (RpmTransactionTasks, ) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - for sctpconfig in self.consume(SCTPConfig): - if sctpconfig.wanted: - self.produce(RpmTransactionTasks(to_install=['kernel-modules-extra'])) - break diff --git a/repos/system_upgrade/el7toel8/actors/sctpchecks/tests/component_test_sctpchecks.py b/repos/system_upgrade/el7toel8/actors/sctpchecks/tests/component_test_sctpchecks.py deleted file mode 100644 index c5437d9d..00000000 --- a/repos/system_upgrade/el7toel8/actors/sctpchecks/tests/component_test_sctpchecks.py +++ /dev/null @@ -1,27 +0,0 @@ -from leapp.actors import Actor -from leapp.models import RpmTransactionTasks, SCTPConfig -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -def test_sctp_wanted(current_actor_context): - current_actor_context.feed(SCTPConfig(wanted=True)) - current_actor_context.run() - assert current_actor_context.consume(RpmTransactionTasks) - assert current_actor_context.consume(RpmTransactionTasks)[0].to_install == ['kernel-modules-extra'] - - -def test_sctp_empty_config(current_actor_context): - current_actor_context.feed(SCTPConfig()) - current_actor_context.run() - assert not current_actor_context.consume(RpmTransactionTasks) - - -def test_sctp_no_config(current_actor_context): - current_actor_context.run() - assert not current_actor_context.consume(RpmTransactionTasks) - - -def test_sctp_unwanted(current_actor_context): - current_actor_context.feed(SCTPConfig(wanted=False)) - current_actor_context.run() - assert not current_actor_context.consume(RpmTransactionTasks) diff --git a/repos/system_upgrade/el7toel8/actors/sctpconfigread/actor.py b/repos/system_upgrade/el7toel8/actors/sctpconfigread/actor.py deleted file mode 100644 index 452f5c69..00000000 --- a/repos/system_upgrade/el7toel8/actors/sctpconfigread/actor.py +++ /dev/null @@ -1,21 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor.sctplib 
import is_sctp_wanted -from leapp.models import ActiveKernelModulesFacts, SCTPConfig -from leapp.tags import FactsPhaseTag, IPUWorkflowTag - - -class SCTPConfigRead(Actor): - """ - Determines whether or not the SCTP kernel module might be wanted. - - This actor determines whether or not the SCTP is currently used by this machine or has been quite - recently used (1 month timeframe). In case it has been used it will issue a SCTPConfig message that - defines the decision whether or not the SCTP module should be removed from the module blacklist on RHEL8. - """ - name = 'sctp_read_status' - consumes = (ActiveKernelModulesFacts,) - produces = (SCTPConfig,) - tags = (FactsPhaseTag, IPUWorkflowTag) - - def process(self): - self.produce(SCTPConfig(wanted=is_sctp_wanted())) diff --git a/repos/system_upgrade/el7toel8/actors/sctpconfigread/libraries/sctpdlm.py b/repos/system_upgrade/el7toel8/actors/sctpconfigread/libraries/sctpdlm.py deleted file mode 100644 index aa547062..00000000 --- a/repos/system_upgrade/el7toel8/actors/sctpconfigread/libraries/sctpdlm.py +++ /dev/null @@ -1,64 +0,0 @@ -# -# Functions for probing SCTP usage by DLM -# -import re - -from leapp.libraries.common import utils -from leapp.libraries.stdlib import api - - -def check_dlm_cfgfile(_open=open): - """Parse DLM config file. - :param _open: object behind opening a file. Might be replaced - by mocked one for the purpose of testing - """ - fname = '/etc/dlm/dlm.conf' - - try: - with _open(fname, 'r') as fp: - cfgs = '[dlm]\n' + fp.read() - except (OSError, IOError): - return False - - cfg = utils.parse_config(cfgs) - - if not cfg.has_option('dlm', 'protocol'): - return False - - proto = cfg.get('dlm', 'protocol').lower() - return proto in ['sctp', 'detect', '1', '2'] - - -def check_dlm_sysconfig(_open=open): - """Parse /etc/sysconfig/dlm - :param _open: object behind opening a file. 
Might be replaced - by mocked one for the purpose of testing - """ - - regex = re.compile('^[^#]*DLM_CONTROLD_OPTS.*=.*(?:--protocol|-r)[ =]*([^"\' ]+).*', re.IGNORECASE) - - try: - with _open('/etc/sysconfig/dlm', 'r') as fp: - lines = fp.readlines() - except (OSError, IOError): - return False - - for line in lines: - if regex.match(line): - proto = regex.sub('\\1', line).lower().strip() - if proto in ['sctp', 'detect']: - return True - - return False - - -def is_dlm_using_sctp(): - if check_dlm_cfgfile(): - api.current_logger().info('DLM is configured to use SCTP on dlm.conf.') - return True - - if check_dlm_sysconfig(): - api.current_logger().info('DLM is configured to use SCTP on sysconfig.') - return True - - return False diff --git a/repos/system_upgrade/el7toel8/actors/sctpconfigread/libraries/sctplib.py b/repos/system_upgrade/el7toel8/actors/sctpconfigread/libraries/sctplib.py deleted file mode 100644 index cc002166..00000000 --- a/repos/system_upgrade/el7toel8/actors/sctpconfigread/libraries/sctplib.py +++ /dev/null @@ -1,106 +0,0 @@ -# -# Helper functions -# - -from os.path import isfile - -from leapp.libraries.actor import sctpdlm -from leapp.libraries.stdlib import api, CalledProcessError, run -from leapp.models import ActiveKernelModulesFacts - - -def anyfile(files): - """ - Determines if any of the given paths exist and are a file. - - :type files: tuple of str - :return: True if any of the given paths exists and it is a file. - :rtype: bool - """ - for f in files: - try: - if isfile(f): - return True - except OSError: - continue - return False - - -def is_module_loaded(module): - """ - Determines if the given kernel module has been reported in the ActiveKernelModuleFacts as loaded. - - :return: True if the module has been found in the ActiveKernelModuleFacts. 
- :rtype: bool - """ - for fact in api.consume(ActiveKernelModulesFacts): - for active_module in fact.kernel_modules: - if active_module.filename == module: - return True - return False - - -def is_sctp_used(): - """ - Logic function that decides whether SCTP is being used on this machine. - - :return: True if SCTP usage was detected. - :rtype: bool - """ - - # If anything is using SCTP, be it for listening on new connections or - # connecting somewhere else, the module will be loaded. Thus, no need to - # also probe on sockets. - if is_module_loaded('sctp'): - return True - - # Basic files from lksctp-tools. This check is enough and checking RPM - # database is an overkill here and this allows for checking for - # manually installed ones, which is possible. - lksctp_files = ['/usr/lib64/libsctp.so.1', - '/usr/lib/libsctp.so.1', - '/usr/bin/sctp_test'] - if anyfile(lksctp_files): - api.current_logger().debug('At least one of lksctp files is present.') - return True - - if sctpdlm.is_dlm_using_sctp(): - return True - - return False - - -def was_sctp_used(): - """ - Determines whether SCTP has been used in the path month, by checking the journalctl. - - :return: True if SCTP usage has been found. - :rtype: bool - """ - try: - run(['check_syslog_for_sctp.sh']) - except CalledProcessError: - api.current_logger().debug('Nothing regarding SCTP was found on journal.') - return False - api.current_logger().debug('Found logs regarding SCTP on journal.') - return True - - -def is_sctp_wanted(): - """ - Decision making function that decides based on the current or past usage of SCTP, the SCTP module is wanted - on the new system. - - :return: True if SCTP seems to be in use or has been recently used. 
- :rtype: bool - """ - if is_sctp_used(): - api.current_logger().info('SCTP is being used.') - return True - - if was_sctp_used(): - api.current_logger().info('SCTP was used.') - return True - - api.current_logger().info('SCTP is not being used and neither wanted.') - return False diff --git a/repos/system_upgrade/el7toel8/actors/sctpconfigread/tests/test_unit_sctpconfigread_sctpdlm.py b/repos/system_upgrade/el7toel8/actors/sctpconfigread/tests/test_unit_sctpconfigread_sctpdlm.py deleted file mode 100644 index 37a7e2df..00000000 --- a/repos/system_upgrade/el7toel8/actors/sctpconfigread/tests/test_unit_sctpconfigread_sctpdlm.py +++ /dev/null @@ -1,90 +0,0 @@ -import logging - -import pytest -import six - -from leapp.libraries.actor import sctpdlm - -if six.PY2: - from mock import mock_open -else: - from unittest.mock import mock_open - - -# TODO Confirm with the team the way to mock builtin open -# and apply this throughout the repo - - -@pytest.mark.parametrize( - ('config', 'open_raises', 'exp_return',), - [ - ('', IOError, False), - ('', OSError, False), - ('log_debug=1\npost_join_delay=10', None, False), - ('log_debug=1\npost_join_delay=10\nprotocol=sctp', None, True), - ('log_debug=1\npost_join_delay=10\nprotocol=detect', None, True), - ('log_debug=1\npost_join_delay=10\nprotocol=1', None, True), - ('log_debug=1\npost_join_delay=10\nprotocol=2', None, True), - ('log_debug=1\npost_join_delay=10\nprotocol=tcp', None, False), - ('log_debug=1\npost_join_delay=10', None, False), - ], -) -def test_check_dlm_cfgfile(config, open_raises, exp_return): - if open_raises: - mock_open.side_effect = open_raises - assert ( - sctpdlm.check_dlm_cfgfile(_open=mock_open(read_data=config)) - == exp_return - ) - - -@pytest.mark.parametrize( - ('config', 'open_raises', 'exp_return'), - [ - ('', IOError, False), - ('', OSError, False), - ('DLM_CONTROLD_OPTS="- f 0 -q 0 --protocol=sctp"', None, True), - ('DLM_CONTROLD_OPTS="- f 0 -q 0 -r detect"', None, True), - ('DLM_CONTROLD_OPTS="- 
f 0 -q 0 --protocol tcp"', None, False), - ], -) -def test_check_dlm_sysconfig(config, open_raises, exp_return): - if open_raises: - mock_open.side_effect = open_raises - assert ( - sctpdlm.check_dlm_sysconfig(_open=mock_open(read_data=config)) - == exp_return - ) - - -@pytest.mark.parametrize( - ( - 'check_dlm_cfg_file_returns', - 'check_dlm_sysconfig_returns', - 'exp_return', - 'text_in_log', - ), - [ - (True, False, True, 'DLM is configured to use SCTP on dlm.conf.'), - (False, True, True, 'DLM is configured to use SCTP on sysconfig.'), - (False, False, False, ''), - ], -) -def test_is_dlm_using_sctp( - check_dlm_cfg_file_returns, - check_dlm_sysconfig_returns, - exp_return, - text_in_log, - monkeypatch, - caplog, -): - monkeypatch.setattr( - sctpdlm, 'check_dlm_cfgfile', lambda: check_dlm_cfg_file_returns - ) - monkeypatch.setattr( - sctpdlm, 'check_dlm_sysconfig', lambda: check_dlm_sysconfig_returns - ) - with caplog.at_level(logging.DEBUG): - assert sctpdlm.is_dlm_using_sctp() == exp_return - if text_in_log: - assert text_in_log in caplog.text diff --git a/repos/system_upgrade/el7toel8/actors/sctpconfigread/tests/test_unit_sctpconfigread_sctplib.py b/repos/system_upgrade/el7toel8/actors/sctpconfigread/tests/test_unit_sctpconfigread_sctplib.py deleted file mode 100644 index 90d8109a..00000000 --- a/repos/system_upgrade/el7toel8/actors/sctpconfigread/tests/test_unit_sctpconfigread_sctplib.py +++ /dev/null @@ -1,239 +0,0 @@ -import logging -from functools import partial - -import pytest - -from leapp.libraries.actor import sctpdlm, sctplib -from leapp.libraries.common.testutils import CurrentActorMocked -from leapp.models import ActiveKernelModule, ActiveKernelModulesFacts - -FILENAME_SCTP = 'sctp' -FILENAME_NO_SCTP = 'no_sctp' -SRC_VER = '7.6' - -logger = logging.getLogger(__name__) - - -def test_anyfile(tmpdir): - file1 = tmpdir.join('file1') - file2 = tmpdir.join('file2') - file1.write('I am not empty') - file2.write('And me either') - - assert 
sctplib.anyfile((str(file1),)) - assert sctplib.anyfile((str(file1), str(tmpdir))) - assert not sctplib.anyfile((str(tmpdir),)) - assert not sctplib.anyfile(('Iam not exist',)) - - -def test_is_module_loaded(monkeypatch): - monkeypatch.setattr( - sctplib.api, - 'current_actor', - CurrentActorMocked( - src_ver=SRC_VER, - msgs=[ - ActiveKernelModulesFacts( - kernel_modules=[ - ActiveKernelModule( - filename=FILENAME_SCTP, parameters=() - ), - ] - ), - ], - ), - ) - assert sctplib.is_module_loaded(FILENAME_SCTP) - assert not sctplib.is_module_loaded('not exists filename') - - -@pytest.mark.parametrize( - ( - 'actor', - 'exp_return', - 'anyfile_returns', - 'check_dlm_cfgfile_returns', - 'check_dlm_sysconfig_returns', - 'text_in_log', - ), - [ - # test if module name is sctp - ( - CurrentActorMocked( - src_ver=SRC_VER, - msgs=[ - ActiveKernelModulesFacts( - kernel_modules=[ - ActiveKernelModule( - filename=FILENAME_SCTP, parameters=() - ) - ] - ) - ], - ), - True, - False, - False, - False, - '', - ), - # test if module name is different, but one of lksctp is present - ( - CurrentActorMocked( - src_ver=SRC_VER, - msgs=[ - ActiveKernelModulesFacts( - kernel_modules=[ - ActiveKernelModule( - filename=FILENAME_NO_SCTP, parameters=() - ) - ] - ) - ], - ), - True, - True, - False, - False, - 'lksctp files', - ), - # test if check_dlm_cfgfile is True - ( - CurrentActorMocked( - src_ver=SRC_VER, - msgs=[ - ActiveKernelModulesFacts( - kernel_modules=[ - ActiveKernelModule( - filename=FILENAME_NO_SCTP, parameters=() - ) - ] - ) - ], - ), - True, - False, - True, - False, - 'dlm.conf', - ), - # test if check_dlm_sysconfig is True - ( - CurrentActorMocked( - src_ver=SRC_VER, - msgs=[ - ActiveKernelModulesFacts( - kernel_modules=[ - ActiveKernelModule( - filename=FILENAME_NO_SCTP, parameters=() - ) - ] - ) - ], - ), - True, - False, - False, - True, - 'sysconfig', - ), - ], -) -def test_is_sctp_used( - actor, - exp_return, - anyfile_returns, - check_dlm_cfgfile_returns, - 
check_dlm_sysconfig_returns, - text_in_log, - monkeypatch, - caplog, -): - monkeypatch.setattr(sctplib.api, 'current_actor', actor) - monkeypatch.setattr(sctplib, 'anyfile', lambda arg: anyfile_returns) - monkeypatch.setattr( - sctpdlm, 'check_dlm_cfgfile', lambda: check_dlm_cfgfile_returns - ) - monkeypatch.setattr( - sctpdlm, 'check_dlm_sysconfig', lambda: check_dlm_sysconfig_returns - ) - with caplog.at_level(logging.DEBUG): - assert sctplib.is_sctp_used() == exp_return - if text_in_log: - assert text_in_log in caplog.text - - -class RunMocked(object): - """Simple mock class for leapp.libraries.stdlib.run.""" - - def __init__(self, exc_type=None): - """if exc_type provided, then it will be raised on - instance call. - - :type exc_type: None or BaseException - """ - self.exc_type = exc_type - - def __call__(self, *args, **kwargs): - if self.exc_type: - logger.info('Mocked `run` raising %r', self.exc_type) - raise self.exc_type() - logger.info('Mocked `run` passed without exp.') - - -@pytest.mark.parametrize( - ('run_fails', 'exp_return', 'text_in_log'), - [ - (True, False, 'Nothing regarding SCTP was found on journal.'), - (False, True, 'Found logs regarding SCTP on journal.'), - ], -) -def test_was_sctp_used( - monkeypatch, caplog, run_fails, exp_return, text_in_log -): - monkeypatch.setattr( - sctplib, - 'run', - RunMocked( - exc_type=partial( - sctplib.CalledProcessError, 'message', 'command', 'result' - ) - if run_fails - else None - ), - ) - with caplog.at_level(logging.DEBUG): - assert sctplib.was_sctp_used() == exp_return - if text_in_log: - assert text_in_log in caplog.text - - -@pytest.mark.parametrize( - ( - 'is_sctp_used_returns', - 'was_sctp_used_returns', - 'exp_return', - 'text_in_log', - ), - [ - (True, False, True, 'SCTP is being used.'), - (False, True, True, 'SCTP was used.'), - (False, False, False, 'SCTP is not being used and neither wanted.'), - ], -) -def test_is_sctp_wanted( - is_sctp_used_returns, - was_sctp_used_returns, - exp_return, - 
text_in_log, - monkeypatch, - caplog, -): - monkeypatch.setattr(sctplib, 'is_sctp_used', lambda: is_sctp_used_returns) - monkeypatch.setattr( - sctplib, 'was_sctp_used', lambda: was_sctp_used_returns - ) - with caplog.at_level(logging.DEBUG): - assert sctplib.is_sctp_wanted() == exp_return - if text_in_log: - assert text_in_log in caplog.text diff --git a/repos/system_upgrade/el7toel8/actors/sctpconfigread/tools/check_syslog_for_sctp.sh b/repos/system_upgrade/el7toel8/actors/sctpconfigread/tools/check_syslog_for_sctp.sh deleted file mode 100755 index 74f9a913..00000000 --- a/repos/system_upgrade/el7toel8/actors/sctpconfigread/tools/check_syslog_for_sctp.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -/usr/bin/journalctl --system -S '1 month ago' | /usr/bin/grep -q -m1 -w sctp diff --git a/repos/system_upgrade/el7toel8/actors/sctpconfigupdate/actor.py b/repos/system_upgrade/el7toel8/actors/sctpconfigupdate/actor.py deleted file mode 100644 index ee177d93..00000000 --- a/repos/system_upgrade/el7toel8/actors/sctpconfigupdate/actor.py +++ /dev/null @@ -1,21 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import sctpupdate -from leapp.models import SCTPConfig -from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag - - -class SCTPConfigUpdate(Actor): - """ - Updates the kernel module blacklist for SCTP. - - If the SCTP module is wanted on RHEL8 the modprobe configuration gets updated to remove SCTP from the black listed - kernel modules. - """ - name = 'sctp_config_update' - description = 'This actor updates SCTP configuration for RHEL8.' 
- consumes = (SCTPConfig,) - produces = () - tags = (ApplicationsPhaseTag, IPUWorkflowTag) - - def process(self): - sctpupdate.perform_update() diff --git a/repos/system_upgrade/el7toel8/actors/sctpconfigupdate/libraries/sctpupdate.py b/repos/system_upgrade/el7toel8/actors/sctpconfigupdate/libraries/sctpupdate.py deleted file mode 100644 index 79be4c2e..00000000 --- a/repos/system_upgrade/el7toel8/actors/sctpconfigupdate/libraries/sctpupdate.py +++ /dev/null @@ -1,27 +0,0 @@ -import os - -from leapp.libraries.stdlib import api, run -from leapp.models import SCTPConfig - - -def enable_sctp(_modprobe_d_path="/etc/modprobe.d"): - """ - Enables the SCTP module by removing it from being black listed. - :type _modprobe_d_path: str - :param _modprobe_d_path: overwrite only in case of testing, by passing - some tmp_dir instead - """ - - api.current_logger().info('Enabling SCTP.') - run(['/usr/bin/sed', '-i', r's/^\s*blacklist.*sctp/#&/', - os.path.join(_modprobe_d_path, 'sctp_diag-blacklist.conf'), - os.path.join(_modprobe_d_path, 'sctp-blacklist.conf')]) - api.current_logger().info('Enabled SCTP.') - - -def perform_update(): - for sctpconfig in api.consume(SCTPConfig): - api.current_logger().info('Consuming sctp={}'.format(sctpconfig.wanted)) - if sctpconfig.wanted: - enable_sctp() - break diff --git a/repos/system_upgrade/el7toel8/actors/sctpconfigupdate/tests/test_unit_sctpconfigupdate.py b/repos/system_upgrade/el7toel8/actors/sctpconfigupdate/tests/test_unit_sctpconfigupdate.py deleted file mode 100644 index ac369229..00000000 --- a/repos/system_upgrade/el7toel8/actors/sctpconfigupdate/tests/test_unit_sctpconfigupdate.py +++ /dev/null @@ -1,119 +0,0 @@ -import logging -import subprocess - -import pytest - -from leapp.libraries.actor import sctpupdate -from leapp.libraries.stdlib import CalledProcessError - -logger = logging.getLogger(__name__) - - -@pytest.mark.parametrize( - ( - 'conf_content', - 'exp_new_conf_content', - 'log_should_contain', - 
'log_shouldnt_contain', - 'conf_files_exists', - 'should_raise_exc', - 'logger_level', - ), - [ - # testing normal behaviour - ( - 'blacklist sctp', - '#blacklist sctp', - 'Enabled SCTP', - None, - True, - None, - logging.INFO, - ), - # testing if regex works also in case sctp just a part of a string - ( - 'blacklist some-sctp', - '#blacklist some-sctp', - 'Enabled SCTP', - None, - True, - None, - logging.INFO, - ), - # testing if script skips non sctp lines - ( - 'blacklist tcp', - 'blacklist tcp', - 'Enabled SCTP', - None, - True, - None, - logging.INFO, - ), - # testing if the logger message is empty on warning level - ( - 'blacklist tcp', - 'blacklist tcp', - '', - None, - True, - None, - logging.WARNING, - ), - # testing if CalledProcessError raised when sed exits with non 0 and - # logger not emits Enabled SCTP (what we want) - ( - 'blacklist tcp', - 'blacklist tcp', - None, - 'Enabled SCTP', - False, - CalledProcessError, - logging.INFO, - ), - ], -) -def test_enable_sctp( - conf_content, - exp_new_conf_content, - log_should_contain, - log_shouldnt_contain, - conf_files_exists, - should_raise_exc, - logger_level, - monkeypatch, - tmpdir, - caplog, - capsys, -): - def mock_run(args): - logger.info('Calling run with %r', args) - res = subprocess.call(args) - if res != 0: - raise CalledProcessError( - message='Sed fails with error code {!r}'.format(res), - command=args, - result=res, - ) - - monkeypatch.setattr(sctpupdate, 'run', mock_run) - - sctp_diag_blacklist_conf = tmpdir.join('sctp_diag-blacklist.conf') - sctp_blacklist_conf = tmpdir.join('sctp-blacklist.conf') - if conf_files_exists: - sctp_diag_blacklist_conf.write(conf_content) - sctp_blacklist_conf.write(conf_content) - - with caplog.at_level(logger_level): - if not should_raise_exc: - sctpupdate.enable_sctp(_modprobe_d_path=str(tmpdir)) - with open(str(sctp_blacklist_conf)) as conf: - assert conf.readlines() == [exp_new_conf_content] - else: - with pytest.raises(should_raise_exc): - 
sctpupdate.enable_sctp(_modprobe_d_path=str(tmpdir)) - - if log_should_contain is not None: - assert log_should_contain in caplog.text - if log_shouldnt_contain is not None: - assert log_shouldnt_contain not in caplog.text diff --git a/repos/system_upgrade/el7toel8/actors/sourcebootloaderscanner/actor.py b/repos/system_upgrade/el7toel8/actors/sourcebootloaderscanner/actor.py deleted file mode 100644 index b85be50d..00000000 --- a/repos/system_upgrade/el7toel8/actors/sourcebootloaderscanner/actor.py +++ /dev/null @@ -1,18 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor.sourcebootloaderscanner import scan_source_boot_loader_configuration -from leapp.models import SourceBootLoaderConfiguration -from leapp.tags import FactsPhaseTag, IPUWorkflowTag - - -class SourceBootLoaderScanner(Actor): - """ - Scans the boot loader configuration on the source system. - """ - - name = 'source_boot_loader_scanner' - consumes = () - produces = (SourceBootLoaderConfiguration,) - tags = (FactsPhaseTag, IPUWorkflowTag) - - def process(self): - scan_source_boot_loader_configuration() diff --git a/repos/system_upgrade/el7toel8/actors/sourcebootloaderscanner/libraries/sourcebootloaderscanner.py b/repos/system_upgrade/el7toel8/actors/sourcebootloaderscanner/libraries/sourcebootloaderscanner.py deleted file mode 100644 index 111bb6ca..00000000 --- a/repos/system_upgrade/el7toel8/actors/sourcebootloaderscanner/libraries/sourcebootloaderscanner.py +++ /dev/null @@ -1,90 +0,0 @@ -from leapp.exceptions import StopActorExecutionError -from leapp.libraries.stdlib import api, CalledProcessError, run -from leapp.models import BootEntry, SourceBootLoaderConfiguration - -CMD_GRUBBY_INFO_ALL = ['grubby', '--info', 'ALL'] - - -def parse_grubby_output_line(line): - """ - Parses a single output line of `grubby --info ALL` that has the property=value format and returns a tuple - (property, value). - - Quotes are removed from the value. - :param str line: A line of the grubby output. 
- :returns: Tuple containing the key (boot entry property) and its value. - :rtype: tuple - """ - line_fragments = line.split('=', 1) - if len(line_fragments) != 2: - # The line does not have the property=value format, something is wrong - raise StopActorExecutionError( - message='Failed to parse `grubby` output.', - details={ - 'details': 'The following line does not appear to have expected format: {0}'.format(line) - } - ) - - prop, value = line_fragments - value = value.strip('\'"') - return (prop, value) - - -def scan_boot_entries(): - """ - Scans the available boot entries. - - :rtype: list - :returns: A list of available boot entries found in the boot loader configuration. - """ - try: - grubby_output = run(CMD_GRUBBY_INFO_ALL, split=True) - except CalledProcessError as err: - # We have failed to call `grubby` - something is probably wrong here. - raise StopActorExecutionError( - message='Failed to call `grubby` to list available boot entries.', - details={ - 'details': str(err), - 'stderr': err.stderr - } - ) - - boot_entries = [] - boot_entry_data = {} - for output_line in grubby_output['stdout']: - if output_line == 'non linux entry': - # Grubby does not display info about non-linux entries - # Such an entry is not problematic from our PoV, therefore, skip it - boot_entry_data = {} - continue - - prop, value = parse_grubby_output_line(output_line) - if prop == 'index': - # Start of a new boot entry section - if boot_entry_data: - # There has been a valid linux entry - boot_entries.append( - BootEntry(title=boot_entry_data.get('title', ''), # In theory, the title property can be missing - kernel_image=boot_entry_data['kernel'])) - boot_entry_data = {} - boot_entry_data[prop] = value - - # There was no 'index=' line after the last boot entry section, thus, its data has not been converted to a model. 
- if boot_entry_data: - boot_entries.append(BootEntry(title=boot_entry_data.get('title', ''), - kernel_image=boot_entry_data['kernel'])) - return boot_entries - - -def scan_source_boot_loader_configuration(): - """ - Scans the boot loader configuration. - - Produces :class:`SourceBootLoaderConfiguration for other actors to act upon. - """ - - boot_loader_configuration = SourceBootLoaderConfiguration( - entries=scan_boot_entries() - ) - - api.produce(boot_loader_configuration) diff --git a/repos/system_upgrade/el7toel8/actors/sourcebootloaderscanner/tests/test_bootentryscanner.py b/repos/system_upgrade/el7toel8/actors/sourcebootloaderscanner/tests/test_bootentryscanner.py deleted file mode 100644 index 284ec11f..00000000 --- a/repos/system_upgrade/el7toel8/actors/sourcebootloaderscanner/tests/test_bootentryscanner.py +++ /dev/null @@ -1,60 +0,0 @@ -import pytest - -from leapp.libraries import stdlib -from leapp.libraries.actor import sourcebootloaderscanner -from leapp.libraries.common.testutils import produce_mocked - -GRUBBY_INFO_ALL_STDOUT = '''index=0 -kernel="/boot/vmlinuz-4.18.0-305.7.1.el8_4.x86_64" -args="ro uned_params" -root="/someroot" -initrd="/boot/initramfs-4.18.0-305.7.1.el8_4.x86_64.img" -title="Linux" -id="some_id" -index=1 -kernel="/boot/vmlinuz-4.18.0-305.3.1.el8_4.x86_64" -args="ro" -root="/someroot" -initrd="/boot/initramfs-4.18.0-305.3.1.el8_4.x86_64.img" -title="Linux old-kernel" -id="some_id2" -index=2 -non linux entry''' - - -def test_scan_boot_entries(monkeypatch): - """Tests whether the library correctly identifies boot entries in the grubby output.""" - def run_mocked(cmd, **kwargs): - if cmd == ['grubby', '--info', 'ALL']: - return { - 'stdout': GRUBBY_INFO_ALL_STDOUT.split('\n') - } - raise ValueError('Tried to run unexpected command.') - - actor_produces = produce_mocked() - - # The library imports `run` all the way (from ... 
import run), therefore, - # we must monkeypatch the reference directly in the actor's library namespace - monkeypatch.setattr(sourcebootloaderscanner, 'run', run_mocked) - monkeypatch.setattr(stdlib.api, 'produce', actor_produces) - - sourcebootloaderscanner.scan_source_boot_loader_configuration() - - fail_description = 'Only one SourceBootLoaderConfiguration message should be produced.' - assert len(actor_produces.model_instances) == 1, fail_description - - bootloader_config = actor_produces.model_instances[0] - - fail_description = 'Found different number of boot entries than present in provided mocks.' - assert len(bootloader_config.entries) == 2, fail_description - - expected_entries = [ - {'title': 'Linux', 'kernel_image': '/boot/vmlinuz-4.18.0-305.7.1.el8_4.x86_64'}, - {'title': 'Linux old-kernel', 'kernel_image': '/boot/vmlinuz-4.18.0-305.3.1.el8_4.x86_64'}, - ] - - actual_entries = sorted(bootloader_config.entries, key=lambda entry: entry.title) - - for actual_entry, expected_entry in zip(actual_entries, expected_entries): - assert actual_entry.title == expected_entry['title'] - assert actual_entry.kernel_image == expected_entry['kernel_image'] diff --git a/repos/system_upgrade/el7toel8/actors/spamassassinconfigcheck/actor.py b/repos/system_upgrade/el7toel8/actors/spamassassinconfigcheck/actor.py deleted file mode 100644 index 3fdb27f5..00000000 --- a/repos/system_upgrade/el7toel8/actors/spamassassinconfigcheck/actor.py +++ /dev/null @@ -1,33 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import spamassassinconfigcheck -from leapp.models import Report, SpamassassinFacts -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class SpamassassinConfigCheck(Actor): - """ - Reports changes in spamassassin between RHEL-7 and RHEL-8 - - Reports backward-incompatible changes that have been made in spamassassin - between RHEL-7 and RHEL-8 (spamc no longer accepts an argument with the --ssl - option; spamd no longer accepts the 
--ssl-version; SSLv3 is no longer supported; - the type of spamassassin.service has been changed from "forking" to "simple"; - sa-update no longer supports SHA1 validation of rule files). - - The migration of the configuration files will be mostly handled by the - SpamassassinConfigUpdate actor, however the admin still needs to know about - the changes so that they can do any necessary migration in places that we cannot - reach (e.g. scripts). - """ - - name = 'spamassassin_config_check' - consumes = (SpamassassinFacts,) - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - facts = next(self.consume(SpamassassinFacts), None) - if facts: - spamassassinconfigcheck.produce_reports(facts) - else: - self.log.debug('Skipping execution - no SpamassassinFacts message has been produced.') diff --git a/repos/system_upgrade/el7toel8/actors/spamassassinconfigcheck/libraries/spamassassinconfigcheck.py b/repos/system_upgrade/el7toel8/actors/spamassassinconfigcheck/libraries/spamassassinconfigcheck.py deleted file mode 100644 index 3a4cf186..00000000 --- a/repos/system_upgrade/el7toel8/actors/spamassassinconfigcheck/libraries/spamassassinconfigcheck.py +++ /dev/null @@ -1,107 +0,0 @@ -from leapp import reporting -from leapp.libraries.common.spamassassinutils import ( - SPAMASSASSIN_SERVICE_OVERRIDE, - SPAMC_CONFIG_FILE, - SYSCONFIG_SPAMASSASSIN -) - - -def _check_spamc_config(facts, report_func): - title = ('spamc no longer allows specifying the TLS version and no longer ' - 'supports SSLv3') - summary_generic = ('spamc no longer allows using the "--ssl" option with an ' - 'argument specifying the TLS version - the option can only ' - 'be used without an argument. Also, spamc no longer supports ' - 'SSLv3.') - if facts.spamc_ssl_argument: - summary_detail = ('The spamc configuration file uses "--ssl %s", it will ' - 'be updated during the upgrade.' 
- % facts.spamc_ssl_argument) - summary = reporting.Summary('%s %s' % (summary_generic, summary_detail)) - resource = reporting.RelatedResource('file', SPAMC_CONFIG_FILE) - else: - summary = reporting.Summary(summary_generic) - resource = None - severity = (reporting.Severity.HIGH if facts.spamc_ssl_argument == 'sslv3' - else reporting.Severity.MEDIUM) - hint = 'Please update your scripts and configuration, if there are any.' - - args = [ - reporting.Title(title), - summary, - reporting.Groups([reporting.Groups.ENCRYPTION]), - reporting.Severity(severity), - reporting.Remediation(hint=hint), - ] - if resource: - args.append(resource) - report_func(args) - - -def _check_spamd_config_ssl(facts, report_func): - title = ('spamd no longer allows specifying the TLS version and no longer ' - 'supports SSLv3') - summary_generic = ('spamd no longer accepts the --ssl-version option and ' - 'no longer supports SSLv3.') - if facts.spamd_ssl_version: - summary_detail = ('The spamd sysconfig file uses "--ssl-version %s", ' - 'it will be updated during the upgrade.' - % facts.spamd_ssl_version) - summary = reporting.Summary('%s %s' % (summary_generic, summary_detail)) - resource = reporting.RelatedResource('file', SYSCONFIG_SPAMASSASSIN) - else: - summary = reporting.Summary(summary_generic) - resource = None - severity = (reporting.Severity.HIGH if facts.spamd_ssl_version == 'sslv3' - else reporting.Severity.MEDIUM) - hint = 'Please update your scripts and configuration, if there are any.' - - args = [ - reporting.Title(title), - summary, - reporting.Groups([reporting.Groups.ENCRYPTION, reporting.Groups.SERVICES]), - reporting.Severity(severity), - reporting.Remediation(hint=hint) - ] - if resource: - args.append(resource) - report_func(args) - - -def _check_spamd_config_service_type(facts, report_func): - title = 'The type of the spamassassin systemd service has changed' - summary_generic = 'The type of spamassassin.service has been changed from "forking" to "simple".' 
- if facts.service_overriden: - summary_detail = 'However, the service appears to be overridden; no migration action will occur.' - resource = reporting.RelatedResource('file', SPAMASSASSIN_SERVICE_OVERRIDE) - else: - summary_detail = 'The spamassassin sysconfig file will be updated.' - resource = reporting.RelatedResource('file', SYSCONFIG_SPAMASSASSIN) - report_func([ - reporting.Title(title), - reporting.Summary('%s %s' % (summary_generic, summary_detail)), - reporting.Groups([reporting.Groups.SERVICES]), - reporting.Severity(reporting.Severity.MEDIUM), - resource - ]) - - -def _report_sa_update_change(report_func): - summary = ('sa-update no longer supports SHA1 validation of filtering rules, ' - 'SHA256/SHA512 validation is done instead. This may affect you if ' - 'you are using an alternative update channel (sa-update used with ' - 'option --channel or --channelfile), or if you install filtering ' - 'rule updates directly from files (sa-update --install).') - report_func([reporting.Title('sa-update no longer supports SHA1 validation'), - reporting.Summary(summary), - reporting.Severity(reporting.Severity.LOW)]) - - -def produce_reports(facts): - """ - Checks spamassassin configuration and produces reports. 
- """ - _check_spamc_config(facts, reporting.create_report) - _check_spamd_config_ssl(facts, reporting.create_report) - _check_spamd_config_service_type(facts, reporting.create_report) - _report_sa_update_change(reporting.create_report) diff --git a/repos/system_upgrade/el7toel8/actors/spamassassinconfigcheck/tests/test_actor_spamassassinconfigcheck.py b/repos/system_upgrade/el7toel8/actors/spamassassinconfigcheck/tests/test_actor_spamassassinconfigcheck.py deleted file mode 100644 index b99a436e..00000000 --- a/repos/system_upgrade/el7toel8/actors/spamassassinconfigcheck/tests/test_actor_spamassassinconfigcheck.py +++ /dev/null @@ -1,28 +0,0 @@ -from leapp.models import SpamassassinFacts -from leapp.reporting import Report -from leapp.snactor.fixture import current_actor_context - - -def test_actor_basic(current_actor_context): - facts = SpamassassinFacts(service_overriden=False) - - current_actor_context.feed(facts) - current_actor_context.run() - reports = current_actor_context.consume(Report) - - assert len(reports) == 4 - report = reports[0] - assert '--ssl' in report.report['summary'] - assert 'spamc' in report.report['summary'] - report = reports[1] - assert '--ssl-version' in report.report['summary'] - assert 'spamd' in report.report['summary'] - report = reports[2] - assert 'spamassassin.service' in report.report['summary'] - report = reports[3] - assert 'sa-update no longer supports SHA1' in report.report['summary'] - - -def test_actor_no_message(current_actor_context): - current_actor_context.run() - assert not current_actor_context.consume(Report) diff --git a/repos/system_upgrade/el7toel8/actors/spamassassinconfigcheck/tests/test_library_spamassassinconfigcheck.py b/repos/system_upgrade/el7toel8/actors/spamassassinconfigcheck/tests/test_library_spamassassinconfigcheck.py deleted file mode 100644 index a54dae21..00000000 --- a/repos/system_upgrade/el7toel8/actors/spamassassinconfigcheck/tests/test_library_spamassassinconfigcheck.py +++ /dev/null @@ 
-1,152 +0,0 @@ -from leapp.libraries.actor import spamassassinconfigcheck -from leapp.libraries.common.testutils import create_report_mocked -from leapp.models import SpamassassinFacts - - -def test_check_spamc_config_tlsv1(): - facts = SpamassassinFacts(spamc_ssl_argument='tlsv1', service_overriden=False) - report_func = create_report_mocked() - - spamassassinconfigcheck._check_spamc_config(facts, report_func) - - assert report_func.called == 1 - report_fields = report_func.report_fields - assert 'specifying the TLS version' in report_fields['title'] - assert 'SSLv3' in report_fields['title'] - assert '--ssl' in report_fields['summary'] - assert 'SSLv3' in report_fields['summary'] - assert 'spamc configuration file' in report_fields['summary'] - assert '--ssl tlsv1' in report_fields['summary'] - assert all('update your scripts' in r['context'] for r in report_fields['detail']['remediations']) - assert report_fields['severity'] == 'medium' - - -def test_check_spamc_config_sslv3(): - facts = SpamassassinFacts(spamc_ssl_argument='sslv3', service_overriden=False) - report_func = create_report_mocked() - - spamassassinconfigcheck._check_spamc_config(facts, report_func) - - assert report_func.called == 1 - report_fields = report_func.report_fields - assert 'specifying the TLS version' in report_fields['title'] - assert 'SSLv3' in report_fields['title'] - assert '--ssl' in report_fields['summary'] - assert 'SSLv3' in report_fields['summary'] - assert 'spamc configuration file' in report_fields['summary'] - assert '--ssl sslv3' in report_fields['summary'] - assert all('update your scripts' in r['context'] for r in report_fields['detail']['remediations']) - assert report_fields['severity'] == 'high' - - -def test_check_spamc_config_correct_config(): - facts = SpamassassinFacts(spamc_ssl_argument=None, service_overriden=False) - report_func = create_report_mocked() - - spamassassinconfigcheck._check_spamc_config(facts, report_func) - - assert report_func.called == 1 - 
report_fields = report_func.report_fields - assert 'specifying the TLS version' in report_fields['title'] - assert 'SSLv3' in report_fields['title'] - assert '--ssl' in report_fields['summary'] - assert 'SSLv3' in report_fields['summary'] - assert 'spamc configuration file' not in report_fields['summary'] - assert all('update your scripts' in r['context'] for r in report_fields['detail']['remediations']) - assert report_fields['severity'] == 'medium' - - -def test_check_spamd_config_ssl_tlsv1(): - facts = SpamassassinFacts(spamd_ssl_version='tlsv1', service_overriden=False) - report_func = create_report_mocked() - - spamassassinconfigcheck._check_spamd_config_ssl(facts, report_func) - - assert report_func.called == 1 - report_fields = report_func.report_fields - assert 'specifying the TLS version' in report_fields['title'] - assert 'SSLv3' in report_fields['title'] - assert '--ssl-version' in report_fields['summary'] - assert 'SSLv3' in report_fields['summary'] - assert 'sysconfig' in report_fields['summary'] - assert '--ssl-version tlsv1' in report_fields['summary'] - assert all('update your scripts' in r['context'] for r in report_fields['detail']['remediations']) - assert report_fields['severity'] == 'medium' - - -def test_check_spamd_config_ssl_sslv3(): - facts = SpamassassinFacts(spamd_ssl_version='sslv3', service_overriden=False) - report_func = create_report_mocked() - - spamassassinconfigcheck._check_spamd_config_ssl(facts, report_func) - - assert report_func.called == 1 - report_fields = report_func.report_fields - assert 'specifying the TLS version' in report_fields['title'] - assert 'SSLv3' in report_fields['title'] - assert '--ssl-version' in report_fields['summary'] - assert 'SSLv3' in report_fields['summary'] - assert 'sysconfig' in report_fields['summary'] - assert '--ssl-version sslv3' in report_fields['summary'] - assert all('update your scripts' in r['context'] for r in report_fields['detail']['remediations']) - assert report_fields['severity'] == 
'high' - - -def test_check_spamd_config_ssl_correct_config(): - facts = SpamassassinFacts(spamd_ssl_version=None, service_overriden=False) - report_func = create_report_mocked() - - spamassassinconfigcheck._check_spamd_config_ssl(facts, report_func) - - assert report_func.called == 1 - report_fields = report_func.report_fields - assert 'specifying the TLS version' in report_fields['title'] - assert 'SSLv3' in report_fields['title'] - assert '--ssl-version' in report_fields['summary'] - assert 'SSLv3' in report_fields['summary'] - assert 'sysconfig' not in report_fields['summary'] - assert all('update your scripts' in r['context'] for r in report_fields['detail']['remediations']) - assert report_fields['severity'] == 'medium' - - -def test_check_spamd_config_service_type_service_overriden(): - facts = SpamassassinFacts(service_overriden=True) - report_func = create_report_mocked() - - spamassassinconfigcheck._check_spamd_config_service_type(facts, report_func) - - assert report_func.called == 1 - report_fields = report_func.report_fields - assert 'type of the spamassassin systemd service' in report_fields['title'] - assert 'The type of spamassassin.service' in report_fields['summary'] - assert 'overridden' in report_fields['summary'] - assert report_fields['severity'] == 'medium' - - -def test_check_spamd_config_service_type_service_not_overriden(): - facts = SpamassassinFacts(service_overriden=False) - report_func = create_report_mocked() - - spamassassinconfigcheck._check_spamd_config_service_type(facts, report_func) - - assert report_func.called == 1 - report_fields = report_func.report_fields - assert 'type of the spamassassin systemd service' in report_fields['title'] - assert 'The type of spamassassin.service' in report_fields['summary'] - assert 'will be updated' in report_fields['summary'] - assert report_fields['severity'] == 'medium' - - -def test_report_sa_update_change(): - report_func = create_report_mocked() - - 
spamassassinconfigcheck._report_sa_update_change(report_func) - - assert report_func.called == 1 - report_fields = report_func.report_fields - assert 'sa-update no longer supports SHA1' in report_fields['title'] - assert 'no longer supports SHA1' in report_fields['summary'] - assert 'SHA256/SHA512' in report_fields['summary'] - assert '--channel or --channelfile' in report_fields['summary'] - assert '--install' in report_fields['summary'] - assert report_fields['severity'] == 'low' diff --git a/repos/system_upgrade/el7toel8/actors/spamassassinconfigread/actor.py b/repos/system_upgrade/el7toel8/actors/spamassassinconfigread/actor.py deleted file mode 100644 index 87451f1a..00000000 --- a/repos/system_upgrade/el7toel8/actors/spamassassinconfigread/actor.py +++ /dev/null @@ -1,26 +0,0 @@ -import os - -from leapp.actors import Actor -from leapp.libraries.actor import spamassassinconfigread -from leapp.libraries.common.utils import read_file -from leapp.models import DistributionSignedRPM, SpamassassinFacts -from leapp.tags import FactsPhaseTag, IPUWorkflowTag - - -class SpamassassinConfigRead(Actor): - """ - Reads spamc configuration (/etc/mail/spamassassin/spamc.conf), the - spamassassin sysconfig file (/etc/sysconfig/spamassassin) and checks - whether the spamassassin service has been overridden. Produces - SpamassassinFacts containing the extracted information. 
- """ - - name = 'spamassassin_config_read' - consumes = (DistributionSignedRPM,) - produces = (SpamassassinFacts,) - tags = (FactsPhaseTag, IPUWorkflowTag) - - def process(self): - if spamassassinconfigread.is_processable(): - self.produce(spamassassinconfigread.get_spamassassin_facts(read_func=read_file, - listdir=os.listdir)) diff --git a/repos/system_upgrade/el7toel8/actors/spamassassinconfigread/libraries/spamassassinconfigread.py b/repos/system_upgrade/el7toel8/actors/spamassassinconfigread/libraries/spamassassinconfigread.py deleted file mode 100644 index 9ed8c091..00000000 --- a/repos/system_upgrade/el7toel8/actors/spamassassinconfigread/libraries/spamassassinconfigread.py +++ /dev/null @@ -1,27 +0,0 @@ -from leapp.libraries.actor import spamassassinconfigread_spamc, spamassassinconfigread_spamd -from leapp.libraries.common.rpms import has_package -from leapp.libraries.stdlib import api -from leapp.models import DistributionSignedRPM, SpamassassinFacts - - -def is_processable(): - """ - Checks whether the spamassassin package is installed. - """ - res = has_package(DistributionSignedRPM, 'spamassassin') - if not res: - api.current_logger().debug('spamassassin is not installed.') - return res - - -def get_spamassassin_facts(read_func, listdir): - """ - Reads the spamc configuration file, the spamassassin sysconfig file and checks - whether the spamassassin service is overridden. Returns SpamassassinFacts. 
- """ - spamc_ssl_argument = spamassassinconfigread_spamc.get_spamc_ssl_argument(read_func) - service_overriden = spamassassinconfigread_spamd.spamassassin_service_overriden(listdir) - spamd_ssl_version = spamassassinconfigread_spamd.get_spamd_ssl_version(read_func) - return SpamassassinFacts(spamc_ssl_argument=spamc_ssl_argument, - service_overriden=service_overriden, - spamd_ssl_version=spamd_ssl_version) diff --git a/repos/system_upgrade/el7toel8/actors/spamassassinconfigread/libraries/spamassassinconfigread_spamc.py b/repos/system_upgrade/el7toel8/actors/spamassassinconfigread/libraries/spamassassinconfigread_spamc.py deleted file mode 100644 index 0b85d269..00000000 --- a/repos/system_upgrade/el7toel8/actors/spamassassinconfigread/libraries/spamassassinconfigread_spamc.py +++ /dev/null @@ -1,35 +0,0 @@ -import errno -import re - -from leapp.libraries.common.spamassassinutils import SPAMC_CONFIG_FILE -from leapp.libraries.stdlib import api - - -def _remove_comments(content): - return re.sub(r'^#.*$', '', content, flags=re.MULTILINE) - - -def _parse_spamc_ssl_argument(content): - content = _remove_comments(content) - res = None - for match in re.finditer(r'(? 
0] - return DaemonList(value=daemon_list) - - -def _get_daemon_lists_in_file(path, read_func=_read_file): - ret = [] - try: - content = read_func(path) - except IOError as e: - if e.errno != errno.ENOENT: - api.current_logger().warning('Failed to read %s: %s' % (path, e)) - return ret - lines = [line for line in _get_lines(content) if not _is_comment(line)] - for line in lines: - ret.append(_get_daemon_list_in_line(line)) - return ret - - -def _get_daemon_lists(read_func=_read_file): - daemon_lists = [] - for path in CONFIG_FILES: - daemon_lists.extend(_get_daemon_lists_in_file(path, read_func=read_func)) - return daemon_lists - - -def get_tcp_wrappers_facts(read_func=_read_file): - daemon_lists = _get_daemon_lists(read_func=read_func) - return TcpWrappersFacts(daemon_lists=daemon_lists) diff --git a/repos/system_upgrade/el7toel8/actors/tcpwrappersconfigread/tests/test_library_tcpwrappersconfigread.py b/repos/system_upgrade/el7toel8/actors/tcpwrappersconfigread/tests/test_library_tcpwrappersconfigread.py deleted file mode 100644 index 7bb0f7b9..00000000 --- a/repos/system_upgrade/el7toel8/actors/tcpwrappersconfigread/tests/test_library_tcpwrappersconfigread.py +++ /dev/null @@ -1,167 +0,0 @@ -import errno - -from leapp.libraries.actor import tcpwrappersconfigread -from leapp.libraries.common.testutils import make_IOError - - -class MockFileReader(object): - def __init__(self): - self.files = {} - self.files_read = {} - self.read_called = 0 - - def _increment_read_counters(self, path): - self.read_called += 1 - self.files_read.setdefault(path, 0) - self.files_read[path] += 1 - - def read(self, path): - self._increment_read_counters(path) - try: - return self.files[path] - except KeyError: - raise make_IOError(errno.ENOENT) - - -def test_get_daemon_list_in_line_simple(): - line = 'vsftpd : 192.168.2.*' - daemon_list = tcpwrappersconfigread._get_daemon_list_in_line(line) - assert daemon_list.value == ['vsftpd'] - - -def test_get_daemon_list_in_line_multiple(): - line 
= 'vsftpd, sendmail : 192.168.2.*' - daemon_list = tcpwrappersconfigread._get_daemon_list_in_line(line) - assert daemon_list.value == ['vsftpd', 'sendmail'] - - line = 'ALL EXCEPT sendmail : 192.168.2.*' - daemon_list = tcpwrappersconfigread._get_daemon_list_in_line(line) - assert daemon_list.value == ['ALL', 'EXCEPT', 'sendmail'] - - # different separators - line = 'vsftpd,sendmail : 192.168.2.*' - daemon_list = tcpwrappersconfigread._get_daemon_list_in_line(line) - assert daemon_list.value == ['vsftpd', 'sendmail'] - - line = 'vsftpd\tsendmail : 192.168.2.*' - daemon_list = tcpwrappersconfigread._get_daemon_list_in_line(line) - assert daemon_list.value == ['vsftpd', 'sendmail'] - - line = 'vsftpd, \t sendmail : 192.168.2.*' - daemon_list = tcpwrappersconfigread._get_daemon_list_in_line(line) - assert daemon_list.value == ['vsftpd', 'sendmail'] - - -def test_get_daemon_list_in_line_malformed(): - line = 'foo' - daemon_list = tcpwrappersconfigread._get_daemon_list_in_line(line) - # tcp_wrappers actually ignores lines like this, but there's no harm in being - # over-sensitive here. 
- assert daemon_list.value == ['foo'] - - -def test_get_lines_empty(): - content = '' - lines = tcpwrappersconfigread._get_lines(content) - assert lines == [''] - - -def test_get_lines_simple(): - content = 'vsftpd : 192.168.2.*\n' \ - 'ALL : 192.168.1.*\n' - lines = tcpwrappersconfigread._get_lines(content) - assert lines == content.split('\n') - - -def test_get_lines_continued_line(): - content = 'vsftpd : 192.168\\\n.2.*' - lines = tcpwrappersconfigread._get_lines(content) - expected = ['vsftpd : 192.168.2.*'] - assert lines == expected - - -def test_get_lines_backslash_followed_by_whitespace(): - content = 'foo \\ \nthis is not a continuation line' - lines = tcpwrappersconfigread._get_lines(content) - expected = ['foo \\ ', 'this is not a continuation line'] - assert lines == expected - - -def test_get_lines_continued_comment(): - content = '# foo \\\n' \ - 'this is still a comment' - lines = tcpwrappersconfigread._get_lines(content) - expected = ['# foo this is still a comment'] - assert lines == expected - - -def test_is_comment(): - assert tcpwrappersconfigread._is_comment('') is True - assert tcpwrappersconfigread._is_comment(' ') is True - assert tcpwrappersconfigread._is_comment('# foo') is True - assert tcpwrappersconfigread._is_comment('#') is True - assert tcpwrappersconfigread._is_comment(' # foo') is False - assert tcpwrappersconfigread._is_comment('foo') is False - assert tcpwrappersconfigread._is_comment(' foo') is False - - -def test_get_daemon_lists_in_file(): - path = '/etc/hosts.allow' - reader = MockFileReader() - reader.files[path] = 'vsftpd : 192.168.2.*\n' \ - 'ALL : 192.168.1.*\n' - - daemon_lists = tcpwrappersconfigread._get_daemon_lists_in_file(path, read_func=reader.read) - - num_lines = 2 - assert len(daemon_lists) == num_lines - assert daemon_lists[0].value == ['vsftpd'] - assert daemon_lists[1].value == ['ALL'] - - -def test_get_daemon_lists_in_file_nonexistent(): - reader = MockFileReader() - daemon_lists = 
tcpwrappersconfigread._get_daemon_lists_in_file('/etc/hosts.allow', read_func=reader.read) - assert not daemon_lists - - -def test_get_daemon_lists(): - reader = MockFileReader() - reader.files['/etc/hosts.allow'] = 'vsftpd : 192.168.2.*\n' \ - 'ALL : 192.168.1.*\n' - reader.files['/etc/hosts.deny'] = 'sendmail : 192.168.2.*\n' - - daemon_lists = tcpwrappersconfigread._get_daemon_lists(read_func=reader.read) - - num_lines = 3 - assert len(daemon_lists) == num_lines - assert daemon_lists[0].value == ['vsftpd'] - assert daemon_lists[1].value == ['ALL'] - assert daemon_lists[2].value == ['sendmail'] - - -def test_get_daemon_lists_nonexistent_config(): - reader = MockFileReader() - daemon_lists = tcpwrappersconfigread._get_daemon_lists(read_func=reader.read) - assert not daemon_lists - - -def test_get_tcp_wrappers_facts(): - reader = MockFileReader() - reader.files['/etc/hosts.allow'] = 'vsftpd : 192.168.2.*\n' \ - 'ALL : 192.168.1.*\n' - reader.files['/etc/hosts.deny'] = 'sendmail : 192.168.2.*\n' - - facts = tcpwrappersconfigread.get_tcp_wrappers_facts(read_func=reader.read) - - num_lines = 3 - assert len(facts.daemon_lists) == num_lines - assert facts.daemon_lists[0].value == ['vsftpd'] - assert facts.daemon_lists[1].value == ['ALL'] - assert facts.daemon_lists[2].value == ['sendmail'] - - -def test_get_tcp_wrappers_facts_nonexistent_config(): - reader = MockFileReader() - facts = tcpwrappersconfigread.get_tcp_wrappers_facts(read_func=reader.read) - assert not facts.daemon_lists diff --git a/repos/system_upgrade/el7toel8/actors/updateetcsysconfigkernel/actor.py b/repos/system_upgrade/el7toel8/actors/updateetcsysconfigkernel/actor.py deleted file mode 100644 index cfbdd6ba..00000000 --- a/repos/system_upgrade/el7toel8/actors/updateetcsysconfigkernel/actor.py +++ /dev/null @@ -1,20 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import updateetcsysconfigkernel -from leapp.tags import IPUWorkflowTag, PreparationPhaseTag - - -class 
UpdateEtcSysconfigKernel(Actor): - """ - Update /etc/sysconfig/kernel file. - - In order to proceed with Upgrade process, DEFAULTKERNEL entry should be updated from kernel to - kernel-core. - """ - - name = 'update_etc_sysconfig_kernel' - consumes = () - produces = () - tags = (PreparationPhaseTag, IPUWorkflowTag) - - def process(self): - updateetcsysconfigkernel.update_kernel_config('/etc/sysconfig/kernel') diff --git a/repos/system_upgrade/el7toel8/actors/updateetcsysconfigkernel/libraries/updateetcsysconfigkernel.py b/repos/system_upgrade/el7toel8/actors/updateetcsysconfigkernel/libraries/updateetcsysconfigkernel.py deleted file mode 100644 index 34216c3e..00000000 --- a/repos/system_upgrade/el7toel8/actors/updateetcsysconfigkernel/libraries/updateetcsysconfigkernel.py +++ /dev/null @@ -1,9 +0,0 @@ -from leapp.libraries.stdlib import run - - -def update_kernel_config(path): - """ Update DEFAULTKERNEL entry at provided config file """ - run(['/bin/sed', - '-i', - 's/^DEFAULTKERNEL=kernel$/DEFAULTKERNEL=kernel-core/g', - path]) diff --git a/repos/system_upgrade/el7toel8/actors/updateetcsysconfigkernel/tests/files/expected b/repos/system_upgrade/el7toel8/actors/updateetcsysconfigkernel/tests/files/expected deleted file mode 100644 index 78c9e37c..00000000 --- a/repos/system_upgrade/el7toel8/actors/updateetcsysconfigkernel/tests/files/expected +++ /dev/null @@ -1,6 +0,0 @@ -# UPDATEDEFAULT specifies if new-kernel-pkg should make -# new kernels the default -UPDATEDEFAULT=yes - -# DEFAULTKERNEL specifies the default kernel package type -DEFAULTKERNEL=kernel-core diff --git a/repos/system_upgrade/el7toel8/actors/updateetcsysconfigkernel/tests/files/original b/repos/system_upgrade/el7toel8/actors/updateetcsysconfigkernel/tests/files/original deleted file mode 100644 index 704d612e..00000000 --- a/repos/system_upgrade/el7toel8/actors/updateetcsysconfigkernel/tests/files/original +++ /dev/null @@ -1,6 +0,0 @@ -# UPDATEDEFAULT specifies if new-kernel-pkg should make -# new 
kernels the default -UPDATEDEFAULT=yes - -# DEFAULTKERNEL specifies the default kernel package type -DEFAULTKERNEL=kernel diff --git a/repos/system_upgrade/el7toel8/actors/updateetcsysconfigkernel/tests/unit_test_updateetcsysconfigkernel.py b/repos/system_upgrade/el7toel8/actors/updateetcsysconfigkernel/tests/unit_test_updateetcsysconfigkernel.py deleted file mode 100644 index 7fa444ca..00000000 --- a/repos/system_upgrade/el7toel8/actors/updateetcsysconfigkernel/tests/unit_test_updateetcsysconfigkernel.py +++ /dev/null @@ -1,29 +0,0 @@ -import os -import tempfile - -import pytest - -from leapp.libraries.actor import updateetcsysconfigkernel - - -# TODO [Artem] could be solved -@pytest.mark.skip(reason='Failing on CI complaining about missing leapp.db fiel') -def test_update_kernel_config(monkeypatch): - temp = tempfile.NamedTemporaryFile(delete=False) - with open('tests/files/original') as f: - data = f.readlines() - temp.writelines(data) - temp.close() - - updateetcsysconfigkernel.update_kernel_config(temp.name) - - with open(temp.name) as f: - result = f.readlines() - - with open('tests/files/expected') as f: - expected = f.readlines() - - assert result == expected - - os.unlink(temp.name) - assert not os.path.exists(temp.name) diff --git a/repos/system_upgrade/el7toel8/actors/vimmigrate/actor.py b/repos/system_upgrade/el7toel8/actors/vimmigrate/actor.py deleted file mode 100644 index 14b57341..00000000 --- a/repos/system_upgrade/el7toel8/actors/vimmigrate/actor.py +++ /dev/null @@ -1,19 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import vimmigrate -from leapp.models import DistributionSignedRPM -from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag - - -class VimMigrate(Actor): - """ - Modify configuration files of Vim 8.0 and later to keep the same behavior - as Vim 7.4 and earlier had. 
- """ - - name = 'vim_migrate' - consumes = (DistributionSignedRPM,) - produces = () - tags = (ApplicationsPhaseTag, IPUWorkflowTag) - - def process(self): - vimmigrate.update_vim() diff --git a/repos/system_upgrade/el7toel8/actors/vimmigrate/libraries/vimmigrate.py b/repos/system_upgrade/el7toel8/actors/vimmigrate/libraries/vimmigrate.py deleted file mode 100644 index 2934ccc4..00000000 --- a/repos/system_upgrade/el7toel8/actors/vimmigrate/libraries/vimmigrate.py +++ /dev/null @@ -1,82 +0,0 @@ -from leapp.libraries.common.rpms import has_package -from leapp.libraries.stdlib import api -from leapp.models import DistributionSignedRPM - - -def _append_string(path, content): - """ - Appends string into file - - :param str path: path to file - :param str content: content to add - """ - with open(path, 'a') as f: - f.write(content) - - -# rpm : the default config file -vim_configs = { - 'vim-minimal': '/etc/virc', - 'vim-enhanced': '/etc/vimrc' -} - - -# list of macros that should be set -new_macros = [ - 'let skip_defaults_vim=1', - 'set t_BE=' -] - - -def update_config(path, append_function=_append_string): - """ - Insert expected content into the file on the path - - :param str path: string representing the full path of the config file - :param func append_function: appends string into file - """ - fmt_input = "\n{comment_line}\n{content}\n".format(comment_line='" content added by Leapp', - content='\n'.join(new_macros)) - - try: - append_function(path, fmt_input) - except IOError: - raise IOError('Error during writing to file: {}.'.format(path)) - - -def _check_package(pkg): - """ - Checks if a package is installed and signed - - :param str pkg: name of package - """ - return has_package(DistributionSignedRPM, pkg) - - -def update_vim(debug_log=api.current_logger().debug, - error_log=api.current_logger().error, - is_installed=_check_package, - append_function=_append_string): - """ - Do update of configuration files - - :param func debug_log: function for debug 
logging - :param func error_log: function for error logging - :param func is_installed: checks if a package is installed - :param func append_function: appends string into file - """ - error_list = [] - - for pkg, config_file in vim_configs.items(): - if not is_installed(pkg): - continue - - debug_log('Updating Vim configuration file {}.'.format(config_file)) - - try: - update_config(config_file, append_function) - except (OSError, IOError) as error: - error_list.append((config_file, error)) - if error_list: - error_log('The files below have not been modified (error message included):' + - ''.join(['\n - {}: {}'.format(err[0], err[1]) for err in error_list])) diff --git a/repos/system_upgrade/el7toel8/actors/vimmigrate/tests/test_update_config_vimmigrate.py b/repos/system_upgrade/el7toel8/actors/vimmigrate/tests/test_update_config_vimmigrate.py deleted file mode 100644 index 19597b4e..00000000 --- a/repos/system_upgrade/el7toel8/actors/vimmigrate/tests/test_update_config_vimmigrate.py +++ /dev/null @@ -1,41 +0,0 @@ -import pytest - -from leapp.libraries.actor.vimmigrate import new_macros, update_config - - -class MockFile(object): - def __init__(self, path, content=None): - self.path = path - self.content = content - self.error = False - - def append(self, path, content): - if path != self.path: - self.error = True - if not self.error: - self.content += content - return self.content - raise IOError('Error during writing to file: {}.'.format(path)) - - -def test_update_config_file_errors(path='foo'): - f = MockFile(path, content='') - - with pytest.raises(IOError): - update_config('bar', f.append) - - assert f.content == '' - - -@pytest.mark.parametrize('content', ('', 'bleblaba')) -def test_update_config_append_into_file(content): - path = 'bar' - - fmt_input = "\n{comment_line}\n{content}\n".format(comment_line='" content added by Leapp', - content='\n'.join(new_macros)) - - f = MockFile(path, content) - res = update_config(path, f.append) - - assert res is None - 
assert f.content == content + fmt_input diff --git a/repos/system_upgrade/el7toel8/actors/vimmigrate/tests/test_update_vim_vimmigrate.py b/repos/system_upgrade/el7toel8/actors/vimmigrate/tests/test_update_vim_vimmigrate.py deleted file mode 100644 index 5defccd6..00000000 --- a/repos/system_upgrade/el7toel8/actors/vimmigrate/tests/test_update_vim_vimmigrate.py +++ /dev/null @@ -1,127 +0,0 @@ -import pytest - -from leapp.libraries.actor.vimmigrate import update_vim, vim_configs - -packages = [ - { - 'vim-minimal': '/etc/virc', - 'vim-enhanced': '/etc/vimrc' - }, - { - 'vim-minimal': '/etc/virc', - 'vim-enhanced': '' - }, - { - 'vim-minimal': '', - 'vim-enhanced': '/etc/vimrc' - }, - { - 'vim-minimal': '', - 'vim-enhanced': '' - }, - { - 'vim-minimal': '/etc/virc', - 'ble': '' - }, - { - 'vim-minimal': '', - 'ble': '' - }, - { - 'vim-enhanced': '/etc/vimrc', - 'moo': '' - }, - { - 'vim-enhanced': '', - 'moo': '' - }, - { - 'you': '', - 'hele': '' - } -] - - -class MockLogger(object): - def __init__(self): - self.debugmsg = '' - self.errmsg = '' - - def debug(self, message): - self.debugmsg += message - - def error(self, message): - self.errmsg += message - - -class MockPackage(object): - def __init__(self, name, config): - self.name = name - self.config = config - - -class MockPackageSet(object): - def __init__(self): - self.installed_packages = None - - def add_packages(self, pkgs): - if self.installed_packages is None: - self.installed_packages = [] - - for rpm, config in pkgs.items(): - self.installed_packages.append(MockPackage(rpm, config)) - - def is_installed(self, pkg): - for rpm in self.installed_packages: - if pkg == rpm.name: - return True - return False - - def append_content(self, path, content): - found = False - - for rpm in self.installed_packages: - if path == rpm.config: - found = True - if not found: - raise IOError('Error during writing to file: {}.'.format(path)) - - -class ExpectedOutput(object): - def __init__(self): - self.debugmsg = '' - 
self.errmsg = '' - - def create(self, rpms): - error_list = [] - - for pkg, config in rpms.items(): - if pkg in vim_configs.keys(): - self.debugmsg += 'Updating Vim configuration file {}.'.format(vim_configs[pkg]) - if config == '': - error_list.append((vim_configs[pkg], 'Error during writing to file: {}.'.format(vim_configs[pkg]))) - - if error_list: - self.errmsg = ('The files below have not been modified ' - '(error message included):' + - ''.join(['\n - {}: {}'.format(err[0], err[1]) - for err in error_list])) - - -@pytest.mark.parametrize('rpms', packages) -def test_update_vim(rpms): - logger = MockLogger() - installed_packages = MockPackageSet() - - installed_packages.add_packages(rpms) - - expected = ExpectedOutput() - expected.create(rpms) - - update_vim(logger.debug, - logger.error, - installed_packages.is_installed, - installed_packages.append_content) - - assert expected.debugmsg == logger.debugmsg - assert expected.errmsg == logger.errmsg diff --git a/repos/system_upgrade/el7toel8/actors/vsftpdconfigcheck/actor.py b/repos/system_upgrade/el7toel8/actors/vsftpdconfigcheck/actor.py deleted file mode 100644 index 3d188cd8..00000000 --- a/repos/system_upgrade/el7toel8/actors/vsftpdconfigcheck/actor.py +++ /dev/null @@ -1,26 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import vsftpdconfigcheck -from leapp.models import TcpWrappersFacts, VsftpdFacts -from leapp.reporting import Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class VsftpdConfigCheck(Actor): - """ - Checks whether the vsftpd configuration is supported in RHEL-8. Namely checks that - configuration files don't set tcp_wrappers=YES and vsftpd-related configuration is - not present in tcp_wrappers configuration files at the same time. 
- """ - - name = 'vsftpd_config_check' - consumes = (TcpWrappersFacts, VsftpdFacts,) - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag,) - - def process(self): - try: - vsftpd_facts = next(self.consume(VsftpdFacts)) - except StopIteration: - return - tcp_wrappers_facts = next(self.consume(TcpWrappersFacts)) - vsftpdconfigcheck.check_config_supported(tcp_wrappers_facts, vsftpd_facts) diff --git a/repos/system_upgrade/el7toel8/actors/vsftpdconfigcheck/libraries/vsftpdconfigcheck.py b/repos/system_upgrade/el7toel8/actors/vsftpdconfigcheck/libraries/vsftpdconfigcheck.py deleted file mode 100644 index c87d70e8..00000000 --- a/repos/system_upgrade/el7toel8/actors/vsftpdconfigcheck/libraries/vsftpdconfigcheck.py +++ /dev/null @@ -1,32 +0,0 @@ -from leapp import reporting -from leapp.libraries.common.tcpwrappersutils import config_applies_to_daemon -from leapp.reporting import create_report - - -def check_config_supported(tcpwrap_facts, vsftpd_facts): - bad_configs = [config.path for config in vsftpd_facts.configs if config.tcp_wrappers] - if bad_configs and config_applies_to_daemon(tcpwrap_facts, 'vsftpd'): - list_separator_fmt = '\n - ' - create_report([ - reporting.Title('Unsupported vsftpd configuration'), - reporting.Summary( - 'tcp_wrappers support has been removed in RHEL-8. ' - 'Some configuration files set the tcp_wrappers option to true and ' - 'there is some vsftpd-related configuration in /etc/hosts.deny ' - 'or /etc/hosts.allow. Please migrate it manually. ' - 'The list of problematic configuration files:{}{}'. 
- format( - list_separator_fmt, - list_separator_fmt.join(bad_configs) - ) - ), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([reporting.Groups.SERVICES, reporting.Groups.NETWORK]), - reporting.Groups([reporting.Groups.INHIBITOR]), - reporting.ExternalLink( - title='Replacing TCP Wrappers in RHEL 8', - url='https://access.redhat.com/solutions/3906701' - ), - reporting.RelatedResource('package', 'tcp_wrappers'), - reporting.RelatedResource('package', 'vsftpd'), - ] + [reporting.RelatedResource('file', str(bc)) for bc in bad_configs]) diff --git a/repos/system_upgrade/el7toel8/actors/vsftpdconfigcheck/tests/test_library_vsftpdconfigcheck.py b/repos/system_upgrade/el7toel8/actors/vsftpdconfigcheck/tests/test_library_vsftpdconfigcheck.py deleted file mode 100644 index 6bdce11d..00000000 --- a/repos/system_upgrade/el7toel8/actors/vsftpdconfigcheck/tests/test_library_vsftpdconfigcheck.py +++ /dev/null @@ -1,82 +0,0 @@ -from leapp.models import DaemonList, TcpWrappersFacts, VsftpdConfig, VsftpdFacts -from leapp.reporting import Report -from leapp.snactor.fixture import current_actor_context -from leapp.utils.report import is_inhibitor - - -def test_actor_with_unsupported_tcpwrap_and_vsftpd_config(current_actor_context): - config1 = VsftpdConfig(path='/etc/vsftpd/foo.conf', tcp_wrappers=False) - config2 = VsftpdConfig(path='/etc/vsftpd/bar.conf', tcp_wrappers=True) - vsftpd_facts = VsftpdFacts(configs=[config1, config2]) - daemon_list = DaemonList(value=['vsftpd']) - tcpwrap_facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - - current_actor_context.feed(vsftpd_facts) - current_actor_context.feed(tcpwrap_facts) - current_actor_context.run() - report_fields = current_actor_context.consume(Report)[0].report - - assert is_inhibitor(report_fields) - assert 'foo.conf' not in report_fields['summary'] - assert 'bar.conf' in report_fields['summary'] - - -def test_actor_with_unsupported_tcpwrap_multiple_unsupported_vsftpd_configs(current_actor_context): - 
config1 = VsftpdConfig(path='/etc/vsftpd/foo.conf', tcp_wrappers=True) - config2 = VsftpdConfig(path='/etc/vsftpd/bar.conf', tcp_wrappers=False) - config3 = VsftpdConfig(path='/etc/vsftpd/goo.conf', tcp_wrappers=True) - vsftpd_facts = VsftpdFacts(configs=[config1, config2, config3]) - daemon_list = DaemonList(value=['vsftpd']) - tcpwrap_facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - - current_actor_context.feed(vsftpd_facts) - current_actor_context.feed(tcpwrap_facts) - current_actor_context.run() - report_fields = current_actor_context.consume(Report)[0].report - - assert is_inhibitor(report_fields) - assert 'foo.conf' in report_fields['summary'] - assert 'bar.conf' not in report_fields['summary'] - assert 'goo.conf' in report_fields['summary'] - - -def test_actor_with_unsupported_tcpwrap_config(current_actor_context): - config1 = VsftpdConfig(path='/etc/vsftpd/foo.conf', tcp_wrappers=False) - config2 = VsftpdConfig(path='/etc/vsftpd/bar.conf', tcp_wrappers=None) - vsftpd_facts = VsftpdFacts(configs=[config1, config2]) - daemon_list = DaemonList(value=['vsftpd']) - tcpwrap_facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - - current_actor_context.feed(vsftpd_facts) - current_actor_context.feed(tcpwrap_facts) - current_actor_context.run() - - assert not current_actor_context.consume(Report) - - -def test_actor_with_unsupported_vsftpd_config(current_actor_context): - config1 = VsftpdConfig(path='/etc/vsftpd/foo.conf', tcp_wrappers=False) - config2 = VsftpdConfig(path='/etc/vsftpd/bar.conf', tcp_wrappers=True) - vsftpd_facts = VsftpdFacts(configs=[config1, config2]) - daemon_list = DaemonList(value=['all', 'except', 'vsftpd']) - tcpwrap_facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - - current_actor_context.feed(vsftpd_facts) - current_actor_context.feed(tcpwrap_facts) - current_actor_context.run() - - assert not current_actor_context.consume(Report) - - -def test_actor_with_supported_tcpwrap_and_vsftpd_config(current_actor_context): - config1 = 
VsftpdConfig(path='/etc/vsftpd/foo.conf', tcp_wrappers=False) - config2 = VsftpdConfig(path='/etc/vsftpd/bar.conf', tcp_wrappers=False) - vsftpd_facts = VsftpdFacts(configs=[config1, config2]) - daemon_list = DaemonList(value=['all', 'except', 'vsftpd']) - tcpwrap_facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - - current_actor_context.feed(vsftpd_facts) - current_actor_context.feed(tcpwrap_facts) - current_actor_context.run() - - assert not current_actor_context.consume(Report) diff --git a/repos/system_upgrade/el7toel8/actors/vsftpdconfigread/actor.py b/repos/system_upgrade/el7toel8/actors/vsftpdconfigread/actor.py deleted file mode 100644 index 99b8ec21..00000000 --- a/repos/system_upgrade/el7toel8/actors/vsftpdconfigread/actor.py +++ /dev/null @@ -1,20 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import vsftpdconfigread -from leapp.models import DistributionSignedRPM, VsftpdFacts -from leapp.tags import FactsPhaseTag, IPUWorkflowTag - - -class VsftpdConfigRead(Actor): - """ - Reads vsftpd configuration files (/etc/vsftpd/*.conf) and extracts necessary information. 
- """ - - name = 'vsftpd_config_read' - consumes = (DistributionSignedRPM,) - produces = (VsftpdFacts,) - tags = (FactsPhaseTag, IPUWorkflowTag) - - def process(self): - installed_rpm_facts = next(self.consume(DistributionSignedRPM)) - if vsftpdconfigread.is_processable(installed_rpm_facts): - self.produce(vsftpdconfigread.get_vsftpd_facts()) diff --git a/repos/system_upgrade/el7toel8/actors/vsftpdconfigread/libraries/config_parser.py b/repos/system_upgrade/el7toel8/actors/vsftpdconfigread/libraries/config_parser.py deleted file mode 100644 index a7a6c179..00000000 --- a/repos/system_upgrade/el7toel8/actors/vsftpdconfigread/libraries/config_parser.py +++ /dev/null @@ -1,102 +0,0 @@ -class ParsingError(Exception): - pass - - -class VsftpdConfigOptionParser(object): - def _get_string_options(self): - return ["secure_chroot_dir", "ftp_username", "chown_username", "xferlog_file", - "vsftpd_log_file", "message_file", "nopriv_user", "ftpd_banner", - "banned_email_file", "chroot_list_file", "pam_service_name", "guest_username", - "userlist_file", "anon_root", "local_root", "banner_file", "pasv_address", - "listen_address", "user_config_dir", "listen_address6", "cmds_allowed", - "hide_file", "deny_file", "user_sub_token", "email_password_file", - "rsa_cert_file", "dsa_cert_file", "dh_param_file", "ecdh_param_file", - "ssl_ciphers", "rsa_private_key_file", "dsa_private_key_file", "ca_certs_file", - "cmds_denied"] - - def _get_boolean_options(self): - return ["anonymous_enable", "local_enable", "pasv_enable", "port_enable", - "chroot_local_user", "write_enable", "anon_upload_enable", - "anon_mkdir_write_enable", "anon_other_write_enable", "chown_uploads", - "connect_from_port_20", "xferlog_enable", "dirmessage_enable", - "anon_world_readable_only", "async_abor_enable", "ascii_upload_enable", - "ascii_download_enable", "one_process_model", "xferlog_std_format", - "pasv_promiscuous", "deny_email_enable", "chroot_list_enable", - "setproctitle_enable", "text_userdb_names", 
"ls_recurse_enable", - "log_ftp_protocol", "guest_enable", "userlist_enable", "userlist_deny", - "use_localtime", "check_shell", "hide_ids", "listen", "port_promiscuous", - "passwd_chroot_enable", "no_anon_password", "tcp_wrappers", "use_sendfile", - "force_dot_files", "listen_ipv6", "dual_log_enable", "syslog_enable", - "background", "virtual_use_local_privs", "session_support", "download_enable", - "dirlist_enable", "chmod_enable", "secure_email_list_enable", - "run_as_launching_user", "no_log_lock", "ssl_enable", "allow_anon_ssl", - "force_local_logins_ssl", "force_local_data_ssl", "ssl_sslv2", "ssl_sslv3", - "ssl_tlsv1", "ssl_tlsv1_1", "ssl_tlsv1_2", "tilde_user_enable", - "force_anon_logins_ssl", "force_anon_data_ssl", "mdtm_write", - "lock_upload_files", "pasv_addr_resolve", "reverse_lookup_enable", - "userlist_log", "debug_ssl", "require_cert", "validate_cert", - "strict_ssl_read_eof", "strict_ssl_write_shutdown", "ssl_request_cert", - "delete_failed_uploads", "implicit_ssl", "ptrace_sandbox", "require_ssl_reuse", - "isolate", "isolate_network", "ftp_enable", "http_enable", "seccomp_sandbox", - "allow_writeable_chroot", "better_stou", "log_die"] - - def _get_integer_options(self): - return ["accept_timeout", "connect_timeout", "local_umask", "anon_umask", - "ftp_data_port", "idle_session_timeout", "data_connection_timeout", - "pasv_min_port", "pasv_max_port", "anon_max_rate", "local_max_rate", - "listen_port", "max_clients", "file_open_mode", "max_per_ip", "trans_chunk_size", - "delay_failed_login", "delay_successful_login", "max_login_fails", - "chown_upload_mode", "bind_retries"] - - def _get_boolean(self, option, value): - value = value.upper() - if value in ['YES', 'TRUE', '1']: - return True - if value in ['NO', 'FALSE', '0']: - return False - raise ParsingError("Boolean option '%s' contains a non-boolean value '%s'" - % (option, value)) - - def _get_integer(self, option, value): - try: - return int(value) - except ValueError: - raise 
ParsingError("Integer option '%s' contains a non-integer value '%s'" - % (option, value)) - - def parse_value(self, option, value): - if option in self._get_string_options(): - return value - if option in self._get_boolean_options(): - return self._get_boolean(option, value) - if option in self._get_integer_options(): - return self._get_integer(option, value) - - raise ParsingError("Unknown option: '%s'" % option) - - -class VsftpdConfigParser(object): - def __init__(self, config_content): - self._option_parser = VsftpdConfigOptionParser() - self.parsed_config = self._parse_config(config_content) - - def _parse_config_line(self, line, conf_dict): - if not line or line.startswith('#') or line.isspace(): - return - try: - option, value = line.split('=', 1) - except ValueError: - raise ParsingError("The line does not have the form 'option=value': %s" % line) - option = option.strip() - value = value.strip() - value = self._option_parser.parse_value(option, value) - conf_dict[option] = value - - def _parse_config(self, contents): - res = {} - for (ix, line) in enumerate(contents.split('\n')): - try: - self._parse_config_line(line, res) - except ParsingError as e: - raise ParsingError("Syntax error on line %d: %s" % (ix + 1, e)) - return res diff --git a/repos/system_upgrade/el7toel8/actors/vsftpdconfigread/libraries/vsftpdconfigread.py b/repos/system_upgrade/el7toel8/actors/vsftpdconfigread/libraries/vsftpdconfigread.py deleted file mode 100644 index 64e86426..00000000 --- a/repos/system_upgrade/el7toel8/actors/vsftpdconfigread/libraries/vsftpdconfigread.py +++ /dev/null @@ -1,55 +0,0 @@ -import errno -import os - -from leapp.libraries.actor import config_parser -from leapp.libraries.common import vsftpdutils as utils -from leapp.libraries.stdlib import api -from leapp.models import VsftpdConfig, VsftpdFacts - - -def _parse_config(path, content): - try: - parser = config_parser.VsftpdConfigParser(content) - return parser.parsed_config - except 
config_parser.ParsingError: - api.current_logger().info('File %s does not look like vsftpd configuration, skipping.' - % path) - return None - - -def _get_parsed_configs(read_func=utils.read_file, listdir=os.listdir): - res = [] - try: - for fname in listdir(utils.VSFTPD_CONFIG_DIR): - path = os.path.join(utils.VSFTPD_CONFIG_DIR, fname) - if not path.endswith('.conf'): - continue - content = utils.get_config_contents(path, read_func=read_func) - if content is None: - continue - parsed = _parse_config(path, content) - if parsed is not None: - res.append((path, parsed)) - except OSError as e: - if e.errno != errno.ENOENT: - api.current_logger().warning('Failed to read vsftpd configuration directory: %s' - % e) - return res - - -def get_vsftpd_facts(read_func=utils.read_file, listdir=os.listdir): - config_hash = utils.get_default_config_hash(read_func=read_func) - configs = _get_parsed_configs(read_func=read_func, listdir=listdir) - res_configs = [] - for path, config in configs: - res_configs.append(VsftpdConfig(path=path, - strict_ssl_read_eof=config.get(utils.STRICT_SSL_READ_EOF), - tcp_wrappers=config.get(utils.TCP_WRAPPERS))) - return VsftpdFacts(default_config_hash=config_hash, configs=res_configs) - - -def is_processable(installed_rpm_facts): - for pkg in installed_rpm_facts.items: - if pkg.name == 'vsftpd': - return True - return False diff --git a/repos/system_upgrade/el7toel8/actors/vsftpdconfigread/tests/test_config_parser_vsftpdconfigread.py b/repos/system_upgrade/el7toel8/actors/vsftpdconfigread/tests/test_config_parser_vsftpdconfigread.py deleted file mode 100644 index b10ec4c9..00000000 --- a/repos/system_upgrade/el7toel8/actors/vsftpdconfigread/tests/test_config_parser_vsftpdconfigread.py +++ /dev/null @@ -1,103 +0,0 @@ -import pytest - -from leapp.libraries.actor.config_parser import ParsingError, VsftpdConfigOptionParser, VsftpdConfigParser - - -def test_VsftpdConfigOptionParser_invalid_syntax(): - parser = VsftpdConfigOptionParser() - - with 
pytest.raises(ParsingError): - parser.parse_value('unknown option', 'foo') - with pytest.raises(ParsingError): - parser.parse_value('anonymous_enable', 'non-boolean value') - with pytest.raises(ParsingError): - parser.parse_value('require_cert', 'non-boolean value') - with pytest.raises(ParsingError): - parser.parse_value('anon_mkdir_write_enable', '') - with pytest.raises(ParsingError): - parser.parse_value('accept_timeout', 'non-integer value') - with pytest.raises(ParsingError): - parser.parse_value('max_per_ip', 'non-integer value') - with pytest.raises(ParsingError): - parser.parse_value('listen_port', '') - - -def test_VsftpdConfigOptionParser_string_option(): - parser = VsftpdConfigOptionParser() - - assert parser.parse_value('secure_chroot_dir', 'foo') == 'foo' - assert parser.parse_value('user_config_dir', '') == '' - assert parser.parse_value('dsa_cert_file', 'value with spaces') == 'value with spaces' - - -def test_VsftpdConfigOptionParser_boolean_option(): - parser = VsftpdConfigOptionParser() - - assert parser.parse_value('background', 'TRUE') is True - assert parser.parse_value('run_as_launching_user', 'true') is True - assert parser.parse_value('no_log_lock', 'YES') is True - assert parser.parse_value('force_local_data_ssl', 'yES') is True - assert parser.parse_value('ssl_tlsv1_2', '1') is True - - assert parser.parse_value('background', 'FALSE') is False - assert parser.parse_value('run_as_launching_user', 'false') is False - assert parser.parse_value('no_log_lock', 'NO') is False - assert parser.parse_value('force_local_data_ssl', 'No') is False - assert parser.parse_value('ssl_tlsv1_2', '0') is False - - -def test_VsftpdConfigOptionParser_integer_option(): - parser = VsftpdConfigOptionParser() - - assert parser.parse_value('connect_timeout', '0') == 0 - assert parser.parse_value('idle_session_timeout', '1') == 1 - assert parser.parse_value('data_connection_timeout', '2') == 2 - assert parser.parse_value('pasv_max_port', '6234') == 6234 - - -def 
test_VsftpdConfigParser_invalid_syntax(): - with pytest.raises(ParsingError): - VsftpdConfigParser('unknown_option=foo') - with pytest.raises(ParsingError): - VsftpdConfigParser('anonymous_enable=non-boolean') - with pytest.raises(ParsingError): - VsftpdConfigParser(' # comment with whitespace before the # character') - with pytest.raises(ParsingError): - VsftpdConfigParser('anonymous_enable') - - # Make sure that line num is properly shown - with pytest.raises(ParsingError) as err: - VsftpdConfigParser('background=0\n#andthislineisalso=fine\nError on line 3') - assert "Syntax error on line 3" in str(err.value) - - -def test_VsftpdConfigParser_empty_config(): - parser = VsftpdConfigParser('') - assert isinstance(parser.parsed_config, dict) - assert not parser.parsed_config - - -def test_VsftpdConfigParser_only_comments(): - parser = VsftpdConfigParser('# foo\n\n#bar\n') - assert isinstance(parser.parsed_config, dict) - assert not parser.parsed_config - - parser = VsftpdConfigParser('#anonymous_enable=yes\n') - assert isinstance(parser.parsed_config, dict) - assert not parser.parsed_config - - -def test_VsftpdConfigParser_one_option(): - parser = VsftpdConfigParser('anonymous_enable=yes\n') - assert len(parser.parsed_config) == 1 - assert parser.parsed_config['anonymous_enable'] is True - - -def test_VsftpdConfigParser_multiple_options(): - content = '# foo\n\nanonymous_enable=no\nbanned_email_file=/foo/bar\n# bar\nmax_login_fails=3\n' - parser = VsftpdConfigParser(content) - - assert len(parser.parsed_config) == 3 - assert parser.parsed_config['anonymous_enable'] is False - assert parser.parsed_config['banned_email_file'] == '/foo/bar' - assert parser.parsed_config['max_login_fails'] == 3 diff --git a/repos/system_upgrade/el7toel8/actors/vsftpdconfigread/tests/test_library_vsftpdconfigread.py b/repos/system_upgrade/el7toel8/actors/vsftpdconfigread/tests/test_library_vsftpdconfigread.py deleted file mode 100644 index 6f62617b..00000000 --- 
a/repos/system_upgrade/el7toel8/actors/vsftpdconfigread/tests/test_library_vsftpdconfigread.py +++ /dev/null @@ -1,214 +0,0 @@ -import errno -import os - -from leapp.libraries.actor import vsftpdconfigread -from leapp.libraries.common.testutils import make_IOError, make_OSError -from leapp.models import DistributionSignedRPM, RPM - - -class MockFileOperations(object): - def __init__(self): - self.files = {} - self.files_read = {} - self.read_called = 0 - - def _increment_read_counters(self, path): - self.read_called += 1 - self.files_read.setdefault(path, 0) - self.files_read[path] += 1 - - def read(self, path): - self._increment_read_counters(path) - try: - return self.files[path] - except KeyError: - raise make_IOError(errno.ENOENT) - - -class MockListDir(object): - def __init__(self, path=None, file_names=None, to_raise=None): - self.path = None if path is None else os.path.normpath(path) - self.file_names = file_names - self.to_raise = to_raise - self.error = False - - def listdir(self, path): - if self.to_raise: - raise self.to_raise - if os.path.normpath(path) == self.path: - return self.file_names - - self.error = True - raise make_OSError(errno.ENOENT) - - -def test_parse_config(): - content = 'anonymous_enable=YES' - path = 'my_file' - - parsed = vsftpdconfigread._parse_config(path, content) - - assert parsed['anonymous_enable'] is True - - -def test_parsing_bad_config_gives_None(): - content = 'foo' - path = 'my_file' - - parsed = vsftpdconfigread._parse_config(path, content) - - assert parsed is None - - -def test_get_parsed_configs(): - directory = '/etc/vsftpd' - file_names = ['vsftpd.conf', 'foo.conf'] - listdir = MockListDir(directory, file_names) - fileops = MockFileOperations() - fileops.files[os.path.join(directory, file_names[0])] = 'anonymous_enable=YES\n' \ - 'ca_certs_file=/foo/bar\n' - fileops.files[os.path.join(directory, file_names[1])] = 'anonymous_enable=NO\n' - - parsed_configs = 
list(vsftpdconfigread._get_parsed_configs(read_func=fileops.read, - listdir=listdir.listdir)) - - assert not listdir.error - assert len(fileops.files_read) == 2 - assert os.path.join(directory, file_names[0]) in fileops.files_read - assert os.path.join(directory, file_names[1]) in fileops.files_read - assert len(parsed_configs) == 2 - if parsed_configs[0][0] != os.path.join(directory, file_names[0]): - parsed_configs.reverse() - assert (os.path.join(directory, file_names[0]), {'anonymous_enable': True, - 'ca_certs_file': '/foo/bar'}) in parsed_configs - assert (os.path.join(directory, file_names[1]), {'anonymous_enable': False}) in parsed_configs - - -def test_get_parsed_configs_empty_dir(): - directory = '/etc/vsftpd' - listdir = MockListDir(directory, []) - fileops = MockFileOperations() - - parsed_configs = vsftpdconfigread._get_parsed_configs(read_func=fileops.read, - listdir=listdir.listdir) - - assert not listdir.error - assert fileops.read_called == 0 - assert not parsed_configs - - -def test_get_parsed_configs_nonexistent_dir(): - listdir = MockListDir(to_raise=make_OSError(errno.ENOENT)) - fileops = MockFileOperations() - - parsed_configs = vsftpdconfigread._get_parsed_configs(read_func=fileops.read, - listdir=listdir.listdir) - - assert fileops.read_called == 0 - assert not parsed_configs - - -def test_get_parsed_configs_inaccessible_dir(): - listdir = MockListDir(to_raise=make_OSError(errno.EACCES)) - fileops = MockFileOperations() - - parsed_configs = vsftpdconfigread._get_parsed_configs(read_func=fileops.read, - listdir=listdir.listdir) - - assert fileops.read_called == 0 - assert not parsed_configs - - -def test_get_vsftpd_facts(): - directory = '/etc/vsftpd' - file_names = ['vsftpd.conf', 'foo.conf', 'bar.conf'] - listdir = MockListDir(directory, file_names) - fileops = MockFileOperations() - fileops.files[os.path.join(directory, file_names[0])] = 'anonymous_enable=YES\n' \ - 'ca_certs_file=/foo/bar\n' - fileops.files[os.path.join(directory, 
file_names[1])] = 'anonymous_enable=NO\n' \ - 'tcp_wrappers=YES\n' - fileops.files[os.path.join(directory, file_names[2])] = 'strict_ssl_read_eof=yes\n' \ - 'tcp_wrappers=no\n' - - facts = vsftpdconfigread.get_vsftpd_facts(read_func=fileops.read, listdir=listdir.listdir) - - assert facts.default_config_hash == '892bae7b69eb66ec16afe842a15e53a5242155a4' - assert len(facts.configs) == 3 - used_indices = set() - for config in facts.configs: - assert os.path.dirname(config.path) == directory - file_name = os.path.basename(config.path) - ix = file_names.index(file_name) - if ix in used_indices: - assert False - used_indices.add(ix) - if ix == 0: - assert config.strict_ssl_read_eof is None - assert config.tcp_wrappers is None - elif ix == 1: - assert config.strict_ssl_read_eof is None - assert config.tcp_wrappers is True - elif ix == 2: - assert config.strict_ssl_read_eof is True - assert config.tcp_wrappers is False - else: - assert False - - -def test_get_vsftpd_facts_empty_dir(): - listdir = MockListDir('/etc/vsftpd', []) - fileops = MockFileOperations() - - facts = vsftpdconfigread.get_vsftpd_facts(read_func=fileops.read, listdir=listdir.listdir) - - assert facts.default_config_hash is None - assert not facts.configs - - -def test_get_vsftpd_facts_nonexistent_dir(): - listdir = MockListDir(to_raise=make_OSError(errno.ENOENT)) - fileops = MockFileOperations() - - facts = vsftpdconfigread.get_vsftpd_facts(read_func=fileops.read, listdir=listdir.listdir) - - assert facts.default_config_hash is None - assert not facts.configs - - -def test_get_vsftpd_facts_inaccessible_dir(): - listdir = MockListDir(to_raise=make_OSError(errno.EACCES)) - fileops = MockFileOperations() - - facts = vsftpdconfigread.get_vsftpd_facts(read_func=fileops.read, listdir=listdir.listdir) - - assert facts.default_config_hash is None - assert not facts.configs - - -def test_is_processable_vsftpd_installed(): - installed_rpms = [ - RPM(name='sendmail', version='8.14.7', release='5.el7', epoch='0', - 
packager='foo', arch='x86_64', pgpsig='bar'), - RPM(name='vsftpd', version='3.0.2', release='25.el7', epoch='0', - packager='foo', arch='x86_64', pgpsig='bar'), - RPM(name='postfix', version='2.10.1', release='7.el7', epoch='0', - packager='foo', arch='x86_64', pgpsig='bar')] - installed_rpm_facts = DistributionSignedRPM(items=installed_rpms) - - res = vsftpdconfigread.is_processable(installed_rpm_facts) - - assert res is True - - -def test_is_processable_vsftpd_not_installed(): - installed_rpms = [ - RPM(name='sendmail', version='8.14.7', release='5.el7', epoch='0', - packager='foo', arch='x86_64', pgpsig='bar'), - RPM(name='postfix', version='2.10.1', release='7.el7', epoch='0', - packager='foo', arch='x86_64', pgpsig='bar')] - installed_rpm_facts = DistributionSignedRPM(items=installed_rpms) - - res = vsftpdconfigread.is_processable(installed_rpm_facts) - - assert res is False diff --git a/repos/system_upgrade/el7toel8/actors/vsftpdconfigupdate/actor.py b/repos/system_upgrade/el7toel8/actors/vsftpdconfigupdate/actor.py deleted file mode 100644 index a3c2d4f8..00000000 --- a/repos/system_upgrade/el7toel8/actors/vsftpdconfigupdate/actor.py +++ /dev/null @@ -1,33 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor import vsftpdconfigupdate -from leapp.models import VsftpdFacts -from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag - - -class VsftpdConfigUpdate(Actor): - """ - Modifies vsftpd configuration files on the target RHEL-8 system so that the effective - configuration is the same, where possible. This means doing two things: - 1. Reverting the default configuration file (/etc/vsftpd/vsftpd.conf) to its state - before the upgrade (where it makes sense), if the configuration file was being used - with its default content (i.e., unmodified) on the source system (the configuration - file gets replaced with a new version during the RPM upgrade in this case). - The anonymous_enable option falls in this category. - 2. 
Adding 'option=old_effective_value' to configuration files for options whose default - value has changed, if the option is not explicitly specified in the configuration file. - The strict_ssl_read_eof option falls in this category. - 3. Disabling options that cannot be enabled, otherwise vsftpd wouldn't work. - The tcp_wrappers option falls in this category. - """ - - name = 'vsftpd_config_update' - consumes = (VsftpdFacts,) - produces = () - tags = (ApplicationsPhaseTag, IPUWorkflowTag) - - def process(self): - try: - vsftpd_facts = next(self.consume(VsftpdFacts)) - except StopIteration: - return - vsftpdconfigupdate.migrate_configs(vsftpd_facts) diff --git a/repos/system_upgrade/el7toel8/actors/vsftpdconfigupdate/libraries/vsftpdconfigupdate.py b/repos/system_upgrade/el7toel8/actors/vsftpdconfigupdate/libraries/vsftpdconfigupdate.py deleted file mode 100644 index eaa5c2da..00000000 --- a/repos/system_upgrade/el7toel8/actors/vsftpdconfigupdate/libraries/vsftpdconfigupdate.py +++ /dev/null @@ -1,82 +0,0 @@ -import re - -from leapp.libraries.common.vsftpdutils import ( - get_default_config_hash, - STRICT_SSL_READ_EOF, - TCP_WRAPPERS, - VSFTPD_DEFAULT_CONFIG_PATH -) -from leapp.libraries.stdlib import api - -ANONYMOUS_ENABLE = 'anonymous_enable' - - -class FileOperations(object): - def read(self, path): - with open(path, 'r') as f: - return f.read() - - def write(self, path, content): - with open(path, 'w') as f: - f.write(content) - - -def _replace_in_config(config_lines, option, value): - res = [] - for line in config_lines: - if re.match(r'^\s*' + option, line) is None: - res.append(line) - else: - res.append('# Commented out by Leapp:') - res.append('#' + line) - if value is not None: - res.append('# Added by Leapp:') - res.append('%s=%s' % (option, value)) - return res - - -def _restore_default_config_file(fileops=FileOperations()): - try: - content = fileops.read(VSFTPD_DEFAULT_CONFIG_PATH) - except IOError as e: - api.current_logger().warning('Failed to read 
vsftpd configuration file: %s' % e) - return - lines = content.split('\n') - lines = _replace_in_config(lines, ANONYMOUS_ENABLE, 'YES') - content = '\n'.join(lines) - content += '\n' - fileops.write(VSFTPD_DEFAULT_CONFIG_PATH, content) - - -def _migrate_config(config, fileops=FileOperations()): - if not config.tcp_wrappers and config.strict_ssl_read_eof is not None: - return - try: - content = fileops.read(config.path) - except IOError as e: - api.current_logger().warning('Failed to read vsftpd configuration file %s: %s' - % (config.path, e)) - return - lines = content.split('\n') - if config.tcp_wrappers: - lines = _replace_in_config(lines, TCP_WRAPPERS, 'NO') - if config.strict_ssl_read_eof is None: - lines = _replace_in_config(lines, STRICT_SSL_READ_EOF, 'NO') - content = '\n'.join(lines) - content += '\n' - try: - fileops.write(config.path, content) - except IOError as e: - api.current_logger().warning('Failed to write vsftpd configuration file %s: %s' - % (config.path, e)) - - -def migrate_configs(facts, fileops=FileOperations()): - if facts.default_config_hash is not None: - new_hash = get_default_config_hash(read_func=fileops.read) - # If the default config file was unmodified, it got replaced during the RPM upgrade, - # so we have to change it back. 
- if facts.default_config_hash != new_hash: - _restore_default_config_file(fileops=fileops) - for config in facts.configs: - _migrate_config(config, fileops=fileops) diff --git a/repos/system_upgrade/el7toel8/actors/vsftpdconfigupdate/tests/test_library_vsftpdconfigupdate.py b/repos/system_upgrade/el7toel8/actors/vsftpdconfigupdate/tests/test_library_vsftpdconfigupdate.py deleted file mode 100644 index 3b506a19..00000000 --- a/repos/system_upgrade/el7toel8/actors/vsftpdconfigupdate/tests/test_library_vsftpdconfigupdate.py +++ /dev/null @@ -1,110 +0,0 @@ -import errno - -from leapp.libraries.actor.vsftpdconfigupdate import migrate_configs -from leapp.libraries.common.testutils import make_IOError -from leapp.libraries.common.vsftpdutils import VSFTPD_DEFAULT_CONFIG_PATH -from leapp.models import VsftpdConfig, VsftpdFacts - - -class MockFileOperations(object): - def __init__(self): - self.files = {} - self.files_read = {} - self.files_written = {} - self.read_called = 0 - self.write_called = 0 - - def _increment_read_counters(self, path): - self.read_called += 1 - self.files_read.setdefault(path, 0) - self.files_read[path] += 1 - - def read(self, path): - self._increment_read_counters(path) - try: - return self.files[path] - except KeyError: - raise make_IOError(errno.ENOENT) - - def _increment_write_counters(self, path): - self.write_called += 1 - self.files_written.setdefault(path, 0) - self.files_written[path] += 1 - - def write(self, path, content): - self._increment_write_counters(path) - self.files[path] = content - - -def test_restoring_default_config(): - content = 'anonymous_enable=NO\n' \ - 'tcp_wrappers=NO\n' \ - 'strict_ssl_read_eof=NO\n' - fileops = MockFileOperations() - fileops.files[VSFTPD_DEFAULT_CONFIG_PATH] = content - config = VsftpdConfig(path=VSFTPD_DEFAULT_CONFIG_PATH, - tcp_wrappers=False, strict_ssl_read_eof=False) - facts = VsftpdFacts(default_config_hash='foobar', configs=[config]) - - migrate_configs(facts, fileops=fileops) - - assert 
len(fileops.files_read) == 1 - assert VSFTPD_DEFAULT_CONFIG_PATH in fileops.files_read - assert len(fileops.files_written) == 1 - assert VSFTPD_DEFAULT_CONFIG_PATH in fileops.files_written - expected_lines = ['# Commented out by Leapp:', - '#anonymous_enable=NO', - 'tcp_wrappers=NO', - 'strict_ssl_read_eof=NO', - '', - '# Added by Leapp:', - 'anonymous_enable=YES', - ''] - assert fileops.files[VSFTPD_DEFAULT_CONFIG_PATH] == '\n'.join(expected_lines) - - -def test_setting_tcp_wrappers(): - path = '/etc/vsftpd/foo.conf' - content = 'tcp_wrappers=YES\n' \ - 'strict_ssl_read_eof=NO\n' - fileops = MockFileOperations() - fileops.files[path] = content - config = VsftpdConfig(path=path, - tcp_wrappers=True, strict_ssl_read_eof=False) - facts = VsftpdFacts(configs=[config]) - - migrate_configs(facts, fileops=fileops) - - assert path in fileops.files_read - assert len(fileops.files_written) == 1 - assert path in fileops.files_written - expected_lines = ['# Commented out by Leapp:', - '#tcp_wrappers=YES', - 'strict_ssl_read_eof=NO', - '', - '# Added by Leapp:', - 'tcp_wrappers=NO', - ''] - assert fileops.files[path] == '\n'.join(expected_lines) - - -def test_setting_strict_ssl_read_eof(): - path = '/etc/vsftpd/bar.conf' - content = 'local_enable=YES\n' - fileops = MockFileOperations() - fileops.files[path] = content - config = VsftpdConfig(path=path, - tcp_wrappers=None, strict_ssl_read_eof=None) - facts = VsftpdFacts(configs=[config]) - - migrate_configs(facts, fileops=fileops) - - assert path in fileops.files_read - assert len(fileops.files_written) == 1 - assert path in fileops.files_written - expected_lines = ['local_enable=YES', - '', - '# Added by Leapp:', - 'strict_ssl_read_eof=NO', - ''] - assert fileops.files[path] == '\n'.join(expected_lines) diff --git a/repos/system_upgrade/el7toel8/actors/ziplcheckbootentries/actor.py b/repos/system_upgrade/el7toel8/actors/ziplcheckbootentries/actor.py deleted file mode 100644 index 019c6e53..00000000 --- 
a/repos/system_upgrade/el7toel8/actors/ziplcheckbootentries/actor.py +++ /dev/null @@ -1,24 +0,0 @@ -from leapp.actors import Actor -from leapp.libraries.actor.ziplcheckbootentries import inhibit_if_invalid_zipl_configuration -from leapp.models import SourceBootLoaderConfiguration -from leapp.reporting import Report -from leapp.tags import ChecksPhaseTag, IPUWorkflowTag - - -class ZiplCheckBootEntries(Actor): - """ - Inhibits the upgrade if a problematic Zipl configuration is detected on the system. - - The configuration is considered problematic if it will cause troubles during its conversion to BLS. - Such troubles can be caused by either containing multiple rescue entries, or containing rescue entries - sharing the same kernel image version. - """ - - name = 'zipl_check_boot_entries' - consumes = (SourceBootLoaderConfiguration,) - produces = (Report,) - tags = (ChecksPhaseTag, IPUWorkflowTag) - - def process(self): - boot_loader_configuration = next(self.consume(SourceBootLoaderConfiguration)) - inhibit_if_invalid_zipl_configuration(boot_loader_configuration) diff --git a/repos/system_upgrade/el7toel8/actors/ziplcheckbootentries/libraries/ziplcheckbootentries.py b/repos/system_upgrade/el7toel8/actors/ziplcheckbootentries/libraries/ziplcheckbootentries.py deleted file mode 100644 index 757af6c8..00000000 --- a/repos/system_upgrade/el7toel8/actors/ziplcheckbootentries/libraries/ziplcheckbootentries.py +++ /dev/null @@ -1,134 +0,0 @@ -from collections import defaultdict - -from leapp import reporting -from leapp.libraries.common.config import architecture - -FMT_LIST_SEPARATOR = '\n - ' -ZIPL_CONFIG_PATH = '/etc/zipl.conf' - - -def is_rescue_entry(boot_entry): - """ - Determines whether the given boot entry is rescue. 
- - :param BootEntry boot_entry: Boot entry to assess - :return: True is the entry is rescue - :rtype: bool - """ - return 'rescue' in boot_entry.kernel_image.lower() - - -def inhibit_if_multiple_zipl_rescue_entries_present(bootloader_config): - """ - Inhibits the upgrade if we are running on s390x and the bootloader configuration - contains multiple rescue boot entries. - - A boot entry is recognized as a rescue entry when its title contains the `rescue` substring. - - :param SourceBootloaderConfiguration bootloader_config: The configuration of the source boot loader. - """ - - # Keep the whole information about boot entries not just their count as - # we want to provide user with the details - rescue_entries = [] - for boot_entry in bootloader_config.entries: - if is_rescue_entry(boot_entry): - rescue_entries.append(boot_entry) - - if len(rescue_entries) > 1: - # Prepare the list of available rescue entries for user - rescue_entries_text = '' - for rescue_entry in rescue_entries: - rescue_entries_text += '{0}{1}'.format(FMT_LIST_SEPARATOR, rescue_entry.title) - - summary = ('The Zipl configuration file {0} contains multiple rescue boot entries preventing migration ' - 'to BLS. Problematic entries: {1}') - - reporting.create_report([ - reporting.Title('Multiple rescue boot entries present in the bootloader configuration.'), - reporting.Summary(summary.format(ZIPL_CONFIG_PATH, rescue_entries_text)), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([reporting.Groups.BOOT]), - reporting.Remediation(hint='Remove rescue boot entries from the configuration and leave just one.'), - reporting.Groups([reporting.Groups.INHIBITOR]) - ]) - - -def extract_kernel_version(kernel_img_path): - """ - Extracts the kernel version out of the given image path. - - The extraction logic is designed to closely mimic the logic Zipl configuration to BLS - conversion script works, so that it is possible to identify the possible issues with kernel - images. 
- - :param str kernel_img_path: The path to the kernel image. - :returns: Extracted kernel version from the given path - :rtype: str - """ - - # Mimic bash substitution used in the conversion script, see: - # https://github.com/ibm-s390-linux/s390-tools/blob/b5604850ab66f862850568a37404faa647b5c098/scripts/zipl-switch-to-blscfg#L168 - if 'vmlinuz-' in kernel_img_path: - fragments = kernel_img_path.rsplit('/vmlinuz-', 1) - return fragments[1] if len(fragments) > 1 else fragments[0] - - fragments = kernel_img_path.rsplit('/', 1) - return fragments[1] if len(fragments) > 1 else fragments[0] - - -def inhibit_if_entries_share_kernel_version(bootloader_config): - """ - Inhibits the upgrade if there are boot entries sharing the same kernel image version. - - The logic of identification whether the images are the same mimics the zipl-switch-to-blscfg, as it fails - to perform the conversion if there are entries with the same kernel image. - - :param SourceBootloaderConfiguration bootloader_config: The configuration of the source boot loader. 
- """ - - used_kernel_versions = defaultdict(list) # Maps images to the boot entries in which they are used - for boot_entry in bootloader_config.entries: - if is_rescue_entry(boot_entry): - # Rescue entries are handled differently and their images should not cause naming collisions - continue - - kernel_version = extract_kernel_version(boot_entry.kernel_image) - used_kernel_versions[kernel_version].append(boot_entry) - - versions_used_multiple_times = [] - for version, version_boot_entries in used_kernel_versions.items(): - if len(version_boot_entries) > 1: - # Keep the information about entries for the report - versions_used_multiple_times.append((version, version_boot_entries)) - - if versions_used_multiple_times: - problematic_entries_details = '' - for version, version_boot_entries in versions_used_multiple_times: - entry_titles = ['"{0}"'.format(entry.title) for entry in version_boot_entries] - problematic_entries_details += '{0}{1} (found in entries: {2})'.format( - FMT_LIST_SEPARATOR, - version, - ', '.join(entry_titles) - ) - - summary = ('The zipl configuration file {0} contains boot entries sharing the same kernel version ' - 'preventing migration to BLS. 
Kernel versions shared: {1}') - reporting.create_report([ - reporting.Title('Boot entries sharing the same kernel version found.'), - reporting.Summary(summary.format(ZIPL_CONFIG_PATH, problematic_entries_details)), - reporting.Severity(reporting.Severity.HIGH), - reporting.Groups([reporting.Groups.BOOT]), - reporting.Remediation( - hint='Remove boot entries sharing the same kernel version from the configuration and leave just one.'), - reporting.Groups([reporting.Groups.INHIBITOR]) - ]) - - -def inhibit_if_invalid_zipl_configuration(bootloader_config): - if not architecture.matches_architecture(architecture.ARCH_S390X): - # Zipl is used only on s390x - return - - inhibit_if_multiple_zipl_rescue_entries_present(bootloader_config) - inhibit_if_entries_share_kernel_version(bootloader_config) diff --git a/repos/system_upgrade/el7toel8/actors/ziplcheckbootentries/tests/test_ziplcheckbootentries.py b/repos/system_upgrade/el7toel8/actors/ziplcheckbootentries/tests/test_ziplcheckbootentries.py deleted file mode 100644 index 0bb0c8da..00000000 --- a/repos/system_upgrade/el7toel8/actors/ziplcheckbootentries/tests/test_ziplcheckbootentries.py +++ /dev/null @@ -1,157 +0,0 @@ -import pytest - -from leapp import reporting -from leapp.libraries.actor import ziplcheckbootentries -from leapp.libraries.actor.ziplcheckbootentries import ( - extract_kernel_version, - inhibit_if_entries_share_kernel_version, - inhibit_if_invalid_zipl_configuration, - inhibit_if_multiple_zipl_rescue_entries_present -) -from leapp.libraries.common.config import architecture -from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked -from leapp.libraries.stdlib import api -from leapp.models import BootEntry, SourceBootLoaderConfiguration -from leapp.snactor.fixture import current_actor_context -from leapp.utils.report import is_inhibitor - - -def test_inhibition_multiple_rescue_entries_present(monkeypatch): - """Tests whether the upgrade process is inhibited when multiple 
rescue boot entries are present.""" - mocked_report = create_report_mocked() - monkeypatch.setattr(architecture, 'matches_architecture', lambda dummy: True) - monkeypatch.setattr(reporting, 'create_report', mocked_report) - - boot_entries = [ - BootEntry(title='entry_1', kernel_image="img"), - BootEntry(title='entry_1_Rescue', kernel_image="img_Rescue"), - BootEntry(title='entry_2', kernel_image="img"), - BootEntry(title='entry_2_rescue-ver2.3', kernel_image="img_rescue"), - ] - - inhibit_if_multiple_zipl_rescue_entries_present(SourceBootLoaderConfiguration(entries=boot_entries)) - - assert mocked_report.called, 'Report should be created when multiple rescue entries are present.' - - fail_description = 'The correct rescue entries are not present in the report summary.' - report_summary = mocked_report.report_fields['summary'] - for expected_rescue_entry in ['entry_1_Rescue', 'entry_2_rescue-ver2.3']: - assert expected_rescue_entry in report_summary, fail_description - - fail_description = 'Upgrade should be inhibited on multiple rescue entries.' - assert is_inhibitor(mocked_report.report_fields), fail_description - - -def test_inhibition_multiple_rescue_entries_not_present(monkeypatch): - """Tests whether the upgrade process is not inhibited when multiple rescue boot entries are not present.""" - monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) - - boot_entries = [ - BootEntry(title='entry_1', kernel_image="img"), - BootEntry(title='entry_2', kernel_image="img"), - BootEntry(title='entry_2_rescue-ver2.3', kernel_image="img_rescue"), - ] - - inhibit_if_multiple_zipl_rescue_entries_present(SourceBootLoaderConfiguration(entries=boot_entries)) - - assert not reporting.create_report.called, 'Report was created, even if multiple rescue entries were not present.' 
- - -def test_inhibition_when_entries_do_not_share_kernel_image(monkeypatch): - """Tests whether the IPU is not inhibited when there are no kernel images shared between boot entries.""" - entries = [ - BootEntry(title='Linux#0', kernel_image='/boot/vmlinuz-4.17.0-240.1.1.el8_3.x86_64'), - BootEntry(title='Linux#1', kernel_image='/boot/vmlinuz-4.18.0-240.1.1.el8_3.x86_64') - ] - - monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) - inhibit_if_entries_share_kernel_version(SourceBootLoaderConfiguration(entries=entries)) - assert not reporting.create_report.called - - -@pytest.mark.parametrize( - ('boot_entries',), - [([BootEntry(title='Linux0', kernel_image='/boot/vmlinuz-4.18.0-240.1.1.el8_3.x86_64'), - BootEntry(title='Linux1', kernel_image='/boot/4.18.0-240.1.1.el8_3.x86_64')],), - ([BootEntry(title='Linux0', kernel_image='/boot/vmlinuz-4.18.0-240.1.1.el8_3.x86_64'), - BootEntry(title='Linux1', kernel_image='/boot/vmlinuz-4.18.0-240.1.1.el8_3.x86_64')],)]) -def test_inhibit_when_entries_share_kernel_image(monkeypatch, boot_entries): - """Tests whether the IPU gets inhibited when there are kernel images shared between boot entries.""" - - monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) - - inhibit_if_entries_share_kernel_version(SourceBootLoaderConfiguration(entries=boot_entries)) - - assert reporting.create_report.called - assert is_inhibitor(reporting.create_report.report_fields) - - report_summary = reporting.create_report.report_fields['summary'] - assert '- 4.18.0-240.1.1.el8_3.x86_64 (found in entries: "Linux0", "Linux1")' in report_summary - - -@pytest.mark.parametrize( - ('boot_entries',), - [([BootEntry(title='Linux', kernel_image='/boot/vmlinuz-4.18.0-240.1.1.el8_3.x86_64'), - BootEntry(title='Linux-rescue', kernel_image='/boot/vmlinuz-rescue-4.18.0-240.1.1.el8_3.x86_64')],), - ([BootEntry(title='Linux0-rescue', kernel_image='/boot/vmlinuz-rescue-4.18.0-240.1.1.el8_3.x86_64'), - BootEntry(title='Linux1-rescue', 
kernel_image='/boot/vmlinuz-rescue-4.18.0-240.1.1.el8_3.x86_64')],)]) -def test_inhibition_when_rescue_entries_share_kernel(monkeypatch, boot_entries): - """ - Tests whether the IPU is not inhibited when there are kernel images with the same version shared between rescue - boot entries. - """ - monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) - inhibit_if_entries_share_kernel_version(SourceBootLoaderConfiguration(entries=boot_entries)) - assert not reporting.create_report.called - - -@pytest.mark.parametrize(('arch',), [(arch,) for arch in architecture.ARCH_SUPPORTED]) -def test_checks_performed_only_on_s390x_arch(arch, monkeypatch): - """Tests whether the actor doesn't perform different architectures than s390x.""" - should_perform = False - if arch == architecture.ARCH_S390X: # Rescue entries should be checked only on s390x. - should_perform = True - - monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch=arch)) - monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) - - boot_entries = [BootEntry(title='rescue0', kernel_image='/boot/vmlinuz-rescue-4.18.0-240.1.1.el8_3.x86_64'), - BootEntry(title='rescue1', kernel_image='/boot/vmlinuz-rescue-4.19.0-240.1.1.el8_3.x86_64')] - - inhibit_if_invalid_zipl_configuration(SourceBootLoaderConfiguration(entries=boot_entries)) - - fail_description = 'Rescue entries should not be checked on non s390x architecture.' - if should_perform: - fail_description = 'No report was created when running on s390x and multiple rescue entries were used.' 
- assert bool(reporting.create_report.called) == should_perform, fail_description - - if should_perform: - inhibitor_description = 'contains multiple rescue boot entries' - assert inhibitor_description in reporting.create_report.report_fields['summary'] - - boot_entries = [BootEntry(title='Linux1', kernel_image='/boot/vmlinuz-4.18.0-240.1.1.el8_3.x86_64'), - BootEntry(title='Linux2', kernel_image='/boot/vmlinuz-4.18.0-240.1.1.el8_3.x86_64')] - - inhibit_if_invalid_zipl_configuration(SourceBootLoaderConfiguration(entries=boot_entries)) - - fail_description = 'Check for boot entries with the same kernel version should not be performed on non s390x arch.' - if should_perform: - fail_description = ('No report was created when running on s390x and boot entries' - 'with the same kernel version are present') - assert bool(reporting.create_report.called) == should_perform, fail_description - if should_perform: - inhibitor_description = 'contains boot entries sharing the same kernel version' - assert inhibitor_description in reporting.create_report.report_fields['summary'] - - -def test_extract_kernel_version(): - # Manually generated via experimentation with the zipl-switch-to-blscfg - versions_from_img_paths = [ - ('/boot/vmlinuz-4.18.0-240.1.1.el8_3.x86_64', '4.18.0-240.1.1.el8_3.x86_64'), - ('/boot/4.18.0-240.1.1.el8_3.x86_64', '4.18.0-240.1.1.el8_3.x86_64'), - ('/boot/patched-4.18.0-240.1.1.el8_3.x86_64', 'patched-4.18.0-240.1.1.el8_3.x86_64'), - ('patched-4.18.0-240.1.1.el8_3.x86_64', 'patched-4.18.0-240.1.1.el8_3.x86_64'), - ] - - for path, version in versions_from_img_paths: - assert extract_kernel_version(path) == version diff --git a/repos/system_upgrade/el7toel8/actors/ziplconverttoblscfg/actor.py b/repos/system_upgrade/el7toel8/actors/ziplconverttoblscfg/actor.py deleted file mode 100644 index 441c538b..00000000 --- a/repos/system_upgrade/el7toel8/actors/ziplconverttoblscfg/actor.py +++ /dev/null @@ -1,77 +0,0 @@ -import filecmp -import os - -from leapp.actors 
import Actor -from leapp.exceptions import StopActorExecutionError -from leapp.libraries.common import mounting -from leapp.libraries.common.config import architecture -from leapp.libraries.stdlib import CalledProcessError -from leapp.models import TargetUserSpaceInfo -from leapp.tags import IPUWorkflowTag, PreparationPhaseTag - - -class ZiplConvertToBLSCFG(Actor): - """ - Convert the zipl boot loader configuration to the the boot loader specification on s390x systems. - """ - - name = 'zipl_convert_to_blscfg' - consumes = (TargetUserSpaceInfo,) - produces = () - tags = (IPUWorkflowTag, PreparationPhaseTag) - - def process(self): - if not architecture.matches_architecture(architecture.ARCH_S390X): - return - userspace = next(self.consume(TargetUserSpaceInfo), None) - if not userspace: - # actually this should not happen, but in such case, we want to still - # rather continue even if we boot into the old kernel, but in such - # case, people will have to do manual actions. - # NOTE: it is really just hypothetical - self.log_error( - 'TargetUserSpaceInfo is missing. Cannot execute zipl-switch-to-blscfg' - ' to convert the zipl configuration to BLS.' 
- ) - raise StopActorExecutionError('GENERAL FAILURE: Input data for the actor are missing.') - - # replace the original boot directory inside the container by the host one - # - as we cannot use zipl* pointing anywhere else than default directory - # - no, --bls-directory is not solution - # also make sure device nodes are available (requirement for zipl-switch-to-blscfg) - binds = ['/boot', '/dev'] - with mounting.NspawnActions(base_dir=userspace.path, binds=binds) as context: - userspace_zipl_conf = os.path.join(userspace.path, 'etc', 'zipl.conf') - if os.path.exists(userspace_zipl_conf): - os.remove(userspace_zipl_conf) - context.copy_to('/etc/zipl.conf', '/etc/zipl.conf') - # zipl needs this one as well - context.copy_to('/etc/machine-id', '/etc/machine-id') - try: - context.call(['/usr/sbin/zipl-switch-to-blscfg']) - if filecmp.cmp('/etc/zipl.conf', userspace_zipl_conf): - # When the files are same, zipl failed - see the switch script - raise OSError('Failed to convert the ZIPL configuration to BLS.') - context.copy_from('/etc/zipl.conf', '/etc/zipl.conf') - except OSError as e: - self.log.error('Could not call zipl-switch-to-blscfg command.', - exc_info=True) - raise StopActorExecutionError( - message='Failed to execute zipl-switch-to-blscfg.', - details={'details': str(e)} - ) - except CalledProcessError as e: - self.log.error('zipl-switch-to-blscfg execution failed,', - exc_info=True) - raise StopActorExecutionError( - message='zipl-switch-to-blscfg execution failed with non zero exit code.', - details={'details': str(e), 'stdout': e.stdout, 'stderr': e.stderr} - ) - - # FIXME: we do not want to continue anymore, but we should clean - # better. 
- # NOTE: Basically, just removal of the /boot/loader dir content inside - # could be enough, but we cannot remove /boot/loader because of boom - # - - if we remove it, we will remove the snapshot as well - # - - on the other hand, we shouldn't keep it there if zipl - # - - has not been converted to BLS diff --git a/repos/system_upgrade/el7toel8/actors/ziplconverttoblscfg/tests/unit_test_ziplconverttoblscfg.py b/repos/system_upgrade/el7toel8/actors/ziplconverttoblscfg/tests/unit_test_ziplconverttoblscfg.py deleted file mode 100644 index 353e707d..00000000 --- a/repos/system_upgrade/el7toel8/actors/ziplconverttoblscfg/tests/unit_test_ziplconverttoblscfg.py +++ /dev/null @@ -1,2 +0,0 @@ -def test_actor(): - pass diff --git a/repos/system_upgrade/el7toel8/libraries/Makefile b/repos/system_upgrade/el7toel8/libraries/Makefile deleted file mode 100644 index 27cda188..00000000 --- a/repos/system_upgrade/el7toel8/libraries/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -install-deps: - yum install -y python*-pyudev diff --git a/repos/system_upgrade/el7toel8/libraries/isccfg.py b/repos/system_upgrade/el7toel8/libraries/isccfg.py deleted file mode 100644 index 6cebb289..00000000 --- a/repos/system_upgrade/el7toel8/libraries/isccfg.py +++ /dev/null @@ -1,983 +0,0 @@ -#!/usr/bin/env python -# -# Simplified parsing of bind configuration, with include support and nested sections. 
- -from __future__ import print_function - -import re -import string - - -class ConfigParseError(Exception): - """Generic error when parsing config file.""" - - def __init__(self, error=None, parent=None): - # IOError on python3 includes path, on python2 it does not - message = "Cannot open the configuration file \"{path}\": {error}".format( - path=error.filename, error=str(error)) - if parent: - message += "; included from \"{0}\"".format(parent) - super(ConfigParseError, self).__init__(message) - self.error = error - self.parent = parent - pass - - -class ConfigFile(object): - """Representation of single configuration file and its contents.""" - def __init__(self, path): - """Load config file contents from path. - - :param path: Path to file - """ - self.path = path - self.load(path) - self.status = None - - def __str__(self): - return self.buffer - - def __repr__(self): - return 'ConfigFile {0} ({1})'.format( - self.path, self.buffer) - - def load(self, path): - with open(path, 'r') as f: - self.buffer = self.original = f.read() - - def is_modified(self): - return self.original == self.buffer - - def root_section(self): - return ConfigSection(self, None, 0, len(self.buffer)) - - -class MockConfig(ConfigFile): - """Configuration file with contents defined on constructor. - - Intended for testing the library. - """ - DEFAULT_PATH = '/etc/named/mock.conf' - - def __init__(self, contents, path=DEFAULT_PATH): - self.original = contents - super(MockConfig, self).__init__(path) - - def load(self, path): - self.buffer = self.original - - -class ConfigSection(object): - """Representation of section or key inside single configuration file. 
- - Section means statement, block, quoted string or any similar.""" - - TYPE_BARE = 1 - TYPE_QSTRING = 2 - TYPE_BLOCK = 3 - TYPE_IGNORED = 4 # comments and whitespaces - - def __init__(self, config, name=None, start=None, end=None, kind=None, parser=None): - """ - :param config: config file inside which is this section - :type config: ConfigFile - :param kind: type of this section - """ - self.config = config - self.name = name - self.start = start - self.end = end - self.ctext = self.original_value() # a copy for modification - self.parser = parser - if kind is None: - if self.config.buffer.startswith('{', self.start): - self.kind = self.TYPE_BLOCK - elif self.config.buffer.startswith('"', self.start): - self.kind = self.TYPE_QSTRING - else: - self.kind = self.TYPE_BARE - else: - self.kind = kind - self.statements = [] - - def __repr__(self): - text = self.value() - path = self.config.path - return 'ConfigSection#{kind}({path}:{start}-{end}: "{text}")'.format( - path=path, start=self.start, end=self.end, - text=text, kind=self.kind - ) - - def __str__(self): - return self.value() - - def copy(self): - return ConfigSection(self.config, self.name, self.start, self.end, self.kind) - - def type(self): - return self.kind - - def value(self): - return self.ctext - - def original_value(self): - return self.config.buffer[self.start:self.end+1] - - def invalue(self): - """Return just inside value of blocks and quoted strings.""" - t = self.type() - if t in (self.TYPE_QSTRING, self.TYPE_BLOCK): - return self.ctext[1:-1] - return self.value() - - def children(self, comments=False): - """Return list of items inside this section.""" - start = self.start - if self.type() == self.TYPE_BLOCK: - start += 1 - return list(IscIterator(self.parser, self, comments, start)) - - def serialize(self): - return self.value() - - -class IscIterator(object): - """Iterator for walking over parsed configuration. - - Creates sequence of ConfigSection objects for a given file. 
- That means a stream of objects. - """ - - def __init__(self, parser, section, comments=False, start=None): - """Create iterator. - - :param comments: Include comments and whitespaces - :param start: Index for starting, None means beginning of section - """ - self.parser = parser - self.section = section - self.current = None - self.key_wanted = True - self.comments = comments - self.waiting = None - if start is None: - start = section.start - self.start = start - - def __iter__(self): - self.current = None - self.key_wanted = True - self.waiting = None - return self - - def __next__(self): - index = self.start - cfg = self.section.config - if self.waiting: - self.current = self.waiting - self.waiting = None - return self.current - if self.current is not None: - index = self.current.end+1 - if self.key_wanted: - val = self.parser.find_next_key(cfg, index, self.section.end) - self.key_wanted = False - else: - val = self.parser.find_next_val(cfg, None, index, self.section.end, end_report=True) - if val is not None and val.value() in self.parser.CHAR_DELIM: - self.key_wanted = True - if val is None: - if self.current is not None and self.current.end < self.section.end and self.comments: - self.current = ConfigSection(self.section.config, None, - index, self.section.end, ConfigSection.TYPE_IGNORED) - return self.current - raise StopIteration - if index != val.start and self.comments: - # Include comments and spaces as ignored section - self.waiting = val - val = ConfigSection(val.config, None, index, val.start-1, ConfigSection.TYPE_IGNORED) - - self.current = val - return val - - next = __next__ # Python2 compat - - -class IscVarIterator(object): - """Iterator for walking over parsed configuration. - - Creates sequence of ConfigVariableSection objects for a given - file or section. 
- """ - - def __init__(self, parser, section, comments=False, start=None): - """Create iterator.""" - self.parser = parser - self.section = section - self.iter = IscIterator(parser, section, comments, start) - - def __iter__(self): - return self - - def __next__(self): - vl = [] - try: - statement = next(self.iter) - while statement: - vl.append(statement) - if self.parser.is_terminal(statement): - return ConfigVariableSection(vl, None, parent=self.section) - statement = next(self.iter) - except StopIteration: - if vl: - return ConfigVariableSection(vl, None, parent=self.section) - raise StopIteration - - next = __next__ # Python2 compat - - -class ConfigVariableSection(ConfigSection): - """Representation for key and values of variable length. - - Intended for view and zone. - """ - - def __init__(self, sectionlist, name, zone_class=None, parent=None, parser=None): - """Creates variable block for zone or view. - - :param sectionlist: list of ConfigSection, obtained from IscConfigParser.find_values() - """ - last = next(reversed(sectionlist)) - first = sectionlist[0] - self.values = sectionlist - super(ConfigVariableSection, self).__init__( - first.config, name, start=first.start, end=last.end, parser=parser - ) - if name is None: - try: - self.name = self.var(1).invalue() - except IndexError: - pass - # For optional dns class, like IN or CH - self.zone_class = zone_class - self.parent = parent - - def key(self): - if self.zone_class is None: - return self.name - return self.zone_class + '_' + self.name - - def firstblock(self): - """Return first block section in this tool.""" - return self.vartype(0, self.TYPE_BLOCK) - - def var(self, i): - """Return value by index, ignore spaces.""" - n = 0 - for v in self.values: - if v.type() != ConfigSection.TYPE_IGNORED: - if n == i: - return v - n += 1 - raise IndexError - - def vartype(self, i, vtype): - n = 0 - for v in self.values: - if v.type() == vtype: - if n == i: - return v - n += 1 - raise IndexError - - def 
serialize(self): - s = '' - for v in self.values: - s += v.serialize() - return s - - def serialize_skip(self, replace_ignored=None): - """ - Create single string from section, but skip whitespace on start. - - :type section: ConfigVariableSection - :param replace_ignored: Specify replaced text for whitespace - - Allows normalizing with replace ignored sections. - Is intended to strip possible comments between parts. - """ - s = '' - nonwhite = None - for v in self.values: - if nonwhite is None: - if v.type() != self.TYPE_IGNORED: - nonwhite = v - s += v.serialize() - elif replace_ignored is not None and v.type() == self.TYPE_IGNORED: - s += replace_ignored - else: - s += v.serialize() - return s - - -class ModifyState(object): - """Object keeping state of modifications when walking configuration file statements. - - It would keep modified configuration file and position of last found statement. - """ - - def __init__(self): - self.value = '' - self.lastpos = 0 - - def append_before(self, section): - """Appends content from last seen section to beginning of current one. - - It adds also whitespace on beginning of statement, - which is usually not interesting for any changes. - - :type section: ConfigVariableSection - """ - - end = section.start - first = section.values[0] - if first.type() == first.TYPE_IGNORED: - end = first.end - cfg = section.config.buffer - self.value += cfg[self.lastpos:end+1] - self.lastpos = end+1 - - def move_after(self, section): - """Set position to the end of section.""" - self.lastpos = section.end+1 - - def finish(self, section): - """Append remaining part of file to modified state.""" - if self.lastpos < section.end: - self.value += section.config.buffer[self.lastpos:section.end+1] - self.lastpos = section.end - - def content(self): - """Get content of (modified) section. - - Would be valid after finish() was called. 
- """ - return self.value - - @staticmethod - def callback_comment_out(section, state): - """parser.walk callback for commenting out the section.""" - state.append_before(section) - state.value += '/* ' + section.serialize_skip(' ') + ' */' - state.move_after(section) - - @staticmethod - def callback_remove(section, state): - """parser.walk callback for skipping a section.""" - state.append_before(section) - state.move_after(section) - - -# Main parser class -class IscConfigParser(object): - """Parser file with support of included files. - - Reads ISC BIND configuration file and tries to skip commented blocks, nested sections and similar stuff. - Imitates what isccfg does in native code, but without any use of native code. - """ - - CONFIG_FILE = "/etc/named.conf" - FILES_TO_CHECK = [] - - CHAR_DELIM = ";" # Must be single character - CHAR_CLOSING = CHAR_DELIM + "})]" - CHAR_CLOSING_WHITESPACE = CHAR_CLOSING + string.whitespace - CHAR_KEYWORD = string.ascii_letters + string.digits + '-_.:' - CHAR_STR_OPEN = '"' - - def __init__(self, config=None): - """Construct parser. - - :param config: path to file or already loaded ConfigFile instance - - Initialize contents from path to real config or already loaded ConfigFile class. - """ - if isinstance(config, ConfigFile): - self.FILES_TO_CHECK = [config] - self.load_included_files() - elif config is not None: - self.load_config(config) - - # - # function for parsing of config files - # - def is_comment_start(self, istr, index=0): - if istr[index] == "#" or ( - index+1 < len(istr) and istr[index:index+2] in ["//", "/*"]): - return True - return False - - def _find_end_of_comment(self, istr, index=0): - """Returns index where the comment ends. - - :param istr: input string - :param index: begin search from the index; from the start by default - - Support usual comments till the end of line (//, #) and block comment - like (/* comment */). 
In case that index is outside of the string or end - of the comment is not found, return -1. - - In case of block comment, returned index is position of slash after star. - """ - length = len(istr) - - if index >= length or index < 0: - return -1 - - if istr[index] == "#" or istr[index:].startswith("//"): - return istr.find("\n", index) - - if index+2 < length and istr[index:index+2] == "/*": - res = istr.find("*/", index+2) - if res != -1: - return res + 1 - - return -1 - - def is_opening_char(self, c): - return c in "\"'{([" - - def _remove_comments(self, istr, space_replace=False): - """Removes all comments from the given string. - - :param istr: input string - :param space_replace When true, replace comments with spaces. Skip them by default. - :return: istr without comments - """ - - ostr = "" - - length = len(istr) - index = 0 - - while index < length: - if self.is_comment_start(istr, index): - index = self._find_end_of_comment(istr, index) - if index == -1: - index = length - if space_replace: - ostr = ostr.ljust(index) - if index < length and istr[index] == "\n": - ostr += "\n" - elif istr[index] in self.CHAR_STR_OPEN: - end_str = self._find_closing_char(istr, index) - if end_str == -1: - ostr += istr[index:] - break - ostr += istr[index:end_str+1] - index = end_str - else: - ostr += istr[index] - index += 1 - - return ostr - - def _replace_comments(self, istr): - """Replaces all comments by spaces in the given string. - - :param istr: input string - :returns: string of the same length with comments replaced - """ - return self._remove_comments(istr, True) - - def find_next_token(self, istr, index=0, end_index=-1, end_report=False): - """ - Return index of another interesting token or -1 when there is not next. - - :param istr: input string - :param index: begin search from the index; from the start by default - :param end_index: stop searching at the end_index or end of the string - - In case that initial index contains already some token, skip to another. 
- But when searching starts on whitespace or beginning of the comment, - choose the first one. - - The function would be confusing in case of brackets, but content between - brackets is not evaluated as new tokens. - E.g.: - - "find { me };" : 5 - " me" : 1 - "find /* me */ me " : 13 - "/* me */ me" : 9 - "me;" : 2 - "{ me }; me" : 6 - "{ me } me" : 8 - "me } me" : 3 - "}} me" : 1 - "me" : -1 - "{ me } " : -1 - """ - length = len(istr) - if length < end_index or end_index < 0: - end_index = length - - if index >= end_index or index < 0: - return -1 - - # skip to the end of the current token - if istr[index] == '\\': - index += 2 - elif self.is_opening_char(istr[index]): - index = self._find_closing_char(istr, index, end_index) - if index != -1: - index += 1 - elif self.is_comment_start(istr, index): - index = self._find_end_of_comment(istr, index) - if index != -1: - index += 1 - elif istr[index] not in self.CHAR_CLOSING_WHITESPACE: - # so we have to skip to the end of the current token - index += 1 - while index < end_index: - if (istr[index] in self.CHAR_CLOSING_WHITESPACE - or self.is_comment_start(istr, index) - or self.is_opening_char(istr[index])): - break - index += 1 - elif end_report and istr[index] in self.CHAR_DELIM: - # Found end of statement. Report delimiter - return index - elif istr[index] in self.CHAR_CLOSING: - index += 1 - - # find next token (can be already under the current index) - while 0 <= index < end_index: - if istr[index] == '\\': - index += 2 - continue - if self.is_comment_start(istr, index): - index = self._find_end_of_comment(istr, index) - if index == -1: - break - elif self.is_opening_char(istr[index]) or istr[index] not in string.whitespace: - return index - index += 1 - return -1 - - def _find_closing_char(self, istr, index=0, end_index=-1): - """ - Returns index of equivalent closing character. 
- - :param istr: input string - - It's similar to the "find" method that returns index of the first character - of the searched character or -1. But in this function the corresponding - closing character is looked up, ignoring characters inside strings - and comments. E.g. for - "(hello (world) /* ) */ ), he would say" - index of the third ")" is returned. - """ - important_chars = { # TODO: should be that rather global var? - "{": "}", - "(": ")", - "[": "]", - "\"": "\"", - self.CHAR_DELIM: None, - } - length = len(istr) - if 0 <= end_index < length: - length = end_index - - if length < 2: - return -1 - - if index >= length or index < 0: - return -1 - - closing_char = important_chars.get(istr[index], self.CHAR_DELIM) - if closing_char is None: - return -1 - - isString = istr[index] in "\"" - index += 1 - curr_c = "" - while index < length: - curr_c = istr[index] - if curr_c == '//': - index += 2 - elif self.is_comment_start(istr, index) and not isString: - index = self._find_end_of_comment(istr, index) - if index == -1: - return -1 - elif not isString and self.is_opening_char(curr_c): - deep_close = self._find_closing_char(istr[index:]) - if deep_close == -1: - break - index += deep_close - elif curr_c == closing_char: - if curr_c == self.CHAR_DELIM: - index -= 1 - return index - index += 1 - - return -1 - - def find_key(self, istr, key, index=0, end_index=-1, only_first=True): - """ - Return index of the key or -1. - - :param istr: input string; it could be whole file or content of a section - :param key: name of the searched key in the current scope - :param index: start searching from the index - :param end_index: stop searching at the end_index or end of the string - - Function is not recursive. Searched key has to be in the current scope. - Attention: - - In case that input string contains data outside of section by mistake, - the closing character is ignored and the key outside of scope could be - found. 
Example of such wrong input could be: - key1 "val" - key2 { key-ignored "val-ignored" }; - }; - controls { ... }; - In this case, the key "controls" is outside of original scope. But for this - cases you can set end_index to value, where searching should end. In case - you set end_index higher then length of the string, end_index will be - automatically corrected to the end of the input string. - """ - length = len(istr) - keylen = len(key) - - if length < end_index or end_index < 0: - end_index = length - - if index >= end_index or index < 0: - return -1 - - while index != -1: - if istr.startswith(key, index): - if index+keylen < end_index and istr[index+keylen] not in self.CHAR_KEYWORD: - # key has been found - return index - - while not only_first and index != -1 and istr[index] != self.CHAR_DELIM: - index = self.find_next_token(istr, index) - index = self.find_next_token(istr, index) - - return -1 - - def find_next_key(self, cfg, index=0, end_index=-1, end_report=False): - """Modernized variant of find_key. - - :type cfg: ConfigFile - :param index: Where to start search - :rtype: ConfigSection - - Searches for first place of bare keyword, without quotes or block. - """ - istr = cfg.buffer - length = len(istr) - - if length < end_index or end_index < 0: - end_index = length - - if index > end_index or index < 0: - raise IndexError("Invalid cfg index") - - while index != -1: - keystart = index - while index < end_index and istr[index] in self.CHAR_KEYWORD: - index += 1 - - if index >= end_index: - break - - if keystart < index <= end_index and istr[index] not in self.CHAR_KEYWORD: - # key has been found - return ConfigSection(cfg, istr[keystart:index], keystart, index-1) - if istr[index] in self.CHAR_DELIM: - return ConfigSection(cfg, istr[index], index, index) - - index = self.find_next_token(istr, index, end_index, end_report) - - return None - - def find_next_val(self, cfg, key=None, index=0, end_index=-1, end_report=False): - """Find following token. 
- - :param cfg: input token - :type cfg: ConfigFile - :returns: ConfigSection object or None - :rtype: ConfigSection - """ - start = self.find_next_token(cfg.buffer, index, end_index, end_report) - if start < 0: - return None - if end_index < 0: - end_index = len(cfg.buffer) - # remains = cfg.buffer[start:end_index] - if not self.is_opening_char(cfg.buffer[start]): - return self.find_next_key(cfg, start, end_index, end_report) - - end = self._find_closing_char(cfg.buffer, start, end_index) - if end == -1 or (0 < end_index < end): - return None - return ConfigSection(cfg, key, start, end) - - def find_val(self, cfg, key, index=0, end_index=-1): - """Find value of keyword specified by key. - - :param cfg: ConfigFile - :param key: name of searched key (str) - :param index: start of search in cfg (int) - :param end_index: end of search in cfg (int) - :returns: ConfigSection object or None - :rtype: ConfigSection - """ - if not isinstance(cfg, ConfigFile): - raise TypeError("cfg must be ConfigFile parameter") - - if end_index < 0: - end_index = len(cfg.buffer) - key_start = self.find_key(cfg.buffer, key, index, end_index) - if key_start < 0 or key_start+len(key) >= end_index: - return None - return self.find_next_val(cfg, key, key_start+len(key), end_index) - - def find_val_section(self, section, key): - """Find value of keyword in section. - - :param section: section object returned from find_val - - Section is object found by previous find_val call. - """ - if not isinstance(section, ConfigSection): - raise TypeError("section must be ConfigSection") - return self.find_val(section.config, key, section.start+1, section.end) - - def find_values(self, section, key): - """Find key in section and list variable parameters. - - :param key: Name to statement to find - :returns: List of all found values in form of ConfigSection. First is key itself. - - Returns all sections of keyname. They can be mix of "quoted strings", {nested blocks} - or just bare keywords. 
First key is section of key itself, final section includes ';'. - Makes it possible to comment out whole section including terminal character. - """ - - if isinstance(section, ConfigFile): - cfg = section - index = 0 - end_index = len(cfg.buffer) - elif isinstance(section, ConfigSection): - cfg = section.config - index = section.start+1 - end_index = section.end - if end_index > index: - end_index -= 1 - else: - raise TypeError('Unexpected type') - - if key is None: - v = self.find_next_key(cfg, index, end_index) - else: - key_start = self.find_key(cfg.buffer, key, index, end_index) - key_end = key_start+len(key)-1 - if key_start < 0 or key_end >= end_index: - return None - # First value is always just keyword - v = ConfigSection(cfg, key, key_start, key_end) - - values = [] - while isinstance(v, ConfigSection): - values.append(v) - if v.value() == self.CHAR_DELIM: - break - v = self.find_next_val(cfg, key, v.end+1, end_index, end_report=True) - return values - - def find(self, key_string, cfg=None, delimiter='.'): - """Helper searching for values under requested sections. - - Search for statement under some sections. It is inspired by xpath style paths, - but searches section in bind configuration. - - :param key_string: keywords delimited by dots. For example options.dnssec-lookaside - :type key_string: str - :param cfg: Search only in given config file - :type cfg: ConfigFile - :returns: list of ConfigVariableSection - """ - keys = key_string.split(delimiter) - if cfg is not None: - return self._find_values_simple(cfg.root_section(), keys) - - items = [] - for cfgs in self.FILES_TO_CHECK: - items.extend(self._find_values_simple(cfgs.root_section(), keys)) - return items - - def is_terminal(self, section): - """.Returns true when section is final character of one statement.""" - return section.value() in self.CHAR_DELIM - - def _variable_section(self, vl, parent=None, offset=1): - """Create ConfigVariableSection with a name and optionally class. 
- - Intended for view and zone in bind. - :returns: ConfigVariableSection - """ - vname = self._list_value(vl, 1).invalue() - vclass = None - v = self._list_value(vl, 2) - if v.type() != ConfigSection.TYPE_BLOCK and self._list_value(vl, 2): - vclass = v.value() - return ConfigVariableSection(vl, vname, vclass, parent) - - def _list_value(self, vl, i): - n = 0 - for v in vl: - if v.type() != ConfigSection.TYPE_IGNORED: - if n == i: - return v - n += 1 - raise IndexError - - def _find_values_simple(self, section, keys): - found_values = [] - sect = section.copy() - - while sect is not None: - vl = self.find_values(sect, keys[0]) - if vl is None: - break - if len(keys) <= 1: - variable = self._variable_section(vl, section) - found_values.append(variable) - sect.start = variable.end+1 - else: - for v in vl: - if v.type() == ConfigSection.TYPE_BLOCK: - vl2 = self._find_values_simple(v, keys[1:]) - if vl2 is not None: - found_values.extend(vl2) - sect.start = vl[-1].end+1 - - return found_values - - def walk(self, section, callbacks, state=None, parent=None, start=0): - """Walk over section also with nested blocks. - - :param section: Section to iterate, usually ConfigFile.root_section() - :param callbacks: Set of callbacks with name: f(section, state) parameters, indexed by statement name - :param start: Offset from beginning of section - - Call specified actions specified in callbacks, which can react on desired statements. - Pass state and matching section to callback. 
- """ - if start == 0 and section.type() == ConfigSection.TYPE_BLOCK: - start = 1 - it = IscVarIterator(self, section, True, start=section.start+start) - for statement in it: - try: - name = statement.var(0).value() - if name in callbacks: - f = callbacks[name] - f(statement, state) - except IndexError: - pass - for child in statement.values: - if child.type() == ConfigSection.TYPE_BLOCK: - self.walk(child, callbacks, state, parent=statement) - return state - - # - # CONFIGURATION fixes PART - END - # - - def is_file_loaded(self, path=""): - """ - Checks if the file with a given 'path' is already loaded in FILES_TO_CHECK. - """ - for f in self.FILES_TO_CHECK: - if f.path == path: - return True - return False - - def new_config(self, path, parent=None): - config = ConfigFile(path) - self.FILES_TO_CHECK.append(config) - return config - - def on_include_error(self, e): - """Handle IO errors on file reading. - - Override to create custom error handling.""" - raise e - - def load_included_files(self): - """Add included list to parser. - - Finds the configuration files that are included in some configuration - file, reads it, closes and adds into the FILES_TO_CHECK list. 
- """ - # TODO: use parser instead of regexp - pattern = re.compile(r'include\s*"(.+?)"\s*;') - # find includes in all files - for ch_file in self.FILES_TO_CHECK: - nocomments = self._remove_comments(ch_file.buffer) - includes = re.findall(pattern, nocomments) - for include in includes: - # don't include already loaded files -> prevent loops - if self.is_file_loaded(include): - continue - try: - self.new_config(include) - except IOError as e: - self.on_include_error(ConfigParseError(e, include)) - - def load_main_config(self): - """Loads main CONFIG_FILE.""" - try: - self.new_config(self.CONFIG_FILE) - except IOError as e: - raise ConfigParseError(e) - - def load_config(self, path=None): - """Loads main config file with all included files.""" - if path is not None: - self.CONFIG_FILE = path - self.load_main_config() - self.load_included_files() - pass - - -if __name__ == '__main__': - """Run parser to default path or path in the first argument. - - Additional parameters are statements or blocks to print. - Defaults to options and zone. 
- """ - - from sys import argv - - def print_cb(section, state): - print(section) - - cfgpath = IscConfigParser.CONFIG_FILE - if len(argv) > 1: - cfgpath = argv[1] - if len(argv) > 2: - cb = {} - for key in argv[2:]: - cb[key] = print_cb - else: - cb = {'options': print_cb, 'zone': print_cb} - - parser = IscConfigParser(cfgpath) - for section in parser.FILES_TO_CHECK: - print("# Walking file '{}'".format(section.path)) - parser.walk(section.root_section(), cb) diff --git a/repos/system_upgrade/el7toel8/libraries/pam.py b/repos/system_upgrade/el7toel8/libraries/pam.py deleted file mode 100644 index 8494e469..00000000 --- a/repos/system_upgrade/el7toel8/libraries/pam.py +++ /dev/null @@ -1,66 +0,0 @@ -import os -import re - - -class PAM(object): - files = [ - '/etc/pam.d/system-auth', - '/etc/pam.d/smartcard-auth', - '/etc/pam.d/password-auth', - '/etc/pam.d/fingerprint-auth', - '/etc/pam.d/postlogin' - ] - """ - List of system PAM configuration files. - """ - - def __init__(self, config): - self.modules = self.parse(config) - - def parse(self, config): - """ - Parse configuration and return list of modules that are present in the - configuration. - """ - result = re.findall( - r"^[ \t]*[^#\s]+.*(pam_\S+)\.so.*$", - config, - re.MULTILINE - ) - - return result - - def has(self, module): - """ - Return True if the module exist in the configuration, False otherwise. - """ - return module in self.modules - - def has_unknown_module(self, known_modules): - """ - Return True if the configuration has any module which is not known to - the caller, False otherwise. - """ - for module in self.modules: - if module not in known_modules: - return True - - return False - - @staticmethod - def read_file(config): - """ - Read file contents. Return empty string if the file does not exist. 
- """ - if not os.path.isfile(config): - return "" - with open(config) as f: - return f.read() - - @staticmethod - def from_system_configuration(): - config = "" - for f in PAM.files: - config += PAM.read_file(f) - - return PAM(config) diff --git a/repos/system_upgrade/el7toel8/libraries/spamassassinutils.py b/repos/system_upgrade/el7toel8/libraries/spamassassinutils.py deleted file mode 100644 index 9f5c1d1c..00000000 --- a/repos/system_upgrade/el7toel8/libraries/spamassassinutils.py +++ /dev/null @@ -1,44 +0,0 @@ -import re - -SPAMC_CONFIG_FILE = '/etc/mail/spamassassin/spamc.conf' -SPAMASSASSIN_SERVICE_OVERRIDE = '/etc/systemd/system/spamassassin.service' -SYSCONFIG_SPAMASSASSIN = '/etc/sysconfig/spamassassin' -SYSCONFIG_VARIABLE = 'SPAMDOPTIONS' -SPAMD_SHORTOPTS_NOARG = "ch46LlxPQqVv" -""" All short options in spamd that do not accept an argument, excluding -d. """ - - -def parse_sysconfig_spamassassin(content): - """ - Splits up a spamassassin sysconfig file into three parts and returns those parts: - 1. Beginning of the file up to the SPAMDOPTIONS assignment - 2. The assignment to the SPAMDOPTIONS variable (this is the assignment - that takes effect, i.e. the last assignment to the variable) - 3. 
End of the file after the SPAMDOPTIONS assignment - """ - line_continues = False - is_assignment = False - assignment_start = None - assignment_end = None - lines = content.split('\n') - for ix, line in enumerate(lines): - is_assignment = ((is_assignment and line_continues) or - (not (not is_assignment and line_continues) and - re.match(r'\s*' + SYSCONFIG_VARIABLE + '=', line))) - if is_assignment: - if line_continues: - assignment_end += 1 - else: - assignment_start = ix - assignment_end = ix + 1 - line_continues = line.endswith('\\') - - if assignment_start is None: - return content, '', '' - assignment = '' - for line in lines[assignment_start:assignment_end - 1]: - assignment += line[:-1] - assignment += lines[assignment_end - 1] - pre_assignment = '\n'.join(lines[:assignment_start]) - post_assignment = '\n'.join(lines[assignment_end:]) - return pre_assignment, assignment, post_assignment diff --git a/repos/system_upgrade/el7toel8/libraries/tcpwrappersutils.py b/repos/system_upgrade/el7toel8/libraries/tcpwrappersutils.py deleted file mode 100644 index e17b0296..00000000 --- a/repos/system_upgrade/el7toel8/libraries/tcpwrappersutils.py +++ /dev/null @@ -1,83 +0,0 @@ -import re - - -def _build_regex(pattern): - regex = '^' - part_beginning = 0 - while part_beginning < len(pattern): - ix1 = pattern.find('*', part_beginning) - ix2 = pattern.find('?', part_beginning) - ix1 = len(pattern) if ix1 < 0 else ix1 - ix2 = len(pattern) if ix2 < 0 else ix2 - part_end = min(ix1, ix2) - - regex += re.escape(pattern[part_beginning:part_end]) - - if part_end < len(pattern): - if pattern[part_end] == '*': - regex += '.*' - else: - regex += '.' 
- - part_beginning = part_end + 1 - - regex += '$' - return regex - - -def _pattern_matches(pattern, string): - if pattern.lower() == 'all': - return True - regex = _build_regex(pattern) - return re.match(regex, string, re.IGNORECASE) is not None - - -def _daemon_list_matches_daemon(daemon_list, daemon, recursion_depth): - try: - cur_list_end = daemon_list.index('except') - except ValueError: - cur_list_end = len(daemon_list) - cur_list = daemon_list[:cur_list_end] - matches_cur_list = False - for item in cur_list: - try: - ix = item.index('@') - # For simplicity, we ignore the host part. So we must make sure - # that a daemon list containing a host-based pattern will always match - # the daemon part of that host-based pattern (e.g. 'all except vsftpd@localhost - # matches 'vsftpd'). See test_config_applies_to_daemon_with_host_except(). - if recursion_depth % 2 == 1: - continue - pattern = item[:ix] - except ValueError: - pattern = item - if _pattern_matches(pattern, daemon): - matches_cur_list = True - break - - next_list = daemon_list[cur_list_end + 1:] - if not next_list: - matches_next_list = False - else: - matches_next_list = _daemon_list_matches_daemon(next_list, daemon, recursion_depth + 1) - - return matches_cur_list and not matches_next_list - - -def config_applies_to_daemon(facts, daemon): - """ - Returns True if the specified tcp_wrappers configuration applies to the specified daemon. - Otherwise returns False. - - This information is intended to be used in the Checks phase to check whether there is - any tcp_wrappers configuration that the user needs to migrate manually and whether we - should inhibit the upgrade, so that the upgraded system is not insecure. 
- - :param facts: A TcpWrappersFacts representation of the tcp_wrappers configuration - :param daemon: The daemon name - """ - for daemon_list in facts.daemon_lists: - value = [item.lower() for item in daemon_list.value] - if _daemon_list_matches_daemon(value, daemon, 0): - return True - return False diff --git a/repos/system_upgrade/el7toel8/libraries/tests/test_isccfg.py b/repos/system_upgrade/el7toel8/libraries/tests/test_isccfg.py deleted file mode 100644 index 00753681..00000000 --- a/repos/system_upgrade/el7toel8/libraries/tests/test_isccfg.py +++ /dev/null @@ -1,379 +0,0 @@ -#!/usr/bin/env python -# -# Tests for bind configuration parsing - -from leapp.libraries.common import isccfg - -# -# Sample configuration stubs -# -named_conf_default = isccfg.MockConfig(""" -// -// named.conf -// -// Provided by Red Hat bind package to configure the ISC BIND named(8) DNS -// server as a caching only nameserver (as a localhost DNS resolver only). -// -// See /usr/share/doc/bind*/sample/ for example named configuration files. -// - -options { - listen-on port 53 { 127.0.0.1; }; - listen-on-v6 port 53 { ::1; }; - directory "/var/named"; - dump-file "/var/named/data/cache_dump.db"; - statistics-file "/var/named/data/named_stats.txt"; - memstatistics-file "/var/named/data/named_mem_stats.txt"; - secroots-file "/var/named/data/named.secroots"; - recursing-file "/var/named/data/named.recursing"; - allow-query { localhost; }; - - /* - - If you are building an AUTHORITATIVE DNS server, do NOT enable recursion. - - If you are building a RECURSIVE (caching) DNS server, you need to enable - recursion. - - If your recursive DNS server has a public IP address, you MUST enable access - control to limit queries to your legitimate users. Failing to do so will - cause your server to become part of large scale DNS amplification - attacks. 
Implementing BCP38 within your network would greatly - reduce such attack surface - */ - recursion yes; - - dnssec-enable yes; - dnssec-validation yes; - - managed-keys-directory "/var/named/dynamic"; - - pid-file "/run/named/named.pid"; - session-keyfile "/run/named/session.key"; -}; - -logging { - channel default_debug { - file "data/named.run"; - severity dynamic; - }; -}; - -zone "." IN { - type hint; - file "named.ca"; -}; - -# Avoid including files from bind package, may be not installed -# include "/etc/named.rfc1912.zones"; -# include "/etc/named.root.key"; -include "/dev/null"; -""") - - -options_lookaside_no = isccfg.MockConfig(""" -options { - dnssec-lookaside no; -}; -""") - - -options_lookaside_auto = isccfg.MockConfig(""" -options { - dnssec-lookaside /* no */ auto; -}; -""") - - -options_lookaside_manual = isccfg.MockConfig(""" -options { - # make sure parser handles comments - dnssec-lookaside "." /* comment to confuse parser */trust-anchor "dlv.isc.org"; -}; -""") - - -options_lookaside_commented = isccfg.MockConfig(""" -options { - /* dnssec-lookaside auto; */ -}; -""") - - -views_lookaside = isccfg.MockConfig(""" -view "v1" IN { - // This is auto - dnssec-lookaside auto; -}; - -options { - /* This is multi - * line - * comment */ - dnssec-lookaside no; -}; - -view "v2" { - # Note no IN - dnssec-lookaside "." 
trust-anchor "dlv.isc.org"; -}; -""") - -config_empty = isccfg.MockConfig('') - -config_empty_include = isccfg.MockConfig('options { include "/dev/null"; };') - - -def check_in_section(parser, section, key, value): - """ Helper to check some section was found - in configuration section and has expected value - - :type parser: IscConfigParser - :type section: bind.ConfigSection - :type key: str - :param value: expected value """ - assert isinstance(section, isccfg.ConfigSection) - cfgval = parser.find_val_section(section, key) - assert isinstance(cfgval, isccfg.ConfigSection) - assert cfgval.value() == value - return cfgval - - -def cb_state(statement, state): - """Callback used in IscConfigParser.walk()""" - key = statement.var(0).value() - state[key] = statement - - -def find_options(parser): - """Replace IscConfigParser.find_option with walk use""" - state = {} - callbacks = { - 'options': cb_state, - } - assert len(parser.FILES_TO_CHECK) >= 1 - cfg = parser.FILES_TO_CHECK[0] - parser.walk(cfg.root_section(), callbacks, state) - options = state['options'] - if options: - assert isinstance(options, isccfg.ConfigVariableSection) - return options.firstblock() - return None - - -# End of helpers -# -# Begin of tests - - -def test_lookaside_no(): - parser = isccfg.IscConfigParser(options_lookaside_no) - assert len(parser.FILES_TO_CHECK) == 1 - opt = find_options(parser) - check_in_section(parser, opt, "dnssec-lookaside", "no") - - -def test_lookaside_commented(): - parser = isccfg.IscConfigParser(options_lookaside_commented) - assert len(parser.FILES_TO_CHECK) == 1 - opt = find_options(parser) - assert isinstance(opt, isccfg.ConfigSection) - lookaside = parser.find_val_section(opt, "dnssec-lookaside") - assert lookaside is None - - -def test_default(): - parser = isccfg.IscConfigParser(named_conf_default) - assert len(parser.FILES_TO_CHECK) >= 2 - opt = find_options(parser) - check_in_section(parser, opt, "directory", '"/var/named"') - check_in_section(parser, opt, 
"session-keyfile", '"/run/named/session.key"') - check_in_section(parser, opt, "allow-query", '{ localhost; }') - check_in_section(parser, opt, "recursion", 'yes') - check_in_section(parser, opt, "dnssec-validation", 'yes') - check_in_section(parser, opt, "dnssec-enable", 'yes') - - -def test_key_lookaside(): - parser = isccfg.IscConfigParser(options_lookaside_manual) - opt = find_options(parser) - key = parser.find_next_key(opt.config, opt.start+1, opt.end) - assert isinstance(key, isccfg.ConfigSection) - assert key.value() == 'dnssec-lookaside' - value = parser.find_next_val(opt.config, None, key.end+1, opt.end) - assert value.value() == '"."' - key2 = parser.find_next_key(opt.config, value.end+1, opt.end) - assert key2.value() == 'trust-anchor' - value2a = parser.find_next_val(opt.config, None, key2.end+1, opt.end) - value2b = parser.find_val(opt.config, 'trust-anchor', value.end+1, opt.end) - assert value2b.value() == '"dlv.isc.org"' - assert value2a.value() == value2b.value() - value3 = parser.find_next_key(opt.config, value2b.end+1, opt.end, end_report=True) - assert value3.value() == ';' - - -def test_key_lookaside_all(): - """ Test getting variable arguments after keyword """ - parser = isccfg.IscConfigParser(options_lookaside_manual) - assert len(parser.FILES_TO_CHECK) == 1 - opt = find_options(parser) - assert isinstance(opt, isccfg.ConfigSection) - values = parser.find_values(opt, "dnssec-lookaside") - assert values is not None - assert len(values) >= 4 - key = values[0].value() - assert key == 'dnssec-lookaside' - assert values[1].value() == '"."' - assert values[2].value() == 'trust-anchor' - assert values[3].value() == '"dlv.isc.org"' - assert values[4].value() == ';' - - -def test_key_lookaside_simple(): - """ Test getting variable arguments after keyword """ - parser = isccfg.IscConfigParser(options_lookaside_manual) - assert len(parser.FILES_TO_CHECK) == 1 - stmts = parser.find('options.dnssec-lookaside') - assert stmts is not None - assert 
len(stmts) == 1 - assert isinstance(stmts[0], isccfg.ConfigVariableSection) - values = stmts[0].values - assert len(values) >= 4 - key = values[0].value() - assert key == 'dnssec-lookaside' - assert values[1].value() == '"."' - assert values[2].value() == 'trust-anchor' - assert values[3].value() == '"dlv.isc.org"' - assert values[4].value() == ';' - - -def test_find_index(): - """ Test simplified searching for values in sections """ - parser = isccfg.IscConfigParser(named_conf_default) - assert len(parser.FILES_TO_CHECK) >= 1 - stmts = parser.find('logging.channel.severity') - assert stmts is not None and len(stmts) == 1 - assert isinstance(stmts[0], isccfg.ConfigVariableSection) - values = stmts[0].values - assert len(values) >= 1 - key = values[0].value() - assert key == 'severity' - assert values[1].value() == 'dynamic' - recursion = parser.find('options.recursion') - assert len(recursion) == 1 and len(recursion[0].values) >= 2 - assert recursion[0].values[0].value() == 'recursion' - assert recursion[0].values[1].value() == 'yes' - - -def cb_view(statement, state): - if 'view' not in state: - state['view'] = {} - name = statement.var(1).invalue() - second = statement.var(2) - if second.type() != isccfg.ConfigSection.TYPE_BLOCK: - name = second.value() + '_' + name - state['view'][name] = statement - - -def test_key_views_lookaside(): - """ Test getting variable arguments for views """ - - parser = isccfg.IscConfigParser(views_lookaside) - assert len(parser.FILES_TO_CHECK) == 1 - opt = find_options(parser) - assert isinstance(opt, isccfg.ConfigSection) - opt_val = parser.find_values(opt, "dnssec-lookaside") - assert isinstance(opt_val[1], isccfg.ConfigSection) - assert opt_val[1].value() == 'no' - - state = {} - callbacks = { - 'view': cb_view, - } - assert len(parser.FILES_TO_CHECK) >= 1 - cfg = parser.FILES_TO_CHECK[0] - parser.walk(cfg.root_section(), callbacks, state) - - views = state['view'] - assert len(views) == 2 - - v1 = views['IN_v1'] - assert 
isinstance(v1, isccfg.ConfigVariableSection) - v1b = v1.firstblock() - assert isinstance(v1b, isccfg.ConfigSection) - v1_la = parser.find_val_section(v1b, "dnssec-lookaside") - assert isinstance(v1_la, isccfg.ConfigSection) - assert v1_la.value() == 'auto' - - v2 = views['v2'] - assert isinstance(v2, isccfg.ConfigVariableSection) - v2b = v2.firstblock() - assert isinstance(v2b, isccfg.ConfigSection) - v2_la = parser.find_values(v2b, "dnssec-lookaside") - assert isinstance(v2_la[1], isccfg.ConfigSection) - assert v2_la[1].value() == '"."' - assert isinstance(v2_la[3], isccfg.ConfigSection) - assert v2_la[3].value() == '"dlv.isc.org"' - - -def test_remove_comments(): - """ Test removing comments works as expected """ - - parser = isccfg.IscConfigParser(views_lookaside) - assert len(parser.FILES_TO_CHECK) == 1 - cfg = parser.FILES_TO_CHECK[0] - assert isinstance(cfg, isccfg.ConfigFile) - removed_comments = parser._remove_comments(cfg.buffer) - assert len(removed_comments) < len(cfg.buffer) - replaced_comments = parser._replace_comments(cfg.buffer) - assert len(replaced_comments) == len(cfg.buffer) - assert 'This is auto' not in replaced_comments - assert 'comment' not in replaced_comments - assert 'Note no IN' not in replaced_comments - - -def test_walk(): - """ Test walk function of parser """ - - callbacks = { - 'options': cb_state, - 'dnssec-lookaside': cb_state, - 'dnssec-validation': cb_state, - } - state = {} - parser = isccfg.IscConfigParser(views_lookaside) - assert len(parser.FILES_TO_CHECK) == 1 - cfg = parser.FILES_TO_CHECK[0] - parser.walk(cfg.root_section(), callbacks, state) - assert 'options' in state - assert 'dnssec-lookaside' in state - assert 'dnssec-validation' not in state - - -def test_empty_config(): - """ Test empty configuration """ - - callbacks = {} - - parser = isccfg.IscConfigParser(config_empty) - assert len(parser.FILES_TO_CHECK) == 1 - cfg = parser.FILES_TO_CHECK[0] - parser.walk(cfg.root_section(), callbacks) - assert cfg.buffer == '' 
- - -def test_empty_include_config(): - """ Test empty configuration """ - - callbacks = {} - - parser = isccfg.IscConfigParser(config_empty_include) - assert len(parser.FILES_TO_CHECK) == 2 - cfg = parser.FILES_TO_CHECK[0] - parser.walk(cfg.root_section(), callbacks) - assert cfg.buffer == 'options { include "/dev/null"; };' - - null_cfg = parser.FILES_TO_CHECK[1] - parser.walk(null_cfg.root_section(), callbacks) - assert null_cfg.buffer == '' - - -if __name__ == '__main__': - test_key_views_lookaside() diff --git a/repos/system_upgrade/el7toel8/libraries/tests/test_pam.py b/repos/system_upgrade/el7toel8/libraries/tests/test_pam.py deleted file mode 100644 index 8ae7ce09..00000000 --- a/repos/system_upgrade/el7toel8/libraries/tests/test_pam.py +++ /dev/null @@ -1,92 +0,0 @@ -import textwrap - -from leapp.libraries.common.pam import PAM - - -def get_config(config): - return textwrap.dedent(config).strip() - - -def test_PAM_parse(): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - obj = PAM('') - modules = obj.parse(pam) - - assert len(modules) == 3 - assert 'pam_unix' in modules - assert 'pam_sss' in modules - assert 'pam_deny' in modules - - -def test_PAM_has__true(): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - obj = PAM(pam) - assert obj.has('pam_unix') - assert obj.has('pam_sss') - assert obj.has('pam_deny') - - -def test_PAM_has__false(): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - obj = PAM(pam) - assert not obj.has('pam_winbind') - - -def test_PAM_has_unknown_module__empty(): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - obj = PAM(pam) - assert obj.has_unknown_module([]) - - -def test_PAM_has_unknown_module__false(): - pam = get_config(''' - auth sufficient 
pam_unix.so - auth sufficient pam_sss.so - auth required pam_deny.so - ''') - - obj = PAM(pam) - assert not obj.has_unknown_module(['pam_unix', 'pam_sss', 'pam_deny']) - - -def test_PAM_has_unknown_module__true(): - pam = get_config(''' - auth sufficient pam_unix.so - auth sufficient pam_sss.so - auth required pam_deny.so - session pam_ecryptfs.so - ''') - - obj = PAM(pam) - assert obj.has_unknown_module(['pam_unix', 'pam_sss', 'pam_deny']) - - -def test_PAM_read_file__non_existent(): - content = PAM.read_file('/this/does/not/exist') - assert content == '' - - -def test_PAM_read_file__ok(): - content = PAM.read_file(__file__) - assert content != '' - assert 'test_PAM_read_file__ok' in content diff --git a/repos/system_upgrade/el7toel8/libraries/tests/test_spamassassinutils.py b/repos/system_upgrade/el7toel8/libraries/tests/test_spamassassinutils.py deleted file mode 100644 index acbaa622..00000000 --- a/repos/system_upgrade/el7toel8/libraries/tests/test_spamassassinutils.py +++ /dev/null @@ -1,41 +0,0 @@ -import leapp.libraries.common.spamassassinutils as lib - - -def test_parse_sysconfig_spamassassin_begins_with_assignment(): - content = 'SPAMDOPTIONS="foo"\n# bar\n' - pre, assignment, post = lib.parse_sysconfig_spamassassin(content) - assert pre == '' - assert assignment == 'SPAMDOPTIONS="foo"' - assert post == '# bar\n' - - -def test_parse_sysconfig_spamassassin_ends_with_assignment(): - content = '# bar\nSPAMDOPTIONS="foo"\n' - pre, assignment, post = lib.parse_sysconfig_spamassassin(content) - assert pre == '# bar' - assert assignment == 'SPAMDOPTIONS="foo"' - assert post == '' - - -def test_parse_sysconfig_spamassassin_only_assignment(): - content = 'SPAMDOPTIONS="foo"\n' - pre, assignment, post = lib.parse_sysconfig_spamassassin(content) - assert pre == '' - assert assignment == 'SPAMDOPTIONS="foo"' - assert post == '' - - -def test_parse_sysconfig_spamassassin_no_assignment(): - content = '# foo\n' - pre, assignment, post = 
lib.parse_sysconfig_spamassassin(content) - assert pre == '# foo\n' - assert assignment == '' - assert post == '' - - -def test_parse_sysconfig_spamassassin_empty(): - content = '' - pre, assignment, post = lib.parse_sysconfig_spamassassin(content) - assert pre == '' - assert assignment == '' - assert post == '' diff --git a/repos/system_upgrade/el7toel8/libraries/tests/test_tcpwrappersutils.py b/repos/system_upgrade/el7toel8/libraries/tests/test_tcpwrappersutils.py deleted file mode 100644 index d9a57669..00000000 --- a/repos/system_upgrade/el7toel8/libraries/tests/test_tcpwrappersutils.py +++ /dev/null @@ -1,176 +0,0 @@ -import leapp.libraries.common.tcpwrappersutils as lib -from leapp.models import DaemonList, TcpWrappersFacts - - -def test_config_applies_to_daemon_simple(): - daemon_list = DaemonList(value=['vsftpd']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - - assert lib.config_applies_to_daemon(facts, 'vsftpd') is True - assert lib.config_applies_to_daemon(facts, 'VsfTpd') is True - assert lib.config_applies_to_daemon(facts, 'ftp') is False - assert lib.config_applies_to_daemon(facts, 'foo') is False - - -def test_config_applies_to_daemon_multiple_lists(): - list1 = DaemonList(value=['vsftpd', 'sendmail']) - list2 = DaemonList(value=['postfix']) - facts = TcpWrappersFacts(daemon_lists=[list1, list2]) - - assert lib.config_applies_to_daemon(facts, 'vsftpd') is True - assert lib.config_applies_to_daemon(facts, 'sendmail') is True - assert lib.config_applies_to_daemon(facts, 'postfix') is True - assert lib.config_applies_to_daemon(facts, 'foo') is False - - -def test_config_applies_to_daemon_except(): - list1 = DaemonList(value=['all', 'except', 'sendmail']) - list2 = DaemonList(value=['postfix']) - facts = TcpWrappersFacts(daemon_lists=[list1, list2]) - - assert lib.config_applies_to_daemon(facts, 'vsftpd') is True - assert lib.config_applies_to_daemon(facts, 'sendmail') is False - assert lib.config_applies_to_daemon(facts, 'postfix') is True - 
assert lib.config_applies_to_daemon(facts, 'foo') is True - - list1 = DaemonList(value=['all', 'except', 'b*', 'EXCEPT', 'bar']) - facts = TcpWrappersFacts(daemon_lists=[list1]) - assert lib.config_applies_to_daemon(facts, 'foo') is True - assert lib.config_applies_to_daemon(facts, 'bar') is True - assert lib.config_applies_to_daemon(facts, 'baar') is False - - list1 = DaemonList(value=['all', 'except', 'vsftpd']) - facts = TcpWrappersFacts(daemon_lists=[list1]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is False - - list1 = DaemonList(value=['all', 'except', 'all', 'except', 'vsftpd']) - facts = TcpWrappersFacts(daemon_lists=[list1]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is True - - list1 = DaemonList(value=['all', 'except', 'all', 'except', 'all', 'except', 'vsftpd']) - facts = TcpWrappersFacts(daemon_lists=[list1]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is False - - -def test_config_applies_to_daemon_except_empty(): - daemon_list = DaemonList(value=['all', 'except']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is True - - -def test_config_applies_to_daemon_with_host(): - list1 = DaemonList(value=['vsftpd@localhost', 'sendmail']) - list2 = DaemonList(value=['postfix']) - facts = TcpWrappersFacts(daemon_lists=[list1, list2]) - - assert lib.config_applies_to_daemon(facts, 'vsftpd') is True - assert lib.config_applies_to_daemon(facts, 'sendmail') is True - assert lib.config_applies_to_daemon(facts, 'postfix') is True - assert lib.config_applies_to_daemon(facts, 'foo') is False - - -def test_config_applies_to_daemon_with_host_except(): - daemon_list = DaemonList(value=['vsftpd@localhost', 'except', 'vsftpd']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is False - - # It works like this for simplicity. 
- daemon_list = DaemonList(value=['vsftpd@localhost', 'except', 'vsftpd@localhost']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is True - - daemon_list = DaemonList(value=['vsftpd@localhost']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is True - - daemon_list = DaemonList(value=['all', 'except', 'vsftpd@localhost']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is True - - daemon_list = DaemonList(value=['all', 'except', 'all', 'except', 'vsftpd@localhost']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is True - - daemon_list = DaemonList(value=['all', 'except', 'all', 'except', 'all', - 'except', 'vsftpd@localhost']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is True - - daemon_list = DaemonList(value=['all', 'except', 'all', 'except', 'all', 'except', 'all', - 'except', 'vsftpd@localhost']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is True - - -def test_config_applies_to_daemon_empty(): - daemon_list = DaemonList(value=['']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is False - - daemon_list = DaemonList(value=[]) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is False - - -def test_config_applies_to_daemon_whole_word(): - daemon_list = DaemonList(value=['ftp']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is False - - -def test_config_applies_to_daemon_asterisk_wildcard(): - daemon_list = DaemonList(value=['*ftp*']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert 
lib.config_applies_to_daemon(facts, 'vsftpd') is True - - daemon_list = DaemonList(value=['************']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is True - - daemon_list = DaemonList(value=['*']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is True - - daemon_list = DaemonList(value=['*foo*']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is False - - -def test_config_applies_to_daemon_question_mark_wildcard(): - daemon_list = DaemonList(value=['vs?tpd']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is True - - daemon_list = DaemonList(value=['vsf?tpd']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is False - - daemon_list = DaemonList(value=['?']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is False - - daemon_list = DaemonList(value=['??????']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is True - - -def test_config_applies_to_daemon_all_wildcard(): - daemon_list = DaemonList(value=['all']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is True - - daemon_list = DaemonList(value=['aLl']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is True - - daemon_list = DaemonList(value=['al']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is False - - daemon_list = DaemonList(value=['ll']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is False - - daemon_list = 
DaemonList(value=['valld']) - facts = TcpWrappersFacts(daemon_lists=[daemon_list]) - assert lib.config_applies_to_daemon(facts, 'vsftpd') is False diff --git a/repos/system_upgrade/el7toel8/libraries/tests/test_vsftpdutils.py b/repos/system_upgrade/el7toel8/libraries/tests/test_vsftpdutils.py deleted file mode 100644 index ff631c0f..00000000 --- a/repos/system_upgrade/el7toel8/libraries/tests/test_vsftpdutils.py +++ /dev/null @@ -1,82 +0,0 @@ -import errno - -from leapp.libraries.common.testutils import make_IOError -from leapp.libraries.common.vsftpdutils import get_config_contents, get_default_config_hash - - -class MockFile(object): - def __init__(self, path, content=None, to_raise=None): - self.path = path - self.content = content - self.to_raise = to_raise - self.error = False - - def read_file(self, path): - if path != self.path: - self.error = True - raise ValueError - if not self.to_raise: - return self.content - raise self.to_raise - - -def test_getting_nonexistent_config_gives_None(): - path = 'my_file' - f = MockFile(path, to_raise=make_IOError(errno.ENOENT)) - - res = get_config_contents(path, read_func=f.read_file) - - assert not f.error - assert res is None - - -def test_getting_inaccessible_config_gives_None(): - path = 'my_file' - f = MockFile(path, to_raise=make_IOError(errno.EACCES)) - - res = get_config_contents(path, read_func=f.read_file) - - assert not f.error - assert res is None - - -def test_getting_empty_config_gives_empty_string(): - path = 'my_file' - f = MockFile(path, content='') - - res = get_config_contents(path, read_func=f.read_file) - - assert not f.error - assert res == '' - - -def test_getting_nonempty_config_gives_the_content(): - path = 'my_file' - content = 'foo\nbar\n' - f = MockFile(path, content=content) - - res = get_config_contents(path, read_func=f.read_file) - - assert not f.error - assert res == content - - -def test_hash_of_default_config_is_correct(): - path = '/etc/vsftpd/vsftpd.conf' - content = 'foo\n' - f = 
MockFile(path, content=content) - - h = get_default_config_hash(read_func=f.read_file) - - assert h == 'f1d2d2f924e986ac86fdf7b36c94bcdf32beec15' - assert not f.error - - -def test_hash_of_nonexistent_default_config_is_None(): - path = '/etc/vsftpd/vsftpd.conf' - f = MockFile(path, to_raise=make_IOError(errno.ENOENT)) - - h = get_default_config_hash(read_func=f.read_file) - - assert h is None - assert not f.error diff --git a/repos/system_upgrade/el7toel8/libraries/vsftpdutils.py b/repos/system_upgrade/el7toel8/libraries/vsftpdutils.py deleted file mode 100644 index 776c5b2d..00000000 --- a/repos/system_upgrade/el7toel8/libraries/vsftpdutils.py +++ /dev/null @@ -1,51 +0,0 @@ -import errno -import hashlib - -from leapp.libraries.stdlib import api - -VSFTPD_CONFIG_DIR = '/etc/vsftpd' -VSFTPD_DEFAULT_CONFIG_PATH = '/etc/vsftpd/vsftpd.conf' -STRICT_SSL_READ_EOF = 'strict_ssl_read_eof' -TCP_WRAPPERS = 'tcp_wrappers' - - -def read_file(path): - """ - Read a file in text mode and return the contents. - - :param path: File path - """ - with open(path, 'r') as f: - return f.read() - - -def get_config_contents(path, read_func=read_file): - """ - Try to read a vsftpd configuration file - - Try to read a vsftpd configuration file, log a warning if an error happens. - :param path: File path - :param read_func: Function to use to read the file. This is meant to be overridden in tests. - :return: File contents or None, if the file could not be read - """ - try: - return read_func(path) - except IOError as e: - if e.errno != errno.ENOENT: - api.current_logger().warning('Failed to read vsftpd configuration file: %s' % e) - return None - - -def get_default_config_hash(read_func=read_file): - """ - Read the default vsftpd configuration file (/etc/vsftpd/vsftpd.conf) and return its hash. - - :param read_func: Function to use to read the file. This is meant to be overridden in tests. - :return SHA1 hash of the configuration file, or None if the file could not be read. 
- """ - content = get_config_contents(VSFTPD_DEFAULT_CONFIG_PATH, read_func=read_func) - if content is None: - return None - content = content.encode(encoding='utf-8') - h = hashlib.sha1(content) - return h.hexdigest() diff --git a/repos/system_upgrade/el7toel8/models/authselect.py b/repos/system_upgrade/el7toel8/models/authselect.py deleted file mode 100644 index 32ef422d..00000000 --- a/repos/system_upgrade/el7toel8/models/authselect.py +++ /dev/null @@ -1,42 +0,0 @@ -from leapp.models import fields, Model -from leapp.topics import SystemFactsTopic, SystemInfoTopic - - -class Authselect(Model): - """ - Suggested changes that will convert the system to authselect. - - This model describes the authselect call that can be used to convert - existing configuration into a equivalent or similar configuration - that is generated by authselect. - """ - topic = SystemFactsTopic - - profile = fields.Nullable(fields.String(default=None)) - """ - Suggested authselect profile name. - """ - - features = fields.List(fields.String()) - """ - Suggested authselect profile features. - """ - - confirm = fields.Boolean(default=True) - """ - Changes to the system requires admin confirmation. - """ - - -class AuthselectDecision(Model): - """ - Confirmation of changes suggested in Authselect model. - - If confirmed is True, the changes will be applied on RHEL-8 machine. - """ - topic = SystemInfoTopic - - confirmed = fields.Boolean(default=False) - """ - If true, authselect should be called after upgrade. 
- """ diff --git a/repos/system_upgrade/el7toel8/models/bindfacts.py b/repos/system_upgrade/el7toel8/models/bindfacts.py deleted file mode 100644 index 8d19f2f7..00000000 --- a/repos/system_upgrade/el7toel8/models/bindfacts.py +++ /dev/null @@ -1,35 +0,0 @@ -from leapp.models import fields, Model -from leapp.topics import SystemInfoTopic - - -class BindConfigIssuesModel(Model): - """ - Problematic files with statements, which are problematic - """ - - topic = SystemInfoTopic - path = fields.String() # path to problematic file - statements = fields.List(fields.String()) # list of offending statements - - -class BindFacts(Model): - """ - Whole facts related to BIND configuration - """ - - topic = SystemInfoTopic - - # Detected configuration files via includes - config_files = fields.List(fields.String()) - - # Files modified by update - modified_files = fields.List(fields.String()) - - # Only issues detected. - # unsupported dnssec-lookaside statements with old values - # found in list of files. List of files, where unsupported - # statements were found. Context not yet provided - dnssec_lookaside = fields.Nullable(fields.List(fields.Model(BindConfigIssuesModel))) - - # Missing listen-on-v6 option - listen_on_v6_missing = fields.Boolean(default=False) diff --git a/repos/system_upgrade/el7toel8/models/bootloaderconfiguration.py b/repos/system_upgrade/el7toel8/models/bootloaderconfiguration.py deleted file mode 100644 index 14405abf..00000000 --- a/repos/system_upgrade/el7toel8/models/bootloaderconfiguration.py +++ /dev/null @@ -1,25 +0,0 @@ -from leapp.models import fields, Model -from leapp.topics import SystemInfoTopic - - -class BootEntry(Model): - """ - One entry in the boot loader configuration. - - Not meant to be produced directly, only as a part of :class:`SourceBootLoaderConfiguration`. 
- """ - topic = SystemInfoTopic - - title = fields.String() - """Title of the boot entry.""" - - kernel_image = fields.String() - """Kernel image of the boot entry.""" - - -class SourceBootLoaderConfiguration(Model): - """Describes the bootloader configuration found on the source system.""" - topic = SystemInfoTopic - - entries = fields.List(fields.Model(BootEntry)) - """Boot entries available in the bootloader configuration.""" diff --git a/repos/system_upgrade/el7toel8/models/brlttymigrationdecision.py b/repos/system_upgrade/el7toel8/models/brlttymigrationdecision.py deleted file mode 100644 index cd03aa61..00000000 --- a/repos/system_upgrade/el7toel8/models/brlttymigrationdecision.py +++ /dev/null @@ -1,9 +0,0 @@ -from leapp.models import fields, Model -from leapp.topics import SystemInfoTopic - - -class BrlttyMigrationDecision(Model): - topic = SystemInfoTopic - migrate_file = fields.String() - migrate_bt = fields.Boolean() - migrate_espeak = fields.Boolean() diff --git a/repos/system_upgrade/el7toel8/models/cupschangedfeatures.py b/repos/system_upgrade/el7toel8/models/cupschangedfeatures.py deleted file mode 100644 index 97358471..00000000 --- a/repos/system_upgrade/el7toel8/models/cupschangedfeatures.py +++ /dev/null @@ -1,41 +0,0 @@ -from leapp.models import fields, Model -from leapp.topics import SystemInfoTopic - - -class CupsChangedFeatures(Model): - topic = SystemInfoTopic - - interface = fields.Boolean(default=False) - """ - True if interface scripts are used, False otherwise - """ - - digest = fields.Boolean(default=False) - """ - True if Digest/BasicDigest directive values are used, False otherwise - """ - - include = fields.Boolean(default=False) - """ - True if Include directive is used, False otherwise - """ - - certkey = fields.Boolean(default=False) - """ - True if ServerKey/ServerCertificate directives are used, False otherwise - """ - - env = fields.Boolean(default=False) - """ - True if PassEnv/SetEnv directives are used, False otherwise - """ 
- - printcap = fields.Boolean(default=False) - """ - True if PrintcapFormat directive is used, False otherwise - """ - - include_files = fields.List(fields.String(), default=['/etc/cups/cupsd.conf']) - """ - Paths to included files, contains /etc/cups/cupsd.conf by default - """ diff --git a/repos/system_upgrade/el7toel8/models/firewalldfacts.py b/repos/system_upgrade/el7toel8/models/firewalldfacts.py deleted file mode 100644 index a2e70eb3..00000000 --- a/repos/system_upgrade/el7toel8/models/firewalldfacts.py +++ /dev/null @@ -1,11 +0,0 @@ -from leapp.models import fields, Model -from leapp.topics import SystemInfoTopic - - -class FirewalldFacts(Model): - """The model contains firewalld configuration.""" - topic = SystemInfoTopic - - firewall_config_command = fields.String(default='') - ebtablesTablesInUse = fields.List(fields.String(), default=[]) - ipsetTypesInUse = fields.List(fields.String(), default=[]) diff --git a/repos/system_upgrade/el7toel8/models/installedkdeappsfacts.py b/repos/system_upgrade/el7toel8/models/installedkdeappsfacts.py deleted file mode 100644 index 464c23f9..00000000 --- a/repos/system_upgrade/el7toel8/models/installedkdeappsfacts.py +++ /dev/null @@ -1,7 +0,0 @@ -from leapp.models import fields, Model -from leapp.topics import SystemFactsTopic - - -class InstalledKdeAppsFacts(Model): - topic = SystemFactsTopic - installed_apps = fields.List(fields.String(), default=[]) diff --git a/repos/system_upgrade/el7toel8/models/multipathconffacts.py b/repos/system_upgrade/el7toel8/models/multipathconffacts.py deleted file mode 100644 index a7ec03e4..00000000 --- a/repos/system_upgrade/el7toel8/models/multipathconffacts.py +++ /dev/null @@ -1,59 +0,0 @@ -from leapp.models import fields, Model -from leapp.topics import SystemInfoTopic - - -class MultipathConfigOption(Model): - """Model representing information about a multipath configuration option""" - topic = SystemInfoTopic - - name = fields.String(default='') - value = fields.String(default='') 
- - -class MultipathConfig(Model): - """Model representing information about a multipath configuration file""" - topic = SystemInfoTopic - - pathname = fields.String() - """Config file path name""" - - default_path_checker = fields.Nullable(fields.String()) - config_dir = fields.Nullable(fields.String()) - """Values of path_checker and config_dir in the defaults section. - None if not set""" - - default_retain_hwhandler = fields.Nullable(fields.Boolean()) - default_detect_prio = fields.Nullable(fields.Boolean()) - default_detect_checker = fields.Nullable(fields.Boolean()) - reassign_maps = fields.Nullable(fields.Boolean()) - """True if retain_attached_hw_handler, detect_prio, detect_path_checker, - or reassign_maps is set to "yes" in the defaults section. False - if set to "no". None if not set.""" - - hw_str_match_exists = fields.Boolean(default=False) - ignore_new_boot_devs_exists = fields.Boolean(default=False) - new_bindings_in_boot_exists = fields.Boolean(default=False) - unpriv_sgio_exists = fields.Boolean(default=False) - detect_path_checker_exists = fields.Boolean(default=False) - overrides_hwhandler_exists = fields.Boolean(default=False) - overrides_pg_timeout_exists = fields.Boolean(default=False) - queue_if_no_path_exists = fields.Boolean(default=False) - all_devs_section_exists = fields.Boolean(default=False) - """True if hw_str_match, ignore_new_boot_devs, new_bindings_in_boot, - detect_path_checker, or unpriv_sgio is set in any section, - if queue_if_no_path is included in the features line in any - section or if hardware_handler or pg_timeout is set in the - overrides section. 
False otherwise""" - - all_devs_options = fields.List(fields.Model(MultipathConfigOption), - default=[]) - """options in an all_devs device configuration section to be converted to - an overrides section""" - - -class MultipathConfFacts(Model): - """Model representing information from multipath configuration files""" - topic = SystemInfoTopic - - configs = fields.List(fields.Model(MultipathConfig), default=[]) - """List of multipath configuration files""" diff --git a/repos/system_upgrade/el7toel8/models/ntpmigrationdecision.py b/repos/system_upgrade/el7toel8/models/ntpmigrationdecision.py deleted file mode 100644 index a0752472..00000000 --- a/repos/system_upgrade/el7toel8/models/ntpmigrationdecision.py +++ /dev/null @@ -1,8 +0,0 @@ -from leapp.models import fields, Model -from leapp.topics import SystemInfoTopic - - -class NtpMigrationDecision(Model): - topic = SystemInfoTopic - migrate_services = fields.List(fields.String()) - config_tgz64 = fields.String() diff --git a/repos/system_upgrade/el7toel8/models/pamconfiguration.py b/repos/system_upgrade/el7toel8/models/pamconfiguration.py deleted file mode 100644 index a57987ce..00000000 --- a/repos/system_upgrade/el7toel8/models/pamconfiguration.py +++ /dev/null @@ -1,29 +0,0 @@ -from leapp.models import fields, Model -from leapp.topics import SystemInfoTopic - - -class PamService(Model): - """ - Pam service description - - This model contains information about pam modules used by specific PAM - service/filename - """ - topic = SystemInfoTopic - - service = fields.String() - modules = fields.List(fields.String()) - # Should this also list includes? - - -class PamConfiguration(Model): - """ - Global PAM configuration - - This model describes separate services using PAM and what pam modules are - used in each of them. Consumer can select just the pam services he is - interested in or scan for specific configuration throughout all the services. 
- """ - topic = SystemInfoTopic - - services = fields.List(fields.Model(PamService)) diff --git a/repos/system_upgrade/el7toel8/models/partitionlayout.py b/repos/system_upgrade/el7toel8/models/partitionlayout.py deleted file mode 100644 index c6483283..00000000 --- a/repos/system_upgrade/el7toel8/models/partitionlayout.py +++ /dev/null @@ -1,28 +0,0 @@ -from leapp.models import fields, Model -from leapp.topics import SystemInfoTopic - - -class PartitionInfo(Model): - """ - Information about a single partition. - """ - topic = SystemInfoTopic - - part_device = fields.String() - """ Partition device """ - - start_offset = fields.Integer() - """ Partition start - offset from the start of the block device in bytes """ - - -class GRUBDevicePartitionLayout(Model): - """ - Information about partition layout of a GRUB device. - """ - topic = SystemInfoTopic - - device = fields.String() - """ GRUB device """ - - partitions = fields.List(fields.Model(PartitionInfo)) - """ List of partitions present on the device """ diff --git a/repos/system_upgrade/el7toel8/models/quaggatofrrfacts.py b/repos/system_upgrade/el7toel8/models/quaggatofrrfacts.py deleted file mode 100644 index d33cfde2..00000000 --- a/repos/system_upgrade/el7toel8/models/quaggatofrrfacts.py +++ /dev/null @@ -1,15 +0,0 @@ -from leapp.models import fields, Model -from leapp.topics import SystemInfoTopic - - -class QuaggaToFrrFacts(Model): - """ - Model for quagga to frr actors. - - A list of configuration files used by quagga. This list is used to add yes/no to - /etc/frr/daemons file. It indicates which daemons from frr should be run. 
- """ - topic = SystemInfoTopic - - active_daemons = fields.List(fields.String()) - enabled_daemons = fields.List(fields.String()) diff --git a/repos/system_upgrade/el7toel8/models/removedpammodules.py b/repos/system_upgrade/el7toel8/models/removedpammodules.py deleted file mode 100644 index 2e7ace8a..00000000 --- a/repos/system_upgrade/el7toel8/models/removedpammodules.py +++ /dev/null @@ -1,15 +0,0 @@ -from leapp.models import fields, Model -from leapp.topics import SystemFactsTopic - - -class RemovedPAMModules(Model): - """ - PAM modules that were removed from RHEL8 but are in current configuration. - """ - topic = SystemFactsTopic - - modules = fields.List(fields.String()) - """ - List of PAM modules that were detected in current configuration but - are no longer available in RHEL8. - """ diff --git a/repos/system_upgrade/el7toel8/models/sctpconfigmodel.py b/repos/system_upgrade/el7toel8/models/sctpconfigmodel.py deleted file mode 100644 index 94f9bbcc..00000000 --- a/repos/system_upgrade/el7toel8/models/sctpconfigmodel.py +++ /dev/null @@ -1,7 +0,0 @@ -from leapp.models import fields, Model -from leapp.topics import SCTPConfigTopic - - -class SCTPConfig(Model): - topic = SCTPConfigTopic - wanted = fields.Boolean(default=False) diff --git a/repos/system_upgrade/el7toel8/models/sendmailmigrationdecision.py b/repos/system_upgrade/el7toel8/models/sendmailmigrationdecision.py deleted file mode 100644 index 5044fdce..00000000 --- a/repos/system_upgrade/el7toel8/models/sendmailmigrationdecision.py +++ /dev/null @@ -1,7 +0,0 @@ -from leapp.models import fields, Model -from leapp.topics import SystemInfoTopic - - -class SendmailMigrationDecision(Model): - topic = SystemInfoTopic - migrate_files = fields.List(fields.String()) diff --git a/repos/system_upgrade/el7toel8/models/spamassassinfacts.py b/repos/system_upgrade/el7toel8/models/spamassassinfacts.py deleted file mode 100644 index c0755aed..00000000 --- a/repos/system_upgrade/el7toel8/models/spamassassinfacts.py +++ 
/dev/null @@ -1,23 +0,0 @@ -from leapp.models import fields, Model -from leapp.topics import SystemInfoTopic - - -class SpamassassinFacts(Model): - topic = SystemInfoTopic - - spamc_ssl_argument = fields.Nullable(fields.String()) - """ - SSL version specified by the --ssl option in the spamc config file, or None - if no value is given. - """ - - spamd_ssl_version = fields.Nullable(fields.String()) - """ - SSL version specified by the --ssl-version in the spamassassin sysconfig file, - or None if no value is given. - """ - - service_overriden = fields.Boolean() - """ - True if spamassassin.service is overridden, else False. - """ diff --git a/repos/system_upgrade/el7toel8/models/sssd.py b/repos/system_upgrade/el7toel8/models/sssd.py deleted file mode 100644 index ca1a8741..00000000 --- a/repos/system_upgrade/el7toel8/models/sssd.py +++ /dev/null @@ -1,32 +0,0 @@ -from leapp.models import fields, Model -from leapp.topics import SystemInfoTopic - - -class SSSDDomainConfig(Model): - """ - Facts found about an SSSD domain. - """ - topic = SystemInfoTopic - - name = fields.String() - """ - Domain name. - """ - - options = fields.List(fields.String(), default=list()) - """ - List of options related to this domain that affects the upgrade process. - """ - - -class SSSDConfig(Model): - """ - List of SSSD domains and their configuration that is related to the - upgrade process. - """ - topic = SystemInfoTopic - - domains = fields.List(fields.Model(SSSDDomainConfig), default=list()) - """ - SSSD Domains configuration. - """ diff --git a/repos/system_upgrade/el7toel8/models/tcpwrappersfacts.py b/repos/system_upgrade/el7toel8/models/tcpwrappersfacts.py deleted file mode 100644 index a0e65049..00000000 --- a/repos/system_upgrade/el7toel8/models/tcpwrappersfacts.py +++ /dev/null @@ -1,24 +0,0 @@ -from leapp.models import fields, Model -from leapp.topics import SystemInfoTopic - - -class DaemonList(Model): - """ - A split up representation of a daemon_list (see host_access(5)). 
Example value of the - 'value' attribute: ["ALL", "EXCEPT", "in.fingerd"] - """ - topic = SystemInfoTopic - - value = fields.List(fields.String()) - - -class TcpWrappersFacts(Model): - """ - A representation of tcp_wrappers configuration. Currently it only contains a list - of daemon lists that are present in the tcp_wrappers configuration files. From this - you can extract information on whether there is any configuration that applies to - a specific daemon (see leapp.libraries.common.tcpwrappersutils.config_applies_to_daemon()). - """ - topic = SystemInfoTopic - - daemon_lists = fields.List(fields.Model(DaemonList)) diff --git a/repos/system_upgrade/el7toel8/models/vsftpdfacts.py b/repos/system_upgrade/el7toel8/models/vsftpdfacts.py deleted file mode 100644 index 3e88399d..00000000 --- a/repos/system_upgrade/el7toel8/models/vsftpdfacts.py +++ /dev/null @@ -1,31 +0,0 @@ -from leapp.models import fields, Model -from leapp.topics import SystemInfoTopic - - -class VsftpdConfig(Model): - """ - Model representing some aspects of a vsftpd configuration file. - - The attributes representing the state of configuration options are nullable, so that - they can represent the real state of the option in the file: if an option is set to "YES" - in the configuration file, the corresponding attribute is set to True; if the option - is set to NO, the attribute is set to False; if the option is not present in the config - file at all, the attribute is set to None. 
- """ - topic = SystemInfoTopic - - path = fields.String() - """Path to the vsftpd configuration file""" - strict_ssl_read_eof = fields.Nullable(fields.Boolean()) - """Represents the state of the strict_ssl_read_eof option in the config file""" - tcp_wrappers = fields.Nullable(fields.Boolean()) - """Represents the state of the tcp_wrappers option in the config file""" - - -class VsftpdFacts(Model): - topic = SystemInfoTopic - - default_config_hash = fields.Nullable(fields.String()) - """SHA1 hash of the /etc/vsftpd/vsftpd.conf file, if it exists, None otherwise""" - configs = fields.List(fields.Model(VsftpdConfig)) - """List of vsftpd configuration files""" diff --git a/repos/system_upgrade/el7toel8/tools/handle-postgresql-legacy-actions b/repos/system_upgrade/el7toel8/tools/handle-postgresql-legacy-actions deleted file mode 100755 index ac154f6f..00000000 --- a/repos/system_upgrade/el7toel8/tools/handle-postgresql-legacy-actions +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/bash -e - -# lets assume the new PostgreSQL package will just fix this -if [ -L /usr/libexec/initscripts/legacy-actions/postgresql ]; then - rm -f /usr/libexec/initscripts/legacy-actions/postgresql -fi diff --git a/repos/system_upgrade/el7toel8/tools/handleyumconfig b/repos/system_upgrade/el7toel8/tools/handleyumconfig deleted file mode 100755 index 67dc94d8..00000000 --- a/repos/system_upgrade/el7toel8/tools/handleyumconfig +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/bash -e - -# just in case of hidden files.. not sure why would someone do that, it's more -# like forgotten cache file possibility, but rather do that.. 
-shopt -s dotglob - -is_dir_empty() { - test -z "$(ls -A $1)" -} - -handle_dir() { - # Move all files from $1 to $2 when the /etc/yum/$1 is not empty - # and not already a link - # Then remove the $1 directory and relink it to $2 - # param $1: dirname under /etc/yum path - # param $2: dirname under /etc/dnf path - if [ "$(readlink /etc/yum/$1)" == "../dnf/$2" ]; then - return - fi - if ! is_dir_empty "/etc/yum/$1"; then - mv /etc/yum/$1/* /etc/dnf/$2/ - fi - - rm -rf /etc/yum/$1 - - #relink - ln -s ../dnf/$2 /etc/yum/$1 - - return 0 -} - - -# assume the directories are not removed by user.. -handle_dir pluginconf.d plugins -handle_dir protected.d protected.d -handle_dir vars vars diff --git a/repos/system_upgrade/el8toel9/actors/addarmbootloaderworkaround/libraries/addupgradebootloader.py b/repos/system_upgrade/el8toel9/actors/addarmbootloaderworkaround/libraries/addupgradebootloader.py index c076fe6b..2455a2f6 100644 --- a/repos/system_upgrade/el8toel9/actors/addarmbootloaderworkaround/libraries/addupgradebootloader.py +++ b/repos/system_upgrade/el8toel9/actors/addarmbootloaderworkaround/libraries/addupgradebootloader.py @@ -14,6 +14,22 @@ from leapp.libraries.common.grub import ( from leapp.libraries.stdlib import api, CalledProcessError, run from leapp.models import ArmWorkaroundEFIBootloaderInfo, EFIBootEntry, TargetUserSpaceInfo +dirname = { + 'AlmaLinux': 'almalinux', + 'CentOS Linux': 'centos', + 'CentOS Stream': 'centos', + 'Oracle Linux Server': 'redhat', + 'Red Hat Enterprise Linux': 'redhat', + 'Rocky Linux': 'rocky', + 'Scientific Linux': 'redhat', +} + +with open('/etc/system-release', 'r') as sr: + release_line = next(line for line in sr if 'release' in line) + distro = release_line.split(' release ', 1)[0] + +distro_dir = dirname.get(distro, 'default') + UPGRADE_EFI_ENTRY_LABEL = 'Leapp Upgrade' ARM_SHIM_PACKAGE_NAME = 'shim-aa64' @@ -21,7 +37,7 @@ ARM_GRUB_PACKAGE_NAME = 'grub2-efi-aa64' EFI_MOUNTPOINT = '/boot/efi/' LEAPP_EFIDIR_CANONICAL_PATH = 
os.path.join(EFI_MOUNTPOINT, 'EFI/leapp/') -RHEL_EFIDIR_CANONICAL_PATH = os.path.join(EFI_MOUNTPOINT, 'EFI/redhat/') +RHEL_EFIDIR_CANONICAL_PATH = os.path.join(EFI_MOUNTPOINT, 'EFI/', distro_dir) UPGRADE_BLS_DIR = '/boot/upgrade-loader' CONTAINER_DOWNLOAD_DIR = '/tmp_pkg_download_dir' diff --git a/repos/system_upgrade/el8toel9/actors/cloud/checkvalidgrubcfghybrid/actor.py b/repos/system_upgrade/el8toel9/actors/cloud/checkvalidgrubcfghybrid/actor.py new file mode 100644 index 00000000..14668e42 --- /dev/null +++ b/repos/system_upgrade/el8toel9/actors/cloud/checkvalidgrubcfghybrid/actor.py @@ -0,0 +1,32 @@ +from leapp.actors import Actor +from leapp.libraries.actor import checkvalidgrubcfghybrid +from leapp.models import FirmwareFacts, HybridImageAzure +from leapp.reporting import Report +from leapp.tags import ChecksPhaseTag, IPUWorkflowTag + + +class CheckValidGrubConfigHybrid(Actor): + """ + Check potential for boot failures in Azure Gen1 VMs due to invalid grubcfg + + This actor addresses the issue where the `/boot/grub2/grub.cfg` file is + overwritten during the upgrade process by an old RHEL7 configuration + leftover on the system, causing the system to fail to boot. + + The problem occurs on hybrid Azure images, which support both UEFI and + Legacy systems. The issue is caused by one of the scriplets in `grub-efi` + which overwrites during the upgrade current configuration in + `/boot/grub2/grub.cfg` by an old configuration from + `/boot/efi/EFI/redhat/grub.cfg`. + + The issue is detected specifically to Azure hybrid cloud systems. 
+ + """ + + name = 'check_valid_grubcfg_hybrid' + consumes = (FirmwareFacts, HybridImageAzure,) + produces = (Report,) + tags = (ChecksPhaseTag, IPUWorkflowTag) + + def process(self): + checkvalidgrubcfghybrid.process() diff --git a/repos/system_upgrade/el8toel9/actors/cloud/checkvalidgrubcfghybrid/libraries/checkvalidgrubcfghybrid.py b/repos/system_upgrade/el8toel9/actors/cloud/checkvalidgrubcfghybrid/libraries/checkvalidgrubcfghybrid.py new file mode 100644 index 00000000..374772f5 --- /dev/null +++ b/repos/system_upgrade/el8toel9/actors/cloud/checkvalidgrubcfghybrid/libraries/checkvalidgrubcfghybrid.py @@ -0,0 +1,30 @@ +from leapp import reporting +from leapp.libraries.stdlib import api +from leapp.models import HybridImageAzure + + +def process(): + hybrid_image = next(api.consume(HybridImageAzure), None) + + if hybrid_image: + reporting.create_report([ + reporting.Title( + 'Azure hybrid (BIOS/EFI) image detected. The GRUB configuration might be regenerated.' + ), + reporting.Summary( + 'Leapp detected that the system is running on Azure cloud and is booted using BIOS. ' + 'While upgrading from older systems (i.e. RHEL 7) on such systems' + 'it is possible that the system might end up with invalid GRUB configuration, ' + 'as `/boot/grub2/grub.cfg` might be overwritten by an old configuration from ' + '`/boot/efi/EFI/redhat/grub.cfg`, which might cause the system to fail to boot. ' + + 'Please ensure that the system is able to boot with both of these ' + 'configurations. 
If an invalid configuration is detected during upgrade, ' + 'it will be regenerated automatically using `grub2-mkconfig.`' + ), + reporting.Severity(reporting.Severity.HIGH), + reporting.Groups([ + reporting.Groups.PUBLIC_CLOUD, + reporting.Groups.BOOT + ]), + ]) diff --git a/repos/system_upgrade/el8toel9/actors/cloud/checkvalidgrubcfghybrid/tests/test_checkvalidgrubcfghybrid.py b/repos/system_upgrade/el8toel9/actors/cloud/checkvalidgrubcfghybrid/tests/test_checkvalidgrubcfghybrid.py new file mode 100644 index 00000000..3fd9a53c --- /dev/null +++ b/repos/system_upgrade/el8toel9/actors/cloud/checkvalidgrubcfghybrid/tests/test_checkvalidgrubcfghybrid.py @@ -0,0 +1,25 @@ +import pytest + +from leapp import reporting +from leapp.libraries.actor import checkvalidgrubcfghybrid +from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, produce_mocked +from leapp.libraries.stdlib import api +from leapp.models import HybridImageAzure + + +@pytest.mark.parametrize('is_hybrid', [True, False]) +def test_check_invalid_grubcfg_hybrid(monkeypatch, is_hybrid): + + monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) + + msgs = [HybridImageAzure()] if is_hybrid else [] + monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch='x86_64', msgs=msgs)) + monkeypatch.setattr(api, "produce", produce_mocked()) + + checkvalidgrubcfghybrid.process() + + if is_hybrid: + assert reporting.create_report.called == 1 + assert 'regenerated' in reporting.create_report.report_fields['title'] + else: + assert reporting.create_report.called == 0 diff --git a/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/actor.py b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/actor.py new file mode 100644 index 00000000..a350c7a0 --- /dev/null +++ b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/actor.py @@ -0,0 +1,30 @@ +from leapp.actors import Actor +from leapp.libraries.actor import 
ensurevalidgrubcfghybrid +from leapp.models import HybridImageAzure +from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag + + +class EnsureValidGrubcfgHybrid(Actor): + """ + Resolve boot failures in Azure Gen1 VMs during upgrades from RHEL 7 to RHEL 8 to RHEL 9. + + If old configuration is detected, this actor regenerates the grub + configuration using `grub2-mkconfig -o /boot/grub2/grub.cfg` after + installing rpms to ensure the correct boot configuration is in place. + + Old configuration is detected by looking for a menuentry corresponding to a + kernel from RHEL 7 which should not be present on RHEL 8 systems. + + The fix is applied specifically to Azure hybrid cloud systems. + + See also CheckValidGrubConfigHybrid actor. + + """ + + name = 'ensure_valid_grubcfg_hybrid' + consumes = (HybridImageAzure,) + produces = () + tags = (ApplicationsPhaseTag, IPUWorkflowTag) + + def process(self): + ensurevalidgrubcfghybrid.process() diff --git a/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/libraries/ensurevalidgrubcfghybrid.py b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/libraries/ensurevalidgrubcfghybrid.py new file mode 100644 index 00000000..f94cf67b --- /dev/null +++ b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/libraries/ensurevalidgrubcfghybrid.py @@ -0,0 +1,66 @@ +import re + +from leapp.exceptions import StopActorExecutionError +from leapp.libraries.common.config.architecture import ARCH_ACCEPTED +from leapp.libraries.stdlib import api, CalledProcessError, run +from leapp.models import HybridImageAzure + +GRUB_CFG_PATH = '/boot/grub2/grub.cfg' + +MATCH_ARCH = r'({})'.format('|'.join(ARCH_ACCEPTED)) +MATCH_RHEL7_KERNEL_VERSION = r"\d+\.\d+\.\d+-\d+(\.\d+)*\.el7\.{}".format(MATCH_ARCH) +MATCH_RHEL7_KERNEL_DEFINITION = r"vmlinuz-{}".format(MATCH_RHEL7_KERNEL_VERSION) + + +def process(): + if not _is_hybrid_image(): + api.current_logger().info('System is not a hybrid image. 
Skipping.') + return + + grubcfg = _read_grubcfg() + if _is_grubcfg_invalid(grubcfg): + _run_grub2_mkconfig() + + +def _is_hybrid_image(): + return next(api.consume(HybridImageAzure), None) is not None + + +def _read_grubcfg(): + api.current_logger().debug('Reading {}:'.format(GRUB_CFG_PATH)) + with open(GRUB_CFG_PATH, 'r') as fin: + grubcfg = fin.read() + + api.current_logger().debug(grubcfg) + return grubcfg + + +def _is_grubcfg_invalid(grubcfg): + return _contains_rhel7_kernel_definition(grubcfg) + + +def _contains_rhel7_kernel_definition(grubcfg): + api.current_logger().debug("Looking for RHEL7 kernel version ...") + + match = re.search(MATCH_RHEL7_KERNEL_DEFINITION, grubcfg) + + api.current_logger().debug( + "Matched: {}".format(match.group() if match else "[NO MATCH]") + ) + + return match is not None + + +def _run_grub2_mkconfig(): + api.current_logger().info("Regenerating {}".format(GRUB_CFG_PATH)) + + try: + run([ + 'grub2-mkconfig', + '-o', + GRUB_CFG_PATH + ]) + except CalledProcessError as err: + msg = 'Could not regenerate {}: {}'.format(GRUB_CFG_PATH, str(err)) + api.current_logger().error(msg) + raise StopActorExecutionError(msg) diff --git a/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/tests/files/invalid_grub.cfg b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/tests/files/invalid_grub.cfg new file mode 100644 index 00000000..58f55c53 --- /dev/null +++ b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/tests/files/invalid_grub.cfg @@ -0,0 +1,51 @@ + +# Created by osbuild + +set timeout=10 + +# load the grubenv file +load_env + +# selection of the next boot entry +if [ "${next_entry}" ] ; then + set default="${next_entry}" + set next_entry= + save_env next_entry + set boot_once=true +else + set default="${saved_entry}" +fi + +if [ "${prev_saved_entry}" ]; then + set saved_entry="${prev_saved_entry}" + save_env saved_entry + set prev_saved_entry= + save_env prev_saved_entry + set 
boot_once=true +fi + +function savedefault { + if [ -z "${boot_once}" ]; then + saved_entry="${chosen}" + save_env saved_entry + fi +} + +serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1 +terminal_input serial console +terminal_output serial console + +menuentry 'Red Hat Enterprise Linux Server (3.10.0-1160.119.1.el7.x86_64) 7.9 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted --id 'gnulinux-3.10.0-1160.99.1.el7.x86_64-advanced-76a22bf4-f153-4541-b6c7-0332c0dfaeac' { + insmod all_video + set gfxpayload=keep + search --no-floppy --set=root --fs-uuid 61779359-8d11-49ba-bc9d-8d038ee4b108 + linuxefi /vmlinuz-3.10.0-1160.119.1.el7.x86_64 root=UUID=d3c9a2bd-7ffb-4113-9b8f-234c13b18274 ro crashkernel=auto console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300 scsi_mod.use_blk_mq=y LANG=en_US.UTF-8 + initrdefi /initramfs-3.10.0-1160.119.1.el7.x86_64.img +} +menuentry 'Red Hat Enterprise Linux (3.10.0-1160.99.1.el7.x86_64) 7.9 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted --id 'gnulinux-3.10.0-1160.99.1.el7.x86_64-advanced-76a22bf4-f153-4541-b6c7-0332c0dfaeac' { + insmod all_video + set gfxpayload=keep + search --no-floppy --set=root --fs-uuid 61779359-8d11-49ba-bc9d-8d038ee4b108 + linuxefi /vmlinuz-3.10.0-1160.99.1.el7.x86_64 root=UUID=d3c9a2bd-7ffb-4113-9b8f-234c13b18274 ro crashkernel=auto console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300 scsi_mod.use_blk_mq=y + initrdefi /initramfs-3.10.0-1160.99.1.el7.x86_64.img +} diff --git a/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/tests/files/valid_grub.cfg b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/tests/files/valid_grub.cfg new file mode 100644 index 00000000..8192665e --- /dev/null +++ b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/tests/files/valid_grub.cfg @@ -0,0 +1,195 @@ +# +# DO NOT EDIT THIS FILE +# +# It is automatically generated by grub2-mkconfig using 
templates +# from /etc/grub.d and settings from /etc/default/grub +# + +### BEGIN /etc/grub.d/00_header ### +set pager=1 + +if [ -f ${config_directory}/grubenv ]; then + load_env -f ${config_directory}/grubenv +elif [ -s $prefix/grubenv ]; then + load_env +fi +if [ "${next_entry}" ] ; then + set default="${next_entry}" + set next_entry= + save_env next_entry + set boot_once=true +else + set default="${saved_entry}" +fi + +if [ x"${feature_menuentry_id}" = xy ]; then + menuentry_id_option="--id" +else + menuentry_id_option="" +fi + +export menuentry_id_option + +if [ "${prev_saved_entry}" ]; then + set saved_entry="${prev_saved_entry}" + save_env saved_entry + set prev_saved_entry= + save_env prev_saved_entry + set boot_once=true +fi + +function savedefault { + if [ -z "${boot_once}" ]; then + saved_entry="${chosen}" + save_env saved_entry + fi +} + +function load_video { + if [ x$feature_all_video_module = xy ]; then + insmod all_video + else + insmod efi_gop + insmod efi_uga + insmod ieee1275_fb + insmod vbe + insmod vga + insmod video_bochs + insmod video_cirrus + fi +} + +serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1 +terminal_input serial console +terminal_output serial console +if [ x$feature_timeout_style = xy ] ; then + set timeout_style=countdown + set timeout=10 +# Fallback hidden-timeout code in case the timeout_style feature is +# unavailable. 
+elif sleep --interruptible 10 ; then + set timeout=0 +fi +### END /etc/grub.d/00_header ### + +### BEGIN /etc/grub.d/00_tuned ### +set tuned_params="" +set tuned_initrd="" +### END /etc/grub.d/00_tuned ### + +### BEGIN /etc/grub.d/01_users ### +if [ -f ${prefix}/user.cfg ]; then + source ${prefix}/user.cfg + if [ -n "${GRUB2_PASSWORD}" ]; then + set superusers="root" + export superusers + password_pbkdf2 root ${GRUB2_PASSWORD} + fi +fi +### END /etc/grub.d/01_users ### + +### BEGIN /etc/grub.d/08_fallback_counting ### +insmod increment +# Check if boot_counter exists and boot_success=0 to activate this behaviour. +if [ -n "${boot_counter}" -a "${boot_success}" = "0" ]; then + # if countdown has ended, choose to boot rollback deployment, + # i.e. default=1 on OSTree-based systems. + if [ "${boot_counter}" = "0" -o "${boot_counter}" = "-1" ]; then + set default=1 + set boot_counter=-1 + # otherwise decrement boot_counter + else + decrement boot_counter + fi + save_env boot_counter +fi +### END /etc/grub.d/08_fallback_counting ### + +### BEGIN /etc/grub.d/10_linux ### +insmod part_gpt +insmod xfs +set root='hd0,gpt2' +if [ x$feature_platform_search_hint = xy ]; then + search --no-floppy --fs-uuid --set=root --hint-bios=hd0,gpt2 --hint-efi=hd0,gpt2 --hint-baremetal=ahci0,gpt2 61779359-8d11-49ba-bc9d-8d038ee4b108 +else + search --no-floppy --fs-uuid --set=root 61779359-8d11-49ba-bc9d-8d038ee4b108 +fi +insmod part_gpt +insmod xfs +set boot='hd0,gpt2' +if [ x$feature_platform_search_hint = xy ]; then + search --no-floppy --fs-uuid --set=boot --hint-bios=hd0,gpt2 --hint-efi=hd0,gpt2 --hint-baremetal=ahci0,gpt2 61779359-8d11-49ba-bc9d-8d038ee4b108 +else + search --no-floppy --fs-uuid --set=boot 61779359-8d11-49ba-bc9d-8d038ee4b108 +fi + +# This section was generated by a script. Do not modify the generated file - all changes +# will be lost the next time file is regenerated. Instead edit the BootLoaderSpec files. 
+# +# The blscfg command parses the BootLoaderSpec files stored in /boot/loader/entries and +# populates the boot menu. Please refer to the Boot Loader Specification documentation +# for the files format: https://www.freedesktop.org/wiki/Specifications/BootLoaderSpec/. + +# The kernelopts variable should be defined in the grubenv file. But to ensure that menu +# entries populated from BootLoaderSpec files that use this variable work correctly even +# without a grubenv file, define a fallback kernelopts variable if this has not been set. +# +# The kernelopts variable in the grubenv file can be modified using the grubby tool or by +# executing the grub2-mkconfig tool. For the latter, the values of the GRUB_CMDLINE_LINUX +# and GRUB_CMDLINE_LINUX_DEFAULT options from /etc/default/grub file are used to set both +# the kernelopts variable in the grubenv file and the fallback kernelopts variable. +if [ -z "${kernelopts}" ]; then + set kernelopts="root=/dev/mapper/rootvg-rootlv ro ro crashkernel=auto console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300 scsi_mod.use_blk_mq=y " +fi + +insmod blscfg +blscfg +### END /etc/grub.d/10_linux ### + +### BEGIN /etc/grub.d/10_reset_boot_success ### +# Hiding the menu is ok if last boot was ok or if this is a first boot attempt to boot the entry +if [ "${boot_success}" = "1" -o "${boot_indeterminate}" = "1" ]; then + set menu_hide_ok=1 +else + set menu_hide_ok=0 +fi +# Reset boot_indeterminate after a successful boot +if [ "${boot_success}" = "1" ] ; then + set boot_indeterminate=0 +# Avoid boot_indeterminate causing the menu to be hidden more then once +elif [ "${boot_indeterminate}" = "1" ]; then + set boot_indeterminate=2 +fi +# Reset boot_success for current boot +set boot_success=0 +save_env boot_success boot_indeterminate +### END /etc/grub.d/10_reset_boot_success ### + +### BEGIN /etc/grub.d/12_menu_auto_hide ### +### END /etc/grub.d/12_menu_auto_hide ### + +### BEGIN /etc/grub.d/20_linux_xen ### +### END 
import os

import pytest

from leapp.exceptions import StopActorExecutionError
from leapp.libraries.actor import ensurevalidgrubcfghybrid
from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked
from leapp.libraries.stdlib import api, CalledProcessError
from leapp.models import HybridImageAzure

# Directory of this test file; the grub.cfg fixtures live in ./files next to it.
CUR_DIR = os.path.dirname(os.path.abspath(__file__))


def raise_call_error(args=None):
    """Raise a fake CalledProcessError, mimicking a failed `run` invocation."""
    raise CalledProcessError(
        message='A Leapp Command Error occurred.',
        command=args,
        result={'signal': None, 'exit_code': 1, 'pid': 0, 'stdout': 'fake', 'stderr': 'fake'}
    )


class run_mocked(object):
    """Callable mock for ensurevalidgrubcfghybrid.run that records invocations."""

    def __init__(self, raise_err=False):
        # Number of times the mock has been called.
        self.called = 0
        # Positional arguments of each call, in order.
        self.args = []
        # When True, every call raises CalledProcessError (simulates command failure).
        self.raise_err = raise_err

    def __call__(self, *args):
        self.called += 1
        self.args.append(args)
        if self.raise_err:
            raise_call_error(args)


def test_not_hybrid_image(monkeypatch):
    """
    Skip when system is not a hybrid.
    """

    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[]))
    monkeypatch.setattr(api, 'current_logger', logger_mocked())
    monkeypatch.setattr(ensurevalidgrubcfghybrid, 'run', run_mocked(raise_err=False))

    ensurevalidgrubcfghybrid.process()

    # No HybridImageAzure message consumed -> actor logs and does nothing.
    assert api.current_logger.infomsg[0].startswith('System is not a hybrid image')
    assert ensurevalidgrubcfghybrid.run.called == 0


@pytest.mark.parametrize("is_invalid", [True, False])
def test_is_grubcfg_valid(monkeypatch, is_invalid):
    """Check that _is_grubcfg_invalid classifies both fixture files correctly."""

    grubcfg_filename = ('invalid' if is_invalid else 'valid') + '_grub.cfg'
    grubcfg_filepath = os.path.join(CUR_DIR, 'files', grubcfg_filename)
    with open(grubcfg_filepath, 'r') as fin:
        grubcfg = fin.read()

    assert ensurevalidgrubcfghybrid._is_grubcfg_invalid(grubcfg) == is_invalid


def test_valid_grubcfg(monkeypatch):
    """
    Test valid configuration does not trigger grub2-mkconfig
    """

    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[HybridImageAzure()]))
    monkeypatch.setattr(api, 'current_logger', logger_mocked())
    monkeypatch.setattr(ensurevalidgrubcfghybrid, 'run', run_mocked(raise_err=False))

    grubcfg_filepath = os.path.join(CUR_DIR, 'files', 'valid_grub.cfg')
    with open(grubcfg_filepath, 'r') as fin:
        grubcfg = fin.read()

    monkeypatch.setattr(ensurevalidgrubcfghybrid, '_read_grubcfg', lambda: grubcfg)

    ensurevalidgrubcfghybrid.process()

    assert ensurevalidgrubcfghybrid.run.called == 0


def test_invalid_grubcfg(monkeypatch):
    """
    Test invalid configuration triggers grub2-mkconfig
    """

    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[HybridImageAzure()]))
    monkeypatch.setattr(api, 'current_logger', logger_mocked())
    monkeypatch.setattr(ensurevalidgrubcfghybrid, 'run', run_mocked(raise_err=False))

    grubcfg_filepath = os.path.join(CUR_DIR, 'files', 'invalid_grub.cfg')
    with open(grubcfg_filepath, 'r') as fin:
        grubcfg = fin.read()

    monkeypatch.setattr(ensurevalidgrubcfghybrid, '_read_grubcfg', lambda: grubcfg)

    ensurevalidgrubcfghybrid.process()

    assert ensurevalidgrubcfghybrid.run.called == 1
    assert any(msg.startswith('Regenerating') for msg in api.current_logger.infomsg)


def test_run_error(monkeypatch):
    """
    Test that a failing grub2-mkconfig run raises StopActorExecutionError.

    NOTE(review): the original docstring was a copy-paste of
    test_invalid_grubcfg; this test actually verifies error propagation
    when the regeneration command fails.
    """

    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[HybridImageAzure()]))
    monkeypatch.setattr(api, 'current_logger', logger_mocked())
    monkeypatch.setattr(ensurevalidgrubcfghybrid, 'run', run_mocked(raise_err=True))

    grubcfg_filepath = os.path.join(CUR_DIR, 'files', 'invalid_grub.cfg')
    with open(grubcfg_filepath, 'r') as fin:
        grubcfg = fin.read()

    monkeypatch.setattr(ensurevalidgrubcfghybrid, '_read_grubcfg', lambda: grubcfg)

    with pytest.raises(StopActorExecutionError):
        ensurevalidgrubcfghybrid.process()

    assert ensurevalidgrubcfghybrid.run.called == 1
    assert any(
        msg.startswith('Could not regenerate')
        for msg in api.current_logger.err
    )
""" - Actor checking for presence of PostgreSQL installation. + Actor checking for presence of MySQL installation. Provides user with information related to upgrading systems - with PostgreSQL installed. + with MySQL installed. """ - name = 'postgresql_check' + name = 'mysql_check' consumes = (DistributionSignedRPM,) produces = (Report,) tags = (ChecksPhaseTag, IPUWorkflowTag) def process(self): - report_installed_packages() + process() diff --git a/repos/system_upgrade/el8toel9/actors/mysqlcheck/libraries/mysqlcheck.py b/repos/system_upgrade/el8toel9/actors/mysqlcheck/libraries/mysqlcheck.py new file mode 100644 index 00000000..b446d9c4 --- /dev/null +++ b/repos/system_upgrade/el8toel9/actors/mysqlcheck/libraries/mysqlcheck.py @@ -0,0 +1,51 @@ +from leapp import reporting +from leapp.libraries.common.rpms import has_package +from leapp.models import DistributionSignedRPM + + +def _report_server_installed(): + """ + Create report on mysql-server package installation detection. + + Should remind user about present MySQL server package + installation, warn them about necessary additional steps, and + redirect them to online documentation for the upgrade process. + """ + reporting.create_report([ + reporting.Title('Further action to upgrade MySQL might be needed'), + reporting.Summary( + 'The MySQL server component will be reinstalled during the upgrade with a RHEL 9' + ' version. Since RHEL 9 includes the same MySQL version 8.0 by default, no action' + ' should be required and there should not be any compatibility issues. However,' + ' it is still advisable to follow the documentation on this topic for up to date' + ' recommendations.' + ' Keep in mind that MySQL 8.0, which is the default in RHEL 9, will reach the end' + ' of \'Extended Support\' in April 2026. As such it is advisable to upgrade to' + ' MySQL version 8.4, which is provided via a module. 
MySQL 8.4 is also the' + ' default version for RHEL 10, therefore having MySQL 8.4 on the RHEL 9 system' + ' will make a future upgrade process to RHEL 10 smoother.' + ), + reporting.Severity(reporting.Severity.MEDIUM), + reporting.Groups([reporting.Groups.SERVICES]), + reporting.ExternalLink(title='Migrating MySQL databases from RHEL 8 to RHEL 9', + url='https://access.redhat.com/articles/7099753'), + reporting.RelatedResource('package', 'mysql-server'), + reporting.Remediation(hint=( + 'Dump or backup your data before proceeding with the upgrade ' + 'and consult attached article ' + '\'Migrating MySQL databases from RHEL 8 to RHEL 9\' ' + 'with up to date recommended steps before and after the upgrade.' + )), + ]) + + +def process(): + """ + Create reports according to detected MySQL packages. + + Create the report if the mysql-server rpm (RH signed) is installed. + """ + has_server = has_package(DistributionSignedRPM, 'mysql-server') + + if has_server: + _report_server_installed() diff --git a/repos/system_upgrade/el7toel8/actors/postgresqlcheck/tests/test_postgresqlcheck.py b/repos/system_upgrade/el8toel9/actors/mysqlcheck/tests/test_mysqlcheck.py similarity index 59% rename from repos/system_upgrade/el7toel8/actors/postgresqlcheck/tests/test_postgresqlcheck.py rename to repos/system_upgrade/el8toel9/actors/mysqlcheck/tests/test_mysqlcheck.py index 559c8f2d..385f4dfd 100644 --- a/repos/system_upgrade/el7toel8/actors/postgresqlcheck/tests/test_postgresqlcheck.py +++ b/repos/system_upgrade/el8toel9/actors/mysqlcheck/tests/test_mysqlcheck.py @@ -1,7 +1,7 @@ import pytest from leapp import reporting -from leapp.libraries.actor.postgresqlcheck import report_installed_packages +from leapp.libraries.actor.mysqlcheck import process from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked from leapp.libraries.stdlib import api from leapp.models import DistributionSignedRPM, RPM @@ -26,12 +26,11 @@ def _generate_rpm_with_name(name): 
arch='noarch') -@pytest.mark.parametrize('has_server,has_contrib', [ - (True, True), # both server, contrib - (True, False), # only server - (False, False), # neither +@pytest.mark.parametrize('has_server', [ + (True), # with server + (False), # without server ]) -def test_actor_execution(monkeypatch, has_server, has_contrib): +def test_actor_execution(monkeypatch, has_server): """ Parametrized helper function for test_actor_* functions. @@ -40,8 +39,7 @@ def test_actor_execution(monkeypatch, has_server, has_contrib): according to set arguments. Parameters: - has_server (bool): postgresql-server installed - has_contrib (bool): postgresql-contrib installed + has_server (bool): mysql-server installed """ # Couple of random packages @@ -49,25 +47,19 @@ def test_actor_execution(monkeypatch, has_server, has_contrib): _generate_rpm_with_name('htop')] if has_server: - # Add postgresql-server - rpms += [_generate_rpm_with_name('postgresql-server')] - if has_contrib: - # Add postgresql-contrib - rpms += [_generate_rpm_with_name('postgresql-contrib')] + # Add mysql-server + rpms += [_generate_rpm_with_name('mysql-server')] curr_actor_mocked = CurrentActorMocked(msgs=[DistributionSignedRPM(items=rpms)]) monkeypatch.setattr(api, 'current_actor', curr_actor_mocked) monkeypatch.setattr(reporting, "create_report", create_report_mocked()) - # Executed actor fed with out fake RPMs - report_installed_packages(_context=api) + # Executed actor fed with fake RPMs + process() - if has_server and has_contrib: - # Assert for postgresql-server and postgresql-contrib packages installed - assert reporting.create_report.called == 2 - elif has_server: - # Assert only for postgresql-server package installed + if has_server: + # Assert for mysql-server package installed assert reporting.create_report.called == 1 else: - # Assert for no postgresql packages installed + # Assert for no mysql packages installed assert not reporting.create_report.called diff --git 
a/repos/system_upgrade/el8toel9/actors/removeupgradeefientry/libraries/removeupgradeefientry.py b/repos/system_upgrade/el8toel9/actors/removeupgradeefientry/libraries/removeupgradeefientry.py index daa7b2ca..dd604d8b 100644 --- a/repos/system_upgrade/el8toel9/actors/removeupgradeefientry/libraries/removeupgradeefientry.py +++ b/repos/system_upgrade/el8toel9/actors/removeupgradeefientry/libraries/removeupgradeefientry.py @@ -5,9 +5,25 @@ from leapp.exceptions import StopActorExecutionError from leapp.libraries.stdlib import api, CalledProcessError, run from leapp.models import ArmWorkaroundEFIBootloaderInfo +dirname = { + 'AlmaLinux': 'almalinux', + 'CentOS Linux': 'centos', + 'CentOS Stream': 'centos', + 'Oracle Linux Server': 'redhat', + 'Red Hat Enterprise Linux': 'redhat', + 'Rocky Linux': 'rocky', + 'Scientific Linux': 'redhat', +} + +with open('/etc/system-release', 'r') as sr: + release_line = next(line for line in sr if 'release' in line) + distro = release_line.split(' release ', 1)[0] + +distro_dir = dirname.get(distro, 'default') + EFI_MOUNTPOINT = '/boot/efi/' LEAPP_EFIDIR_CANONICAL_PATH = os.path.join(EFI_MOUNTPOINT, 'EFI/leapp/') -RHEL_EFIDIR_CANONICAL_PATH = os.path.join(EFI_MOUNTPOINT, 'EFI/redhat/') +RHEL_EFIDIR_CANONICAL_PATH = os.path.join(EFI_MOUNTPOINT, 'EFI/', distro_dir) def get_workaround_efi_info(): diff --git a/repos/system_upgrade/el9toel10/actors/inhibitcgroupsv1/actor.py b/repos/system_upgrade/el9toel10/actors/inhibitcgroupsv1/actor.py new file mode 100644 index 00000000..7a3e4be4 --- /dev/null +++ b/repos/system_upgrade/el9toel10/actors/inhibitcgroupsv1/actor.py @@ -0,0 +1,23 @@ +from leapp.actors import Actor +from leapp.libraries.actor import inhibitcgroupsv1 +from leapp.models import KernelCmdline +from leapp.reporting import Report +from leapp.tags import ChecksPhaseTag, IPUWorkflowTag + + +class InhibitCgroupsv1(Actor): + """ + Inhibit upgrade if cgroups-v1 are enabled + + Support for cgroups-v1 was deprecated in RHEL 9 and removed 
def process():
    """
    Inhibit the upgrade when the kernel cmdline enables cgroups-v1.

    Consumes the KernelCmdline message and inspects two systemd parameters:
    ``systemd.unified_cgroup_hierarchy`` (a falsy value selects legacy/hybrid
    cgroups-v1) and ``systemd.legacy_systemd_cgroup_controller`` (only relevant
    to suggest its removal). When cgroups-v1 is active, an inhibitor report
    with a grubby remediation command is created.

    Raises:
        StopActorExecutionError: when no KernelCmdline message was received.
    """
    cmdline = next(api.consume(KernelCmdline), None)
    if cmdline is None:
        # Should never happen in a sane workflow run.
        raise StopActorExecutionError("Did not receive any KernelCmdline messages.")

    # Unified hierarchy (cgroups-v2) is the default since RHEL 9; only an
    # explicit falsy value switches the system back to cgroups-v1.
    v2_enabled = True
    legacy_controller_set = False
    for arg in cmdline.parameters:
        if arg.key == "systemd.unified_cgroup_hierarchy":
            if arg.value is not None and arg.value.lower() in ("0", "false", "no"):
                v2_enabled = False
        elif arg.key == "systemd.legacy_systemd_cgroup_controller":
            # Regardless of its value it should be dropped; it has no effect
            # when the unified hierarchy is enabled.
            legacy_controller_set = True

    if v2_enabled:
        api.current_logger().debug("cgroups-v2 already in use, nothing to do, skipping.")
        return

    # Removing these args restores the defaults, which are the desired values.
    args_to_remove = ["systemd.unified_cgroup_hierarchy"]
    if legacy_controller_set:
        args_to_remove.append("systemd.legacy_systemd_cgroup_controller")

    report_summary = (
        "Leapp detected cgroups-v1 is enabled on the system."
        " The support of cgroups-v1 was deprecated in RHEL 9 and is removed in RHEL 10."
        " Software requiring cgroups-v1 might not work correctly or at all on RHEL 10."
    )
    reporting.create_report(
        [
            reporting.Title("cgroups-v1 enabled on the system"),
            reporting.Summary(report_summary),
            reporting.Severity(reporting.Severity.HIGH),
            reporting.Groups([reporting.Groups.INHIBITOR, reporting.Groups.KERNEL]),
            reporting.RelatedResource("package", "systemd"),
            reporting.Remediation(
                hint="Make sure no third party software requires cgroups-v1 and switch to cgroups-v2.",
                commands=[
                    [
                        "grubby",
                        "--update-kernel=ALL",
                        '--remove-args="{}"'.format(",".join(args_to_remove)),
                    ],
                ],
            ),
        ]
    )
@pytest.mark.parametrize(
    "cmdline_params", (
        ([KernelCmdlineArg(key="systemd.unified_cgroup_hierarchy", value="1")]),
        ([KernelCmdlineArg(key="systemd.unified_cgroup_hierarchy", value="true")]),
        ([KernelCmdlineArg(key="systemd.unified_cgroup_hierarchy", value="True")]),
        ([KernelCmdlineArg(key="systemd.unified_cgroup_hierarchy", value="yes")]),
        ([KernelCmdlineArg(key="systemd.unified_cgroup_hierarchy", value=None)]),
        (
            [
                KernelCmdlineArg(key="systemd.unified_cgroup_hierarchy", value="1"),
                KernelCmdlineArg(key="systemd.legacy_systemd_cgroup_controller", value="1"),
            ]
        ), (
            [
                KernelCmdlineArg(key="systemd.unified_cgroup_hierarchy", value="1"),
                KernelCmdlineArg(key="systemd.legacy_systemd_cgroup_controller", value="0"),
            ]
        ),
    )
)
def test_inhibit_should_not_inhibit(monkeypatch, cmdline_params):
    """No report is produced when the unified (cgroups-v2) hierarchy stays enabled."""
    curr_actor_mocked = CurrentActorMocked(msgs=[KernelCmdline(parameters=cmdline_params)])
    monkeypatch.setattr(api, "current_actor", curr_actor_mocked)
    monkeypatch.setattr(reporting, "create_report", create_report_mocked())

    inhibitcgroupsv1.process()

    assert not reporting.create_report.called