From 2c2e94ad667cf673e8ab2d9af8f5de203b0f4cc5 Mon Sep 17 00:00:00 2001 From: Lokesh Mandvekar Date: Thu, 5 Jun 2025 15:07:10 -0400 Subject: [PATCH] podman-5.4.0-8.el9 - Enable gating tests via TMT - Related: RHEL-80816 Signed-off-by: Lokesh Mandvekar --- 220-healthcheck.bats | 426 ++++ 252-quadlet.bats | 1754 +++++++++++++++++ 255-auto-update.bats | 701 +++++++ 520-checkpoint.bats | 451 +++++ helpers.bash | 1375 +++++++++++++ plans/basic.fmf | 8 - plans/system.fmf | 47 + plans/tmt.fmf | 20 + podman.spec | 26 +- test/tmt/system.fmf | 49 + test/tmt/system.sh | 44 + {tests => test/tmt}/tmt.fmf | 7 +- tests/main.fmf | 2 - tests/roles/bats_installed/tasks/main.yml | 12 - tests/roles/rootless_user/tasks/main.yml | 7 - .../run_bats_tests/files/run_bats_tests.sh | 72 - tests/roles/run_bats_tests/tasks/main.yml | 37 - .../run_bats_tests/tasks/run_one_test.yml | 52 - tests/smoke.fmf | 2 - tests/tests.yml | 49 - 20 files changed, 4896 insertions(+), 245 deletions(-) create mode 100644 220-healthcheck.bats create mode 100644 252-quadlet.bats create mode 100644 255-auto-update.bats create mode 100644 520-checkpoint.bats create mode 100644 helpers.bash delete mode 100644 plans/basic.fmf create mode 100644 plans/system.fmf create mode 100644 plans/tmt.fmf create mode 100644 test/tmt/system.fmf create mode 100644 test/tmt/system.sh rename {tests => test/tmt}/tmt.fmf (53%) delete mode 100644 tests/main.fmf delete mode 100644 tests/roles/bats_installed/tasks/main.yml delete mode 100644 tests/roles/rootless_user/tasks/main.yml delete mode 100755 tests/roles/run_bats_tests/files/run_bats_tests.sh delete mode 100644 tests/roles/run_bats_tests/tasks/main.yml delete mode 100644 tests/roles/run_bats_tests/tasks/run_one_test.yml delete mode 100644 tests/smoke.fmf delete mode 100644 tests/tests.yml diff --git a/220-healthcheck.bats b/220-healthcheck.bats new file mode 100644 index 0000000..e3b2d3b --- /dev/null +++ b/220-healthcheck.bats @@ -0,0 +1,426 @@ +#!/usr/bin/env bats -*- bats -*- +# +# tests for podman healthcheck +# +# + +load helpers +load helpers.systemd + +# bats file_tags=ci:parallel + +# Helper function: run 'podman inspect' and check various given fields +function _check_health { + local ctrname="$1" + local testname="$2" + local tests="$3" + local since="$4" + local hc_status="$5" + + # Loop-wait (up to a few seconds) for healthcheck event (#20342) + # Allow a margin when running parallel, because of system load + local timeout=5 + if [[ -n "$PARALLEL_JOBSLOT" ]]; then + timeout=$((timeout + 3)) + fi + + while :; do + run_podman events --filter container=$ctrname --filter event=health_status \ + --since "$since" --stream=false --format "{{.HealthStatus}}" + # Output may be empty or multiple lines. + if [[ -n "$output" ]]; then + if [[ "${lines[-1]}" = "$hc_status" ]]; then + break + fi + fi + + timeout=$((timeout - 1)) + if [[ $timeout -eq 0 ]]; then + die "$testname - timed out waiting for '$hc_status' in podman events" + fi + sleep 1 + done + + # Got the desired status. 
Now verify all the healthcheck fields + run_podman inspect --format "{{json .State.Healthcheck}}" $ctrname + + defer-assertion-failures + parse_table "$tests" | while read field expect;do + actual=$(jq ".$field" <<<"$output") + is "$actual" "$expect" "$testname - .State.Healthcheck.$field" + done + immediate-assertion-failures +} + +@test "podman healthcheck" { + local ctrname="c-h-$(safename)" + run_podman run -d --name $ctrname \ + --health-cmd /home/podman/healthcheck \ + --health-interval 1s \ + --health-retries 3 \ + --health-on-failure=kill \ + --health-startup-cmd /home/podman/healthcheck \ + --health-startup-interval 1s \ + $IMAGE /home/podman/pause + cid="$output" + + run_podman inspect $ctrname --format "{{.Config.HealthcheckOnFailureAction}}" + is "$output" "kill" "on-failure action is set to kill" + + run_podman inspect $ctrname --format "{{.Config.StartupHealthCheck.Test}}" + is "$output" "[CMD-SHELL /home/podman/healthcheck]" ".Config.StartupHealthCheck.Test" + + current_time=$(date --iso-8601=ns) + # We can't check for 'starting' because a 1-second interval is too + # short; it could run healthcheck before we get to our first check. + # + # So, just force a healthcheck run, then confirm that it's running. + run_podman healthcheck run $ctrname + is "$output" "" "output from 'podman healthcheck run'" + + _check_health $ctrname "All healthy" " +Status | \"healthy\" +FailingStreak | 0 +Log[-1].ExitCode | 0 +Log[-1].Output | \"Life is Good on stdout\\\nLife is Good on stderr\\\n\" +" "$current_time" "healthy" + + current_time=$(date --iso-8601=ns) + # Force a failure + run_podman exec $ctrname touch /uh-oh + + _check_health $ctrname "First failure" " +Status | \"healthy\" +FailingStreak | [123] +Log[-1].ExitCode | 1 +Log[-1].Output | \"Uh-oh on stdout!\\\nUh-oh on stderr!\\\n\" +" "$current_time" "healthy" + + # Check that we now we do have valid podman units with this + # name so that the leak check below does not turn into a NOP without noticing. + run -0 systemctl list-units + cidmatch=$(grep "$cid" <<<"$output") + echo "$cidmatch" + assert "$cidmatch" =~ " $cid-[0-9a-f]+\.timer *.*/podman healthcheck run $cid" \ + "Healthcheck systemd unit exists" + + current_time=$(date --iso-8601=ns) + # After three successive failures, container should no longer be healthy + _check_health $ctrname "Four or more failures" " +Status | \"unhealthy\" +FailingStreak | [3456] +Log[-1].ExitCode | 1 +Log[-1].Output | \"Uh-oh on stdout!\\\nUh-oh on stderr!\\\n\" +" "$current_time" "unhealthy" + + # now the on-failure should kick in and kill the container + run_podman wait $ctrname + + # Clean up + run_podman rm -t 0 -f $ctrname + + # Important check for https://github.com/containers/podman/issues/22884 + # We never should leak the unit files, healthcheck uses the cid in name so just grep that. + # (Ignore .scope units, those are conmon and can linger for 5 minutes) + # (Ignore .mount, too. They are created/removed by systemd based on the actual real mounts + # on the host and that is async and might be slow enough in CI to cause failures.) 
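+    # (Illustrative: a leaked unit would show up here with a name derived
+    # from the container ID, e.g. <cid>-0123abcd.service or .timer --
+    # hypothetical suffix, matching the timer asserted above.)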
+ run -0 systemctl list-units --quiet "*$cid*" + except_scope_mount=$(grep -vF ".scope " <<<"$output" | { grep -vF ".mount" || true; } ) + assert "$except_scope_mount" == "" "Healthcheck systemd unit cleanup: no units leaked" +} + +@test "podman healthcheck - restart cleans up old state" { + ctr="c-h-$(safename)" + + run_podman run -d --name $ctr \ + --health-cmd /home/podman/healthcheck \ + --health-retries=3 \ + --health-interval=disable \ + $IMAGE /home/podman/pause + + run_podman container inspect $ctr --format "{{.State.Healthcheck.FailingStreak}}" + is "$output" "0" "Failing streak of fresh container should be 0" + + # Get the healthcheck to fail + run_podman exec $ctr touch /uh-oh-only-once + run_podman 1 healthcheck run $ctr + is "$output" "unhealthy" "output from 'podman healthcheck run'" + run_podman container inspect $ctr --format "{{.State.Healthcheck.FailingStreak}}" + is "$output" "1" "Failing streak after one failed healthcheck should be 1" + + run_podman container restart $ctr + run_podman container inspect $ctr --format "{{.State.Healthcheck.FailingStreak}}" + is "$output" "0" "Failing streak of restarted container should be 0 again" + + run_podman rm -f -t0 $ctr +} + +@test "podman wait --condition={healthy,unhealthy}" { + ctr="c-h-$(safename)" + + wait_file="$PODMAN_TMPDIR/$(random_string).wait_for_me" + + for condition in healthy unhealthy;do + rm -f $wait_file + run_podman run -d --name $ctr \ + --health-cmd /home/podman/healthcheck \ + --health-retries=1 \ + --health-interval=disable \ + $IMAGE /home/podman/pause + if [[ $condition == "unhealthy" ]];then + # create the uh-oh file to let the health check fail + run_podman exec $ctr touch /uh-oh + fi + + # Wait for the container in the background and create the $wait_file to + # signal the specified wait condition was met. + (timeout --foreground -v --kill=5 10 $PODMAN wait --condition=$condition $ctr && touch $wait_file) & + + # Sleep 1 second to make sure above commands are running + sleep 1 + if [[ -f $wait_file ]]; then + die "the wait file should only be created after the container turned healthy" + fi + + if [[ $condition == "healthy" ]];then + run_podman healthcheck run $ctr + else + run_podman 1 healthcheck run $ctr + fi + wait_for_file $wait_file + run_podman rm -f -t0 $ctr + done +} + +@test "podman healthcheck --health-on-failure" { + run_podman 125 create --health-on-failure=kill $IMAGE + is "$output" "Error: cannot set on-failure action to kill without a health check" + + ctr="c-h-$(safename)" + + for policy in none kill restart stop;do + uhoh=/uh-oh + if [[ $policy != "none" ]];then + # only fail the first run + uhoh=/uh-oh-only-once + fi + + # Run healthcheck image. 
+ run_podman run -d --name $ctr \ + --health-cmd /home/podman/healthcheck \ + --health-retries=1 \ + --health-on-failure=$policy \ + --health-interval=disable \ + $IMAGE /home/podman/pause + + # healthcheck should succeed + run_podman healthcheck run $ctr + + # Now cause the healthcheck to fail + run_podman exec $ctr touch $uhoh + + # healthcheck should now fail, with exit status 1 and 'unhealthy' output + run_podman 1 healthcheck run $ctr + is "$output" "unhealthy" "output from 'podman healthcheck run' (policy: $policy)" + + if [[ $policy == "restart" ]];then + # Make sure the container transitions back to running + run_podman wait --condition=running $ctr + run_podman inspect $ctr --format "{{.RestartCount}}" + assert "${#lines[@]}" != 0 "Container has been restarted at least once" + run_podman container inspect $ctr --format "{{.State.Healthcheck.FailingStreak}}" + is "$output" "0" "Failing streak of restarted container should be 0 again" + run_podman healthcheck run $ctr + elif [[ $policy == "none" ]];then + run_podman inspect $ctr --format "{{.State.Status}} {{.Config.HealthcheckOnFailureAction}}" + # Container is still running and health check still broken + is "$output" "running $policy" "container continued running" + run_podman 1 healthcheck run $ctr + is "$output" "unhealthy" "output from 'podman healthcheck run' (policy: $policy)" + else + run_podman inspect $ctr --format "{{.State.Status}} {{.Config.HealthcheckOnFailureAction}}" + # kill and stop yield the container into a non-running state + is "$output" ".* $policy" "container was stopped/killed (policy: $policy)" + assert "$output" != "running $policy" + # also make sure that it's not stuck in the stopping state + assert "$output" != "stopping $policy" + fi + + run_podman rm -f -t0 $ctr + done +} + +@test "podman healthcheck --health-on-failure with interval" { + ctr="c-h-$(safename)" + + for policy in stop kill restart ;do + t0=$(date --iso-8601=seconds) + run_podman run -d --name $ctr \ + --health-cmd /bin/false \ + --health-retries=1 \ + --health-on-failure=$policy \ + --health-interval=1s \ + $IMAGE top + + if [[ $policy == "restart" ]];then + # Sleeping for 2 seconds makes the test much faster than using + # podman-wait which would compete with the container getting + # restarted. 
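+            # (With --health-interval=1s and --health-retries=1, a single
+            # failed probe inside those 2 seconds already triggers the
+            # on-failure action, so the restart has happened by the time
+            # we wait below.)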
+ sleep 2 + # Make sure the container transitions back to running + run_podman wait --condition=running $ctr + run_podman inspect $ctr --format "{{.RestartCount}}" + assert "${#lines[@]}" != 0 "Container has been restarted at least once" + else + # kill and stop yield the container into a non-running state + run_podman wait $ctr + run_podman inspect $ctr --format "{{.State.Status}} {{.Config.HealthcheckOnFailureAction}}" + is "$output" ".* $policy" "container was stopped/killed (policy: $policy)" + assert "$output" != "running $policy" + # also make sure that it's not stuck in the stopping state + assert "$output" != "stopping $policy" + fi + + run_podman rm -f -t0 $ctr + done +} + +function _create_container_with_health_log_settings { + local ctrname="$1" + local msg="$2" + local format="$3" + local flag="$4" + local expect="$5" + local expect_msg="$6" + + run_podman run -d --name $ctrname \ + --health-cmd "echo $msg" \ + $flag \ + $IMAGE /home/podman/pause + cid="$output" + + run_podman inspect $ctrname --format $format + is "$output" "$expect" "$expect_msg" + + output=$cid +} + +function _check_health_log { + local ctrname="$1" + local expect_msg="$2" + local comparison=$3 + local expect_count="$4" + + run_podman inspect $ctrname --format "{{.State.Health.Log}}" + count=$(grep -co "$expect_msg" <<< "$output") + assert "$count" $comparison $expect_count "Number of matching health log messages" +} + +@test "podman healthcheck --health-max-log-count values" { + # flag | expected value | op | log count + test=" + | 5 | -eq | 5 + --health-max-log-count 0 | 0 | -ge | 11 + --health-max-log-count=0 | 0 | -ge | 11 + --health-max-log-count 10 | 10 | -eq | 10 + --health-max-log-count=10 | 10 | -eq | 10 + " + + while read flag value op logs_count ; do + local msg="healthmsg-$(random_string)" + local ctrname="c-h-$(safename)" + _create_container_with_health_log_settings $ctrname $msg "{{.Config.HealthMaxLogCount}}" $flag $value "HealthMaxLogCount" + + for i in $(seq 1 $((logs_count + 5))); + do + run_podman healthcheck run $ctrname + is "$output" "" "unexpected output from podman healthcheck run (pass $i)" + done + + _check_health_log $ctrname $msg $op $logs_count + + run_podman rm -t 0 -f $ctrname + done < <(parse_table "$tests") +} + +@test "podman healthcheck --health-max-log-size values" { + local s=$(printf "healthmsg-%1000s") + local long_msg=${s// /$(random_string)} + + # flag | expected value | exp_msg + test=" + | 500 | ${long_msg:0:500}}]\$ + --health-max-log-size 0 | 0 | $long_msg}]\$ + --health-max-log-size=0 | 0 | $long_msg}]\$ + --health-max-log-size 10 | 10 | ${long_msg:0:10}}]\$ + --health-max-log-size=10 | 10 | ${long_msg:0:10}}]\$ + " + + while read flag value exp_msg ; do + local ctrname="c-h-$(safename)" + _create_container_with_health_log_settings $ctrname $long_msg "{{.Config.HealthMaxLogSize}}" $flag $value "HealthMaxLogSize" + + run_podman healthcheck run $ctrname + is "$output" "" "output from 'podman healthcheck run'" + + _check_health_log $ctrname $exp_msg -eq 1 + + run_podman rm -t 0 -f $ctrname + done < <(parse_table "$tests") +} + +@test "podman healthcheck --health-log-destination file" { + local TMP_DIR_HEALTHCHECK="$PODMAN_TMPDIR/healthcheck" + mkdir $TMP_DIR_HEALTHCHECK + local ctrname="c-h-$(safename)" + local msg="healthmsg-$(random_string)" + _create_container_with_health_log_settings $ctrname $msg "{{.Config.HealthLogDestination}}" "--health-log-destination $TMP_DIR_HEALTHCHECK" "$TMP_DIR_HEALTHCHECK" "HealthLogDestination" + cid="$output" + + run_podman 
healthcheck run $ctrname + is "$output" "" "output from 'podman healthcheck run'" + + healthcheck_log_path="${TMP_DIR_HEALTHCHECK}/${cid}-healthcheck.log" + # The healthcheck is triggered by the podman when the container is started, but its execution depends on systemd. + # And since `run_podman healthcheck run` is also run manually, it will result in two runs. + count=$(grep -co "$msg" $healthcheck_log_path) + assert "$count" -ge 1 "Number of matching health log messages" + + run_podman rm -t 0 -f $ctrname +} + + +@test "podman healthcheck --health-log-destination journal" { + skip_if_remote "We cannot read journalctl over remote." + + # We can't use journald on RHEL as rootless, either: rhbz#1895105 + skip_if_journald_unavailable + + # FIXME: The rootless user belongs to systemd-journal, but this still fails + if is_rhel_or_centos; then + skip_if_rootless + fi + + local ctrname="c-h-$(safename)" + local msg="healthmsg-$(random_string)" + _create_container_with_health_log_settings $ctrname $msg "{{.Config.HealthLogDestination}}" "--health-log-destination events_logger" "events_logger" "HealthLogDestination" + cid="$output" + + run_podman healthcheck run $ctrname + is "$output" "" "output from 'podman healthcheck run'" + + cmd="journalctl --output cat --output-fields=PODMAN_HEALTH_LOG PODMAN_ID=$cid" + echo "$_LOG_PROMPT $cmd" + run $cmd + echo "$output" + assert "$status" -eq 0 "exit status of journalctl" + + # The healthcheck is triggered by the podman when the container is started, but its execution depends on systemd. + # And since `run_podman healthcheck run` is also run manually, it will result in two runs. + count=$(grep -co "$msg" <<< "$output") + assert "$count" -ge 1 "Number of matching health log messages" + + run_podman rm -t 0 -f $ctrname +} + +# vim: filetype=sh diff --git a/252-quadlet.bats b/252-quadlet.bats new file mode 100644 index 0000000..21d9cff --- /dev/null +++ b/252-quadlet.bats @@ -0,0 +1,1754 @@ +#!/usr/bin/env bats -*- bats -*- +# +# Tests generated configurations for systemd. +# + +# bats file_tags=ci:parallel + +load helpers +load helpers.network +load helpers.registry +load helpers.systemd + +UNIT_FILES=() + +function start_time() { + sleep_to_next_second # Ensure we're on a new second with no previous logging + STARTED_TIME=$(date "+%F %R:%S") # Start time for new log time +} + +function setup() { + skip_if_remote "quadlet tests are meaningless over remote" + skip_if_rootless_cgroupsv1 "Can't use --cgroups=split w/ CGv1 (issue 17456, wontfix)" + skip_if_journald_unavailable "Needed for RHEL. FIXME: we might be able to re-enable a subset of tests." 
+
+    test -x "$QUADLET" || die "Cannot run quadlet tests without executable \$QUADLET ($QUADLET)"
+
+    start_time
+
+    basic_setup
+}
+
+function teardown() {
+    for UNIT_FILE in ${UNIT_FILES[@]}; do
+        if [[ -e "$UNIT_FILE" ]]; then
+            local service=$(basename "$UNIT_FILE")
+            run systemctl stop "$service"
+            if [ $status -ne 0 ]; then
+                echo "# WARNING: systemctl stop failed in teardown: $output" >&3
+            fi
+            run systemctl reset-failed "$service"
+            rm -f "$UNIT_FILE"
+        fi
+    done
+    systemctl daemon-reload
+
+    basic_teardown
+}
+
+# Converts the quadlet file and installs the result in $UNIT_DIR
+function run_quadlet() {
+    local sourcefile="$1"
+    local service=$(quadlet_to_service_name "$sourcefile")
+
+    # quadlet always works on an entire directory, so copy the file
+    # to transform into the given or a newly created tmpdir
+    local quadlet_tmpdir="$2"
+    if [ -z "$quadlet_tmpdir" ]; then
+        quadlet_tmpdir=$(mktemp -d --tmpdir=$PODMAN_TMPDIR quadlet.XXXXXX)
+    fi
+    cp $sourcefile $quadlet_tmpdir/
+
+    echo "$_LOG_PROMPT $QUADLET $_DASHUSER $UNIT_DIR"
+    QUADLET_UNIT_DIRS="$quadlet_tmpdir" run \
+        timeout --foreground -v --kill=10 $PODMAN_TIMEOUT \
+        $QUADLET $_DASHUSER $UNIT_DIR
+    echo "$output"
+    assert $status -eq 0 "Failed to convert quadlet file: $sourcefile"
+    is "$output" "" "quadlet should report no errors"
+
+    run cat $UNIT_DIR/$service
+    assert $status -eq 0 "Could not cat $UNIT_DIR/$service"
+    echo "$output"
+    local content="$output"
+
+    # Ensure this is torn down
+    UNIT_FILES+=("$UNIT_DIR/$service")
+
+    QUADLET_SERVICE_NAME="$service"
+    QUADLET_SERVICE_CONTENT="$content"
+    QUADLET_SYSLOG_ID="$(basename $service .service)"
+    QUADLET_CONTAINER_NAME="systemd-$QUADLET_SYSLOG_ID"
+}
+
+function service_setup() {
+    local service="$1"
+    local option="$2"
+
+    systemctl daemon-reload
+
+    local startargs=""
+    local statusexit=0
+    local activestate="active"
+
+    # If option is "wait", start and wait for the service to exit
+    if [ "$option" == "wait" ]; then
+        startargs="--wait"
+        statusexit=3
+        local activestate="inactive"
+    fi
+
+    systemctl_start $startargs "$service"
+
+    # FIXME FIXME FIXME: this is racy with short-lived containers!
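+    # (A short-lived container can exit between systemctl_start and this
+    # status query, flipping the unit to inactive/failed and breaking the
+    # $statusexit/$activestate expectations set above.)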
+ echo "$_LOG_PROMPT systemctl status $service" + run systemctl status "$service" + echo "$output" + assert $status -eq $statusexit "systemctl status $service" + + echo "$_LOG_PROMPT systemctl show --value --property=ActiveState $service" + run systemctl show --value --property=ActiveState "$service" + echo "$output" + assert $status -eq 0 "systemctl show $service" + is "$output" $activestate +} + +# Helper to stop a systemd service running a container +function service_cleanup() { + local service="$1" + local expected_state="$2" + + run systemctl stop "$service" + assert $status -eq 0 "Error stopping systemd unit $service: $output" + + # Regression test for #11304: confirm that unit stops into correct state + if [[ -n "$expected_state" ]]; then + run systemctl show --property=ActiveState "$service" + assert "$output" = "ActiveState=$expected_state" \ + "state of service $service after systemctl stop" + fi + + # reset-failed necessary to clean up stray systemd cruft + run systemctl reset-failed "$service" + rm -f "$UNIT_DIR/$service" + systemctl daemon-reload +} + +function create_secret() { + local secret_name=$(random_string) + local secret_file=$PODMAN_TMPDIR/secret_$(random_string) + local secret=$(random_string) + + echo $secret > $secret_file + run_podman secret create $secret_name $secret_file + + SECRET_NAME=$secret_name + SECRET=$secret +} + +function remove_secret() { + local secret_name="$1" + + run_podman secret rm $secret_name +} + +function wait_for_journal() { + local step=1 + local count=10 + local expect_str= + + while [ "$#" -gt 0 ]; do + case "$1" in + -s|--step) + step="$2" + shift 2 + ;; + -c|--count) + count="$2" + shift 2 + ;; + *) + expect_str="$1" + shift 1 + ;; + esac + done + + while [ "$count" -gt 0 ]; do + run journalctl "--since=$STARTED_TIME" --unit="$QUADLET_SERVICE_NAME" + if [[ "$output" =~ "$expect_str" ]]; then + return + fi + sleep "$step" + count=$(( count - 1 )) + done + die "Timed out waiting for '$expect_str' in journalctl output" +} + +# bats test_tags=distro-integration +@test "quadlet - basic" { + # FIXME: The rootless user belongs to systemd-journal, but this still fails + if is_rhel_or_centos; then + skip_if_rootless + fi + + # Network=none is to work around a Pasta bug, can be removed once a patched Pasta is available. + # Ref https://github.com/containers/podman/pull/21563#issuecomment-1965145324 + local quadlet_file=$PODMAN_TMPDIR/basic_$(safename).container + cat > $quadlet_file < $dir1/$quadlet_file < $dir2/$quadlet_file < $quadlet_file < $quadlet_file <&2; top -d 10" +EOF + + run_quadlet "$quadlet_file" + service_setup $QUADLET_SERVICE_NAME + + # Ensure we can access with the custom container name + run_podman container inspect --format "{{.State.Status}}" customcontainername + is "$output" "running" "container should be started by systemd and hence be running" + + wait_for_journal "Started $QUADLET_SERVICE_NAME" + + run journalctl "--since=$STARTED_TIME" --unit="$QUADLET_SERVICE_NAME" + assert "$output" =~ "$token_out" "Output can be found with journalctl" + assert "$output" =~ "$token_err" "Error can be found with journalctl" + assert "$output" =~ "Starting $QUADLET_SERVICE_NAME" "Status information can be found with journalctl" + + # log priority 3 in journalctl is err. 
This is documented in syslog(3) + run journalctl "--since=$STARTED_TIME" --priority=3 --unit="$QUADLET_SERVICE_NAME" + assert "$output" =~ "$token_err" "Error can be found with journalctl --priority=3" + assert "$output" !~ "$token_out" "Output can not be found with journalctl --priority=3" + + service_cleanup $QUADLET_SERVICE_NAME failed +} + +@test "quadlet - labels" { + local quadlet_file=$PODMAN_TMPDIR/labels_$(safename).container + cat > $quadlet_file < $quadlet_file < $quadlet_file < $quadlet_vol_file < $quadlet_file < $quadlet_vol_file < $quadlet_file < $quadlet_file < $quadlet_network_file < $quadlet_file < $quadlet_network_file < $quadlet_file <$yaml_source < $quadlet_file < $quadlet_network_file <$yaml_source < $quadlet_file < $quadlet_file < $quadlet_file < $quadlet_file < $quadlet_file < $quadlet_file <$unitfile <$percent_t_file" +Type=oneshot +EOF + systemctl daemon-reload + systemctl_start --wait $service + percent_t=$(< $percent_t_file) + # Clean up. Don't bother to systemctl-reload, service_setup does that below. + rm -f $unitfile + + # Sanity check: just make sure it's not "/" + assert "${#percent_t}" -ge 4 "sanity check: length of %T ($percent_t)" + + # Step 2: Make a subdirectory in %T, and in there, a scratch file + local tmp_path=$(mktemp -d --tmpdir=${percent_t} quadlet.volume.XXXXXX) + local tmp_subdir=$(basename $tmp_path) + local file_name="f$(random_string 10).txt" + local file_content="data_$(random_string 15)" + echo $file_content > $tmp_path/$file_name + + local quadlet_file=$PODMAN_TMPDIR/basic_$(safename).container + cat > $quadlet_file < $quadlet_file < $quadlet_file < $yaml_file < $quadlet_file <$yaml_source < $quadlet_file < /test/test.txt" + is $(cat $PODMAN_TMPDIR/$local_path/test.txt) "hello" + + service_cleanup $QUADLET_SERVICE_NAME inactive +} + +@test "quadlet - image files" { + local quadlet_tmpdir=$PODMAN_TMPDIR/quadlets + + local registry=localhost:${PODMAN_LOGIN_REGISTRY_PORT} + local image_for_test=$registry/i-$(safename):$(random_string) + local authfile=$PODMAN_TMPDIR/authfile.json + + local quadlet_image_unit=image_test_$(safename).image + local quadlet_image_file=$PODMAN_TMPDIR/$quadlet_image_unit + cat > $quadlet_image_file < $quadlet_volume_file < $quadlet_container_file < $quadlet_kube_volume_yaml_file < $quadlet_kube_volume_unit_file < $quadlet_kube_pod_yaml_file < $quadlet_kube_pod_unit_file < $quadlet_kube_pod_yaml_file < $quadlet_kube_pod_unit_file < $quadlet_image_file < $quadlet_volume_file < $quadlet_container_file < $quadlet_pod_file < $quadlet_container_file <$container_file_path << EOF +FROM $untagged_image +EOF + + # Create the YAMl file + pod_name="p-$(safename)" + container_name="c-$(safename)" + yaml_source="$yaml_dir/build_$(safename).yaml" + cat >$yaml_source < $quadlet_file <>"${f}" + echo "${content}" >>"${f}" + done < <(parse_table "${dropin_files}") + + # Create the base quadlet file + quadlet_base="${PODMAN_TMPDIR}/${quadlet_file}" + cat > "${quadlet_base}" <$container_file_path << EOF +FROM $IMAGE +EOF + + local image_tag=quay.io/i-$(safename):$(random_string) + local quadlet_file=$PODMAN_TMPDIR/pull_$(safename).build + cat >$quadlet_file << EOF +[Build] +ImageTag=$image_tag +File=$container_file_path +Pull=never +EOF + + run_quadlet "$quadlet_file" + service_setup $QUADLET_SERVICE_NAME "wait" + + run_podman rmi -i $image_tag +} +# vim: filetype=sh diff --git a/255-auto-update.bats b/255-auto-update.bats new file mode 100644 index 0000000..188dedd --- /dev/null +++ b/255-auto-update.bats @@ -0,0 +1,701 @@ 
+#!/usr/bin/env bats -*- bats -*- +# +# Tests for automatically update images for containerized services +# + +load helpers +load helpers.network +load helpers.registry +load helpers.systemd + +export SNAME_FILE + +function setup() { + skip_if_remote "systemd tests are meaningless over remote" + basic_setup + + SNAME_FILE=${PODMAN_TMPDIR}/services +} + +function teardown() { + if [[ -e $SNAME_FILE ]]; then + while read line; do + if [[ "$line" =~ "podman-auto-update" ]]; then + echo "Stop timer: $line.timer" + systemctl stop $line.timer + systemctl disable $line.timer + else + systemctl stop $line + fi + rm -f $UNIT_DIR/$line.{service,timer} + done < $SNAME_FILE + + rm -f $SNAME_FILE + fi + SNAME_FILE= + + run_podman rmi -f \ + quay.io/libpod/alpine:latest \ + quay.io/libpod/busybox:latest \ + quay.io/libpod/localtest:latest \ + quay.io/libpod/autoupdatebroken:latest \ + quay.io/libpod/test:latest + + # The rollback tests may leave some dangling images behind, so let's prune + # them to leave a clean state. + run_podman image prune -f + basic_teardown +} + +# This functions is used for handle the basic step in auto-update related +# tests. Including following steps: +# 1. Generate a random container name and echo it to output. +# 2. Tag the fake image before test +# 3. Start a container with io.containers.autoupdate +# 4. Generate the service file from the container +# 5. Remove the origin container +# 6. Start the container from service +# 7. Use this fully-qualified image instead of 2) +function generate_service() { + local target_img_basename=$1 + local autoupdate=$2 + local command=$3 + local extraArgs=$4 + local noTag=$5 + local requires=$6 + + # Unless specified, set a default command. + if [[ -z "$command" ]]; then + command="top -d 120" + fi + + # Container name. Include the autoupdate type, to make debugging easier. + # IMPORTANT: variable 'cname' is passed (out of scope) up to caller! + cname=c_${autoupdate//\'/}_$(random_string) + target_img="quay.io/libpod/$target_img_basename:latest" + if [[ -n "$7" ]]; then + target_img="$7" + fi + + if [[ -z "$noTag" ]]; then + run_podman tag $IMAGE $target_img + fi + + if [[ -n "$autoupdate" ]]; then + label="--label io.containers.autoupdate=$autoupdate" + else + label="" + fi + + if [[ -n "$requires" ]]; then + requires="--requires=$requires" + fi + + run_podman create $extraArgs --name $cname $label $target_img $command + + (cd $UNIT_DIR; run_podman generate systemd --new --files --name $requires $cname) + echo "container-$cname" >> $SNAME_FILE + run_podman rm -t 0 -f $cname + + systemctl daemon-reload + systemctl_start container-$cname + systemctl status container-$cname + + # Original image ID. + # IMPORTANT: variable 'ori_image' is passed (out of scope) up to caller! + run_podman inspect --format "{{.Image}}" $cname + ori_image=$output +} + +function _wait_service_ready() { + local sname=$1 + + local timeout=6 + while [[ $timeout -gt 1 ]]; do + if systemctl -q is-active $sname; then + return + fi + sleep 1 + let timeout=$timeout-1 + done + + # Print service status as debug information before failed the case + systemctl status $sname + die "Timed out waiting for $sname to start" +} + +# Wait for container to update, as confirmed by its image ID changing +function _confirm_update() { + local cname=$1 + local old_iid=$2 + + # Image has already been pulled, so this shouldn't take too long + local timeout=10 + while [[ $timeout -gt 0 ]]; do + run_podman '?' 
inspect --format "{{.Image}}" $cname + if [[ $status != 0 ]]; then + if [[ $output =~ (no such object|does not exist in database): ]]; then + # this is ok, it just means the container is being restarted + : + else + die "podman inspect $cname failed unexpectedly" + fi + elif [[ $output != $old_iid ]]; then + return + fi + sleep 1 + timeout=$((timeout - 1)) + done + + die "Timed out waiting for $cname to update; old IID=$old_iid" +} + +@test "podman auto-update - validate input" { + # Fully-qualified image reference is required + run_podman create --label io.containers.autoupdate=registry $IMAGE + run_podman rm -f "$output" + + # Short name does not work + shortname="shortname:latest" + run_podman image tag $IMAGE $shortname + run_podman 125 create --label io.containers.autoupdate=registry $shortname + is "$output" "Error: short name: auto updates require fully-qualified image reference: \"$shortname\"" + + # Requires docker (or no) transport + archive=$PODMAN_TMPDIR/archive.tar + run_podman save -o $archive $IMAGE + run_podman 125 create --label io.containers.autoupdate=registry docker-archive:$archive + is "$output" ".*Error: auto updates require the docker image transport but image is of transport \"docker-archive\"" + + run_podman rmi $shortname +} + +# This test can fail in dev. environment because of SELinux. +# quick fix: chcon -t container_runtime_exec_t ./bin/podman +@test "podman auto-update - label io.containers.autoupdate=image" { + since=$(date --iso-8601=seconds) + run_podman auto-update + is "$output" "" + run_podman events --filter type=system --since $since --stream=false + is "$output" "" + + # Generate two units. The first "parent" to be auto updated, the second + # "child" depends on/requires the "parent" and is expected to get restarted + # as well on auto updates (regression test for #18926). + generate_service alpine image + ctr_parent=$cname + _wait_service_ready container-$ctr_parent.service + + generate_service alpine image "" "" "" "container-$ctr_parent.service" + ctr_child=$cname + _wait_service_ready container-$ctr_child.service + run_podman container inspect --format "{{.ID}}" $ctr_child + old_child_id=$output + + since=$(date --iso-8601=seconds) + run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}" + is "$output" ".*container-$ctr_parent.service,quay.io/libpod/alpine:latest,pending,registry.*" "Image update is pending." + run_podman events --filter type=system --since $since --stream=false + is "$output" ".* system auto-update" + + since=$(date --iso-8601=seconds) + run_podman auto-update --rollback=false --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}" + is "$output" "Trying to pull.*" "Image is updated." + is "$output" ".*container-$ctr_parent.service,quay.io/libpod/alpine:latest,true,registry.*" "Image is updated." + run_podman events --filter type=system --since $since --stream=false + is "$output" ".* system auto-update" + + # Confirm that the update was successful and that the child container/unit + # has been restarted as well. 
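+    # (The child unit was generated with --requires=container-<parent>.service;
+    # podman-generate-systemd is expected to translate that into matching
+    # Requires=/After= directives, which is what propagates the restart.)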
+ _confirm_update $ctr_parent $ori_image + run_podman container inspect --format "{{.ID}}" $ctr_child + assert "$output" != "$old_child_id" \ + "child container/unit has not been restarted during update" + run_podman container inspect --format "{{.ID}}" $ctr_child + run_podman container inspect --format "{{.State.Status}}" $ctr_child + is "$output" "running" "child container is in running state" +} + +@test "podman auto-update - label io.containers.autoupdate=image with rollback" { + # FIXME: this test should exercise the authfile label to have a regression + # test for #11171. + + # Note: the autoupdatebroken image is empty on purpose so it cannot be + # executed and force a rollback. The rollback test for the local policy + # is exercising the case where the container doesn't send a ready message. + image=quay.io/libpod/autoupdatebroken + + run_podman tag $IMAGE $image + generate_service autoupdatebroken image + + _wait_service_ready container-$cname.service + run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}" + is "$output" ".*container-$cname.service,$image:latest,pending,registry.*" "Image update is pending." + + run_podman container inspect --format "{{.Image}}" $cname + oldID="$output" + + run_podman inspect --format "{{.ID}}" $cname + containerID="$output" + + run_podman auto-update --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}" + is "$output" "Trying to pull.*" "Image is updated." + is "$output" ".*container-$cname.service,$image:latest,rolled back,registry.*" "Image has been rolled back." + + run_podman container inspect --format "{{.Image}}" $cname + is "$output" "$oldID" "container rolled back to previous image" + + run_podman container inspect --format "{{.ID}}" $cname + assert "$output" != "$containerID" \ + "container has not been restarted during rollback" +} + +@test "podman auto-update - label io.containers.autoupdate=disabled" { + generate_service alpine disabled + + _wait_service_ready container-$cname.service + run_podman auto-update + is "$output" "" "Image is not updated when autoupdate=disabled." + + run_podman inspect --format "{{.Image}}" $cname + is "$output" "$ori_image" "Image ID should not change" +} + +@test "podman auto-update - label io.containers.autoupdate=fakevalue" { + fakevalue=fake_$(random_string) + generate_service alpine $fakevalue + + _wait_service_ready container-$cname.service + run_podman 125 auto-update + is "$output" ".*invalid auto-update policy.*" "invalid policy setup" + + run_podman inspect --format "{{.Image}}" $cname + is "$output" "$ori_image" "Image ID should not change" +} + +@test "podman auto-update - label io.containers.autoupdate=local" { + generate_service localtest local + _wait_service_ready container-$cname.service + + image=quay.io/libpod/localtest:latest + run_podman commit --change CMD=/bin/bash $cname $image + run_podman image inspect --format "{{.ID}}" $image + + run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}" + is "$output" ".*container-$cname.service,quay.io/libpod/localtest:latest,pending,local.*" "Image update is pending." + + run_podman auto-update --rollback=false --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}" + is "$output" ".*container-$cname.service,quay.io/libpod/localtest:latest,true,local.*" "Image is updated." + + _confirm_update $cname $ori_image +} + +# This test can fail in dev. environment because of SELinux. 
+# quick fix: chcon -t container_runtime_exec_t ./bin/podman +@test "podman auto-update - label io.containers.autoupdate=local with rollback" { + # sdnotify fails with runc 1.0.0-3-dev2 on Ubuntu. Let's just + # assume that we work only with crun, nothing else. + # [copied from 260-sdnotify.bats] + runtime=$(podman_runtime) + if [[ "$runtime" != "crun" ]]; then + skip "this test only works with crun, not $runtime" + fi + + _prefetch $SYSTEMD_IMAGE + + dockerfile1=$PODMAN_TMPDIR/Dockerfile.1 + cat >$dockerfile1 <> /runme +RUN chmod +x /runme +EOF + + dockerfile2=$PODMAN_TMPDIR/Dockerfile.2 + cat >$dockerfile2 <> /runme +RUN chmod +x /runme +EOF + image=test + + # Generate a healthy image that will run correctly. + run_podman build -t quay.io/libpod/$image -f $dockerfile1 + + generate_service $image local /runme --sdnotify=container noTag + _wait_service_ready container-$cname.service + + run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}" + is "$output" ".*container-$cname.service,quay.io/libpod/$image:latest,false,local.*" "No update available" + + # Generate an unhealthy image that will fail. + run_podman build -t quay.io/libpod/$image -f $dockerfile2 + run_podman image inspect --format "{{.ID}}" $image + newID="$output" + + run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}" + is "$output" ".*container-$cname.service,quay.io/libpod/$image:latest,pending,local.*" "Image updated is pending" + + # Note: we rollback automatically by default. + run_podman auto-update --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}" + is "$output" ".*container-$cname.service,quay.io/libpod/$image:latest,rolled back,local.*" "Rolled back to old image" + + # Make sure that new container is not using the new image ID anymore. + _confirm_update $cname $newID +} + +@test "podman auto-update with multiple services" { + # Preserve original image ID, to confirm that it changes (or not) + run_podman inspect --format "{{.Id}}" $IMAGE + local img_id="$output" + + local cnames=() + local -A expect_update + local -A will_update=([image]=1 [registry]=1 [local]=1) + + local fakevalue=fake_$(random_string) + for auto_update in image registry "" disabled "''" $fakevalue local + do + local img_base="alpine" + if [[ $auto_update == "registry" ]]; then + img_base="busybox" + elif [[ $auto_update == "local" ]]; then + img_base="localtest" + fi + generate_service $img_base $auto_update + cnames+=($cname) + if [[ $auto_update == "local" ]]; then + local_cname=$cname + fi + + if [[ -n "$auto_update" && -n "${will_update[$auto_update]}" ]]; then + expect_update[$cname]=1 + fi + done + + # Make sure all services are ready. + for cname in "${cnames[@]}"; do + _wait_service_ready container-$cname.service + done + run_podman commit --change CMD=/bin/bash $local_cname quay.io/libpod/localtest:latest + # Exit code is expected, due to invalid 'fakevalue' + run_podman 125 auto-update --rollback=false + update_log=$output + is "$update_log" ".*invalid auto-update policy.*" "invalid policy setup" + is "$update_log" ".*Error: invalid auto-update policy.*" "invalid policy setup" + + local n_updated=$(grep -c 'Trying to pull' <<<"$update_log") + is "$n_updated" "2" "Number of images updated from registry." 
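+    # (Only the 'image' and 'registry' policies pull from a registry; the
+    # 'local' policy picks up the image committed above without pulling,
+    # hence exactly two 'Trying to pull' lines.)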
+ + for cname in "${!expect_update[@]}"; do + is "$update_log" ".*$cname.*" "container with auto-update policy image updated" + # Just because podman says it fetched, doesn't mean it actually updated + _confirm_update $cname $img_id + done + + # Final confirmation that all image IDs have/haven't changed + for cname in "${cnames[@]}"; do + run_podman inspect --format "{{.Image}}" $cname + if [[ -n "${expect_update[$cname]}" ]]; then + assert "$output" != "$img_id" "$cname: image ID did not change" + else + assert "$output" = "$img_id" "Image ID should not be changed." + fi + done +} + +@test "podman auto-update using systemd" { + # FIXME: The rootless user belongs to systemd-journal, but this still fails + if is_rhel_or_centos; then + skip_if_rootless + fi + + generate_service alpine image + + cat >$UNIT_DIR/podman-auto-update-$cname.timer <$UNIT_DIR/podman-auto-update-$cname.service <> $SNAME_FILE + systemctl enable --now podman-auto-update-$cname.timer + systemctl list-timers --all + + # systemd <245 displays 'Started Podman auto-update ...' + # systemd 245 - <250 displays 'Finished Podman auto-update ...' + # systemd 250 - ???? displays 'Finished - Podman auto-...' + local expect='(Started|Finished.*) Podman auto-update testing service' + local failed_start=failed + local count=0 + while [ $count -lt 120 ]; do + run journalctl -n 15 -u podman-auto-update-$cname.service + if [[ "$output" =~ $expect ]]; then + failed_start= + break + fi + ((count+=1)) + sleep 1 + done + + if [[ -n "$failed_start" ]]; then + echo "journalctl output:" + sed -e 's/^/ /' <<<"$output" + die "Did not find expected string '$expect' in journalctl output for $cname" + fi + + _confirm_update $cname $ori_image +} + +@test "podman-kube@.service template with rollback" { + # sdnotify fails with runc 1.0.0-3-dev2 on Ubuntu. Let's just + # assume that we work only with crun, nothing else. + # [copied from 260-sdnotify.bats] + runtime=$(podman_runtime) + if [[ "$runtime" != "crun" ]]; then + skip "this test only works with crun, not $runtime" + fi + + _prefetch $SYSTEMD_IMAGE + install_kube_template + + dockerfile1=$PODMAN_TMPDIR/Dockerfile.1 + cat >$dockerfile1 <> /runme +RUN chmod +x /runme +EOF + + dockerfile2=$PODMAN_TMPDIR/Dockerfile.2 + cat >$dockerfile2 <> /runme +RUN chmod +x /runme +EOF + local_image=localhost/image:$(random_string 10) + + # Generate a healthy image that will run correctly. + run_podman build -t $local_image -f $dockerfile1 + run_podman image inspect --format "{{.ID}}" $local_image + oldID="$output" + + # Create the YAMl file + yaml_source="$PODMAN_TMPDIR/test.yaml" + cat >$yaml_source <$dockerfile <cname;echo READY;while :;do cat /proc/uptime >mydate.tmp;mv -f mydate.tmp mydate;sleep 0.1;done" + local cid="$output" + _PODMAN_TEST_OPTS="$p_opts" wait_for_ready $cid + + # Confirm that container responds + run curl --max-time 3 -s $server/cname + is "$output" "$cname" "curl $server/cname" + run curl --max-time 3 -s $server/mydate + local date_oldroot="$output" + + # Checkpoint... + run_podman $p_opts container checkpoint \ + --ignore-rootfs \ + --export=$PODMAN_TMPDIR/$cname.tar.gz \ + $cname + + # ...confirm that port is now closed + run curl --max-time 1 -s $server/mydate + is "$status" "7" "cannot connect to port $host_port while container is down" + + # ...now restore it to our regular root + run_podman container restore --import=$PODMAN_TMPDIR/$cname.tar.gz + is "$output" "$cid" + + # Inspect (on regular root). Note that, unlike the basic test above, + # .State.Checkpointed here is *false*. 
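+    # (Restoring put the container back into the running state, which
+    # clears the Checkpointed flag that the basic test saw set while the
+    # container was sitting checkpointed.)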
+ run_podman container inspect \ + --format '{{.State.Status}}:{{.State.Running}}:{{.State.Paused}}:{{.State.Checkpointed}}' $cname + is "$output" "running:true:false:false" "State. Status:Running:Pause:Checkpointed" + + # Pause a moment to let the restarted container update the timestamp file + sleep .3 + run curl --max-time 3 -s $server/mydate + local date_newroot="$output" + assert "$date_newroot" != "$date_oldroot" \ + "Restored container did not update the timestamp file" + + run_podman exec $cid cat /myvol/cname + is "$output" "$cname" "volume transferred fine" + + run_podman rm -t 0 -f $cid + run_podman volume rm -f $volname +} + +# FIXME: test --leave-running + +# bats test_tags=ci:parallel +@test "podman checkpoint --file-locks" { + action='flock test.lock sh -c "while [ -e /wait ];do sleep 0.5;done;for i in 1 2 3;do echo \$i;sleep 0.5;done"' + run_podman run -d $IMAGE sh -c "touch /wait; touch test.lock; echo READY; $action & $action & wait" + local cid="$output" + + # Wait for container to start emitting output + wait_for_ready $cid + + # Checkpoint, and confirm via inspect + run_podman container checkpoint --file-locks $cid + is "$output" "$cid" "podman container checkpoint" + + run_podman container inspect \ + --format '{{.State.Status}}:{{.State.Running}}:{{.State.Paused}}:{{.State.Checkpointed}}' $cid + is "$output" "exited:false:false:true" "State. Status:Running:Pause:Checkpointed" + + # Restart immediately and confirm state + run_podman container restore --file-locks $cid + is "$output" "$cid" "podman container restore" + + # Signal the container to continue; this is where the 1-2-3s will come from + # The '-d' is because container exit is racy: the exec process itself + # could get caught and killed by cleanup, causing this step to exit 137 + run_podman exec -d $cid rm /wait + + # Wait for the container to stop + run_podman wait $cid + + run_podman logs $cid + trim=$(sed -z -e 's/[\r\n]\+//g' <<<"$output") + is "$trim" "READY123123" "File lock restored" + + run_podman rm $cid +} + +# bats test_tags=ci:parallel +@test "podman checkpoint/restore ip and mac handling" { + # Refer to https://github.com/containers/podman/issues/16666#issuecomment-1337860545 + # for the correct behavior, this should cover all cases listed there. 
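+    # In short: an in-place restore or an --import of an exported archive
+    # keeps the original ip/mac; a container restart, --ignore-static-ip/mac,
+    # or restoring under a new --name must hand out fresh ones.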
+ local netname="net-$(safename)" + local subnet="$(random_rfc1918_subnet)" + run_podman network create --subnet "$subnet.0/24" $netname + + run_podman run -d --network $netname $IMAGE top + cid="$output" + # get current ip and mac + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}" + ip1="$output" + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}" + mac1="$output" + + run_podman exec $cid cat /etc/hosts /etc/resolv.conf + pre_hosts_resolv_conf_output="$output" + + run_podman container checkpoint $cid + is "$output" "$cid" + run_podman container restore $cid + is "$output" "$cid" + + # now get mac and ip after restore they should be the same + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}" + ip2="$output" + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}" + mac2="$output" + + # Make sure hosts and resolv.conf are the same after restore (#22901) + run_podman exec $cid cat /etc/hosts /etc/resolv.conf + assert "$output" == "$pre_hosts_resolv_conf_output" "hosts/resolv.conf must be the same after checkpoint" + + assert "$ip2" == "$ip1" "ip after restore should match" + assert "$mac2" == "$mac1" "mac after restore should match" + + # restart the container we should get a new ip/mac because they are not static + run_podman restart $cid + + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}" + ip3="$output" + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}" + mac3="$output" + + # the ip/mac should be different this time + assert "$ip3" != "$ip1" "ip after restart should be different" + assert "$mac3" != "$mac1" "mac after restart should be different" + + # restore with --ignore-static-ip/mac + run_podman container checkpoint $cid + is "$output" "$cid" + run_podman container restore --ignore-static-ip --ignore-static-mac $cid + is "$output" "$cid" + + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}" + ip4="$output" + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}" + mac4="$output" + + # the ip/mac should be different this time + assert "$ip4" != "$ip3" "ip after restore --ignore-static-ip should be different" + assert "$mac4" != "$mac3" "mac after restore --ignore-static-mac should be different" + + local archive=$PODMAN_TMPDIR/checkpoint.tar.gz + + # now checkpoint and export the container + run_podman container checkpoint --export "$archive" $cid + is "$output" "$cid" + # remove container + run_podman rm -t 0 -f $cid + + # restore it without new name should keep the ip/mac, we also get a new container id + run_podman container restore --import "$archive" + cid="$output" + + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}" + ip5="$output" + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}" + mac5="$output" + assert "$ip5" == "$ip4" "ip after restore --import should match" + assert "$mac5" == "$mac4" "mac after restore --import should match" + + run_podman rm -t 0 -f $cid + + # now restore it again but with --name this time, it should not keep the + # mac and ip to allow restoring the same container with different names + # at the same time + newname="newc-$(safename)" + run_podman container restore --import "$archive" --name 
$newname + cid="$output" + + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}" + ip6="$output" + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}" + mac6="$output" + assert "$ip6" != "$ip5" "ip after restore --import --name should be different" + assert "$mac6" != "$mac5" "mac after restore --import --name should be different" + + run_podman rm -t 0 -f $cid + + # now create a container with a static mac and ip + local static_ip="$subnet.2" + local static_mac="92:d0:c6:0a:29:38" + run_podman run -d --network "$netname:ip=$static_ip,mac=$static_mac" $IMAGE top + cid="$output" + + run_podman container checkpoint $cid + is "$output" "$cid" + run_podman container restore --ignore-static-ip --ignore-static-mac $cid + is "$output" "$cid" + + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}" + ip7="$output" + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}" + mac7="$output" + assert "$ip7" != "$static_ip" "static ip after restore --ignore-static-ip should be different" + assert "$mac7" != "$static_mac" "static mac after restore --ignore-static-mac should be different" + + # restart the container to make sure the change is actually persistent in the config and not just set for restore + run_podman restart $cid + + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}" + ip8="$output" + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}" + mac8="$output" + assert "$ip8" != "$static_ip" "static ip after restore --ignore-static-ip and restart should be different" + assert "$mac8" != "$static_mac" "static mac after restore --ignore-static-mac and restart should be different" + assert "$ip8" != "$ip7" "static ip after restore --ignore-static-ip and restart should be different" + assert "$mac8" != "$ip" "static mac after restore --ignore-static-mac and restart should be different" + + run_podman rm -t 0 -f $cid + + # now create container again and try the same again with --export and --import + run_podman run -d --network "$netname:ip=$static_ip,mac=$static_mac" $IMAGE top + cid="$output" + + run_podman container checkpoint --export "$archive" $cid + is "$output" "$cid" + # remove container + run_podman rm -t 0 -f $cid + + # restore normal should keep static ip + run_podman container restore --import "$archive" + cid="$output" + + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}" + ip9="$output" + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}" + mac9="$output" + assert "$ip9" == "$static_ip" "static ip after restore --import should match" + assert "$mac9" == "$static_mac" "static mac after restore --import should match" + + # restart the container to make sure the change is actually persistent in the config and not just set for restore + run_podman restart $cid + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}" + ip10="$output" + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}" + mac10="$output" + assert "$ip10" == "$static_ip" "static ip after restore --import and restart should match" + assert "$mac10" == "$static_mac" "static mac after restore --import and restart should match" + + run_podman rm -t 0 -f $cid + + # restore normal without keeping 
static ip/mac + run_podman container restore --ignore-static-ip --ignore-static-mac --import "$archive" + cid="$output" + + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}" + ip11="$output" + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}" + mac11="$output" + assert "$ip11" != "$static_ip" "static ip after restore --import --ignore-static-ip should be different" + assert "$mac11" != "$static_mac" "static mac after restore --import --ignore-static-mac should be different" + + # restart the container to make sure the change is actually persistent in the config and not just set for restore + run_podman restart $cid + + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}" + ip12="$output" + run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}" + mac12="$output" + assert "$ip12" != "$static_ip" "static ip after restore --import --ignore-static-ip and restart should be different" + assert "$mac12" != "$static_mac" "static mac after restore --ignore-static-mac and restart should be different" + assert "$ip12" != "$ip11" "static ip after restore --import --ignore-static-ip and restart should be different" + assert "$mac12" != "$ip11" "static mac after restore --ignore-static-mac and restart should be different" + + run_podman rm -t 0 -f $cid + run_podman network rm $netname +} + +# rhbz#2177611 : podman breaks checkpoint/restore +# CANNOT BE PARALLELIZED: --latest +@test "podman checkpoint/restore the latest container" { + skip_if_remote "podman-remote does not support --latest option" + # checkpoint/restore -l must print the IDs + run_podman run -d $IMAGE top + ctrID="$output" + run_podman container checkpoint --latest + is "$output" "$ctrID" + + run_podman container inspect \ + --format '{{.State.Status}}:{{.State.Running}}:{{.State.Paused}}:{{.State.Checkpointed}}' $ctrID + is "$output" "exited:false:false:true" "State. Status:Running:Pause:Checkpointed" + + run_podman container restore -l + is "$output" "$ctrID" + + run_podman container inspect \ + --format '{{.State.Status}}:{{.State.Running}}:{{.State.Paused}}:{{.State.Checkpointed}}' $ctrID + is "$output" "running:true:false:false" "State. Status:Running:Pause:Checkpointed" + + run_podman rm -t 0 -f $ctrID +} + +# vim: filetype=sh diff --git a/helpers.bash b/helpers.bash new file mode 100644 index 0000000..a186629 --- /dev/null +++ b/helpers.bash @@ -0,0 +1,1375 @@ +# -*- bash -*- + +# Podman command to run; may be podman-remote +PODMAN=${PODMAN:-podman} +QUADLET=${QUADLET:-/usr/libexec/podman/quadlet} + +# Podman testing helper used in 331-system-check tests +PODMAN_TESTING=${PODMAN_TESTING:-$(dirname ${BASH_SOURCE})/../../bin/podman-testing} + +# crun or runc, unlikely to change. Cache, because it's expensive to determine. +PODMAN_RUNTIME= + +# Standard image to use for most tests +PODMAN_TEST_IMAGE_REGISTRY=${PODMAN_TEST_IMAGE_REGISTRY:-"quay.io"} +PODMAN_TEST_IMAGE_USER=${PODMAN_TEST_IMAGE_USER:-"libpod"} +PODMAN_TEST_IMAGE_NAME=${PODMAN_TEST_IMAGE_NAME:-"testimage"} +PODMAN_TEST_IMAGE_TAG=${PODMAN_TEST_IMAGE_TAG:-"20241011"} +PODMAN_TEST_IMAGE_FQN="$PODMAN_TEST_IMAGE_REGISTRY/$PODMAN_TEST_IMAGE_USER/$PODMAN_TEST_IMAGE_NAME:$PODMAN_TEST_IMAGE_TAG" + +# Larger image containing systemd tools. 
+PODMAN_SYSTEMD_IMAGE_NAME=${PODMAN_SYSTEMD_IMAGE_NAME:-"systemd-image"} +PODMAN_SYSTEMD_IMAGE_TAG=${PODMAN_SYSTEMD_IMAGE_TAG:-"20240124"} +PODMAN_SYSTEMD_IMAGE_FQN="$PODMAN_TEST_IMAGE_REGISTRY/$PODMAN_TEST_IMAGE_USER/$PODMAN_SYSTEMD_IMAGE_NAME:$PODMAN_SYSTEMD_IMAGE_TAG" + +# Remote image that we *DO NOT* fetch or keep by default; used for testing pull +# This has changed in 2021, from 0 through 3, various iterations of getting +# multiarch to work. It should change only very rarely. +PODMAN_NONLOCAL_IMAGE_TAG=${PODMAN_NONLOCAL_IMAGE_TAG:-"00000004"} +PODMAN_NONLOCAL_IMAGE_FQN="$PODMAN_TEST_IMAGE_REGISTRY/$PODMAN_TEST_IMAGE_USER/$PODMAN_TEST_IMAGE_NAME:$PODMAN_NONLOCAL_IMAGE_TAG" + +# Because who wants to spell that out each time? +IMAGE=$PODMAN_TEST_IMAGE_FQN +SYSTEMD_IMAGE=$PODMAN_SYSTEMD_IMAGE_FQN + +# Default timeout for a podman command. +PODMAN_TIMEOUT=${PODMAN_TIMEOUT:-120} + +# Prompt to display when logging podman commands; distinguish root/rootless +_LOG_PROMPT='$' +if [ $(id -u) -eq 0 ]; then + _LOG_PROMPT='#' +fi + +# Invocations via su may not set this. Although all container tools make +# an effort to determine a default if unset, there are corner cases (rootless +# namespace preservation) that run before the default is set. +# For purposes of system tests (CI, gating, OpenQA) we force a default early. +# As of September 2024 we no longer test the default-setting logic in the +# tools. +if [[ -z "$XDG_RUNTIME_DIR" ]] && [[ "$(id -u)" -ne 0 ]]; then + export XDG_RUNTIME_DIR=/run/user/$(id -u) +fi + +# Used in helpers.network, needed here in teardown +PORT_LOCK_DIR=$BATS_SUITE_TMPDIR/reserved-ports + +############################################################################### +# BEGIN tools for fetching & caching test images +# +# Registries are flaky: any time we have to pull an image, that's a risk. +# + +# Store in a semipermanent location. Not important for CI, but nice for +# developers so test restarts don't hang fetching images. +export PODMAN_IMAGECACHE=${BATS_TMPDIR:-/tmp}/podman-systest-imagecache-$(id -u) +mkdir -p ${PODMAN_IMAGECACHE} + +function _prefetch() { + local want=$1 + + # Do we already have it in image store? + run_podman '?' image exists "$want" + if [[ $status -eq 0 ]]; then + return + fi + + # No image. Do we have it already cached? (Replace / and : with --) + local cachename=$(sed -e 's;[/:];--;g' <<<"$want") + local cachepath="${PODMAN_IMAGECACHE}/${cachename}.tar" + if [[ ! -e "$cachepath" ]]; then + # Not cached. Fetch it and cache it. Retry twice, because of flakes. + cmd="skopeo copy --preserve-digests docker://$want oci-archive:$cachepath" + echo "$_LOG_PROMPT $cmd" + run $cmd + echo "$output" + if [[ $status -ne 0 ]]; then + echo "# 'pull $want' failed, will retry..." >&3 + sleep 5 + + run $cmd + echo "$output" + if [[ $status -ne 0 ]]; then + echo "# 'pull $want' failed again, will retry one last time..." >&3 + sleep 30 + $cmd + fi + fi + fi + + # Kludge alert. + # Skopeo has no --storage-driver, --root, or --runroot flags; those + # need to be expressed in the destination string inside [brackets]. + # See containers-transports(5). So if we see those options in + # _PODMAN_TEST_OPTS, transmogrify $want into skopeo form. 
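+    # (Hypothetical example: '--storage-driver vfs --root /tmp/root --runroot
+    # /tmp/runroot' becomes 'containers-storage:[vfs@/tmp/root+/tmp/runroot]$want';
+    # see the containers-storage transport in containers-transports(5).)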
+ skopeo_opts='' + driver="$(expr "$_PODMAN_TEST_OPTS" : ".*--storage-driver \([^ ]\+\)" || true)" + if [[ -n "$driver" ]]; then + skopeo_opts+="$driver@" + fi + + altroot="$(expr "$_PODMAN_TEST_OPTS" : ".*--root \([^ ]\+\)" || true)" + if [[ -n "$altroot" ]] && [[ -d "$altroot" ]]; then + skopeo_opts+="$altroot" + + altrunroot="$(expr "$_PODMAN_TEST_OPTS" : ".*--runroot \([^ ]\+\)" || true)" + if [[ -n "$altrunroot" ]] && [[ -d "$altrunroot" ]]; then + skopeo_opts+="+$altrunroot" + fi + fi + + if [[ -n "$skopeo_opts" ]]; then + want="[$skopeo_opts]$want" + fi + + # Cached image is now guaranteed to exist. Be sure to load it + # with skopeo, not podman, in order to preserve metadata + cmd="skopeo copy --all oci-archive:$cachepath containers-storage:$want" + echo "$_LOG_PROMPT $cmd" + $cmd +} + +# END tools for fetching & caching test images +############################################################################### +# BEGIN setup/teardown tools + +# Provide common setup and teardown functions, but do not name them such! +# That way individual tests can override with their own setup/teardown, +# while retaining the ability to include these if they so desire. + +# Setup helper: establish a test environment with exactly the images needed +function basic_setup() { + # Temporary subdirectory, in which tests can write whatever they like + # and trust that it'll be deleted on cleanup. + # (BATS v1.3 and above provide $BATS_TEST_TMPDIR, but we still use + # ancient BATS (v1.1) in RHEL gating tests.) + PODMAN_TMPDIR=$(mktemp -d --tmpdir=${BATS_TMPDIR:-/tmp} podman_bats.XXXXXX) + + # runtime is not likely to change + if [[ -z "$PODMAN_RUNTIME" ]]; then + PODMAN_RUNTIME=$(podman_runtime) + fi + + # In the unlikely event that a test runs is() before a run_podman() + MOST_RECENT_PODMAN_COMMAND= + + # Test filenames must match ###-name.bats; use "[###] " as prefix + run expr "$BATS_TEST_FILENAME" : "^.*/\([0-9]\{3\}\)-[^/]\+\.bats\$" + # If parallel, use |nnn|. Serial, [nnn] + if [[ -n "$PARALLEL_JOBSLOT" ]]; then + BATS_TEST_NAME_PREFIX="|${output}| " + else + BATS_TEST_NAME_PREFIX="[${output}] " + fi + + # By default, assert() and die() cause an immediate test failure. + # Under special circumstances (usually long test loops), tests + # can call defer-assertion-failures() to continue going, the + # idea being that a large number of failures can show patterns. + ASSERTION_FAILURES= + immediate-assertion-failures +} + +# bail-now is how we terminate a test upon assertion failure. +# By default, and the vast majority of the time, it just triggers +# immediate test termination; but see defer-assertion-failures, below. +function bail-now() { + # "false" does not apply to "bail now"! It means "nonzero exit", + # which BATS interprets as "yes, bail immediately". + false +} + +# Invoked on teardown: will terminate immediately if there have been +# any deferred test failures; otherwise will reset back to immediate +# test termination on future assertions. +function immediate-assertion-failures() { + function bail-now() { + false + } + + # Any backlog? + if [[ -n "$ASSERTION_FAILURES" ]]; then + local n=${#ASSERTION_FAILURES} + ASSERTION_FAILURES= + die "$n test assertions failed. Search for 'FAIL:' above this line." >&2 + fi +} + +# Used in special test circumstances--typically multi-condition loops--to +# continue going even on assertion failures. The test will fail afterward, +# usually in teardown. This can be useful to show failure patterns. 
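+#
+# Minimal usage sketch (the loop body is hypothetical):
+#
+#     defer-assertion-failures
+#     for i in 1 2 3; do
+#         run_podman run --rm $IMAGE echo hi$i
+#         is "$output" "hi$i" "output from container $i"
+#     done
+#     immediate-assertion-failures    # now fail if any iteration failed
+#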
+function defer-assertion-failures() {
+    function bail-now() {
+        ASSERTION_FAILURES+="!"
+    }
+}
+
+# Basic teardown: remove all pods and containers
+function basic_teardown() {
+    echo "# [teardown]" >&2
+
+    # Free any ports reserved by our test
+    if [[ -d $PORT_LOCK_DIR ]]; then
+        mylocks=$(grep -wlr $BATS_SUITE_TEST_NUMBER $PORT_LOCK_DIR || true)
+        if [[ -n "$mylocks" ]]; then
+            rm -f $mylocks
+        fi
+    fi
+
+    immediate-assertion-failures
+    # Unlike in a normal test, teardown does not exit on the first failed
+    # command; only the return code of the whole teardown function matters.
+    # This line must come directly after immediate-assertion-failures so we
+    # capture its exit code.
+    local exit_code=$?
+
+    # Leak check and state reset. Only run these when running tests serially!
+    # (For parallel tests, we run a leak check only at the very end of all tests)
+    if [[ -z "$PARALLEL_JOBSLOT" ]]; then
+        # Check for leaks, but only if:
+        #  1) test was successful (BATS_TEST_COMPLETED is set to 1); and
+        #  2) immediate-assertion-failures didn't fail (exit_code -eq 0); and
+        #  3) PODMAN_BATS_LEAK_CHECK is set (usually only in cron).
+        # As these podman commands are slow we do not want to do this by default
+        # and only provide this as an opt-in option. (#22909)
+        if [[ "$BATS_TEST_COMPLETED" -eq 1 ]] && [[ $exit_code -eq 0 ]] && [[ -n "$PODMAN_BATS_LEAK_CHECK" ]]; then
+            leak_check
+            exit_code=$((exit_code + $?))
+        fi
+
+        # Some error happened (either in teardown itself or the actual test failed)
+        # so do a full cleanup to ensure following tests start with a clean env.
+        if [ $exit_code -gt 0 ] || [ -z "$BATS_TEST_COMPLETED" ]; then
+            clean_setup
+            exit_code=$((exit_code + $?))
+        fi
+    fi
+
+    # Status file used in teardown_suite() to decide whether or not
+    # to check for leaks
+    if [[ "$BATS_TEST_COMPLETED" -ne 1 ]]; then
+        rm -f "$BATS_SUITE_TMPDIR/all-tests-passed"
+    fi
+
+    command rm -rf $PODMAN_TMPDIR
+    exit_code=$((exit_code + $?))
+    return $exit_code
+}
+
+
+# Provide the above as default methods.
+function setup() {
+    basic_setup
+}
+
+function teardown() {
+    basic_teardown
+}
+
+
+# Helpers useful for tests running rmi
+function archive_image() {
+    local image=$1
+
+    # FIXME: refactor?
+    archive_basename=$(echo $1 | tr -c a-zA-Z0-9._- _)
+    archive=$BATS_TMPDIR/$archive_basename.tar
+
+    run_podman save -o $archive $image
+}
+
+function restore_image() {
+    local image=$1
+
+    archive_basename=$(echo $1 | tr -c a-zA-Z0-9._- _)
+    archive=$BATS_TMPDIR/$archive_basename.tar
+
+    # 'load', not 'restore': this is the inverse of archive_image() above
+    run_podman load -i $archive
+}
+
+#######################
+#  _run_podman_quiet  #  Helper for leak_check. Runs podman with no logging
+#######################
+function _run_podman_quiet() {
+    # This should be the same as what run_podman() does.
+    run timeout -v --foreground --kill=10 60 $PODMAN $_PODMAN_TEST_OPTS "$@"
+    if [[ $status -ne 0 ]]; then
+        echo "# Error running command: podman $*"
+        echo "$output"
+        exit_code=$((exit_code + 1))
+    fi
+}
+
+#####################
+#  _leak_check_one  #  Helper for leak_check: shows leaked artifacts
+#####################
+#
+# NOTE: plays fast & loose with variables!
Reads $output, updates $exit_code +# +function _leak_check_one() { + local what="$1" + + # Shown the first time we see a stray of this kind + separator="vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv +" + + while read line; do + if [[ -n "$line" ]]; then + echo "${separator}*** Leaked $what: $line" + separator="" + exit_code=$((exit_code + 1)) + fi + done <<<"$output" +} + +################ +# leak_check # Look for, and warn about, stray artifacts +################ +# +# Runs on test failure, or at end of all tests, or when PODMAN_BATS_LEAK_CHECK=1 +# +# Note that all ps/ls commands specify a format where something useful +# (ID or name) is in the first column. This is not important today +# (July 2024) but may be useful one day: a future PR may run bats +# with --gather-test-outputs-in, which preserves logs of all tests. +# Why not today? Because that option is still buggy: (1) we need +# bats-1.11 to fix a more-than-one-slash-in-test-name bug, (2) as +# of July 2024 that option only copies logs of *completed* tests +# to the directory, so currently-running tests (the one running +# teardown, or, in parallel mode, any other running tests) are +# not seen. This renders that option less useful, and not worth +# bothering with at the moment. But please leave ID-or-name as +# the first column anyway because things may change and it's +# a reasonable format anyway. +# +function leak_check() { + local exit_code=0 + + # Volumes. + _run_podman_quiet volume ls --format '{{.Name}} {{.Driver}}' + _leak_check_one "volume" + + # Networks. "podman" and "podman-default-kube-network" are OK. + _run_podman_quiet network ls --noheading + output=$(grep -ve "^[0-9a-z]\{12\} *podman\(-default-kube-network\)\? *bridge\$" <<<"$output") + _leak_check_one "network" + + # Pods, containers, and external (buildah) containers. + _run_podman_quiet pod ls --format '{{.ID}} {{.Name}} status={{.Status}} ({{.NumberOfContainers}} containers)' + _leak_check_one "pod" + + _run_podman_quiet ps -a --format '{{.ID}} {{.Image}} {{.Names}} {{.Status}}' + _leak_check_one "container" + + _run_podman_quiet ps -a --external --filter=status=unknown --format '{{.ID}} {{.Image}} {{.Names}} {{.Status}}' + _leak_check_one "storage container" + + # Images. Exclude our standard expected images. + _run_podman_quiet images --all --format '{{.ID}} {{.Repository}}:{{.Tag}}' + output=$(awk "\$2 != \"$IMAGE\" && \$2 != \"$PODMAN_SYSTEMD_IMAGE_FQN\" && \$2 !~ \"localhost/podman-pause:\" { print }" <<<"$output") + _leak_check_one "image" + + return $exit_code +} + +function clean_setup() { + local actions=( + "pod rm -t 0 --all --force --ignore" + "rm -t 0 --all --force --ignore" + "network prune --force" + "volume rm -a -f" + ) + for action in "${actions[@]}"; do + _run_podman_quiet $action + + # The -f commands should never exit nonzero, but if they do we want + # to know about it. 
+        # FIXME: someday: also test for [[ -n "$output" ]] - can't do this
+        # yet because too many tests don't clean up their containers
+        if [[ $status -ne 0 ]]; then
+            echo "# [teardown] $_LOG_PROMPT podman $action" >&3
+            for line in "${lines[@]}"; do
+                echo "# $line" >&3
+            done
+
+            # Special case for timeout: check for locks (#18514)
+            if [[ $status -eq 124 ]]; then
+                echo "# [teardown] $_LOG_PROMPT podman system locks" >&3
+                run $PODMAN system locks
+                for line in "${lines[@]}"; do
+                    echo "# $line" >&3
+                done
+            fi
+        fi
+    done
+
+    # ...including external (buildah) ones
+    _run_podman_quiet ps --all --external --format '{{.ID}} {{.Names}}'
+    for line in "${lines[@]}"; do
+        set $line
+        echo "# setup(): removing stray external container $1 ($2)" >&3
+        run_podman '?' rm -f $1
+        if [[ $status -ne 0 ]]; then
+            echo "# [setup] $_LOG_PROMPT podman rm -f $1" >&3
+            for errline in "${lines[@]}"; do
+                echo "# $errline" >&3
+            done
+        fi
+    done
+
+    # Clean up all images except those desired.
+    # 2023-06-26 REMINDER: it is tempting to think that this is clunky,
+    # wouldn't it be safer/cleaner to just 'rmi -a' then '_prefetch $IMAGE'?
+    # Yes, but it's also tremendously slower: 29m for a CI run, to 39m.
+    # Image loads are slow.
+    found_needed_image=
+    _run_podman_quiet images --all --format '{{.Repository}}:{{.Tag}} {{.ID}}'
+
+    for line in "${lines[@]}"; do
+        set $line
+        if [[ "$1" == "$PODMAN_TEST_IMAGE_FQN" ]]; then
+            if [[ -z "$PODMAN_TEST_IMAGE_ID" ]]; then
+                # This will probably only trigger the 2nd time through setup
+                PODMAN_TEST_IMAGE_ID=$2
+            fi
+            found_needed_image=1
+        elif [[ "$1" == "$PODMAN_SYSTEMD_IMAGE_FQN" ]]; then
+            # This is a big image, don't force unnecessary pulls
+            :
+        else
+            # Always remove any image that doesn't match by name
+            echo "# setup(): removing stray image $1" >&3
+            _run_podman_quiet rmi --force "$1"
+
+            # Tagged image will have same IID as our test image; don't rmi it.
+            if [[ $2 != "$PODMAN_TEST_IMAGE_ID" ]]; then
+                echo "# setup(): removing stray image $2" >&3
+                _run_podman_quiet rmi --force "$2"
+            fi
+        fi
+    done
+
+    # Make sure desired image is present
+    if [[ -z "$found_needed_image" ]]; then
+        _prefetch $PODMAN_TEST_IMAGE_FQN
+    fi
+
+    # Load (create, actually) the pause image. This way, all pod tests will
+    # have it available. Without this, pod tests run in parallel will leave
+    # behind <none>:<none> images.
+    # FIXME: only do this when running parallel! Otherwise, we may break
+    # test expectations.
+    # SUB-FIXME: there's no actual way to tell if we're running bats
+    # in parallel (see bats-core#998). Use undocumented hack.
+    # FIXME: #23292 -- this should not be necessary.
+    if [[ -n "$BATS_SEMAPHORE_DIR" ]]; then
+        run_podman pod create mypod
+        run_podman pod rm mypod
+        # And now we have a pause image, and no test needs to
+        # build its own.
+    fi
+}
+
+# END setup/teardown tools
+###############################################################################
+# BEGIN podman helpers
+
+# Displays '[HH:MM:SS.NNNNN]' in command output. logformatter relies on this.
+function timestamp() {
+    date +'[%T.%N]'
+}
+
+################
+#  run_podman  #  Invoke $PODMAN, with timeout, using BATS 'run'
+################
+#
+# This is the preferred mechanism for invoking podman: first, it
+# invokes $PODMAN, which may be 'podman-remote' or '/some/path/podman'.
+#
+# Second, we use 'timeout' to abort (with a diagnostic) if something
+# takes too long; this is preferable to a CI hang.
+#
+# Third, we log the command run and its output. 
This doesn't normally +# appear in BATS output, but it will if there's an error. +# +# Next, we check exit status. Since the normal desired code is 0, +# that's the default; but the first argument can override: +# +# run_podman 125 nonexistent-subcommand +# run_podman '?' some-other-command # let our caller check status +# +# Since we use the BATS 'run' mechanism, $output and $status will be +# defined for our caller. +# +function run_podman() { + # Number as first argument = expected exit code; default 0 + # "0+[we]" = require success, but allow warnings/errors + local expected_rc=0 + local allowed_levels="dit" + case "$1" in + 0\+[we]*) allowed_levels+=$(expr "$1" : "^0+\([we]\+\)"); shift;; + [0-9]) expected_rc=$1; shift;; + [1-9][0-9]) expected_rc=$1; shift;; + [12][0-9][0-9]) expected_rc=$1; shift;; + '?') expected_rc= ; shift;; # ignore exit code + esac + + # Remember command args, for possible use in later diagnostic messages + MOST_RECENT_PODMAN_COMMAND="podman $*" + + # BATS >= 1.5.0 treats 127 as a special case, adding a big nasty warning + # at the end of the test run if any command exits thus. Silence it. + # https://bats-core.readthedocs.io/en/stable/warnings/BW01.html + local silence127= + if [[ "$expected_rc" = "127" ]]; then + # We could use "-127", but that would cause BATS to fail if the + # command exits any other status -- and default BATS failure messages + # are much less helpful than the run_podman ones. "!" is more flexible. + silence127="!" + fi + + # stdout is only emitted upon error; this printf is to help in debugging + printf "\n%s %s %s %s\n" "$(timestamp)" "$_LOG_PROMPT" "$PODMAN" "$*" + # BATS hangs if a subprocess remains and keeps FD 3 open; this happens + # if podman crashes unexpectedly without cleaning up subprocesses. + run $silence127 timeout --foreground -v --kill=10 $PODMAN_TIMEOUT $PODMAN $_PODMAN_TEST_OPTS "$@" 3>/dev/null + # without "quotes", multiple lines are glommed together into one + if [ -n "$output" ]; then + echo "$(timestamp) $output" + + # FIXME FIXME FIXME: instrumenting to track down #15488. Please + # remove once that's fixed. We include the args because, remember, + # bats only shows output on error; it's possible that the first + # instance of the metacopy warning happens in a test that doesn't + # check output, hence doesn't fail. + if [[ "$output" =~ Ignoring.global.metacopy.option ]]; then + echo "# YO! metacopy warning triggered by: podman $*" >&3 + fi + fi + if [ "$status" -ne 0 ]; then + echo -n "$(timestamp) [ rc=$status "; + if [ -n "$expected_rc" ]; then + if [ "$status" -eq "$expected_rc" ]; then + echo -n "(expected) "; + else + echo -n "(** EXPECTED $expected_rc **) "; + fi + fi + echo "]" + fi + + if [ "$status" -eq 124 ]; then + if expr "$output" : ".*timeout: sending" >/dev/null; then + # It's possible for a subtest to _want_ a timeout + if [[ "$expected_rc" != "124" ]]; then + echo "*** TIMED OUT ***" + false + fi + fi + fi + + if [ -n "$expected_rc" ]; then + if [ "$status" -ne "$expected_rc" ]; then + die "exit code is $status; expected $expected_rc" + fi + fi + + # Check for "level=" in output, because a successful command + # should never issue unwanted warnings or errors. The "0+w" convention + # (see top of function) allows our caller to indicate that warnings are + # expected, e.g., "podman stop" without -t0. + if [[ $status -eq 0 ]]; then + # FIXME: don't do this on Debian or RHEL. 
+        # runc is way too buggy:
+        #  - #11784 - lstat /sys/fs/.../*.scope: ENOENT
+        #  - #11785 - cannot toggle freezer: cgroups not configured
+        # As of January 2024 the freezer one seems to be fixed in Debian-runc
+        # but not in RHEL8-runc. The lstat one is closed-wontfix.
+        if [[ $PODMAN_RUNTIME != "runc" ]]; then
+            # FIXME: All kube commands emit unpredictable errors:
+            #    "Storage for container has been removed"
+            #    "no container with ID found in database"
+            # These are level=error but we still get exit-status 0.
+            # Just skip all kube commands completely
+            if [[ ! "$*" =~ kube ]]; then
+                if [[ "$output" =~ level=[^${allowed_levels}] ]]; then
+                    die "Command succeeded, but issued unexpected warnings"
+                fi
+            fi
+        fi
+    fi
+}
+
+function run_podman_testing() {
+    printf "\n%s %s %s %s\n" "$(timestamp)" "$_LOG_PROMPT" "$PODMAN_TESTING" "$*"
+    run $PODMAN_TESTING "$@"
+    if [[ $status -ne 0 ]]; then
+        echo "$output"
+        die "Unexpected error from testing helper, which should always succeed"
+    fi
+}
+
+# Wait for certain output from a container, indicating that it's ready.
+function wait_for_output {
+    local sleep_delay=1
+    local how_long=$PODMAN_TIMEOUT
+    local expect=
+    local cid=
+
+    # Arg processing. A single-digit number is how long to sleep between
+    # iterations; a 2- or 3-digit number is the total time to wait; all
+    # else are, in order, the string to expect and the container name/ID.
+    local i
+    for i in "$@"; do
+        if expr "$i" : '[0-9]\+$' >/dev/null; then
+            if [ $i -le 9 ]; then
+                sleep_delay=$i
+            else
+                how_long=$i
+            fi
+        elif [ -z "$expect" ]; then
+            expect=$i
+        else
+            cid=$i
+        fi
+    done
+
+    [ -n "$cid" ] || die "FATAL: wait_for_output: no container name/ID in '$*'"
+
+    t1=$(expr $SECONDS + $how_long)
+    while [ $SECONDS -lt $t1 ]; do
+        run_podman 0+w logs $cid
+        logs=$output
+        if expr "$logs" : ".*$expect" >/dev/null; then
+            return
+        fi
+
+        # Barf if container is not running
+        run_podman inspect --format '{{.State.Running}}' $cid
+        if [ "$output" != "true" ]; then
+            run_podman inspect --format '{{.State.ExitCode}}' $cid
+            exitcode=$output
+
+            # One last chance: maybe the container exited just after logs cmd
+            run_podman 0+w logs $cid
+            logs=$output
+            if expr "$logs" : ".*$expect" >/dev/null; then
+                return
+            fi
+
+            die "Container exited (status: $exitcode) before we saw '$expect': $logs"
+        fi
+
+        sleep $sleep_delay
+    done
+
+    die "timed out waiting for '$expect' from $cid"
+}
+
+# Shortcut for the lazy
+function wait_for_ready {
+    wait_for_output 'READY' "$@"
+}
+
+###################
+#  wait_for_file  #  Returns once file is available on host
+###################
+function wait_for_file() {
+    local file=$1                       # The path to the file
+    local _timeout=${2:-5}              # Optional; default 5 seconds
+
+    # Wait
+    while [ $_timeout -gt 0 ]; do
+        test -e $file && return
+        sleep 1
+        _timeout=$(( $_timeout - 1 ))
+    done
+
+    die "Timed out waiting for $file"
+}
+
+###########################
+#  wait_for_file_content  #  Like wait_for_output, but with files (not ctrs)
+###########################
+function wait_for_file_content() {
+    local file=$1                       # The path to the file
+    local content=$2                    # What to expect in the file
+    local _timeout=${3:-5}              # Optional; default 5 seconds
+
+    while :; do
+        grep -q "$content" "$file" && return
+
+        test $_timeout -gt 0 || die "Timed out waiting for '$content' in $file"
+
+        _timeout=$(( $_timeout - 1 ))
+        sleep 1
+
+        # For debugging. Note that file does not necessarily exist yet.
+ if [[ -e "$file" ]]; then + echo "[ wait_for_file_content: retrying wait for '$content' in: ]" + sed -e 's/^/[ /' -e 's/$/ ]/' <"$file" + else + echo "[ wait_for_file_content: $file does not exist (yet) ]" + fi + done +} + +# END podman helpers +############################################################################### +# BEGIN miscellaneous tools + +# Shortcuts for common needs: +function is_rootless() { + [ "$(id -u)" -ne 0 ] +} + +function is_remote() { + [[ "$PODMAN" =~ -remote ]] +} + +function is_cgroupsv1() { + # WARNING: This will break if there's ever a cgroups v3 + ! is_cgroupsv2 +} + +# True if cgroups v2 are enabled +function is_cgroupsv2() { + cgroup_type=$(stat -f -c %T /sys/fs/cgroup) + test "$cgroup_type" = "cgroup2fs" +} + +# True if podman is using netavark +function is_netavark() { + run_podman info --format '{{.Host.NetworkBackend}}' + if [[ "$output" =~ netavark ]]; then + return 0 + fi + return 1 +} + +function is_aarch64() { + [ "$(uname -m)" == "aarch64" ] +} + +function is_rhel_or_centos() { + [[ -f /etc/redhat-release ]] && grep -Eiq "Red Hat Enterprise Linux|CentOS Stream" /etc/redhat-release +} + +function selinux_enabled() { + /usr/sbin/selinuxenabled 2> /dev/null +} + +# Returns the OCI runtime *basename* (typically crun or runc). Much as we'd +# love to cache this result, we probably shouldn't. +function podman_runtime() { + # This function is intended to be used as '$(podman_runtime)', i.e. + # our caller wants our output. It's unsafe to use run_podman(). + runtime=$($PODMAN $_PODMAN_TEST_OPTS info --format '{{ .Host.OCIRuntime.Name }}' 2>/dev/null) + basename "${runtime:-[null]}" +} + +# Returns the storage driver: 'overlay' or 'vfs' +function podman_storage_driver() { + run_podman info --format '{{.Store.GraphDriverName}}' >/dev/null + # Should there ever be a new driver + case "$output" in + overlay) ;; + vfs) ;; + *) die "Unknown storage driver '$output'; if this is a new driver, please review uses of this function in tests." ;; + esac + echo "$output" +} + +# Given a (scratch) directory path, returns a set of command-line options +# for running an isolated podman that will not step on system podman. Set: +# - rootdir, so we don't clobber real images or storage; +# - tmpdir, so we use an isolated DB; and +# - runroot, out of an abundance of paranoia +function podman_isolation_opts() { + local path=${1?podman_isolation_opts: missing PATH arg} + + for opt in root runroot tmpdir;do + mkdir -p $path/$opt + echo " --$opt $path/$opt" + done +} + +# rhbz#1895105: rootless journald is unavailable except to users in +# certain magic groups; which our testuser account does not belong to +# (intentional: that is the RHEL default, so that's the setup we test). +function journald_unavailable() { + if ! is_rootless; then + # root must always have access to journal + return 1 + fi + + run journalctl -n 1 + if [[ $status -eq 0 ]]; then + return 1 + fi + + if [[ $output =~ permission ]]; then + return 0 + fi + + # This should never happen; if it does, it's likely that a subsequent + # test will fail. This output may help track that down. + echo "WEIRD: 'journalctl -n 1' failed with a non-permission error:" + echo "$output" + return 1 +} + +# Returns the name of the local pause image. +function pause_image() { + # This function is intended to be used as '$(pause_image)', i.e. + # our caller wants our output. run_podman() messes with output because + # it emits the command invocation to stdout, hence the redirection. 
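+    # Hypothetical example of use (not from any test in this file):
+    #     run_podman rmi $(pause_image)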
+    run_podman version --format "{{.Server.Version}}-{{.Server.Built}}" >/dev/null
+    echo "localhost/podman-pause:$output"
+}
+
+# Wait for the pod (1st arg) to transition into the state (2nd arg)
+function _ensure_pod_state() {
+    for i in {0..5}; do
+        run_podman pod inspect $1 --format "{{.State}}"
+        if [[ $output == "$2" ]]; then
+            return
+        fi
+        sleep 0.5
+    done
+
+    die "Timed out waiting for pod $1 to enter state $2"
+}
+
+# Wait for the container's (1st arg) running state (2nd arg)
+function _ensure_container_running() {
+    for i in {0..20}; do
+        run_podman container inspect $1 --format "{{.State.Running}}"
+        if [[ $output == "$2" ]]; then
+            return
+        fi
+        sleep 0.5
+    done
+
+    die "Timed out waiting for container $1 to enter state running=$2"
+}
+
+###########################
+#  _add_label_if_missing  #  make sure skip messages include rootless/remote
+###########################
+function _add_label_if_missing() {
+    local msg="$1"
+    local want="$2"
+
+    if [ -z "$msg" ]; then
+        echo
+    elif expr "$msg" : ".*$want" &>/dev/null; then
+        echo "$msg"
+    else
+        echo "[$want] $msg"
+    fi
+}
+
+######################
+#  skip_if_no_ssh  #  ...with an optional message
+######################
+function skip_if_no_ssh() {
+    if no_ssh; then
+        local msg=$(_add_label_if_missing "$1" "ssh")
+        skip "${msg:-not applicable with no ssh binary}"
+    fi
+}
+
+######################
+#  skip_if_rootless  #  ...with an optional message
+######################
+function skip_if_rootless() {
+    if is_rootless; then
+        local msg=$(_add_label_if_missing "$1" "rootless")
+        skip "${msg:-not applicable under rootless podman}"
+    fi
+}
+
+######################
+#  skip_if_not_rootless  #  ...with an optional message
+######################
+function skip_if_not_rootless() {
+    if ! is_rootless; then
+        local msg=$(_add_label_if_missing "$1" "rootful")
+        skip "${msg:-not applicable under rootful podman}"
+    fi
+}
+
+####################
+#  skip_if_remote  #  ...with an optional message
+####################
+function skip_if_remote() {
+    if is_remote; then
+        local msg=$(_add_label_if_missing "$1" "remote")
+        skip "${msg:-test does not work with podman-remote}"
+    fi
+}
+
+########################
+#  skip_if_no_selinux  #
+########################
+function skip_if_no_selinux() {
+    if [ ! -e /usr/sbin/selinuxenabled ]; then
+        skip "selinux not available"
+    elif ! /usr/sbin/selinuxenabled; then
+        skip "selinux disabled"
+    fi
+}
+
+#######################
+#  skip_if_cgroupsv1  #  ...with an optional message
+#######################
+function skip_if_cgroupsv1() {
+    if ! is_cgroupsv2; then
+        skip "${1:-test requires cgroupsv2}"
+    fi
+}
+
+#######################
+#  skip_if_cgroupsv2  #  ...with an optional message
+#######################
+function skip_if_cgroupsv2() {
+    if is_cgroupsv2; then
+        skip "${1:-test requires cgroupsv1}"
+    fi
+}
+
+######################
+#  skip_if_rootless_cgroupsv1  #  ...with an optional message
+######################
+function skip_if_rootless_cgroupsv1() {
+    if is_rootless; then
+        if ! is_cgroupsv2; then
+            local msg=$(_add_label_if_missing "$1" "rootless cgroupsv1")
+            skip "${msg:-not supported as rootless under cgroupsv1}"
+        fi
+    fi
+}
+
+##################################
+#  skip_if_journald_unavailable  #  rhbz#1895105: rootless journald permissions
+##################################
+function skip_if_journald_unavailable {
+    if journald_unavailable; then
+        skip "Cannot use rootless journald on this system"
+    fi
+}
+
+function skip_if_aarch64 {
+    if is_aarch64; then
+        skip "${1:-Cannot run this test on aarch64 systems}"
+    fi
+}
+
+function skip_if_rhel_or_centos {
+    if is_rhel_or_centos; then
+        skip "${1:-skip on RHEL and CentOS Stream}"
+    fi
+}
+
+#########
+#  die  #  Abort with helpful message
+#########
+function die() {
+    # FIXME: handle multi-line output
+    echo "#/vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv"  >&2
+    echo "#| FAIL: $*"                                           >&2
+    echo "#\\^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^" >&2
+    bail-now
+}
+
+############
+#  assert  #  Compare actual vs expected string; fail if mismatch
+############
+#
+# Compares string (default: $output) against the given string argument.
+# By default we do an exact-match comparison against $output, but there
+# are two different ways to invoke us, each with an optional description:
+#
+#      assert               "EXPECT" [DESCRIPTION]
+#      assert "RESULT" "OP" "EXPECT" [DESCRIPTION]
+#
+# The first form (one or two arguments) does an exact-match comparison
+# of "$output" against "EXPECT". The second (three or four args) compares
+# the first parameter against EXPECT, using the given OPerator. If present,
+# DESCRIPTION will be displayed on test failure.
+#
+# Examples:
+#
+#   assert "this is exactly what we expect"
+#   assert "${lines[0]}" =~ "^abc" "first line begins with abc"
+#
+function assert() {
+    local actual_string="$output"
+    local operator='=='
+    local expect_string="$1"
+    local testname="$2"
+
+    case "${#*}" in
+        0)   die "Internal error: 'assert' requires one or more arguments" ;;
+        1|2) ;;
+        3|4) actual_string="$1"
+             operator="$2"
+             expect_string="$3"
+             testname="$4"
+             ;;
+        *)   die "Internal error: too many arguments to 'assert'" ;;
+    esac
+
+    # Comparisons.
+    # Special case: there is no !~ operator, so fake it via '! x =~ y'
+    local not=
+    local actual_op="$operator"
+    if [[ $operator == '!~' ]]; then
+        not='!'
+        actual_op='=~'
+    fi
+    if [[ $operator == '=' || $operator == '==' ]]; then
+        # Special case: we can't use '=' or '==' inside [[ ... ]] because
+        # the right-hand side is treated as a pattern... and '[xy]' will
+        # not compare literally. There seems to be no way to turn that off.
+        if [ "$actual_string" = "$expect_string" ]; then
+            return
+        fi
+    elif [[ $operator == '!=' ]]; then
+        # Same special case as above
+        if [ "$actual_string" != "$expect_string" ]; then
+            return
+        fi
+    else
+        if eval "[[ $not \$actual_string $actual_op \$expect_string ]]"; then
+            return
+        elif [ $? -gt 1 ]; then
+            die "Internal error: could not process 'actual' $operator 'expect'"
+        fi
+    fi
+
+    # Test has failed. Get a descriptive test name.
+    if [ -z "$testname" ]; then
+        testname="${MOST_RECENT_PODMAN_COMMAND:-[no test name given]}"
+    fi
+
+    # Display optimization: the typical case for 'expect' is an
+    # exact match ('='), but there are also '=~' or '!~' or '-ge'
+    # and the like. Omit the '=' but show the others; and always
+    # align subsequent output lines for ease of comparison.
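+    # For instance (illustrative values), a failing
+    #     assert "$output" =~ "foo"
+    # reports something like:
+    #     #| expected: =~ foo
+    #     #| actual:      bar
+    # with the actual value indented to line up under the expected one.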
+ local op='' + local ws='' + if [ "$operator" != '==' ]; then + op="$operator " + ws=$(printf "%*s" ${#op} "") + fi + + # This is a multi-line message, which may in turn contain multi-line + # output, so let's format it ourself to make it more readable. + local expect_split + mapfile -t expect_split <<<"$expect_string" + local actual_split + mapfile -t actual_split <<<"$actual_string" + + # bash %q is really nice, except for the way it backslashes spaces + local -a expect_split_q + for line in "${expect_split[@]}"; do + local q=$(printf "%q" "$line" | sed -e 's/\\ / /g') + expect_split_q+=("$q") + done + local -a actual_split_q + for line in "${actual_split[@]}"; do + local q=$(printf "%q" "$line" | sed -e 's/\\ / /g') + actual_split_q+=("$q") + done + + printf "#/vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n" >&2 + printf "#| FAIL: %s\n" "$testname" >&2 + printf "#| expected: %s%s\n" "$op" "${expect_split_q[0]}" >&2 + local line + for line in "${expect_split_q[@]:1}"; do + printf "#| > %s%s\n" "$ws" "$line" >&2 + done + printf "#| actual: %s%s\n" "$ws" "${actual_split_q[0]}" >&2 + for line in "${actual_split_q[@]:1}"; do + printf "#| > %s%s\n" "$ws" "$line" >&2 + done + printf "#\\^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n" >&2 + bail-now +} + +######## +# is # **DEPRECATED**; see assert() above +######## +function is() { + local actual="$1" + local expect="$2" + local testname="${3:-${MOST_RECENT_PODMAN_COMMAND:-[no test name given]}}" + + local is_expr= + if [ -z "$expect" ]; then + if [ -z "$actual" ]; then + # Both strings are empty. + return + fi + expect='[no output]' + elif [[ "$actual" = "$expect" ]]; then + # Strings are identical. + return + else + # Strings are not identical. Are there wild cards in our expect string? + if expr "$expect" : ".*[^\\][\*\[]" >/dev/null; then + # There is a '[' or '*' without a preceding backslash. + is_expr=' (using expr)' + elif [[ "${expect:0:1}" = '[' ]]; then + # String starts with '[', e.g. checking seconds like '[345]' + is_expr=' (using expr)' + fi + if [[ -n "$is_expr" ]]; then + if expr "$actual" : "$expect" >/dev/null; then + return + fi + fi + fi + + # This is a multi-line message, which may in turn contain multi-line + # output, so let's format it ourself to make it more readable. 
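+    # (readarray -t is a bash synonym for mapfile -t: it splits $actual
+    # into an array of lines)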
+ local -a actual_split + readarray -t actual_split <<<"$actual" + printf "#/vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n" >&2 + printf "#| FAIL: $testname\n" >&2 + printf "#| expected: '%s'%s\n" "$expect" "$is_expr" >&2 + printf "#| actual: '%s'\n" "${actual_split[0]}" >&2 + local line + for line in "${actual_split[@]:1}"; do + printf "#| > '%s'\n" "$line" >&2 + done + printf "#\\^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n" >&2 + bail-now +} + +#################### +# allow_warnings # check cmd output for warning messages other than these +#################### +# +# HEADS UP: Operates on '$lines' array, so, must be invoked after run_podman +# +function allow_warnings() { + for line in "${lines[@]}"; do + if [[ "$line" =~ level=[we] ]]; then + local ok= + for pattern in "$@"; do + if [[ "$line" =~ $pattern ]]; then + ok=ok + fi + done + if [[ -z "$ok" ]]; then + die "Unexpected warning/error in command results: $line" + fi + fi + done +} + +##################### +# require_warning # Require the given message, but disallow any others +##################### +# Optional 2nd argument is a message to display if warning is missing +function require_warning() { + local expect="$1" + local msg="${2:-Did not find expected warning/error message}" + assert "$output" =~ "$expect" "$msg" + allow_warnings "$expect" +} + +############ +# dprint # conditional debug message +############ +# +# Set PODMAN_TEST_DEBUG to the name of one or more functions you want to debug +# +# Examples: +# +# $ PODMAN_TEST_DEBUG=parse_table bats . +# $ PODMAN_TEST_DEBUG="test_podman_images test_podman_run" bats . +# +function dprint() { + test -z "$PODMAN_TEST_DEBUG" && return + + caller="${FUNCNAME[1]}" + + # PODMAN_TEST_DEBUG is a space-separated list of desired functions + # e.g. "parse_table test_podman_images" (or even just "table") + for want in $PODMAN_TEST_DEBUG; do + # Check if our calling function matches any of the desired strings + if expr "$caller" : ".*$want" >/dev/null; then + echo "# ${FUNCNAME[1]}() : $*" >&3 + return + fi + done +} + + +################# +# parse_table # Split a table on '|' delimiters; return space-separated +################# +# +# See sample .bats scripts for examples. The idea is to list a set of +# tests in a table, then use simple logic to iterate over each test. +# Columns are separated using '|' (pipe character) because sometimes +# we need spaces in our fields. +# +function parse_table() { + while read line; do + test -z "$line" && continue + + declare -a row=() + while read col; do + dprint "col=<<$col>>" + row+=("$col") + done < <(echo "$line" | sed -E -e 's/(^|\s)\|(\s|$)/\n /g' | sed -e 's/^ *//' -e 's/\\/\\\\/g') + # the above seds: + # 1) Convert '|' to newline, but only if bracketed by spaces or + # at beginning/end of line (this allows 'foo|bar' in tests); + # 2) then remove leading whitespace; + # 3) then double-escape all backslashes + + printf "%q " "${row[@]}" + printf "\n" + done <<<"$1" +} + + +################### +# random_string # Returns a pseudorandom human-readable string +################### +# +# Numeric argument, if present, is desired length of string +# +function random_string() { + local length=${1:-10} + + head /dev/urandom | tr -dc a-zA-Z0-9 | head -c$length +} + +############## +# safename # Returns a pseudorandom string suitable for container/image/etc names +############## +# +# Name will include the bats test number and a pseudorandom element, +# eg "t123-xyz123". 
+# safename() will return the same string across
+# multiple invocations within a given test; this makes it easier for
+# a maintainer to see common name patterns.
+#
+# String is lower-case so it can be used as an image name
+#
+function safename() {
+    # FIXME: I don't think these can ever fail. Remove checks once I'm sure.
+    test -n "$BATS_SUITE_TMPDIR"
+    test -n "$BATS_SUITE_TEST_NUMBER"
+    safenamepath=$BATS_SUITE_TMPDIR/.safename.$BATS_SUITE_TEST_NUMBER
+    if [[ ! -e $safenamepath ]]; then
+        echo -n "t${BATS_SUITE_TEST_NUMBER}-$(random_string 8 | tr A-Z a-z)" >$safenamepath
+    fi
+    cat $safenamepath
+}
+
+#########################
+#  find_exec_pid_files  #  Returns nothing or exec_pid hash files
+#########################
+#
+# Return exec_pid hash files if any exist; otherwise, return nothing
+#
+function find_exec_pid_files() {
+    run_podman info --format '{{.Store.RunRoot}}'
+    local storage_path="$output"
+    if [ -d $storage_path ]; then
+        find $storage_path -type f -iname 'exec_pid_*'
+    fi
+}
+
+
+#############################
+#  remove_same_dev_warning  #  Filter out useless warning from output
+#############################
+#
+# On some CI systems, 'podman run --privileged' emits a useless warning:
+#
+#    WARNING: The same type, major and minor should not be used for multiple devices.
+#
+# This obviously screws us up when we look at output results.
+#
+# This function removes the warning from $output and $lines. We don't
+# do a full string match because there's another variant of that message:
+#
+#    WARNING: Creating device "/dev/null" with same type, major and minor as existing "/dev/foodevdir/null".
+#
+# (We should never see that precise message again, but we could
+# see variants of it.)
+#
+function remove_same_dev_warning() {
+    # No input arguments. We operate in-place on $output and $lines
+
+    local i=0
+    local -a new_lines=()
+    while [[ $i -lt ${#lines[@]} ]]; do
+        if expr "${lines[$i]}" : 'WARNING: .* same type, major' >/dev/null; then
+            :
+        else
+            new_lines+=("${lines[$i]}")
+        fi
+        i=$(( i + 1 ))
+    done
+
+    lines=("${new_lines[@]}")
+    output=$(printf '%s\n' "${lines[@]}")
+}
+
+# run 'podman help', parse the output looking for 'Available Commands';
+# return that list.
+function _podman_commands() {
+    dprint "$@"
+    # &>/dev/null prevents duplicate output
+    run_podman help "$@" &>/dev/null
+    awk '/^Available Commands:/{ok=1;next}/^Options:/{ok=0}ok { print $1 }' <<<"$output" | grep .
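+    # (the trailing 'grep .' filters out any empty lines from the awk output)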
+} + +########################## +# sleep_to_next_second # Sleep until second rolls over +########################## + +function sleep_to_next_second() { + sleep 0.$(printf '%04d' $((10000 - 10#$(date +%4N)))) +} + +function wait_for_command_output() { + local cmd="$1" + local want="$2" + local tries=20 + local sleep_delay=0.5 + + case "${#*}" in + 2) ;; + 4) tries="$3" + sleep_delay="$4" + ;; + *) die "Internal error: 'wait_for_command_output' requires two or four arguments" ;; + esac + + while [[ $tries -gt 0 ]]; do + echo "$_LOG_PROMPT $cmd" + run $cmd + echo "$output" + if [[ "$output" = "$want" ]]; then + return + fi + + sleep $sleep_delay + tries=$((tries - 1)) + done + die "Timed out waiting for '$cmd' to return '$want'" +} + +function make_random_file() { + dd if=/dev/urandom of="$1" bs=1 count=${2:-$((${RANDOM} % 8192 + 1024))} status=none +} + +# END miscellaneous tools +############################################################################### diff --git a/plans/basic.fmf b/plans/basic.fmf deleted file mode 100644 index 01ff9ec..0000000 --- a/plans/basic.fmf +++ /dev/null @@ -1,8 +0,0 @@ -summary: Check basic functionality -discover: - how: fmf -execute: - how: tmt -prepare: - how: feature - epel: enabled diff --git a/plans/system.fmf b/plans/system.fmf new file mode 100644 index 0000000..9d65470 --- /dev/null +++ b/plans/system.fmf @@ -0,0 +1,47 @@ +discover: + how: fmf + +execute: + how: tmt + +prepare: + - how: shell + script: modprobe null_blk nr_devices=1 + order: 5 + - how: shell + script: | + BATS_VERSION=1.12.0 + curl -L https://github.com/bats-core/bats-core/archive/refs/tags/v"$BATS_VERSION".tar.gz | tar -xz + cd bats-core-"$BATS_VERSION" + ./install.sh /usr + order: 10 + +provision: + how: artemis + hardware: + memory: ">= 16 GB" + cpu: + cores: ">= 4" + threads: ">=8" + disk: + - size: ">= 512 GB" + +/root-local: + summary: Local root system tests + discover+: + filter: 'tag:local & tag:root & tag:system' + +/rootless-local: + summary: Local rootless system tests + discover+: + filter: 'tag:local & tag:rootless & tag:system' + +/root-remote: + summary: Remote root system tests + discover+: + filter: 'tag:remote & tag:root & tag:system' + +/rootless-remote: + summary: Remote rootless system tests + discover+: + filter: 'tag:remote & tag:rootless & tag:system' diff --git a/plans/tmt.fmf b/plans/tmt.fmf new file mode 100644 index 0000000..2ceee10 --- /dev/null +++ b/plans/tmt.fmf @@ -0,0 +1,20 @@ +summary: Run tmt container provision test (downstream only) + +discover: + how: fmf + filter: 'tag:downstream' + +execute: + how: tmt + +prepare: + - when: distro == centos-stream-9 + how: shell + script: | + dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-$(rpm --eval '%{?rhel}').noarch.rpm + dnf -y config-manager --set-enabled epel + order: 10 + +adjust+: + - enabled: false + when: revdeps == yes or distro == rhel-10 or distro == centos-stream-10 diff --git a/podman.spec b/podman.spec index fe0a424..dab5a08 100644 --- a/podman.spec +++ b/podman.spec @@ -14,7 +14,7 @@ GO111MODULE=off go build -buildmode pie -compiler gc -tags="rpm_crashtraceback $ Epoch: 5 Name: podman Version: 5.4.0 -Release: 7%{?dist} +Release: 8%{?dist} Summary: Manage Pods, Containers and Container Images License: Apache-2.0 AND BSD-2-Clause AND BSD-3-Clause AND ISC AND MIT AND MPL-2.0 URL: https://%{name}.io/ @@ -27,6 +27,13 @@ Source1: https://github.com/openSUSE/catatonit/archive/v%{cataver}.tar.gz #Source2: 
https://github.com/containers/dnsname/archive/v%%{dnsnamever}.tar.gz Source2: https://github.com/containers/dnsname/archive/%{commit_dnsname}/dnsname-%{shortcommit_dnsname}.tar.gz # https://fedoraproject.org/wiki/PackagingDrafts/Go#Go_Language_Architectures +# NOTE: Delete these sources after +# https://github.com/containers/podman/pull/26183 merges +Source3: 220-healthcheck.bats +Source4: 252-quadlet.bats +Source5: 255-auto-update.bats +Source6: 520-checkpoint.bats +Source7: helpers.bash ExclusiveArch: %{go_arches} Provides: %{name}-manpages = %{epoch}:%{version}-%{release} Obsoletes: %{name}-manpages < %{epoch}:%{version}-%{release} @@ -127,7 +134,8 @@ file. Each CNI network will have its own dnsmasq instance. %package tests Summary: Tests for %{name} Requires: %{name} = %{epoch}:%{version}-%{release} -#Requires: bats (which RHEL8 doesn't have. If it ever does, un-comment this) +# Fetch bats rpm if you can, else install any way available +Recommends: bats Requires: nmap-ncat Requires: httpd-tools Requires: jq @@ -137,11 +145,13 @@ Requires: openssl Requires: buildah Requires: gnupg Requires: git-daemon +Recommends: slirp4netns %description tests %{summary} -This package contains system tests for %{name} +This package contains system tests for %{name}. Only used for gating tests. End +user / customer use cases not supported. %prep %if 0%{?branch:1} @@ -157,6 +167,12 @@ sed -i '$d' configure.ac popd tar fx %{SOURCE2} +cp %{SOURCE3} test/system/. +cp %{SOURCE4} test/system/. +cp %{SOURCE5} test/system/. +cp %{SOURCE6} test/system/. +cp %{SOURCE7} test/system/. + # cgroups-v1 is supported on rhel9 %if 0%{?rhel} == 9 sed -i '/DELETE ON RHEL9/,/DELETE ON RHEL9/d' libpod/runtime.go @@ -370,6 +386,10 @@ fi %{_datadir}/%{name}/test %changelog +* Thu May 08 2025 Lokesh Mandvekar - 5:5.4.0-8 +- Enable gating tests via TMT +- Related: RHEL-80816 + * Fri Apr 25 2025 Jindrich Novy - 5:5.4.0-7 - update to the latest content of https://github.com/containers/podman/tree/v5.4-rhel (https://github.com/containers/podman/commit/0ee1d49) diff --git a/test/tmt/system.fmf b/test/tmt/system.fmf new file mode 100644 index 0000000..a1411aa --- /dev/null +++ b/test/tmt/system.fmf @@ -0,0 +1,49 @@ +require: + - podman-tests + - psmisc + - slirp4netns + +environment: + # PODMAN_TESTING and CI_DESIRED_NETWORK envvars are set in system.sh + PODMAN: /usr/bin/podman + QUADLET: /usr/libexec/podman/quadlet + ROOTLESS_USER: "fedora" +adjust+: + - when: distro == centos-stream + environment+: + ROOTLESS_USER: "ec2-user" + - when: distro == rhel + environment+: + ROOTLESS_USER: "cloud-user" + +/local-root: + tag: [ local, root, system ] + summary: local rootful test + test: bash ./system.sh + duration: 60m + +/local-rootless: + tag: [ local, rootless, system ] + summary: rootless test + test: bash ./system.sh rootless + duration: 60m + +/remote-root: + tag: [ remote, root, system ] + summary: remote rootful test + test: bash ./system.sh + duration: 60m + environment+: + PODMAN: /usr/bin/podman-remote + require+: + - podman-remote + +/remote-rootless: + tag: [ remote, rootless, system ] + summary: remote rootless test + test: bash ./system.sh rootless + duration: 60m + environment+: + PODMAN: /usr/bin/podman-remote + require+: + - podman-remote diff --git a/test/tmt/system.sh b/test/tmt/system.sh new file mode 100644 index 0000000..c5399ba --- /dev/null +++ b/test/tmt/system.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +set -exo pipefail + +uname -r + +loginctl enable-linger "$ROOTLESS_USER" + +rpm -q \ + aardvark-dns \ + 
buildah \ + conmon \ + container-selinux \ + containers-common \ + criu \ + crun \ + netavark \ + passt \ + podman \ + podman-tests \ + skopeo \ + slirp4netns \ + systemd + +export system_service_cmd="/usr/bin/podman system service --timeout=0 &" +export test_cmd="whoami && cd /usr/share/podman/test/system && PATH=$PATH:/usr/libexec/podman CI_DESIRED_NETWORK=netavark PODMAN_TESTING=/usr/bin/podman-testing bats ." + +if [[ -z $1 ]]; then + if [[ $PODMAN == "/usr/bin/podman-remote" ]]; then + eval "$system_service_cmd" + fi + eval "$test_cmd" +elif [[ $1 == "rootless" ]]; then + if [[ $PODMAN == "/usr/bin/podman-remote" ]]; then + su - "$ROOTLESS_USER" -c "eval $system_service_cmd" + fi + su - "$ROOTLESS_USER" -c "eval $test_cmd" +fi + +# Kill all podman processes for remote tests +if [[ $PODMAN == "/usr/bin/podman-remote" ]]; then + killall -q podman +fi +exit 0 diff --git a/tests/tmt.fmf b/test/tmt/tmt.fmf similarity index 53% rename from tests/tmt.fmf rename to test/tmt/tmt.fmf index 67fc5b2..f016947 100644 --- a/tests/tmt.fmf +++ b/test/tmt/tmt.fmf @@ -1,4 +1,9 @@ -summary: Make sure that container provision works +enabled: false +adjust: + enabled: true + when: initiator != packit && distro != rhel +summary: Make sure that TMT container provision works +tag: [downstream] require: - tmt+provision-container test: diff --git a/tests/main.fmf b/tests/main.fmf deleted file mode 100644 index 765b81d..0000000 --- a/tests/main.fmf +++ /dev/null @@ -1,2 +0,0 @@ -require: - - podman diff --git a/tests/roles/bats_installed/tasks/main.yml b/tests/roles/bats_installed/tasks/main.yml deleted file mode 100644 index 20a73f3..0000000 --- a/tests/roles/bats_installed/tasks/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -# Sigh; RHEL8 doesn't have BATS -- name: bats | fetch and unpack tarball - unarchive: - src: https://github.com/bats-core/bats-core/archive/v1.9.0.tar.gz - dest: /root - remote_src: true - -- name: bats | install - command: ./install.sh /usr/local - args: - chdir: /root/bats-core-1.9.0 diff --git a/tests/roles/rootless_user/tasks/main.yml b/tests/roles/rootless_user/tasks/main.yml deleted file mode 100644 index 0e5d814..0000000 --- a/tests/roles/rootless_user/tasks/main.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- name: create rootless user - user: - name: testuser - shell: /bin/bash -- name: enable linger - command: loginctl enable-linger testuser diff --git a/tests/roles/run_bats_tests/files/run_bats_tests.sh b/tests/roles/run_bats_tests/files/run_bats_tests.sh deleted file mode 100755 index e9f5f5f..0000000 --- a/tests/roles/run_bats_tests/files/run_bats_tests.sh +++ /dev/null @@ -1,72 +0,0 @@ -#!/bin/bash -# -# Run bats tests for a given $TEST_PACKAGE, e.g. buildah, podman -# -# This is invoked by the 'run_bats_tests' role; we assume that -# the package foo has a foo-tests subpackage which provides the -# directory /usr/share/foo/test/system, containing one or more .bats -# test files. -# - -export PATH=/usr/local/bin:/usr/sbin:/usr/bin - -FULL_LOG=/tmp/test.debug.log -BATS_LOG=/tmp/test.bats.log -rm -f $FULL_LOG $BATS_LOG -touch $FULL_LOG $BATS_LOG - -exec &> $FULL_LOG - -# Log program versions -echo "Packages:" -rpm -q ${TEST_PACKAGE} ${TEST_PACKAGE}-tests - -echo "------------------------------" -printenv | sort - -testdir=/usr/share/${TEST_PACKAGE}/test/system - -if ! cd $testdir; then - echo "FAIL ${TEST_NAME} : cd $testdir" >> /tmp/test.log - exit 0 -fi - -if [ -e /tmp/helper.sh ]; then - echo "------------------------------" - echo ". /tmp/helper.sh" - . 
/tmp/helper.sh -fi - -if [ "$(type -t setup)" = "function" ]; then - echo "------------------------------" - echo "\$ setup" - setup - if [ $? -ne 0 ]; then - echo "FAIL ${TEST_NAME} : setup" >> /tmp/test.log - exit 0 - fi -fi - -echo "------------------------------" -echo "\$ bats ." -bats . &> $BATS_LOG -rc=$? - -echo "------------------------------" -echo "bats completed with status $rc" - -status=PASS -if [ $rc -ne 0 ]; then - status=FAIL -fi - -echo "${status} ${TEST_NAME}" >> /tmp/test.log - -if [ "$(type -t teardown)" = "function" ]; then - echo "------------------------------" - echo "\$ teardown" - teardown -fi - -# FIXME: for CI purposes, always exit 0. This allows subsequent tests. -exit 0 diff --git a/tests/roles/run_bats_tests/tasks/main.yml b/tests/roles/run_bats_tests/tasks/main.yml deleted file mode 100644 index da79a4c..0000000 --- a/tests/roles/run_bats_tests/tasks/main.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -# Create empty results file, world-writable -- name: initialize test.log file - copy: dest=/tmp/test.log content='' force=yes mode=0666 - -- name: execute tests - include: run_one_test.yml - with_items: "{{ tests }}" - loop_control: - loop_var: test - -- name: pull test.log results - fetch: - src: "/tmp/test.log" - dest: "{{ artifacts }}/test.log" - flat: yes - -# Copied from standard-test-basic -- name: check results - shell: grep "^FAIL" /tmp/test.log - register: test_fails - # Never fail at this step. Just store result of tests. - failed_when: False - -- name: preserve results - set_fact: - role_result_failed: "{{ (test_fails.stdout|d|length > 0) or (test_fails.stderr|d|length > 0) }}" - role_result_msg: "{{ test_fails.stdout|d('tests failed.') }}" - -- name: display results - vars: - msg: | - Tests failed: {{ role_result_failed|d('Undefined') }} - Tests msg: {{ role_result_msg|d('None') }} - debug: - msg: "{{ msg.split('\n') }}" - failed_when: "role_result_failed|bool" diff --git a/tests/roles/run_bats_tests/tasks/run_one_test.yml b/tests/roles/run_bats_tests/tasks/run_one_test.yml deleted file mode 100644 index b44ed42..0000000 --- a/tests/roles/run_bats_tests/tasks/run_one_test.yml +++ /dev/null @@ -1,52 +0,0 @@ ---- -- name: "{{ test.name }} | install test packages" - dnf: name="{{ test.package }}-tests" state=installed - -- name: "{{ test.name }} | define helper variables" - set_fact: - test_name_oneword: "{{ test.name | replace(' ','-') }}" - -# UGH. This is necessary because our caller sets some environment variables -# and we need to set a few more based on other caller variables; then we -# need to combine the two dicts when running the test. This seems to be -# the only way to do it in ansible. 
-- name: "{{ test.name }} | define local environment" - set_fact: - local_environment: - TEST_NAME: "{{ test.name }}" - TEST_PACKAGE: "{{ test.package }}" - TEST_ENV: "{{ test.environment }}" - -- name: "{{ test.name }} | setup/teardown helper | see if exists" - local_action: stat path={{ role_path }}/files/helper.{{ test_name_oneword }}.sh - register: helper - -- name: "{{ test.name }} | setup/teardown helper | install" - copy: src=helper.{{ test_name_oneword }}.sh dest=/tmp/helper.sh - when: helper.stat.exists - -- name: "{{ test.name }} | run test" - script: ./run_bats_tests.sh - args: - chdir: /usr/share/{{ test.package }}/test/system - become: "{{ true if test.become is defined else false }}" - become_user: testuser - environment: "{{ local_environment | combine(test.environment) }}" - -- name: "{{ test.name }} | pull logs" - fetch: - src: "/tmp/test.{{ item }}.log" - dest: "{{ artifacts }}/test.{{ test_name_oneword }}.{{ item }}.log" - flat: yes - with_items: - - bats - - debug - -- name: "{{ test.name }} | remove remote logs and helpers" - file: - dest=/tmp/{{ item }} - state=absent - with_items: - - test.bats.log - - test.debug.log - - helper.sh diff --git a/tests/smoke.fmf b/tests/smoke.fmf deleted file mode 100644 index 97f2de4..0000000 --- a/tests/smoke.fmf +++ /dev/null @@ -1,2 +0,0 @@ -summary: Execute a simple command -test: podman run -t --rm fedora cat /etc/os-release | grep 'Fedora Linux' diff --git a/tests/tests.yml b/tests/tests.yml deleted file mode 100644 index 1f0a110..0000000 --- a/tests/tests.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- -- hosts: localhost - tags: classic - vars: - - artifacts: ./artifacts - roles: - - role: bats_installed - - role: rootless_user - - role: run_bats_tests - tests: - - name: podman root netavark - package: podman - environment: - CI_DESIRED_NETWORK: netavark - PODMAN: /usr/bin/podman - QUADLET: /usr/libexec/podman/quadlet - PODMAN_TESTING: /usr/bin/podman-testing - - - name: podman rootless netavark - package: podman - environment: - CI_DESIRED_NETWORK: netavark - PODMAN: /usr/bin/podman - QUADLET: /usr/libexec/podman/quadlet - PODMAN_TESTING: /usr/bin/podman-testing - become: true - - - name: podman root cni - package: podman - environment: - CI_DESIRED_NETWORK: cni - PODMAN: /usr/bin/podman - QUADLET: /usr/libexec/podman/quadlet - PODMAN_TESTING: /usr/bin/podman-testing - - - name: podman rootless cni - package: podman - environment: - CI_DESIRED_NETWORK: cni - PODMAN: /usr/bin/podman - QUADLET: /usr/libexec/podman/quadlet - PODMAN_TESTING: /usr/bin/podman-testing - become: true - - #- name: podman-remote root - #package: podman - #environment: - # PODMAN: /usr/bin/podman-remote - # QUADLET: /usr/libexec/podman/quadlet