podman-5.6.0-1.el9
- update to https://github.com/containers/podman/releases/tag/v5.6.0
- Related: RHEL-80816

Signed-off-by: Jindrich Novy <jnovy@redhat.com>
This commit is contained in: parent bb65fc5a33, commit fae8a166eb
@@ -1,426 +0,0 @@
#!/usr/bin/env bats   -*- bats -*-
#
# tests for podman healthcheck
#
#

load helpers
load helpers.systemd

# bats file_tags=ci:parallel

# Helper function: run 'podman inspect' and check various given fields
function _check_health {
    local ctrname="$1"
    local testname="$2"
    local tests="$3"
    local since="$4"
    local hc_status="$5"

    # Loop-wait (up to a few seconds) for healthcheck event (#20342)
    # Allow a margin when running parallel, because of system load
    local timeout=5
    if [[ -n "$PARALLEL_JOBSLOT" ]]; then
        timeout=$((timeout + 3))
    fi

    while :; do
        run_podman events --filter container=$ctrname --filter event=health_status \
                   --since "$since" --stream=false --format "{{.HealthStatus}}"
        # Output may be empty or multiple lines.
        if [[ -n "$output" ]]; then
            if [[ "${lines[-1]}" = "$hc_status" ]]; then
                break
            fi
        fi

        timeout=$((timeout - 1))
        if [[ $timeout -eq 0 ]]; then
            die "$testname - timed out waiting for '$hc_status' in podman events"
        fi
        sleep 1
    done

    # Got the desired status. Now verify all the healthcheck fields
    run_podman inspect --format "{{json .State.Healthcheck}}" $ctrname

    defer-assertion-failures
    parse_table "$tests" | while read field expect;do
        actual=$(jq ".$field" <<<"$output")
        is "$actual" "$expect" "$testname - .State.Healthcheck.$field"
    done
    immediate-assertion-failures
}

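# Illustrative sketch (not part of the test run): _check_health compares jq
# paths under .State.Healthcheck against expected values via parse_table. A
# hypothetical call for a container "myctr" might look like:
#
#     _check_health myctr "My testname" "
#     Status        | \"healthy\"
#     FailingStreak | 0
#     " "$(date --iso-8601=ns)" "healthy"
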
@test "podman healthcheck" {
|
|
||||||
local ctrname="c-h-$(safename)"
|
|
||||||
run_podman run -d --name $ctrname \
|
|
||||||
--health-cmd /home/podman/healthcheck \
|
|
||||||
--health-interval 1s \
|
|
||||||
--health-retries 3 \
|
|
||||||
--health-on-failure=kill \
|
|
||||||
--health-startup-cmd /home/podman/healthcheck \
|
|
||||||
--health-startup-interval 1s \
|
|
||||||
$IMAGE /home/podman/pause
|
|
||||||
cid="$output"
|
|
||||||
|
|
||||||
run_podman inspect $ctrname --format "{{.Config.HealthcheckOnFailureAction}}"
|
|
||||||
is "$output" "kill" "on-failure action is set to kill"
|
|
||||||
|
|
||||||
run_podman inspect $ctrname --format "{{.Config.StartupHealthCheck.Test}}"
|
|
||||||
is "$output" "[CMD-SHELL /home/podman/healthcheck]" ".Config.StartupHealthCheck.Test"
|
|
||||||
|
|
||||||
current_time=$(date --iso-8601=ns)
|
|
||||||
# We can't check for 'starting' because a 1-second interval is too
|
|
||||||
# short; it could run healthcheck before we get to our first check.
|
|
||||||
#
|
|
||||||
# So, just force a healthcheck run, then confirm that it's running.
|
|
||||||
run_podman healthcheck run $ctrname
|
|
||||||
is "$output" "" "output from 'podman healthcheck run'"
|
|
||||||
|
|
||||||
_check_health $ctrname "All healthy" "
|
|
||||||
Status | \"healthy\"
|
|
||||||
FailingStreak | 0
|
|
||||||
Log[-1].ExitCode | 0
|
|
||||||
Log[-1].Output | \"Life is Good on stdout\\\nLife is Good on stderr\\\n\"
|
|
||||||
" "$current_time" "healthy"
|
|
||||||
|
|
||||||
current_time=$(date --iso-8601=ns)
|
|
||||||
# Force a failure
|
|
||||||
run_podman exec $ctrname touch /uh-oh
|
|
||||||
|
|
||||||
_check_health $ctrname "First failure" "
|
|
||||||
Status | \"healthy\"
|
|
||||||
FailingStreak | [123]
|
|
||||||
Log[-1].ExitCode | 1
|
|
||||||
Log[-1].Output | \"Uh-oh on stdout!\\\nUh-oh on stderr!\\\n\"
|
|
||||||
" "$current_time" "healthy"
|
|
||||||
|
|
||||||
# Check that we now we do have valid podman units with this
|
|
||||||
# name so that the leak check below does not turn into a NOP without noticing.
|
|
||||||
run -0 systemctl list-units
|
|
||||||
cidmatch=$(grep "$cid" <<<"$output")
|
|
||||||
echo "$cidmatch"
|
|
||||||
assert "$cidmatch" =~ " $cid-[0-9a-f]+\.timer *.*/podman healthcheck run $cid" \
|
|
||||||
"Healthcheck systemd unit exists"
|
|
||||||
|
|
||||||
current_time=$(date --iso-8601=ns)
|
|
||||||
# After three successive failures, container should no longer be healthy
|
|
||||||
_check_health $ctrname "Four or more failures" "
|
|
||||||
Status | \"unhealthy\"
|
|
||||||
FailingStreak | [3456]
|
|
||||||
Log[-1].ExitCode | 1
|
|
||||||
Log[-1].Output | \"Uh-oh on stdout!\\\nUh-oh on stderr!\\\n\"
|
|
||||||
" "$current_time" "unhealthy"
|
|
||||||
|
|
||||||
# now the on-failure should kick in and kill the container
|
|
||||||
run_podman wait $ctrname
|
|
||||||
|
|
||||||
# Clean up
|
|
||||||
run_podman rm -t 0 -f $ctrname
|
|
||||||
|
|
||||||
# Important check for https://github.com/containers/podman/issues/22884
|
|
||||||
# We never should leak the unit files, healthcheck uses the cid in name so just grep that.
|
|
||||||
# (Ignore .scope units, those are conmon and can linger for 5 minutes)
|
|
||||||
# (Ignore .mount, too. They are created/removed by systemd based on the actual real mounts
|
|
||||||
# on the host and that is async and might be slow enough in CI to cause failures.)
|
|
||||||
run -0 systemctl list-units --quiet "*$cid*"
|
|
||||||
except_scope_mount=$(grep -vF ".scope " <<<"$output" | { grep -vF ".mount" || true; } )
|
|
||||||
assert "$except_scope_mount" == "" "Healthcheck systemd unit cleanup: no units leaked"
|
|
||||||
}
|
|
||||||
|
|
||||||
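# A minimal manual sketch of the same flow outside the bats harness
# (container name and health command are hypothetical):
#
#     podman run -d --name hc-demo --health-cmd 'stat /etc/os-release' \
#            --health-interval 2s --health-retries 3 $IMAGE sleep inf
#     podman healthcheck run hc-demo    # exit status 0 while healthy
#     podman inspect hc-demo --format '{{.State.Healthcheck.Status}}'
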
@test "podman healthcheck - restart cleans up old state" {
|
|
||||||
ctr="c-h-$(safename)"
|
|
||||||
|
|
||||||
run_podman run -d --name $ctr \
|
|
||||||
--health-cmd /home/podman/healthcheck \
|
|
||||||
--health-retries=3 \
|
|
||||||
--health-interval=disable \
|
|
||||||
$IMAGE /home/podman/pause
|
|
||||||
|
|
||||||
run_podman container inspect $ctr --format "{{.State.Healthcheck.FailingStreak}}"
|
|
||||||
is "$output" "0" "Failing streak of fresh container should be 0"
|
|
||||||
|
|
||||||
# Get the healthcheck to fail
|
|
||||||
run_podman exec $ctr touch /uh-oh-only-once
|
|
||||||
run_podman 1 healthcheck run $ctr
|
|
||||||
is "$output" "unhealthy" "output from 'podman healthcheck run'"
|
|
||||||
run_podman container inspect $ctr --format "{{.State.Healthcheck.FailingStreak}}"
|
|
||||||
is "$output" "1" "Failing streak after one failed healthcheck should be 1"
|
|
||||||
|
|
||||||
run_podman container restart $ctr
|
|
||||||
run_podman container inspect $ctr --format "{{.State.Healthcheck.FailingStreak}}"
|
|
||||||
is "$output" "0" "Failing streak of restarted container should be 0 again"
|
|
||||||
|
|
||||||
run_podman rm -f -t0 $ctr
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "podman wait --condition={healthy,unhealthy}" {
|
|
||||||
ctr="c-h-$(safename)"
|
|
||||||
|
|
||||||
wait_file="$PODMAN_TMPDIR/$(random_string).wait_for_me"
|
|
||||||
|
|
||||||
for condition in healthy unhealthy;do
|
|
||||||
rm -f $wait_file
|
|
||||||
run_podman run -d --name $ctr \
|
|
||||||
--health-cmd /home/podman/healthcheck \
|
|
||||||
--health-retries=1 \
|
|
||||||
--health-interval=disable \
|
|
||||||
$IMAGE /home/podman/pause
|
|
||||||
if [[ $condition == "unhealthy" ]];then
|
|
||||||
# create the uh-oh file to let the health check fail
|
|
||||||
run_podman exec $ctr touch /uh-oh
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Wait for the container in the background and create the $wait_file to
|
|
||||||
# signal the specified wait condition was met.
|
|
||||||
(timeout --foreground -v --kill=5 10 $PODMAN wait --condition=$condition $ctr && touch $wait_file) &
|
|
||||||
|
|
||||||
# Sleep 1 second to make sure above commands are running
|
|
||||||
sleep 1
|
|
||||||
if [[ -f $wait_file ]]; then
|
|
||||||
die "the wait file should only be created after the container turned healthy"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ $condition == "healthy" ]];then
|
|
||||||
run_podman healthcheck run $ctr
|
|
||||||
else
|
|
||||||
run_podman 1 healthcheck run $ctr
|
|
||||||
fi
|
|
||||||
wait_for_file $wait_file
|
|
||||||
run_podman rm -f -t0 $ctr
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
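# Sketch (assumes the hypothetical "hc-demo" container from above):
# podman-wait can block on a health state instead of container exit,
# which is exactly what the loop above exercises:
#
#     podman wait --condition=healthy hc-demo     # returns once healthy
#     podman wait --condition=unhealthy hc-demo
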
@test "podman healthcheck --health-on-failure" {
|
|
||||||
run_podman 125 create --health-on-failure=kill $IMAGE
|
|
||||||
is "$output" "Error: cannot set on-failure action to kill without a health check"
|
|
||||||
|
|
||||||
ctr="c-h-$(safename)"
|
|
||||||
|
|
||||||
for policy in none kill restart stop;do
|
|
||||||
uhoh=/uh-oh
|
|
||||||
if [[ $policy != "none" ]];then
|
|
||||||
# only fail the first run
|
|
||||||
uhoh=/uh-oh-only-once
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Run healthcheck image.
|
|
||||||
run_podman run -d --name $ctr \
|
|
||||||
--health-cmd /home/podman/healthcheck \
|
|
||||||
--health-retries=1 \
|
|
||||||
--health-on-failure=$policy \
|
|
||||||
--health-interval=disable \
|
|
||||||
$IMAGE /home/podman/pause
|
|
||||||
|
|
||||||
# healthcheck should succeed
|
|
||||||
run_podman healthcheck run $ctr
|
|
||||||
|
|
||||||
# Now cause the healthcheck to fail
|
|
||||||
run_podman exec $ctr touch $uhoh
|
|
||||||
|
|
||||||
# healthcheck should now fail, with exit status 1 and 'unhealthy' output
|
|
||||||
run_podman 1 healthcheck run $ctr
|
|
||||||
is "$output" "unhealthy" "output from 'podman healthcheck run' (policy: $policy)"
|
|
||||||
|
|
||||||
if [[ $policy == "restart" ]];then
|
|
||||||
# Make sure the container transitions back to running
|
|
||||||
run_podman wait --condition=running $ctr
|
|
||||||
run_podman inspect $ctr --format "{{.RestartCount}}"
|
|
||||||
assert "${#lines[@]}" != 0 "Container has been restarted at least once"
|
|
||||||
run_podman container inspect $ctr --format "{{.State.Healthcheck.FailingStreak}}"
|
|
||||||
is "$output" "0" "Failing streak of restarted container should be 0 again"
|
|
||||||
run_podman healthcheck run $ctr
|
|
||||||
elif [[ $policy == "none" ]];then
|
|
||||||
run_podman inspect $ctr --format "{{.State.Status}} {{.Config.HealthcheckOnFailureAction}}"
|
|
||||||
# Container is still running and health check still broken
|
|
||||||
is "$output" "running $policy" "container continued running"
|
|
||||||
run_podman 1 healthcheck run $ctr
|
|
||||||
is "$output" "unhealthy" "output from 'podman healthcheck run' (policy: $policy)"
|
|
||||||
else
|
|
||||||
run_podman inspect $ctr --format "{{.State.Status}} {{.Config.HealthcheckOnFailureAction}}"
|
|
||||||
# kill and stop yield the container into a non-running state
|
|
||||||
is "$output" ".* $policy" "container was stopped/killed (policy: $policy)"
|
|
||||||
assert "$output" != "running $policy"
|
|
||||||
# also make sure that it's not stuck in the stopping state
|
|
||||||
assert "$output" != "stopping $policy"
|
|
||||||
fi
|
|
||||||
|
|
||||||
run_podman rm -f -t0 $ctr
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "podman healthcheck --health-on-failure with interval" {
|
|
||||||
ctr="c-h-$(safename)"
|
|
||||||
|
|
||||||
for policy in stop kill restart ;do
|
|
||||||
t0=$(date --iso-8601=seconds)
|
|
||||||
run_podman run -d --name $ctr \
|
|
||||||
--health-cmd /bin/false \
|
|
||||||
--health-retries=1 \
|
|
||||||
--health-on-failure=$policy \
|
|
||||||
--health-interval=1s \
|
|
||||||
$IMAGE top
|
|
||||||
|
|
||||||
if [[ $policy == "restart" ]];then
|
|
||||||
# Sleeping for 2 seconds makes the test much faster than using
|
|
||||||
# podman-wait which would compete with the container getting
|
|
||||||
# restarted.
|
|
||||||
sleep 2
|
|
||||||
# Make sure the container transitions back to running
|
|
||||||
run_podman wait --condition=running $ctr
|
|
||||||
run_podman inspect $ctr --format "{{.RestartCount}}"
|
|
||||||
assert "${#lines[@]}" != 0 "Container has been restarted at least once"
|
|
||||||
else
|
|
||||||
# kill and stop yield the container into a non-running state
|
|
||||||
run_podman wait $ctr
|
|
||||||
run_podman inspect $ctr --format "{{.State.Status}} {{.Config.HealthcheckOnFailureAction}}"
|
|
||||||
is "$output" ".* $policy" "container was stopped/killed (policy: $policy)"
|
|
||||||
assert "$output" != "running $policy"
|
|
||||||
# also make sure that it's not stuck in the stopping state
|
|
||||||
assert "$output" != "stopping $policy"
|
|
||||||
fi
|
|
||||||
|
|
||||||
run_podman rm -f -t0 $ctr
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
function _create_container_with_health_log_settings {
|
|
||||||
local ctrname="$1"
|
|
||||||
local msg="$2"
|
|
||||||
local format="$3"
|
|
||||||
local flag="$4"
|
|
||||||
local expect="$5"
|
|
||||||
local expect_msg="$6"
|
|
||||||
|
|
||||||
run_podman run -d --name $ctrname \
|
|
||||||
--health-cmd "echo $msg" \
|
|
||||||
$flag \
|
|
||||||
$IMAGE /home/podman/pause
|
|
||||||
cid="$output"
|
|
||||||
|
|
||||||
run_podman inspect $ctrname --format $format
|
|
||||||
is "$output" "$expect" "$expect_msg"
|
|
||||||
|
|
||||||
output=$cid
|
|
||||||
}
|
|
||||||
|
|
||||||
function _check_health_log {
|
|
||||||
local ctrname="$1"
|
|
||||||
local expect_msg="$2"
|
|
||||||
local comparison=$3
|
|
||||||
local expect_count="$4"
|
|
||||||
|
|
||||||
run_podman inspect $ctrname --format "{{.State.Health.Log}}"
|
|
||||||
count=$(grep -co "$expect_msg" <<< "$output")
|
|
||||||
assert "$count" $comparison $expect_count "Number of matching health log messages"
|
|
||||||
}
|
|
||||||
|
|
||||||
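# Sketch of what _check_health_log greps over: {{.State.Health.Log}} renders
# the recent healthcheck entries (start/end times, exit code, output; the
# exact rendering below is an assumption), so counting a unique message
# counts healthcheck runs:
#
#     podman inspect hc-demo --format '{{.State.Health.Log}}'
#     # e.g. [{2024-01-01T00:00:01Z 2024-01-01T00:00:02Z 0 healthmsg-abc}]
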
@test "podman healthcheck --health-max-log-count values" {
|
|
||||||
# flag | expected value | op | log count
|
|
||||||
test="
|
|
||||||
| 5 | -eq | 5
|
|
||||||
--health-max-log-count 0 | 0 | -ge | 11
|
|
||||||
--health-max-log-count=0 | 0 | -ge | 11
|
|
||||||
--health-max-log-count 10 | 10 | -eq | 10
|
|
||||||
--health-max-log-count=10 | 10 | -eq | 10
|
|
||||||
"
|
|
||||||
|
|
||||||
while read flag value op logs_count ; do
|
|
||||||
local msg="healthmsg-$(random_string)"
|
|
||||||
local ctrname="c-h-$(safename)"
|
|
||||||
_create_container_with_health_log_settings $ctrname $msg "{{.Config.HealthMaxLogCount}}" $flag $value "HealthMaxLogCount"
|
|
||||||
|
|
||||||
for i in $(seq 1 $((logs_count + 5)));
|
|
||||||
do
|
|
||||||
run_podman healthcheck run $ctrname
|
|
||||||
is "$output" "" "unexpected output from podman healthcheck run (pass $i)"
|
|
||||||
done
|
|
||||||
|
|
||||||
_check_health_log $ctrname $msg $op $logs_count
|
|
||||||
|
|
||||||
run_podman rm -t 0 -f $ctrname
|
|
||||||
done < <(parse_table "$tests")
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "podman healthcheck --health-max-log-size values" {
|
|
||||||
local s=$(printf "healthmsg-%1000s")
|
|
||||||
local long_msg=${s// /$(random_string)}
|
|
||||||
|
|
||||||
# flag | expected value | exp_msg
|
|
||||||
test="
|
|
||||||
| 500 | ${long_msg:0:500}}]\$
|
|
||||||
--health-max-log-size 0 | 0 | $long_msg}]\$
|
|
||||||
--health-max-log-size=0 | 0 | $long_msg}]\$
|
|
||||||
--health-max-log-size 10 | 10 | ${long_msg:0:10}}]\$
|
|
||||||
--health-max-log-size=10 | 10 | ${long_msg:0:10}}]\$
|
|
||||||
"
|
|
||||||
|
|
||||||
while read flag value exp_msg ; do
|
|
||||||
local ctrname="c-h-$(safename)"
|
|
||||||
_create_container_with_health_log_settings $ctrname $long_msg "{{.Config.HealthMaxLogSize}}" $flag $value "HealthMaxLogSize"
|
|
||||||
|
|
||||||
run_podman healthcheck run $ctrname
|
|
||||||
is "$output" "" "output from 'podman healthcheck run'"
|
|
||||||
|
|
||||||
_check_health_log $ctrname $exp_msg -eq 1
|
|
||||||
|
|
||||||
run_podman rm -t 0 -f $ctrname
|
|
||||||
done < <(parse_table "$tests")
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "podman healthcheck --health-log-destination file" {
|
|
||||||
local TMP_DIR_HEALTHCHECK="$PODMAN_TMPDIR/healthcheck"
|
|
||||||
mkdir $TMP_DIR_HEALTHCHECK
|
|
||||||
local ctrname="c-h-$(safename)"
|
|
||||||
local msg="healthmsg-$(random_string)"
|
|
||||||
_create_container_with_health_log_settings $ctrname $msg "{{.Config.HealthLogDestination}}" "--health-log-destination $TMP_DIR_HEALTHCHECK" "$TMP_DIR_HEALTHCHECK" "HealthLogDestination"
|
|
||||||
cid="$output"
|
|
||||||
|
|
||||||
run_podman healthcheck run $ctrname
|
|
||||||
is "$output" "" "output from 'podman healthcheck run'"
|
|
||||||
|
|
||||||
healthcheck_log_path="${TMP_DIR_HEALTHCHECK}/${cid}-healthcheck.log"
|
|
||||||
# The healthcheck is triggered by the podman when the container is started, but its execution depends on systemd.
|
|
||||||
# And since `run_podman healthcheck run` is also run manually, it will result in two runs.
|
|
||||||
count=$(grep -co "$msg" $healthcheck_log_path)
|
|
||||||
assert "$count" -ge 1 "Number of matching health log messages"
|
|
||||||
|
|
||||||
run_podman rm -t 0 -f $ctrname
|
|
||||||
}
|
|
||||||
|
|
||||||
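# Sketch of the file destination (log path pattern taken from the test
# above; container name and directory are hypothetical): with a directory
# destination, each container gets its own log file named after its ID:
#
#     podman run -d --name hc-demo --health-cmd 'echo ok' \
#            --health-log-destination /tmp/hc-logs $IMAGE sleep inf
#     cat /tmp/hc-logs/$(podman inspect -f '{{.Id}}' hc-demo)-healthcheck.log
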
@test "podman healthcheck --health-log-destination journal" {
    skip_if_remote "We cannot read journalctl over remote."

    # We can't use journald on RHEL as rootless, either: rhbz#1895105
    skip_if_journald_unavailable

    # FIXME: The rootless user belongs to systemd-journal, but this still fails
    if is_rhel_or_centos; then
        skip_if_rootless
    fi

    local ctrname="c-h-$(safename)"
    local msg="healthmsg-$(random_string)"
    _create_container_with_health_log_settings $ctrname $msg "{{.Config.HealthLogDestination}}" "--health-log-destination events_logger" "events_logger" "HealthLogDestination"
    cid="$output"

    run_podman healthcheck run $ctrname
    is "$output" "" "output from 'podman healthcheck run'"

    cmd="journalctl --output cat --output-fields=PODMAN_HEALTH_LOG PODMAN_ID=$cid"
    echo "$_LOG_PROMPT $cmd"
    run $cmd
    echo "$output"
    assert "$status" -eq 0 "exit status of journalctl"

    # The healthcheck is triggered by podman when the container is started, but its execution depends on systemd.
    # And since `run_podman healthcheck run` is also run manually, it will result in two runs.
    count=$(grep -co "$msg" <<< "$output")
    assert "$count" -ge 1 "Number of matching health log messages"

    run_podman rm -t 0 -f $ctrname
}

# vim: filetype=sh

252-quadlet.bats (1754 lines): file diff suppressed because it is too large.

@@ -1,701 +0,0 @@
#!/usr/bin/env bats   -*- bats -*-
#
# Tests for automatically updating images for containerized services
#

load helpers
load helpers.network
load helpers.registry
load helpers.systemd

export SNAME_FILE

function setup() {
    skip_if_remote "systemd tests are meaningless over remote"
    basic_setup

    SNAME_FILE=${PODMAN_TMPDIR}/services
}

function teardown() {
    if [[ -e $SNAME_FILE ]]; then
        while read line; do
            if [[ "$line" =~ "podman-auto-update" ]]; then
                echo "Stop timer: $line.timer"
                systemctl stop $line.timer
                systemctl disable $line.timer
            else
                systemctl stop $line
            fi
            rm -f $UNIT_DIR/$line.{service,timer}
        done < $SNAME_FILE

        rm -f $SNAME_FILE
    fi
    SNAME_FILE=

    run_podman rmi -f \
               quay.io/libpod/alpine:latest \
               quay.io/libpod/busybox:latest \
               quay.io/libpod/localtest:latest \
               quay.io/libpod/autoupdatebroken:latest \
               quay.io/libpod/test:latest

    # The rollback tests may leave some dangling images behind, so let's prune
    # them to leave a clean state.
    run_podman image prune -f
    basic_teardown
}

# This function handles the basic steps common to auto-update tests:
#  1. Generate a random container name and echo it to output.
#  2. Tag the fake image before the test.
#  3. Start a container with io.containers.autoupdate.
#  4. Generate the service file from the container.
#  5. Remove the original container.
#  6. Start the container from the service.
#  7. If a seventh argument is given, use that fully-qualified image
#     instead of the one tagged in step 2.
function generate_service() {
    local target_img_basename=$1
    local autoupdate=$2
    local command=$3
    local extraArgs=$4
    local noTag=$5
    local requires=$6

    # Unless specified, set a default command.
    if [[ -z "$command" ]]; then
        command="top -d 120"
    fi

    # Container name. Include the autoupdate type, to make debugging easier.
    # IMPORTANT: variable 'cname' is passed (out of scope) up to caller!
    cname=c_${autoupdate//\'/}_$(random_string)
    target_img="quay.io/libpod/$target_img_basename:latest"
    if [[ -n "$7" ]]; then
        target_img="$7"
    fi

    if [[ -z "$noTag" ]]; then
        run_podman tag $IMAGE $target_img
    fi

    if [[ -n "$autoupdate" ]]; then
        label="--label io.containers.autoupdate=$autoupdate"
    else
        label=""
    fi

    if [[ -n "$requires" ]]; then
        requires="--requires=$requires"
    fi

    run_podman create $extraArgs --name $cname $label $target_img $command

    (cd $UNIT_DIR; run_podman generate systemd --new --files --name $requires $cname)
    echo "container-$cname" >> $SNAME_FILE
    run_podman rm -t 0 -f $cname

    systemctl daemon-reload
    systemctl_start container-$cname
    systemctl status container-$cname

    # Original image ID.
    # IMPORTANT: variable 'ori_image' is passed (out of scope) up to caller!
    run_podman inspect --format "{{.Image}}" $cname
    ori_image=$output
}

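# Illustrative call (argument positions as defined above; the image basename
# and command are hypothetical): generate a registry-policy service around
# quay.io/libpod/busybox, overriding the default command:
#
#     generate_service busybox registry "sleep inf"
#     # afterwards $cname and $ori_image are set for the caller
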
function _wait_service_ready() {
    local sname=$1

    local timeout=6
    while [[ $timeout -gt 1 ]]; do
        if systemctl -q is-active $sname; then
            return
        fi
        sleep 1
        let timeout=$timeout-1
    done

    # Print service status as debug information before failing the case
    systemctl status $sname
    die "Timed out waiting for $sname to start"
}

# Wait for container to update, as confirmed by its image ID changing
function _confirm_update() {
    local cname=$1
    local old_iid=$2

    # Image has already been pulled, so this shouldn't take too long
    local timeout=10
    while [[ $timeout -gt 0 ]]; do
        run_podman '?' inspect --format "{{.Image}}" $cname
        if [[ $status != 0 ]]; then
            if [[ $output =~ (no such object|does not exist in database): ]]; then
                # this is ok, it just means the container is being restarted
                :
            else
                die "podman inspect $cname failed unexpectedly"
            fi
        elif [[ $output != $old_iid ]]; then
            return
        fi
        sleep 1
        timeout=$((timeout - 1))
    done

    die "Timed out waiting for $cname to update; old IID=$old_iid"
}

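# The label driving everything in these tests (values exercised below:
# image/registry, local, disabled, plus invalid ones). A minimal sketch:
#
#     podman create --label io.containers.autoupdate=registry \
#            quay.io/libpod/alpine:latest top
#     podman auto-update --dry-run    # reports pending/false per unit
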
@test "podman auto-update - validate input" {
|
|
||||||
# Fully-qualified image reference is required
|
|
||||||
run_podman create --label io.containers.autoupdate=registry $IMAGE
|
|
||||||
run_podman rm -f "$output"
|
|
||||||
|
|
||||||
# Short name does not work
|
|
||||||
shortname="shortname:latest"
|
|
||||||
run_podman image tag $IMAGE $shortname
|
|
||||||
run_podman 125 create --label io.containers.autoupdate=registry $shortname
|
|
||||||
is "$output" "Error: short name: auto updates require fully-qualified image reference: \"$shortname\""
|
|
||||||
|
|
||||||
# Requires docker (or no) transport
|
|
||||||
archive=$PODMAN_TMPDIR/archive.tar
|
|
||||||
run_podman save -o $archive $IMAGE
|
|
||||||
run_podman 125 create --label io.containers.autoupdate=registry docker-archive:$archive
|
|
||||||
is "$output" ".*Error: auto updates require the docker image transport but image is of transport \"docker-archive\""
|
|
||||||
|
|
||||||
run_podman rmi $shortname
|
|
||||||
}
|
|
||||||
|
|
||||||
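# Dry-run sketch (format fields are the ones asserted throughout this file):
#
#     podman auto-update --dry-run \
#            --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
#     # e.g. container-foo.service,quay.io/libpod/alpine:latest,pending,registry
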
# This test can fail in a dev environment because of SELinux.
# quick fix: chcon -t container_runtime_exec_t ./bin/podman
@test "podman auto-update - label io.containers.autoupdate=image" {
    since=$(date --iso-8601=seconds)
    run_podman auto-update
    is "$output" ""
    run_podman events --filter type=system --since $since --stream=false
    is "$output" ""

    # Generate two units. The first "parent" to be auto updated, the second
    # "child" depends on/requires the "parent" and is expected to get restarted
    # as well on auto updates (regression test for #18926).
    generate_service alpine image
    ctr_parent=$cname
    _wait_service_ready container-$ctr_parent.service

    generate_service alpine image "" "" "" "container-$ctr_parent.service"
    ctr_child=$cname
    _wait_service_ready container-$ctr_child.service
    run_podman container inspect --format "{{.ID}}" $ctr_child
    old_child_id=$output

    since=$(date --iso-8601=seconds)
    run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
    is "$output" ".*container-$ctr_parent.service,quay.io/libpod/alpine:latest,pending,registry.*" "Image update is pending."
    run_podman events --filter type=system --since $since --stream=false
    is "$output" ".* system auto-update"

    since=$(date --iso-8601=seconds)
    run_podman auto-update --rollback=false --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
    is "$output" "Trying to pull.*" "Image is updated."
    is "$output" ".*container-$ctr_parent.service,quay.io/libpod/alpine:latest,true,registry.*" "Image is updated."
    run_podman events --filter type=system --since $since --stream=false
    is "$output" ".* system auto-update"

    # Confirm that the update was successful and that the child container/unit
    # has been restarted as well.
    _confirm_update $ctr_parent $ori_image
    run_podman container inspect --format "{{.ID}}" $ctr_child
    assert "$output" != "$old_child_id" \
           "child container/unit has not been restarted during update"
    run_podman container inspect --format "{{.ID}}" $ctr_child
    run_podman container inspect --format "{{.State.Status}}" $ctr_child
    is "$output" "running" "child container is in running state"
}

@test "podman auto-update - label io.containers.autoupdate=image with rollback" {
|
|
||||||
# FIXME: this test should exercise the authfile label to have a regression
|
|
||||||
# test for #11171.
|
|
||||||
|
|
||||||
# Note: the autoupdatebroken image is empty on purpose so it cannot be
|
|
||||||
# executed and force a rollback. The rollback test for the local policy
|
|
||||||
# is exercising the case where the container doesn't send a ready message.
|
|
||||||
image=quay.io/libpod/autoupdatebroken
|
|
||||||
|
|
||||||
run_podman tag $IMAGE $image
|
|
||||||
generate_service autoupdatebroken image
|
|
||||||
|
|
||||||
_wait_service_ready container-$cname.service
|
|
||||||
run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
|
|
||||||
is "$output" ".*container-$cname.service,$image:latest,pending,registry.*" "Image update is pending."
|
|
||||||
|
|
||||||
run_podman container inspect --format "{{.Image}}" $cname
|
|
||||||
oldID="$output"
|
|
||||||
|
|
||||||
run_podman inspect --format "{{.ID}}" $cname
|
|
||||||
containerID="$output"
|
|
||||||
|
|
||||||
run_podman auto-update --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
|
|
||||||
is "$output" "Trying to pull.*" "Image is updated."
|
|
||||||
is "$output" ".*container-$cname.service,$image:latest,rolled back,registry.*" "Image has been rolled back."
|
|
||||||
|
|
||||||
run_podman container inspect --format "{{.Image}}" $cname
|
|
||||||
is "$output" "$oldID" "container rolled back to previous image"
|
|
||||||
|
|
||||||
run_podman container inspect --format "{{.ID}}" $cname
|
|
||||||
assert "$output" != "$containerID" \
|
|
||||||
"container has not been restarted during rollback"
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "podman auto-update - label io.containers.autoupdate=disabled" {
|
|
||||||
generate_service alpine disabled
|
|
||||||
|
|
||||||
_wait_service_ready container-$cname.service
|
|
||||||
run_podman auto-update
|
|
||||||
is "$output" "" "Image is not updated when autoupdate=disabled."
|
|
||||||
|
|
||||||
run_podman inspect --format "{{.Image}}" $cname
|
|
||||||
is "$output" "$ori_image" "Image ID should not change"
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "podman auto-update - label io.containers.autoupdate=fakevalue" {
|
|
||||||
fakevalue=fake_$(random_string)
|
|
||||||
generate_service alpine $fakevalue
|
|
||||||
|
|
||||||
_wait_service_ready container-$cname.service
|
|
||||||
run_podman 125 auto-update
|
|
||||||
is "$output" ".*invalid auto-update policy.*" "invalid policy setup"
|
|
||||||
|
|
||||||
run_podman inspect --format "{{.Image}}" $cname
|
|
||||||
is "$output" "$ori_image" "Image ID should not change"
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "podman auto-update - label io.containers.autoupdate=local" {
|
|
||||||
generate_service localtest local
|
|
||||||
_wait_service_ready container-$cname.service
|
|
||||||
|
|
||||||
image=quay.io/libpod/localtest:latest
|
|
||||||
run_podman commit --change CMD=/bin/bash $cname $image
|
|
||||||
run_podman image inspect --format "{{.ID}}" $image
|
|
||||||
|
|
||||||
run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
|
|
||||||
is "$output" ".*container-$cname.service,quay.io/libpod/localtest:latest,pending,local.*" "Image update is pending."
|
|
||||||
|
|
||||||
run_podman auto-update --rollback=false --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
|
|
||||||
is "$output" ".*container-$cname.service,quay.io/libpod/localtest:latest,true,local.*" "Image is updated."
|
|
||||||
|
|
||||||
_confirm_update $cname $ori_image
|
|
||||||
}
|
|
||||||
|
|
||||||
# This test can fail in a dev environment because of SELinux.
# quick fix: chcon -t container_runtime_exec_t ./bin/podman
@test "podman auto-update - label io.containers.autoupdate=local with rollback" {
    # sdnotify fails with runc 1.0.0-3-dev2 on Ubuntu. Let's just
    # assume that we work only with crun, nothing else.
    # [copied from 260-sdnotify.bats]
    runtime=$(podman_runtime)
    if [[ "$runtime" != "crun" ]]; then
        skip "this test only works with crun, not $runtime"
    fi

    _prefetch $SYSTEMD_IMAGE

    dockerfile1=$PODMAN_TMPDIR/Dockerfile.1
    cat >$dockerfile1 <<EOF
FROM $SYSTEMD_IMAGE
RUN echo -e "#!/bin/sh\n\
printenv NOTIFY_SOCKET; echo READY; systemd-notify --ready;\n\
trap 'echo Received SIGTERM, finishing; exit' SIGTERM; echo WAITING; while :; do sleep 0.1; done" \
>> /runme
RUN chmod +x /runme
EOF

    dockerfile2=$PODMAN_TMPDIR/Dockerfile.2
    cat >$dockerfile2 <<EOF
FROM $SYSTEMD_IMAGE
RUN echo -e "#!/bin/sh\n\
exit 1" >> /runme
RUN chmod +x /runme
EOF
    image=test

    # Generate a healthy image that will run correctly.
    run_podman build -t quay.io/libpod/$image -f $dockerfile1

    generate_service $image local /runme --sdnotify=container noTag
    _wait_service_ready container-$cname.service

    run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
    is "$output" ".*container-$cname.service,quay.io/libpod/$image:latest,false,local.*" "No update available"

    # Generate an unhealthy image that will fail.
    run_podman build -t quay.io/libpod/$image -f $dockerfile2
    run_podman image inspect --format "{{.ID}}" $image
    newID="$output"

    run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
    is "$output" ".*container-$cname.service,quay.io/libpod/$image:latest,pending,local.*" "Image update is pending"

    # Note: we rollback automatically by default.
    run_podman auto-update --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
    is "$output" ".*container-$cname.service,quay.io/libpod/$image:latest,rolled back,local.*" "Rolled back to old image"

    # Make sure that new container is not using the new image ID anymore.
    _confirm_update $cname $newID
}

@test "podman auto-update with multiple services" {
|
|
||||||
# Preserve original image ID, to confirm that it changes (or not)
|
|
||||||
run_podman inspect --format "{{.Id}}" $IMAGE
|
|
||||||
local img_id="$output"
|
|
||||||
|
|
||||||
local cnames=()
|
|
||||||
local -A expect_update
|
|
||||||
local -A will_update=([image]=1 [registry]=1 [local]=1)
|
|
||||||
|
|
||||||
local fakevalue=fake_$(random_string)
|
|
||||||
for auto_update in image registry "" disabled "''" $fakevalue local
|
|
||||||
do
|
|
||||||
local img_base="alpine"
|
|
||||||
if [[ $auto_update == "registry" ]]; then
|
|
||||||
img_base="busybox"
|
|
||||||
elif [[ $auto_update == "local" ]]; then
|
|
||||||
img_base="localtest"
|
|
||||||
fi
|
|
||||||
generate_service $img_base $auto_update
|
|
||||||
cnames+=($cname)
|
|
||||||
if [[ $auto_update == "local" ]]; then
|
|
||||||
local_cname=$cname
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -n "$auto_update" && -n "${will_update[$auto_update]}" ]]; then
|
|
||||||
expect_update[$cname]=1
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
# Make sure all services are ready.
|
|
||||||
for cname in "${cnames[@]}"; do
|
|
||||||
_wait_service_ready container-$cname.service
|
|
||||||
done
|
|
||||||
run_podman commit --change CMD=/bin/bash $local_cname quay.io/libpod/localtest:latest
|
|
||||||
# Exit code is expected, due to invalid 'fakevalue'
|
|
||||||
run_podman 125 auto-update --rollback=false
|
|
||||||
update_log=$output
|
|
||||||
is "$update_log" ".*invalid auto-update policy.*" "invalid policy setup"
|
|
||||||
is "$update_log" ".*Error: invalid auto-update policy.*" "invalid policy setup"
|
|
||||||
|
|
||||||
local n_updated=$(grep -c 'Trying to pull' <<<"$update_log")
|
|
||||||
is "$n_updated" "2" "Number of images updated from registry."
|
|
||||||
|
|
||||||
for cname in "${!expect_update[@]}"; do
|
|
||||||
is "$update_log" ".*$cname.*" "container with auto-update policy image updated"
|
|
||||||
# Just because podman says it fetched, doesn't mean it actually updated
|
|
||||||
_confirm_update $cname $img_id
|
|
||||||
done
|
|
||||||
|
|
||||||
# Final confirmation that all image IDs have/haven't changed
|
|
||||||
for cname in "${cnames[@]}"; do
|
|
||||||
run_podman inspect --format "{{.Image}}" $cname
|
|
||||||
if [[ -n "${expect_update[$cname]}" ]]; then
|
|
||||||
assert "$output" != "$img_id" "$cname: image ID did not change"
|
|
||||||
else
|
|
||||||
assert "$output" = "$img_id" "Image ID should not be changed."
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "podman auto-update using systemd" {
|
|
||||||
# FIXME: The rootless user belongs to systemd-journal, but this still fails
|
|
||||||
if is_rhel_or_centos; then
|
|
||||||
skip_if_rootless
|
|
||||||
fi
|
|
||||||
|
|
||||||
generate_service alpine image
|
|
||||||
|
|
||||||
cat >$UNIT_DIR/podman-auto-update-$cname.timer <<EOF
|
|
||||||
[Unit]
|
|
||||||
Description=Podman auto-update testing timer
|
|
||||||
|
|
||||||
[Timer]
|
|
||||||
OnActiveSec=0s
|
|
||||||
Persistent=true
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=timers.target
|
|
||||||
EOF
|
|
||||||
cat >$UNIT_DIR/podman-auto-update-$cname.service <<EOF
|
|
||||||
[Unit]
|
|
||||||
Description=Podman auto-update testing service
|
|
||||||
Documentation=man:podman-auto-update(1)
|
|
||||||
Wants=network-online.target
|
|
||||||
After=network-online.target
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
Type=oneshot
|
|
||||||
ExecStart=$PODMAN auto-update
|
|
||||||
Environment="http_proxy=${http_proxy}"
|
|
||||||
Environment="HTTP_PROXY=${HTTP_PROXY}"
|
|
||||||
Environment="https_proxy=${https_proxy}"
|
|
||||||
Environment="HTTPS_PROXY=${HTTPS_PROXY}"
|
|
||||||
Environment="no_proxy=${no_proxy}"
|
|
||||||
Environment="NO_PROXY=${NO_PROXY}"
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=default.target
|
|
||||||
EOF
|
|
||||||
|
|
||||||
echo "podman-auto-update-$cname" >> $SNAME_FILE
|
|
||||||
systemctl enable --now podman-auto-update-$cname.timer
|
|
||||||
systemctl list-timers --all
|
|
||||||
|
|
||||||
# systemd <245 displays 'Started Podman auto-update ...'
|
|
||||||
# systemd 245 - <250 displays 'Finished Podman auto-update ...'
|
|
||||||
# systemd 250 - ???? displays 'Finished <unit name> - Podman auto-...'
|
|
||||||
local expect='(Started|Finished.*) Podman auto-update testing service'
|
|
||||||
local failed_start=failed
|
|
||||||
local count=0
|
|
||||||
while [ $count -lt 120 ]; do
|
|
||||||
run journalctl -n 15 -u podman-auto-update-$cname.service
|
|
||||||
if [[ "$output" =~ $expect ]]; then
|
|
||||||
failed_start=
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
((count+=1))
|
|
||||||
sleep 1
|
|
||||||
done
|
|
||||||
|
|
||||||
if [[ -n "$failed_start" ]]; then
|
|
||||||
echo "journalctl output:"
|
|
||||||
sed -e 's/^/ /' <<<"$output"
|
|
||||||
die "Did not find expected string '$expect' in journalctl output for $cname"
|
|
||||||
fi
|
|
||||||
|
|
||||||
_confirm_update $cname $ori_image
|
|
||||||
}
|
|
||||||
|
|
||||||
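# Outside the test harness, podman ships equivalent units; enabling the timer
# is the usual way to run auto-update periodically (unit name assumed from
# man:podman-auto-update(1), referenced in the service file above):
#
#     systemctl enable --now podman-auto-update.timer
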
@test "podman-kube@.service template with rollback" {
|
|
||||||
# sdnotify fails with runc 1.0.0-3-dev2 on Ubuntu. Let's just
|
|
||||||
# assume that we work only with crun, nothing else.
|
|
||||||
# [copied from 260-sdnotify.bats]
|
|
||||||
runtime=$(podman_runtime)
|
|
||||||
if [[ "$runtime" != "crun" ]]; then
|
|
||||||
skip "this test only works with crun, not $runtime"
|
|
||||||
fi
|
|
||||||
|
|
||||||
_prefetch $SYSTEMD_IMAGE
|
|
||||||
install_kube_template
|
|
||||||
|
|
||||||
dockerfile1=$PODMAN_TMPDIR/Dockerfile.1
|
|
||||||
cat >$dockerfile1 <<EOF
|
|
||||||
FROM $SYSTEMD_IMAGE
|
|
||||||
RUN echo -e "#!/bin/sh\n\
|
|
||||||
printenv NOTIFY_SOCKET; echo READY; systemd-notify --ready;\n\
|
|
||||||
trap 'echo Received SIGTERM, finishing; exit' SIGTERM; echo WAITING; while :; do sleep 0.1; done" \
|
|
||||||
>> /runme
|
|
||||||
RUN chmod +x /runme
|
|
||||||
EOF
|
|
||||||
|
|
||||||
dockerfile2=$PODMAN_TMPDIR/Dockerfile.2
|
|
||||||
cat >$dockerfile2 <<EOF
|
|
||||||
FROM $SYSTEMD_IMAGE
|
|
||||||
RUN echo -e "#!/bin/sh\n\
|
|
||||||
exit 1" >> /runme
|
|
||||||
RUN chmod +x /runme
|
|
||||||
EOF
|
|
||||||
local_image=localhost/image:$(random_string 10)
|
|
||||||
|
|
||||||
# Generate a healthy image that will run correctly.
|
|
||||||
run_podman build -t $local_image -f $dockerfile1
|
|
||||||
run_podman image inspect --format "{{.ID}}" $local_image
|
|
||||||
oldID="$output"
|
|
||||||
|
|
||||||
# Create the YAMl file
|
|
||||||
yaml_source="$PODMAN_TMPDIR/test.yaml"
|
|
||||||
cat >$yaml_source <<EOF
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Pod
|
|
||||||
metadata:
|
|
||||||
annotations:
|
|
||||||
io.containers.autoupdate: "registry"
|
|
||||||
io.containers.autoupdate/b: "local"
|
|
||||||
io.containers.sdnotify/b: "container"
|
|
||||||
labels:
|
|
||||||
app: test
|
|
||||||
name: test_pod
|
|
||||||
spec:
|
|
||||||
containers:
|
|
||||||
- command:
|
|
||||||
- top
|
|
||||||
image: $IMAGE
|
|
||||||
name: a
|
|
||||||
- command:
|
|
||||||
- /runme
|
|
||||||
image: $local_image
|
|
||||||
name: b
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# Dispatch the YAML file
|
|
||||||
service_name="podman-kube@$(systemd-escape $yaml_source).service"
|
|
||||||
systemctl_start $service_name
|
|
||||||
systemctl is-active $service_name
|
|
||||||
|
|
||||||
# Make sure the containers are properly configured
|
|
||||||
run_podman auto-update --dry-run --format "{{.Unit}},{{.Container}},{{.Image}},{{.Updated}},{{.Policy}}"
|
|
||||||
is "$output" ".*$service_name,.* (test_pod-a),$IMAGE,false,registry.*" "global auto-update policy gets applied"
|
|
||||||
is "$output" ".*$service_name,.* (test_pod-b),$local_image,false,local.*" "container-specified auto-update policy gets applied"
|
|
||||||
|
|
||||||
# Generate a broken image that will fail.
|
|
||||||
run_podman build -t $local_image -f $dockerfile2
|
|
||||||
run_podman image inspect --format "{{.ID}}" $local_image
|
|
||||||
newID="$output"
|
|
||||||
|
|
||||||
assert "$oldID" != "$newID" "broken image really is a new one"
|
|
||||||
|
|
||||||
# Make sure container b sees the new image
|
|
||||||
run_podman auto-update --dry-run --format "{{.Unit}},{{.Container}},{{.Image}},{{.Updated}},{{.Policy}}"
|
|
||||||
is "$output" ".*$service_name,.* (test_pod-a),$IMAGE,false,registry.*" "global auto-update policy gets applied"
|
|
||||||
is "$output" ".*$service_name,.* (test_pod-b),$local_image,pending,local.*" "container b sees the new image"
|
|
||||||
|
|
||||||
# Now update and check for the rollback
|
|
||||||
run_podman auto-update --format "{{.Unit}},{{.Container}},{{.Image}},{{.Updated}},{{.Policy}}"
|
|
||||||
is "$output" ".*$service_name,.* (test_pod-a),$IMAGE,rolled back,registry.*" "container a was rolled back as the update of b failed"
|
|
||||||
is "$output" ".*$service_name,.* (test_pod-b),$local_image,rolled back,local.*" "container b was rolled back as its update has failed"
|
|
||||||
|
|
||||||
# Clean up
|
|
||||||
systemctl stop $service_name
|
|
||||||
run_podman rmi -f $(pause_image) $local_image $newID $oldID
|
|
||||||
run_podman network rm podman-default-kube-network
|
|
||||||
rm -f $UNIT_DIR/$unit_name
|
|
||||||
}
|
|
||||||
|
|
||||||
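# Sketch of the template mechanism exercised above (the podman-kube@ unit is
# installed by install_kube_template in these tests; outside them, podman
# ships an equivalent template unit):
#
#     systemctl start "podman-kube@$(systemd-escape /path/to/pod.yaml).service"
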
@test "podman auto-update - pod" {
|
|
||||||
dockerfile=$PODMAN_TMPDIR/Dockerfile
|
|
||||||
cat >$dockerfile <<EOF
|
|
||||||
FROM $IMAGE
|
|
||||||
RUN touch /123
|
|
||||||
EOF
|
|
||||||
|
|
||||||
podname=$(random_string)
|
|
||||||
ctrname=$(random_string)
|
|
||||||
podunit="$UNIT_DIR/pod-$podname.service.*"
|
|
||||||
ctrunit="$UNIT_DIR/container-$ctrname.service.*"
|
|
||||||
local_image=localhost/image:$(random_string 10)
|
|
||||||
|
|
||||||
run_podman tag $IMAGE $local_image
|
|
||||||
|
|
||||||
run_podman pod create --name=$podname
|
|
||||||
run_podman create --label "io.containers.autoupdate=local" --pod=$podname --name=$ctrname $local_image top
|
|
||||||
|
|
||||||
# cd into the unit dir to generate the two files.
|
|
||||||
pushd "$UNIT_DIR"
|
|
||||||
run_podman generate systemd --name --new --files $podname
|
|
||||||
is "$output" ".*$podunit.*"
|
|
||||||
is "$output" ".*$ctrunit.*"
|
|
||||||
popd
|
|
||||||
|
|
||||||
systemctl daemon-reload
|
|
||||||
|
|
||||||
systemctl_start pod-$podname.service
|
|
||||||
_wait_service_ready container-$ctrname.service
|
|
||||||
|
|
||||||
run_podman pod inspect --format "{{.State}}" $podname
|
|
||||||
is "$output" "Running" "pod is in running state"
|
|
||||||
run_podman container inspect --format "{{.State.Status}}" $ctrname
|
|
||||||
is "$output" "running" "container is in running state"
|
|
||||||
|
|
||||||
run_podman pod inspect --format "{{.ID}}" $podname
|
|
||||||
podid="$output"
|
|
||||||
run_podman container inspect --format "{{.ID}}" $ctrname
|
|
||||||
ctrid="$output"
|
|
||||||
|
|
||||||
# Note that the pod's unit is listed below, not the one of the container.
|
|
||||||
run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
|
|
||||||
is "$output" ".*pod-$podname.service,$local_image,false,local.*" "No update available"
|
|
||||||
|
|
||||||
run_podman build -t $local_image -f $dockerfile
|
|
||||||
|
|
||||||
run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
|
|
||||||
is "$output" ".*pod-$podname.service,$local_image,pending,local.*" "Image updated is pending"
|
|
||||||
|
|
||||||
run_podman auto-update --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
|
|
||||||
is "$output" ".*pod-$podname.service,$local_image,true,local.*" "Service has been restarted"
|
|
||||||
_wait_service_ready container-$ctrname.service
|
|
||||||
|
|
||||||
run_podman pod inspect --format "{{.ID}}" $podname
|
|
||||||
assert "$output" != "$podid" "pod has been recreated"
|
|
||||||
run_podman container inspect --format "{{.ID}}" $ctrname
|
|
||||||
assert "$output" != "$ctrid" "container has been recreated"
|
|
||||||
|
|
||||||
run systemctl stop pod-$podname.service
|
|
||||||
assert $status -eq 0 "Error stopping pod systemd unit: $output"
|
|
||||||
|
|
||||||
run_podman pod rm -f $podname
|
|
||||||
run_podman rmi $local_image $(pause_image)
|
|
||||||
rm -f $podunit $ctrunit
|
|
||||||
systemctl daemon-reload
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "podman-auto-update --authfile" {
|
|
||||||
# Test the three supported ways of using authfiles with auto updates
|
|
||||||
# 1) Passed via --authfile CLI flag
|
|
||||||
# 2) Passed via the REGISTRY_AUTH_FILE env variable
|
|
||||||
# 3) Via a label at container creation where 1) and 2) will be ignored
|
|
||||||
|
|
||||||
registry=localhost:${PODMAN_LOGIN_REGISTRY_PORT}
|
|
||||||
image_on_local_registry=$registry/name:tag
|
|
||||||
authfile=$PODMAN_TMPDIR/authfile.json
|
|
||||||
|
|
||||||
# First, start the registry and populate the authfile that we can use for the test.
|
|
||||||
start_registry
|
|
||||||
run_podman login --authfile=$authfile \
|
|
||||||
--tls-verify=false \
|
|
||||||
--username ${PODMAN_LOGIN_USER} \
|
|
||||||
--password ${PODMAN_LOGIN_PASS} \
|
|
||||||
$registry
|
|
||||||
|
|
||||||
# Push the image to the registry and pull it down again to make sure we
|
|
||||||
# have the identical digest in the local storage
|
|
||||||
run_podman push --tls-verify=false --creds "${PODMAN_LOGIN_USER}:${PODMAN_LOGIN_PASS}" $IMAGE $image_on_local_registry
|
|
||||||
run_podman pull --tls-verify=false --creds "${PODMAN_LOGIN_USER}:${PODMAN_LOGIN_PASS}" $image_on_local_registry
|
|
||||||
|
|
||||||
# Generate a systemd service with the "registry" auto-update policy running
|
|
||||||
# "top" inside the image we just pushed to the local registry.
|
|
||||||
generate_service "" registry top "" "" "" $image_on_local_registry
|
|
||||||
ctr=$cname
|
|
||||||
_wait_service_ready container-$ctr.service
|
|
||||||
|
|
||||||
run_podman 125 auto-update
|
|
||||||
is "$output" \
|
|
||||||
".*Error: checking image updates for container .*: x509: .*"
|
|
||||||
|
|
||||||
run_podman 125 auto-update --tls-verify=false
|
|
||||||
is "$output" \
|
|
||||||
".*Error: checking image updates for container .*: authentication required"
|
|
||||||
|
|
||||||
# Test 1)
|
|
||||||
run_podman auto-update --authfile=$authfile --tls-verify=false --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
|
|
||||||
is "$output" "container-$ctr.service,$image_on_local_registry,false,registry" "auto-update works with authfile"
|
|
||||||
|
|
||||||
# Test 2)
|
|
||||||
REGISTRY_AUTH_FILE=$authfile run_podman auto-update --tls-verify=false --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
|
|
||||||
is "$output" "container-$ctr.service,$image_on_local_registry,false,registry" "auto-update works with env var"
|
|
||||||
systemctl stop container-$ctr.service
|
|
||||||
run_podman rm -f -t0 --ignore $ctr
|
|
||||||
|
|
||||||
# Create a container with the auth-file label
|
|
||||||
generate_service "" registry top "--label io.containers.autoupdate.authfile=$authfile" "" "" $image_on_local_registry
|
|
||||||
ctr=$cname
|
|
||||||
_wait_service_ready container-$ctr.service
|
|
||||||
|
|
||||||
# Test 3)
|
|
||||||
# Also make sure that the label takes precedence over the CLI flag.
|
|
||||||
run_podman auto-update --authfile=/dev/null --tls-verify=false --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
|
|
||||||
is "$output" "container-$ctr.service,$image_on_local_registry,false,registry" "auto-update works with authfile container label"
|
|
||||||
run_podman rm -f -t0 --ignore $ctr
|
|
||||||
run_podman rmi $image_on_local_registry
|
|
||||||
}
|
|
||||||
|
|
||||||
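# Summary sketch of the three authfile mechanisms tested above
# (paths hypothetical):
#
#     podman auto-update --authfile=/path/auth.json             # 1) CLI flag
#     REGISTRY_AUTH_FILE=/path/auth.json podman auto-update     # 2) env var
#     podman create --label io.containers.autoupdate.authfile=/path/auth.json ...
#     # 3) per-container label, taking precedence over 1) and 2)
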
# vim: filetype=sh

@@ -1,451 +0,0 @@
#!/usr/bin/env bats   -*- bats -*-
#
# test podman checkpoint. Similar in many ways to our pause tests.
#

load helpers
load helpers.network

CHECKED_ROOTLESS=
function setup() {
    skip_if_rhel_or_centos "Checkpoint tests are very unstable and that's a problem on RHEL/CentOS Stream"

    # None of these tests work rootless....
    if is_rootless; then
        # ...however, is that a genuine cast-in-stone limitation, or one
        # that can some day be fixed? If one day some PR removes that
        # restriction, fail loudly here, so the developer can enable tests.
        if [[ -n "$CHECKED_ROOTLESS" ]]; then
            run_podman '?' container checkpoint -l
            is "$output" "Error: checkpointing a container requires root" \
               "Confirming that rootless checkpoint doesn't work. If that changed, please reexamine this test file!"
            CHECKED_ROOTLESS=y
        fi
        skip "checkpoint does not work rootless"
    fi

    # As of 2024-05, crun on Debian is not built with criu support:
    # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1008249
    runtime=$(podman_runtime)
    run $runtime checkpoint --help
    if [[ $status -ne 0 ]]; then
        skip "runtime $runtime does not support checkpoint/restore"
    fi

    basic_setup
}

# bats test_tags=ci:parallel
@test "podman checkpoint - basic test" {
    run_podman run -d $IMAGE sh -c 'while :;do cat /proc/uptime; sleep 0.1;done'
    local cid="$output"

    # Wait for container to start emitting output
    wait_for_output '[1-9]\+' $cid

    # Checkpoint, and confirm via inspect
    run_podman container checkpoint $cid
    # FIXME: remove the `.*` prefix after fix packaged for https://github.com/checkpoint-restore/criu/pull/1706
    is "$output" ".*$cid" "podman container checkpoint"

    run_podman container inspect \
               --format '{{.State.Status}}:{{.State.Running}}:{{.State.Paused}}:{{.State.Checkpointed}}' $cid
    is "$output" "exited:false:false:true" "State. Status:Running:Pause:Checkpointed"

    # Plan A was to do something similar to 080-pause.bats: sleep for long
    # enough to cause a gap in the timestamps in the log. But checkpoint
    # doesn't seem to work like that: upon restore, even if we sleep a long
    # time, the newly-started container seems to pick back up close to
    # where it left off. (Maybe it's something about /proc/uptime?)
    # Anyway, scratch Plan A. Plan B is simply to make sure that the
    # restarted container spits something out.
    run_podman logs $cid
    local nlines_before="${#lines[*]}"

    # Restart immediately and confirm state
    run_podman container restore $cid
    is "$output" "$cid" "podman container restore"

    # Note that upon restore, .Checkpointed reverts to false (#12117)
    run_podman container inspect \
               --format '{{.State.Status}}:{{.State.Running}}:{{.State.Paused}}:{{.State.Checkpointed}}' $cid
    is "$output" "running:true:false:false" \
       "State. Status:Running:Pause:Checkpointed"

    # Re-fetch logs, and ensure that they continue growing.
    # Allow a short while for container process to actually restart.
    local retries=10
    while [[ $retries -gt 0 ]]; do
        run_podman logs $cid
        local nlines_after="${#lines[*]}"
        if [[ $nlines_after -gt $nlines_before ]]; then
            break
        fi
        sleep 0.1
        retries=$((retries - 1))
    done
    assert "$retries" -gt 0 \
           "Container failed to output new lines after first restore"

    # Same thing again: test for https://github.com/containers/crun/issues/756
    # in which, after second checkpoint/restore, we lose logs
    run_podman container checkpoint $cid
    run_podman container logs $cid
    nlines_before="${#lines[*]}"
    run_podman container restore $cid

    # Same as above, confirm that we get new output
    retries=10
    while [[ $retries -gt 0 ]]; do
        run_podman logs $cid
        local nlines_after="${#lines[*]}"
        if [[ $nlines_after -gt $nlines_before ]]; then
            break
        fi
        sleep 0.1
        retries=$((retries - 1))
    done
    assert "$retries" -gt 0 \
           "stdout went away after second restore (crun issue 756)"

    run_podman rm -t 0 -f $cid
}

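# Minimal checkpoint/restore cycle, as exercised above (root required; the
# runtime must be built with criu support; container name hypothetical):
#
#     podman run -d --name demo $IMAGE top
#     podman container checkpoint demo   # container -> exited, Checkpointed=true
#     podman container restore demo      # container -> running again
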
# CANNOT BE PARALLELIZED: checkpoint -a
|
|
||||||
@test "podman checkpoint/restore print IDs or raw input" {
|
|
||||||
# checkpoint/restore -a must print the IDs
|
|
||||||
run_podman run -d $IMAGE top
|
|
||||||
ctrID="$output"
|
|
||||||
run_podman container checkpoint -a
|
|
||||||
is "$output" "$ctrID"
|
|
||||||
run_podman container restore -a
|
|
||||||
is "$output" "$ctrID"
|
|
||||||
|
|
||||||
# checkpoint/restore $input must print $input
|
|
||||||
cname=c-$(safename)
|
|
||||||
run_podman run -d --name $cname $IMAGE top
|
|
||||||
run_podman container checkpoint $cname
|
|
||||||
is "$output" $cname
|
|
||||||
run_podman container restore $cname
|
|
||||||
is "$output" $cname
|
|
||||||
|
|
||||||
run_podman rm -t 0 -f $ctrID $cname
|
|
||||||
}

# bats test_tags=ci:parallel
@test "podman checkpoint --export, with volumes" {
    skip_if_remote "Test uses --root/--runroot, which are N/A over remote"

    # To avoid network pull, copy $IMAGE straight to temp root
    local p_opts="$(podman_isolation_opts ${PODMAN_TMPDIR}) --events-backend file"
    run_podman save -o $PODMAN_TMPDIR/image.tar $IMAGE
    run_podman $p_opts load -i $PODMAN_TMPDIR/image.tar

    # Create a volume, find an unused network port, and create a webserver container
    volname=v-$(safename)
    run_podman $p_opts volume create $volname
    local cname=c-$(safename)
    local host_port=$(random_free_port)
    local server=http://127.0.0.1:$host_port

    run_podman $p_opts run -d --name $cname --volume $volname:/myvol \
               -p $host_port:80 \
               -w /myvol \
               $IMAGE sh -c "/bin/busybox-extras httpd -p 80;echo $cname >cname;echo READY;while :;do cat /proc/uptime >mydate.tmp;mv -f mydate.tmp mydate;sleep 0.1;done"
    local cid="$output"
    _PODMAN_TEST_OPTS="$p_opts" wait_for_ready $cid

    # Confirm that container responds
    run curl --max-time 3 -s $server/cname
    is "$output" "$cname" "curl $server/cname"
    run curl --max-time 3 -s $server/mydate
    local date_oldroot="$output"

    # Checkpoint...
    run_podman $p_opts container checkpoint \
               --ignore-rootfs \
               --export=$PODMAN_TMPDIR/$cname.tar.gz \
               $cname

    # ...confirm that port is now closed
    run curl --max-time 1 -s $server/mydate
    is "$status" "7" "cannot connect to port $host_port while container is down"

    # ...now restore it to our regular root
    run_podman container restore --import=$PODMAN_TMPDIR/$cname.tar.gz
    is "$output" "$cid"

    # Inspect (on regular root). Note that, unlike the basic test above,
    # .State.Checkpointed here is *false*.
    run_podman container inspect \
               --format '{{.State.Status}}:{{.State.Running}}:{{.State.Paused}}:{{.State.Checkpointed}}' $cname
    is "$output" "running:true:false:false" "State.Status:Running:Paused:Checkpointed"

    # Pause a moment to let the restarted container update the timestamp file
    sleep .3
    run curl --max-time 3 -s $server/mydate
    local date_newroot="$output"
    assert "$date_newroot" != "$date_oldroot" \
           "Restored container did not update the timestamp file"

    run_podman exec $cid cat /myvol/cname
    is "$output" "$cname" "volume transferred fine"

    run_podman rm -t 0 -f $cid
    run_podman volume rm -f $volname
}
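
# The export/import round trip above, reduced to its core (a sketch; the
# paths and container name are illustrative):
#
#   podman container checkpoint --ignore-rootfs \
#          --export=/tmp/ctr.tar.gz mycontainer
#   podman container restore --import=/tmp/ctr.tar.gz
#
# Named volumes are packed into the archive by default (there is an
# --ignore-volumes flag to skip them), which is why the restored container
# still sees the data in /myvol.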

# FIXME: test --leave-running

# bats test_tags=ci:parallel
@test "podman checkpoint --file-locks" {
    action='flock test.lock sh -c "while [ -e /wait ];do sleep 0.5;done;for i in 1 2 3;do echo \$i;sleep 0.5;done"'
    run_podman run -d $IMAGE sh -c "touch /wait; touch test.lock; echo READY; $action & $action & wait"
    local cid="$output"

    # Wait for container to start emitting output
    wait_for_ready $cid

    # Checkpoint, and confirm via inspect
    run_podman container checkpoint --file-locks $cid
    is "$output" "$cid" "podman container checkpoint"

    run_podman container inspect \
               --format '{{.State.Status}}:{{.State.Running}}:{{.State.Paused}}:{{.State.Checkpointed}}' $cid
    is "$output" "exited:false:false:true" "State.Status:Running:Paused:Checkpointed"

    # Restart immediately and confirm state
    run_podman container restore --file-locks $cid
    is "$output" "$cid" "podman container restore"

    # Signal the container to continue; this is where the 1-2-3s will come from.
    # The '-d' is because container exit is racy: the exec process itself
    # could get caught and killed by cleanup, causing this step to exit 137.
    run_podman exec -d $cid rm /wait

    # Wait for the container to stop
    run_podman wait $cid

    run_podman logs $cid
    trim=$(sed -z -e 's/[\r\n]\+//g' <<<"$output")
    is "$trim" "READY123123" "File lock restored"

    run_podman rm $cid
}
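
# Why the expected log is "READY123123": both background jobs contend on
# the same flock(1) lock, so each prints its full 1-2-3 run while holding
# the lock, and the sequences never interleave. A standalone sketch of the
# pattern (illustrative, not part of the test):
#
#   touch demo.lock
#   flock demo.lock sh -c 'for i in 1 2 3; do echo $i; done' &
#   flock demo.lock sh -c 'for i in 1 2 3; do echo $i; done' &
#   wait     # prints 1 2 3 1 2 3, one full sequence at a time
#
# CRIU normally refuses to checkpoint a process holding file locks; the
# --file-locks flag tells it to dump them and restore them on restore,
# which is what keeps this serialization intact.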

# bats test_tags=ci:parallel
@test "podman checkpoint/restore ip and mac handling" {
    # Refer to https://github.com/containers/podman/issues/16666#issuecomment-1337860545
    # for the correct behavior; this should cover all cases listed there.
    local netname="net-$(safename)"
    local subnet="$(random_rfc1918_subnet)"
    run_podman network create --subnet "$subnet.0/24" $netname

    run_podman run -d --network $netname $IMAGE top
    cid="$output"
    # get current ip and mac
    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}"
    ip1="$output"
    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}"
    mac1="$output"

    run_podman exec $cid cat /etc/hosts /etc/resolv.conf
    pre_hosts_resolv_conf_output="$output"

    run_podman container checkpoint $cid
    is "$output" "$cid"
    run_podman container restore $cid
    is "$output" "$cid"

    # now get mac and ip after restore; they should be the same
    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}"
    ip2="$output"
    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}"
    mac2="$output"

    # Make sure hosts and resolv.conf are the same after restore (#22901)
    run_podman exec $cid cat /etc/hosts /etc/resolv.conf
    assert "$output" == "$pre_hosts_resolv_conf_output" "hosts/resolv.conf must be the same after checkpoint"

    assert "$ip2" == "$ip1" "ip after restore should match"
    assert "$mac2" == "$mac1" "mac after restore should match"

    # restart the container; we should get a new ip/mac because they are not static
    run_podman restart $cid

    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}"
    ip3="$output"
    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}"
    mac3="$output"

    # the ip/mac should be different this time
    assert "$ip3" != "$ip1" "ip after restart should be different"
    assert "$mac3" != "$mac1" "mac after restart should be different"

    # restore with --ignore-static-ip/mac
    run_podman container checkpoint $cid
    is "$output" "$cid"
    run_podman container restore --ignore-static-ip --ignore-static-mac $cid
    is "$output" "$cid"

    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}"
    ip4="$output"
    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}"
    mac4="$output"

    # the ip/mac should be different this time
    assert "$ip4" != "$ip3" "ip after restore --ignore-static-ip should be different"
    assert "$mac4" != "$mac3" "mac after restore --ignore-static-mac should be different"

    local archive=$PODMAN_TMPDIR/checkpoint.tar.gz

    # now checkpoint and export the container
    run_podman container checkpoint --export "$archive" $cid
    is "$output" "$cid"
    # remove container
    run_podman rm -t 0 -f $cid

    # restoring without a new name should keep the ip/mac; we also get a new container id
    run_podman container restore --import "$archive"
    cid="$output"

    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}"
    ip5="$output"
    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}"
    mac5="$output"
    assert "$ip5" == "$ip4" "ip after restore --import should match"
    assert "$mac5" == "$mac4" "mac after restore --import should match"

    run_podman rm -t 0 -f $cid

    # now restore it again, but with --name this time; it should not keep the
    # mac and ip, to allow restoring the same container with different names
    # at the same time
    newname="newc-$(safename)"
    run_podman container restore --import "$archive" --name $newname
    cid="$output"

    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}"
    ip6="$output"
    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}"
    mac6="$output"
    assert "$ip6" != "$ip5" "ip after restore --import --name should be different"
    assert "$mac6" != "$mac5" "mac after restore --import --name should be different"

    run_podman rm -t 0 -f $cid

    # now create a container with a static mac and ip
    local static_ip="$subnet.2"
    local static_mac="92:d0:c6:0a:29:38"
    run_podman run -d --network "$netname:ip=$static_ip,mac=$static_mac" $IMAGE top
    cid="$output"

    run_podman container checkpoint $cid
    is "$output" "$cid"
    run_podman container restore --ignore-static-ip --ignore-static-mac $cid
    is "$output" "$cid"

    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}"
    ip7="$output"
    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}"
    mac7="$output"
    assert "$ip7" != "$static_ip" "static ip after restore --ignore-static-ip should be different"
    assert "$mac7" != "$static_mac" "static mac after restore --ignore-static-mac should be different"

    # restart the container to make sure the change is actually persistent in the config and not just set for restore
    run_podman restart $cid

    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}"
    ip8="$output"
    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}"
    mac8="$output"
    assert "$ip8" != "$static_ip" "static ip after restore --ignore-static-ip and restart should be different"
    assert "$mac8" != "$static_mac" "static mac after restore --ignore-static-mac and restart should be different"
    assert "$ip8" != "$ip7" "ip after restore --ignore-static-ip and restart should also differ from the pre-restart ip"
    assert "$mac8" != "$mac7" "mac after restore --ignore-static-mac and restart should also differ from the pre-restart mac"

    run_podman rm -t 0 -f $cid

    # now create the container again and try the same with --export and --import
    run_podman run -d --network "$netname:ip=$static_ip,mac=$static_mac" $IMAGE top
    cid="$output"

    run_podman container checkpoint --export "$archive" $cid
    is "$output" "$cid"
    # remove container
    run_podman rm -t 0 -f $cid

    # a plain restore should keep the static ip
    run_podman container restore --import "$archive"
    cid="$output"

    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}"
    ip9="$output"
    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}"
    mac9="$output"
    assert "$ip9" == "$static_ip" "static ip after restore --import should match"
    assert "$mac9" == "$static_mac" "static mac after restore --import should match"

    # restart the container to make sure the change is actually persistent in the config and not just set for restore
    run_podman restart $cid
    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}"
    ip10="$output"
    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}"
    mac10="$output"
    assert "$ip10" == "$static_ip" "static ip after restore --import and restart should match"
    assert "$mac10" == "$static_mac" "static mac after restore --import and restart should match"

    run_podman rm -t 0 -f $cid

    # restore again, this time without keeping the static ip/mac
    run_podman container restore --ignore-static-ip --ignore-static-mac --import "$archive"
    cid="$output"

    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}"
    ip11="$output"
    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}"
    mac11="$output"
    assert "$ip11" != "$static_ip" "static ip after restore --import --ignore-static-ip should be different"
    assert "$mac11" != "$static_mac" "static mac after restore --import --ignore-static-mac should be different"

    # restart the container to make sure the change is actually persistent in the config and not just set for restore
    run_podman restart $cid

    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").IPAddress}}"
    ip12="$output"
    run_podman inspect $cid --format "{{(index .NetworkSettings.Networks \"$netname\").MacAddress}}"
    mac12="$output"
    assert "$ip12" != "$static_ip" "static ip after restore --import --ignore-static-ip and restart should be different"
    assert "$mac12" != "$static_mac" "static mac after restore --import --ignore-static-mac and restart should be different"
    assert "$ip12" != "$ip11" "ip after restore --import --ignore-static-ip and restart should also differ from the pre-restart ip"
    assert "$mac12" != "$mac11" "mac after restore --import --ignore-static-mac and restart should also differ from the pre-restart mac"

    run_podman rm -t 0 -f $cid
    run_podman network rm $netname
}
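
# The repeated ip/mac probes above all run the same two inspect commands.
# A sketch of how they could be folded into one helper (_get_net_field is
# hypothetical, not part of this suite):
#
#   function _get_net_field {
#       local cid="$1" netname="$2" field="$3"   # field: IPAddress or MacAddress
#       run_podman inspect $cid \
#                  --format "{{(index .NetworkSettings.Networks \"$netname\").$field}}"
#       echo "$output"
#   }
#
#   ip1=$(_get_net_field $cid $netname IPAddress)
#   mac1=$(_get_net_field $cid $netname MacAddress)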

# rhbz#2177611 : podman breaks checkpoint/restore
# CANNOT BE PARALLELIZED: --latest
@test "podman checkpoint/restore the latest container" {
    skip_if_remote "podman-remote does not support the --latest option"
    # checkpoint/restore -l must print the IDs
    run_podman run -d $IMAGE top
    ctrID="$output"
    run_podman container checkpoint --latest
    is "$output" "$ctrID"

    run_podman container inspect \
               --format '{{.State.Status}}:{{.State.Running}}:{{.State.Paused}}:{{.State.Checkpointed}}' $ctrID
    is "$output" "exited:false:false:true" "State.Status:Running:Paused:Checkpointed"

    run_podman container restore -l
    is "$output" "$ctrID"

    run_podman container inspect \
               --format '{{.State.Status}}:{{.State.Running}}:{{.State.Paused}}:{{.State.Checkpointed}}' $ctrID
    is "$output" "running:true:false:false" "State.Status:Running:Paused:Checkpointed"

    run_podman rm -t 0 -f $ctrID
}
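
# --latest (-l) targets the most recently created container, so the pair
# above is roughly equivalent to this sketch (the ID lookup is illustrative):
#
#   last=$(podman ps -a --latest --format '{{.ID}}')
#   podman container checkpoint $last
#   podman container restore $last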

# vim: filetype=sh

helpers.bash (1375 lines changed): file diff suppressed because it is too large

podman.spec (46 lines changed):
@@ -5,7 +5,7 @@ GO111MODULE=off go build -buildmode pie -compiler gc -tags="rpm_crashtraceback $
 %global import_path github.com/containers/podman
 #%%global branch v5.4-rhel
-%global commit0 e7d8226745ba07a64b7176a7f128e4ef53225a0e
+%global commit0 da671ef6cfa3fc9ac6225c18f1dd0a70a951e43f
 %global shortcommit0 %(c=%{commit0}; echo ${c:0:7})
 %global cataver 0.1.7
 %global commit_dnsname bdc4ab85266ade865a7c398336e98721e62ef6b2
@@ -13,7 +13,7 @@ GO111MODULE=off go build -buildmode pie -compiler gc -tags="rpm_crashtraceback $
 Epoch: 5
 Name: podman
-Version: 5.5.2
+Version: 5.6.0
 Release: 1%{?dist}
 Summary: Manage Pods, Containers and Container Images
 License: Apache-2.0 AND BSD-2-Clause AND BSD-3-Clause AND ISC AND MIT AND MPL-2.0
@@ -27,16 +27,6 @@ Source1: https://github.com/openSUSE/catatonit/archive/v%{cataver}.tar.gz
 #Source2: https://github.com/containers/dnsname/archive/v%%{dnsnamever}.tar.gz
 Source2: https://github.com/containers/dnsname/archive/%{commit_dnsname}/dnsname-%{shortcommit_dnsname}.tar.gz
 # https://fedoraproject.org/wiki/PackagingDrafts/Go#Go_Language_Architectures
-# NOTE: Delete these sources after
-# https://github.com/containers/podman/pull/26183 merges
-# Not needed for copr builds on upstream PRs
-%if !%{defined copr_username}
-Source3: 220-healthcheck.bats
-Source4: 252-quadlet.bats
-Source5: 255-auto-update.bats
-Source6: 520-checkpoint.bats
-Source7: helpers.bash
-%endif
 ExclusiveArch: %{go_arches}
 Provides: %{name}-manpages = %{epoch}:%{version}-%{release}
 Obsoletes: %{name}-manpages < %{epoch}:%{version}-%{release}
@@ -53,7 +43,7 @@ BuildRequires: libassuan-devel
 BuildRequires: libgpg-error-devel
 BuildRequires: libseccomp-devel
 BuildRequires: libselinux-devel
-BuildRequires: ostree-devel
+BuildRequires: sqlite-devel
 BuildRequires: pkgconfig
 BuildRequires: make
 BuildRequires: systemd
@@ -153,8 +143,8 @@ Recommends: slirp4netns
 %description tests
 %{summary}
 
-This package contains system tests for %{name}. Only used for gating tests. End
-user / customer use cases not supported.
+This package contains system tests for %{name}. Only intended to be used for
+gating tests. Not supported for end users / customers.
 
 %prep
 %if 0%{?branch:1}
@@ -170,14 +160,6 @@ sed -i '$d' configure.ac
 popd
 tar fx %{SOURCE2}
 
-%if !%{defined copr_username}
-cp %{SOURCE3} test/system/.
-cp %{SOURCE4} test/system/.
-cp %{SOURCE5} test/system/.
-cp %{SOURCE6} test/system/.
-cp %{SOURCE7} test/system/.
-%endif
-
 # cgroups-v1 is supported on rhel9
 %if 0%{?rhel} == 9
 sed -i '/DELETE ON RHEL9/,/DELETE ON RHEL9/d' libpod/runtime.go
@@ -230,19 +212,23 @@ LDFLAGS="-X %{import_path}/libpod/define.buildInfo=$(date +%s)"
 # build rootlessport
 %gobuild -o bin/rootlessport %{import_path}/cmd/rootlessport
 
-export BUILDTAGS="cni seccomp btrfs_noversion exclude_graphdriver_devicemapper exclude_graphdriver_btrfs $(hack/libdm_tag.sh) $(hack/selinux_tag.sh) $(hack/systemd_tag.sh) $(hack/libsubid_tag.sh)"
+export BASEBUILDTAGS="cni seccomp $(hack/systemd_tag.sh) $(hack/libsubid_tag.sh) libsqlite3 grpcnotrace"
 
+export BUILDTAGS="$BASEBUILDTAGS $(hack/btrfs_installed_tag.sh)"
 %gobuild -o bin/%{name} %{import_path}/cmd/%{name}
 
-# build %%{name}-testing
-%gobuild -o bin/podman-testing %{import_path}/cmd/podman-testing
-
 # build %%{name}-remote
-export BUILDTAGS="remote $BUILDTAGS"
+export BUILDTAGS="$BASEBUILDTAGS exclude_graphdriver_btrfs remote"
 %gobuild -o bin/%{name}-remote %{import_path}/cmd/%{name}
 
 # build quadlet
+export BUILDTAGS="$BASEBUILDTAGS $(hack/btrfs_installed_tag.sh)"
 %gobuild -o bin/quadlet %{import_path}/cmd/quadlet
 
+# build %%{name}-testing
+export BUILDTAGS="$BASEBUILDTAGS $(hack/btrfs_installed_tag.sh)"
+%gobuild -o bin/podman-testing %{import_path}/cmd/podman-testing
+
 %{__make} docs
 %{__make} docker-docs
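
Note on the hunk above: rather than mutating a single BUILDTAGS string, each
binary now composes its tag set from a shared BASEBUILDTAGS, so podman-remote
can exclude the btrfs graph driver while podman, quadlet, and podman-testing
keep the locally detected btrfs tag. A minimal shell sketch of the pattern,
with illustrative tag names:

    export BASEBUILDTAGS="cni seccomp libsqlite3"
    # local binaries: add btrfs support if the build host has it
    export BUILDTAGS="$BASEBUILDTAGS $(hack/btrfs_installed_tag.sh)"
    # remote client: no graph drivers needed
    export BUILDTAGS="$BASEBUILDTAGS exclude_graphdriver_btrfs remote"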
@@ -391,6 +377,10 @@ fi
 %{_datadir}/%{name}/test
 
 %changelog
+* Mon Aug 18 2025 Jindrich Novy <jnovy@redhat.com> - 5:5.6.0-1
+- update to https://github.com/containers/podman/releases/tag/v5.6.0
+- Related: RHEL-80816
+
 * Thu Jul 03 2025 Jindrich Novy <jnovy@redhat.com> - 5:5.5.2-1
 - update to https://github.com/containers/podman/releases/tag/v5.5.2
 - Related: RHEL-80816

sources (2 lines changed):

@@ -1,3 +1,3 @@
-SHA512 (podman-5.5.2-e7d8226.tar.gz) = fec98c06f79283436a0f4a7c7f24ba96e7f288b2b13d6f6829364f58810ba86b2bf86f51b0d06cd7e43ae63cd0a92207aa228bf1aec5bf65fd85a9ff3f00b445
+SHA512 (podman-5.6.0-da671ef.tar.gz) = dddffa8874438df953b5aac09173e05577c26cabf3a0c110b0afdd483df742d57923d4e4574a78770a5d5ed78d4df3311d3abc8c317cb0d7821f01ced7cebf26
 SHA512 (v0.1.7.tar.gz) = 7d3174c60e1c8bd1f4b95b7751ccbe01cac63265060f18914b53311f68f7b4c63c693604f348ccfac5db4a96939169f835fbbbd614803b18062053d94f7dca67
 SHA512 (dnsname-bdc4ab8.tar.gz) = 2c7f4463b439d143c1cc1194b45bbd18e220bd03aa2c466cd81a14d043b753d69d84d331518cca3fab9b75ad5467b73f640f7d8ddce3eee05a5f5507d6f20cef