Do not test CNI in CI

CNI is deprecated and is build-tagged out for 5.0. Don't test it in our CI.
This commit also disables the upgrade tests for now; they need more work, since the old Podman versions they upgrade from only support CNI. Upgrade tests will be revamped in a later commit.
Signed-off-by: Ashley Cui <acui@redhat.com>
Ashley Cui
2024-01-28 15:25:00 -05:00
parent ee923358c3
commit 26cd01ee51
6 changed files with 6 additions and 146 deletions
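
What "build tagged out" means in practice: the CNI code paths are excluded from a default compile by a Go build constraint, so only a build that explicitly passes the tag gets them. A minimal sketch of the mechanism; the tag, file, and package names below are illustrative, not Podman's actual layout:

//go:build cni

// This file participates in compilation only when building with
// `-tags cni`; a plain `go build` skips it entirely, so the default
// binary carries no CNI code at all. (Illustrative sketch only.)
package network

// CNISupported reports whether this binary was built with CNI support.
func CNISupported() bool { return true }

A sibling file guarded by //go:build !cni would supply a stub returning false, so callers compile unchanged whether or not the tag is set.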

.cirrus.yml

@@ -105,13 +105,11 @@ build_task:
 CTR_FQIN: ${FEDORA_CONTAINER_FQIN}
 # ID for re-use of build output
 CI_DESIRED_RUNTIME: crun
-CI_DESIRED_NETWORK: netavark
 - env:
 DISTRO_NV: ${PRIOR_FEDORA_NAME}
 VM_IMAGE_NAME: ${PRIOR_FEDORA_CACHE_IMAGE_NAME}
 CTR_FQIN: ${PRIOR_FEDORA_CONTAINER_FQIN}
 CI_DESIRED_RUNTIME: crun
-CI_DESIRED_NETWORK: cni
 CI_DESIRED_DATABASE: boltdb
 CI_DESIRED_STORAGE: vfs
 # Catch invalid "TMPDIR == /tmp" assumptions; PR #19281
@@ -125,7 +123,6 @@ build_task:
 DISTRO_NV: ${DEBIAN_NAME}
 VM_IMAGE_NAME: ${DEBIAN_CACHE_IMAGE_NAME}
 CI_DESIRED_RUNTIME: runc
-CI_DESIRED_NETWORK: netavark
 env:
 TEST_FLAVOR: build
 # NOTE: The default way Cirrus-CI clones is *NOT* compatible with
@@ -186,7 +183,6 @@ build_aarch64_task:
 VM_IMAGE_NAME: ${FEDORA_AARCH64_AMI}
 CTR_FQIN: ${FEDORA_CONTAINER_FQIN}
 CI_DESIRED_RUNTIME: crun
-CI_DESIRED_NETWORK: netavark
 TEST_FLAVOR: build
 clone_script: *full_clone
 prebuild_script: *prebuild
@@ -682,13 +678,11 @@ container_integration_test_task:
 VM_IMAGE_NAME: ${FEDORA_CACHE_IMAGE_NAME}
 CTR_FQIN: ${FEDORA_CONTAINER_FQIN}
 CI_DESIRED_RUNTIME: crun
-CI_DESIRED_NETWORK: netavark
 - env:
 DISTRO_NV: ${PRIOR_FEDORA_NAME}
 VM_IMAGE_NAME: ${PRIOR_FEDORA_CACHE_IMAGE_NAME}
 CTR_FQIN: ${PRIOR_FEDORA_CONTAINER_FQIN}
 CI_DESIRED_RUNTIME: crun
-CI_DESIRED_NETWORK: cni
 CI_DESIRED_DATABASE: boltdb
 gce_instance: *standardvm
 timeout_in: 50m
@@ -746,7 +740,6 @@ podman_machine_task:
 PRIV_NAME: "rootless" # intended use-case
 DISTRO_NV: "${FEDORA_NAME}"
 VM_IMAGE_NAME: "${FEDORA_AMI}"
-CI_DESIRED_NETWORK: netavark
 clone_script: *get_gosrc
 setup_script: *setup
 main_script: *main
@@ -772,7 +765,6 @@ podman_machine_aarch64_task:
 PRIV_NAME: "rootless" # intended use-case
 DISTRO_NV: "${FEDORA_AARCH64_NAME}"
 VM_IMAGE_NAME: "${FEDORA_AARCH64_AMI}"
-CI_DESIRED_NETWORK: netavark
 clone_script: *get_gosrc_aarch64
 setup_script: *setup
 main_script: *main
@@ -922,7 +914,6 @@ rootless_remote_system_test_task:
 VM_IMAGE_NAME: ${FEDORA_CACHE_IMAGE_NAME}
 CTR_FQIN: ${FEDORA_CONTAINER_FQIN}
 CI_DESIRED_RUNTIME: crun
-CI_DESIRED_NETWORK: netavark
 <<: *local_system_test_task
 alias: rootless_remote_system_test
 gce_instance: *standardvm
@@ -1007,37 +998,6 @@ buildah_bud_test_task:
 main_script: *main
 always: *int_logs_artifacts
-upgrade_test_task:
-name: "Upgrade test: from $PODMAN_UPGRADE_FROM"
-alias: upgrade_test
-# Docs: ./contrib/cirrus/CIModes.md
-only_if: *not_tag_magic
-depends_on:
-- build
-- local_system_test
-matrix:
-- env:
-PODMAN_UPGRADE_FROM: v2.1.1
-CI_DESIRED_NETWORK: cni
-- env:
-PODMAN_UPGRADE_FROM: v3.1.2
-CI_DESIRED_NETWORK: cni
-- env:
-PODMAN_UPGRADE_FROM: v3.4.4
-CI_DESIRED_NETWORK: cni
-gce_instance: *standardvm
-env:
-TEST_FLAVOR: upgrade_test
-DISTRO_NV: ${FEDORA_NAME}
-VM_IMAGE_NAME: ${FEDORA_CACHE_IMAGE_NAME}
-# FIXME: remove this once we have VMs with podman >= 4.8
-CI_DESIRED_DATABASE: boltdb
-clone_script: *get_gosrc
-setup_script: *setup
-main_script: *main
-always: *logs_artifacts
 # This task is critical. It updates the "last-used by" timestamp stored
 # in metadata for all VM images. This mechanism functions in tandem with
 # an out-of-band pruning operation to remove disused VM images.
@@ -1111,7 +1071,6 @@ success_task:
 - minikube_test
 - farm_test
 - buildah_bud_test
-- upgrade_test
 - meta
 container: &smallcontainer
 image: ${CTR_FQIN}

contrib/cirrus/lib.sh

@@ -95,7 +95,7 @@ EPOCH_TEST_COMMIT="$CIRRUS_BASE_SHA"
 # contexts, such as host->container or root->rootless user
 #
 # List of envariables which must be EXACT matches
-PASSTHROUGH_ENV_EXACT='CGROUP_MANAGER|DEST_BRANCH|DISTRO_NV|GOCACHE|GOPATH|GOSRC|NETWORK_BACKEND|OCI_RUNTIME|ROOTLESS_USER|SCRIPT_BASE|SKIP_USERNS|EC2_INST_TYPE|PODMAN_DB|STORAGE_FS'
+PASSTHROUGH_ENV_EXACT='CGROUP_MANAGER|DEST_BRANCH|DISTRO_NV|GOCACHE|GOPATH|GOSRC|OCI_RUNTIME|ROOTLESS_USER|SCRIPT_BASE|SKIP_USERNS|EC2_INST_TYPE|PODMAN_DB|STORAGE_FS'
 # List of envariable patterns which must match AT THE BEGINNING of the name.
 PASSTHROUGH_ENV_ATSTART='CI|LANG|LC_|TEST'
@@ -202,61 +202,6 @@ install_test_configs() {
 install -v -D -m 644 ./test/registries.conf /etc/containers/
 }
-use_cni() {
-req_env_vars OS_RELEASE_ID PACKAGE_DOWNLOAD_DIR SCRIPT_BASE
-# Defined by common automation library
-# shellcheck disable=SC2154
-if [[ "$OS_RELEASE_ID" =~ "debian" ]]; then
-# Supporting it involves swapping the rpm & dnf commands below
-die "Testing debian w/ CNI networking currently not supported"
-fi
-msg "Forcing NETWORK_BACKEND=cni for all subsequent environments."
-echo "NETWORK_BACKEND=cni" >> /etc/ci_environment
-export NETWORK_BACKEND=cni
-# While it's possible a user may want both installed, for CNI CI testing
-# purposes we only care about backward-compatibility, not forward.
-# If both CNI & netavark are present, in some situations where --root
-# is used it's possible for podman to pick the "wrong" networking stack.
-msg "Force-removing netavark and aardvark-dns"
-# Other packages depend on nv/av, but we're testing with podman
-# binaries built from source, so it's safe to ignore these deps.
-#
-# Do not fail when netavark and aardvark-dns are not installed.
-for pkg in aardvark-dns netavark
-do
-[ -z "$(rpm -qa | grep $pkg)" ] && echo "$pkg not installed" || rpm -e --nodeps $pkg
-done
-msg "Installing default CNI configuration"
-showrun dnf install -y $PACKAGE_DOWNLOAD_DIR/podman-plugins*
-cd $GOSRC || exit 1
-rm -rvf /etc/cni/net.d
-mkdir -p /etc/cni/net.d
-showrun install -v -D -m 644 ./cni/87-podman-bridge.conflist \
-/etc/cni/net.d/
-# This config must always sort last in the list of networks (podman picks
-# first one as the default). This config prevents allocation of network
-# address space used by default in google cloud.
-# https://cloud.google.com/vpc/docs/vpc#ip-ranges
-showrun install -v -D -m 644 $SCRIPT_BASE/99-do-not-use-google-subnets.conflist \
-/etc/cni/net.d/
-}
-use_netavark() {
-req_env_vars OS_RELEASE_ID PRIOR_FEDORA_NAME DISTRO_NV
-local magickind repokind
-msg "Unsetting NETWORK_BACKEND for all subsequent environments."
-echo "export -n NETWORK_BACKEND" >> /etc/ci_environment
-echo "unset NETWORK_BACKEND" >> /etc/ci_environment
-export -n NETWORK_BACKEND
-unset NETWORK_BACKEND
-msg "Removing any/all CNI configuration"
-showrun rm -rvf /etc/cni/net.d/*
-# N/B: The CNI packages are still installed and available. This is
-# on purpose, since CI needs to verify the selection mechanisms are
-# functional when both are available.
-}
 # Remove all files provided by the distro version of podman.
 # All VM cache-images used for testing include the distro podman because (1) it's
 # required for podman-in-podman testing and (2) it somewhat simplifies the task

contrib/cirrus/setup_environment.sh

@@ -158,16 +158,6 @@ case "$OS_RELEASE_ID" in
 *) die_unknown OS_RELEASE_ID
 esac
-# Networking: force CNI or Netavark as requested in .cirrus.yml
-# (this variable is mandatory).
-# shellcheck disable=SC2154
-showrun echo "about to set up for CI_DESIRED_NETWORK [=$CI_DESIRED_NETWORK]"
-case "$CI_DESIRED_NETWORK" in
-netavark) use_netavark ;;
-cni) use_cni ;;
-*) die_unknown CI_DESIRED_NETWORK ;;
-esac
 # Database: force SQLite or BoltDB as requested in .cirrus.yml.
 # If unset, will default to SQLite.
 # shellcheck disable=SC2154

test/e2e/info_test.go

@@ -152,23 +152,15 @@ var _ = Describe("Podman Info", func() {
 })
 It("Podman info: check desired network backend", func() {
-// defined in .cirrus.yml
-want := os.Getenv("CI_DESIRED_NETWORK")
-if want == "" {
-if os.Getenv("CIRRUS_CI") == "" {
-Skip("CI_DESIRED_NETWORK is not set--this is OK because we're not running under Cirrus")
-}
-Fail("CIRRUS_CI is set, but CI_DESIRED_NETWORK is not! See #16389")
-}
 session := podmanTest.Podman([]string{"info", "--format", "{{.Host.NetworkBackend}}"})
 session.WaitWithDefaultTimeout()
 Expect(session).To(ExitCleanly())
-Expect(session.OutputToString()).To(Equal(want))
+Expect(session.OutputToString()).To(Equal("netavark"))
 session = podmanTest.Podman([]string{"info", "--format", "{{.Host.NetworkBackendInfo.Backend}}"})
 session.WaitWithDefaultTimeout()
 Expect(session).To(ExitCleanly())
-Expect(session.OutputToString()).To(Equal(want))
+Expect(session.OutputToString()).To(Equal("netavark"))
 })
 It("Podman info: check desired database backend", func() {

test/system/005-info.bats

@@ -82,21 +82,8 @@ host.slirp4netns.executable | $expr_path
 }
 @test "podman info - confirm desired network backend" {
-if [[ -z "$CI_DESIRED_NETWORK" ]]; then
-# When running in Cirrus, CI_DESIRED_NETWORK *must* be defined
-# in .cirrus.yml so we can double-check that all CI VMs are
-# using netavark or cni as desired.
-if [[ -n "$CIRRUS_CI" ]]; then
-die "CIRRUS_CI is set, but CI_DESIRED_NETWORK is not! See #16389"
-fi
-# Not running under Cirrus (e.g., gating tests, or dev laptop).
-# Totally OK to skip this test.
-skip "CI_DESIRED_NETWORK is unset--OK, because we're not in Cirrus"
-fi
 run_podman info --format '{{.Host.NetworkBackend}}'
-is "$output" "$CI_DESIRED_NETWORK" "CI_DESIRED_NETWORK (from .cirrus.yml)"
+is "$output" "netavark" "netavark backend"
 }
 @test "podman info - confirm desired database" {

test/upgrade/test-upgrade.bats

@@ -125,10 +125,6 @@ while :;do
 echo STOPPING
 podman \$opts stop -t 0 myrunningcontainer || true
 podman \$opts rm -f myrunningcontainer || true
-# sigh, network rm fails with exec: "ip": executable file not found in $PATH
-# we cannot change the images afterwards so we remove it manually (#11403)
-# hardcode /etc/cni/net.d dir for now
-podman \$opts network rm -f mynetwork || rm -f /etc/cni/net.d/mynetwork.conflist
 exit 0
 fi
 sleep 0.5
@@ -143,10 +139,7 @@ EOF
 # Also use --network host to prevent any netavark/cni conflicts
 $PODMAN run --rm --network host $OLD_PODMAN true
-# Podman 4.0 might no longer use cni so /run/cni and /run/containers will no be created in this case
-# Create directories manually to fix this. Also running with netavark can
-# cause connectivity issues since cni and netavark should never be mixed.
-mkdir -p /run/netns /run/cni /run/containers /var/lib/cni /etc/cni/net.d
+mkdir -p /run/netns
 # Containers-common around release 1-55 no-longer supplies this file
 sconf=/etc/containers/storage.conf
@@ -165,7 +158,6 @@ EOF
 #
 # mount /etc/containers/storage.conf to use the same storage settings as on the host
 # mount /dev/shm because the container locks are stored there
-# mount /var/lib/cni, /run/cni and /etc/cni/net.d for cni networking
 # mount /run/containers for the dnsname plugin
 #
 $PODMAN run -d --name podman_parent --pid=host \
@@ -178,9 +170,6 @@ EOF
 -v /run/crun:/run/crun \
 -v /run/netns:/run/netns:rshared \
 -v /run/containers:/run/containers \
--v /run/cni:/run/cni \
--v /var/lib/cni:/var/lib/cni \
--v /etc/cni/net.d:/etc/cni/net.d \
 -v /dev/shm:/dev/shm \
 -v $pmroot:$pmroot:rshared \
 $OLD_PODMAN $pmroot/setup
@@ -200,10 +189,8 @@
 }
 @test "info" {
-# check network backend, since this is an old version we should use CNI
-# when we start testing from 4.0 we should have netavark as backend
 run_podman info --format '{{.Host.NetworkBackend}}'
-is "$output" "cni" "correct network backend"
+is "$output" "netavark" "correct network backend"
 }
 @test "images" {