ginkgo v2: drop localbenchmarks

Porting them over to v2 requires a full rewrite.
It is not clear who actually uses these benchmarks. Valentin, who wrote
them originally, is in favor of removing them; he recommends using the
script from hack/perf instead.

This commit also drops the CI integration; it is not clear who actually
uses this data. If it is needed for something, please speak up.

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
Author: Paul Holzinger
Date:   2023-04-13 15:11:07 +02:00
Parent: fb7a96638c
Commit: 2ce4e935be

5 changed files with 0 additions and 449 deletions
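For context on the "full rewrite" mentioned above: Ginkgo v2 removed the Measure/Benchmarker API that the deleted suite below is built on, and its closest replacement is gomega's gmeasure package, which is organized around explicit Experiment objects. The following is a minimal, hypothetical sketch of what one ported benchmark could look like; it is not part of this commit, and runPodman is an assumed stand-in for the suite's podmanTest.Podman() helper.

//go:build benchmarks

package integration

import (
	"os/exec"

	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega/gmeasure"
)

// runPodman is an assumed stand-in for the podmanTest.Podman() helper used by
// the deleted suite; it simply shells out to the podman binary.
func runPodman(args ...string) {
	_ = exec.Command("podman", args...).Run()
}

var _ = Describe("Podman Benchmark Suite", func() {
	It("benchmarks podman images", func() {
		// An Experiment replaces the v1 Benchmarker; attaching it as a
		// report entry makes the timing table show up in the run output.
		experiment := gmeasure.NewExperiment("podman images")
		AddReportEntry(experiment.Name, experiment)

		// Sample three times, mirroring numBenchmarkSamples = 3 below.
		experiment.Sample(func(idx int) {
			experiment.MeasureDuration("[CPU] podman images", func() {
				runPodman("images")
			})
		}, gmeasure.SamplingConfig{N: 3})
	})
})

Every benchmark, the sampling loop, and the memory bookkeeping would have to be restructured around such Experiment objects, which is why the commit message calls the port a full rewrite.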


@@ -583,14 +583,6 @@ remoteintegration: test-binaries ginkgo-remote
localmachine: test-binaries .install.ginkgo
$(MAKE) ginkgo-run GINKGONODES=1 GINKGOWHAT=pkg/machine/e2e/. HACK=
.PHONY: localbenchmarks
localbenchmarks: install.tools test-binaries
PATH=$(PATH):$(shell pwd)/hack $(GINKGO) \
--focus "Podman Benchmark Suite" \
--tags "$(BUILDTAGS) benchmarks" --no-color \
--succinct \
test/e2e/.
.PHONY: localsystem
localsystem:
# Wipe existing config, database, and cache: start with clean slate.


@@ -335,37 +335,3 @@ remove_packaged_podman_files() {
# Be super extra sure and careful vs performant and completely safe
sync && echo 3 > /proc/sys/vm/drop_caches || true
}
# Execute make localbenchmarks, writing output under $CIRRUS_WORKING_DIR/data
# so it can be preserved as a task artifact.
localbenchmarks() {
local datadir envnames envname
req_env_vars DISTRO_NV PODBIN_NAME PRIV_NAME TEST_ENVIRON TEST_FLAVOR
req_env_vars VM_IMAGE_NAME EC2_INST_TYPE
datadir=$CIRRUS_WORKING_DIR/data
mkdir -p $datadir
envnames=$(passthrough_envars | sort);
(
echo "# Env. var basis for benchmarks benchmarks."
for envname in $envnames; do
printf "$envname=%q\n" "${!envname}"
done
echo "# Machine details for data-comparison sake, not actual env. vars."
# Checked above in req_env_vars
# shellcheck disable=SC2154
echo "\
BENCH_ENV_VER=1
CPUTOTAL=$(grep -ce '^processor' /proc/cpuinfo)
INST_TYPE=$EC2_INST_TYPE
MEMTOTALKB=$(awk -F: '$1 == "MemTotal" { print $2 }' </proc/meminfo | sed -e "s/^ *//" | cut -d ' ' -f 1)
UNAME_R=$(uname -r)
UNAME_M=$(uname -m)
"
) > $datadir/benchmarks.env
make localbenchmarks | tee $datadir/benchmarks.raw
msg "Processing raw benchmarks output"
hack/parse-localbenchmarks < $datadir/benchmarks.raw | tee $datadir/benchmarks.csv
}


@@ -403,9 +403,6 @@ dotest() {
}
_run_machine() {
# This environment is convenient for executing some benchmarking
localbenchmarks
# N/B: Can't use _bail_if_test_can_be_skipped here b/c content isn't under test/
make localmachine |& logformatter
}


@@ -1,133 +0,0 @@
#!/usr/bin/perl
#
# parse-localbenchmarks - convert localbenchmarks output to CSV
#
# This is a filter. It transforms data from one format to another. Usage:
#
# $ make localbenchmarks &> mylogfile
# $ hack/parse-localbenchmarks <mylogfile > benchmarks.csv
#
# To be more precise, this is a very stupid simpleminded filter. It is
# not a complete solution to the benchmarks problem. In particular,
# other tools are still needed to:
#
# * Actually _run_ the benchmarks in some standard production environment
# * Run this script on the results
# * Save results, with identifying tags (datetime, git hash, PR id, ...)
# * Compare two or more sets of CSVs
#
(our $ME = $0) =~ s|^.*/||; # script name
use v5.14;
use utf8;
# FIXME: add --help. Some day. Not urgent.
die "$ME: This is a filter, not an interactive tool\n" if -t *STDIN;
my $n_samples; # Number of timing runs (FIXME: unused)
my %results; # Timing results
my @benchmarks; # Names of benchmarks
my ($type, $testname); # Current context
#
# Pass 1: read in timings
#
while (my $line = <STDIN>) {
# Log will have lots of ginkgo output. The only thing we care about is
# the summary at the end, which will look something like:
#
# * [MEASUREMENT]
# Podman Benchmark Suite
# ....
# Ran 3 samples:
# [CPU] podman images:
# Fastest Time: 0.265s
# Slowest Time: 0.322s
# Average Time: 0.302s ± 0.018s
# [MEM] podman images:
# Smallest: 44076.0KB
# Largest: 44616.0KB
# Average: 44338.7KB ± 171.2KB
# [CPU] podman push:
# ....repeat [CPU] and [MEM] for each test
# --------------------------
# SSSSSSSSSSSSSSSSSSSSS (and more ginkgo output we don't care about)
#
chomp $line;
next unless $line =~ /^.{1,3}\s+\[MEASUREMENT\]/ .. $line =~ /^-{20,}$/;
# Trim leading & trailing whitespace
$line =~ s/(^\s+|\s+$)//g;
# FIXME: we don't actually emit this. What would be a good way to do so?
if ($line =~ /^Ran\s+(\d+)\s+samples/) {
$n_samples = $1;
}
# e.g., [CPU] podman foo:
elsif ($line =~ /^\[([A-Z]+)\]\s+(\S.*\S):$/) {
($type, $testname) = ($1, $2);
}
# e.g., 'Fastest Time: 0.265s'
elsif ($line =~ /^(\S.*?\S):\s+(.*)/) {
log_result($testname, $type, $1, $2);
}
else {
warn "Cannot grok '$line'\n" if $ENV{DEBUG_PARSELOCALBENCHMARKS};
}
}
#
# Pass 2: write out CSV
#
# Headings...
print "\"Test Name\"";
printf ", \"%s\"", $_ for @benchmarks;
print "\n";
# ...then data
for my $t (sort keys %results) {
printf "\"%s\"", $t;
for my $benchmark (@benchmarks) {
printf ", \"%s\"", $results{$t}{$benchmark} || '';
}
print "\n";
}
exit 0;
################
# log_result # Preserve one record
################
sub log_result {
my $testname = shift; # in: test name (eg "podman foo")
my $type = shift; # in: CPU or MEM
my $name = shift; # in: benchmark name (eg "Fastest")
my $result = shift; # in: benchmark value
my $benchmark = "$type $name";
$results{$testname}{$benchmark} = $result;
# Keep an ordered list of benchmark names (as in, the order we
# encounter them)
push @benchmarks, $benchmark
unless grep { $_ eq $benchmark } @benchmarks;
# Special case: "Average X" may be of the form "xxx ± yyy". Move the
# standard deviation to its own column.
if ($name =~ /Average/) {
if ($results{$testname}{$benchmark} =~ s/^(\S+)\s+.*\s+(\S+)$/$1/) {
my ($average, $sd) = ($1, $2);
log_result($testname, $type, 'StdDev', $sd);
# Strip off units, so we can express the standard deviation as a percentage
$average =~ s/[a-z]+$//i;
$sd =~ s/[a-z]+$//i;
my $pct = sprintf("%.1f%%", $sd * 100.0 / $average);
log_result($testname, $type, 'StdDev (Percent)', $pct);
}
}
}
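To make the parser's output concrete: fed the [CPU]/[MEM] summary shown in the comment at the top of the script (the "podman images" block only), it would emit roughly the following CSV, with each Average value split into separate StdDev and StdDev (Percent) columns as described above. The numbers are derived from that sample input, not from a real run.

"Test Name", "CPU Fastest Time", "CPU Slowest Time", "CPU Average Time", "CPU StdDev", "CPU StdDev (Percent)", "MEM Smallest", "MEM Largest", "MEM Average", "MEM StdDev", "MEM StdDev (Percent)"
"podman images", "0.265s", "0.322s", "0.302s", "0.018s", "6.0%", "44076.0KB", "44616.0KB", "44338.7KB", "171.2KB", "0.4%"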


@@ -1,271 +0,0 @@
//go:build benchmarks
// +build benchmarks
package integration
import (
"fmt"
"os"
"path"
"strconv"
"strings"
podmanRegistry "github.com/containers/podman/v4/hack/podman-registry-go"
. "github.com/containers/podman/v4/test/utils"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gexec"
"github.com/sirupsen/logrus"
)
var (
// Number of times to execute each benchmark.
numBenchmarkSamples = 3
// All benchmarks are queued here.
allBenchmarks []benchmark
)
// An internal struct for queuing benchmarks.
type benchmark struct {
// The name of the benchmark.
name string
// The function to execute.
main func()
// Allows for extending a benchmark.
options newBenchmarkOptions
}
var benchmarkRegistry *podmanRegistry.Registry
// Allows for customizing the benchmark in an easy-to-extend way.
type newBenchmarkOptions struct {
// Sets the benchmark's init function.
init func()
// Run a local registry for this benchmark. Use `getPortUserPass()` in
// the benchmark to get the port, user and password.
needsRegistry bool
}
// Queue a new benchmark.
func newBenchmark(name string, main func(), options *newBenchmarkOptions) {
bm := benchmark{name: name, main: main}
if options != nil {
bm.options = *options
}
allBenchmarks = append(allBenchmarks, bm)
}
// getPortUserPass returns the port, user and password of the currently running
// registry.
func getPortUserPass() (string, string, string) {
if benchmarkRegistry == nil {
return "", "", ""
}
return benchmarkRegistry.Port, benchmarkRegistry.User, benchmarkRegistry.Password
}
var _ = Describe("Podman Benchmark Suite", func() {
var (
timedir string
podmanTest *PodmanTestIntegration
)
setup := func() {
tempdir, err := CreateTempDirInTempDir()
if err != nil {
os.Exit(1)
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
timedir, err = CreateTempDirInTempDir()
if err != nil {
os.Exit(1)
}
}
cleanup := func() {
podmanTest.Cleanup()
os.RemoveAll(timedir)
// Stop the local registry.
if benchmarkRegistry != nil {
if err := benchmarkRegistry.Stop(); err != nil {
logrus.Errorf("Error stopping registry: %v", err)
os.Exit(1)
}
benchmarkRegistry = nil
}
}
totalMemoryInKb := func() (total uint64) {
files, err := os.ReadDir(timedir)
if err != nil {
Fail(fmt.Sprintf("Error reading timing dir: %v", err))
}
for _, f := range files {
if f.IsDir() {
continue
}
raw, err := os.ReadFile(path.Join(timedir, f.Name()))
if err != nil {
Fail(fmt.Sprintf("Error reading timing file: %v", err))
}
rawS := strings.TrimSuffix(string(raw), "\n")
number, err := strconv.ParseUint(rawS, 10, 64)
if err != nil {
Fail(fmt.Sprintf("Error converting timing file to numeric value: %v", err))
}
total += number
}
return total
}
// Make sure to clean up after the benchmarks.
AfterEach(func() {
cleanup()
})
// All benchmarks are executed here to have *one* table listing all data.
Measure("Podman Benchmark Suite", func(b Benchmarker) {
registryOptions := &podmanRegistry.Options{
Image: "docker-archive:" + imageTarPath(REGISTRY_IMAGE),
}
for i := range allBenchmarks {
setup()
bm := allBenchmarks[i]
// Start a local registry if requested.
if bm.options.needsRegistry {
reg, err := podmanRegistry.StartWithOptions(registryOptions)
if err != nil {
logrus.Errorf("Error starting registry: %v", err)
os.Exit(1)
}
benchmarkRegistry = reg
}
if bm.options.init != nil {
bm.options.init()
}
// Set the time dir only for the main() function.
os.Setenv(EnvTimeDir, timedir)
b.Time("[CPU] "+bm.name, bm.main)
os.Unsetenv(EnvTimeDir)
mem := totalMemoryInKb()
b.RecordValueWithPrecision("[MEM] "+bm.name, float64(mem), "KB", 1)
cleanup()
}
}, numBenchmarkSamples)
BeforeEach(func() {
// --------------------------------------------------------------------------
// IMAGE BENCHMARKS
// --------------------------------------------------------------------------
newBenchmark("podman images", func() {
session := podmanTest.Podman([]string{"images"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
}, nil)
newBenchmark("podman push", func() {
port, user, pass := getPortUserPass()
session := podmanTest.Podman([]string{"push", "--tls-verify=false", "--creds", user + ":" + pass, SYSTEMD_IMAGE, "localhost:" + port + "/repo/image:tag"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
}, &newBenchmarkOptions{needsRegistry: true})
newBenchmark("podman pull", func() {
port, user, pass := getPortUserPass()
session := podmanTest.Podman([]string{"pull", "--tls-verify=false", "--creds", user + ":" + pass, "localhost:" + port + "/repo/image:tag"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
}, &newBenchmarkOptions{
needsRegistry: true,
init: func() {
port, user, pass := getPortUserPass()
session := podmanTest.Podman([]string{"push", "--tls-verify=false", "--creds", user + ":" + pass, SYSTEMD_IMAGE, "localhost:" + port + "/repo/image:tag"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
},
})
newBenchmark("podman load [docker]", func() {
session := podmanTest.Podman([]string{"load", "-i", "./testdata/docker-two-images.tar.xz"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
}, nil)
newBenchmark("podman load [oci]", func() {
session := podmanTest.Podman([]string{"load", "-i", "./testdata/oci-registry-name.tar.gz"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
}, nil)
newBenchmark("podman save", func() {
session := podmanTest.Podman([]string{"save", ALPINE, "-o", path.Join(podmanTest.TempDir, "alpine.tar")})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
}, nil)
newBenchmark("podman image inspect", func() {
session := podmanTest.Podman([]string{"inspect", ALPINE})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
}, nil)
newBenchmark("podman login + logout", func() {
port, user, pass := getPortUserPass()
session := podmanTest.Podman([]string{"login", "-u", user, "-p", pass, "--tls-verify=false", "localhost:" + port})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
session = podmanTest.Podman([]string{"logout", "localhost:" + port})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
}, &newBenchmarkOptions{needsRegistry: true})
// --------------------------------------------------------------------------
// CONTAINER BENCHMARKS
// --------------------------------------------------------------------------
newBenchmark("podman create", func() {
session := podmanTest.Podman([]string{"create", ALPINE, "true"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
}, nil)
newBenchmark("podman start", func() {
session := podmanTest.Podman([]string{"start", "foo"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
}, &newBenchmarkOptions{
init: func() {
session := podmanTest.Podman([]string{"create", "--name=foo", ALPINE, "true"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
},
})
newBenchmark("podman run", func() {
session := podmanTest.Podman([]string{"run", ALPINE, "true"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
}, nil)
newBenchmark("podman run --detach", func() {
session := podmanTest.Podman([]string{"run", "--detach", ALPINE, "true"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
}, nil)
})
})