Merge pull request #13835 from vrothberg/run-1436

benchmarking Podman: proof of concept
OpenShift Merge Robot (committed by GitHub)
2022-04-20 08:41:54 -04:00
3 changed files with 196 additions and 2 deletions
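
Usage sketch, assuming Ginkgo is installed at $(GOBIN)/ginkgo: running `make localbenchmarks` builds the test binaries, compiles test/e2e/benchmarks_test.go under the `benchmarks` build tag, and prints a single Ginkgo measurement table with a "[CPU]" (wall-clock time) and a "[MEM]" (summed peak RSS, in KB) entry per benchmark.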

Makefile

@@ -553,8 +553,8 @@ test: localunit localintegration remoteintegration localsystem remotesystem ##
 .PHONY: ginkgo-run
 ginkgo-run:
-	$(GOBIN)/ginkgo version
-	$(GOBIN)/ginkgo -v $(TESTFLAGS) -tags "$(TAGS)" $(GINKGOTIMEOUT) -cover -flakeAttempts 3 -progress -trace -noColor -nodes 3 -debug test/e2e/. $(HACK)
+	ACK_GINKGO_RC=true $(GOBIN)/ginkgo version
+	ACK_GINKGO_RC=true $(GOBIN)/ginkgo -v $(TESTFLAGS) -tags "$(TAGS)" $(GINKGOTIMEOUT) -cover -flakeAttempts 3 -progress -trace -noColor -nodes 3 -debug test/e2e/. $(HACK)
 .PHONY: ginkgo
 ginkgo:
@@ -570,6 +570,14 @@ localintegration: test-binaries ginkgo
 .PHONY: remoteintegration
 remoteintegration: test-binaries ginkgo-remote
 
+.PHONY: localbenchmarks
+localbenchmarks: test-binaries
+	ACK_GINKGO_RC=true $(GOBIN)/ginkgo \
+		-focus "Podman Benchmark Suite" \
+		-tags "$(BUILDTAGS) benchmarks" -noColor \
+		-noisySkippings=false -noisyPendings=false \
+		test/e2e/.
+
 .PHONY: localsystem
 localsystem:
 	# Wipe existing config, database, and cache: start with clean slate.
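
Note on the target above: the -focus regexp selects the `Describe("Podman Benchmark Suite", ...)` container in the new test file, and the `benchmarks` entry in -tags satisfies that file's build constraint, so the benchmarks are compiled and executed only for this target.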

test/e2e/benchmarks_test.go (new file, 173 lines)

@@ -0,0 +1,173 @@
//go:build benchmarks
// +build benchmarks

package integration

import (
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"strconv"
	"strings"

	. "github.com/containers/podman/v4/test/utils"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	. "github.com/onsi/gomega/gexec"
)
var (
	// Number of times to execute each benchmark.
	numBenchmarkSamples = 3
	// All benchmarks are queued here.
	allBenchmarks []benchmark
)

// An internal struct for queuing benchmarks.
type benchmark struct {
	// The name of the benchmark.
	name string
	// The function to execute.
	main func()
	// Function is run before `main`.
	init func()
}

// Allows for customizing the benchmark in an easily extensible way.
type newBenchmarkOptions struct {
	// Sets the benchmark's init function.
	init func()
}

// Queue a new benchmark.
func newBenchmark(name string, main func(), options *newBenchmarkOptions) {
	bm := benchmark{name: name, main: main}
	if options != nil {
		bm.init = options.init
	}
	allBenchmarks = append(allBenchmarks, bm)
}
var _ = Describe("Podman Benchmark Suite", func() {
	var (
		timedir    string
		podmanTest *PodmanTestIntegration
	)

	setup := func() {
		tempdir, err := CreateTempDirInTempDir()
		if err != nil {
			os.Exit(1)
		}
		podmanTest = PodmanTestCreate(tempdir)
		podmanTest.Setup()

		timedir, err = CreateTempDirInTempDir()
		if err != nil {
			os.Exit(1)
		}
	}

	cleanup := func() {
		podmanTest.Cleanup()
		os.RemoveAll(timedir)
	}
	// Sum the values of all time files in timedir; each wrapped Podman
	// invocation writes its peak RSS in KB to one such file.
	totalMemoryInKb := func() (total uint64) {
		files, err := ioutil.ReadDir(timedir)
		if err != nil {
			Fail(fmt.Sprintf("Error reading timing dir: %v", err))
		}

		for _, f := range files {
			if f.IsDir() {
				continue
			}
			raw, err := ioutil.ReadFile(path.Join(timedir, f.Name()))
			if err != nil {
				Fail(fmt.Sprintf("Error reading timing file: %v", err))
			}
			rawS := strings.TrimSuffix(string(raw), "\n")
			number, err := strconv.ParseUint(rawS, 10, 64)
			if err != nil {
				Fail(fmt.Sprintf("Error converting timing file to numeric value: %v", err))
			}
			total += number
		}

		return total
	}
	// Make sure to clean up after the benchmarks.
	AfterEach(func() {
		cleanup()
	})

	// All benchmarks are executed here to have *one* table listing all data.
	// Ginkgo runs this closure numBenchmarkSamples times and aggregates the
	// values recorded across samples.
	Measure("Podman Benchmark Suite", func(b Benchmarker) {
		for i := range allBenchmarks {
			setup()

			bm := allBenchmarks[i]
			if bm.init != nil {
				bm.init()
			}

			// Set the time dir only for the main() function.
			os.Setenv(EnvTimeDir, timedir)
			b.Time("[CPU] "+bm.name, bm.main)
			os.Unsetenv(EnvTimeDir)

			mem := totalMemoryInKb()
			b.RecordValueWithPrecision("[MEM] "+bm.name, float64(mem), "KB", 1)

			cleanup()
		}
	}, numBenchmarkSamples)
	BeforeEach(func() {
		// --------------------------------------------------------------------------
		// IMAGE BENCHMARKS
		// --------------------------------------------------------------------------
		newBenchmark("podman images", func() {
			session := podmanTest.Podman([]string{"images"})
			session.WaitWithDefaultTimeout()
			Expect(session).Should(Exit(0))
		}, nil)

		newBenchmark("podman pull", func() {
			session := podmanTest.Podman([]string{"pull", "quay.io/libpod/cirros"})
			session.WaitWithDefaultTimeout()
			Expect(session).Should(Exit(0))
		}, nil)

		// --------------------------------------------------------------------------
		// CONTAINER BENCHMARKS
		// --------------------------------------------------------------------------
		newBenchmark("podman create", func() {
			session := podmanTest.Podman([]string{"create", ALPINE, "true"})
			session.WaitWithDefaultTimeout()
			Expect(session).Should(Exit(0))
		}, nil)

		newBenchmark("podman start", func() {
			session := podmanTest.Podman([]string{"start", "foo"})
			session.WaitWithDefaultTimeout()
			Expect(session).Should(Exit(0))
		}, &newBenchmarkOptions{
			init: func() {
				session := podmanTest.Podman([]string{"create", "--name=foo", ALPINE, "true"})
				session.WaitWithDefaultTimeout()
				Expect(session).Should(Exit(0))
			},
		})

		newBenchmark("podman run", func() {
			session := podmanTest.Podman([]string{"run", ALPINE, "true"})
			session.WaitWithDefaultTimeout()
			Expect(session).Should(Exit(0))
		}, nil)
	})
})
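
The queuing API keeps extensions small: adding a benchmark is one newBenchmark() call inside the BeforeEach above. A hypothetical sketch (not part of this commit) for `podman stop`, using the init hook to prepare a running container named "bar":

		newBenchmark("podman stop", func() {
			session := podmanTest.Podman([]string{"stop", "bar"})
			session.WaitWithDefaultTimeout()
			Expect(session).Should(Exit(0))
		}, &newBenchmarkOptions{
			init: func() {
				session := podmanTest.Podman([]string{"run", "--name=bar", "-d", ALPINE, "top"})
				session.WaitWithDefaultTimeout()
				Expect(session).Should(Exit(0))
			},
		})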

test/utils/utils.go

@@ -27,6 +27,8 @@ const (
 	CNI NetworkBackend = iota
 	// Netavark network backend
 	Netavark NetworkBackend = iota
+	// Env variable for creating time files.
+	EnvTimeDir = "_PODMAN_TIME_DIR"
 )
 
 func (n NetworkBackend) ToString() string {
@@ -96,6 +98,17 @@ func (p *PodmanTest) PodmanAsUserBase(args []string, uid, gid uint32, cwd string
 	if p.RemoteTest {
 		podmanBinary = p.RemotePodmanBinary
 	}
+
+	if timeDir := os.Getenv(EnvTimeDir); timeDir != "" {
+		timeFile, err := ioutil.TempFile(timeDir, ".time")
+		if err != nil {
+			Fail(fmt.Sprintf("Error creating time file: %v", err))
+		}
+		timeArgs := []string{"-f", "%M", "-o", timeFile.Name()}
+		timeCmd := append([]string{"/usr/bin/time"}, timeArgs...)
+		wrapper = append(timeCmd, wrapper...)
+	}
+
 	runCmd := append(wrapper, podmanBinary)
 	if p.NetworkBackend == Netavark {
 		runCmd = append(runCmd, []string{"--network-backend", "netavark"}...)
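
For context: GNU time's %M format prints a process's maximum resident set size in kilobytes, and -o writes it to the fresh temp file created above. So while _PODMAN_TIME_DIR is set, every test-spawned Podman process effectively runs as `/usr/bin/time -f %M -o $TIMEDIR/.time<random> podman ...`, and totalMemoryInKb() in the benchmark suite sums those files afterwards. This assumes GNU time is installed at /usr/bin/time (the shell built-in `time` has no -f option).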