
feat: port collect-profiles.sh to 'ipfs diag profile' (#8786)

* feat: add block profiling to collect-profiles.sh

* feat: add more profiles to 'ipfs diag profile'

This adds mutex and block profiles, and brings the command up to par
with 'collect-profiles.sh', so that we can remove it.

Profiles are also now collected concurrently, which improves the
runtime from (profile_time * num_profiles) to just (profile_time):
with the default 30s window, for example, the CPU, mutex, and block
profiles finish together in ~30s rather than taking ~90s back-to-back.

Note that this includes a backwards-incompatible change:
--cpu-profile-time is removed in favor of the more general
--profile-time, which covers all sampling profiles.

* docs(cli): ipfs diag profile

* add CLI flag to select specific diag collectors

Co-authored-by: Marcin Rataj <lidel@lidel.org>
commit bb68a68525 (parent ca4a3ed961)
Gus Eggert, 2022-04-12 11:58:03 -04:00, committed by GitHub
10 changed files with 574 additions and 248 deletions
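
For a quick feel of the reworked interface, here are a few invocations matching the flags exercised in the updated sharness tests below (timings and output names are illustrative, and the daemon must be running):

    # default run: all collectors, 30s sampling window
    ipfs diag profile

    # skip the sampling profiles (CPU, mutex, block) entirely
    ipfs diag profile --profile-time=0

    # custom sampling window and output path
    ipfs diag profile --profile-time=1s -o test-profile.zip

    # restrict collection to specific collectors
    ipfs diag profile --collectors version,goroutines-stack -o test-profile-small.zip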


@@ -1,53 +0,0 @@
#!/usr/bin/env bash
# collect-profiles.sh
#
# Collects go profile information from a running `ipfs` daemon.
# Creates an archive including the profiles, profile graph svgs,
# ...and where available, a copy of the `ipfs` binary on the PATH.
#
# Please run this script and attach the profile archive it creates
# when reporting bugs at https://github.com/ipfs/go-ipfs/issues
set -euo pipefail
IFS=$'\n\t'
SOURCE_URL="${1:-http://127.0.0.1:5001}"
tmpdir=$(mktemp -d)
export PPROF_TMPDIR="$tmpdir"
pushd "$tmpdir" > /dev/null
if command -v ipfs > /dev/null 2>&1; then
cp "$(command -v ipfs)" ipfs
fi
echo Collecting goroutine stacks
curl -s -o goroutines.stacks "$SOURCE_URL"'/debug/pprof/goroutine?debug=2'
curl -s -o goroutines.stacks.full "$SOURCE_URL"'/debug/stack'
echo Collecting goroutine profile
go tool pprof -symbolize=remote -svg -output goroutine.svg "$SOURCE_URL/debug/pprof/goroutine"
echo Collecting heap profile
go tool pprof -symbolize=remote -svg -output heap.svg "$SOURCE_URL/debug/pprof/heap"
echo "Collecting cpu profile (~30s)"
go tool pprof -symbolize=remote -svg -output cpu.svg "$SOURCE_URL/debug/pprof/profile"
echo "Enabling mutex profiling"
curl -X POST "$SOURCE_URL"'/debug/pprof-mutex/?fraction=4'
echo "Waiting for mutex data to be updated (30s)"
sleep 30
curl -s -o mutex.txt "$SOURCE_URL"'/debug/pprof/mutex?debug=2'
go tool pprof -symbolize=remote -svg -output mutex.svg "$SOURCE_URL/debug/pprof/mutex"
echo "Disabling mutex profiling"
curl -X POST "$SOURCE_URL"'/debug/pprof-mutex/?fraction=0'
OUTPUT_NAME=ipfs-profile-$(uname -n)-$(date +'%Y-%m-%dT%H:%M:%S%z').tar.gz
echo "Creating $OUTPUT_NAME"
popd > /dev/null
tar czf "./$OUTPUT_NAME" -C "$tmpdir" .
rm -rf "$tmpdir"
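
After this commit, the script above reduces to a single daemon command; a rough equivalent of a full run (hedged: the new command writes a .zip rather than a .tar.gz, and mutex profiling defaults to fraction 4 as before):

    ipfs diag profile -o "ipfs-profile-$(uname -n)-$(date +'%Y-%m-%dT%H:%M:%S%z').zip"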


@@ -3,13 +3,13 @@ package main
import (
"net/http"
"github.com/ipfs/go-ipfs/core/commands"
"github.com/ipfs/go-ipfs/profile"
)
func init() {
http.HandleFunc("/debug/stack",
func(w http.ResponseWriter, _ *http.Request) {
_ = commands.WriteAllGoroutineStacks(w)
_ = profile.WriteAllGoroutineStacks(w)
},
)
}
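
Only the implementation behind the handler moves; the endpoint itself is unchanged, so the full stacks can still be fetched exactly as the removed script did (default API address assumed):

    curl -s -o goroutines.stacks.full "http://127.0.0.1:5001/debug/stack"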


@@ -2,18 +2,15 @@ package commands
import (
"archive/zip"
"context"
"encoding/json"
"fmt"
"io"
"os"
"runtime"
"runtime/pprof"
"strings"
"time"
cmds "github.com/ipfs/go-ipfs-cmds"
"github.com/ipfs/go-ipfs/core/commands/e"
"github.com/ipfs/go-ipfs/profile"
)
// time format that works in filenames on windows.
@@ -23,22 +20,27 @@ type profileResult struct {
File string
}
const cpuProfileTimeOption = "cpu-profile-time"
const (
collectorsOptionName = "collectors"
profileTimeOption = "profile-time"
mutexProfileFractionOption = "mutex-profile-fraction"
blockProfileRateOption = "block-profile-rate"
)
var sysProfileCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Collect a performance profile for debugging.",
ShortDescription: `
Collects cpu, heap, and goroutine profiles from a running go-ipfs daemon
into a single zip file. To aid in debugging, this command also attempts to
include a copy of the running go-ipfs binary.
Collects profiles from a running go-ipfs daemon into a single zip file.
To aid in debugging, this command also attempts to include a copy of
the running go-ipfs binary.
`,
LongDescription: `
Collects cpu, heap, and goroutine profiles from a running go-ipfs daemon
into a single zipfile. To aid in debugging, this command also attempts to
include a copy of the running go-ipfs binary.
Collects profiles from a running go-ipfs daemon into a single zipfile.
To aid in debugging, this command also attempts to include a copy of
the running go-ipfs binary.
Profile's can be examined using 'go tool pprof', some tips can be found at
Profiles can be examined using 'go tool pprof', some tips can be found at
https://github.com/ipfs/go-ipfs/blob/master/docs/debug-guide.md.
Privacy Notice:
@@ -48,6 +50,8 @@ The output file includes:
- A list of running goroutines.
- A CPU profile.
- A heap profile.
- A mutex profile.
- A block profile.
- Your copy of go-ipfs.
- The output of 'ipfs version --all'.
@@ -68,19 +72,51 @@ However, it could reveal:
},
NoLocal: true,
Options: []cmds.Option{
cmds.StringOption(outputOptionName, "o", "The path where the output should be stored."),
cmds.StringOption(cpuProfileTimeOption, "The amount of time spent profiling CPU usage.").WithDefault("30s"),
cmds.StringOption(outputOptionName, "o", "The path where the output .zip should be stored. Default: ./ipfs-profile-[timestamp].zip"),
cmds.DelimitedStringsOption(",", collectorsOptionName, "The list of collectors to use for collecting diagnostic data.").
WithDefault([]string{
profile.CollectorGoroutinesStack,
profile.CollectorGoroutinesPprof,
profile.CollectorVersion,
profile.CollectorHeap,
profile.CollectorBin,
profile.CollectorCPU,
profile.CollectorMutex,
profile.CollectorBlock,
}),
cmds.StringOption(profileTimeOption, "The amount of time spent profiling. If this is set to 0, then sampling profiles are skipped.").WithDefault("30s"),
cmds.IntOption(mutexProfileFractionOption, "The fraction 1/n of mutex contention events that are reported in the mutex profile.").WithDefault(4),
cmds.StringOption(blockProfileRateOption, "The duration to wait between sampling goroutine-blocking events for the blocking profile.").WithDefault("1ms"),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
cpuProfileTimeStr, _ := req.Options[cpuProfileTimeOption].(string)
cpuProfileTime, err := time.ParseDuration(cpuProfileTimeStr)
collectors := req.Options[collectorsOptionName].([]string)
profileTimeStr, _ := req.Options[profileTimeOption].(string)
profileTime, err := time.ParseDuration(profileTimeStr)
if err != nil {
return fmt.Errorf("failed to parse CPU profile duration %q: %w", cpuProfileTimeStr, err)
return fmt.Errorf("failed to parse profile duration %q: %w", profileTimeStr, err)
}
blockProfileRateStr, _ := req.Options[blockProfileRateOption].(string)
blockProfileRate, err := time.ParseDuration(blockProfileRateStr)
if err != nil {
return fmt.Errorf("failed to parse block profile rate %q: %w", blockProfileRateStr, err)
}
mutexProfileFraction, _ := req.Options[mutexProfileFractionOption].(int)
r, w := io.Pipe()
go func() {
_ = w.CloseWithError(writeProfiles(req.Context, cpuProfileTime, w))
archive := zip.NewWriter(w)
err = profile.WriteProfiles(req.Context, archive, profile.Options{
Collectors: collectors,
ProfileDuration: profileTime,
MutexProfileFraction: mutexProfileFraction,
BlockProfileRate: blockProfileRate,
})
archive.Close()
_ = w.CloseWithError(err)
}()
return res.Emit(r)
},
@@ -120,148 +156,3 @@ However, it could reveal:
}),
},
}
func WriteAllGoroutineStacks(w io.Writer) error {
// this is based on pprof.writeGoroutineStacks, and removes the 64 MB limit
buf := make([]byte, 1<<20)
for i := 0; ; i++ {
n := runtime.Stack(buf, true)
if n < len(buf) {
buf = buf[:n]
break
}
// if len(buf) >= 64<<20 {
// // Filled 64 MB - stop there.
// break
// }
buf = make([]byte, 2*len(buf))
}
_, err := w.Write(buf)
return err
}
func writeProfiles(ctx context.Context, cpuProfileTime time.Duration, w io.Writer) error {
archive := zip.NewWriter(w)
// Take some profiles.
type profile struct {
name string
file string
debug int
}
profiles := []profile{{
name: "goroutine",
file: "goroutines.stacks",
debug: 2,
}, {
name: "goroutine",
file: "goroutines.pprof",
}, {
name: "heap",
file: "heap.pprof",
}}
{
out, err := archive.Create("goroutines-all.stacks")
if err != nil {
return err
}
err = WriteAllGoroutineStacks(out)
if err != nil {
return err
}
}
for _, profile := range profiles {
prof := pprof.Lookup(profile.name)
out, err := archive.Create(profile.file)
if err != nil {
return err
}
err = prof.WriteTo(out, profile.debug)
if err != nil {
return err
}
}
// Take a CPU profile.
if cpuProfileTime != 0 {
out, err := archive.Create("cpu.pprof")
if err != nil {
return err
}
err = writeCPUProfile(ctx, cpuProfileTime, out)
if err != nil {
return err
}
}
// Collect version info
// I'd use diag sysinfo, but that includes some more sensitive information
// (GOPATH, etc.).
{
out, err := archive.Create("version.json")
if err != nil {
return err
}
err = json.NewEncoder(out).Encode(getVersionInfo())
if err != nil {
return err
}
}
// Collect binary
if fi, err := openIPFSBinary(); err == nil {
fname := "ipfs"
if runtime.GOOS == "windows" {
fname += ".exe"
}
out, err := archive.Create(fname)
if err != nil {
return err
}
_, err = io.Copy(out, fi)
_ = fi.Close()
if err != nil {
return err
}
}
return archive.Close()
}
func writeCPUProfile(ctx context.Context, d time.Duration, w io.Writer) error {
if err := pprof.StartCPUProfile(w); err != nil {
return err
}
defer pprof.StopCPUProfile()
timer := time.NewTimer(d)
defer timer.Stop()
select {
case <-timer.C:
case <-ctx.Done():
return ctx.Err()
}
return nil
}
func openIPFSBinary() (*os.File, error) {
if runtime.GOOS == "linux" {
pid := os.Getpid()
fi, err := os.Open(fmt.Sprintf("/proc/%d/exe", pid))
if err == nil {
return fi, nil
}
}
path, err := os.Executable()
if err != nil {
return nil, err
}
return os.Open(path)
}


@@ -67,13 +67,13 @@ NETWORK COMMANDS
swarm Manage connections to the p2p network
dht Query the DHT for values or peers
ping Measure the latency of a connection
diag Print diagnostics
bitswap Inspect bitswap state
pubsub Send and receive messages via pubsub
TOOL COMMANDS
config Manage configuration
version Show IPFS version information
diag Generate diagnostic reports
update Download and apply go-ipfs updates
commands List all available commands
log Manage and show logs of running daemon


@@ -4,23 +4,13 @@ import (
"errors"
"fmt"
"io"
"runtime"
"runtime/debug"
version "github.com/ipfs/go-ipfs"
fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo"
cmds "github.com/ipfs/go-ipfs-cmds"
)
type VersionOutput struct {
Version string
Commit string
Repo string
System string
Golang string
}
const (
versionNumberOptionName = "number"
versionCommitOptionName = "commit"
@@ -28,16 +18,6 @@ const (
versionAllOptionName = "all"
)
func getVersionInfo() *VersionOutput {
return &VersionOutput{
Version: version.CurrentVersionNumber,
Commit: version.CurrentCommit,
Repo: fmt.Sprint(fsrepo.RepoVersion),
System: runtime.GOARCH + "/" + runtime.GOOS, //TODO: Precise version here
Golang: runtime.Version(),
}
}
var VersionCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Show IPFS version information.",
@@ -56,10 +36,10 @@ var VersionCmd = &cmds.Command{
// must be permitted to run before init
Extra: CreateCmdExtras(SetDoesNotUseRepo(true), SetDoesNotUseConfigAsInput(true)),
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
return cmds.EmitOnce(res, getVersionInfo())
return cmds.EmitOnce(res, version.GetVersionInfo())
},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, version *VersionOutput) error {
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, version *version.VersionInfo) error {
all, _ := req.Options[versionAllOptionName].(bool)
if all {
ver := version.Version
@@ -95,7 +75,7 @@ var VersionCmd = &cmds.Command{
return nil
}),
},
Type: VersionOutput{},
Type: version.VersionInfo{},
}
type Dependency struct {

profile/goroutines.go (new file, 27 lines)

@@ -0,0 +1,27 @@
package profile
import (
"io"
"runtime"
)
// WriteAllGoroutineStacks writes a stack trace to the given writer.
// This is distinct from the Go-provided method because it does not truncate after 64 MB.
func WriteAllGoroutineStacks(w io.Writer) error {
// this is based on pprof.writeGoroutineStacks, and removes the 64 MB limit
buf := make([]byte, 1<<20)
for i := 0; ; i++ {
n := runtime.Stack(buf, true)
if n < len(buf) {
buf = buf[:n]
break
}
// if len(buf) >= 64<<20 {
// // Filled 64 MB - stop there.
// break
// }
buf = make([]byte, 2*len(buf))
}
_, err := w.Write(buf)
return err
}

profile/profile.go (new file, 268 lines)

@@ -0,0 +1,268 @@
package profile
import (
"archive/zip"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"os"
"runtime"
"runtime/pprof"
"sync"
"time"
version "github.com/ipfs/go-ipfs"
"github.com/ipfs/go-log"
)
const (
CollectorGoroutinesStack = "goroutines-stack"
CollectorGoroutinesPprof = "goroutines-pprof"
CollectorVersion = "version"
CollectorHeap = "heap"
CollectorBin = "bin"
CollectorCPU = "cpu"
CollectorMutex = "mutex"
CollectorBlock = "block"
)
var (
logger = log.Logger("profile")
goos = runtime.GOOS
)
type collector struct {
outputFile string
isExecutable bool
collectFunc func(ctx context.Context, opts Options, writer io.Writer) error
enabledFunc func(opts Options) bool
}
func (p *collector) outputFileName() string {
fName := p.outputFile
if p.isExecutable {
if goos == "windows" {
fName += ".exe"
}
}
return fName
}
var collectors = map[string]collector{
CollectorGoroutinesStack: {
outputFile: "goroutines.stacks",
collectFunc: goroutineStacksText,
enabledFunc: func(opts Options) bool { return true },
},
CollectorGoroutinesPprof: {
outputFile: "goroutines.pprof",
collectFunc: goroutineStacksProto,
enabledFunc: func(opts Options) bool { return true },
},
CollectorVersion: {
outputFile: "version.json",
collectFunc: versionInfo,
enabledFunc: func(opts Options) bool { return true },
},
CollectorHeap: {
outputFile: "heap.pprof",
collectFunc: heapProfile,
enabledFunc: func(opts Options) bool { return true },
},
CollectorBin: {
outputFile: "ipfs",
isExecutable: true,
collectFunc: binary,
enabledFunc: func(opts Options) bool { return true },
},
CollectorCPU: {
outputFile: "cpu.pprof",
collectFunc: profileCPU,
enabledFunc: func(opts Options) bool { return opts.ProfileDuration > 0 },
},
CollectorMutex: {
outputFile: "mutex.pprof",
collectFunc: mutexProfile,
enabledFunc: func(opts Options) bool { return opts.ProfileDuration > 0 && opts.MutexProfileFraction > 0 },
},
CollectorBlock: {
outputFile: "block.pprof",
collectFunc: blockProfile,
enabledFunc: func(opts Options) bool { return opts.ProfileDuration > 0 && opts.BlockProfileRate > 0 },
},
}
type Options struct {
Collectors []string
ProfileDuration time.Duration
MutexProfileFraction int
BlockProfileRate time.Duration
}
func WriteProfiles(ctx context.Context, archive *zip.Writer, opts Options) error {
p := profiler{
archive: archive,
opts: opts,
}
return p.runProfile(ctx)
}
// profiler runs the collectors concurrently and writes the results to the zip archive.
type profiler struct {
archive *zip.Writer
opts Options
}
func (p *profiler) runProfile(ctx context.Context) error {
type profileResult struct {
fName string
buf *bytes.Buffer
err error
}
ctx, cancelFn := context.WithCancel(ctx)
defer cancelFn()
var collectorsToRun []collector
for _, name := range p.opts.Collectors {
c, ok := collectors[name]
if !ok {
return fmt.Errorf("unknown collector '%s'", name)
}
collectorsToRun = append(collectorsToRun, c)
}
results := make(chan profileResult, len(p.opts.Collectors))
wg := sync.WaitGroup{}
for _, c := range collectorsToRun {
if !c.enabledFunc(p.opts) {
continue
}
fName := c.outputFileName()
wg.Add(1)
go func(c collector) {
defer wg.Done()
logger.Infow("collecting profile", "File", fName)
defer logger.Infow("profile done", "File", fName)
b := bytes.Buffer{}
err := c.collectFunc(ctx, p.opts, &b)
if err != nil {
select {
case results <- profileResult{err: fmt.Errorf("generating profile data for %q: %w", fName, err)}:
case <-ctx.Done():
return
}
}
select {
case results <- profileResult{buf: &b, fName: fName}:
case <-ctx.Done():
}
}(c)
}
go func() {
wg.Wait()
close(results)
}()
for res := range results {
if res.err != nil {
return res.err
}
out, err := p.archive.Create(res.fName)
if err != nil {
return fmt.Errorf("creating output file %q: %w", res.fName, err)
}
_, err = io.Copy(out, res.buf)
if err != nil {
return fmt.Errorf("compressing result %q: %w", res.fName, err)
}
}
return nil
}
func goroutineStacksText(ctx context.Context, _ Options, w io.Writer) error {
return WriteAllGoroutineStacks(w)
}
func goroutineStacksProto(ctx context.Context, _ Options, w io.Writer) error {
return pprof.Lookup("goroutine").WriteTo(w, 0)
}
func heapProfile(ctx context.Context, _ Options, w io.Writer) error {
return pprof.Lookup("heap").WriteTo(w, 0)
}
func versionInfo(ctx context.Context, _ Options, w io.Writer) error {
return json.NewEncoder(w).Encode(version.GetVersionInfo())
}
func binary(ctx context.Context, _ Options, w io.Writer) error {
var (
path string
err error
)
if goos == "linux" {
pid := os.Getpid()
path = fmt.Sprintf("/proc/%d/exe", pid)
} else {
path, err = os.Executable()
if err != nil {
return fmt.Errorf("finding binary path: %w", err)
}
}
fi, err := os.Open(path)
if err != nil {
return fmt.Errorf("opening binary %q: %w", path, err)
}
_, err = io.Copy(w, fi)
_ = fi.Close()
if err != nil {
return fmt.Errorf("copying binary %q: %w", path, err)
}
return nil
}
func mutexProfile(ctx context.Context, opts Options, w io.Writer) error {
prev := runtime.SetMutexProfileFraction(opts.MutexProfileFraction)
defer runtime.SetMutexProfileFraction(prev)
err := waitOrCancel(ctx, opts.ProfileDuration)
if err != nil {
return err
}
return pprof.Lookup("mutex").WriteTo(w, 2)
}
func blockProfile(ctx context.Context, opts Options, w io.Writer) error {
runtime.SetBlockProfileRate(int(opts.BlockProfileRate.Nanoseconds()))
defer runtime.SetBlockProfileRate(0)
err := waitOrCancel(ctx, opts.ProfileDuration)
if err != nil {
return err
}
return pprof.Lookup("block").WriteTo(w, 2)
}
func profileCPU(ctx context.Context, opts Options, w io.Writer) error {
err := pprof.StartCPUProfile(w)
if err != nil {
return err
}
defer pprof.StopCPUProfile()
return waitOrCancel(ctx, opts.ProfileDuration)
}
func waitOrCancel(ctx context.Context, d time.Duration) error {
timer := time.NewTimer(d)
defer timer.Stop()
select {
case <-timer.C:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
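
The archive this package writes is intended for 'go tool pprof', as the sharness tests below verify. A typical inspection session, assuming the zip was extracted into ./profiles and includes the bundled ipfs binary:

    unzip -d profiles ipfs-profile.zip
    go tool pprof -top profiles/ipfs profiles/cpu.pprof
    go tool pprof -top profiles/ipfs profiles/mutex.pprof   # reports "Type: delay"
    go tool pprof -top profiles/ipfs profiles/block.pprof   # reports "Type: delay"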

profile/profile_test.go (new file, 172 lines)

@@ -0,0 +1,172 @@
package profile
import (
"archive/zip"
"bytes"
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestProfiler(t *testing.T) {
allCollectors := []string{
CollectorGoroutinesStack,
CollectorGoroutinesPprof,
CollectorVersion,
CollectorHeap,
CollectorBin,
CollectorCPU,
CollectorMutex,
CollectorBlock,
}
cases := []struct {
name string
opts Options
goos string
expectFiles []string
}{
{
name: "happy case",
opts: Options{
Collectors: allCollectors,
ProfileDuration: 1 * time.Millisecond,
MutexProfileFraction: 4,
BlockProfileRate: 50 * time.Nanosecond,
},
expectFiles: []string{
"goroutines.stacks",
"goroutines.pprof",
"version.json",
"heap.pprof",
"ipfs",
"cpu.pprof",
"mutex.pprof",
"block.pprof",
},
},
{
name: "windows",
opts: Options{
Collectors: allCollectors,
ProfileDuration: 1 * time.Millisecond,
MutexProfileFraction: 4,
BlockProfileRate: 50 * time.Nanosecond,
},
goos: "windows",
expectFiles: []string{
"goroutines.stacks",
"goroutines.pprof",
"version.json",
"heap.pprof",
"ipfs.exe",
"cpu.pprof",
"mutex.pprof",
"block.pprof",
},
},
{
name: "sampling profiling disabled",
opts: Options{
Collectors: allCollectors,
MutexProfileFraction: 4,
BlockProfileRate: 50 * time.Nanosecond,
},
expectFiles: []string{
"goroutines.stacks",
"goroutines.pprof",
"version.json",
"heap.pprof",
"ipfs",
},
},
{
name: "Mutex profiling disabled",
opts: Options{
Collectors: allCollectors,
ProfileDuration: 1 * time.Millisecond,
BlockProfileRate: 50 * time.Nanosecond,
},
expectFiles: []string{
"goroutines.stacks",
"goroutines.pprof",
"version.json",
"heap.pprof",
"ipfs",
"cpu.pprof",
"block.pprof",
},
},
{
name: "block profiling disabled",
opts: Options{
Collectors: allCollectors,
ProfileDuration: 1 * time.Millisecond,
MutexProfileFraction: 4,
BlockProfileRate: 0,
},
expectFiles: []string{
"goroutines.stacks",
"goroutines.pprof",
"version.json",
"heap.pprof",
"ipfs",
"cpu.pprof",
"mutex.pprof",
},
},
{
name: "single collector",
opts: Options{
Collectors: []string{CollectorVersion},
ProfileDuration: 1 * time.Millisecond,
MutexProfileFraction: 4,
BlockProfileRate: 0,
},
expectFiles: []string{
"version.json",
},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
if c.goos != "" {
oldGOOS := goos
goos = c.goos
defer func() { goos = oldGOOS }()
}
buf := &bytes.Buffer{}
archive := zip.NewWriter(buf)
err := WriteProfiles(context.Background(), archive, c.opts)
require.NoError(t, err)
err = archive.Close()
require.NoError(t, err)
zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
require.NoError(t, err)
for _, f := range zr.File {
logger.Info("zip file: ", f.Name)
}
require.Equal(t, len(c.expectFiles), len(zr.File))
for _, expectedFile := range c.expectFiles {
func() {
f, err := zr.Open(expectedFile)
require.NoError(t, err)
defer f.Close()
fi, err := f.Stat()
require.NoError(t, err)
assert.NotZero(t, fi.Size())
}()
}
})
}
}
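
Assuming the repo's layout (the new profile package sits at the module root), the table test can be run on its own with:

    go test ./profile -run TestProfiler -v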


@@ -16,8 +16,8 @@ test_expect_success "profiling requires a running daemon" '
test_launch_ipfs_daemon
test_expect_success "test profiling (without CPU)" '
ipfs diag profile --cpu-profile-time=0 > cmd_out
test_expect_success "test profiling (without sampling)" '
ipfs diag profile --profile-time=0 > cmd_out
'
test_expect_success "filename shows up in output" '
@@ -29,12 +29,17 @@ test_expect_success "profile file created" '
'
test_expect_success "test profiling with -o" '
ipfs diag profile --cpu-profile-time=1s -o test-profile.zip
ipfs diag profile --profile-time=1s -o test-profile.zip
'
test_expect_success "test that test-profile.zip exists" '
test -e test-profile.zip
'
test_expect_success "test profiling with specific collectors" '
ipfs diag profile --collectors version,goroutines-stack -o test-profile-small.zip
'
test_kill_ipfs_daemon
if ! test_have_prereq UNZIP; then
@@ -42,7 +47,8 @@ if ! test_have_prereq UNZIP; then
fi
test_expect_success "unpack profiles" '
unzip -d profiles test-profile.zip
unzip -d profiles test-profile.zip &&
unzip -d profiles-small test-profile-small.zip
'
test_expect_success "cpu profile is valid" '
@@ -57,12 +63,22 @@ test_expect_success "goroutines profile is valid" '
go tool pprof -top profiles/ipfs "profiles/goroutines.pprof" | grep -q "Type: goroutine"
'
test_expect_success "mutex profile is valid" '
go tool pprof -top profiles/ipfs "profiles/mutex.pprof" | grep -q "Type: delay"
'
test_expect_success "block profile is valid" '
go tool pprof -top profiles/ipfs "profiles/block.pprof" | grep -q "Type: delay"
'
test_expect_success "goroutines stacktrace is valid" '
grep -q "goroutine" "profiles/goroutines.stacks"
'
test_expect_success "full goroutines stacktrace is valid" '
grep -q "goroutine" "profiles/goroutines-all.stacks"
test_expect_success "the small profile only contains the requested data" '
find profiles-small -type f | sort > actual &&
echo -e "profiles-small/goroutines.stacks\nprofiles-small/version.json" > expected &&
test_cmp expected actual
'
test_done


@@ -1,5 +1,12 @@
package ipfs
import (
"fmt"
"runtime"
"github.com/ipfs/go-ipfs/repo/fsrepo"
)
// CurrentCommit is the current git commit, this is set as a ldflag in the Makefile
var CurrentCommit string
@@ -27,3 +34,21 @@ var userAgentSuffix string
func SetUserAgentSuffix(suffix string) {
userAgentSuffix = suffix
}
type VersionInfo struct {
Version string
Commit string
Repo string
System string
Golang string
}
func GetVersionInfo() *VersionInfo {
return &VersionInfo{
Version: CurrentVersionNumber,
Commit: CurrentCommit,
Repo: fmt.Sprint(fsrepo.RepoVersion),
System: runtime.GOARCH + "/" + runtime.GOOS, //TODO: Precise version here
Golang: runtime.Version(),
}
}
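
With VersionInfo now living in the version package, the CLI and the profile collector share one source of truth; per the help text above, the archive's version.json carries the same information the CLI prints via:

    ipfs version --all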