Mirror of https://github.com/containers/podman.git (synced 2025-10-25 02:04:43 +08:00)
Merge pull request #3437 from giuseppe/fix-nocgo
build: allow to build without cgo on RISC-V
.cirrus.yml (34 changed lines)

@@ -215,6 +215,35 @@ build_each_commit_task:

    on_failure:
        failed_master_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_master_failure.sh'


build_without_cgo_task:

    depends_on:
        - "gating"
        - "vendor"
        - "varlink_api"

    # $CIRRUS_BASE_BRANCH is only set when testing a PR
    only_if: $CIRRUS_BRANCH != 'master' &&
        $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*'

    gce_instance:
        image_project: "libpod-218412"
        zone: "us-central1-a"  # Required by Cirrus for the time being
        cpu: 8
        memory: "8Gb"
        disk: 200
        image_name: "${FEDORA_CACHE_IMAGE_NAME}"

    timeout_in: 30m

    setup_environment_script: '$SCRIPT_BASE/setup_environment.sh |& ${TIMESTAMP}'
    build_without_cgo_script:
        - 'source $SCRIPT_BASE/lib.sh'
        - 'make build-no-cgo'

    on_failure:
        failed_master_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_master_failure.sh'


# Update metadata on VM images referenced by this repository state
meta_task:
@@ -224,6 +253,7 @@ meta_task:
        - "vendor"
        - "varlink_api"
        - "build_each_commit"
        - "build_without_cgo"

    container:
        image: "quay.io/libpod/imgts:latest"  # see contrib/imgts
@@ -257,6 +287,7 @@ testing_task:
        - "vendor"
        - "varlink_api"
        - "build_each_commit"
        - "build_without_cgo"

    # Only test build cache-images, if that's what's requested
    only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*'
@@ -298,6 +329,7 @@ special_testing_rootless_task:
        - "varlink_api"
        - "vendor"
        - "build_each_commit"
        - "build_without_cgo"

    only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*'
@@ -328,6 +360,7 @@ special_testing_in_podman_task:
        - "varlink_api"
        - "vendor"
        - "build_each_commit"
        - "build_without_cgo"

    only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*'
@@ -433,6 +466,7 @@ success_task:
        - "special_testing_in_podman"
        - "test_build_cache_images"
        - "verify_test_built_images"
        - "build_without_cgo"

    env:
        CIRRUS_WORKING_DIR: "/usr/src/libpod"
Makefile (3 changed lines)

@@ -399,6 +399,9 @@ build-all-new-commits:
    # Validate that all the commits build on top of $(GIT_BASE_BRANCH)
    git rebase $(GIT_BASE_BRANCH) -x make

build-no-cgo:
    env BUILDTAGS="containers_image_openpgp containers_image_ostree_stub exclude_graphdriver_btrfs exclude_graphdriver_devicemapper exclude_disk_quota" CGO_ENABLED=0 $(MAKE)

vendor:
    export GO111MODULE=on \
        $(GO) mod tidy && \
docs/libpod.conf.5.md

@@ -27,6 +27,9 @@ libpod to manage containers.
**cgroup_manager**=""
  Specify the CGroup Manager to use; valid values are "systemd" and "cgroupfs"

**lock_type**=""
  Specify the locking mechanism to use; valid values are "shm" and "file". Change the default only if you are sure of what you are doing; in general, "file" is useful only on platforms where cgo is not available for using the faster "shm" lock type. You may need to run "podman system renumber" after you change the lock type.

**init_path**=""
  Path to the container-init binary, which forwards signals and reaps processes within containers. Note that the container-init binary will only be used when the `--init` for podman-create and podman-run is set.
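For readers following the lock_type description above: switching an existing installation to the cgo-free backend is a one-line configuration change plus the renumber step the man page text mentions. A minimal sketch, not part of this diff; the path shown is the usual system-wide override location and may differ on your install:

    # /etc/containers/libpod.conf
    lock_type = "file"

    # then, because the lock type changed:
    #   podman system renumber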
go.mod (2 changed lines)

@@ -19,7 +19,7 @@ require (
    github.com/containernetworking/plugins v0.8.1
    github.com/containers/buildah v1.9.0
    github.com/containers/image v2.0.0+incompatible
    github.com/containers/psgo v1.3.0
    github.com/containers/psgo v1.3.1
    github.com/containers/storage v1.12.13
    github.com/coreos/bbolt v1.3.3 // indirect
    github.com/coreos/etcd v3.3.13+incompatible // indirect
go.sum (6 changed lines)

@@ -72,6 +72,12 @@ github.com/containers/image v2.0.0+incompatible h1:FTr6Br7jlIKNCKMjSOMbAxKp2keQ0
github.com/containers/image v2.0.0+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M=
github.com/containers/psgo v1.3.0 h1:kDhiA4gNNyJ2qCzmOuBf6AmrF/Pp+6Jo98P68R7fB8I=
github.com/containers/psgo v1.3.0/go.mod h1:7MELvPTW1fj6yMrwD9I1Iasx1vU+hKlRkHXAJ51sFtU=
github.com/containers/psgo v1.3.1-0.20190626112706-fbef66e4ce92 h1:aVJs/Av0Yc9uNoWnIwmG+6Z+XozuRXFwvLwAOVmwlvI=
github.com/containers/psgo v1.3.1-0.20190626112706-fbef66e4ce92/go.mod h1:LLiRMmxZ6FWP4bB/fOUu6kDT+4okk/ZCeeykqh0O5Ns=
github.com/containers/psgo v1.3.1 h1:1kE+jJ9Ou5f9zQT/M2IdeSclsKWsXrSFlOcnqc+F2TA=
github.com/containers/psgo v1.3.1/go.mod h1:LLiRMmxZ6FWP4bB/fOUu6kDT+4okk/ZCeeykqh0O5Ns=
github.com/containers/storage v1.12.10-0.20190627120555-8eed0c36d1e3 h1:kO/YA36sGuPDFvVIzZxJp7xmwa+/wCVADxDSuFzsZwM=
github.com/containers/storage v1.12.10-0.20190627120555-8eed0c36d1e3/go.mod h1:+RirK6VQAqskQlaTBrOG6ulDvn4si2QjFE1NZCn06MM=
github.com/containers/storage v1.12.11 h1:r35VsROen9Kw3+LN/v4O4g7cT5zQPX06vkcjqScJ2z8=
github.com/containers/storage v1.12.11/go.mod h1:+RirK6VQAqskQlaTBrOG6ulDvn4si2QjFE1NZCn06MM=
github.com/containers/storage v1.12.12 h1:gao0GNzjmSX4Ai/StOHtUVIrBguC0OKyvx/ZMwBdyuY=
libpod.conf

@@ -87,6 +87,9 @@ infra_command = "/pause"

# Default libpod support for container labeling
# label=true

# The locking mechanism to use
lock_type = "shm"

# Number of locks available for containers and pods.
# If this is changed, a lock renumber must be performed (e.g. with the
# 'podman system renumber' command).
libpod/container_attach_linux.go

@@ -19,10 +19,6 @@ import (
    "k8s.io/client-go/tools/remotecommand"
)

//#include <sys/un.h>
// extern int unix_path_length(){struct sockaddr_un addr; return sizeof(addr.sun_path) - 1;}
import "C"

/* Sync with stdpipe_t in conmon.c */
const (
    AttachPipeStdin = 1
@@ -80,7 +76,7 @@ func (c *Container) attachContainerSocket(resize <-chan remotecommand.TerminalSi
    socketPath := c.AttachSocketPath()

    maxUnixLength := int(C.unix_path_length())
    maxUnixLength := unixPathLength()
    if maxUnixLength < len(socketPath) {
        socketPath = socketPath[0:maxUnixLength]
    }
libpod/container_attach_linux_cgo.go (new file, 11 lines)

@@ -0,0 +1,11 @@
//+build linux,cgo

package libpod

//#include <sys/un.h>
// extern int unix_path_length(){struct sockaddr_un addr; return sizeof(addr.sun_path) - 1;}
import "C"

func unixPathLength() int {
    return int(C.unix_path_length())
}
libpod/container_attach_linux_nocgo.go (new file, 7 lines)

@@ -0,0 +1,7 @@
//+build linux,!cgo

package libpod

func unixPathLength() int {
    return 107
}
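The hard-coded 107 in the cgo-free fallback mirrors what the cgo helper above computes: sizeof(sun_path) minus the trailing NUL on Linux. A standalone sanity check, illustrative only and assuming a Linux target where syscall.RawSockaddrUnix carries the usual 108-byte Path field:

package main

import (
    "fmt"
    "syscall"
)

func main() {
    // sun_path is 108 bytes on Linux; drop one byte for the terminating NUL,
    // which is the value the nocgo unixPathLength() returns.
    fmt.Println(len(syscall.RawSockaddrUnix{}.Path) - 1) // prints 107
}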
libpod/lock/file/file_lock.go (new file, 175 lines)

@@ -0,0 +1,175 @@
package file

import (
    "io/ioutil"
    "os"
    "path/filepath"
    "strconv"
    "syscall"

    "github.com/containers/storage"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

// FileLocks is a struct enabling POSIX advisory locking, backed by one lock
// file per lock under a common directory.
type FileLocks struct { // nolint
    lockPath string
    valid    bool
}

// CreateFileLock sets up a directory containing the various lock files.
func CreateFileLock(path string) (*FileLocks, error) {
    _, err := os.Stat(path)
    if err == nil {
        return nil, errors.Wrapf(syscall.EEXIST, "directory %s exists", path)
    }
    if err := os.MkdirAll(path, 0711); err != nil {
        return nil, errors.Wrapf(err, "cannot create %s", path)
    }

    locks := new(FileLocks)
    locks.lockPath = path
    locks.valid = true

    return locks, nil
}

// OpenFileLock opens an existing directory with the lock files.
func OpenFileLock(path string) (*FileLocks, error) {
    _, err := os.Stat(path)
    if err != nil {
        return nil, errors.Wrapf(err, "accessing directory %s", path)
    }

    locks := new(FileLocks)
    locks.lockPath = path
    locks.valid = true

    return locks, nil
}

// Close removes the directory holding the lock files.
// The locks will be rendered unusable after closing.
// WARNING: If you Close() while there are still locks locked, these locks may
// fail to release, causing a program freeze.
// Close() is only intended to be used while testing the locks.
func (locks *FileLocks) Close() error {
    if !locks.valid {
        return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
    }
    err := os.RemoveAll(locks.lockPath)
    if err != nil {
        return errors.Wrapf(err, "deleting directory %s", locks.lockPath)
    }
    return nil
}

func (locks *FileLocks) getLockPath(lck uint32) string {
    return filepath.Join(locks.lockPath, strconv.FormatInt(int64(lck), 10))
}

// AllocateLock allocates a lock and returns the index of the lock that was allocated.
func (locks *FileLocks) AllocateLock() (uint32, error) {
    if !locks.valid {
        return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed")
    }

    id := uint32(0)
    for ; ; id++ {
        path := locks.getLockPath(id)
        f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
        if err != nil {
            if os.IsExist(err) {
                continue
            }
            return 0, errors.Wrapf(err, "creating lock file")
        }
        f.Close()
        break
    }
    return id, nil
}

// AllocateGivenLock allocates the given lock from the lock directory for use
// by a container or pod.
// If the lock is already in use or the index is invalid an error will be
// returned.
func (locks *FileLocks) AllocateGivenLock(lck uint32) error {
    if !locks.valid {
        return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
    }

    f, err := os.OpenFile(locks.getLockPath(lck), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
    if err != nil {
        return errors.Wrapf(err, "error creating lock %d", lck)
    }
    f.Close()

    return nil
}

// DeallocateLock frees a lock so it can be reallocated to another container
// or pod.
// The given lock must be already allocated, or an error will be returned.
func (locks *FileLocks) DeallocateLock(lck uint32) error {
    if !locks.valid {
        return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
    }
    if err := os.Remove(locks.getLockPath(lck)); err != nil {
        return errors.Wrapf(err, "deallocating lock %d", lck)
    }
    return nil
}

// DeallocateAllLocks frees all locks so they can be reallocated to
// other containers and pods.
func (locks *FileLocks) DeallocateAllLocks() error {
    if !locks.valid {
        return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
    }
    files, err := ioutil.ReadDir(locks.lockPath)
    if err != nil {
        return errors.Wrapf(err, "error reading directory %s", locks.lockPath)
    }
    var lastErr error
    for _, f := range files {
        p := filepath.Join(locks.lockPath, f.Name())
        err := os.Remove(p)
        if err != nil {
            lastErr = err
            logrus.Errorf("deallocating lock %s", p)
        }
    }
    return lastErr
}

// LockFileLock locks the given lock.
func (locks *FileLocks) LockFileLock(lck uint32) error {
    if !locks.valid {
        return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
    }

    l, err := storage.GetLockfile(locks.getLockPath(lck))
    if err != nil {
        return errors.Wrapf(err, "error acquiring lock")
    }

    l.Lock()
    return nil
}

// UnlockFileLock unlocks the given lock.
func (locks *FileLocks) UnlockFileLock(lck uint32) error {
    if !locks.valid {
        return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
    }
    l, err := storage.GetLockfile(locks.getLockPath(lck))
    if err != nil {
        return errors.Wrapf(err, "error acquiring lock")
    }

    l.Unlock()
    return nil
}
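LockFileLock and UnlockFileLock above delegate the actual POSIX locking to containers/storage's GetLockfile, with one lock file per lock ID. A minimal standalone sketch of that primitive, illustrative only; the path is hypothetical, and the API is the one vendored by this repository at the time:

package main

import (
    "fmt"

    "github.com/containers/storage"
)

func main() {
    // FileLocks keeps one such file per lock ID under its lock directory.
    l, err := storage.GetLockfile("/tmp/example.lock")
    if err != nil {
        panic(err)
    }

    l.Lock()
    fmt.Println("holding a POSIX file lock")
    l.Unlock()
}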
libpod/lock/file/file_lock_test.go (new file, 74 lines)

@@ -0,0 +1,74 @@
package file

import (
    "fmt"
    "io/ioutil"
    "os"
    "os/exec"
    "path/filepath"
    "testing"

    "github.com/stretchr/testify/assert"
)

// Test that creating and destroying locks work
func TestCreateAndDeallocate(t *testing.T) {
    d, err := ioutil.TempDir("", "filelock")
    assert.NoError(t, err)
    defer os.RemoveAll(d)

    l, err := OpenFileLock(filepath.Join(d, "locks"))
    assert.Error(t, err)

    l, err = CreateFileLock(filepath.Join(d, "locks"))
    assert.NoError(t, err)

    lock, err := l.AllocateLock()
    assert.NoError(t, err)

    err = l.AllocateGivenLock(lock)
    assert.Error(t, err)

    err = l.DeallocateLock(lock)
    assert.NoError(t, err)

    err = l.AllocateGivenLock(lock)
    assert.NoError(t, err)

    err = l.DeallocateAllLocks()
    assert.NoError(t, err)

    err = l.AllocateGivenLock(lock)
    assert.NoError(t, err)

    err = l.DeallocateAllLocks()
    assert.NoError(t, err)
}

// Test that locking and unlocking a lock works
func TestLockAndUnlock(t *testing.T) {
    d, err := ioutil.TempDir("", "filelock")
    assert.NoError(t, err)
    defer os.RemoveAll(d)

    l, err := CreateFileLock(filepath.Join(d, "locks"))
    assert.NoError(t, err)

    lock, err := l.AllocateLock()
    assert.NoError(t, err)

    err = l.LockFileLock(lock)
    assert.NoError(t, err)

    lslocks, err := exec.LookPath("lslocks")
    if err == nil {
        lockPath := l.getLockPath(lock)
        out, err := exec.Command(lslocks, "--json", "-p", fmt.Sprintf("%d", os.Getpid())).CombinedOutput()
        assert.NoError(t, err)

        assert.Contains(t, string(out), lockPath)
    }

    err = l.UnlockFileLock(lock)
    assert.NoError(t, err)
}
libpod/lock/file_lock_manager.go (new file, 110 lines)

@@ -0,0 +1,110 @@
package lock

import (
    "github.com/containers/libpod/libpod/lock/file"
)

// FileLockManager manages file-based locks.
type FileLockManager struct {
    locks *file.FileLocks
}

// NewFileLockManager makes a new FileLockManager at the specified directory.
func NewFileLockManager(lockPath string) (Manager, error) {
    locks, err := file.CreateFileLock(lockPath)
    if err != nil {
        return nil, err
    }

    manager := new(FileLockManager)
    manager.locks = locks

    return manager, nil
}

// OpenFileLockManager opens an existing FileLockManager at the specified directory.
func OpenFileLockManager(path string) (Manager, error) {
    locks, err := file.OpenFileLock(path)
    if err != nil {
        return nil, err
    }

    manager := new(FileLockManager)
    manager.locks = locks

    return manager, nil
}

// AllocateLock allocates a new lock from the manager.
func (m *FileLockManager) AllocateLock() (Locker, error) {
    semIndex, err := m.locks.AllocateLock()
    if err != nil {
        return nil, err
    }

    lock := new(FileLock)
    lock.lockID = semIndex
    lock.manager = m

    return lock, nil
}

// AllocateAndRetrieveLock allocates the lock with the given ID and returns it.
// If the lock is already allocated, an error is returned.
func (m *FileLockManager) AllocateAndRetrieveLock(id uint32) (Locker, error) {
    lock := new(FileLock)
    lock.lockID = id
    lock.manager = m

    if err := m.locks.AllocateGivenLock(id); err != nil {
        return nil, err
    }

    return lock, nil
}

// RetrieveLock retrieves a lock from the manager given its ID.
func (m *FileLockManager) RetrieveLock(id uint32) (Locker, error) {
    lock := new(FileLock)
    lock.lockID = id
    lock.manager = m

    return lock, nil
}

// FreeAllLocks frees all locks in the manager.
// This function is DANGEROUS. Please read the full comment in locks.go before
// trying to use it.
func (m *FileLockManager) FreeAllLocks() error {
    return m.locks.DeallocateAllLocks()
}

// FileLock is an individual file-based lock.
type FileLock struct {
    lockID  uint32
    manager *FileLockManager
}

// ID returns the ID of the lock.
func (l *FileLock) ID() uint32 {
    return l.lockID
}

// Lock acquires the lock.
func (l *FileLock) Lock() {
    if err := l.manager.locks.LockFileLock(l.lockID); err != nil {
        panic(err.Error())
    }
}

// Unlock releases the lock.
func (l *FileLock) Unlock() {
    if err := l.manager.locks.UnlockFileLock(l.lockID); err != nil {
        panic(err.Error())
    }
}

// Free releases the lock, allowing it to be reused.
func (l *FileLock) Free() error {
    return l.manager.locks.DeallocateLock(l.lockID)
}
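Because NewFileLockManager and OpenFileLockManager return the generic lock.Manager, callers use file-based locks exactly as they use the SHM-backed ones, which is what lets runtime.go pick the backend purely from lock_type. A minimal usage sketch, not part of the diff; it assumes the github.com/containers/libpod module path of the time and an illustrative directory, where libpod itself would use <TmpDir>/locks:

package main

import (
    "fmt"

    "github.com/containers/libpod/libpod/lock"
)

func main() {
    // Directory is illustrative only.
    manager, err := lock.NewFileLockManager("/tmp/libpod-file-locks")
    if err != nil {
        panic(err)
    }

    l, err := manager.AllocateLock()
    if err != nil {
        panic(err)
    }

    l.Lock()
    fmt.Println("allocated and locked lock", l.ID())
    l.Unlock()

    // Free returns the ID so another container or pod can reuse it.
    if err := l.Free(); err != nil {
        panic(err)
    }
}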
libpod/lock/shm/shm_lock.go

@@ -1,3 +1,5 @@
// +build linux,cgo

package shm

// #cgo LDFLAGS: -lrt -lpthread
libpod/lock/shm/shm_lock_nocgo.go (new file, 102 lines)

@@ -0,0 +1,102 @@
// +build linux,!cgo

package shm

import (
    "github.com/sirupsen/logrus"
)

// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory
// segment.
type SHMLocks struct {
}

// CreateSHMLock sets up a shared-memory segment holding a given number of POSIX
// semaphores, and returns a struct that can be used to operate on those locks.
// numLocks must not be 0, and may be rounded up to a multiple of the bitmap
// size used by the underlying implementation.
func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
    logrus.Error("locks are not supported without cgo")
    return &SHMLocks{}, nil
}

// OpenSHMLock opens an existing shared-memory segment holding a given number of
// POSIX semaphores. numLocks must match the number of locks the shared memory
// segment was created with.
func OpenSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
    logrus.Error("locks are not supported without cgo")
    return &SHMLocks{}, nil
}

// GetMaxLocks returns the maximum number of locks in the SHM
func (locks *SHMLocks) GetMaxLocks() uint32 {
    logrus.Error("locks are not supported without cgo")
    return 0
}

// Close closes an existing shared-memory segment.
// The segment will be rendered unusable after closing.
// WARNING: If you Close() while there are still locks locked, these locks may
// fail to release, causing a program freeze.
// Close() is only intended to be used while testing the locks.
func (locks *SHMLocks) Close() error {
    logrus.Error("locks are not supported without cgo")
    return nil
}

// AllocateSemaphore allocates a semaphore from a shared-memory segment for use
// by a container or pod.
// Returns the index of the semaphore that was allocated.
// Allocations past the maximum number of locks given when the SHM segment was
// created will result in an error, and no semaphore will be allocated.
func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
    logrus.Error("locks are not supported without cgo")
    return 0, nil
}

// AllocateGivenSemaphore allocates the given semaphore from the shared-memory
// segment for use by a container or pod.
// If the semaphore is already in use or the index is invalid an error will be
// returned.
func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error {
    logrus.Error("locks are not supported without cgo")
    return nil
}

// DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be
// reallocated to another container or pod.
// The given semaphore must be already allocated, or an error will be returned.
func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
    logrus.Error("locks are not supported without cgo")
    return nil
}

// DeallocateAllSemaphores frees all semaphores so they can be reallocated to
// other containers and pods.
func (locks *SHMLocks) DeallocateAllSemaphores() error {
    logrus.Error("locks are not supported without cgo")
    return nil
}

// LockSemaphore locks the given semaphore.
// If the semaphore is already locked, LockSemaphore will block until the lock
// can be acquired.
// There is no requirement that the given semaphore be allocated.
// This ensures that attempts to lock a container after it has been deleted,
// but before the caller has queried the database to determine this, will
// succeed.
func (locks *SHMLocks) LockSemaphore(sem uint32) error {
    logrus.Error("locks are not supported without cgo")
    return nil
}

// UnlockSemaphore unlocks the given semaphore.
// Unlocking a semaphore that is already unlocked will return EBUSY.
// There is no requirement that the given semaphore be allocated.
// This ensures that attempts to lock a container after it has been deleted,
// but before the caller has queried the database to determine this, will
// succeed.
func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
    logrus.Error("locks are not supported without cgo")
    return nil
}
libpod/runtime.go

@@ -239,6 +239,9 @@ type RuntimeConfig struct {
    // pods.
    NumLocks uint32 `toml:"num_locks,omitempty"`

    // LockType is the type of locking to use.
    LockType string `toml:"lock_type,omitempty"`

    // EventsLogger determines where events should be logged
    EventsLogger string `toml:"events_logger"`
    // EventsLogFilePath is where the events log is stored.
@@ -318,6 +321,7 @@ func defaultRuntimeConfig() (RuntimeConfig, error) {
        NumLocks:     2048,
        EventsLogger: events.DefaultEventerType.String(),
        DetachKeys:   DefaultDetachKeys,
        LockType:     "shm",
    }, nil
}
@@ -659,6 +663,62 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
    return runtime, nil
}

func getLockManager(runtime *Runtime) (lock.Manager, error) {
    var err error
    var manager lock.Manager

    switch runtime.config.LockType {
    case "file":
        lockPath := filepath.Join(runtime.config.TmpDir, "locks")
        manager, err = lock.OpenFileLockManager(lockPath)
        if err != nil {
            if os.IsNotExist(errors.Cause(err)) {
                manager, err = lock.NewFileLockManager(lockPath)
                if err != nil {
                    return nil, errors.Wrapf(err, "failed to get new file lock manager")
                }
            } else {
                return nil, err
            }
        }

    case "", "shm":
        lockPath := DefaultSHMLockPath
        if rootless.IsRootless() {
            lockPath = fmt.Sprintf("%s_%d", DefaultRootlessSHMLockPath, rootless.GetRootlessUID())
        }
        // Set up the lock manager
        manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.NumLocks)
        if err != nil {
            if os.IsNotExist(errors.Cause(err)) {
                manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
                if err != nil {
                    return nil, errors.Wrapf(err, "failed to get new shm lock manager")
                }
            } else if errors.Cause(err) == syscall.ERANGE && runtime.doRenumber {
                logrus.Debugf("Number of locks does not match - removing old locks")

                // ERANGE indicates a lock numbering mismatch.
                // Since we're renumbering, this is not fatal.
                // Remove the earlier set of locks and recreate.
                if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
                    return nil, errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
                }

                manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
                if err != nil {
                    return nil, err
                }
            } else {
                return nil, err
            }
        }
    default:
        return nil, errors.Wrapf(define.ErrInvalidArg, "unknown lock type %s", runtime.config.LockType)
    }
    return manager, nil
}

// Make a new runtime based on the given configuration
// Sets up containers/storage, state store, OCI runtime
func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
@@ -1044,37 +1104,10 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
        }
    }

    lockPath := DefaultSHMLockPath
    if rootless.IsRootless() {
        lockPath = fmt.Sprintf("%s_%d", DefaultRootlessSHMLockPath, rootless.GetRootlessUID())
    }
    // Set up the lock manager
    manager, err := lock.OpenSHMLockManager(lockPath, runtime.config.NumLocks)
    if err != nil {
        if os.IsNotExist(errors.Cause(err)) {
            manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
            if err != nil {
                return errors.Wrapf(err, "failed to get new shm lock manager")
            }
        } else if errors.Cause(err) == syscall.ERANGE && runtime.doRenumber {
            logrus.Debugf("Number of locks does not match - removing old locks")

            // ERANGE indicates a lock numbering mismatch.
            // Since we're renumbering, this is not fatal.
            // Remove the earlier set of locks and recreate.
            if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
                return errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
            }

            manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
    runtime.lockManager, err = getLockManager(runtime)
    if err != nil {
        return err
    }
        } else {
            return err
        }
    }
    runtime.lockManager = manager

    // If we're renumbering locks, do it now.
    // It breaks out of normal runtime init, and will not return a valid
pkg/rootless/rootless_linux.go

@@ -1,4 +1,4 @@
// +build linux
// +build linux,cgo

package rootless
pkg/rootless/rootless_unsupported.go

@@ -1,14 +1,21 @@
// +build !linux
// +build !linux !cgo

package rootless

import (
    "os"

    "github.com/pkg/errors"
)

// IsRootless returns false on all non-linux platforms
// IsRootless returns whether the user is rootless
func IsRootless() bool {
    uid := os.Geteuid()
    // os.Geteuid() on Windows returns -1
    if uid == -1 {
        return false
    }
    return uid != 0
}

// BecomeRootInUserNS re-exec podman in a new userNS. It returns whether podman was re-executed
@@ -4,12 +4,10 @@ package createconfig

import (
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"
    "strings"

    "github.com/docker/docker/profiles/seccomp"
    "github.com/opencontainers/runc/libcontainer/configs"
    "github.com/opencontainers/runc/libcontainer/devices"
    spec "github.com/opencontainers/runtime-spec/specs-go"
@@ -130,29 +128,6 @@ func (c *CreateConfig) addPrivilegedDevices(g *generate.Generator) error {
    return nil
}

func getSeccompConfig(config *CreateConfig, configSpec *spec.Spec) (*spec.LinuxSeccomp, error) {
    var seccompConfig *spec.LinuxSeccomp
    var err error

    if config.SeccompProfilePath != "" {
        seccompProfile, err := ioutil.ReadFile(config.SeccompProfilePath)
        if err != nil {
            return nil, errors.Wrapf(err, "opening seccomp profile (%s) failed", config.SeccompProfilePath)
        }
        seccompConfig, err = seccomp.LoadProfile(string(seccompProfile), configSpec)
        if err != nil {
            return nil, errors.Wrapf(err, "loading seccomp profile (%s) failed", config.SeccompProfilePath)
        }
    } else {
        seccompConfig, err = seccomp.GetDefaultProfile(configSpec)
        if err != nil {
            return nil, errors.Wrapf(err, "loading seccomp profile (%s) failed", config.SeccompProfilePath)
        }
    }

    return seccompConfig, nil
}

func (c *CreateConfig) createBlockIO() (*spec.LinuxBlockIO, error) {
    var ret *spec.LinuxBlockIO
    bio := &spec.LinuxBlockIO{}
pkg/spec/config_linux_cgo.go (new file, 34 lines)

@@ -0,0 +1,34 @@
// +build linux,cgo

package createconfig

import (
    "io/ioutil"

    "github.com/docker/docker/profiles/seccomp"
    spec "github.com/opencontainers/runtime-spec/specs-go"
    "github.com/pkg/errors"
)

func getSeccompConfig(config *CreateConfig, configSpec *spec.Spec) (*spec.LinuxSeccomp, error) {
    var seccompConfig *spec.LinuxSeccomp
    var err error

    if config.SeccompProfilePath != "" {
        seccompProfile, err := ioutil.ReadFile(config.SeccompProfilePath)
        if err != nil {
            return nil, errors.Wrapf(err, "opening seccomp profile (%s) failed", config.SeccompProfilePath)
        }
        seccompConfig, err = seccomp.LoadProfile(string(seccompProfile), configSpec)
        if err != nil {
            return nil, errors.Wrapf(err, "loading seccomp profile (%s) failed", config.SeccompProfilePath)
        }
    } else {
        seccompConfig, err = seccomp.GetDefaultProfile(configSpec)
        if err != nil {
            return nil, errors.Wrapf(err, "loading seccomp profile (%s) failed", config.SeccompProfilePath)
        }
    }

    return seccompConfig, nil
}
pkg/spec/config_linux_nocgo.go (new file, 11 lines)

@@ -0,0 +1,11 @@
// +build linux,!cgo

package createconfig

import (
    spec "github.com/opencontainers/runtime-spec/specs-go"
)

func getSeccompConfig(config *CreateConfig, configSpec *spec.Spec) (*spec.LinuxSeccomp, error) {
    return nil, nil
}
vendor/github.com/containers/psgo/Makefile (generated, vendored; 4 changed lines)

@@ -1,3 +1,5 @@
export GO111MODULE=off

SHELL= /bin/bash
GO ?= go
BUILD_DIR := ./bin
@@ -51,7 +53,7 @@ install:
.PHONY: .install.lint
.install.lint:
    # Workaround for https://github.com/golangci/golangci-lint/issues/523
    go get -u github.com/golangci/golangci-lint/cmd/golangci-lint@master
    go get -u github.com/golangci/golangci-lint/cmd/golangci-lint

.PHONY: uninstall
uninstall:
vendor/github.com/containers/psgo/go.mod (generated, vendored; 2 changed lines)

@@ -6,6 +6,6 @@ require (
    github.com/opencontainers/runc v0.0.0-20190425234816-dae70e8efea4
    github.com/pkg/errors v0.0.0-20190227000051-27936f6d90f9
    github.com/sirupsen/logrus v0.0.0-20190403091019-9b3cdde74fbe
    github.com/stretchr/testify v1.2.2
    github.com/stretchr/testify v1.3.0
    golang.org/x/sys v0.0.0-20190425145619-16072639606e
)
vendor/github.com/containers/psgo/go.sum (generated, vendored; 5 changed lines)

@@ -1,3 +1,4 @@
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
@@ -10,9 +11,13 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v0.0.0-20190403091019-9b3cdde74fbe h1:PBQLA9wc7FrXiUBnlfs/diNlg3ZdrP21tzcgL3OlVhU=
github.com/sirupsen/logrus v0.0.0-20190403091019-9b3cdde74fbe/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190425145619-16072639606e h1:4ktJgTV34+N3qOZUc5fAaG3Pb11qzMm3PkAoTAgUZ2I=
golang.org/x/sys v0.0.0-20190425145619-16072639606e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
vendor/github.com/containers/psgo/internal/host/host.go (generated, vendored; 20 changed lines)

@@ -24,26 +24,6 @@ import (
    "strings"
)

/*
#include <unistd.h>
*/
import "C"

var (
    // cache host queries to redundant calculations
    clockTicks *int64
    bootTime   *int64
)

// ClockTicks returns sysconf(SC_CLK_TCK).
func ClockTicks() int64 {
    if clockTicks == nil {
        ticks := int64(C.sysconf(C._SC_CLK_TCK))
        clockTicks = &ticks
    }
    return *clockTicks
}

// BootTime parses /proc/uptime returns the boot time in seconds since the
// Epoch, 1970-01-01 00:00:00 +0000 (UTC).
func BootTime() (int64, error) {
vendor/github.com/containers/psgo/internal/host/host_cgo.go (generated, vendored, new file; 37 lines)

@@ -0,0 +1,37 @@
// Copyright 2018 psgo authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package host extracts data from the host, such as the system's boot time or
// the tick rate of the system clock.
package host

/*
#include <unistd.h>
*/
import "C"

var (
    // cache host queries to redundant calculations
    clockTicks *int64
    bootTime   *int64
)

// ClockTicks returns sysconf(SC_CLK_TCK).
func ClockTicks() (int64, error) {
    if clockTicks == nil {
        ticks := int64(C.sysconf(C._SC_CLK_TCK))
        clockTicks = &ticks
    }
    return *clockTicks, nil
}
vendor/github.com/containers/psgo/internal/host/host_nocgo.go (generated, vendored, new file; 84 lines)

@@ -0,0 +1,84 @@
// +build !cgo

// Copyright 2018 psgo authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package host extracts data from the host, such as the system's boot time or
// the tick rate of the system clock.
package host

import (
    "encoding/binary"
    "fmt"
    "io/ioutil"
    "unsafe"
)

var (
    // cache host queries to redundant calculations
    clockTicks *int64
    bootTime   *int64
)

func getNativeEndianness() binary.ByteOrder {
    var i int32 = 0x00000001
    u := unsafe.Pointer(&i)
    if *((*byte)(u)) == 0x01 {
        return binary.LittleEndian
    }
    return binary.BigEndian
}

const (
    atClktck = 17
)

func getFromAuxv(what uint, whatName string) (uint, error) {
    dataLen := int(unsafe.Sizeof(int(0)))
    p, err := ioutil.ReadFile("/proc/self/auxv")
    if err != nil {
        return 0, err
    }
    native := getNativeEndianness()
    for i := 0; i < len(p); {
        var k, v uint

        switch dataLen {
        case 4:
            k = uint(native.Uint32(p[i : i+dataLen]))
            v = uint(native.Uint32(p[i+dataLen : i+dataLen*2]))
        case 8:
            k = uint(native.Uint64(p[i : i+dataLen]))
            v = uint(native.Uint64(p[i+dataLen : i+dataLen*2]))
        }
        i += dataLen * 2
        if k == what {
            return v, nil
        }
    }
    return 0, fmt.Errorf("cannot find %s in auxv", whatName)
}

// ClockTicks returns sysconf(SC_CLK_TCK).
func ClockTicks() (int64, error) {
    if clockTicks == nil {
        ret, err := getFromAuxv(atClktck, "AT_CLKTCK")
        if err != nil {
            return -1, err
        }
        ticks := int64(ret)
        clockTicks = &ticks
    }
    return *clockTicks, nil
}
vendor/github.com/containers/psgo/internal/process/process.go (generated, vendored; 12 changed lines)

@@ -192,8 +192,12 @@ func (p *Process) ElapsedTime() (time.Duration, error) {
    if err != nil {
        return 0, err
    }
    clockTicks, err := host.ClockTicks()
    if err != nil {
        return 0, err
    }

    sinceBoot = sinceBoot / host.ClockTicks()
    sinceBoot = sinceBoot / clockTicks

    bootTime, err := host.BootTime()
    if err != nil {
@@ -213,7 +217,11 @@ func (p *Process) CPUTime() (time.Duration, error) {
    if err != nil {
        return 0, err
    }
    secs := (user + system) / host.ClockTicks()
    clockTicks, err := host.ClockTicks()
    if err != nil {
        return 0, err
    }
    secs := (user + system) / clockTicks
    cpu := time.Unix(secs, 0)
    return cpu.Sub(time.Unix(0, 0)), nil
}
vendor/modules.txt (vendored; 2 changed lines)

@@ -98,7 +98,7 @@ github.com/containers/image/pkg/compression
github.com/containers/image/pkg/blobinfocache/boltdb
github.com/containers/image/pkg/blobinfocache/memory
github.com/containers/image/pkg/blobinfocache/internal/prioritize
# github.com/containers/psgo v1.3.0
# github.com/containers/psgo v1.3.1
github.com/containers/psgo
github.com/containers/psgo/internal/capabilities
github.com/containers/psgo/internal/dev