@@ -1,5 +1,31 @@
# Release Notes

## 3.2.1
### Changes
- Podman now allows corrupt images (e.g. from restarting the system during an image pull) to be replaced by a `podman pull` of the same image (instead of requiring they be removed first, then re-pulled).

### Bugfixes
- Fixed a bug where Podman would fail to start containers if a Seccomp profile was not available at `/usr/share/containers/seccomp.json` ([#10556](https://github.com/containers/podman/issues/10556)).
- Fixed a bug where the `podman machine start` command failed on OS X machines with the AMD64 architecture and certain QEMU versions ([#10555](https://github.com/containers/podman/issues/10555)).
- Fixed a bug where Podman would always use the slow path for joining the rootless user namespace.
- Fixed a bug where the `podman stats` command would fail on Cgroups v1 systems when run on a container running systemd ([#10602](https://github.com/containers/podman/issues/10602)).
- Fixed a bug where pre-checkpoint support for `podman container checkpoint` did not function correctly.
- Fixed a bug where the remote Podman client's `podman build` command did not properly handle the `-f` option ([#9871](https://github.com/containers/podman/issues/9871)).
- Fixed a bug where the remote Podman client's `podman run` command would sometimes not resize the container's terminal before execution began ([#9859](https://github.com/containers/podman/issues/9859)).
- Fixed a bug where the `--filter` option to the `podman image prune` command was nonfunctional.
- Fixed a bug where the `podman logs -f` command would exit before all output for a container was printed when the `k8s-file` log driver was in use ([#10596](https://github.com/containers/podman/issues/10596)).
- Fixed a bug where Podman would not correctly detect that systemd-resolved was in use on the host and adjust DNS servers in the container appropriately under some circumstances ([#10570](https://github.com/containers/podman/issues/10570)).
- Fixed a bug where the `podman network connect` and `podman network disconnect` commands acted improperly when containers were in the Created state, marking the changes as done but not actually performing them.

### API
- Fixed a bug where the Compat and Libpod Prune endpoints for Networks returned null, instead of an empty array, when nothing was pruned.
- Fixed a bug where the Create API for Images would continue to pull images even if a client closed the connection mid-pull ([#7558](https://github.com/containers/podman/issues/7558)).
- Fixed a bug where the Events API did not include some information (including labels) when sending events.
- Fixed a bug where the Events API would, when streaming was not requested, send at most one event ([#10529](https://github.com/containers/podman/issues/10529)).

### Misc
- Updated the containers/common library to v0.38.9

## 3.2.0
### Features
- Docker Compose is now supported with rootless Podman ([#9169](https://github.com/containers/podman/issues/9169)).
@@ -1,3 +1,25 @@
- Changelog for v3.2.1 (2021-06-11):
  * Updated release notes for v3.2.1
  * remote events: fix --stream=false
  * [CI:DOCS] fix incorrect network remove api doc
  * remote: always send resize before the container starts
  * remote events: support labels
  * remote pull: cancel pull when connection is closed
  * Fix network prune api docs
  * Improve systemd-resolved detection
  * logs: k8s-file: fix race
  * Fix image prune --filter cmd behavior
  * podman-remote build should handle -f option properly
  * System tests: deal with crun 0.20.1
  * Fix build tags for pkg/machine...
  * Fix pre-checkpointing
  * container: ignore named hierarchies
  * [v3.2] vendor containers/common@v0.38.9
  * rootless: fix fast join userns path
  * [v3.2] vendor containers/common@v0.38.7
  * [v3.2] vendor containers/common@v0.38.6
  * Correct qemu options for Intel macs

- Changelog for v3.2.0 (2021-06-03):
  * Final release notes updates for v3.2.0
  * add ipv6 nameservers only when the container has ipv6 enabled
@@ -11,6 +11,7 @@ import (
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/network"
"github.com/containers/podman/v3/pkg/registries"
"github.com/containers/podman/v3/pkg/rootless"
systemdDefine "github.com/containers/podman/v3/pkg/systemd/define"

@@ -243,7 +244,7 @@ func getRegistries() ([]string, cobra.ShellCompDirective) {
return regs, cobra.ShellCompDirectiveNoFileComp
}

func getNetworks(cmd *cobra.Command, toComplete string) ([]string, cobra.ShellCompDirective) {
func getNetworks(cmd *cobra.Command, toComplete string, cType completeType) ([]string, cobra.ShellCompDirective) {
suggestions := []string{}
networkListOptions := entities.NetworkListOptions{}

@@ -259,7 +260,15 @@ func getNetworks(cmd *cobra.Command, toComplete string) ([]string, cobra.ShellCo
}

for _, n := range networks {
if strings.HasPrefix(n.Name, toComplete) {
id := network.GetNetworkID(n.Name)
// include ids in suggestions if cType == completeIDs or
// more than 2 chars are typed and cType == completeDefault
if ((len(toComplete) > 1 && cType == completeDefault) ||
cType == completeIDs) && strings.HasPrefix(id, toComplete) {
suggestions = append(suggestions, id[0:12])
}
// include name in suggestions
if cType != completeIDs && strings.HasPrefix(n.Name, toComplete) {
suggestions = append(suggestions, n.Name)
}
}

@@ -502,7 +511,7 @@ func AutocompleteNetworks(cmd *cobra.Command, args []string, toComplete string)
if !validCurrentCmdLine(cmd, args, toComplete) {
return nil, cobra.ShellCompDirectiveNoFileComp
}
return getNetworks(cmd, toComplete)
return getNetworks(cmd, toComplete, completeDefault)
}

// AutocompleteDefaultOneArg - Autocomplete path only for the first argument.

@@ -588,7 +597,7 @@ func AutocompleteContainerOneArg(cmd *cobra.Command, args []string, toComplete s
// AutocompleteNetworkConnectCmd - Autocomplete podman network connect/disconnect command args.
func AutocompleteNetworkConnectCmd(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
if len(args) == 0 {
return getNetworks(cmd, toComplete)
return getNetworks(cmd, toComplete, completeDefault)
}
if len(args) == 1 {
return getContainers(cmd, toComplete, completeDefault)

@@ -624,7 +633,7 @@ func AutocompleteInspect(cmd *cobra.Command, args []string, toComplete string) (
containers, _ := getContainers(cmd, toComplete, completeDefault)
images, _ := getImages(cmd, toComplete)
pods, _ := getPods(cmd, toComplete, completeDefault)
networks, _ := getNetworks(cmd, toComplete)
networks, _ := getNetworks(cmd, toComplete, completeDefault)
volumes, _ := getVolumes(cmd, toComplete)
suggestions := append(containers, images...)
suggestions = append(suggestions, pods...)

@@ -885,7 +894,7 @@ func AutocompleteNetworkFlag(cmd *cobra.Command, args []string, toComplete strin
},
}

networks, _ := getNetworks(cmd, toComplete)
networks, _ := getNetworks(cmd, toComplete, completeDefault)
suggestions, dir := completeKeyValues(toComplete, kv)
// add slirp4netns here it does not work correct if we add it to the kv map
suggestions = append(suggestions, "slirp4netns")

@@ -1039,7 +1048,10 @@ func AutocompleteNetworkDriver(cmd *cobra.Command, args []string, toComplete str
// -> "ipc", "net", "pid", "user", "uts", "cgroup", "none"
func AutocompletePodShareNamespace(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
namespaces := []string{"ipc", "net", "pid", "user", "uts", "cgroup", "none"}
return namespaces, cobra.ShellCompDirectiveNoFileComp
split := strings.Split(toComplete, ",")
split[len(split)-1] = ""
toComplete = strings.Join(split, ",")
return prefixSlice(toComplete, namespaces), cobra.ShellCompDirectiveNoFileComp
}

// AutocompletePodPsSort - Autocomplete images sort options.

@@ -1115,7 +1127,7 @@ func AutocompletePsFilters(cmd *cobra.Command, args []string, toComplete string)
return []string{define.HealthCheckHealthy,
define.HealthCheckUnhealthy}, cobra.ShellCompDirectiveNoFileComp
},
"network=": func(s string) ([]string, cobra.ShellCompDirective) { return getNetworks(cmd, s) },
"network=": func(s string) ([]string, cobra.ShellCompDirective) { return getNetworks(cmd, s, completeDefault) },
"label=": nil,
"exited=": nil,
"until=": nil,

@@ -1138,7 +1150,7 @@ func AutocompletePodPsFilters(cmd *cobra.Command, args []string, toComplete stri
"ctr-status=": func(_ string) ([]string, cobra.ShellCompDirective) {
return containerStatuses, cobra.ShellCompDirectiveNoFileComp
},
"network=": func(s string) ([]string, cobra.ShellCompDirective) { return getNetworks(cmd, s) },
"network=": func(s string) ([]string, cobra.ShellCompDirective) { return getNetworks(cmd, s, completeDefault) },
"label=": nil,
}
return completeKeyValues(toComplete, kv)

@@ -1158,11 +1170,28 @@ func AutocompleteImageFilters(cmd *cobra.Command, args []string, toComplete stri
return completeKeyValues(toComplete, kv)
}

// AutocompletePruneFilters - Autocomplete container/image prune --filter options.
func AutocompletePruneFilters(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
kv := keyValueCompletion{
"until=": nil,
"label=": nil,
}
return completeKeyValues(toComplete, kv)
}

// AutocompleteNetworkFilters - Autocomplete network ls --filter options.
func AutocompleteNetworkFilters(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
kv := keyValueCompletion{
"name=": func(s string) ([]string, cobra.ShellCompDirective) { return getNetworks(cmd, s) },
"plugin=": nil,
"name=": func(s string) ([]string, cobra.ShellCompDirective) { return getNetworks(cmd, s, completeNames) },
"id=": func(s string) ([]string, cobra.ShellCompDirective) { return getNetworks(cmd, s, completeIDs) },
"plugin=": func(_ string) ([]string, cobra.ShellCompDirective) {
return []string{"bridge", "portmap",
"firewall", "tuning", "dnsname", "macvlan"}, cobra.ShellCompDirectiveNoFileComp
},
"label=": nil,
"driver=": func(_ string) ([]string, cobra.ShellCompDirective) {
return []string{"bridge"}, cobra.ShellCompDirectiveNoFileComp
},
}
return completeKeyValues(toComplete, kv)
}
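The hunk above threads a new `completeType` argument (`completeDefault`, `completeIDs`, `completeNames`) through `getNetworks` so shell completion can offer network names, IDs, or both. A minimal, self-contained sketch of that selection logic; the enum values and the 12-character ID truncation mirror the diff, while the `network` struct, `suggest` helper, and sample data are illustrative assumptions rather than the real podman API:

```go
package main

import (
	"fmt"
	"strings"
)

// completeType mirrors the enum used by the completion code above.
type completeType int

const (
	completeDefault completeType = iota // names, plus IDs once more than one char is typed
	completeIDs
	completeNames
)

// network is a stand-in for the entities the real helper lists.
type network struct{ Name, ID string }

// suggest filters names and/or truncated IDs by the typed prefix,
// following the same rules as getNetworks in the hunk above.
func suggest(networks []network, toComplete string, cType completeType) []string {
	var suggestions []string
	for _, n := range networks {
		if ((len(toComplete) > 1 && cType == completeDefault) || cType == completeIDs) &&
			strings.HasPrefix(n.ID, toComplete) {
			suggestions = append(suggestions, n.ID[0:12])
		}
		if cType != completeIDs && strings.HasPrefix(n.Name, toComplete) {
			suggestions = append(suggestions, n.Name)
		}
	}
	return suggestions
}

func main() {
	nets := []network{{Name: "podman", ID: "2f259bab93aa4e4211bce1c3bcc2f9df"}}
	fmt.Println(suggest(nets, "po", completeDefault)) // [podman]
	fmt.Println(suggest(nets, "2f", completeIDs))     // [2f259bab93aa]
}
```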
@@ -43,7 +43,7 @@ func init() {
flags.BoolVarP(&force, "force", "f", false, "Do not prompt for confirmation. The default is false")
filterFlagName := "filter"
flags.StringArrayVar(&filter, filterFlagName, []string{}, "Provide filter values (e.g. 'label=<key>=<value>')")
_ = pruneCommand.RegisterFlagCompletionFunc(filterFlagName, completion.AutocompleteNone)
_ = pruneCommand.RegisterFlagCompletionFunc(filterFlagName, common.AutocompletePruneFilters)
}

func prune(cmd *cobra.Command, args []string) error {
@@ -7,6 +7,7 @@ import (
"strings"

"github.com/containers/common/pkg/completion"
"github.com/containers/podman/v3/cmd/podman/common"
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/utils"
"github.com/containers/podman/v3/cmd/podman/validate"

@@ -44,8 +45,7 @@ func init() {

filterFlagName := "filter"
flags.StringArrayVar(&filter, filterFlagName, []string{}, "Provide filter values (e.g. 'label=<key>=<value>')")
//TODO: add completion for filters
_ = pruneCmd.RegisterFlagCompletionFunc(filterFlagName, completion.AutocompleteNone)
_ = pruneCmd.RegisterFlagCompletionFunc(filterFlagName, common.AutocompletePruneFilters)
}

func prune(cmd *cobra.Command, args []string) error {

@@ -60,7 +60,15 @@ func prune(cmd *cobra.Command, args []string) error {
return nil
}
}

filterMap, err := common.ParseFilters(filter)
if err != nil {
return err
}
for k, v := range filterMap {
for _, val := range v {
pruneOpts.Filter = append(pruneOpts.Filter, fmt.Sprintf("%s=%s", k, val))
}
}
results, err := registry.ImageEngine().Prune(registry.GetContext(), pruneOpts)
if err != nil {
return err
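The new prune path above parses repeated `--filter key=value` flags into a map and then flattens them back into `key=value` strings for the image engine. A small sketch of that flattening step, using only the standard library (function and variable names are illustrative):

```go
package main

import "fmt"

// flattenFilters turns the parsed --filter map back into "key=value"
// strings, the shape the prune options in the hunk above expect.
func flattenFilters(filterMap map[string][]string) []string {
	var out []string
	for k, vals := range filterMap {
		for _, v := range vals {
			out = append(out, fmt.Sprintf("%s=%s", k, v))
		}
	}
	return out
}

func main() {
	fmt.Println(flattenFilters(map[string][]string{"label": {"abc"}, "until": {"24h"}}))
}
```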
@@ -6,7 +6,6 @@ import (
"os"
"strings"

"github.com/containers/common/pkg/completion"
"github.com/containers/podman/v3/cmd/podman/common"
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/utils"

@@ -39,7 +38,7 @@ func networkPruneFlags(cmd *cobra.Command, flags *pflag.FlagSet) {
flags.BoolVarP(&force, "force", "f", false, "do not prompt for confirmation")
filterFlagName := "filter"
flags.StringArrayVar(&filter, filterFlagName, []string{}, "Provide filter values (e.g. 'label=<key>=<value>')")
_ = cmd.RegisterFlagCompletionFunc(filterFlagName, completion.AutocompleteNone)
_ = cmd.RegisterFlagCompletionFunc(filterFlagName, common.AutocompletePruneFilters)
}

func init() {
@@ -8,6 +8,7 @@ import (
"strings"

"github.com/containers/common/pkg/completion"
"github.com/containers/podman/v3/cmd/podman/common"
"github.com/containers/podman/v3/cmd/podman/parse"
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/utils"

@@ -50,7 +51,7 @@ func init() {
flags.BoolVar(&pruneOptions.Volume, "volumes", false, "Prune volumes")
filterFlagName := "filter"
flags.StringArrayVar(&filters, filterFlagName, []string{}, "Provide filter values (e.g. 'label=<key>=<value>')")
_ = pruneCommand.RegisterFlagCompletionFunc(filterFlagName, completion.AutocompleteNone)
_ = pruneCommand.RegisterFlagCompletionFunc(filterFlagName, common.AutocompletePruneFilters)
}

func prune(cmd *cobra.Command, args []string) error {
@@ -36,7 +36,7 @@ Epoch: 99
%else
Epoch: 0
%endif
Version: 3.2.1
Version: 3.2.2
Release: #COMMITDATE#.git%{shortcommit0}%{?dist}
Summary: Manage Pods, Containers and Container Images
License: ASL 2.0
@@ -9,7 +9,7 @@ podman\-network-create - Create a Podman CNI network
## DESCRIPTION
Create a CNI-network configuration for use with Podman. By default, Podman creates a bridge connection.
A *Macvlan* connection can be created with the *-d macvlan* option. A parent device for macvlan can
be designated with the *-o parent=\<device>* option. In the case of *Macvlan* connections, the
be designated with the *-o parent=`<device>`* option. In the case of *Macvlan* connections, the
CNI *dhcp* plugin needs to be activated or the container image must have a DHCP client to interact
with the host network's DHCP server.
@@ -10,8 +10,8 @@ podman\-pod\-create - Create a new pod

Creates an empty pod, or unit of multiple containers, and prepares it to have
containers added to it. The pod id is printed to STDOUT. You can then use
**podman create --pod \<pod_id|pod_name\> ...** to add containers to the pod, and
**podman pod start \<pod_id|pod_name\>** to start the pod.
**podman create --pod `<pod_id|pod_name>` ...** to add containers to the pod, and
**podman pod start `<pod_id|pod_name>`** to start the pod.

## OPTIONS
@@ -16,7 +16,14 @@ flag. Use the **--quiet** flag to print only the volume names.

#### **--filter**=*filter*, **-f**

Filter volume output.
Volumes can be filtered by the following attributes:

- dangling
- driver
- label
- name
- opt
- scope

#### **--format**=*format*
@@ -946,6 +946,12 @@ func (c *Container) cGroupPath() (string, error) {
// is the libpod-specific one we're looking for.
//
// See #8397 on the need for the longest-path look up.
//
// And another workaround for containers running systemd as the payload:
// containers running systemd move themselves into a child subgroup of
// the named systemd cgroup hierarchy. Ignore any named cgroups during
// the lookup.
// See #10602 for more details.
procPath := fmt.Sprintf("/proc/%d/cgroup", c.state.PID)
lines, err := ioutil.ReadFile(procPath)
if err != nil {

@@ -961,6 +967,10 @@ func (c *Container) cGroupPath() (string, error) {
logrus.Debugf("Error parsing cgroup: expected 3 fields but got %d: %s", len(fields), procPath)
continue
}
// Ignore named cgroups like name=systemd.
if bytes.Contains(fields[1], []byte("=")) {
continue
}
path := string(fields[2])
if len(path) > len(cgroupPath) {
cgroupPath = path
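A standalone sketch of the lookup strategy the comments above describe: scan `/proc/<pid>/cgroup`, skip named hierarchies such as `name=systemd`, and keep the longest path seen. Function and variable names here are illustrative, not the libpod API:

```go
package main

import (
	"bytes"
	"fmt"
	"os"
)

// cgroupPathOf scans /proc/<pid>/cgroup, ignores named hierarchies
// (entries whose controller field contains "="), and returns the
// longest cgroup path, mirroring the lookup described above.
func cgroupPathOf(pid int) (string, error) {
	data, err := os.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid))
	if err != nil {
		return "", err
	}
	var longest string
	for _, line := range bytes.Split(data, []byte("\n")) {
		fields := bytes.Split(line, []byte(":"))
		if len(fields) != 3 {
			continue
		}
		// Named hierarchies such as "1:name=systemd:/..." are skipped.
		if bytes.Contains(fields[1], []byte("=")) {
			continue
		}
		if path := string(fields[2]); len(path) > len(longest) {
			longest = path
		}
	}
	return longest, nil
}

func main() {
	p, err := cgroupPathOf(os.Getpid())
	fmt.Println(p, err)
}
```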
@@ -41,6 +41,7 @@ const (
// name of the directory holding the artifacts
artifactsDir = "artifacts"
execDirPermission = 0755
preCheckpointDir = "pre-checkpoint"
)

// rootFsSize gets the size of the container's root filesystem

@@ -140,7 +141,7 @@ func (c *Container) CheckpointPath() string {

// PreCheckpointPath returns the path to the directory containing the pre-checkpoint-images
func (c *Container) PreCheckPointPath() string {
return filepath.Join(c.bundlePath(), "pre-checkpoint")
return filepath.Join(c.bundlePath(), preCheckpointDir)
}

// AttachSocketPath retrieves the path of the container's attach socket
@@ -907,14 +907,15 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
includeFiles := []string{
"artifacts",
"ctr.log",
metadata.CheckpointDirectory,
metadata.ConfigDumpFile,
metadata.SpecDumpFile,
metadata.NetworkStatusFile,
}

if options.PreCheckPoint {
includeFiles[0] = "pre-checkpoint"
includeFiles = append(includeFiles, preCheckpointDir)
} else {
includeFiles = append(includeFiles, metadata.CheckpointDirectory)
}
// Get root file-system changes included in the checkpoint archive
var addToTarFiles []string
@@ -1648,22 +1649,20 @@ func (c *Container) generateResolvConf() (string, error) {
}
}

// Determine the endpoint for resolv.conf in case it is a symlink
resolvPath, err := filepath.EvalSymlinks(resolvConf)
contents, err := ioutil.ReadFile(resolvConf)
// resolv.conf doesn't have to exist
if err != nil && !os.IsNotExist(err) {
return "", err
}

// Determine if symlink points to any of the systemd-resolved files
if strings.HasPrefix(resolvPath, "/run/systemd/resolve/") {
resolvPath = "/run/systemd/resolve/resolv.conf"
}

contents, err := ioutil.ReadFile(resolvPath)
// resolv.conf doesn't have to exist
if err != nil && !os.IsNotExist(err) {
return "", err
ns := resolvconf.GetNameservers(contents)
// check if systemd-resolved is used, assume it is used when 127.0.0.53 is the only nameserver
if len(ns) == 1 && ns[0] == "127.0.0.53" {
// read the actual resolv.conf file for systemd-resolved
contents, err = ioutil.ReadFile("/run/systemd/resolve/resolv.conf")
if err != nil {
return "", errors.Wrapf(err, "detected that systemd-resolved is in use, but could not locate real resolv.conf")
}
}

ipv6 := false
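A sketch of the systemd-resolved handling introduced above: when the host's resolv.conf lists only the stub resolver 127.0.0.53, read the real upstream servers from `/run/systemd/resolve/resolv.conf`. The `nameservers` helper below stands in for `resolvconf.GetNameservers`; the paths mirror the diff, everything else is illustrative:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// hostResolvConf returns the resolv.conf contents to use for a container,
// substituting the systemd-resolved upstream file when only the stub
// resolver 127.0.0.53 is configured.
func hostResolvConf() ([]byte, error) {
	contents, err := os.ReadFile("/etc/resolv.conf")
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	ns := nameservers(contents)
	if len(ns) == 1 && ns[0] == "127.0.0.53" {
		return os.ReadFile("/run/systemd/resolve/resolv.conf")
	}
	return contents, nil
}

// nameservers is a minimal stand-in for resolvconf.GetNameservers.
func nameservers(contents []byte) []string {
	var out []string
	for _, line := range strings.Split(string(contents), "\n") {
		fields := strings.Fields(line)
		if len(fields) == 2 && fields[0] == "nameserver" {
			out = append(out, fields[1])
		}
	}
	return out
}

func main() {
	c, err := hostResolvConf()
	fmt.Println(len(c), err)
}
```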
@@ -4,11 +4,10 @@ import (
"context"
"fmt"
"os"
"time"

"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/libpod/events"
"github.com/containers/podman/v3/libpod/logs"
"github.com/hpcloud/tail/watch"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -94,27 +93,40 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption
}()
// Check if container is still running or paused
if options.Follow {
state, err := c.State()
if err != nil || state != define.ContainerStateRunning {
// If the container isn't running or if we encountered
// an error getting its state, instruct the logger to
// read the file until EOF.
tailError := t.StopAtEOF()
if tailError != nil && fmt.Sprintf("%v", tailError) != "tail: stop at eof" {
logrus.Error(tailError)
}
if errors.Cause(err) != define.ErrNoSuchCtr {
logrus.Error(err)
}
return nil
}

// The container is running, so we need to wait until the container exited
go func() {
for {
state, err := c.State()
time.Sleep(watch.POLL_DURATION)
if err != nil {
tailError := t.StopAtEOF()
if tailError != nil && fmt.Sprintf("%v", tailError) != "tail: stop at eof" {
logrus.Error(tailError)
}
if errors.Cause(err) != define.ErrNoSuchCtr {
logrus.Error(err)
}
break
}
if state != define.ContainerStateRunning && state != define.ContainerStatePaused {
tailError := t.StopAtEOF()
if tailError != nil && fmt.Sprintf("%v", tailError) != "tail: stop at eof" {
logrus.Error(tailError)
}
break
eventChannel := make(chan *events.Event)
eventOptions := events.ReadOptions{
EventChannel: eventChannel,
Filters: []string{"event=died", "container=" + c.ID()},
Stream: true,
}
go func() {
if err := c.runtime.Events(ctx, eventOptions); err != nil {
logrus.Errorf("Error waiting for container to exit: %v", err)
}
}()
// Now wait for the died event and signal to finish
// reading the log until EOF.
<-eventChannel
tailError := t.StopAtEOF()
if tailError != nil && fmt.Sprintf("%v", tailError) != "tail: stop at eof" {
logrus.Error(tailError)
}
}()
}
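The rewrite above replaces state polling with an event subscription: a goroutine streams the container's `died` event and only when that event arrives is the tailer told to read to EOF and stop. A generic sketch of that wait-then-stop pattern; channel and function names (`died`, `stopAtEOF`) are illustrative, not libpod's API:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// followLogs waits, in the background, for either cancellation or the
// container's death signal, then tells the tailer to drain to EOF.
func followLogs(ctx context.Context, died <-chan struct{}, stopAtEOF func() error) {
	go func() {
		select {
		case <-ctx.Done():
		case <-died:
		}
		if err := stopAtEOF(); err != nil {
			fmt.Println("stop at eof:", err)
		}
	}()
}

func main() {
	died := make(chan struct{})
	followLogs(context.Background(), died, func() error {
		fmt.Println("reading remaining log lines, then stopping")
		return nil
	})
	time.Sleep(100 * time.Millisecond)
	close(died) // simulate the "died" event arriving
	time.Sleep(100 * time.Millisecond)
}
```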
@@ -1068,7 +1068,7 @@ func (c *Container) NetworkDisconnect(nameOrID, netName string, force bool) erro
}

c.newNetworkEvent(events.NetworkDisconnect, netName)
if c.state.State != define.ContainerStateRunning {
if !c.ensureState(define.ContainerStateRunning, define.ContainerStateCreated) {
return nil
}

@@ -1123,7 +1123,7 @@ func (c *Container) NetworkConnect(nameOrID, netName string, aliases []string) e
return err
}
c.newNetworkEvent(events.NetworkConnect, netName)
if c.state.State != define.ContainerStateRunning {
if !c.ensureState(define.ContainerStateRunning, define.ContainerStateCreated) {
return nil
}
if c.state.NetNS == nil {
@@ -787,7 +787,11 @@ func (r *ConmonOCIRuntime) CheckpointContainer(ctr *Container, options Container
args = append(args, "--pre-dump")
}
if !options.PreCheckPoint && options.WithPrevious {
args = append(args, "--parent-path", ctr.PreCheckPointPath())
args = append(
args,
"--parent-path",
filepath.Join("..", preCheckpointDir),
)
}
runtimeDir, err := util.GetRuntimeDir()
if err != nil {
@@ -75,7 +75,7 @@ func GetEvents(w http.ResponseWriter, r *http.Request) {
coder := json.NewEncoder(w)
coder.SetEscapeHTML(true)

for stream := true; stream; stream = query.Stream {
for {
select {
case err := <-errorChannel:
if err != nil {
@@ -139,6 +139,31 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
addCaps = m
}

// convert addcaps formats
containerFiles := []string{}
if _, found := r.URL.Query()["dockerfile"]; found {
var m = []string{}
if err := json.Unmarshal([]byte(query.Dockerfile), &m); err != nil {
// it's not json, assume just a string
m = append(m, query.Dockerfile)
}
containerFiles = m
} else {
containerFiles = []string{"Dockerfile"}
if utils.IsLibpodRequest(r) {
containerFiles = []string{"Containerfile"}
if _, err = os.Stat(filepath.Join(contextDirectory, "Containerfile")); err != nil {
if _, err1 := os.Stat(filepath.Join(contextDirectory, "Dockerfile")); err1 == nil {
containerFiles = []string{"Dockerfile"}
} else {
utils.BadRequest(w, "dockerfile", query.Dockerfile, err)
}
}
} else {
containerFiles = []string{"Dockerfile"}
}
}

addhosts := []string{}
if _, found := r.URL.Query()["extrahosts"]; found {
if err := json.Unmarshal([]byte(query.AddHosts), &addhosts); err != nil {

@@ -470,7 +495,7 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
runCtx, cancel := context.WithCancel(context.Background())
go func() {
defer cancel()
imageID, _, err = runtime.Build(r.Context(), buildOptions, query.Dockerfile)
imageID, _, err = runtime.Build(r.Context(), buildOptions, containerFiles...)
if err == nil {
success = true
} else {
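The compat build endpoint above now accepts the `dockerfile` query parameter either as a JSON array of paths or as a bare string. A sketch of that fallback parsing, using only the standard library (the function name is illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// parseDockerfileParam accepts either a JSON array of file names or a
// plain string, mirroring the fallback logic in the handler above.
func parseDockerfileParam(raw string) []string {
	var files []string
	if err := json.Unmarshal([]byte(raw), &files); err != nil {
		// Not JSON: treat the raw value as a single file name.
		files = []string{raw}
	}
	return files
}

func main() {
	fmt.Println(parseDockerfileParam(`["Containerfile","Containerfile.in"]`))
	fmt.Println(parseDockerfileParam("Dockerfile"))
}
```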
@@ -414,7 +414,7 @@ func Prune(w http.ResponseWriter, r *http.Request) {
type response struct {
NetworksDeleted []string
}
var prunedNetworks []string //nolint
prunedNetworks := []string{}
for _, pr := range pruneReports {
if pr.Error != nil {
logrus.Error(pr.Error)
@@ -46,20 +46,13 @@ func ResizeTTY(w http.ResponseWriter, r *http.Request) {
utils.ContainerNotFound(w, name, err)
return
}
if state, err := ctnr.State(); err != nil {
utils.InternalServerError(w, errors.Wrapf(err, "cannot obtain container state"))
return
} else if state != define.ContainerStateRunning && !query.IgnoreNotRunning {
utils.Error(w, "Container not running", http.StatusConflict,
fmt.Errorf("container %q in wrong state %q", name, state.String()))
return
}
// If container is not running, ignore since this can be a race condition, and is expected
if err := ctnr.AttachResize(sz); err != nil {
if errors.Cause(err) != define.ErrCtrStateInvalid || !query.IgnoreNotRunning {
if errors.Cause(err) != define.ErrCtrStateInvalid {
utils.InternalServerError(w, errors.Wrapf(err, "cannot resize container"))
return
} else {
utils.Error(w, "Container not running", http.StatusConflict, err)
}
return
}
// This is not a 204, even though we write nothing, for compatibility
// reasons.
@@ -77,10 +77,3 @@ type swagCompatNetworkDisconnectRequest struct {
// in:body
Body struct{ types.NetworkDisconnect }
}

// Network prune
// swagger:response NetworkPruneResponse
type swagCompatNetworkPruneResponse struct {
// in:body
Body []string
}
@@ -85,7 +85,7 @@ func ImagesPull(w http.ResponseWriter, r *http.Request) {

var pulledImages []*libimage.Image
var pullError error
runCtx, cancel := context.WithCancel(context.Background())
runCtx, cancel := context.WithCancel(r.Context())
go func() {
defer cancel()
pulledImages, pullError = runtime.LibimageRuntime().Pull(runCtx, query.Reference, config.PullPolicyAlways, pullOptions)
@@ -190,5 +190,8 @@ func Prune(w http.ResponseWriter, r *http.Request) {
utils.Error(w, "Something went wrong.", http.StatusInternalServerError, err)
return
}
if pruneReports == nil {
pruneReports = []*entities.NetworkPruneReport{}
}
utils.WriteResponse(w, http.StatusOK, pruneReports)
}
@@ -95,7 +95,7 @@ type swagInfoResponse struct {
// swagger:response NetworkRmReport
type swagNetworkRmReport struct {
// in:body
Body entities.NetworkRmReport
Body []entities.NetworkRmReport
}

// Network inspect

@@ -119,6 +119,13 @@ type swagNetworkCreateReport struct {
Body entities.NetworkCreateReport
}

// Network prune
// swagger:response NetworkPruneResponse
type swagNetworkPruneResponse struct {
// in:body
Body []entities.NetworkPruneReport
}

func ServeSwagger(w http.ResponseWriter, r *http.Request) {
path := DefaultPodmanSwaggerSpec
if p, found := os.LookupEnv("PODMAN_SWAGGER_SPEC"); found {
@@ -1359,6 +1359,8 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/ok"
// 404:
// $ref: "#/responses/NoSuchContainer"
// 409:
// $ref: "#/responses/ConflictError"
// 500:
// $ref: "#/responses/InternalError"
r.HandleFunc(VersionedPath("/libpod/containers/{name}/resize"), s.APIHandler(compat.ResizeTTY)).Methods(http.MethodPost)
@@ -180,9 +180,12 @@ func (s *APIServer) registerNetworkHandlers(r *mux.Router) error {
// 200:
// description: OK
// schema:
// type: array
// items:
// type: string
// type: object
// properties:
// NetworksDeleted:
// type: array
// items:
// type: string
// 500:
// $ref: "#/responses/InternalError"
r.HandleFunc(VersionedPath("/networks/prune"), s.APIHandler(compat.Prune)).Methods(http.MethodPost)
@ -138,7 +138,7 @@ func Attach(ctx context.Context, nameOrID string, stdin io.Reader, stdout io.Wri
|
||||
winCtx, winCancel := context.WithCancel(ctx)
|
||||
defer winCancel()
|
||||
|
||||
go attachHandleResize(ctx, winCtx, winChange, false, nameOrID, file)
|
||||
attachHandleResize(ctx, winCtx, winChange, false, nameOrID, file)
|
||||
}
|
||||
|
||||
// If we are attaching around a start, we need to "signal"
|
||||
@ -327,32 +327,38 @@ func (f *rawFormatter) Format(entry *logrus.Entry) ([]byte, error) {
|
||||
return append(buffer, '\r'), nil
|
||||
}
|
||||
|
||||
// This is intended to be run as a goroutine, handling resizing for a container
|
||||
// or exec session.
|
||||
// This is intended to be called synchronously (not as a goroutine): it
// performs one initial resize for the container or exec session, then starts
// a goroutine that resizes again on every winChange signal.
|
||||
func attachHandleResize(ctx, winCtx context.Context, winChange chan os.Signal, isExec bool, id string, file *os.File) {
|
||||
// Prime the pump, we need one reset to ensure everything is ready
|
||||
winChange <- sig.SIGWINCH
|
||||
for {
|
||||
select {
|
||||
case <-winCtx.Done():
|
||||
return
|
||||
case <-winChange:
|
||||
w, h, err := terminal.GetSize(int(file.Fd()))
|
||||
if err != nil {
|
||||
logrus.Warnf("failed to obtain TTY size: %v", err)
|
||||
}
|
||||
resize := func() {
|
||||
w, h, err := terminal.GetSize(int(file.Fd()))
|
||||
if err != nil {
|
||||
logrus.Warnf("failed to obtain TTY size: %v", err)
|
||||
}
|
||||
|
||||
var resizeErr error
|
||||
if isExec {
|
||||
resizeErr = ResizeExecTTY(ctx, id, new(ResizeExecTTYOptions).WithHeight(h).WithWidth(w))
|
||||
} else {
|
||||
resizeErr = ResizeContainerTTY(ctx, id, new(ResizeTTYOptions).WithHeight(h).WithWidth(w))
|
||||
}
|
||||
if resizeErr != nil {
|
||||
logrus.Warnf("failed to resize TTY: %v", resizeErr)
|
||||
}
|
||||
var resizeErr error
|
||||
if isExec {
|
||||
resizeErr = ResizeExecTTY(ctx, id, new(ResizeExecTTYOptions).WithHeight(h).WithWidth(w))
|
||||
} else {
|
||||
resizeErr = ResizeContainerTTY(ctx, id, new(ResizeTTYOptions).WithHeight(h).WithWidth(w))
|
||||
}
|
||||
if resizeErr != nil {
|
||||
logrus.Warnf("failed to resize TTY: %v", resizeErr)
|
||||
}
|
||||
}
|
||||
|
||||
resize()
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-winCtx.Done():
|
||||
return
|
||||
case <-winChange:
|
||||
resize()
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Configure the given terminal for raw mode
|
||||
@ -457,7 +463,7 @@ func ExecStartAndAttach(ctx context.Context, sessionID string, options *ExecStar
|
||||
winCtx, winCancel := context.WithCancel(ctx)
|
||||
defer winCancel()
|
||||
|
||||
go attachHandleResize(ctx, winCtx, winChange, true, sessionID, terminalFile)
|
||||
attachHandleResize(ctx, winCtx, winChange, true, sessionID, terminalFile)
|
||||
}
|
||||
|
||||
if options.GetAttachInput() {
|
||||
|
@ -282,10 +282,6 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
|
||||
stdout = options.Out
|
||||
}
|
||||
|
||||
entries := make([]string, len(containerFiles))
|
||||
copy(entries, containerFiles)
|
||||
entries = append(entries, options.ContextDirectory)
|
||||
|
||||
excludes := options.Excludes
|
||||
if len(excludes) == 0 {
|
||||
excludes, err = parseDockerignore(options.ContextDirectory)
|
||||
@ -294,9 +290,50 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
|
||||
}
|
||||
}
|
||||
|
||||
tarfile, err := nTar(excludes, entries...)
|
||||
contextDir, err := filepath.Abs(options.ContextDirectory)
|
||||
if err != nil {
|
||||
logrus.Errorf("cannot tar container entries %v error: %v", entries, err)
|
||||
logrus.Errorf("cannot find absolute path of %v: %v", options.ContextDirectory, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tarContent := []string{options.ContextDirectory}
|
||||
newContainerFiles := []string{}
|
||||
for _, c := range containerFiles {
|
||||
containerfile, err := filepath.Abs(c)
|
||||
if err != nil {
|
||||
logrus.Errorf("cannot find absolute path of %v: %v", c, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If the Containerfile is inside the context directory, strip the context
// directory prefix from its path and do NOT add it to the tar file separately.
|
||||
if strings.HasPrefix(containerfile, contextDir+string(filepath.Separator)) {
|
||||
containerfile = strings.TrimPrefix(containerfile, contextDir+string(filepath.Separator))
|
||||
} else {
|
||||
// If the Containerfile does not exist, assume it is in the context directory and do not add it to the tar file.
|
||||
if _, err := os.Lstat(containerfile); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
containerfile = c
|
||||
} else {
|
||||
// If the Containerfile exists but is outside the context directory, add it to the tar file.
|
||||
tarContent = append(tarContent, containerfile)
|
||||
}
|
||||
}
|
||||
newContainerFiles = append(newContainerFiles, containerfile)
|
||||
}
|
||||
if len(newContainerFiles) > 0 {
|
||||
cFileJSON, err := json.Marshal(newContainerFiles)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
params.Set("dockerfile", string(cFileJSON))
|
||||
}
|
||||
|
||||
tarfile, err := nTar(excludes, tarContent...)
|
||||
if err != nil {
|
||||
logrus.Errorf("cannot tar container entries %v error: %v", tarContent, err)
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
@ -305,23 +342,6 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
|
||||
}
|
||||
}()
|
||||
|
||||
containerFile, err := filepath.Abs(entries[0])
|
||||
if err != nil {
|
||||
logrus.Errorf("cannot find absolute path of %v: %v", entries[0], err)
|
||||
return nil, err
|
||||
}
|
||||
contextDir, err := filepath.Abs(entries[1])
|
||||
if err != nil {
|
||||
logrus.Errorf("cannot find absolute path of %v: %v", entries[1], err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if strings.HasPrefix(containerFile, contextDir+string(filepath.Separator)) {
|
||||
containerFile = strings.TrimPrefix(containerFile, contextDir+string(filepath.Separator))
|
||||
}
|
||||
|
||||
params.Set("dockerfile", containerFile)
|
||||
|
||||
conn, err := bindings.GetClient(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@@ -30,29 +30,41 @@ func ConvertToLibpodEvent(e Event) *libpodEvents.Event {
if err != nil {
return nil
}
image := e.Actor.Attributes["image"]
name := e.Actor.Attributes["name"]
details := e.Actor.Attributes
delete(details, "image")
delete(details, "name")
delete(details, "containerExitCode")
return &libpodEvents.Event{
ContainerExitCode: exitCode,
ID: e.Actor.ID,
Image: e.Actor.Attributes["image"],
Name: e.Actor.Attributes["name"],
Image: image,
Name: name,
Status: status,
Time: time.Unix(e.Time, e.TimeNano),
Type: t,
Details: libpodEvents.Details{
Attributes: details,
},
}
}

// ConvertToEntitiesEvent converts a libpod event to an entities one.
func ConvertToEntitiesEvent(e libpodEvents.Event) *Event {
attributes := e.Details.Attributes
if attributes == nil {
attributes = make(map[string]string)
}
attributes["image"] = e.Image
attributes["name"] = e.Name
attributes["containerExitCode"] = strconv.Itoa(e.ContainerExitCode)
return &Event{dockerEvents.Message{
Type: e.Type.String(),
Action: e.Status.String(),
Actor: dockerEvents.Actor{
ID: e.ID,
Attributes: map[string]string{
"image": e.Image,
"name": e.Name,
"containerExitCode": strconv.Itoa(e.ContainerExitCode),
},
ID: e.ID,
Attributes: attributes,
},
Scope: "local",
Time: e.Time.Unix(),
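The event-conversion change above promotes `image`, `name`, and `containerExitCode` out of the generic attribute map into dedicated fields so labels and other attributes survive the round trip. A sketch of that split; unlike the diff, this version copies the map before deleting keys, which is an assumption made for illustration rather than libpod's exact behavior:

```go
package main

import "fmt"

// splitAttributes extracts the fields that get dedicated event members and
// returns the remaining attributes (e.g. labels) as generic details.
func splitAttributes(attrs map[string]string) (image, name string, details map[string]string) {
	details = make(map[string]string, len(attrs))
	for k, v := range attrs {
		details[k] = v
	}
	image = details["image"]
	name = details["name"]
	delete(details, "image")
	delete(details, "name")
	delete(details, "containerExitCode")
	return image, name, details
}

func main() {
	img, name, rest := splitAttributes(map[string]string{
		"image": "alpine", "name": "c1", "containerExitCode": "0", "label": "a=b",
	})
	fmt.Println(img, name, rest) // alpine c1 map[label:a=b]
}
```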
@@ -1,3 +1,5 @@
// +build amd64,linux arm64,linux amd64,darwin arm64,darwin

package machine

import (

@@ -1,3 +1,5 @@
// +build amd64,linux arm64,linux amd64,darwin arm64,darwin

package machine

import (

@@ -1,3 +1,5 @@
// +build amd64,linux arm64,linux amd64,darwin arm64,darwin

package machine

import (

@@ -1,3 +1,5 @@
// +build amd64,linux arm64,linux amd64,darwin arm64,darwin

package machine

import (

@@ -1,3 +1,5 @@
// +build amd64,linux arm64,linux amd64,darwin arm64,darwin

package machine

/*

@@ -1,3 +1,5 @@
// +build amd64,linux arm64,linux amd64,darwin arm64,darwin

package machine

import (

@@ -1,3 +1,5 @@
// +build amd64,linux arm64,linux amd64,darwin arm64,darwin

package libvirt

type MachineVM struct {

@@ -1,3 +1,5 @@
// +build amd64,linux arm64,linux amd64,darwin arm64,darwin

package libvirt

import "github.com/containers/podman/v3/pkg/machine"

pkg/machine/libvirt/machine_unsupported.go (new file, 3 lines)
@@ -0,0 +1,3 @@
// +build !amd64 amd64,windows

package libvirt

pkg/machine/machine_unsupported.go (new file, 3 lines)
@@ -0,0 +1,3 @@
// +build !amd64 amd64,windows

package machine

@@ -1,3 +1,5 @@
// +build amd64,linux arm64,linux amd64,darwin arm64,darwin

package machine

import (

@@ -1,3 +1,5 @@
// +build amd64,linux arm64,linux amd64,darwin arm64,darwin

package qemu

import "time"

@@ -1,3 +1,5 @@
// +build amd64,linux arm64,linux amd64,darwin arm64,darwin

package qemu

import (

pkg/machine/qemu/machine_unsupported.go (new file, 3 lines)
@@ -0,0 +1,3 @@
// +build !amd64 amd64,windows

package qemu
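All of the `pkg/machine` files above gain the same build constraint. In the old `// +build` syntax, a space separates alternatives (OR) and a comma joins requirements (AND), so the constraint reads (amd64 AND linux) OR (arm64 AND linux) OR (amd64 AND darwin) OR (arm64 AND darwin), while the `*_unsupported.go` stubs build on (NOT amd64) OR (amd64 AND windows). A minimal file showing the constraint, with the newer `//go:build` form noted in a comment; that equivalence is my reading of Go's build-constraint rules, not something stated in the diff:

```go
// +build amd64,linux arm64,linux amd64,darwin arm64,darwin

// In the old syntax used above, a comma means AND and a space means OR, so
// this file builds only for amd64/arm64 on linux or darwin. Under Go 1.17+
// the same constraint could be written as:
//
//   //go:build (amd64 && linux) || (arm64 && linux) || (amd64 && darwin) || (arm64 && darwin)

package machine
```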
@ -19,7 +19,7 @@ for i in /version version; do
|
||||
t GET $i 200 \
|
||||
.Components[0].Name="Podman Engine" \
|
||||
.Components[0].Details.APIVersion~3[0-9.-]\\+ \
|
||||
.Components[0].Details.MinAPIVersion=3.2.0 \
|
||||
.Components[0].Details.MinAPIVersion=3.1.0 \
|
||||
.Components[0].Details.Os=linux \
|
||||
.ApiVersion=1.40 \
|
||||
.MinAPIVersion=1.24 \
|
||||
|
@ -147,4 +147,39 @@ t GET "images/get?names=alpine&names=busybox" 200 '[POSIX tar archive]'
|
||||
img_cnt=$(tar xf "$WORKDIR/curl.result.out" manifest.json -O | jq "length")
|
||||
is "$img_cnt" 2 "number of images in tar archive"
|
||||
|
||||
# check build works when uploading container file as a tar, see issue #10660
|
||||
TMPD=$(mktemp -d podman-apiv2-test.build.XXXXXXXX)
|
||||
function cleanBuildTest() {
|
||||
podman rmi -a -f
|
||||
rm -rf "${TMPD}" &> /dev/null
|
||||
}
|
||||
CONTAINERFILE_TAR="${TMPD}/containerfile.tar"
|
||||
cat > $TMPD/containerfile << EOF
|
||||
FROM quay.io/libpod/alpine_labels:latest
|
||||
EOF
|
||||
tar --format=posix -C $TMPD -cvf ${CONTAINERFILE_TAR} containerfile &> /dev/null
|
||||
|
||||
curl -XPOST --data-binary @<(cat $CONTAINERFILE_TAR) \
|
||||
-H "content-type: application/x-tar" \
|
||||
--dump-header "${TMPD}/headers.txt" \
|
||||
-o "${TMPD}/response.txt" \
|
||||
"http://$HOST:$PORT/v1.40/libpod/build?dockerfile=containerfile" &> /dev/null
|
||||
|
||||
BUILD_TEST_ERROR=""
|
||||
|
||||
if ! grep -q '200 OK' "${TMPD}/headers.txt"; then
|
||||
echo -e "${red}NOK: Image build from tar failed response was not 200 OK"
|
||||
BUILD_TEST_ERROR="1"
|
||||
fi
|
||||
|
||||
if ! grep -q 'quay.io/libpod/alpine_labels' "${TMPD}/response.txt"; then
|
||||
echo -e "${red}NOK: Image build from tar failed image name not in response"
|
||||
BUILD_TEST_ERROR="1"
|
||||
fi
|
||||
|
||||
cleanBuildTest
|
||||
if [[ "${BUILD_TEST_ERROR}" ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# vim: filetype=sh
|
||||
|
@ -183,6 +183,8 @@ function test_port() {
|
||||
fi
|
||||
echo "# cat $WORKDIR/server.log:"
|
||||
cat $WORKDIR/server.log
|
||||
echo "# cat $logfile:"
|
||||
cat $logfile
|
||||
return
|
||||
fi
|
||||
|
||||
|
@ -451,19 +451,13 @@ func (p *PodmanTestIntegration) RunLsContainerInPod(name, pod string) (*PodmanSe
|
||||
// BuildImage uses podman build and buildah to build an image
|
||||
// called imageName based on a string dockerfile
|
||||
func (p *PodmanTestIntegration) BuildImage(dockerfile, imageName string, layers string) string {
|
||||
dockerfilePath := filepath.Join(p.TempDir, "Dockerfile")
|
||||
err := ioutil.WriteFile(dockerfilePath, []byte(dockerfile), 0755)
|
||||
Expect(err).To(BeNil())
|
||||
cmd := []string{"build", "--pull-never", "--layers=" + layers, "--file", dockerfilePath}
|
||||
if len(imageName) > 0 {
|
||||
cmd = append(cmd, []string{"-t", imageName}...)
|
||||
}
|
||||
cmd = append(cmd, p.TempDir)
|
||||
session := p.Podman(cmd)
|
||||
session.Wait(240)
|
||||
Expect(session).Should(Exit(0), fmt.Sprintf("BuildImage session output: %q", session.OutputToString()))
|
||||
output := session.OutputToStringArray()
|
||||
return output[len(output)-1]
|
||||
return p.buildImage(dockerfile, imageName, layers, "")
|
||||
}
|
||||
|
||||
// BuildImageWithLabel uses podman build and buildah to build an image
|
||||
// called imageName based on a string dockerfile, adds desired label to paramset
|
||||
func (p *PodmanTestIntegration) BuildImageWithLabel(dockerfile, imageName string, layers string, label string) string {
|
||||
return p.buildImage(dockerfile, imageName, layers, label)
|
||||
}
|
||||
|
||||
// PodmanPID execs podman and returns its PID
|
||||
@ -828,3 +822,22 @@ func (p *PodmanSessionIntegration) jq(jqCommand string) (string, error) {
|
||||
err := cmd.Run()
|
||||
return strings.TrimRight(out.String(), "\n"), err
|
||||
}
|
||||
|
||||
func (p *PodmanTestIntegration) buildImage(dockerfile, imageName string, layers string, label string) string {
|
||||
dockerfilePath := filepath.Join(p.TempDir, "Dockerfile")
|
||||
err := ioutil.WriteFile(dockerfilePath, []byte(dockerfile), 0755)
|
||||
Expect(err).To(BeNil())
|
||||
cmd := []string{"build", "--pull-never", "--layers=" + layers, "--file", dockerfilePath}
|
||||
if label != "" {
|
||||
cmd = append(cmd, "--label="+label)
|
||||
}
|
||||
if len(imageName) > 0 {
|
||||
cmd = append(cmd, []string{"-t", imageName}...)
|
||||
}
|
||||
cmd = append(cmd, p.TempDir)
|
||||
session := p.Podman(cmd)
|
||||
session.Wait(240)
|
||||
Expect(session).Should(Exit(0), fmt.Sprintf("BuildImage session output: %q", session.OutputToString()))
|
||||
output := session.OutputToStringArray()
|
||||
return output[len(output)-1]
|
||||
}
|
||||
|
@ -8,6 +8,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containers/podman/v3/libpod/events"
|
||||
. "github.com/containers/podman/v3/test/utils"
|
||||
"github.com/containers/storage/pkg/stringid"
|
||||
. "github.com/onsi/ginkgo"
|
||||
@ -134,12 +135,10 @@ var _ = Describe("Podman events", func() {
|
||||
jsonArr := test.OutputToStringArray()
|
||||
Expect(test.OutputToStringArray()).ShouldNot(BeEmpty())
|
||||
|
||||
eventsMap := make(map[string]string)
|
||||
err := json.Unmarshal([]byte(jsonArr[0]), &eventsMap)
|
||||
event := events.Event{}
|
||||
err := json.Unmarshal([]byte(jsonArr[0]), &event)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(eventsMap).To(HaveKey("Status"))
|
||||
|
||||
test = podmanTest.Podman([]string{"events", "--stream=false", "--format", "{{json.}}"})
|
||||
test.WaitWithDefaultTimeout()
|
||||
Expect(test).To(Exit(0))
|
||||
@ -147,11 +146,9 @@ var _ = Describe("Podman events", func() {
|
||||
jsonArr = test.OutputToStringArray()
|
||||
Expect(test.OutputToStringArray()).ShouldNot(BeEmpty())
|
||||
|
||||
eventsMap = make(map[string]string)
|
||||
err = json.Unmarshal([]byte(jsonArr[0]), &eventsMap)
|
||||
event = events.Event{}
|
||||
err = json.Unmarshal([]byte(jsonArr[0]), &event)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(eventsMap).To(HaveKey("Status"))
|
||||
})
|
||||
|
||||
It("podman events --until future", func() {
|
||||
|
@ -425,4 +425,25 @@ LABEL "com.example.vendor"="Example Vendor"
|
||||
Expect(result.OutputToStringArray()).To(Not(Equal(result1.OutputToStringArray())))
|
||||
})
|
||||
|
||||
It("podman image prune --filter", func() {
|
||||
dockerfile := `FROM quay.io/libpod/alpine:latest
|
||||
RUN > file
|
||||
`
|
||||
dockerfile2 := `FROM quay.io/libpod/alpine:latest
|
||||
RUN > file2
|
||||
`
|
||||
podmanTest.BuildImageWithLabel(dockerfile, "foobar.com/workdir:latest", "false", "abc")
|
||||
podmanTest.BuildImageWithLabel(dockerfile2, "foobar.com/workdir:latest", "false", "xyz")
|
||||
// --force is used to avoid the y/n confirmation prompt
|
||||
result := podmanTest.Podman([]string{"image", "prune", "--filter", "label=abc", "--force"})
|
||||
result.WaitWithDefaultTimeout()
|
||||
Expect(result).Should(Exit(0))
|
||||
Expect(len(result.OutputToStringArray())).To(Equal(1))
|
||||
|
||||
//check if really abc is removed
|
||||
result = podmanTest.Podman([]string{"image", "list", "--filter", "label=abc"})
|
||||
Expect(len(result.OutputToStringArray())).To(Equal(0))
|
||||
|
||||
})
|
||||
|
||||
})
|
||||
|
@ -173,9 +173,9 @@ var _ = Describe("Podman logs", func() {
|
||||
})
|
||||
|
||||
It("streaming output: "+log, func() {
|
||||
containerName := "logs-f-rm"
|
||||
containerName := "logs-f"
|
||||
|
||||
logc := podmanTest.Podman([]string{"run", "--log-driver", log, "--rm", "--name", containerName, "-dt", ALPINE, "sh", "-c", "echo podman; sleep 1; echo podman"})
|
||||
logc := podmanTest.Podman([]string{"run", "--log-driver", log, "--name", containerName, "-dt", ALPINE, "sh", "-c", "echo podman-1; sleep 1; echo podman-2"})
|
||||
logc.WaitWithDefaultTimeout()
|
||||
Expect(logc).To(Exit(0))
|
||||
|
||||
@ -183,10 +183,8 @@ var _ = Describe("Podman logs", func() {
|
||||
results.WaitWithDefaultTimeout()
|
||||
Expect(results).To(Exit(0))
|
||||
|
||||
// TODO: we should actually check for two podman lines,
|
||||
// but as of 2020-06-17 there's a race condition in which
|
||||
// 'logs -f' may not catch all output from a container
|
||||
Expect(results.OutputToString()).To(ContainSubstring("podman"))
|
||||
Expect(results.OutputToString()).To(ContainSubstring("podman-1"))
|
||||
Expect(results.OutputToString()).To(ContainSubstring("podman-2"))
|
||||
|
||||
// Container should now be terminatING or terminatED, but we
|
||||
// have no guarantee of which: 'logs -f' does not necessarily
|
||||
@ -199,6 +197,10 @@ var _ = Describe("Podman logs", func() {
|
||||
} else {
|
||||
Expect(inspect.ErrorToString()).To(ContainSubstring("no such container"))
|
||||
}
|
||||
|
||||
results = podmanTest.Podman([]string{"rm", "-f", containerName})
|
||||
results.WaitWithDefaultTimeout()
|
||||
Expect(results).To(Exit(0))
|
||||
})
|
||||
|
||||
It("follow output stopped container: "+log, func() {
|
||||
|
@ -6,6 +6,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containers/podman/v3/pkg/rootless"
|
||||
. "github.com/containers/podman/v3/test/utils"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
@ -115,6 +116,12 @@ WantedBy=multi-user.target
|
||||
conData := result.InspectContainerToJSON()
|
||||
Expect(len(conData)).To(Equal(1))
|
||||
Expect(conData[0].Config.SystemdMode).To(BeTrue())
|
||||
|
||||
if CGROUPSV2 || !rootless.IsRootless() {
|
||||
stats := podmanTest.Podman([]string{"stats", "--no-stream", ctrName})
|
||||
stats.WaitWithDefaultTimeout()
|
||||
Expect(stats.ExitCode()).To(Equal(0))
|
||||
}
|
||||
})
|
||||
|
||||
It("podman create container with systemd entrypoint triggers systemd mode", func() {
|
||||
|
@ -794,6 +794,32 @@ EOF
|
||||
run_podman rmi -f build_test
|
||||
}
|
||||
|
||||
@test "podman build -f test " {
|
||||
tmpdir=$PODMAN_TMPDIR/build-test
|
||||
subdir=$tmpdir/subdir
|
||||
mkdir -p $subdir
|
||||
|
||||
containerfile1=$tmpdir/Containerfile1
|
||||
cat >$containerfile1 <<EOF
|
||||
FROM scratch
|
||||
copy . /tmp
|
||||
EOF
|
||||
containerfile2=$PODMAN_TMPDIR/Containerfile2
|
||||
cat >$containerfile2 <<EOF
|
||||
FROM $IMAGE
|
||||
EOF
|
||||
run_podman build -t build_test -f Containerfile1 $tmpdir
|
||||
run_podman 125 build -t build_test -f Containerfile2 $tmpdir
|
||||
is "$output" ".*Containerfile2: no such file or directory" "Containerfile2 should not exist"
|
||||
run_podman build -t build_test -f $containerfile1 $tmpdir
|
||||
run_podman build -t build_test -f $containerfile2 $tmpdir
|
||||
run_podman build -t build_test -f $containerfile1
|
||||
run_podman build -t build_test -f $containerfile2
|
||||
run_podman build -t build_test -f $containerfile1 -f $containerfile2 $tmpdir
|
||||
is "$output" ".*$IMAGE" "Containerfile2 is also passed to server"
|
||||
run_podman rmi -f build_test
|
||||
}
|
||||
|
||||
function teardown() {
|
||||
# A timeout or other error in 'build' can leave behind stale images
|
||||
# that podman can't even see and which will cascade into subsequent
|
||||
|
@ -6,7 +6,6 @@
|
||||
load helpers
|
||||
|
||||
@test "events with a filter by label" {
|
||||
skip_if_remote "FIXME: -remote does not include labels in event output"
|
||||
cname=test-$(random_string 30 | tr A-Z a-z)
|
||||
labelname=$(random_string 10)
|
||||
labelvalue=$(random_string 15)
|
||||
@ -27,7 +26,7 @@ load helpers
|
||||
}
|
||||
|
||||
@test "image events" {
|
||||
skip_if_remote "FIXME: remove events on podman-remote seem to be broken"
|
||||
skip_if_remote "remote does not support --events-backend"
|
||||
pushedDir=$PODMAN_TMPDIR/dir
|
||||
mkdir -p $pushedDir
|
||||
|
||||
@ -86,7 +85,5 @@ function _events_disjunctive_filters() {
|
||||
}
|
||||
|
||||
@test "events with disjunctive filters - default" {
|
||||
# NOTE: the last event for bar doesn't show up reliably.
|
||||
skip_if_remote "FIXME #10529: remote events lose data"
|
||||
_events_disjunctive_filters ""
|
||||
}
|
||||
|
@ -183,7 +183,10 @@ function check_label() {
|
||||
# runc and crun emit different diagnostics
|
||||
runtime=$(podman_runtime)
|
||||
case "$runtime" in
|
||||
crun) expect="\`/proc/thread-self/attr/exec\`: OCI runtime error: unable to assign security attribute" ;;
|
||||
# crun 0.20.1 changes the error message
|
||||
# from /proc/thread-self/attr/exec`: .* unable to assign
|
||||
# to /proc/self/attr/keycreate`: .* unable to process
|
||||
crun) expect="\`/proc/.*\`: OCI runtime error: unable to \(assign\|process\) security attribute" ;;
|
||||
runc) expect="OCI runtime error: .*: failed to set /proc/self/attr/keycreate on procfs" ;;
|
||||
*) skip "Unknown runtime '$runtime'";;
|
||||
esac
|
||||
|
@ -56,8 +56,7 @@ function teardown() {
|
||||
stty rows $rows cols $cols <$PODMAN_TEST_PTY
|
||||
|
||||
# ...and make sure stty under podman reads that.
|
||||
# FIXME: 'sleep 1' is needed for podman-remote; without it, there's
|
||||
run_podman run -it --name mystty $IMAGE sh -c 'sleep 1;stty size' <$PODMAN_TEST_PTY
|
||||
run_podman run -it --name mystty $IMAGE stty size <$PODMAN_TEST_PTY
|
||||
is "$output" "$rows $cols" "stty under podman reads the correct dimensions"
|
||||
}
|
||||
|
||||
|
@@ -27,7 +27,7 @@ const (
// NOTE: remember to bump the version at the top
// of the top-level README.md file when this is
// bumped.
var Version = semver.MustParse("3.2.1-dev")
var Version = semver.MustParse("3.2.2-dev")

// See https://docs.docker.com/engine/api/v1.40/
// libpod compat handlers are expected to honor docker API versions

@@ -38,7 +38,7 @@ var Version = semver.MustParse("3.2.1-dev")
var APIVersion = map[Tree]map[Level]semver.Version{
Libpod: {
CurrentAPI: Version,
MinimalAPI: semver.MustParse("3.2.0"),
MinimalAPI: semver.MustParse("3.1.0"),
},
Compat: {
CurrentAPI: semver.MustParse("1.40.0"),