vendor: bump containers/(storage, common, buildah, image)

Bump containers/(storage, common, buildah and image)

Changes since 2023-01-01:
 - skip mount-cache-selinux-long-name test under remote, with
   a FIXME requesting that someone see if it can be made to work.

 - skip six tests that fail under rootless-remote

 - add new --build-arg-file option
 - update man page
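   (for reference: the new option takes a file of ARG=VALUE build arguments,
   one per line, e.g. --build-arg-file=buildargs.conf, and can be combined
   with individual --build-arg flags; the filename here is only an example,
   see the buildah documentation for the exact file format)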

Squash of:
* cf56eb1865
* 561f082772

Signed-off-by: Ed Santiago <santiago@redhat.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Signed-off-by: Aditya R <arajan@redhat.com>
Aditya R
2023-04-10 17:02:21 +05:30
parent c04ccdbc55
commit 260bc3ec4c
274 changed files with 19599 additions and 1689 deletions


@@ -112,33 +112,33 @@ type Layer struct {
Created time.Time `json:"created,omitempty"`
// CompressedDigest is the digest of the blob that was last passed to
// ApplyDiff() or Put(), as it was presented to us.
// ApplyDiff() or create(), as it was presented to us.
CompressedDigest digest.Digest `json:"compressed-diff-digest,omitempty"`
// CompressedSize is the length of the blob that was last passed to
// ApplyDiff() or Put(), as it was presented to us. If
// ApplyDiff() or create(), as it was presented to us. If
// CompressedDigest is not set, this should be treated as if it were an
// uninitialized value.
CompressedSize int64 `json:"compressed-size,omitempty"`
// UncompressedDigest is the digest of the blob that was last passed to
// ApplyDiff() or Put(), after we decompressed it. Often referred to
// ApplyDiff() or create(), after we decompressed it. Often referred to
// as a DiffID.
UncompressedDigest digest.Digest `json:"diff-digest,omitempty"`
// UncompressedSize is the length of the blob that was last passed to
// ApplyDiff() or Put(), after we decompressed it. If
// ApplyDiff() or create(), after we decompressed it. If
// UncompressedDigest is not set, this should be treated as if it were
// an uninitialized value.
UncompressedSize int64 `json:"diff-size,omitempty"`
// CompressionType is the type of compression which we detected on the blob
// that was last passed to ApplyDiff() or Put().
// that was last passed to ApplyDiff() or create().
CompressionType archive.Compression `json:"compression,omitempty"`
// UIDs and GIDs are lists of UIDs and GIDs used in the layer. This
// field is only populated (i.e., will only contain one or more
// entries) if the layer was created using ApplyDiff() or Put().
// entries) if the layer was created using ApplyDiff() or create().
UIDs []uint32 `json:"uidset,omitempty"`
GIDs []uint32 `json:"gidset,omitempty"`
@@ -248,20 +248,15 @@ type rwLayerStore interface {
// stopWriting releases locks obtained by startWriting.
stopWriting()
// Create creates a new layer, optionally giving it a specified ID rather than
// create creates a new layer, optionally giving it a specified ID rather than
// a randomly-generated one, either inheriting data from another specified
// layer or the empty base layer. The new layer can optionally be given names
// and have an SELinux label specified for use when mounting it. Some
// underlying drivers can accept a "size" option. At this time, most
// underlying drivers do not themselves distinguish between writeable
// and read-only layers.
Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (*Layer, error)
// CreateWithFlags combines the functions of Create and SetFlag.
CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error)
// Put combines the functions of CreateWithFlags and ApplyDiff.
Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error)
// and read-only layers. Returns the new layer structure and the size of the
// diff which was applied to its parent to initialize its contents.
create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader) (*Layer, int64, error)
// updateNames modifies names associated with a layer based on (op, names).
updateNames(id string, names []string, op updateNameOperation) error
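
// NOTE (illustration, not part of the diff): the exported Store API is not
// affected by this consolidation. Callers still use Store.CreateLayer for an
// empty layer and Store.PutLayer when they also have a diff to apply; both
// now end up in the single unexported create(). A minimal sketch, assuming
// the vendored github.com/containers/storage module:
//
//	func makeLayers(store storage.Store, parentID string, diff io.Reader) error {
//		// Empty writable layer with a random ID and no names.
//		if _, err := store.CreateLayer("", parentID, nil, "", true, nil); err != nil {
//			return err
//		}
//		// Layer initialized from a tar diff; the int64 result is the diff's size.
//		_, _, err := store.PutLayer("", parentID, nil, "", true, nil, diff)
//		return err
//	}
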
@@ -1186,8 +1181,10 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
// TODO: check if necessary fields are filled
r.layers = append(r.layers, layer)
// This can only fail on duplicate IDs, which shouldn't happen — and in that case the index is already in the desired state anyway.
// Implementing recovery from an unlikely and unimportant failure here would be too risky.
// This can only fail on duplicate IDs, which shouldn't happen — and in
// that case the index is already in the desired state anyway.
// Implementing recovery from an unlikely and unimportant failure here
// would be too risky.
_ = r.idindex.Add(id)
r.byid[id] = layer
for _, name := range names { // names got from the additional layer store won't be used
@@ -1200,8 +1197,8 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
}
if err := r.saveFor(layer); err != nil {
if err2 := r.driver.Remove(id); err2 != nil {
logrus.Errorf("While recovering from a failure to save layers, error deleting layer %#v: %v", id, err2)
if e := r.Delete(layer.ID); e != nil {
logrus.Errorf("While recovering from a failure to save layers, error deleting layer %#v: %v", id, e)
}
return nil, err
}
@@ -1209,7 +1206,10 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
}
// Requires startWriting.
func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) {
func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader) (layer *Layer, size int64, err error) {
if moreOptions == nil {
moreOptions = &LayerOptions{}
}
if !r.lockfile.IsReadWrite() {
return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
@@ -1230,7 +1230,7 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
if duplicateLayer, idInUse := r.byid[id]; idInUse {
return duplicateLayer, -1, ErrDuplicateID
}
names = dedupeNames(names)
names = dedupeStrings(names)
for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse {
return nil, -1, ErrDuplicateName
@@ -1252,7 +1252,6 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
templateTSdata []byte
)
if moreOptions.TemplateLayer != "" {
var tserr error
templateLayer, ok := r.lookup(moreOptions.TemplateLayer)
if !ok {
return nil, -1, ErrLayerUnknown
@@ -1263,9 +1262,9 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize
templateCompressionType = templateLayer.CompressionType
templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...)
templateTSdata, tserr = os.ReadFile(r.tspath(templateLayer.ID))
if tserr != nil && !os.IsNotExist(tserr) {
return nil, -1, tserr
templateTSdata, err = os.ReadFile(r.tspath(templateLayer.ID))
if err != nil && !errors.Is(err, os.ErrNotExist) {
return nil, -1, err
}
} else {
templateIDMappings = &idtools.IDMappings{}
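
// NOTE (illustration, not part of the diff): errors.Is(err, os.ErrNotExist) is
// preferred over os.IsNotExist(err) because it also matches errors wrapped
// with %w, for example:
//
//	_, err := os.ReadFile("/no/such/file")
//	wrapped := fmt.Errorf("reading template tar-split data: %w", err)
//	fmt.Println(os.IsNotExist(wrapped))             // false
//	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true
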
@@ -1279,9 +1278,10 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
selinux.ReserveLabel(mountLabel)
}
// Before actually creating the layer, make a persistent record of it with incompleteFlag,
// so that future processes have a chance to delete it.
layer := &Layer{
// Before actually creating the layer, make a persistent record of it
// with the incomplete flag set, so that future processes have a chance
// to clean up after it.
layer = &Layer{
ID: id,
Parent: parent,
Names: names,
@@ -1295,98 +1295,109 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
CompressionType: templateCompressionType,
UIDs: templateUIDs,
GIDs: templateGIDs,
Flags: make(map[string]interface{}),
Flags: copyStringInterfaceMap(moreOptions.Flags),
UIDMap: copyIDMap(moreOptions.UIDMap),
GIDMap: copyIDMap(moreOptions.GIDMap),
BigDataNames: []string{},
volatileStore: moreOptions.Volatile,
}
layer.Flags[incompleteFlag] = true
r.layers = append(r.layers, layer)
// This can only fail if the ID is already missing, which shouldn't happen — and in that case the index is already in the desired state anyway.
// This is on various paths to recover from failures, so this should be robust against partially missing data.
// This can only fail if the ID is already missing, which shouldn't
// happen — and in that case the index is already in the desired state
// anyway. This is on various paths to recover from failures, so this
// should be robust against partially missing data.
_ = r.idindex.Add(id)
r.byid[id] = layer
for _, name := range names {
r.byname[name] = layer
}
for flag, value := range flags {
layer.Flags[flag] = value
}
layer.Flags[incompleteFlag] = true
succeeded := false
cleanupFailureContext := ""
defer func() {
if !succeeded {
// On any error, try both removing the driver's data as well
// as the in-memory layer record.
if err2 := r.Delete(layer.ID); err2 != nil {
if cleanupFailureContext == "" {
cleanupFailureContext = "unknown: cleanupFailureContext not set at the failure site"
}
logrus.Errorf("While recovering from a failure (%s), error deleting layer %#v: %v", cleanupFailureContext, layer.ID, err2)
if err != nil {
// now that the in-memory structures know about the new
// record, we can use regular Delete() to clean up if
// anything breaks from here on out
if cleanupFailureContext == "" {
cleanupFailureContext = "unknown: cleanupFailureContext not set at the failure site"
}
if e := r.Delete(id); e != nil {
logrus.Errorf("While recovering from a failure (%s), error deleting layer %#v: %v", cleanupFailureContext, id, e)
}
}
}()
err := r.saveFor(layer)
if err != nil {
if err = r.saveFor(layer); err != nil {
cleanupFailureContext = "saving incomplete layer metadata"
return nil, -1, err
}
for _, item := range moreOptions.BigData {
if err = r.setBigData(layer, item.Key, item.Data); err != nil {
cleanupFailureContext = fmt.Sprintf("saving big data item %q", item.Key)
return nil, -1, err
}
}
idMappings := idtools.NewIDMappingsFromMaps(moreOptions.UIDMap, moreOptions.GIDMap)
opts := drivers.CreateOpts{
MountLabel: mountLabel,
StorageOpt: options,
IDMappings: idMappings,
}
if moreOptions.TemplateLayer != "" {
if err := r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil {
cleanupFailureContext = "creating a layer from template"
if err = r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil {
cleanupFailureContext = fmt.Sprintf("creating a layer from template layer %q", moreOptions.TemplateLayer)
return nil, -1, fmt.Errorf("creating copy of template layer %q with ID %q: %w", moreOptions.TemplateLayer, id, err)
}
oldMappings = templateIDMappings
} else {
if writeable {
if err := r.driver.CreateReadWrite(id, parent, &opts); err != nil {
if err = r.driver.CreateReadWrite(id, parent, &opts); err != nil {
cleanupFailureContext = "creating a read-write layer"
return nil, -1, fmt.Errorf("creating read-write layer with ID %q: %w", id, err)
}
} else {
if err := r.driver.Create(id, parent, &opts); err != nil {
if err = r.driver.Create(id, parent, &opts); err != nil {
cleanupFailureContext = "creating a read-only layer"
return nil, -1, fmt.Errorf("creating layer with ID %q: %w", id, err)
return nil, -1, fmt.Errorf("creating read-only layer with ID %q: %w", id, err)
}
}
oldMappings = parentMappings
}
if !reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs()) {
if err := r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil {
if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil {
cleanupFailureContext = "in UpdateLayerIDMap"
return nil, -1, err
}
}
if len(templateTSdata) > 0 {
if err := os.MkdirAll(filepath.Dir(r.tspath(id)), 0o700); err != nil {
if err = os.MkdirAll(filepath.Dir(r.tspath(id)), 0o700); err != nil {
cleanupFailureContext = "creating tar-split parent directory for a copy from template"
return nil, -1, err
}
if err := ioutils.AtomicWriteFile(r.tspath(id), templateTSdata, 0o600); err != nil {
if err = ioutils.AtomicWriteFile(r.tspath(id), templateTSdata, 0o600); err != nil {
cleanupFailureContext = "creating a tar-split copy from template"
return nil, -1, err
}
}
var size int64 = -1
size = -1
if diff != nil {
size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff)
if err != nil {
if size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff); err != nil {
cleanupFailureContext = "applying layer diff"
return nil, -1, err
}
} else {
// applyDiffWithOptions in the `diff != nil` case handles this bit for us
// applyDiffWithOptions() would have updated r.bycompressedsum
// and r.byuncompressedsum for us, but if we used a template
// layer, we didn't call it, so add the new layer as candidates
// for searches for layers by checksum
if layer.CompressedDigest != "" {
r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
}
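
// NOTE (illustration, not part of the diff): the deferred cleanup above can
// check err directly because create() now uses named return values, so the
// closure sees whatever error is about to be returned. A minimal standalone
// sketch of the same pattern (stepOne and rollBack are placeholders, not
// functions from this package):
//
//	func createThing() (err error) {
//		cleanupFailureContext := ""
//		defer func() {
//			if err != nil {
//				logrus.Errorf("While recovering from a failure (%s), rolling back", cleanupFailureContext)
//				rollBack()
//			}
//		}()
//		if err = stepOne(); err != nil {
//			cleanupFailureContext = "running step one"
//			return err
//		}
//		return nil
//	}
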
@@ -1394,29 +1405,17 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
}
}
delete(layer.Flags, incompleteFlag)
err = r.saveFor(layer)
if err != nil {
if err = r.saveFor(layer); err != nil {
cleanupFailureContext = "saving finished layer metadata"
return nil, -1, err
}
layer = copyLayer(layer)
succeeded = true
return layer, size, err
}
// Requires startWriting.
func (r *layerStore) CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error) {
layer, _, err = r.Put(id, parent, names, mountLabel, options, moreOptions, writeable, flags, nil)
return layer, err
}
// Requires startWriting.
func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (layer *Layer, err error) {
return r.CreateWithFlags(id, parent, names, mountLabel, options, moreOptions, writeable, nil)
}
// Requires startReading or startWriting.
func (r *layerStore) Mounted(id string) (int, error) {
if !r.lockfile.IsReadWrite() {
@@ -1677,9 +1676,6 @@ func (r *layerStore) BigData(id, key string) (io.ReadCloser, error) {
// Requires startWriting.
func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
if key == "" {
return fmt.Errorf("can't set empty name for layer big data item: %w", ErrInvalidBigDataName)
}
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to save data items associated with layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
@@ -1687,6 +1683,13 @@ func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
if !ok {
return fmt.Errorf("locating layer with ID %q to write bigdata: %w", id, ErrLayerUnknown)
}
return r.setBigData(layer, key, data)
}
func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error {
if key == "" {
return fmt.Errorf("can't set empty name for layer big data item: %w", ErrInvalidBigDataName)
}
err := os.MkdirAll(r.datadir(layer.ID), 0700)
if err != nil {
return err
@@ -1759,7 +1762,9 @@ func (r *layerStore) tspath(id string) string {
// layerHasIncompleteFlag returns true if layer.Flags contains an incompleteFlag set to true
// The caller must hold r.inProcessLock for reading.
func layerHasIncompleteFlag(layer *Layer) bool {
// layer.Flags[…] is defined to succeed and return ok == false if Flags == nil
if layer.Flags == nil {
return false
}
if flagValue, ok := layer.Flags[incompleteFlag]; ok {
if b, ok := flagValue.(bool); ok && b {
return true
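
// NOTE (illustration, not part of the diff): indexing a nil map is legal in Go
// and reports ok == false, which is the guarantee the removed comment relied
// on; the explicit nil check just states it directly. For example:
//
//	var flags map[string]interface{} // nil
//	v, ok := flags[incompleteFlag]   // v == nil, ok == false, no panic
//	_, _ = v, ok
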
@@ -1788,20 +1793,21 @@ func (r *layerStore) deleteInternal(id string) error {
}
}
// We never unset incompleteFlag; below, we remove the entire object from r.layers.
id = layer.ID
if err := r.driver.Remove(id); err != nil {
if err := r.driver.Remove(id); err != nil && !errors.Is(err, os.ErrNotExist) {
return err
}
os.Remove(r.tspath(id))
os.RemoveAll(r.datadir(id))
delete(r.byid, id)
for _, name := range layer.Names {
delete(r.byname, name)
}
// This can only fail if the ID is already missing, which shouldn't happen — and in that case the index is already in the desired state anyway.
// The store's Delete method is used on various paths to recover from failures, so this should be robust against partially missing data.
// This can only fail if the ID is already missing, which shouldn't
// happen — and in that case the index is already in the desired state
// anyway. The store's Delete method is used on various paths to
// recover from failures, so this should be robust against partially
// missing data.
_ = r.idindex.Delete(id)
mountLabel := layer.MountLabel
if layer.MountPoint != "" {
@@ -1835,7 +1841,6 @@ func (r *layerStore) deleteInternal(id string) error {
selinux.ReleaseLabel(mountLabel)
}
}
return nil
}