fix(deps): update github.com/containers/libhvee digest to 0ff33af

Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
renovate[bot] authored 2024-03-04 13:51:06 +00:00, committed by GitHub
parent 5a4864c340
commit 8c92228482
8 changed files with 250 additions and 195 deletions

go.mod

@@ -15,10 +15,10 @@ require (
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/gvisor-tap-vsock v0.7.3
github.com/containers/image/v5 v5.29.3-0.20240229213915-cdc68020a24f
github.com/containers/libhvee v0.6.1-0.20240225143609-c1bda9d3838c
github.com/containers/libhvee v0.6.1-0.20240301191848-0ff33af3be2d
github.com/containers/ocicrypt v1.1.9
github.com/containers/psgo v1.9.0
github.com/containers/storage v1.52.1-0.20240229151339-eadc620e74e7
github.com/containers/storage v1.52.1-0.20240301185114-bdd7d8188030
github.com/containers/winquit v1.1.0
github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09
github.com/coreos/stream-metadata-go v0.4.4
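
For reference, a Renovate digest bump like this one can be reproduced by hand from a checkout of the repository; a sketch (the bot generated the actual commit):

    go get github.com/containers/libhvee@0ff33af3be2d
    go get github.com/containers/storage@bdd7d8188030
    go mod tidy
    go mod vendor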

go.sum

@@ -84,8 +84,8 @@ github.com/containers/gvisor-tap-vsock v0.7.3 h1:yORnf15sP+sLFhxLNLgmB5/lOhldn9d
github.com/containers/gvisor-tap-vsock v0.7.3/go.mod h1:NI1fLMtKXQZoDrrOeqryGz7x7j/XSFWRmQILva7Fu9c=
github.com/containers/image/v5 v5.29.3-0.20240229213915-cdc68020a24f h1:DEK6PaY5/B6CYXjtdfAQGCUltHEPaoXvLb+C0PH6HiE=
github.com/containers/image/v5 v5.29.3-0.20240229213915-cdc68020a24f/go.mod h1:a48d1rhHBl2zb630MSf20QQo4eIlIQvhZTqTcVJhbpA=
github.com/containers/libhvee v0.6.1-0.20240225143609-c1bda9d3838c h1:C80Xw6cDHkx0zMJk/Qkczcz/1OOVEF9+6iHuEZbD47k=
github.com/containers/libhvee v0.6.1-0.20240225143609-c1bda9d3838c/go.mod h1:zX7HGsRwCxBOpzc8Jvwq2aEaECsb2q5/l5HqB9n7UPc=
github.com/containers/libhvee v0.6.1-0.20240301191848-0ff33af3be2d h1:UapmAtc33jKPaZgjcL1+3uya/5j6Bnod8k+hokySyak=
github.com/containers/libhvee v0.6.1-0.20240301191848-0ff33af3be2d/go.mod h1:/rNb2NTQtsH/fYU4LDd2ofIRdFC1+l6d4ZeDDz8Klyw=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/luksy v0.0.0-20240129181507-b62d551ce6d8 h1:0p58QJRICjkRVCDix1nsnyrtJ3Qj4CWcGd1bOEY9sVY=
@@ -94,8 +94,8 @@ github.com/containers/ocicrypt v1.1.9 h1:2Csfba4jse85Raxk5HIyEk8OwZNjRvfkhEGijOj
github.com/containers/ocicrypt v1.1.9/go.mod h1:dTKx1918d8TDkxXvarscpNVY+lyPakPNFN4jwA9GBys=
github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g=
github.com/containers/psgo v1.9.0/go.mod h1:0YoluUm43Mz2UnBIh1P+6V6NWcbpTL5uRtXyOcH0B5A=
github.com/containers/storage v1.52.1-0.20240229151339-eadc620e74e7 h1:rL6WUiGo7uyDY8WRfpLidnzSZuPPepcQLuc9p29Y9k8=
github.com/containers/storage v1.52.1-0.20240229151339-eadc620e74e7/go.mod h1:mFA6QpUoT9qTa3q2DD1CvSo3Az3syNkw1P9X+4nUYdY=
github.com/containers/storage v1.52.1-0.20240301185114-bdd7d8188030 h1:2Ksbho+rB+bkVbGMuaCAHPZnrkL9heCdisQIhnTWp+8=
github.com/containers/storage v1.52.1-0.20240301185114-bdd7d8188030/go.mod h1:pujcoOSc+upx15Jirdkebhtd8uJiLwbSd/mYT6zDJK8=
github.com/containers/winquit v1.1.0 h1:jArun04BNDQvt2W0Y78kh9TazN2EIEMG5Im6/JY7+pE=
github.com/containers/winquit v1.1.0/go.mod h1:PsPeZlnbkmGGIToMPHF1zhWjBUkd8aHjMOr/vFcPxw8=
github.com/coreos/go-oidc/v3 v3.9.0 h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo=

vendor/github.com/containers/storage/drivers/overlay/overlay.go

@@ -310,16 +310,6 @@ func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool {
// If overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error.
// If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned.
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
// If custom --imagestore is selected never
// ditch the original graphRoot, instead add it as
// additionalImageStore so its images can still be
// read and used.
if options.ImageStore != "" {
graphRootAsAdditionalStore := fmt.Sprintf("AdditionalImageStore=%s", options.ImageStore)
options.DriverOptions = append(options.DriverOptions, graphRootAsAdditionalStore)
// complete base name with driver name included
options.ImageStore = filepath.Join(options.ImageStore, "overlay")
}
opts, err := parseOptions(options.DriverOptions)
if err != nil {
return nil, err
@@ -863,22 +853,15 @@ func (d *Driver) Status() [][2]string {
// Metadata returns meta data about the overlay driver such as
// LowerDir, UpperDir, WorkDir and MergeDir used to store data.
func (d *Driver) Metadata(id string) (map[string]string, error) {
dir, imagestore, _ := d.dir2(id)
dir := d.dir(id)
if _, err := os.Stat(dir); err != nil {
return nil, err
}
workDirBase := dir
if imagestore != "" {
if _, err := os.Stat(dir); err != nil {
return nil, err
}
workDirBase = imagestore
}
metadata := map[string]string{
"WorkDir": path.Join(workDirBase, "work"),
"MergedDir": path.Join(workDirBase, "merged"),
"UpperDir": path.Join(workDirBase, "diff"),
"WorkDir": path.Join(dir, "work"),
"MergedDir": path.Join(dir, "merged"),
"UpperDir": path.Join(dir, "diff"),
}
lowerDirs, err := d.getLowerDirs(id)
@@ -896,7 +879,7 @@ func (d *Driver) Metadata(id string) (map[string]string, error) {
// is being shutdown. For now, we just have to unmount the bind mounted
// we had created.
func (d *Driver) Cleanup() error {
_ = os.RemoveAll(d.getStagingDir())
_ = os.RemoveAll(filepath.Join(d.home, stagingDir))
return mount.Unmount(d.home)
}
@@ -992,8 +975,10 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr
return d.create(id, parent, opts, true)
}
func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disableQuota bool) (retErr error) {
dir, imageStore, _ := d.dir2(id)
func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnly bool) (retErr error) {
dir, homedir, _ := d.dir2(id, readOnly)
disableQuota := readOnly
uidMaps := d.uidMaps
gidMaps := d.gidMaps
@@ -1004,7 +989,7 @@
}
// Make the link directory if it does not exist
if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0o755, 0, 0); err != nil {
if err := idtools.MkdirAllAs(path.Join(homedir, linkDir), 0o755, 0, 0); err != nil {
return err
}
@@ -1021,20 +1006,8 @@
if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0o755, idPair); err != nil {
return err
}
workDirBase := dir
if imageStore != "" {
workDirBase = imageStore
if err := idtools.MkdirAllAndChownNew(path.Dir(imageStore), 0o755, idPair); err != nil {
return err
}
}
if parent != "" {
parentBase, parentImageStore, inAdditionalStore := d.dir2(parent)
// If parentBase path is additional image store, select the image contained in parentBase.
// See https://github.com/containers/podman/issues/19748
if parentImageStore != "" && !inAdditionalStore {
parentBase = parentImageStore
}
parentBase := d.dir(parent)
st, err := system.Stat(filepath.Join(parentBase, "diff"))
if err != nil {
return err
@@ -1055,11 +1028,6 @@
if err := idtools.MkdirAllAndChownNew(dir, 0o700, idPair); err != nil {
return err
}
if imageStore != "" {
if err := idtools.MkdirAllAndChownNew(imageStore, 0o700, idPair); err != nil {
return err
}
}
defer func() {
// Clean up on failure
@@ -1067,11 +1035,6 @@
if err2 := os.RemoveAll(dir); err2 != nil {
logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2)
}
if imageStore != "" {
if err2 := os.RemoveAll(workDirBase); err2 != nil {
logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", workDirBase, err2)
}
}
}
}()
@@ -1094,11 +1057,6 @@
if err := d.quotaCtl.SetQuota(dir, quota); err != nil {
return err
}
if imageStore != "" {
if err := d.quotaCtl.SetQuota(imageStore, quota); err != nil {
return err
}
}
}
perms := defaultPerms
@@ -1107,12 +1065,7 @@
}
if parent != "" {
parentBase, parentImageStore, inAdditionalStore := d.dir2(parent)
// If parentBase path is additional image store, select the image contained in parentBase.
// See https://github.com/containers/podman/issues/19748
if parentImageStore != "" && !inAdditionalStore {
parentBase = parentImageStore
}
parentBase := d.dir(parent)
st, err := system.Stat(filepath.Join(parentBase, "diff"))
if err != nil {
return err
@@ -1120,17 +1073,14 @@
perms = os.FileMode(st.Mode())
}
if err := idtools.MkdirAs(path.Join(workDirBase, "diff"), perms, rootUID, rootGID); err != nil {
if err := idtools.MkdirAs(path.Join(dir, "diff"), perms, rootUID, rootGID); err != nil {
return err
}
lid := generateID(idLength)
linkBase := path.Join("..", id, "diff")
if imageStore != "" {
linkBase = path.Join(imageStore, "diff")
}
if err := os.Symlink(linkBase, path.Join(d.home, linkDir, lid)); err != nil {
if err := os.Symlink(linkBase, path.Join(homedir, linkDir, lid)); err != nil {
return err
}
@@ -1139,10 +1089,10 @@
return err
}
if err := idtools.MkdirAs(path.Join(workDirBase, "work"), 0o700, rootUID, rootGID); err != nil {
if err := idtools.MkdirAs(path.Join(dir, "work"), 0o700, rootUID, rootGID); err != nil {
return err
}
if err := idtools.MkdirAs(path.Join(workDirBase, "merged"), 0o700, rootUID, rootGID); err != nil {
if err := idtools.MkdirAs(path.Join(dir, "merged"), 0o700, rootUID, rootGID); err != nil {
return err
}
@@ -1224,26 +1174,39 @@ func (d *Driver) getLower(parent string) (string, error) {
}
func (d *Driver) dir(id string) string {
p, _, _ := d.dir2(id)
p, _, _ := d.dir2(id, false)
return p
}
func (d *Driver) dir2(id string) (string, string, bool) {
newpath := path.Join(d.home, id)
imageStore := ""
func (d *Driver) getAllImageStores() []string {
additionalImageStores := d.AdditionalImageStores()
if d.imageStore != "" {
imageStore = path.Join(d.imageStore, id)
additionalImageStores = append([]string{d.imageStore}, additionalImageStores...)
}
return additionalImageStores
}
func (d *Driver) dir2(id string, useImageStore bool) (string, string, bool) {
var homedir string
if useImageStore && d.imageStore != "" {
homedir = path.Join(d.imageStore, d.name)
} else {
homedir = d.home
}
newpath := path.Join(homedir, id)
if _, err := os.Stat(newpath); err != nil {
for _, p := range d.AdditionalImageStores() {
for _, p := range d.getAllImageStores() {
l := path.Join(p, d.name, id)
_, err = os.Stat(l)
if err == nil {
return l, imageStore, true
return l, homedir, true
}
}
}
return newpath, imageStore, false
return newpath, homedir, false
}
func (d *Driver) getLowerDirs(id string) ([]string, error) {
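
In short, dir2 now resolves a layer under the split image store only when the caller asks for it (useImageStore), and on a missed stat probes --imagestore first and then the read-only additional stores. A minimal standalone sketch of that lookup order, assuming illustrative names rather than the driver's API:

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// resolveLayerDir mirrors the new dir2/getAllImageStores logic above; the
// function and its signature are hypothetical, for illustration only.
func resolveLayerDir(home, imageStore, driverName, id string, useImageStore bool, additional []string) (string, bool) {
    base := home
    if useImageStore && imageStore != "" {
        base = filepath.Join(imageStore, driverName)
    }
    newpath := filepath.Join(base, id)
    if _, err := os.Stat(newpath); err != nil {
        // Not in the primary location: try --imagestore, then the
        // additional read-only image stores.
        stores := additional
        if imageStore != "" {
            stores = append([]string{imageStore}, additional...)
        }
        for _, s := range stores {
            if l := filepath.Join(s, driverName, id); pathExists(l) {
                return l, true // found in an image store
            }
        }
    }
    return newpath, false
}

func pathExists(p string) bool { _, err := os.Stat(p); return err == nil }

func main() {
    dir, inStore := resolveLayerDir("/var/lib/containers/storage/overlay", "", "overlay", "abc", false, nil)
    fmt.Println(dir, inStore)
}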
@@ -1453,14 +1416,11 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
}
func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) {
dir, imageStore, inAdditionalStore := d.dir2(id)
dir, _, inAdditionalStore := d.dir2(id, false)
if _, err := os.Stat(dir); err != nil {
return "", err
}
workDirBase := dir
if imageStore != "" {
workDirBase = imageStore
}
readWrite := !inAdditionalStore
if !d.SupportsShifting() || options.DisableShifting {
@@ -1565,7 +1525,7 @@
}()
composeFsLayers := []string{}
composeFsLayersDir := filepath.Join(workDirBase, "composefs-layers")
composeFsLayersDir := filepath.Join(dir, "composefs-layers")
maybeAddComposefsMount := func(lowerID string, i int, readWrite bool) (string, error) {
composefsBlob := d.getComposefsData(lowerID)
_, err = os.Stat(composefsBlob)
@@ -1599,7 +1559,7 @@
return dest, nil
}
diffDir := path.Join(workDirBase, "diff")
diffDir := path.Join(dir, "diff")
if dest, err := maybeAddComposefsMount(id, 0, readWrite); err != nil {
return "", err
@@ -1617,7 +1577,7 @@
lower := ""
newpath := path.Join(d.home, l)
if st, err := os.Stat(newpath); err != nil {
for _, p := range d.AdditionalImageStores() {
for _, p := range d.getAllImageStores() {
lower = path.Join(p, d.name, l)
if st2, err2 := os.Stat(lower); err2 == nil {
if !permsKnown {
@@ -1685,16 +1645,16 @@
optsList = append(optsList, "metacopy=on", "redirect_dir=on")
}
if len(absLowers) == 0 {
absLowers = append(absLowers, path.Join(dir, "empty"))
}
// user namespace requires this to move a directory from lower to upper.
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps)
if err != nil {
return "", err
}
if len(absLowers) == 0 {
absLowers = append(absLowers, path.Join(dir, "empty"))
}
if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil {
if !inAdditionalStore {
return "", err
@@ -1705,7 +1665,7 @@
}
}
mergedDir := path.Join(workDirBase, "merged")
mergedDir := path.Join(dir, "merged")
// Create the driver merged dir
if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) {
return "", err
@@ -1723,7 +1683,7 @@
}
}()
workdir := path.Join(workDirBase, "work")
workdir := path.Join(dir, "work")
if d.options.mountProgram == "" && unshare.IsRootless() {
optsList = append(optsList, "userxattr")
@@ -1873,7 +1833,7 @@
// Put unmounts the mount path created for the give id.
func (d *Driver) Put(id string) error {
dir, _, inAdditionalStore := d.dir2(id)
dir, _, inAdditionalStore := d.dir2(id, false)
if _, err := os.Stat(dir); err != nil {
return err
}
@@ -2042,8 +2002,9 @@ func (g *overlayFileGetter) Close() error {
return nil
}
func (d *Driver) getStagingDir() string {
return filepath.Join(d.home, stagingDir)
func (d *Driver) getStagingDir(id string) string {
_, homedir, _ := d.dir2(id, d.imageStore != "")
return filepath.Join(homedir, stagingDir)
}
// DiffGetter returns a FileGetCloser that can read files from the directory that
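
getStagingDir is now keyed by the layer id and, when --imagestore is configured, resolves under the image store instead of the graph root. A plausible reading (an assumption, not stated in the diff) is that staging must share a filesystem with the layer's final diff directory so the eventual rename cannot fail with EXDEV; a self-contained sketch of that flow with invented paths:

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

func main() {
    store, err := os.MkdirTemp("", "imagestore") // stands in for the store root
    if err != nil {
        panic(err)
    }
    defer os.RemoveAll(store)

    // Staging lives under the same root that will hold the layer...
    staging := filepath.Join(store, "staging")
    if err := os.MkdirAll(staging, 0o700); err != nil {
        panic(err)
    }
    applyDir, err := os.MkdirTemp(staging, "")
    if err != nil {
        panic(err)
    }

    // ...so moving staged content into its final diff directory is a
    // same-filesystem rename.
    diffDir := filepath.Join(store, "layer-id", "diff")
    if err := os.MkdirAll(filepath.Dir(diffDir), 0o700); err != nil {
        panic(err)
    }
    fmt.Println(os.Rename(applyDir, diffDir)) // <nil>
}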
@@ -2100,11 +2061,12 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
var applyDir string
if id == "" {
err := os.MkdirAll(d.getStagingDir(), 0o700)
stagingDir := d.getStagingDir(id)
err := os.MkdirAll(stagingDir, 0o700)
if err != nil && !os.IsExist(err) {
return graphdriver.DriverWithDifferOutput{}, err
}
applyDir, err = os.MkdirTemp(d.getStagingDir(), "")
applyDir, err = os.MkdirTemp(stagingDir, "")
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
@@ -2148,7 +2110,7 @@
// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) error {
stagingDirectory := diffOutput.Target
if filepath.Dir(stagingDirectory) != d.getStagingDir() {
if filepath.Dir(stagingDirectory) != d.getStagingDir(id) {
return fmt.Errorf("%q is not a staging directory", stagingDirectory)
}
diffPath, err := d.getDiffPath(id)
@@ -2234,12 +2196,8 @@ func (d *Driver) getComposefsData(id string) string {
}
func (d *Driver) getDiffPath(id string) (string, error) {
dir, imagestore, _ := d.dir2(id)
base := dir
if imagestore != "" {
base = imagestore
}
return redirectDiffIfAdditionalLayer(path.Join(base, "diff"))
dir := d.dir(id)
return redirectDiffIfAdditionalLayer(path.Join(dir, "diff"))
}
func (d *Driver) getLowerDiffPaths(id string) ([]string, error) {
@@ -2330,12 +2288,8 @@ func (d *Driver) AdditionalImageStores() []string {
// by toContainer to those specified by toHost.
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
var err error
dir, imagestore, _ := d.dir2(id)
base := dir
if imagestore != "" {
base = imagestore
}
diffDir := filepath.Join(base, "diff")
dir := d.dir(id)
diffDir := filepath.Join(dir, "diff")
rootUID, rootGID := 0, 0
if toHost != nil {

vendor/github.com/containers/storage/drivers/vfs/driver.go

@@ -31,8 +31,9 @@ func init() {
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
d := &Driver{
name: "vfs",
homes: []string{home},
home: home,
idMappings: idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
imageStore: options.ImageStore,
}
rootIDs := d.idMappings.RootPair()
@@ -47,7 +48,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
key = strings.ToLower(key)
switch key {
case "vfs.imagestore", ".imagestore":
d.homes = append(d.homes, strings.Split(val, ",")...)
d.additionalHomes = append(d.additionalHomes, strings.Split(val, ",")...)
continue
case "vfs.mountopt":
return nil, fmt.Errorf("vfs driver does not support mount options")
@@ -62,12 +63,7 @@
return nil, fmt.Errorf("vfs driver does not support %s options", key)
}
}
// If --imagestore is provided, lets add writable graphRoot
// to vfs's additional image store, as it is done for
// `overlay` driver.
if options.ImageStore != "" {
d.homes = append(d.homes, options.ImageStore)
}
d.updater = graphdriver.NewNaiveLayerIDMapUpdater(d)
d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d.updater)
@@ -80,11 +76,13 @@
// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver
type Driver struct {
name string
homes []string
home string
additionalHomes []string
idMappings *idtools.IDMappings
ignoreChownErrors bool
naiveDiff graphdriver.DiffDriver
updater graphdriver.LayerIDMapUpdater
imageStore string
}
func (d *Driver) String() string {
@@ -158,7 +156,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
idMappings = opts.IDMappings
}
dir := d.dir(id)
dir := d.dir2(id, ro)
rootIDs := idMappings.RootPair()
if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0o700, rootIDs); err != nil {
return err
@@ -204,18 +202,32 @@
return nil
}
func (d *Driver) dir(id string) string {
for i, home := range d.homes {
if i > 0 {
home = filepath.Join(home, d.String())
func (d *Driver) dir2(id string, useImageStore bool) string {
var homedir string
if useImageStore && d.imageStore != "" {
homedir = filepath.Join(d.imageStore, d.String(), "dir", filepath.Base(id))
} else {
homedir = filepath.Join(d.home, "dir", filepath.Base(id))
}
if _, err := os.Stat(homedir); err != nil {
additionalHomes := d.additionalHomes[:]
if d.imageStore != "" {
additionalHomes = append(additionalHomes, d.imageStore)
}
candidate := filepath.Join(home, "dir", filepath.Base(id))
fi, err := os.Stat(candidate)
if err == nil && fi.IsDir() {
return candidate
for _, home := range additionalHomes {
candidate := filepath.Join(home, d.String(), "dir", filepath.Base(id))
fi, err := os.Stat(candidate)
if err == nil && fi.IsDir() {
return candidate
}
}
}
return filepath.Join(d.homes[0], "dir", filepath.Base(id))
return homedir
}
func (d *Driver) dir(id string) string {
return d.dir2(id, false)
}
// Remove deletes the content from the directory for a given id.
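
The vfs lookup mirrors the overlay change. With illustrative roots home=/graph, --imagestore=/istore, and one additional store /ro, the new dir2 probes, in order:

    dir2("abc", false): /graph/dir/abc, then /ro/vfs/dir/abc, then /istore/vfs/dir/abc
    dir2("abc", true):  /istore/vfs/dir/abc, falling back through the same store chain

Note that d.imageStore is appended after d.additionalHomes, so during fallback a copy in /ro shadows one in /istore.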
@@ -263,7 +275,7 @@ func (d *Driver) Exists(id string) bool {
// List layers (not including additional image stores)
func (d *Driver) ListLayers() ([]string, error) {
entries, err := os.ReadDir(filepath.Join(d.homes[0], "dir"))
entries, err := os.ReadDir(filepath.Join(d.home, "dir"))
if err != nil {
return nil, err
}
@@ -285,8 +297,8 @@ func (d *Driver) ListLayers() ([]string, error) {
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
if len(d.homes) > 1 {
return d.homes[1:]
if len(d.additionalHomes) > 0 {
return d.additionalHomes
}
return nil
}

vendor/github.com/containers/storage/layers.go

@@ -334,10 +334,71 @@ type rwLayerStore interface {
GarbageCollect() error
}
type multipleLockFile struct {
lockfiles []*lockfile.LockFile
}
func (l multipleLockFile) Lock() {
for _, lock := range l.lockfiles {
lock.Lock()
}
}
func (l multipleLockFile) RLock() {
for _, lock := range l.lockfiles {
lock.RLock()
}
}
func (l multipleLockFile) Unlock() {
for _, lock := range l.lockfiles {
lock.Unlock()
}
}
func (l multipleLockFile) ModifiedSince(lastWrite lockfile.LastWrite) (lockfile.LastWrite, bool, error) {
// Look up only the first lockfile, since this is the value returned by RecordWrite().
return l.lockfiles[0].ModifiedSince(lastWrite)
}
func (l multipleLockFile) AssertLockedForWriting() {
for _, lock := range l.lockfiles {
lock.AssertLockedForWriting()
}
}
func (l multipleLockFile) GetLastWrite() (lockfile.LastWrite, error) {
return l.lockfiles[0].GetLastWrite()
}
func (l multipleLockFile) RecordWrite() (lockfile.LastWrite, error) {
var lastWrite *lockfile.LastWrite
for _, lock := range l.lockfiles {
lw, err := lock.RecordWrite()
if err != nil {
return lw, err
}
// Return the first value we get so we know that
// all the locks have a write time >= to this one.
if lastWrite == nil {
lastWrite = &lw
}
}
return *lastWrite, nil
}
func (l multipleLockFile) IsReadWrite() bool {
return l.lockfiles[0].IsReadWrite()
}
func newMultipleLockFile(l ...*lockfile.LockFile) *multipleLockFile {
return &multipleLockFile{lockfiles: l}
}
type layerStore struct {
// The following fields are only set when constructing layerStore, and must never be modified afterwards.
// They are safe to access without any other locking.
lockfile *lockfile.LockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only layer stores.
lockfile *multipleLockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only layer stores.
mountsLockfile *lockfile.LockFile // Can _only_ be obtained with inProcessLock held.
rundir string
jsonPath [numLayerLocationIndex]string
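
multipleLockFile fans every operation out to all member lockfiles and treats the first member as the source of truth for write stamps: RecordWrite returns the first LastWrite it obtains (later members therefore record a stamp at least as new), and ModifiedSince/GetLastWrite consult only lockfiles[0] for consistency with that. A hedged sketch of how newLayerStore (below) composes the two layers.lock files when --imagestore splits layer metadata; the helper is hypothetical and in-package, since the type is unexported (lockfile is github.com/containers/storage/pkg/lockfile):

func lockForBothStores(layerdir, imagedir string) (*multipleLockFile, error) {
    graphLock, err := lockfile.GetLockFile(filepath.Join(layerdir, "layers.lock"))
    if err != nil {
        return nil, err
    }
    imageLock, err := lockfile.GetLockFile(filepath.Join(imagedir, "layers.lock"))
    if err != nil {
        return nil, err
    }
    // Lock/Unlock on the result acquire and release both files in
    // construction order; RecordWrite bumps both but reports graphLock's stamp.
    return newMultipleLockFile(graphLock, imageLock), nil
}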
@@ -1023,22 +1084,37 @@ func (r *layerStore) saveMounts() error {
return r.loadMounts()
}
func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver, transient bool) (rwLayerStore, error) {
func (s *store) newLayerStore(rundir, layerdir, imagedir string, driver drivers.Driver, transient bool) (rwLayerStore, error) {
if err := os.MkdirAll(rundir, 0o700); err != nil {
return nil, err
}
if err := os.MkdirAll(layerdir, 0o700); err != nil {
return nil, err
}
if imagedir != "" {
if err := os.MkdirAll(imagedir, 0o700); err != nil {
return nil, err
}
}
// Note: While the containers.lock file is in rundir for transient stores
// we don't want to do this here, because the non-transient layers in
// layers.json might be used externally as a read-only layer (using e.g.
// additionalimagestores), and that would look for the lockfile in the
// same directory
var lockFiles []*lockfile.LockFile
lockFile, err := lockfile.GetLockFile(filepath.Join(layerdir, "layers.lock"))
if err != nil {
return nil, err
}
lockFiles = append(lockFiles, lockFile)
if imagedir != "" {
lockFile, err := lockfile.GetLockFile(filepath.Join(imagedir, "layers.lock"))
if err != nil {
return nil, err
}
lockFiles = append(lockFiles, lockFile)
}
mountsLockfile, err := lockfile.GetLockFile(filepath.Join(rundir, "mountpoints.lock"))
if err != nil {
return nil, err
@@ -1048,7 +1124,7 @@ func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Dri
volatileDir = rundir
}
rlstore := layerStore{
lockfile: lockFile,
lockfile: newMultipleLockFile(lockFiles...),
mountsLockfile: mountsLockfile,
rundir: rundir,
jsonPath: [numLayerLocationIndex]string{
@@ -1085,7 +1161,7 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roL
return nil, err
}
rlstore := layerStore{
lockfile: lockfile,
lockfile: newMultipleLockFile(lockfile),
mountsLockfile: nil,
rundir: rundir,
jsonPath: [numLayerLocationIndex]string{

vendor/github.com/containers/storage/pkg/chunked/storage_linux.go

@@ -41,6 +41,7 @@ import (
const (
maxNumberMissingChunks = 1024
autoMergePartsThreshold = 128 // if the gap between two ranges is below this threshold, automatically merge them.
newFileFlags = (unix.O_CREAT | unix.O_TRUNC | unix.O_EXCL | unix.O_WRONLY)
containersOverrideXattr = "user.containers.override_stat"
bigDataKey = "zstd-chunked-manifest"
@@ -1180,22 +1181,12 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan
}
func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
getGap := func(missingParts []missingPart, i int) int {
getGap := func(missingParts []missingPart, i int) uint64 {
prev := missingParts[i-1].SourceChunk.Offset + missingParts[i-1].SourceChunk.Length
return int(missingParts[i].SourceChunk.Offset - prev)
}
getCost := func(missingParts []missingPart, i int) int {
cost := getGap(missingParts, i)
if missingParts[i-1].OriginFile != nil {
cost += int(missingParts[i-1].SourceChunk.Length)
}
if missingParts[i].OriginFile != nil {
cost += int(missingParts[i].SourceChunk.Length)
}
return cost
return missingParts[i].SourceChunk.Offset - prev
}
// simple case: merge chunks from the same file.
// simple case: merge chunks from the same file. Useful to reduce the number of parts to work with later.
newMissingParts := missingParts[0:1]
prevIndex := 0
for i := 1; i < len(missingParts); i++ {
@@ -1215,28 +1206,50 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
}
missingParts = newMissingParts
if len(missingParts) <= target {
return missingParts
type gap struct {
from int
to int
cost uint64
}
// this implementation doesn't account for duplicates, so it could merge
// more than necessary to reach the specified target. Since target itself
// is a heuristic value, it doesn't matter.
costs := make([]int, len(missingParts)-1)
for i := 1; i < len(missingParts); i++ {
costs[i-1] = getCost(missingParts, i)
var requestGaps []gap
lastOffset := int(-1)
numberSourceChunks := 0
for i, c := range missingParts {
if c.OriginFile != nil || c.Hole {
// it does not require a network request
continue
}
numberSourceChunks++
if lastOffset >= 0 {
prevEnd := missingParts[lastOffset].SourceChunk.Offset + missingParts[lastOffset].SourceChunk.Length
cost := c.SourceChunk.Offset - prevEnd
g := gap{
from: lastOffset,
to: i,
cost: cost,
}
requestGaps = append(requestGaps, g)
}
lastOffset = i
}
sort.Ints(costs)
toShrink := len(missingParts) - target
if toShrink >= len(costs) {
toShrink = len(costs) - 1
sort.Slice(requestGaps, func(i, j int) bool {
return requestGaps[i].cost < requestGaps[j].cost
})
toMergeMap := make([]bool, len(missingParts))
remainingToMerge := numberSourceChunks - target
for _, g := range requestGaps {
if remainingToMerge < 0 && g.cost > autoMergePartsThreshold {
continue
}
for i := g.from + 1; i <= g.to; i++ {
toMergeMap[i] = true
}
remainingToMerge--
}
targetValue := costs[toShrink]
newMissingParts = missingParts[0:1]
for i := 1; i < len(missingParts); i++ {
if getCost(missingParts, i) > targetValue {
if !toMergeMap[i] {
newMissingParts = append(newMissingParts, missingParts[i])
} else {
gap := getGap(missingParts, i)
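
The rewritten pass budgets only for chunks that actually cost a network request (holes and chunks satisfied from a local OriginFile are skipped when counting), sorts the gaps between consecutive requested chunks by byte cost, and merges the cheapest gaps while the budget lasts; gaps of at most autoMergePartsThreshold (128) bytes are merged even after the target is met. A toy trace with assumed offsets:

    four requested chunks, target = 3:
      A=[0,100)  B=[150,200)  C=[1000,1100)  D=[1105,1200)
      gaps sorted by cost: C-D = 5, A-B = 50, B-C = 800
      budget = 4 - 3 = 1, and merging continues until it goes negative:
        C-D merged (budget 1 -> 0), A-B merged (0 -> -1),
        B-C skipped (budget < 0 and 800 > 128)
      result: two ranges, [0,200) and [1000,1200); C-D would have been merged
      regardless, since 5 <= autoMergePartsThreshold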
@@ -1268,6 +1281,7 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest st
}
}
missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks)
calculateChunksToRequest()
// There are some missing files. Prepare a multirange request for the missing chunks.
@@ -1281,14 +1295,13 @@
}
if _, ok := err.(ErrBadRequest); ok {
requested := len(missingParts)
// If the server cannot handle at least 64 chunks in a single request, just give up.
if requested < 64 {
if len(chunksToRequest) < 64 {
return err
}
// Merge more chunks to request
missingParts = mergeMissingChunks(missingParts, requested/2)
missingParts = mergeMissingChunks(missingParts, len(chunksToRequest)/2)
calculateChunksToRequest()
continue
}
@@ -1999,7 +2012,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
}
// There are some missing files. Prepare a multirange request for the missing chunks.
if len(missingParts) > 0 {
missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks)
if err := c.retrieveMissingFiles(stream, dest, dirfd, missingParts, options); err != nil {
return output, err
}

vendor/github.com/containers/storage/store.go

@@ -972,11 +972,13 @@ func (s *store) load() error {
if err := os.MkdirAll(gipath, 0o700); err != nil {
return err
}
ris, err := newImageStore(gipath)
imageStore, err := newImageStore(gipath)
if err != nil {
return err
}
s.imageStore = ris
s.imageStore = imageStore
s.rwImageStores = []rwImageStore{imageStore}
gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers")
if err := os.MkdirAll(gcpath, 0o700); err != nil {
@@ -994,13 +996,16 @@
s.containerStore = rcs
for _, store := range driver.AdditionalImageStores() {
additionalImageStores := s.graphDriver.AdditionalImageStores()
if s.imageStoreDir != "" {
additionalImageStores = append([]string{s.graphRoot}, additionalImageStores...)
}
for _, store := range additionalImageStores {
gipath := filepath.Join(store, driverPrefix+"images")
var ris roImageStore
if s.imageStoreDir != "" && store == s.graphRoot {
// If --imagestore was set and current store
// is `graphRoot` then mount it as a `rw` additional
// store instead of `readonly` additional store.
// both the graphdriver and the imagestore must be used read-write.
if store == s.imageStoreDir || store == s.graphRoot {
imageStore, err := newImageStore(gipath)
if err != nil {
return err
@@ -1085,15 +1090,9 @@
// Almost all users should use startUsingGraphDriver instead.
// The caller must hold s.graphLock.
func (s *store) createGraphDriverLocked() (drivers.Driver, error) {
driverRoot := s.imageStoreDir
imageStoreBase := s.graphRoot
if driverRoot == "" {
driverRoot = s.graphRoot
imageStoreBase = ""
}
config := drivers.Options{
Root: driverRoot,
ImageStore: imageStoreBase,
Root: s.graphRoot,
ImageStore: s.imageStoreDir,
RunRoot: s.runRoot,
DriverPriority: s.graphDriverPriority,
DriverOptions: s.graphOptions,
@@ -1123,15 +1122,15 @@
if err := os.MkdirAll(rlpath, 0o700); err != nil {
return nil, err
}
imgStoreRoot := s.imageStoreDir
if imgStoreRoot == "" {
imgStoreRoot = s.graphRoot
}
glpath := filepath.Join(imgStoreRoot, driverPrefix+"layers")
glpath := filepath.Join(s.graphRoot, driverPrefix+"layers")
if err := os.MkdirAll(glpath, 0o700); err != nil {
return nil, err
}
rls, err := s.newLayerStore(rlpath, glpath, s.graphDriver, s.transientStore)
ilpath := ""
if s.imageStoreDir != "" {
ilpath = filepath.Join(s.imageStoreDir, driverPrefix+"layers")
}
rls, err := s.newLayerStore(rlpath, glpath, ilpath, s.graphDriver, s.transientStore)
if err != nil {
return nil, err
}
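
Net effect on layout: the graph driver now always receives graphRoot as Root with the split store passed separately as ImageStore, and layer metadata stays under the graph root with a second lock directory in the image store. Illustrative paths (assumed, with --imagestore /mnt/istore and the overlay driver):

    /var/lib/containers/storage/overlay-layers/   layers.json + layers.lock (read-write metadata)
    /mnt/istore/overlay-layers/                   second layers.lock, held jointly via multipleLockFile
    /var/lib/containers/storage/overlay/          container (read-write) layers
    /mnt/istore/overlay/                          image layers placed there by the driver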
@@ -1162,8 +1161,10 @@
if err := os.MkdirAll(rlpath, 0o700); err != nil {
return nil, err
}
for _, store := range s.graphDriver.AdditionalImageStores() {
glpath := filepath.Join(store, driverPrefix+"layers")
rls, err := newROLayerStore(rlpath, glpath, s.graphDriver)
if err != nil {
return nil, err
@@ -2590,7 +2591,7 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error)
if err := s.writeToAllStores(func(rlstore rwLayerStore) error {
// Delete image from all available imagestores configured to be used.
imageFound := false
for _, is := range append([]rwImageStore{s.imageStore}, s.rwImageStores...) {
for _, is := range s.rwImageStores {
if is != s.imageStore {
// This is an additional writeable image store
// so we must perform lock

vendor/modules.txt

@@ -315,7 +315,7 @@ github.com/containers/image/v5/transports
github.com/containers/image/v5/transports/alltransports
github.com/containers/image/v5/types
github.com/containers/image/v5/version
# github.com/containers/libhvee v0.6.1-0.20240225143609-c1bda9d3838c
# github.com/containers/libhvee v0.6.1-0.20240301191848-0ff33af3be2d
## explicit; go 1.18
github.com/containers/libhvee/pkg/hypervctl
github.com/containers/libhvee/pkg/kvp/ginsu
@@ -353,7 +353,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
# github.com/containers/storage v1.52.1-0.20240229151339-eadc620e74e7
# github.com/containers/storage v1.52.1-0.20240301185114-bdd7d8188030
## explicit; go 1.20
github.com/containers/storage
github.com/containers/storage/drivers