Mirror of https://github.com/containers/podman.git, synced 2025-06-22 01:48:54 +08:00

Merge pull request #11060 from containers/dependabot/go_modules/github.com/containers/storage-1.33.0
Bump github.com/containers/storage from 1.32.6 to 1.33.0

go.mod (2 lines changed)
@@ -17,7 +17,7 @@ require (
     github.com/containers/image/v5 v5.14.0
     github.com/containers/ocicrypt v1.1.2
     github.com/containers/psgo v1.5.2
-    github.com/containers/storage v1.32.6
+    github.com/containers/storage v1.33.0
     github.com/coreos/go-systemd/v22 v22.3.2
     github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3
     github.com/cri-o/ocicni v0.2.1-0.20210621164014-d0acc7862283

go.sum (3 lines changed)
@@ -260,8 +260,9 @@ github.com/containers/psgo v1.5.2 h1:3aoozst/GIwsrr/5jnFy3FrJay98uujPCu9lTuSZ/Cw
 github.com/containers/psgo v1.5.2/go.mod h1:2ubh0SsreMZjSXW1Hif58JrEcFudQyIy9EzPUWfawVU=
 github.com/containers/storage v1.23.5/go.mod h1:ha26Q6ngehFNhf3AWoXldvAvwI4jFe3ETQAf/CeZPyM=
 github.com/containers/storage v1.32.2/go.mod h1:YIBxxjfXZTi04Ah49sh1uSGfmT1V89+I5i3deRobzQo=
-github.com/containers/storage v1.32.6 h1:NqdFRewXO/PYPjgCAScoigZc5QUA21yapSEj6kqD8cw=
 github.com/containers/storage v1.32.6/go.mod h1:mdB+b89p+jU8zpzLTVXA0gWMmIo0WrkfGMh1R8O2IQw=
+github.com/containers/storage v1.33.0 h1:sTk1Mfz3uSNg7cxeaDb0Ld8/UV+8pZEOQjvysjJuzX8=
+github.com/containers/storage v1.33.0/go.mod h1:FUZPF4nJijX8ixdhByZJXf02cvbyLi6dyDwXdIe8QVY=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=

vendor/github.com/containers/storage/VERSION (generated, vendored; 2 lines changed)
@@ -1 +1 @@
-1.32.6
+1.33.0

vendor/github.com/containers/storage/drivers/overlay/overlay.go (generated, vendored; 39 lines changed)
@@ -364,12 +364,12 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
         // Try to enable project quota support over xfs.
         if d.quotaCtl, err = quota.NewControl(home); err == nil {
             projectQuotaSupported = true
-        } else if opts.quota.Size > 0 {
-            return nil, fmt.Errorf("Storage option overlay.size not supported. Filesystem does not support Project Quota: %v", err)
+        } else if opts.quota.Size > 0 || opts.quota.Inodes > 0 {
+            return nil, fmt.Errorf("Storage options overlay.size and overlay.inodes not supported. Filesystem does not support Project Quota: %v", err)
         }
-    } else if opts.quota.Size > 0 {
+    } else if opts.quota.Size > 0 || opts.quota.Inodes > 0 {
         // if xfs is not the backing fs then error out if the storage-opt overlay.size is used.
-        return nil, fmt.Errorf("Storage option overlay.size only supported for backingFS XFS. Found %v", backingFs)
+        return nil, fmt.Errorf("Storage option overlay.size and overlay.inodes only supported for backingFS XFS. Found %v", backingFs)
     }

     logrus.Debugf("backingFs=%s, projectQuotaSupported=%v, useNativeDiff=%v, usingMetacopy=%v", backingFs, projectQuotaSupported, !d.useNaiveDiff(), d.usingMetacopy)
@@ -400,6 +400,13 @@ func parseOptions(options []string) (*overlayOptions, error) {
                 return nil, err
             }
             o.quota.Size = uint64(size)
+        case "inodes":
+            logrus.Debugf("overlay: inodes=%s", val)
+            inodes, err := strconv.ParseUint(val, 10, 64)
+            if err != nil {
+                return nil, err
+            }
+            o.quota.Inodes = uint64(inodes)
         case "imagestore", "additionalimagestore":
             logrus.Debugf("overlay: imagestore=%s", val)
             // Additional read only image stores to use for lower paths
@@ -613,6 +620,10 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
     if unshare.IsRootless() {
         flags = fmt.Sprintf("%s,userxattr", flags)
     }
+    if err := syscall.Mknod(filepath.Join(upperDir, "whiteout"), syscall.S_IFCHR|0600, int(unix.Mkdev(0, 0))); err != nil {
+        logrus.Debugf("unable to create kernel-style whiteout: %v", err)
+        return supportsDType, errors.Wrapf(err, "unable to create kernel-style whiteout")
+    }

     if len(flags) < unix.Getpagesize() {
         err := unix.Mount("overlay", mergedDir, "overlay", 0, flags)
@@ -784,6 +795,13 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts
         opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10)
     }

+    if _, ok := opts.StorageOpt["inodes"]; !ok {
+        if opts.StorageOpt == nil {
+            opts.StorageOpt = map[string]string{}
+        }
+        opts.StorageOpt["inodes"] = strconv.FormatUint(d.options.quota.Inodes, 10)
+    }
+
     return d.create(id, parent, opts)
 }

@@ -794,6 +812,9 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr
         if _, ok := opts.StorageOpt["size"]; ok {
             return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers")
         }
+        if _, ok := opts.StorageOpt["inodes"]; ok {
+            return fmt.Errorf("--storage-opt inodes is only supported for ReadWrite Layers")
+        }
     }

     return d.create(id, parent, opts)
@@ -850,7 +871,9 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
         if driver.options.quota.Size > 0 {
             quota.Size = driver.options.quota.Size
         }
+        if driver.options.quota.Inodes > 0 {
+            quota.Inodes = driver.options.quota.Inodes
+        }
     }
     // Set container disk quota limit
     // If it is set to 0, we will track the disk usage, but not enforce a limit
@@ -922,6 +945,12 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e
             return err
         }
         driver.options.quota.Size = uint64(size)
+    case "inodes":
+        inodes, err := strconv.ParseUint(val, 10, 64)
+        if err != nil {
+            return err
+        }
+        driver.options.quota.Inodes = uint64(inodes)
     default:
         return fmt.Errorf("Unknown option %s", key)
     }

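Note: taken together, the overlay.go hunks wire a new "inodes" storage option through the same path as "size": parseOptions/parseStorageOpt fill quota.Inodes, CreateReadWrite propagates the driver default, and Create rejects the option for read-only layers. A minimal, self-contained sketch of the parsing step; the type and helper names here are illustrative stand-ins, not the actual containers/storage API:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// quota mirrors the shape the driver fills in: Size in bytes, Inodes as a
// plain file count.
type quota struct {
    Size   uint64
    Inodes uint64
}

// parseQuotaOpts handles key=val options the way the driver now does for
// both "size" and "inodes". The real driver parses "size" with
// units.RAMInBytes so suffixes like "10G" work; bare integers keep this
// sketch dependency-free.
func parseQuotaOpts(opts []string) (quota, error) {
    var q quota
    for _, o := range opts {
        key, val, ok := strings.Cut(o, "=")
        if !ok {
            return q, fmt.Errorf("malformed option %q", o)
        }
        n, err := strconv.ParseUint(val, 10, 64)
        if err != nil {
            return q, err
        }
        switch key {
        case "size":
            q.Size = n
        case "inodes":
            q.Inodes = n
        default:
            return q, fmt.Errorf("unknown option %s", key)
        }
    }
    return q, nil
}

func main() {
    q, err := parseQuotaOpts([]string{"size=10737418240", "inodes=20000"})
    if err != nil {
        panic(err)
    }
    fmt.Printf("size=%d inodes=%d\n", q.Size, q.Inodes)
}
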
vendor/github.com/containers/storage/drivers/quota/projectquota.go (generated, vendored; 30 lines changed)
@@ -38,8 +38,8 @@ struct fsxattr {
 #ifndef PRJQUOTA
 #define PRJQUOTA 2
 #endif
-#ifndef XFS_PROJ_QUOTA
-#define XFS_PROJ_QUOTA 2
+#ifndef FS_PROJ_QUOTA
+#define FS_PROJ_QUOTA 2
 #endif
 #ifndef Q_XSETPQLIM
 #define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA)
@@ -61,9 +61,10 @@ import (
     "golang.org/x/sys/unix"
 )

-// Quota limit params - currently we only control blocks hard limit
+// Quota limit params - currently we only control blocks hard limit and inodes
 type Quota struct {
     Size uint64
+    Inodes uint64
 }

 // Control - Context to be used by storage driver (e.g. overlay)
@@ -119,7 +120,8 @@ func NewControl(basePath string) (*Control, error) {
     // a quota on the first available project id
     //
     quota := Quota{
         Size: 0,
+        Inodes: 0,
     }
     if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil {
         return nil, err
@@ -166,7 +168,7 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error {
     //
     // set the quota limit for the container's project id
     //
-    logrus.Debugf("SetQuota(%s, %d): projectID=%d", targetPath, quota.Size, projectID)
+    logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID)
     return setProjectQuota(q.backingFsBlockDev, projectID, quota)
 }

@@ -175,11 +177,18 @@ func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) er
     var d C.fs_disk_quota_t
     d.d_version = C.FS_DQUOT_VERSION
     d.d_id = C.__u32(projectID)
-    d.d_flags = C.XFS_PROJ_QUOTA
+    d.d_flags = C.FS_PROJ_QUOTA

-    d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT
-    d.d_blk_hardlimit = C.__u64(quota.Size / 512)
-    d.d_blk_softlimit = d.d_blk_hardlimit
+    if quota.Size > 0 {
+        d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT
+        d.d_blk_hardlimit = C.__u64(quota.Size / 512)
+        d.d_blk_softlimit = d.d_blk_hardlimit
+    }
+    if quota.Inodes > 0 {
+        d.d_fieldmask = C.FS_DQ_IHARD | C.FS_DQ_ISOFT
+        d.d_ino_hardlimit = C.__u64(quota.Inodes)
+        d.d_ino_softlimit = d.d_ino_hardlimit
+    }

     var cs = C.CString(backingFsBlockDev)
     defer C.free(unsafe.Pointer(cs))
@@ -202,6 +211,7 @@ func (q *Control) GetQuota(targetPath string, quota *Quota) error {
         return err
     }
     quota.Size = uint64(d.d_blk_hardlimit) * 512
+    quota.Inodes = uint64(d.d_ino_hardlimit)
     return nil
 }

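Note: setProjectQuota now sets block and inode limits independently, and only when non-zero. One unit detail worth calling out when reading the hunk: fs_disk_quota_t block limits are expressed in 512-byte basic blocks, hence the division by 512 here and the multiplication back in GetQuota, while inode limits are a bare count. A small sketch of that conversion, assuming only the 512-byte unit used in the hunk above:

package main

import "fmt"

// Block quota limits are expressed in 512-byte basic blocks; the storage
// option is given in bytes. These helpers mirror the conversions in
// setProjectQuota (Size / 512) and GetQuota (d_blk_hardlimit * 512).
const basicBlockSize = 512

func bytesToBlocks(size uint64) uint64   { return size / basicBlockSize }
func blocksToBytes(blocks uint64) uint64 { return blocks * basicBlockSize }

func main() {
    size := uint64(10 * 1024 * 1024) // a 10 MiB quota
    fmt.Println(bytesToBlocks(size)) // 20480 basic blocks
    // Inode limits need no conversion; quota.Inodes is used as-is.
    fmt.Println(blocksToBytes(bytesToBlocks(size))) // 10485760 bytes round-trips
}
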
vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go (generated, vendored; 3 lines changed)
@@ -8,7 +8,8 @@ import (

 // Quota limit params - currently we only control blocks hard limit
 type Quota struct {
     Size uint64
+    Inodes uint64
 }

 // Control - Context to be used by storage driver (e.g. overlay)

vendor/github.com/containers/storage/go.mod (generated, vendored; 2 lines changed)
@@ -16,7 +16,7 @@ require (
     github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
     github.com/moby/sys/mountinfo v0.4.1
     github.com/opencontainers/go-digest v1.0.0
-    github.com/opencontainers/runc v1.0.0
+    github.com/opencontainers/runc v1.0.1
     github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
     github.com/opencontainers/selinux v1.8.2
     github.com/pkg/errors v0.9.1

vendor/github.com/containers/storage/go.sum (generated, vendored; 6 lines changed)
@@ -99,7 +99,7 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE
 github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
 github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
 github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
-github.com/cilium/ebpf v0.6.1/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
@@ -468,8 +468,8 @@ github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59P
 github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
-github.com/opencontainers/runc v1.0.0 h1:QOhAQAYUlKeofuyeKdR6ITvOnXLPbEAjPMjz9wCUXcU=
-github.com/opencontainers/runc v1.0.0/go.mod h1:MU2S3KEB2ZExnhnAQYbwjdYV6HwKtDlNbA2Z2OeNDeA=
+github.com/opencontainers/runc v1.0.1 h1:G18PGckGdAm3yVQRWDVQ1rLSLntiniKJ0cNRT2Tm5gs=
+github.com/opencontainers/runc v1.0.1/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
 github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=

vendor/github.com/containers/storage/pkg/archive/archive.go (generated, vendored; 5 lines changed)
@@ -645,10 +645,13 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
         }
         file.Close()

-    case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
+    case tar.TypeBlock, tar.TypeChar:
         if inUserns { // cannot create devices in a userns
+            logrus.Debugf("Tar: Can't create device %v while running in user namespace", path)
             return nil
         }
+        fallthrough
+    case tar.TypeFifo:
         // Handle this is an OS-specific way
         if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
             return err

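Note: this hunk splits FIFOs out of the device case: inside a user namespace, block and char devices are skipped with a debug message, while FIFOs now fall through to the same creation helper and are still created (the now-redundant userns check inside that helper is removed in the next file). The control flow in isolation, as a runnable sketch with a stub standing in for handleTarTypeBlockCharFifo:

package main

import (
    "archive/tar"
    "fmt"
)

// mknodStub stands in for handleTarTypeBlockCharFifo.
func mknodStub(path string) error {
    fmt.Printf("would create node %s\n", path)
    return nil
}

// handleEntry mirrors the new switch shape in createTarFile.
func handleEntry(typeflag byte, inUserns bool, path string) error {
    switch typeflag {
    case tar.TypeBlock, tar.TypeChar:
        if inUserns { // cannot create devices in a userns
            fmt.Printf("skipping device %s in user namespace\n", path)
            return nil
        }
        fallthrough
    case tar.TypeFifo:
        return mknodStub(path)
    }
    return nil
}

func main() {
    _ = handleEntry(tar.TypeChar, true, "dev/null") // skipped in a userns
    _ = handleEntry(tar.TypeFifo, true, "run/pipe") // still created
}
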
vendor/github.com/containers/storage/pkg/archive/archive_unix.go (generated, vendored; 6 lines changed)
@@ -11,7 +11,6 @@ import (

     "github.com/containers/storage/pkg/idtools"
     "github.com/containers/storage/pkg/system"
-    "github.com/opencontainers/runc/libcontainer/userns"
     "golang.org/x/sys/unix"
 )

@@ -88,11 +87,6 @@ func minor(device uint64) uint64 {
 // handleTarTypeBlockCharFifo is an OS-specific helper function used by
 // createTarFile to handle the following types of header: Block; Char; Fifo
 func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
-    if userns.RunningInUserNS() {
-        // cannot create a device if running in user namespace
-        return nil
-    }
-
     mode := uint32(hdr.Mode & 07777)
     switch hdr.Typeflag {
     case tar.TypeBlock:

vendor/github.com/containers/storage/pkg/chunked/compression.go (generated, vendored; 380 lines changed)
@@ -2,72 +2,29 @@ package chunked

 import (
     "bytes"
-    "encoding/base64"
     "encoding/binary"
-    "encoding/json"
     "fmt"
     "io"
-    "io/ioutil"
-    "time"

-    "github.com/containers/storage/pkg/ioutils"
+    "github.com/containers/storage/pkg/chunked/compressor"
+    "github.com/containers/storage/pkg/chunked/internal"
     "github.com/klauspost/compress/zstd"
     digest "github.com/opencontainers/go-digest"
     "github.com/pkg/errors"
     "github.com/vbatts/tar-split/archive/tar"
 )

-type zstdTOC struct {
-    Version int                `json:"version"`
-    Entries []zstdFileMetadata `json:"entries"`
-}
-
-type zstdFileMetadata struct {
-    Type       string            `json:"type"`
-    Name       string            `json:"name"`
-    Linkname   string            `json:"linkName,omitempty"`
-    Mode       int64             `json:"mode,omitempty"`
-    Size       int64             `json:"size"`
-    UID        int               `json:"uid"`
-    GID        int               `json:"gid"`
-    ModTime    time.Time         `json:"modtime"`
-    AccessTime time.Time         `json:"accesstime"`
-    ChangeTime time.Time         `json:"changetime"`
-    Devmajor   int64             `json:"devMajor"`
-    Devminor   int64             `json:"devMinor"`
-    Xattrs     map[string]string `json:"xattrs,omitempty"`
-    Digest     string            `json:"digest,omitempty"`
-    Offset     int64             `json:"offset,omitempty"`
-    EndOffset  int64             `json:"endOffset,omitempty"`
-
-    // Currently chunking is not supported.
-    ChunkSize   int64  `json:"chunkSize,omitempty"`
-    ChunkOffset int64  `json:"chunkOffset,omitempty"`
-    ChunkDigest string `json:"chunkDigest,omitempty"`
-}
-
 const (
-    TypeReg     = "reg"
-    TypeChunk   = "chunk"
-    TypeLink    = "hardlink"
-    TypeChar    = "char"
-    TypeBlock   = "block"
-    TypeDir     = "dir"
-    TypeFifo    = "fifo"
-    TypeSymlink = "symlink"
+    TypeReg     = internal.TypeReg
+    TypeChunk   = internal.TypeChunk
+    TypeLink    = internal.TypeLink
+    TypeChar    = internal.TypeChar
+    TypeBlock   = internal.TypeBlock
+    TypeDir     = internal.TypeDir
+    TypeFifo    = internal.TypeFifo
+    TypeSymlink = internal.TypeSymlink
 )

-var tarTypes = map[byte]string{
-    tar.TypeReg:     TypeReg,
-    tar.TypeRegA:    TypeReg,
-    tar.TypeLink:    TypeLink,
-    tar.TypeChar:    TypeChar,
-    tar.TypeBlock:   TypeBlock,
-    tar.TypeDir:     TypeDir,
-    tar.TypeFifo:    TypeFifo,
-    tar.TypeSymlink: TypeSymlink,
-}
-
 var typesToTar = map[string]byte{
     TypeReg:  tar.TypeReg,
     TypeLink: tar.TypeLink,
@@ -78,14 +35,6 @@ var typesToTar = map[string]byte{
     TypeSymlink: tar.TypeSymlink,
 }

-func getType(t byte) (string, error) {
-    r, found := tarTypes[t]
-    if !found {
-        return "", fmt.Errorf("unknown tarball type: %v", t)
-    }
-    return r, nil
-}
-
 func typeToTarType(t string) (byte, error) {
     r, found := typesToTar[t]
     if !found {
@@ -94,52 +43,30 @@ func typeToTarType(t string) (byte, error) {
     return r, nil
 }

-const (
-    manifestChecksumKey = "io.containers.zstd-chunked.manifest-checksum"
-    manifestInfoKey     = "io.containers.zstd-chunked.manifest-position"
-
-    // manifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
-    manifestTypeCRFS = 1
-
-    // footerSizeSupported is the footer size supported by this implementation.
-    // Newer versions of the image format might increase this value, so reject
-    // any version that is not supported.
-    footerSizeSupported = 40
-)
-
-var (
-    // when the zstd decoder encounters a skippable frame + 1 byte for the size, it
-    // will ignore it.
-    // https://tools.ietf.org/html/rfc8478#section-3.1.2
-    skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18}
-
-    zstdChunkedFrameMagic = []byte{0x47, 0x6e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78}
-)
-
 func isZstdChunkedFrameMagic(data []byte) bool {
     if len(data) < 8 {
         return false
     }
-    return bytes.Equal(zstdChunkedFrameMagic, data[:8])
+    return bytes.Equal(internal.ZstdChunkedFrameMagic, data[:8])
 }

 // readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must
 // be specified.
 // This function uses the io.containers.zstd-chunked. annotations when specified.
 func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, error) {
-    footerSize := int64(footerSizeSupported)
+    footerSize := int64(internal.FooterSizeSupported)
     if blobSize <= footerSize {
         return nil, errors.New("blob too small")
     }

-    manifestChecksumAnnotation := annotations[manifestChecksumKey]
+    manifestChecksumAnnotation := annotations[internal.ManifestChecksumKey]
     if manifestChecksumAnnotation == "" {
-        return nil, fmt.Errorf("manifest checksum annotation %q not found", manifestChecksumKey)
+        return nil, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey)
     }

     var offset, length, lengthUncompressed, manifestType uint64

-    if offsetMetadata := annotations[manifestInfoKey]; offsetMetadata != "" {
+    if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" {
         if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &offset, &length, &lengthUncompressed, &manifestType); err != nil {
             return nil, err
         }
@@ -173,7 +100,7 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann
         }
     }

-    if manifestType != manifestTypeCRFS {
+    if manifestType != internal.ManifestTypeCRFS {
         return nil, errors.New("invalid manifest type")
     }

@@ -235,279 +162,8 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann
     return manifest, nil
 }

-func appendZstdSkippableFrame(dest io.Writer, data []byte) error {
-    if _, err := dest.Write(skippableFrameMagic); err != nil {
-        return err
-    }
-
-    var size []byte = make([]byte, 4)
-    binary.LittleEndian.PutUint32(size, uint32(len(data)))
-    if _, err := dest.Write(size); err != nil {
-        return err
-    }
-    if _, err := dest.Write(data); err != nil {
-        return err
-    }
-    return nil
-}
-
-func writeZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, metadata []zstdFileMetadata, level int) error {
-    // 8 is the size of the zstd skippable frame header + the frame size
-    manifestOffset := offset + 8
-
-    toc := zstdTOC{
-        Version: 1,
-        Entries: metadata,
-    }
-
-    // Generate the manifest
-    manifest, err := json.Marshal(toc)
-    if err != nil {
-        return err
-    }
-
-    var compressedBuffer bytes.Buffer
-    zstdWriter, err := zstdWriterWithLevel(&compressedBuffer, level)
-    if err != nil {
-        return err
-    }
-    if _, err := zstdWriter.Write(manifest); err != nil {
-        zstdWriter.Close()
-        return err
-    }
-    if err := zstdWriter.Close(); err != nil {
-        return err
-    }
-    compressedManifest := compressedBuffer.Bytes()
-
-    manifestDigester := digest.Canonical.Digester()
-    manifestChecksum := manifestDigester.Hash()
-    if _, err := manifestChecksum.Write(compressedManifest); err != nil {
-        return err
-    }
-
-    outMetadata[manifestChecksumKey] = manifestDigester.Digest().String()
-    outMetadata[manifestInfoKey] = fmt.Sprintf("%d:%d:%d:%d", manifestOffset, len(compressedManifest), len(manifest), manifestTypeCRFS)
-    if err := appendZstdSkippableFrame(dest, compressedManifest); err != nil {
-        return err
-    }
-
-    // Store the offset to the manifest and its size in LE order
-    var manifestDataLE []byte = make([]byte, footerSizeSupported)
-    binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset)
-    binary.LittleEndian.PutUint64(manifestDataLE[8:], uint64(len(compressedManifest)))
-    binary.LittleEndian.PutUint64(manifestDataLE[16:], uint64(len(manifest)))
-    binary.LittleEndian.PutUint64(manifestDataLE[24:], uint64(manifestTypeCRFS))
-    copy(manifestDataLE[32:], zstdChunkedFrameMagic)
-
-    return appendZstdSkippableFrame(dest, manifestDataLE)
-}
-
-func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error {
-    // total written so far. Used to retrieve partial offsets in the file
-    dest := ioutils.NewWriteCounter(destFile)
-
-    tr := tar.NewReader(reader)
-    tr.RawAccounting = true
-
-    buf := make([]byte, 4096)
-
-    zstdWriter, err := zstdWriterWithLevel(dest, level)
-    if err != nil {
-        return err
-    }
-    defer func() {
-        if zstdWriter != nil {
-            zstdWriter.Close()
-            zstdWriter.Flush()
-        }
-    }()
-
-    restartCompression := func() (int64, error) {
-        var offset int64
-        if zstdWriter != nil {
-            if err := zstdWriter.Close(); err != nil {
-                return 0, err
-            }
-            if err := zstdWriter.Flush(); err != nil {
-                return 0, err
-            }
-            offset = dest.Count
-            zstdWriter.Reset(dest)
-        }
-        return offset, nil
-    }
-
-    var metadata []zstdFileMetadata
-    for {
-        hdr, err := tr.Next()
-        if err != nil {
-            if err == io.EOF {
-                break
-            }
-            return err
-        }
-
-        rawBytes := tr.RawBytes()
-        if _, err := zstdWriter.Write(rawBytes); err != nil {
-            return err
-        }
-        payloadDigester := digest.Canonical.Digester()
-        payloadChecksum := payloadDigester.Hash()
-
-        payloadDest := io.MultiWriter(payloadChecksum, zstdWriter)
-
-        // Now handle the payload, if any
-        var startOffset, endOffset int64
-        checksum := ""
-        for {
-            read, errRead := tr.Read(buf)
-            if errRead != nil && errRead != io.EOF {
-                return err
-            }
-
-            // restart the compression only if there is
-            // a payload.
-            if read > 0 {
-                if startOffset == 0 {
-                    startOffset, err = restartCompression()
-                    if err != nil {
-                        return err
-                    }
-                }
-                _, err := payloadDest.Write(buf[:read])
-                if err != nil {
-                    return err
-                }
-            }
-            if errRead == io.EOF {
-                if startOffset > 0 {
-                    endOffset, err = restartCompression()
-                    if err != nil {
-                        return err
-                    }
-                    checksum = payloadDigester.Digest().String()
-                }
-                break
-            }
-        }
-
-        typ, err := getType(hdr.Typeflag)
-        if err != nil {
-            return err
-        }
-        xattrs := make(map[string]string)
-        for k, v := range hdr.Xattrs {
-            xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v))
-        }
-        m := zstdFileMetadata{
-            Type:       typ,
-            Name:       hdr.Name,
-            Linkname:   hdr.Linkname,
-            Mode:       hdr.Mode,
-            Size:       hdr.Size,
-            UID:        hdr.Uid,
-            GID:        hdr.Gid,
-            ModTime:    hdr.ModTime,
-            AccessTime: hdr.AccessTime,
-            ChangeTime: hdr.ChangeTime,
-            Devmajor:   hdr.Devmajor,
-            Devminor:   hdr.Devminor,
-            Xattrs:     xattrs,
-            Digest:     checksum,
-            Offset:     startOffset,
-            EndOffset:  endOffset,
-
-            // ChunkSize is 0 for the last chunk
-            ChunkSize:   0,
-            ChunkOffset: 0,
-            ChunkDigest: checksum,
-        }
-        metadata = append(metadata, m)
-    }
-
-    rawBytes := tr.RawBytes()
-    if _, err := zstdWriter.Write(rawBytes); err != nil {
-        return err
-    }
-    if err := zstdWriter.Flush(); err != nil {
-        return err
-    }
-    if err := zstdWriter.Close(); err != nil {
-        return err
-    }
-    zstdWriter = nil
-
-    return writeZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), metadata, level)
-}
-
-type zstdChunkedWriter struct {
-    tarSplitOut *io.PipeWriter
-    tarSplitErr chan error
-}
-
-func (w zstdChunkedWriter) Close() error {
-    err := <-w.tarSplitErr
-    if err != nil {
-        w.tarSplitOut.Close()
-        return err
-    }
-    return w.tarSplitOut.Close()
-}
-
-func (w zstdChunkedWriter) Write(p []byte) (int, error) {
-    select {
-    case err := <-w.tarSplitErr:
-        w.tarSplitOut.Close()
-        return 0, err
-    default:
-        return w.tarSplitOut.Write(p)
-    }
-}
-
-// zstdChunkedWriterWithLevel writes a zstd compressed tarball where each file is
-// compressed separately so it can be addressed separately. Idea based on CRFS:
-// https://github.com/google/crfs
-// The difference with CRFS is that the zstd compression is used instead of gzip.
-// The reason for it is that zstd supports embedding metadata ignored by the decoder
-// as part of the compressed stream.
-// A manifest json file with all the metadata is appended at the end of the tarball
-// stream, using zstd skippable frames.
-// The final file will look like:
-// [FILE_1][FILE_2]..[FILE_N][SKIPPABLE FRAME 1][SKIPPABLE FRAME 2]
-// Where:
-// [FILE_N]: [ZSTD HEADER][TAR HEADER][PAYLOAD FILE_N][ZSTD FOOTER]
-// [SKIPPABLE FRAME 1]: [ZSTD SKIPPABLE FRAME, SIZE=MANIFEST LENGTH][MANIFEST]
-// [SKIPPABLE FRAME 2]: [ZSTD SKIPPABLE FRAME, SIZE=16][MANIFEST_OFFSET][MANIFEST_LENGTH][MANIFEST_LENGTH_UNCOMPRESSED][MANIFEST_TYPE][CHUNKED_ZSTD_MAGIC_NUMBER]
-// MANIFEST_OFFSET, MANIFEST_LENGTH, MANIFEST_LENGTH_UNCOMPRESSED and CHUNKED_ZSTD_MAGIC_NUMBER are 64 bits unsigned in little endian format.
-func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level int) (io.WriteCloser, error) {
-    ch := make(chan error, 1)
-    r, w := io.Pipe()
-
-    go func() {
-        ch <- writeZstdChunkedStream(out, metadata, r, level)
-        io.Copy(ioutil.Discard, r)
-        r.Close()
-        close(ch)
-    }()
-
-    return zstdChunkedWriter{
-        tarSplitOut: w,
-        tarSplitErr: ch,
-    }, nil
-}
-
-func zstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) {
-    el := zstd.EncoderLevelFromZstd(level)
-    return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
-}
-
 // ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
+// Deprecated: Use pkg/chunked/compressor.ZstdCompressor.
 func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
-    if level == nil {
-        l := 3
-        level = &l
-    }
-
-    return zstdChunkedWriterWithLevel(r, metadata, *level)
+    return compressor.ZstdCompressor(r, metadata, level)
 }

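Note: the bulk of this file moved into pkg/chunked/internal and pkg/chunked/compressor (added below); compression.go keeps the reader side and re-exports the type constants. The footer that readZstdChunkedManifest relies on is 40 bytes: four little-endian uint64 fields (manifest offset, compressed length, uncompressed length, manifest type) followed by the 8-byte frame magic. A sketch of decoding it, using only the layout and magic value shown in the diff:

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

// Values taken from the diff: FooterSizeSupported = 40 and the
// zstd:chunked frame magic.
var zstdChunkedFrameMagic = []byte{0x47, 0x6e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78}

type footer struct {
    ManifestOffset, CompressedLen, UncompressedLen, ManifestType uint64
}

// parseFooter decodes the trailing 40 bytes of a zstd:chunked blob.
func parseFooter(b []byte) (footer, error) {
    var f footer
    if len(b) != 40 {
        return f, fmt.Errorf("expected a 40-byte footer, got %d bytes", len(b))
    }
    if !bytes.Equal(b[32:], zstdChunkedFrameMagic) {
        return f, fmt.Errorf("bad footer magic")
    }
    f.ManifestOffset = binary.LittleEndian.Uint64(b[0:])
    f.CompressedLen = binary.LittleEndian.Uint64(b[8:])
    f.UncompressedLen = binary.LittleEndian.Uint64(b[16:])
    f.ManifestType = binary.LittleEndian.Uint64(b[24:])
    return f, nil
}

func main() {
    // Round-trip against the writer's encoding in WriteZstdChunkedManifest.
    raw := make([]byte, 40)
    binary.LittleEndian.PutUint64(raw[0:], 4096)  // manifest offset
    binary.LittleEndian.PutUint64(raw[8:], 512)   // compressed length
    binary.LittleEndian.PutUint64(raw[16:], 2048) // uncompressed length
    binary.LittleEndian.PutUint64(raw[24:], 1)    // ManifestTypeCRFS
    copy(raw[32:], zstdChunkedFrameMagic)
    fmt.Println(parseFooter(raw))
}
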
vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go (generated, vendored; new file, 220 lines)
@@ -0,0 +1,220 @@
+package compressor
+
+// NOTE: This is used from github.com/containers/image by callers that
+// don't otherwise use containers/storage, so don't make this depend on any
+// larger software like the graph drivers.
+
+import (
+    "encoding/base64"
+    "io"
+    "io/ioutil"
+
+    "github.com/containers/storage/pkg/chunked/internal"
+    "github.com/containers/storage/pkg/ioutils"
+    "github.com/opencontainers/go-digest"
+    "github.com/vbatts/tar-split/archive/tar"
+)
+
+func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error {
+    // total written so far. Used to retrieve partial offsets in the file
+    dest := ioutils.NewWriteCounter(destFile)
+
+    tr := tar.NewReader(reader)
+    tr.RawAccounting = true
+
+    buf := make([]byte, 4096)
+
+    zstdWriter, err := internal.ZstdWriterWithLevel(dest, level)
+    if err != nil {
+        return err
+    }
+    defer func() {
+        if zstdWriter != nil {
+            zstdWriter.Close()
+            zstdWriter.Flush()
+        }
+    }()
+
+    restartCompression := func() (int64, error) {
+        var offset int64
+        if zstdWriter != nil {
+            if err := zstdWriter.Close(); err != nil {
+                return 0, err
+            }
+            if err := zstdWriter.Flush(); err != nil {
+                return 0, err
+            }
+            offset = dest.Count
+            zstdWriter.Reset(dest)
+        }
+        return offset, nil
+    }
+
+    var metadata []internal.ZstdFileMetadata
+    for {
+        hdr, err := tr.Next()
+        if err != nil {
+            if err == io.EOF {
+                break
+            }
+            return err
+        }
+
+        rawBytes := tr.RawBytes()
+        if _, err := zstdWriter.Write(rawBytes); err != nil {
+            return err
+        }
+        payloadDigester := digest.Canonical.Digester()
+        payloadChecksum := payloadDigester.Hash()
+
+        payloadDest := io.MultiWriter(payloadChecksum, zstdWriter)
+
+        // Now handle the payload, if any
+        var startOffset, endOffset int64
+        checksum := ""
+        for {
+            read, errRead := tr.Read(buf)
+            if errRead != nil && errRead != io.EOF {
+                return err
+            }
+
+            // restart the compression only if there is
+            // a payload.
+            if read > 0 {
+                if startOffset == 0 {
+                    startOffset, err = restartCompression()
+                    if err != nil {
+                        return err
+                    }
+                }
+                _, err := payloadDest.Write(buf[:read])
+                if err != nil {
+                    return err
+                }
+            }
+            if errRead == io.EOF {
+                if startOffset > 0 {
+                    endOffset, err = restartCompression()
+                    if err != nil {
+                        return err
+                    }
+                    checksum = payloadDigester.Digest().String()
+                }
+                break
+            }
+        }
+
+        typ, err := internal.GetType(hdr.Typeflag)
+        if err != nil {
+            return err
+        }
+        xattrs := make(map[string]string)
+        for k, v := range hdr.Xattrs {
+            xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v))
+        }
+        m := internal.ZstdFileMetadata{
+            Type:       typ,
+            Name:       hdr.Name,
+            Linkname:   hdr.Linkname,
+            Mode:       hdr.Mode,
+            Size:       hdr.Size,
+            UID:        hdr.Uid,
+            GID:        hdr.Gid,
+            ModTime:    hdr.ModTime,
+            AccessTime: hdr.AccessTime,
+            ChangeTime: hdr.ChangeTime,
+            Devmajor:   hdr.Devmajor,
+            Devminor:   hdr.Devminor,
+            Xattrs:     xattrs,
+            Digest:     checksum,
+            Offset:     startOffset,
+            EndOffset:  endOffset,
+
+            // ChunkSize is 0 for the last chunk
+            ChunkSize:   0,
+            ChunkOffset: 0,
+            ChunkDigest: checksum,
+        }
+        metadata = append(metadata, m)
+    }
+
+    rawBytes := tr.RawBytes()
+    if _, err := zstdWriter.Write(rawBytes); err != nil {
+        return err
+    }
+    if err := zstdWriter.Flush(); err != nil {
+        return err
+    }
+    if err := zstdWriter.Close(); err != nil {
+        return err
+    }
+    zstdWriter = nil
+
+    return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), metadata, level)
+}
+
+type zstdChunkedWriter struct {
+    tarSplitOut *io.PipeWriter
+    tarSplitErr chan error
+}
+
+func (w zstdChunkedWriter) Close() error {
+    err := <-w.tarSplitErr
+    if err != nil {
+        w.tarSplitOut.Close()
+        return err
+    }
+    return w.tarSplitOut.Close()
+}
+
+func (w zstdChunkedWriter) Write(p []byte) (int, error) {
+    select {
+    case err := <-w.tarSplitErr:
+        w.tarSplitOut.Close()
+        return 0, err
+    default:
+        return w.tarSplitOut.Write(p)
+    }
+}
+
+// zstdChunkedWriterWithLevel writes a zstd compressed tarball where each file is
+// compressed separately so it can be addressed separately. Idea based on CRFS:
+// https://github.com/google/crfs
+// The difference with CRFS is that the zstd compression is used instead of gzip.
+// The reason for it is that zstd supports embedding metadata ignored by the decoder
+// as part of the compressed stream.
+// A manifest json file with all the metadata is appended at the end of the tarball
+// stream, using zstd skippable frames.
+// The final file will look like:
+// [FILE_1][FILE_2]..[FILE_N][SKIPPABLE FRAME 1][SKIPPABLE FRAME 2]
+// Where:
+// [FILE_N]: [ZSTD HEADER][TAR HEADER][PAYLOAD FILE_N][ZSTD FOOTER]
+// [SKIPPABLE FRAME 1]: [ZSTD SKIPPABLE FRAME, SIZE=MANIFEST LENGTH][MANIFEST]
+// [SKIPPABLE FRAME 2]: [ZSTD SKIPPABLE FRAME, SIZE=16][MANIFEST_OFFSET][MANIFEST_LENGTH][MANIFEST_LENGTH_UNCOMPRESSED][MANIFEST_TYPE][CHUNKED_ZSTD_MAGIC_NUMBER]
+// MANIFEST_OFFSET, MANIFEST_LENGTH, MANIFEST_LENGTH_UNCOMPRESSED and CHUNKED_ZSTD_MAGIC_NUMBER are 64 bits unsigned in little endian format.
+func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level int) (io.WriteCloser, error) {
+    ch := make(chan error, 1)
+    r, w := io.Pipe()
+
+    go func() {
+        ch <- writeZstdChunkedStream(out, metadata, r, level)
+        io.Copy(ioutil.Discard, r)
+        r.Close()
+        close(ch)
+    }()
+
+    return zstdChunkedWriter{
+        tarSplitOut: w,
+        tarSplitErr: ch,
+    }, nil
+}
+
+// ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
+func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+    if level == nil {
+        l := 3
+        level = &l
+    }
+
+    return zstdChunkedWriterWithLevel(r, metadata, *level)
+}

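Note: the new package is deliberately free of graph-driver dependencies so github.com/containers/image can call it directly. A hedged usage sketch of the exported entry point, ZstdCompressor, with a synthetic one-file tar stream (tar-write errors ignored to keep it short):

package main

import (
    "archive/tar"
    "bytes"
    "fmt"
    "io"

    "github.com/containers/storage/pkg/chunked/compressor"
)

func main() {
    // Build a tiny tar stream to feed through the chunked compressor.
    var tarBuf bytes.Buffer
    tw := tar.NewWriter(&tarBuf)
    _ = tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0644, Size: 5})
    _, _ = tw.Write([]byte("hello"))
    _ = tw.Close()

    var out bytes.Buffer
    annotations := map[string]string{}
    w, err := compressor.ZstdCompressor(&out, annotations, nil) // nil level defaults to 3
    if err != nil {
        panic(err)
    }
    if _, err := io.Copy(w, &tarBuf); err != nil {
        panic(err)
    }
    if err := w.Close(); err != nil { // manifest and footer are appended here
        panic(err)
    }

    // Close fills in the manifest checksum/position annotations.
    fmt.Println(annotations["io.containers.zstd-chunked.manifest-position"])
    fmt.Println(out.Len(), "compressed bytes")
}
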
vendor/github.com/containers/storage/pkg/chunked/internal/compression.go (generated, vendored; new file, 172 lines)
@@ -0,0 +1,172 @@
+package internal
+
+// NOTE: This is used from github.com/containers/image by callers that
+// don't otherwise use containers/storage, so don't make this depend on any
+// larger software like the graph drivers.
+
+import (
+    "archive/tar"
+    "bytes"
+    "encoding/binary"
+    "encoding/json"
+    "fmt"
+    "io"
+    "time"
+
+    "github.com/klauspost/compress/zstd"
+    "github.com/opencontainers/go-digest"
+)
+
+type ZstdTOC struct {
+    Version int                `json:"version"`
+    Entries []ZstdFileMetadata `json:"entries"`
+}
+
+type ZstdFileMetadata struct {
+    Type       string            `json:"type"`
+    Name       string            `json:"name"`
+    Linkname   string            `json:"linkName,omitempty"`
+    Mode       int64             `json:"mode,omitempty"`
+    Size       int64             `json:"size"`
+    UID        int               `json:"uid"`
+    GID        int               `json:"gid"`
+    ModTime    time.Time         `json:"modtime"`
+    AccessTime time.Time         `json:"accesstime"`
+    ChangeTime time.Time         `json:"changetime"`
+    Devmajor   int64             `json:"devMajor"`
+    Devminor   int64             `json:"devMinor"`
+    Xattrs     map[string]string `json:"xattrs,omitempty"`
+    Digest     string            `json:"digest,omitempty"`
+    Offset     int64             `json:"offset,omitempty"`
+    EndOffset  int64             `json:"endOffset,omitempty"`
+
+    // Currently chunking is not supported.
+    ChunkSize   int64  `json:"chunkSize,omitempty"`
+    ChunkOffset int64  `json:"chunkOffset,omitempty"`
+    ChunkDigest string `json:"chunkDigest,omitempty"`
+}
+
+const (
+    TypeReg     = "reg"
+    TypeChunk   = "chunk"
+    TypeLink    = "hardlink"
+    TypeChar    = "char"
+    TypeBlock   = "block"
+    TypeDir     = "dir"
+    TypeFifo    = "fifo"
+    TypeSymlink = "symlink"
+)
+
+var TarTypes = map[byte]string{
+    tar.TypeReg:     TypeReg,
+    tar.TypeRegA:    TypeReg,
+    tar.TypeLink:    TypeLink,
+    tar.TypeChar:    TypeChar,
+    tar.TypeBlock:   TypeBlock,
+    tar.TypeDir:     TypeDir,
+    tar.TypeFifo:    TypeFifo,
+    tar.TypeSymlink: TypeSymlink,
+}
+
+func GetType(t byte) (string, error) {
+    r, found := TarTypes[t]
+    if !found {
+        return "", fmt.Errorf("unknown tarball type: %v", t)
+    }
+    return r, nil
+}
+
+const (
+    ManifestChecksumKey = "io.containers.zstd-chunked.manifest-checksum"
+    ManifestInfoKey     = "io.containers.zstd-chunked.manifest-position"
+
+    // ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
+    ManifestTypeCRFS = 1
+
+    // FooterSizeSupported is the footer size supported by this implementation.
+    // Newer versions of the image format might increase this value, so reject
+    // any version that is not supported.
+    FooterSizeSupported = 40
+)
+
+var (
+    // when the zstd decoder encounters a skippable frame + 1 byte for the size, it
+    // will ignore it.
+    // https://tools.ietf.org/html/rfc8478#section-3.1.2
+    skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18}
+
+    ZstdChunkedFrameMagic = []byte{0x47, 0x6e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78}
+)
+
+func appendZstdSkippableFrame(dest io.Writer, data []byte) error {
+    if _, err := dest.Write(skippableFrameMagic); err != nil {
+        return err
+    }
+
+    var size []byte = make([]byte, 4)
+    binary.LittleEndian.PutUint32(size, uint32(len(data)))
+    if _, err := dest.Write(size); err != nil {
+        return err
+    }
+    if _, err := dest.Write(data); err != nil {
+        return err
+    }
+    return nil
+}
+
+func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, metadata []ZstdFileMetadata, level int) error {
+    // 8 is the size of the zstd skippable frame header + the frame size
+    manifestOffset := offset + 8
+
+    toc := ZstdTOC{
+        Version: 1,
+        Entries: metadata,
+    }
+
+    // Generate the manifest
+    manifest, err := json.Marshal(toc)
+    if err != nil {
+        return err
+    }
+
+    var compressedBuffer bytes.Buffer
+    zstdWriter, err := ZstdWriterWithLevel(&compressedBuffer, level)
+    if err != nil {
+        return err
+    }
+    if _, err := zstdWriter.Write(manifest); err != nil {
+        zstdWriter.Close()
+        return err
+    }
+    if err := zstdWriter.Close(); err != nil {
+        return err
+    }
+    compressedManifest := compressedBuffer.Bytes()
+
+    manifestDigester := digest.Canonical.Digester()
+    manifestChecksum := manifestDigester.Hash()
+    if _, err := manifestChecksum.Write(compressedManifest); err != nil {
+        return err
+    }
+
+    outMetadata[ManifestChecksumKey] = manifestDigester.Digest().String()
+    outMetadata[ManifestInfoKey] = fmt.Sprintf("%d:%d:%d:%d", manifestOffset, len(compressedManifest), len(manifest), ManifestTypeCRFS)
+    if err := appendZstdSkippableFrame(dest, compressedManifest); err != nil {
+        return err
+    }
+
+    // Store the offset to the manifest and its size in LE order
+    var manifestDataLE []byte = make([]byte, FooterSizeSupported)
+    binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset)
+    binary.LittleEndian.PutUint64(manifestDataLE[8:], uint64(len(compressedManifest)))
+    binary.LittleEndian.PutUint64(manifestDataLE[16:], uint64(len(manifest)))
+    binary.LittleEndian.PutUint64(manifestDataLE[24:], uint64(ManifestTypeCRFS))
+    copy(manifestDataLE[32:], ZstdChunkedFrameMagic)
+
+    return appendZstdSkippableFrame(dest, manifestDataLE)
+}
+
+func ZstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) {
+    el := zstd.EncoderLevelFromZstd(level)
+    return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
+}

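Note: appendZstdSkippableFrame explains the otherwise-magic manifestOffset := offset + 8 in WriteZstdChunkedManifest: every skippable frame starts with the 4-byte magic plus a 4-byte little-endian payload length, so the payload begins 8 bytes past the frame start. A sketch that reproduces the frame layout from the diff and checks that arithmetic:

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

// Magic from the diff (the RFC 8478 skippable-frame range).
var skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18}

// writeSkippableFrame mirrors appendZstdSkippableFrame: magic, 4-byte LE
// payload length, then the payload itself.
func writeSkippableFrame(dest *bytes.Buffer, data []byte) {
    dest.Write(skippableFrameMagic)
    var size [4]byte
    binary.LittleEndian.PutUint32(size[:], uint32(len(data)))
    dest.Write(size[:])
    dest.Write(data)
}

func main() {
    var out bytes.Buffer
    payload := []byte("manifest bytes")
    frameStart := out.Len()
    writeSkippableFrame(&out, payload)

    // The payload begins 8 bytes after the frame start, which is why
    // WriteZstdChunkedManifest records manifestOffset = offset + 8.
    fmt.Println(bytes.Equal(out.Bytes()[frameStart+8:], payload)) // true
    fmt.Println(out.Len() == frameStart+8+len(payload))           // true
}
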
vendor/github.com/containers/storage/pkg/chunked/storage_linux.go (generated, vendored; 41 lines changed)
@ -19,6 +19,7 @@ import (
|
|||||||
graphdriver "github.com/containers/storage/drivers"
|
graphdriver "github.com/containers/storage/drivers"
|
||||||
driversCopy "github.com/containers/storage/drivers/copy"
|
driversCopy "github.com/containers/storage/drivers/copy"
|
||||||
"github.com/containers/storage/pkg/archive"
|
"github.com/containers/storage/pkg/archive"
|
||||||
|
"github.com/containers/storage/pkg/chunked/internal"
|
||||||
"github.com/containers/storage/pkg/idtools"
|
"github.com/containers/storage/pkg/idtools"
|
||||||
"github.com/containers/storage/types"
|
"github.com/containers/storage/types"
|
||||||
"github.com/klauspost/compress/zstd"
|
"github.com/klauspost/compress/zstd"
|
||||||
@ -39,7 +40,7 @@ const (
|
|||||||
type chunkedZstdDiffer struct {
|
type chunkedZstdDiffer struct {
|
||||||
stream ImageSourceSeekable
|
stream ImageSourceSeekable
|
||||||
manifest []byte
|
manifest []byte
|
||||||
layersMetadata map[string][]zstdFileMetadata
|
layersMetadata map[string][]internal.ZstdFileMetadata
|
||||||
layersTarget map[string]string
|
layersTarget map[string]string
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -75,11 +76,11 @@ func copyFileContent(src, destFile, root string, dirfd int, missingDirsMode, mod
|
|||||||
return dstFile, st.Size(), err
|
return dstFile, st.Size(), err
|
||||||
}
|
}
|
||||||
|
|
||||||
func prepareOtherLayersCache(layersMetadata map[string][]zstdFileMetadata) map[string]map[string]*zstdFileMetadata {
|
func prepareOtherLayersCache(layersMetadata map[string][]internal.ZstdFileMetadata) map[string]map[string]*internal.ZstdFileMetadata {
|
||||||
maps := make(map[string]map[string]*zstdFileMetadata)
|
maps := make(map[string]map[string]*internal.ZstdFileMetadata)
|
||||||
|
|
||||||
for layerID, v := range layersMetadata {
|
for layerID, v := range layersMetadata {
|
||||||
r := make(map[string]*zstdFileMetadata)
|
r := make(map[string]*internal.ZstdFileMetadata)
|
||||||
for i := range v {
|
for i := range v {
|
||||||
r[v[i].Digest] = &v[i]
|
r[v[i].Digest] = &v[i]
|
||||||
}
|
}
|
||||||
@ -88,13 +89,13 @@ func prepareOtherLayersCache(layersMetadata map[string][]zstdFileMetadata) map[s
|
|||||||
return maps
|
return maps
|
||||||
}
|
}
|
||||||
|
|
||||||
func getLayersCache(store storage.Store) (map[string][]zstdFileMetadata, map[string]string, error) {
|
func getLayersCache(store storage.Store) (map[string][]internal.ZstdFileMetadata, map[string]string, error) {
|
||||||
allLayers, err := store.Layers()
|
allLayers, err := store.Layers()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
layersMetadata := make(map[string][]zstdFileMetadata)
|
layersMetadata := make(map[string][]internal.ZstdFileMetadata)
|
||||||
layersTarget := make(map[string]string)
|
layersTarget := make(map[string]string)
|
||||||
for _, r := range allLayers {
|
for _, r := range allLayers {
|
||||||
manifestReader, err := store.LayerBigData(r.ID, bigDataKey)
|
manifestReader, err := store.LayerBigData(r.ID, bigDataKey)
|
||||||
@@ -106,7 +107,7 @@ func getLayersCache(store storage.Store) (map[string][]zstdFileMetadata, map[str
 		if err != nil {
 			return nil, nil, err
 		}
-		var toc zstdTOC
+		var toc internal.ZstdTOC
 		if err := json.Unmarshal(manifest, &toc); err != nil {
 			continue
 		}
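getLayersCache deliberately skips layers whose cached manifest fails to decode (the continue) rather than aborting the whole cache build. A toy version of that decode-or-skip step; the JSON field names below are illustrative, not the real zstd:chunked TOC schema:

package main

import (
	"encoding/json"
	"fmt"
)

type tocEntry struct {
	Name   string `json:"name"`
	Digest string `json:"digest"`
}

type toc struct {
	Version int        `json:"version"`
	Entries []tocEntry `json:"entries"`
}

func main() {
	data := []byte(`{"version":1,"entries":[{"name":"usr/bin/ls","digest":"sha256:abc"}]}`)
	var t toc
	if err := json.Unmarshal(data, &t); err != nil {
		// Mirrors the diff: a malformed cached manifest is skipped, not fatal.
		fmt.Println("skipping layer:", err)
		return
	}
	fmt.Println(t.Entries[0].Name)
}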
@@ -123,7 +124,7 @@ func getLayersCache(store storage.Store) (map[string][]zstdFileMetadata, map[str
 
 // GetDiffer returns a differ than can be used with ApplyDiffWithDiffer.
 func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
-	if _, ok := annotations[manifestChecksumKey]; ok {
+	if _, ok := annotations[internal.ManifestChecksumKey]; ok {
 		return makeZstdChunkedDiffer(ctx, store, blobSize, annotations, iss)
 	}
 	return nil, errors.New("blob type not supported for partial retrieval")
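GetDiffer enables the chunked path only when the blob's annotations advertise a zstd:chunked manifest; everything else falls back to a normal pull. A stripped-down sketch of that gate (the key string is spelled out here as an assumption for illustration; the vendored code goes through the internal.ManifestChecksumKey constant):

package main

import (
	"errors"
	"fmt"
)

// Assumed annotation key, written out only for this example.
const manifestChecksumKey = "io.github.containers.zstd-chunked.manifest-checksum"

func supportsPartialPull(annotations map[string]string) error {
	if _, ok := annotations[manifestChecksumKey]; !ok {
		return errors.New("blob type not supported for partial retrieval")
	}
	return nil
}

func main() {
	fmt.Println(supportsPartialPull(map[string]string{}))                                  // not supported
	fmt.Println(supportsPartialPull(map[string]string{manifestChecksumKey: "sha256:abc"})) // <nil>
}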
@@ -147,7 +148,7 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
 	}, nil
 }
 
-func findFileInOtherLayers(file zstdFileMetadata, root string, dirfd int, layersMetadata map[string]map[string]*zstdFileMetadata, layersTarget map[string]string, missingDirsMode os.FileMode) (*os.File, int64, error) {
+func findFileInOtherLayers(file internal.ZstdFileMetadata, root string, dirfd int, layersMetadata map[string]map[string]*internal.ZstdFileMetadata, layersTarget map[string]string, missingDirsMode os.FileMode) (*os.File, int64, error) {
 	// this is ugly, needs to be indexed
 	for layerID, checksums := range layersMetadata {
 		m, found := checksums[file.Digest]
@@ -194,7 +195,7 @@ func getFileDigest(f *os.File) (digest.Digest, error) {
 // findFileOnTheHost checks whether the requested file already exist on the host and copies the file content from there if possible.
 // It is currently implemented to look only at the file with the same path. Ideally it can detect the same content also at different
 // paths.
-func findFileOnTheHost(file zstdFileMetadata, root string, dirfd int, missingDirsMode os.FileMode) (*os.File, int64, error) {
+func findFileOnTheHost(file internal.ZstdFileMetadata, root string, dirfd int, missingDirsMode os.FileMode) (*os.File, int64, error) {
 	sourceFile := filepath.Clean(filepath.Join("/", file.Name))
 	if !strings.HasPrefix(sourceFile, "/usr/") {
 		// limit host deduplication to files under /usr.
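The two lines at the bottom of that hunk are the host-deduplication guard: joining against "/" before Clean anchors the name at the filesystem root, so ".." components cannot escape it, and only results under /usr/ are then considered. A small demonstration of that normalization:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func hostPathAllowed(name string) (string, bool) {
	cleaned := filepath.Clean(filepath.Join("/", name)) // leading "/.." collapses to "/"
	return cleaned, strings.HasPrefix(cleaned, "/usr/")
}

func main() {
	for _, n := range []string{"usr/bin/ls", "../../etc/shadow", "etc/passwd"} {
		p, ok := hostPathAllowed(n)
		fmt.Printf("%-18s -> %-12s allowed=%v\n", n, p, ok)
	}
}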
@@ -251,7 +252,7 @@ func findFileOnTheHost(file zstdFileMetadata, root string, dirfd int, missingDir
 	return dstFile, written, nil
 }
 
-func maybeDoIDRemap(manifest []zstdFileMetadata, options *archive.TarOptions) error {
+func maybeDoIDRemap(manifest []internal.ZstdFileMetadata, options *archive.TarOptions) error {
 	if options.ChownOpts == nil && len(options.UIDMaps) == 0 || len(options.GIDMaps) == 0 {
 		return nil
 	}
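One reading note on the guard in maybeDoIDRemap: Go's && binds tighter than ||, so the condition parses as (options.ChownOpts == nil && len(options.UIDMaps) == 0) || len(options.GIDMaps) == 0. The snippet below demonstrates nothing more than that precedence:

package main

import "fmt"

func main() {
	a, b, c := false, false, true
	fmt.Println(a && b || c)   // true: parsed as (a && b) || c
	fmt.Println(a && (b || c)) // false: explicit grouping changes the result
}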
@@ -278,7 +279,7 @@ func maybeDoIDRemap(manifest []zstdFileMetadata, options *archive.TarOptions) er
 }
 
 type missingFile struct {
-	File *zstdFileMetadata
+	File *internal.ZstdFileMetadata
 	Gap  int64
 }
 
@@ -291,7 +292,7 @@ type missingChunk struct {
 	Files []missingFile
 }
 
-func setFileAttrs(file *os.File, mode os.FileMode, metadata *zstdFileMetadata, options *archive.TarOptions) error {
+func setFileAttrs(file *os.File, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
 	if file == nil || file.Fd() < 0 {
 		return errors.Errorf("invalid file")
 	}
@@ -346,7 +347,7 @@ func openFileUnderRoot(name, root string, dirfd int, flags uint64, mode os.FileM
 	return os.NewFile(uintptr(fd), name), nil
 }
 
-func createFileFromZstdStream(dest string, dirfd int, reader io.Reader, missingDirsMode, mode os.FileMode, metadata *zstdFileMetadata, options *archive.TarOptions) (err error) {
+func createFileFromZstdStream(dest string, dirfd int, reader io.Reader, missingDirsMode, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) (err error) {
 	file, err := openFileUnderRoot(metadata.Name, dest, dirfd, newFileFlags, 0)
 	if err != nil {
 		return err
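createFileFromZstdStream resolves every path through openFileUnderRoot, i.e. relative to an already-open directory file descriptor instead of an absolute path string. A minimal sketch of that fd-relative pattern using golang.org/x/sys/unix (flags and error handling are simplified assumptions, not the vendored implementation):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// openUnderRoot opens name relative to rootfd, refusing to follow a symlink
// in the final path component.
func openUnderRoot(rootfd int, name string) (*os.File, error) {
	fd, err := unix.Openat(rootfd, name, unix.O_RDONLY|unix.O_NOFOLLOW, 0)
	if err != nil {
		return nil, err
	}
	return os.NewFile(uintptr(fd), name), nil
}

func main() {
	root, err := os.Open("/etc")
	if err != nil {
		panic(err)
	}
	defer root.Close()
	f, err := openUnderRoot(int(root.Fd()), "hostname")
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	defer f.Close()
	fmt.Println("opened", f.Name())
}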
@@ -497,7 +498,7 @@ func retrieveMissingFiles(input *chunkedZstdDiffer, dest string, dirfd int, miss
 	return nil
 }
 
-func safeMkdir(target string, dirfd int, mode os.FileMode, metadata *zstdFileMetadata, options *archive.TarOptions) error {
+func safeMkdir(target string, dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
 	parent := filepath.Dir(metadata.Name)
 	base := filepath.Base(metadata.Name)
 
@@ -526,7 +527,7 @@ func safeMkdir(target string, dirfd int, mode os.FileMode, metadata *zstdFileMet
 	return setFileAttrs(file, mode, metadata, options)
 }
 
-func safeLink(target string, dirfd int, mode os.FileMode, metadata *zstdFileMetadata, options *archive.TarOptions) error {
+func safeLink(target string, dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
 	sourceFile, err := openFileUnderRoot(metadata.Linkname, target, dirfd, unix.O_RDONLY, 0)
 	if err != nil {
 		return err
@@ -558,7 +559,7 @@ func safeLink(target string, dirfd int, mode os.FileMode, metadata *zstdFileMeta
 	return setFileAttrs(newFile, mode, metadata, options)
 }
 
-func safeSymlink(target string, dirfd int, mode os.FileMode, metadata *zstdFileMetadata, options *archive.TarOptions) error {
+func safeSymlink(target string, dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
 	destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name)
 	destDirFd := dirfd
 	if destDir != "." {
@@ -636,7 +637,7 @@ type hardLinkToCreate struct {
 	dest     string
 	dirfd    int
 	mode     os.FileMode
-	metadata *zstdFileMetadata
+	metadata *internal.ZstdFileMetadata
 }
 
 func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) {
@@ -659,7 +660,7 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
 	}
 
 	// Generate the manifest
-	var toc zstdTOC
+	var toc internal.ZstdTOC
 	if err := json.Unmarshal(d.manifest, &toc); err != nil {
 		return output, err
 	}
@@ -667,7 +668,7 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
 	whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
 
 	var missingChunks []missingChunk
-	var mergedEntries []zstdFileMetadata
+	var mergedEntries []internal.ZstdFileMetadata
 
 	if err := maybeDoIDRemap(toc.Entries, options); err != nil {
 		return output, err
3
vendor/github.com/containers/storage/storage.conf
generated
vendored
@@ -69,6 +69,9 @@ additionalimagestores = [
 # and vfs drivers.
 #ignore_chown_errors = "false"
 
+# Inodes is used to set a maximum inodes of the container image.
+# inodes = ""
+
 # Path to an helper program to use for mounting the file system instead of mounting it
 # directly.
 #mount_program = "/usr/bin/fuse-overlayfs"
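The new inodes knob caps how many inodes a container image may consume, alongside the existing size-based limits. A hypothetical storage.conf excerpt with the option enabled (the value is invented for illustration):

[storage.options]
# Limit each container image to at most one million inodes.
inodes = "1048576"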
10
vendor/github.com/containers/storage/types/utils.go
generated
vendored
@@ -2,6 +2,7 @@ package types
 
 import (
 	"fmt"
+	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strconv"
@@ -74,9 +75,12 @@ func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, e
 		return runtimeDir, nil
 	}
 
-	runUserDir := env.getRunUserDir()
-	if isRootlessRuntimeDirOwner(runUserDir, env) {
-		return runUserDir, nil
+	initCommand, err := ioutil.ReadFile(env.getProcCommandFile())
+	if err != nil || string(initCommand) == "systemd" {
+		runUserDir := env.getRunUserDir()
+		if isRootlessRuntimeDirOwner(runUserDir, env) {
+			return runUserDir, nil
+		}
 	}
 
 	tmpPerUserDir := env.getTmpPerUserDir()
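The rewritten block above now uses the per-user run directory only when the init system appears to be systemd, or when the probe itself fails (which preserves the old behavior), since systemd is what manages /run/user/$UID. A standalone sketch of the probe; the /proc/1/comm path and the TrimSpace are assumptions of this example, while the vendored code goes through env.getProcCommandFile():

package main

import (
	"fmt"
	"io/ioutil"
	"strings"
)

func runningUnderSystemd() bool {
	b, err := ioutil.ReadFile("/proc/1/comm") // name of PID 1
	if err != nil {
		return true // as in the diff, a failed probe keeps the old code path
	}
	return strings.TrimSpace(string(b)) == "systemd"
}

func main() {
	fmt.Println("systemd init:", runningUnderSystemd())
}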
4
vendor/modules.txt
vendored
|||||||
github.com/containers/psgo/internal/host
|
github.com/containers/psgo/internal/host
|
||||||
github.com/containers/psgo/internal/proc
|
github.com/containers/psgo/internal/proc
|
||||||
github.com/containers/psgo/internal/process
|
github.com/containers/psgo/internal/process
|
||||||
# github.com/containers/storage v1.32.6
|
# github.com/containers/storage v1.33.0
|
||||||
github.com/containers/storage
|
github.com/containers/storage
|
||||||
github.com/containers/storage/drivers
|
github.com/containers/storage/drivers
|
||||||
github.com/containers/storage/drivers/aufs
|
github.com/containers/storage/drivers/aufs
|
||||||
@@ -214,6 +214,8 @@ github.com/containers/storage/drivers/zfs
 github.com/containers/storage/pkg/archive
 github.com/containers/storage/pkg/chrootarchive
 github.com/containers/storage/pkg/chunked
+github.com/containers/storage/pkg/chunked/compressor
+github.com/containers/storage/pkg/chunked/internal
 github.com/containers/storage/pkg/config
 github.com/containers/storage/pkg/devicemapper
 github.com/containers/storage/pkg/directory