Merge pull request #20273 from giuseppe/bump-storage

vendor: bump c/storage
Authored by openshift-ci[bot], committed via GitHub on 2023-10-06 06:26:03 +00:00.
35 changed files with 1199 additions and 234 deletions

go.mod (4 changed lines)
View File

@@ -20,7 +20,7 @@ require (
	github.com/containers/libhvee v0.4.1-0.20231002175221-e9b181166118
	github.com/containers/ocicrypt v1.1.8
	github.com/containers/psgo v1.8.0
-	github.com/containers/storage v1.50.2
+	github.com/containers/storage v1.50.3-0.20231005112617-44418abb2d89
	github.com/coreos/go-systemd/v22 v22.5.0
	github.com/coreos/stream-metadata-go v0.4.3
	github.com/crc-org/vfkit v0.1.2-0.20230829083117-09e62065eb6e
@@ -138,7 +138,7 @@ require (
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/jinzhu/copier v0.4.0 // indirect
	github.com/josharian/intern v1.0.0 // indirect
-	github.com/klauspost/compress v1.16.7 // indirect
+	github.com/klauspost/compress v1.17.0 // indirect
	github.com/klauspost/cpuid/v2 v2.2.4 // indirect
	github.com/klauspost/pgzip v1.2.6 // indirect
	github.com/kr/fs v0.1.0 // indirect

go.sum (8 changed lines)
View File

@@ -271,8 +271,8 @@ github.com/containers/ocicrypt v1.1.8/go.mod h1:jM362hyBtbwLMWzXQZTlkjKGAQf/BN/L
github.com/containers/psgo v1.8.0 h1:2loGekmGAxM9ir5OsXWEfGwFxorMPYnc6gEDsGFQvhY=
github.com/containers/psgo v1.8.0/go.mod h1:T8ZxnX3Ur4RvnhxFJ7t8xJ1F48RhiZB4rSrOaR/qGHc=
github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s=
-github.com/containers/storage v1.50.2 h1:Fys4BjFUVNRBEXlO70hFI48VW4EXsgnGisTpk9tTMsE=
-github.com/containers/storage v1.50.2/go.mod h1:dpspZsUrcKD8SpTofvKWhwPDHD0MkO4Q7VE+oYdWkiA=
+github.com/containers/storage v1.50.3-0.20231005112617-44418abb2d89 h1:IAFsJzjIalzJCqE6786P9K1qbrYBd1abeL8/ip/waNA=
+github.com/containers/storage v1.50.3-0.20231005112617-44418abb2d89/go.mod h1:HZESuTLIRmcs00JFSZr6daHD/B51J0ZCZr0T7uDDc9Y=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
@@ -678,8 +678,8 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
-github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
-github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM=
+github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=

View File

@@ -1 +1 @@
-1.50.2
+1.50.3-dev

View File

@@ -275,3 +275,36 @@ func supportsIdmappedLowerLayers(home string) (bool, error) {
	}()
	return true, nil
}
+
+// supportsDataOnlyLayers checks if the kernel supports mounting an overlay
+// file system that uses data-only layers.
+func supportsDataOnlyLayers(home string) (bool, error) {
+	layerDir, err := os.MkdirTemp(home, "compat")
+	if err != nil {
+		return false, err
+	}
+	defer func() {
+		_ = os.RemoveAll(layerDir)
+	}()
+
+	mergedDir := filepath.Join(layerDir, "merged")
+	lowerDir := filepath.Join(layerDir, "lower")
+	lowerDirDataOnly := filepath.Join(layerDir, "lower-data")
+	upperDir := filepath.Join(layerDir, "upper")
+	workDir := filepath.Join(layerDir, "work")
+
+	_ = idtools.MkdirAs(mergedDir, 0o700, 0, 0)
+	_ = idtools.MkdirAs(lowerDir, 0o700, 0, 0)
+	_ = idtools.MkdirAs(lowerDirDataOnly, 0o700, 0, 0)
+	_ = idtools.MkdirAs(upperDir, 0o700, 0, 0)
+	_ = idtools.MkdirAs(workDir, 0o700, 0, 0)
+
+	opts := fmt.Sprintf("lowerdir=%s::%s,upperdir=%s,workdir=%s,metacopy=on", lowerDir, lowerDirDataOnly, upperDir, workDir)
+	flags := uintptr(0)
+	if err := unix.Mount("overlay", mergedDir, "overlay", flags, opts); err != nil {
+		return false, err
+	}
+	_ = unix.Unmount(mergedDir, unix.MNT_DETACH)
+
+	return true, nil
+}
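The double colon in the lowerdir value above is the kernel's marker for a data-only lower layer: such a layer contributes file content only and is never consulted for metadata lookups, which is the overlay feature composefs relies on. A standalone sketch of the same probe, assuming Linux, root privileges, and golang.org/x/sys/unix (names and paths here are illustrative, not part of this commit):

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

func main() {
	base, err := os.MkdirTemp("", "dataonly-probe")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(base)

	dirs := map[string]string{}
	for _, name := range []string{"merged", "lower", "lower-data", "upper", "work"} {
		d := filepath.Join(base, name)
		if err := os.Mkdir(d, 0o700); err != nil {
			panic(err)
		}
		dirs[name] = d
	}

	// "lower::lower-data" marks lower-data as a data-only layer.
	opts := fmt.Sprintf("lowerdir=%s::%s,upperdir=%s,workdir=%s,metacopy=on",
		dirs["lower"], dirs["lower-data"], dirs["upper"], dirs["work"])
	if err := unix.Mount("overlay", dirs["merged"], "overlay", 0, opts); err != nil {
		fmt.Println("data-only layers not supported:", err)
		return
	}
	_ = unix.Unmount(dirs["merged"], unix.MNT_DETACH)
	fmt.Println("data-only layers supported")
}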

View File

@@ -93,7 +93,7 @@ func generateComposeFsBlob(toc []byte, composefsDir string) error {
	fd, err := unix.Openat(unix.AT_FDCWD, destFile, unix.O_WRONLY|unix.O_CREAT|unix.O_TRUNC|unix.O_EXCL|unix.O_CLOEXEC, 0o644)
	if err != nil {
-		return fmt.Errorf("failed to open output file: %w", err)
+		return fmt.Errorf("failed to open output file %q: %w", destFile, err)
	}
	outFd := os.NewFile(uintptr(fd), "outFd")
@@ -166,7 +166,7 @@ func hasACL(path string) (bool, error) {
func mountComposefsBlob(dataDir, mountPoint string) error {
	blobFile := getComposefsBlob(dataDir)
-	loop, err := loopback.AttachLoopDevice(blobFile)
+	loop, err := loopback.AttachLoopDeviceRO(blobFile)
	if err != nil {
		return err
	}

View File

@@ -141,14 +141,27 @@ func mountOverlayFromMain() {
	// the new value for the list of lowers, because it's shorter.
	if lowerv != "" {
		lowers := strings.Split(lowerv, ":")
-		for i := range lowers {
-			lowerFd, err := unix.Open(lowers[i], unix.O_RDONLY, 0)
+		var newLowers []string
+		dataOnly := false
+		for _, lowerPath := range lowers {
+			if lowerPath == "" {
+				dataOnly = true
+				continue
+			}
+			lowerFd, err := unix.Open(lowerPath, unix.O_RDONLY, 0)
			if err != nil {
				fatal(err)
			}
-			lowers[i] = fmt.Sprintf("%d", lowerFd)
+			var lower string
+			if dataOnly {
+				lower = fmt.Sprintf(":%d", lowerFd)
+				dataOnly = false
+			} else {
+				lower = fmt.Sprintf("%d", lowerFd)
+			}
+			newLowers = append(newLowers, lower)
		}
-		lowerv = strings.Join(lowers, ":")
+		lowerv = strings.Join(newLowers, ":")
	}

	// Reconstruct the Label field.
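The rewritten loop swaps each lower path for an already-open file descriptor (keeping the mount option string short) while preserving the data-only marker: splitting "a::b" on ":" yields an empty element, which only flags that the next entry must be re-emitted with a leading colon. A toy walk-through with made-up descriptor numbers:

package main

import (
	"fmt"
	"strings"
)

func main() {
	lowers := strings.Split("lower1::lower2", ":") // ["lower1", "", "lower2"]
	fakeFd := 7                                    // pretend unix.Open returned 7, then 8
	var newLowers []string
	dataOnly := false
	for _, p := range lowers {
		if p == "" {
			dataOnly = true
			continue
		}
		entry := fmt.Sprintf("%d", fakeFd)
		if dataOnly {
			entry = ":" + entry
			dataOnly = false
		}
		newLowers = append(newLowers, entry)
		fakeFd++
	}
	fmt.Println(strings.Join(newLowers, ":")) // prints "7::8"
}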

View File

@@ -1447,7 +1447,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts
	needsIDMapping := !disableShifting && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 && d.options.mountProgram == ""

	if len(optsList) == 0 {
-		optsList = strings.Split(d.options.mountOptions, ",")
+		if d.options.mountOptions != "" {
+			optsList = strings.Split(d.options.mountOptions, ",")
+		}
	} else {
		// If metacopy=on is present in d.options.mountOptions it must be present in the mount
		// options otherwise the kernel refuses to follow the metacopy xattr.
@@ -1540,7 +1542,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts
		}
	}()

-	maybeAddComposefsMount := func(lowerID string, i int) (string, error) {
+	maybeAddComposefsMount := func(lowerID string, i int, readWrite bool) (string, error) {
		composefsBlob := d.getComposefsData(lowerID)
		_, err = os.Stat(composefsBlob)
		if err != nil {
@@ -1551,6 +1553,10 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts
		}
		logrus.Debugf("overlay: using composefs blob %s for lower %s", composefsBlob, lowerID)

+		if readWrite && i == 0 {
+			return "", fmt.Errorf("cannot mount a composefs layer as writeable")
+		}
+
		dest := filepath.Join(composefsLayers, fmt.Sprintf("%d", i))
		if err := os.MkdirAll(dest, 0o700); err != nil {
			return "", err
@@ -1571,7 +1577,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts
	diffDir := path.Join(workDirBase, "diff")

-	if dest, err := maybeAddComposefsMount(id, 0); err != nil {
+	if dest, err := maybeAddComposefsMount(id, 0, readWrite); err != nil {
		return "", err
	} else if dest != "" {
		diffDir = dest
@@ -1623,7 +1629,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts
			return "", err
		}
		lowerID := filepath.Base(filepath.Dir(linkContent))
-		composefsMount, err := maybeAddComposefsMount(lowerID, i+1)
+		composefsMount, err := maybeAddComposefsMount(lowerID, i+1, readWrite)
		if err != nil {
			return "", err
		}
@@ -1655,8 +1661,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts
		optsList = append(optsList, "metacopy=on", "redirect_dir=on")
	}

-	absLowers = append(absLowers, composeFsLayers...)
-
	if len(absLowers) == 0 {
		absLowers = append(absLowers, path.Join(dir, "empty"))
	}
@@ -1750,11 +1754,20 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts
		absLowers = newAbsDir
	}

+	lowerDirs := strings.Join(absLowers, ":")
+	if len(composeFsLayers) > 0 {
+		composeFsLayersLowerDirs := strings.Join(composeFsLayers, "::")
+		lowerDirs = lowerDirs + "::" + composeFsLayersLowerDirs
+	}
+	// absLowers is no longer valid now that composeFsLayers have been folded
+	// into lowerDirs, so prevent further use.
+	absLowers = nil //nolint:ineffassign
+
	var opts string
	if readWrite {
-		opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir)
+		opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDirs, diffDir, workdir)
	} else {
-		opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":"))
+		opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, lowerDirs)
	}
	if len(optsList) > 0 {
		opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ","))
@@ -1798,9 +1811,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts
		if readWrite {
			diffDir := path.Join(id, "diff")
			workDir := path.Join(id, "work")
-			opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workDir)
+			opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDirs, diffDir, workDir)
		} else {
-			opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":"))
+			opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, lowerDirs)
		}
		if len(optsList) > 0 {
			opts = strings.Join(append([]string{opts}, optsList...), ",")
@@ -2007,11 +2020,34 @@ func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error {
	return os.RemoveAll(stagingDirectory)
}

+func (d *Driver) supportsDataOnlyLayers() (bool, error) {
+	feature := "dataonly-layers"
+	overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(d.runhome, feature)
+	if err == nil {
+		if overlayCacheResult {
+			logrus.Debugf("Cached value indicated that data-only layers for overlay are supported")
+			return true, nil
+		}
+		logrus.Debugf("Cached value indicated that data-only layers for overlay are not supported")
+		return false, errors.New(overlayCacheText)
+	}
+	supportsDataOnly, err := supportsDataOnlyLayers(d.home)
+	if err2 := cachedFeatureRecord(d.runhome, feature, supportsDataOnly, ""); err2 != nil {
+		return false, fmt.Errorf("recording overlay data-only layers support status: %w", err2)
+	}
+	return supportsDataOnly, err
+}
+
func (d *Driver) useComposeFs() bool {
	if !composeFsSupported() || unshare.IsRootless() {
		return false
	}
-	return true
+	supportsDataOnlyLayers, err := d.supportsDataOnlyLayers()
+	if err != nil {
+		logrus.Debugf("Check for data-only layers failed with: %v", err)
+		return false
+	}
+	return supportsDataOnlyLayers
}

// ApplyDiff applies the changes in the new layer using the specified function
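Net effect of the lowerDirs change: regular lower layers keep the single-colon separator, while each entry collected in composeFsLayers is appended after a double colon, so the kernel treats those mounts as data-only lower layers. A small sketch of the string construction with invented paths:

package main

import (
	"fmt"
	"strings"
)

func main() {
	absLowers := []string{"/store/overlay/l1/diff", "/store/overlay/l2/diff"}
	composeFsLayers := []string{"/store/overlay/composefs-layers/1"}

	lowerDirs := strings.Join(absLowers, ":")
	if len(composeFsLayers) > 0 {
		lowerDirs += "::" + strings.Join(composeFsLayers, "::")
	}
	fmt.Println(lowerDirs)
	// /store/overlay/l1/diff:/store/overlay/l2/diff::/store/overlay/composefs-layers/1
}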

View File

@@ -114,6 +114,16 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File) (loop *os.File, err error) {
// AttachLoopDevice attaches the given sparse file to the next
// available loopback device. It returns an opened *os.File.
func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
+	return attachLoopDevice(sparseName, false)
+}
+
+// AttachLoopDeviceRO attaches the given sparse file opened read-only to
+// the next available loopback device. It returns an opened *os.File.
+func AttachLoopDeviceRO(sparseName string) (loop *os.File, err error) {
+	return attachLoopDevice(sparseName, true)
+}
+
+func attachLoopDevice(sparseName string, readonly bool) (loop *os.File, err error) {
	// Try to retrieve the next available loopback device via syscall.
	// If it fails, we discard error and start looping for a
	// loopback from index 0.
@@ -122,8 +132,14 @@ func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
		logrus.Debugf("Error retrieving the next available loopback: %s", err)
	}

+	var sparseFile *os.File
+
	// OpenFile adds O_CLOEXEC
-	sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0o644)
+	if readonly {
+		sparseFile, err = os.OpenFile(sparseName, os.O_RDONLY, 0o644)
+	} else {
+		sparseFile, err = os.OpenFile(sparseName, os.O_RDWR, 0o644)
+	}
	if err != nil {
		logrus.Errorf("Opening sparse file: %v", err)
		return nil, ErrAttachLoopbackDevice
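A hedged usage sketch for the new read-only attach (mountBlobRO is hypothetical; the erofs fstype mirrors how composefs blobs are mounted elsewhere in this commit and should be treated as an assumption):

func mountBlobRO(blobFile, mountPoint string) error {
	loop, err := loopback.AttachLoopDeviceRO(blobFile)
	if err != nil {
		return err
	}
	defer loop.Close()
	// Read-only loop device plus read-only mount: nothing can write to the blob.
	return unix.Mount(loop.Name(), mountPoint, "erofs", unix.MS_RDONLY, "")
}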

View File

@@ -28,7 +28,7 @@ func EnsureRemoveAll(dir string) error {
	// track retries
	exitOnErr := make(map[string]int)
-	maxRetry := 100
+	maxRetry := 1000

	// Attempt a simple remove all first, this avoids the more expensive
	// RecursiveUnmount call if not needed.
@@ -38,7 +38,7 @@ func EnsureRemoveAll(dir string) error {
	// Attempt to unmount anything beneath this dir first
	if err := mount.RecursiveUnmount(dir); err != nil {
-		logrus.Debugf("RecusiveUnmount on %s failed: %v", dir, err)
+		logrus.Debugf("RecursiveUnmount on %s failed: %v", dir, err)
	}

	for {
@@ -94,6 +94,6 @@ func EnsureRemoveAll(dir string) error {
			return err
		}

		exitOnErr[pe.Path]++
-		time.Sleep(100 * time.Millisecond)
+		time.Sleep(10 * time.Millisecond)
	}
}
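The retry budget changes shape rather than size: 100 tries at 100 ms and 1000 tries at 10 ms both cap out around ten seconds per stubborn path, but the shorter sleep recovers faster when an error is transient. A simplified sketch of the loop (removeWithRetry is hypothetical; the real code also unmounts first and tracks retries per failing path):

func removeWithRetry(dir string) error {
	const maxRetry = 1000 // was 100
	var err error
	for i := 0; i < maxRetry; i++ {
		if err = os.RemoveAll(dir); err == nil {
			return nil
		}
		time.Sleep(10 * time.Millisecond) // was 100ms; worst case stays ~10s
	}
	return err
}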

View File

@@ -27,9 +27,8 @@ runroot = "/run/containers/storage"
# restorecon -R -v /NEWSTORAGEPATH
graphroot = "/var/lib/containers/storage"

-# Optional value for image storage location
-# If set, it must be different than graphroot.
+# Optional alternate location of image store if a location separate from the
+# container store is required. If set, it must be different than graphroot.
# imagestore = ""

View File

@@ -2666,34 +2666,23 @@ func (s *store) DeleteContainer(id string) error {
		}

		var wg multierror.Group
-
-		wg.Go(func() error { return s.containerStore.Delete(id) })

		middleDir := s.graphDriverName + "-containers"
		wg.Go(func() error {
			gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID)
-			// attempt a simple rm -rf first
-			if err := os.RemoveAll(gcpath); err == nil {
-				return nil
-			}
-			// and if it fails get to the more complicated cleanup
			return system.EnsureRemoveAll(gcpath)
		})

		wg.Go(func() error {
			rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID)
-			// attempt a simple rm -rf first
-			if err := os.RemoveAll(rcpath); err == nil {
-				return nil
-			}
-			// and if it fails get to the more complicated cleanup
			return system.EnsureRemoveAll(rcpath)
		})

		if multierr := wg.Wait(); multierr != nil {
			return multierr.ErrorOrNil()
		}
-		return nil
+		return s.containerStore.Delete(id)
	})
}

@@ -3418,16 +3407,16 @@ func (s *store) Shutdown(force bool) ([]string, error) {
		err = fmt.Errorf("a layer is mounted: %w", ErrLayerUsedByContainer)
	}
	if err == nil {
-		err = s.graphDriver.Cleanup()
		// We don't retain the lastWrite value, and treat this update as if someone else did the .Cleanup(),
		// so that we reload after a .Shutdown() the same way other processes would.
		// Shutdown() is basically an error path, so reliability is more important than performance.
		if _, err2 := s.graphLock.RecordWrite(); err2 != nil {
-			if err == nil {
-				err = err2
-			} else {
-				err = fmt.Errorf("(graphLock.RecordWrite failed: %v) %w", err2, err)
-			}
+			err = fmt.Errorf("graphLock.RecordWrite failed: %w", err2)
		}
+		// Do the Cleanup() only after we are sure that the change was recorded
+		// with RecordWrite(), so that the next user picks it up.
+		if err == nil {
+			err = s.graphDriver.Cleanup()
+		}
	}
	return mounted, err

View File

@@ -220,9 +220,8 @@ type StoreOptions struct {
	// GraphRoot is the filesystem path under which we will store the
	// contents of layers, images, and containers.
	GraphRoot string `json:"root,omitempty"`
-	// Image Store is the location of image store which is seperated from the
-	// container store. Usually this is not recommended unless users wants
-	// seperate store for image and containers.
+	// ImageStore is the alternate location of the image store if a location
+	// separate from the container store is required.
	ImageStore string `json:"imagestore,omitempty"`
	// RootlessStoragePath is the storage path for rootless users
	// default $HOME/.local/share/containers/storage

View File

@@ -3,7 +3,7 @@
before:
  hooks:
    - ./gen.sh
-    - go install mvdan.cc/garble@v0.9.3
+    - go install mvdan.cc/garble@v0.10.1

builds:
  -
@@ -92,16 +92,7 @@ builds:
archives:
  -
    id: s2-binaries
-    name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}"
-    replacements:
-      aix: AIX
-      darwin: OSX
-      linux: Linux
-      windows: Windows
-      386: i386
-      amd64: x86_64
-      freebsd: FreeBSD
-      netbsd: NetBSD
+    name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
    format_overrides:
      - goos: windows
        format: zip
@@ -125,7 +116,7 @@ changelog:
nfpms:
  -
-    file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
+    file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
    vendor: Klaus Post
    homepage: https://github.com/klauspost/compress
    maintainer: Klaus Post <klauspost@gmail.com>
@@ -134,8 +125,3 @@ nfpms:
    formats:
      - deb
      - rpm
-    replacements:
-      darwin: Darwin
-      linux: Linux
-      freebsd: FreeBSD
-      amd64: x86_64

View File

@@ -16,6 +16,10 @@ This package provides various compression algorithms.

# changelog

+* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7)
+	* zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829
+	* s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832
+
* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6)
	* zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806
	* zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824
@@ -50,6 +54,9 @@ This package provides various compression algorithms.
	* s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
	* s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746

+<details>
+	<summary>See changes to v1.15.x</summary>
+
* Jan 21st, 2023 (v1.15.15)
	* deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
	* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
@@ -176,6 +183,8 @@ Stream decompression is now faster on asynchronous, since the goroutine allocati
While the release has been extensively tested, it is recommended to test when upgrading.

+</details>
+
<details>
	<summary>See changes to v1.14.x</summary>
@@ -636,6 +645,7 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code):
* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
+* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index.

# license

View File

@@ -7,6 +7,7 @@ package flate

import (
	"encoding/binary"
+	"errors"
	"fmt"
	"io"
	"math"
@@ -833,6 +834,12 @@ func (d *compressor) init(w io.Writer, level int) (err error) {
		d.initDeflate()
		d.fill = (*compressor).fillDeflate
		d.step = (*compressor).deflateLazy
+	case -level >= MinCustomWindowSize && -level <= MaxCustomWindowSize:
+		d.w.logNewTablePenalty = 7
+		d.fast = &fastEncL5Window{maxOffset: int32(-level), cur: maxStoreBlockSize}
+		d.window = make([]byte, maxStoreBlockSize)
+		d.fill = (*compressor).fillBlock
+		d.step = (*compressor).storeFast
	default:
		return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
	}
@@ -929,6 +936,28 @@ func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
	return zw, err
}

+// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow.
+const MinCustomWindowSize = 32
+
+// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow.
+const MaxCustomWindowSize = windowSize
+
+// NewWriterWindow returns a new Writer compressing data with a custom window size.
+// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize.
+func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) {
+	if windowSize < MinCustomWindowSize {
+		return nil, errors.New("flate: requested window size less than MinWindowSize")
+	}
+	if windowSize > MaxCustomWindowSize {
+		return nil, errors.New("flate: requested window size bigger than MaxCustomWindowSize")
+	}
+	var dw Writer
+	if err := dw.d.init(w, -windowSize); err != nil {
+		return nil, err
+	}
+	return &dw, nil
+}
+
// A Writer takes data written to it and writes the compressed
// form of that data to an underlying writer (see NewWriter).
type Writer struct {
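A usage sketch for NewWriterWindow: compress with a 1 KiB window and confirm the standard library can decode the result, since the output is still plain DEFLATE with match offsets capped at the custom window (this program is illustrative, not part of the commit):

package main

import (
	"bytes"
	stdflate "compress/flate"
	"fmt"
	"io"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer
	w, err := flate.NewWriterWindow(&buf, 1024) // must be within [MinCustomWindowSize, MaxCustomWindowSize]
	if err != nil {
		panic(err)
	}
	data := bytes.Repeat([]byte("0123456789abcdef"), 512)
	if _, err := w.Write(data); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	compressed := buf.Len()
	r := stdflate.NewReader(&buf)
	out, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	r.Close()
	fmt.Println("round trip ok:", bytes.Equal(out, data), "- compressed size:", compressed)
}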

View File

@@ -8,7 +8,6 @@ package flate
import (
	"encoding/binary"
	"fmt"
-	"math/bits"
)

type fastEnc interface {
@@ -192,25 +191,3 @@ func (e *fastGen) Reset() {
	}
	e.hist = e.hist[:0]
}
-
-// matchLen returns the maximum length.
-// 'a' must be the shortest of the two.
-func matchLen(a, b []byte) int {
-	var checked int
-
-	for len(a) >= 8 {
-		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
-			return checked + (bits.TrailingZeros64(diff) >> 3)
-		}
-		checked += 8
-		a = a[8:]
-		b = b[8:]
-	}
-	b = b[:len(a)]
-	for i := range a {
-		if a[i] != b[i] {
-			return i + checked
-		}
-	}
-	return len(a) + checked
-}

View File

@@ -308,3 +308,401 @@ emitRemainder:
		emitLiteral(dst, src[nextEmit:])
	}
}
// fastEncL5Window is a level 5 encoder,
// but with a custom window size.
type fastEncL5Window struct {
hist []byte
cur int32
maxOffset int32
table [tableSize]tableEntry
bTable [tableSize]tableEntryPrev
}
func (e *fastEncL5Window) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
hashShortBytes = 4
)
maxMatchOffset := e.maxOffset
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
}
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
for i := range e.bTable[:] {
e.bTable[i] = tableEntryPrev{}
}
e.cur = maxMatchOffset
break
}
// Shift down everything in the table that isn't already too far away.
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
for i := range e.table[:] {
v := e.table[i].offset
if v <= minOff {
v = 0
} else {
v = v - e.cur + maxMatchOffset
}
e.table[i].offset = v
}
for i := range e.bTable[:] {
v := e.bTable[i]
if v.Cur.offset <= minOff {
v.Cur.offset = 0
v.Prev.offset = 0
} else {
v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
if v.Prev.offset <= minOff {
v.Prev.offset = 0
} else {
v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
}
}
e.bTable[i] = v
}
e.cur = maxMatchOffset
}
s := e.addBlock(src)
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
if len(src) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = uint16(len(src))
return
}
// Override src
src = e.hist
nextEmit := s
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
cv := load6432(src, s)
for {
const skipLog = 6
const doEvery = 1
nextS := s
var l int32
var t int32
for {
nextHashS := hashLen(cv, tableBits, hashShortBytes)
nextHashL := hash7(cv, tableBits)
s = nextS
nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit {
goto emitRemainder
}
// Fetch a short+long candidate
sCandidate := e.table[nextHashS]
lCandidate := e.bTable[nextHashL]
next := load6432(src, nextS)
entry := tableEntry{offset: s + e.cur}
e.table[nextHashS] = entry
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = entry, eLong.Cur
nextHashS = hashLen(next, tableBits, hashShortBytes)
nextHashL = hash7(next, tableBits)
t = lCandidate.Cur.offset - e.cur
if s-t < maxMatchOffset {
if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
t2 := lCandidate.Prev.offset - e.cur
if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
l = e.matchlen(s+4, t+4, src) + 4
ml1 := e.matchlen(s+4, t2+4, src) + 4
if ml1 > l {
t = t2
l = ml1
break
}
}
break
}
t = lCandidate.Prev.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
break
}
}
t = sCandidate.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
// Found a 4 match...
l = e.matchlen(s+4, t+4, src) + 4
lCandidate = e.bTable[nextHashL]
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
// If the next long is a candidate, use that...
t2 := lCandidate.Cur.offset - e.cur
if nextS-t2 < maxMatchOffset {
if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
ml := e.matchlen(nextS+4, t2+4, src) + 4
if ml > l {
t = t2
s = nextS
l = ml
break
}
}
// If the previous long is a candidate, use that...
t2 = lCandidate.Prev.offset - e.cur
if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
ml := e.matchlen(nextS+4, t2+4, src) + 4
if ml > l {
t = t2
s = nextS
l = ml
break
}
}
}
break
}
cv = next
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
if l == 0 {
// Extend the 4-byte match as long as possible.
l = e.matchlenLong(s+4, t+4, src) + 4
} else if l == maxMatchLength {
l += e.matchlenLong(s+l, t+l, src)
}
// Try to locate a better match by checking the end of best match...
if sAt := s + l; l < 30 && sAt < sLimit {
// Allow some bytes at the beginning to mismatch.
// Sweet spot is 2/3 bytes depending on input.
// 3 is only a little better when it is but sometimes a lot worse.
// The skipped bytes are tested in Extend backwards,
// and still picked up as part of the match if they do.
const skipBeginning = 2
eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
t2 := eLong - e.cur - l + skipBeginning
s2 := s + skipBeginning
off := s2 - t2
if t2 >= 0 && off < maxMatchOffset && off > 0 {
if l2 := e.matchlenLong(s2, t2, src); l2 > l {
t = t2
l = l2
s = s2
}
}
}
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
l++
}
if nextEmit < s {
if false {
emitLiteral(dst, src[nextEmit:s])
} else {
for _, v := range src[nextEmit:s] {
dst.tokens[dst.n] = token(v)
dst.litHist[v]++
dst.n++
}
}
}
if debugDeflate {
if t >= s {
panic(fmt.Sprintln("s-t", s, t))
}
if (s - t) > maxMatchOffset {
panic(fmt.Sprintln("mmo", s-t))
}
if l < baseMatchLength {
panic("bml")
}
}
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
s += l
nextEmit = s
if nextS >= s {
s = nextS + 1
}
if s >= sLimit {
goto emitRemainder
}
// Store every 3rd hash in-between.
if true {
const hashEvery = 3
i := s - l + 1
if i < s-1 {
cv := load6432(src, i)
t := tableEntry{offset: i + e.cur}
e.table[hashLen(cv, tableBits, hashShortBytes)] = t
eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur
// Do a long at i+1
cv >>= 8
t = tableEntry{offset: t.offset + 1}
eLong = &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur
// We only have enough bits for a short entry at i+2
cv >>= 8
t = tableEntry{offset: t.offset + 1}
e.table[hashLen(cv, tableBits, hashShortBytes)] = t
// Skip one - otherwise we risk hitting 's'
i += 4
for ; i < s-1; i += hashEvery {
cv := load6432(src, i)
t := tableEntry{offset: i + e.cur}
t2 := tableEntry{offset: t.offset + 1}
eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur
e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
}
}
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-1 and at s.
x := load6432(src, s-1)
o := e.cur + s - 1
prevHashS := hashLen(x, tableBits, hashShortBytes)
prevHashL := hash7(x, tableBits)
e.table[prevHashS] = tableEntry{offset: o}
eLong := &e.bTable[prevHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
cv = x >> 8
}
emitRemainder:
if int(nextEmit) < len(src) {
// If nothing was added, don't encode literals.
if dst.n == 0 {
return
}
emitLiteral(dst, src[nextEmit:])
}
}
// Reset the encoding table.
func (e *fastEncL5Window) Reset() {
// We keep the same allocs, since we are compressing the same block sizes.
if cap(e.hist) < allocHistory {
e.hist = make([]byte, 0, allocHistory)
}
// We offset current position so everything will be out of reach.
// If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
if e.cur <= int32(bufferReset) {
e.cur += e.maxOffset + int32(len(e.hist))
}
e.hist = e.hist[:0]
}
func (e *fastEncL5Window) addBlock(src []byte) int32 {
// check if we have space already
maxMatchOffset := e.maxOffset
if len(e.hist)+len(src) > cap(e.hist) {
if cap(e.hist) == 0 {
e.hist = make([]byte, 0, allocHistory)
} else {
if cap(e.hist) < int(maxMatchOffset*2) {
panic("unexpected buffer size")
}
// Move down
offset := int32(len(e.hist)) - maxMatchOffset
copy(e.hist[0:maxMatchOffset], e.hist[offset:])
e.cur += offset
e.hist = e.hist[:maxMatchOffset]
}
}
s := int32(len(e.hist))
e.hist = append(e.hist, src...)
return s
}
// matchlen will return the match length between offsets s and t in src.
// The maximum length returned is maxMatchLength - 4.
// It is assumed that s > t, that t >= 0 and s < len(src).
func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 {
if debugDecode {
if t >= s {
panic(fmt.Sprint("t >=s:", t, s))
}
if int(s) >= len(src) {
panic(fmt.Sprint("s >= len(src):", s, len(src)))
}
if t < 0 {
panic(fmt.Sprint("t < 0:", t))
}
if s-t > e.maxOffset {
panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
}
}
s1 := int(s) + maxMatchLength - 4
if s1 > len(src) {
s1 = len(src)
}
// Extend the match to be as long as possible.
return int32(matchLen(src[s:s1], src[t:]))
}
// matchlenLong will return the match length between offsets s and t in src.
// It is assumed that s > t, that t >= 0 and s < len(src).
func (e *fastEncL5Window) matchlenLong(s, t int32, src []byte) int32 {
if debugDeflate {
if t >= s {
panic(fmt.Sprint("t >=s:", t, s))
}
if int(s) >= len(src) {
panic(fmt.Sprint("s >= len(src):", s, len(src)))
}
if t < 0 {
panic(fmt.Sprint("t < 0:", t))
}
if s-t > e.maxOffset {
panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
}
}
// Extend the match to be as long as possible.
return int32(matchLen(src[s:], src[t:]))
}

View File

@@ -0,0 +1,16 @@
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
package flate
// matchLen returns how many bytes match in a and b
//
// It assumes that:
//
// len(a) <= len(b) and len(a) > 0
//
//go:noescape
func matchLen(a []byte, b []byte) int

View File

@@ -0,0 +1,68 @@
// Copied from S2 implementation.
//go:build !appengine && !noasm && gc
#include "textflag.h"
// func matchLen(a []byte, b []byte) int
// Requires: BMI
TEXT ·matchLen(SB), NOSPLIT, $0-56
MOVQ a_base+0(FP), AX
MOVQ b_base+24(FP), CX
MOVQ a_len+8(FP), DX
// matchLen
XORL SI, SI
CMPL DX, $0x08
JB matchlen_match4_standalone
matchlen_loopback_standalone:
MOVQ (AX)(SI*1), BX
XORQ (CX)(SI*1), BX
TESTQ BX, BX
JZ matchlen_loop_standalone
#ifdef GOAMD64_v3
TZCNTQ BX, BX
#else
BSFQ BX, BX
#endif
SARQ $0x03, BX
LEAL (SI)(BX*1), SI
JMP gen_match_len_end
matchlen_loop_standalone:
LEAL -8(DX), DX
LEAL 8(SI), SI
CMPL DX, $0x08
JAE matchlen_loopback_standalone
matchlen_match4_standalone:
CMPL DX, $0x04
JB matchlen_match2_standalone
MOVL (AX)(SI*1), BX
CMPL (CX)(SI*1), BX
JNE matchlen_match2_standalone
LEAL -4(DX), DX
LEAL 4(SI), SI
matchlen_match2_standalone:
CMPL DX, $0x02
JB matchlen_match1_standalone
MOVW (AX)(SI*1), BX
CMPW (CX)(SI*1), BX
JNE matchlen_match1_standalone
LEAL -2(DX), DX
LEAL 2(SI), SI
matchlen_match1_standalone:
CMPL DX, $0x01
JB gen_match_len_end
MOVB (AX)(SI*1), BL
CMPB (CX)(SI*1), BL
JNE gen_match_len_end
INCL SI
gen_match_len_end:
MOVQ SI, ret+48(FP)
RET

View File

@@ -0,0 +1,33 @@
//go:build !amd64 || appengine || !gc || noasm
// +build !amd64 appengine !gc noasm
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
package flate
import (
"encoding/binary"
"math/bits"
)
// matchLen returns the maximum common prefix length of a and b.
// a must be the shortest of the two.
func matchLen(a, b []byte) (n int) {
for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
if diff != 0 {
return n + bits.TrailingZeros64(diff)>>3
}
n += 8
}
for i := range a {
if a[i] != b[i] {
break
}
n++
}
return n
}
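matchLen is unexported, so a quick semantics check belongs in a package test; the result is the byte length of the common prefix (here 7, because only "overlay" matches before 'f' differs from 'F'):

func TestMatchLenExample(t *testing.T) { // illustrative test, not part of this commit
	a := []byte("overlayfs!")
	b := []byte("overlayFS!")
	if got := matchLen(a, b); got != 7 {
		t.Fatalf("matchLen = %d, want 7", got)
	}
}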

View File

@@ -152,12 +152,11 @@ func (b *bitWriter) flushAlign() {

// close will write the alignment bit and write the final byte(s)
// to the output.
-func (b *bitWriter) close() error {
+func (b *bitWriter) close() {
	// End mark
	b.addBits16Clean(1, 1)
	// flush until next byte.
	b.flushAlign()
-	return nil
}

// reset and continue writing by appending to out.

View File

@@ -199,7 +199,8 @@ func (s *Scratch) compress(src []byte) error {
	c2.flush(s.actualTableLog)
	c1.flush(s.actualTableLog)

-	return s.bw.close()
+	s.bw.close()
+	return nil
}

// writeCount will write the normalized histogram count to header.

View File

@@ -94,10 +94,9 @@ func (b *bitWriter) flushAlign() {

// close will write the alignment bit and write the final byte(s)
// to the output.
-func (b *bitWriter) close() error {
+func (b *bitWriter) close() {
	// End mark
	b.addBits16Clean(1, 1)
	// flush until next byte.
	b.flushAlign()
-	return nil
}

View File

@@ -227,10 +227,10 @@ func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) {
}

func (s *Scratch) compress1X(src []byte) ([]byte, error) {
-	return s.compress1xDo(s.Out, src)
+	return s.compress1xDo(s.Out, src), nil
}

-func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
+func (s *Scratch) compress1xDo(dst, src []byte) []byte {
	var bw = bitWriter{out: dst}

	// N is length divisible by 4.
@@ -260,8 +260,8 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
		}
	}
-	err := bw.close()
-	return bw.out, err
+	bw.close()
+	return bw.out
}

var sixZeros [6]byte
@@ -283,12 +283,8 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) {
		}
		src = src[len(toDo):]

-		var err error
		idx := len(s.Out)
-		s.Out, err = s.compress1xDo(s.Out, toDo)
-		if err != nil {
-			return nil, err
-		}
+		s.Out = s.compress1xDo(s.Out, toDo)
		if len(s.Out)-idx > math.MaxUint16 {
			// We cannot store the size in the jump table
			return nil, ErrIncompressible
@@ -315,7 +311,6 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
	segmentSize := (len(src) + 3) / 4

	var wg sync.WaitGroup
-	var errs [4]error
	wg.Add(4)
	for i := 0; i < 4; i++ {
		toDo := src
@@ -326,15 +321,12 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {

		// Separate goroutine for each block.
		go func(i int) {
-			s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
+			s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
			wg.Done()
		}(i)
	}
	wg.Wait()
	for i := 0; i < 4; i++ {
-		if errs[i] != nil {
-			return nil, errs[i]
-		}
		o := s.tmpOut[i]
		if len(o) > math.MaxUint16 {
			// We cannot store the size in the jump table

View File

@@ -17,7 +17,6 @@ import (
// for aligning the input.
type bitReader struct {
	in       []byte
-	off      uint   // next byte to read is at in[off - 1]
	value    uint64 // Maybe use [16]byte, but shifting is awkward.
	bitsRead uint8
}
@@ -28,7 +27,6 @@ func (b *bitReader) init(in []byte) error {
		return errors.New("corrupt stream: too short")
	}
	b.in = in
-	b.off = uint(len(in))
	// The highest bit of the last byte indicates where to start
	v := in[len(in)-1]
	if v == 0 {
@@ -69,21 +67,19 @@ func (b *bitReader) fillFast() {
	if b.bitsRead < 32 {
		return
	}
-	// 2 bounds checks.
-	v := b.in[b.off-4:]
-	v = v[:4]
+	v := b.in[len(b.in)-4:]
+	b.in = b.in[:len(b.in)-4]
	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
	b.value = (b.value << 32) | uint64(low)
	b.bitsRead -= 32
-	b.off -= 4
}

// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
func (b *bitReader) fillFastStart() {
-	// Do single re-slice to avoid bounds checks.
-	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	v := b.in[len(b.in)-8:]
+	b.in = b.in[:len(b.in)-8]
+	b.value = binary.LittleEndian.Uint64(v)
	b.bitsRead = 0
-	b.off -= 8
}

// fill() will make sure at least 32 bits are available.
@@ -91,25 +87,25 @@ func (b *bitReader) fill() {
	if b.bitsRead < 32 {
		return
	}
-	if b.off >= 4 {
-		v := b.in[b.off-4:]
-		v = v[:4]
+	if len(b.in) >= 4 {
+		v := b.in[len(b.in)-4:]
+		b.in = b.in[:len(b.in)-4]
		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
		b.value = (b.value << 32) | uint64(low)
		b.bitsRead -= 32
-		b.off -= 4
		return
	}
-	for b.off > 0 {
-		b.value = (b.value << 8) | uint64(b.in[b.off-1])
-		b.bitsRead -= 8
-		b.off--
+
+	b.bitsRead -= uint8(8 * len(b.in))
+	for len(b.in) > 0 {
+		b.value = (b.value << 8) | uint64(b.in[len(b.in)-1])
+		b.in = b.in[:len(b.in)-1]
	}
}

// finished returns true if all bits have been read from the bit stream.
func (b *bitReader) finished() bool {
-	return b.off == 0 && b.bitsRead >= 64
+	return len(b.in) == 0 && b.bitsRead >= 64
}

// overread returns true if more bits have been requested than is on the stream.
@@ -119,7 +115,7 @@ func (b *bitReader) overread() bool {

// remain returns the number of bits remaining.
func (b *bitReader) remain() uint {
-	return b.off*8 + 64 - uint(b.bitsRead)
+	return 8*uint(len(b.in)) + 64 - uint(b.bitsRead)
}

// close the bitstream and returns an error if out-of-buffer reads occurred.
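With the off field gone, the reader consumes its input by re-slicing from the tail, so every bounds check compares against len(b.in) directly and there is one less field to keep in sync. The pattern in isolation, as a hypothetical helper:

// popLast4 consumes the last 4 bytes of in, mirroring how fill() now works.
func popLast4(in []byte) (rest []byte, v uint32) {
	tail := in[len(in)-4:] // bounds check against the slice length itself
	rest = in[:len(in)-4]  // "consume" by shrinking the slice
	v = uint32(tail[0]) | uint32(tail[1])<<8 | uint32(tail[2])<<16 | uint32(tail[3])<<24
	return rest, v
}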

View File

@@ -97,12 +97,11 @@ func (b *bitWriter) flushAlign() {

// close will write the alignment bit and write the final byte(s)
// to the output.
-func (b *bitWriter) close() error {
+func (b *bitWriter) close() {
	// End mark
	b.addBits16Clean(1, 1)
	// flush until next byte.
	b.flushAlign()
-	return nil
}

// reset and continue writing by appending to out.

View File

@@ -361,14 +361,21 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
	if len(lits) >= 1024 {
		// Use 4 Streams.
		out, reUsed, err = huff0.Compress4X(lits, b.litEnc)
-	} else if len(lits) > 32 {
+	} else if len(lits) > 16 {
		// Use 1 stream
		single = true
		out, reUsed, err = huff0.Compress1X(lits, b.litEnc)
	} else {
		err = huff0.ErrIncompressible
	}
+	if err == nil && len(out)+5 > len(lits) {
+		// If we are close, we may still be worse or equal to raw.
+		var lh literalsHeader
+		lh.setSizes(len(out), len(lits), single)
+		if len(out)+lh.size() >= len(lits) {
+			err = huff0.ErrIncompressible
+		}
+	}

	switch err {
	case huff0.ErrIncompressible:
		if debugEncoder {
@@ -503,7 +510,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
	if len(b.literals) >= 1024 && !raw {
		// Use 4 Streams.
		out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
-	} else if len(b.literals) > 32 && !raw {
+	} else if len(b.literals) > 16 && !raw {
		// Use 1 stream
		single = true
		out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
@@ -511,6 +518,17 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
		err = huff0.ErrIncompressible
	}
+	if err == nil && len(out)+5 > len(b.literals) {
+		// If we are close, we may still be worse or equal to raw.
+		var lh literalsHeader
+		lh.setSize(len(b.literals))
+		szRaw := lh.size()
+		lh.setSizes(len(out), len(b.literals), single)
+		szComp := lh.size()
+		if len(out)+szComp >= len(b.literals)+szRaw {
+			err = huff0.ErrIncompressible
+		}
+	}

	switch err {
	case huff0.ErrIncompressible:
		lh.setType(literalsBlockRaw)
@@ -773,10 +791,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
	ml.flush(mlEnc.actualTableLog)
	of.flush(ofEnc.actualTableLog)
	ll.flush(llEnc.actualTableLog)
-	err = wr.close()
-	if err != nil {
-		return err
-	}
+	wr.close()

	b.output = wr.out

	// Maybe even add a bigger margin.
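The new guards compare like with like: compressed literals only win if they beat raw literals including header overhead, since a compressed literals section needs a larger header (it stores both the regenerated and the compressed size). With invented numbers, saving a couple of payload bytes is not enough when the header costs more:

// keepRaw mirrors the guard's arithmetic; real header sizes come from
// literalsHeader.size() after setSize/setSizes, and the values here are invented.
func keepRaw(compLen, rawLen, compHdr, rawHdr int) bool {
	return compLen+compHdr >= rawLen+rawHdr
}

// keepRaw(60, 64, 3, 1) == false: 63 < 65, so compression still wins.
// keepRaw(62, 64, 3, 1) == true:  65 >= 65, so the block stays raw.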

View File

@@ -1,10 +1,13 @@
package zstd

import (
+	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
+	"math"
+	"sort"

	"github.com/klauspost/compress/huff0"
)
@@ -14,7 +17,6 @@ type dict struct {
	litEnc              *huff0.Scratch
	llDec, ofDec, mlDec sequenceDec
-	//llEnc, ofEnc, mlEnc []*fseEncoder

	offsets [3]int
	content []byte
}
@@ -159,3 +161,374 @@ func InspectDictionary(b []byte) (interface {
	d, err := loadDict(b)
	return d, err
}
type BuildDictOptions struct {
// Dictionary ID.
ID uint32
// Content to use to create dictionary tables.
Contents [][]byte
// History to use for all blocks.
History []byte
// Offsets to use.
Offsets [3]int
// CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier.
// See https://github.com/facebook/zstd/issues/3724
CompatV155 bool
// Use the specified encoder level.
// The dictionary will be built using the specified encoder level,
// which will reflect speed and make the dictionary tailored for that level.
// If not set SpeedBestCompression will be used.
Level EncoderLevel
// DebugOut will write stats and other details here if set.
DebugOut io.Writer
}
func BuildDict(o BuildDictOptions) ([]byte, error) {
initPredefined()
hist := o.History
contents := o.Contents
debug := o.DebugOut != nil
println := func(args ...interface{}) {
if o.DebugOut != nil {
fmt.Fprintln(o.DebugOut, args...)
}
}
printf := func(s string, args ...interface{}) {
if o.DebugOut != nil {
fmt.Fprintf(o.DebugOut, s, args...)
}
}
print := func(args ...interface{}) {
if o.DebugOut != nil {
fmt.Fprint(o.DebugOut, args...)
}
}
if int64(len(hist)) > dictMaxLength {
return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength))
}
if len(hist) < 8 {
return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8)
}
if len(contents) == 0 {
return nil, errors.New("no content provided")
}
d := dict{
id: o.ID,
litEnc: nil,
llDec: sequenceDec{},
ofDec: sequenceDec{},
mlDec: sequenceDec{},
offsets: o.Offsets,
content: hist,
}
block := blockEnc{lowMem: false}
block.init()
enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}})
if o.Level != 0 {
eOpts := encoderOptions{
level: o.Level,
blockSize: maxMatchLen,
windowSize: maxMatchLen,
dict: &d,
lowMem: false,
}
enc = eOpts.encoder()
} else {
o.Level = SpeedBestCompression
}
var (
remain [256]int
ll [256]int
ml [256]int
of [256]int
)
addValues := func(dst *[256]int, src []byte) {
for _, v := range src {
dst[v]++
}
}
addHist := func(dst *[256]int, src *[256]uint32) {
for i, v := range src {
dst[i] += int(v)
}
}
seqs := 0
nUsed := 0
litTotal := 0
newOffsets := make(map[uint32]int, 1000)
for _, b := range contents {
block.reset(nil)
if len(b) < 8 {
continue
}
nUsed++
enc.Reset(&d, true)
enc.Encode(&block, b)
addValues(&remain, block.literals)
litTotal += len(block.literals)
seqs += len(block.sequences)
block.genCodes()
addHist(&ll, block.coders.llEnc.Histogram())
addHist(&ml, block.coders.mlEnc.Histogram())
addHist(&of, block.coders.ofEnc.Histogram())
for i, seq := range block.sequences {
if i > 3 {
break
}
offset := seq.offset
if offset == 0 {
continue
}
if offset > 3 {
newOffsets[offset-3]++
} else {
newOffsets[uint32(o.Offsets[offset-1])]++
}
}
}
// Find most used offsets.
var sortedOffsets []uint32
for k := range newOffsets {
sortedOffsets = append(sortedOffsets, k)
}
sort.Slice(sortedOffsets, func(i, j int) bool {
a, b := sortedOffsets[i], sortedOffsets[j]
if a == b {
// Prefer the longer offset
return sortedOffsets[i] > sortedOffsets[j]
}
return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]]
})
if len(sortedOffsets) > 3 {
if debug {
print("Offsets:")
for i, v := range sortedOffsets {
if i > 20 {
break
}
printf("[%d: %d],", v, newOffsets[v])
}
println("")
}
sortedOffsets = sortedOffsets[:3]
}
for i, v := range sortedOffsets {
o.Offsets[i] = int(v)
}
if debug {
println("New repeat offsets", o.Offsets)
}
if nUsed == 0 || seqs == 0 {
return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs)
}
if debug {
println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal)
}
if seqs/nUsed < 512 {
// Use 512 as minimum.
nUsed = seqs / 512
}
copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) {
hist := dst.Histogram()
var maxSym uint8
var maxCount int
var fakeLength int
for i, v := range src {
if v > 0 {
v = v / nUsed
if v == 0 {
v = 1
}
}
if v > maxCount {
maxCount = v
}
if v != 0 {
maxSym = uint8(i)
}
fakeLength += v
hist[i] = uint32(v)
}
dst.HistogramFinished(maxSym, maxCount)
dst.reUsed = false
dst.useRLE = false
err := dst.normalizeCount(fakeLength)
if err != nil {
return nil, err
}
if debug {
println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength)
}
return dst.writeCount(nil)
}
if debug {
print("Literal lengths: ")
}
llTable, err := copyHist(block.coders.llEnc, &ll)
if err != nil {
return nil, err
}
if debug {
print("Match lengths: ")
}
mlTable, err := copyHist(block.coders.mlEnc, &ml)
if err != nil {
return nil, err
}
if debug {
print("Offsets: ")
}
ofTable, err := copyHist(block.coders.ofEnc, &of)
if err != nil {
return nil, err
}
// Literal table
avgSize := litTotal
if avgSize > huff0.BlockSizeMax/2 {
avgSize = huff0.BlockSizeMax / 2
}
huffBuff := make([]byte, 0, avgSize)
// Target size
div := litTotal / avgSize
if div < 1 {
div = 1
}
if debug {
println("Huffman weights:")
}
for i, n := range remain[:] {
if n > 0 {
n = n / div
// Allow all entries to be represented.
if n == 0 {
n = 1
}
huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
if debug {
printf("[%d: %d], ", i, n)
}
}
}
if o.CompatV155 && remain[255]/div == 0 {
huffBuff = append(huffBuff, 255)
}
scratch := &huff0.Scratch{TableLog: 11}
for tries := 0; tries < 255; tries++ {
scratch = &huff0.Scratch{TableLog: 11}
_, _, err = huff0.Compress1X(huffBuff, scratch)
if err == nil {
break
}
if debug {
printf("Try %d: Huffman error: %v\n", tries+1, err)
}
huffBuff = huffBuff[:0]
if tries == 250 {
if debug {
println("Huffman: Bailing out with predefined table")
}
// Bail out.... Just generate something
huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...)
for i := 0; i < 128; i++ {
huffBuff = append(huffBuff, byte(i))
}
continue
}
if errors.Is(err, huff0.ErrIncompressible) {
// Try truncating least common.
for i, n := range remain[:] {
if n > 0 {
n = n / (div * (i + 1))
if n > 0 {
huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
}
}
}
if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 {
huffBuff = append(huffBuff, 255)
}
if len(huffBuff) == 0 {
huffBuff = append(huffBuff, 0, 255)
}
}
if errors.Is(err, huff0.ErrUseRLE) {
for i, n := range remain[:] {
n = n / (div * (i + 1))
// Allow all entries to be represented.
if n == 0 {
n = 1
}
huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
}
}
}
var out bytes.Buffer
out.Write([]byte(dictMagic))
out.Write(binary.LittleEndian.AppendUint32(nil, o.ID))
out.Write(scratch.OutTable)
if debug {
println("huff table:", len(scratch.OutTable), "bytes")
println("of table:", len(ofTable), "bytes")
println("ml table:", len(mlTable), "bytes")
println("ll table:", len(llTable), "bytes")
}
out.Write(ofTable)
out.Write(mlTable)
out.Write(llTable)
out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0])))
out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1])))
out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2])))
out.Write(hist)
if debug {
_, err := loadDict(out.Bytes())
if err != nil {
panic(err)
}
i, err := InspectDictionary(out.Bytes())
if err != nil {
panic(err)
}
println("ID:", i.ID())
println("Content size:", i.ContentSize())
println("Encoder:", i.LitEncoder() != nil)
println("Offsets:", i.Offsets())
var totalSize int
for _, b := range contents {
totalSize += len(b)
}
encWith := func(opts ...EOption) int {
enc, err := NewWriter(nil, opts...)
if err != nil {
panic(err)
}
defer enc.Close()
var dst []byte
var totalSize int
for _, b := range contents {
dst = enc.EncodeAll(b, dst[:0])
totalSize += len(dst)
}
return totalSize
}
plain := encWith(WithEncoderLevel(o.Level))
withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes()))
println("Input size:", totalSize)
println("Plain Compressed:", plain)
println("Dict Compressed:", withDict)
println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)")
}
return out.Bytes(), nil
}
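
Note: the function above is the new exported dictionary builder (BuildDict) that arrives with the klauspost/compress v1.17.0 bump. A minimal usage sketch, assuming the BuildDictOptions fields referenced in the code above (ID, Contents, History, Level); the sample data and ID are illustrative only:

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Representative samples of the payloads the dictionary should target.
	var samples [][]byte
	for i := 0; i < 100; i++ {
		samples = append(samples, []byte(fmt.Sprintf(`{"id":%d,"status":"active"}`, i)))
	}

	dict, err := zstd.BuildDict(zstd.BuildDictOptions{
		ID:       1234,                      // hypothetical dictionary ID
		Contents: samples,                   // blocks used to build the tables
		History:  samples[0],                // dictionary content/history
		Level:    zstd.SpeedBestCompression, // the default when Level is unset
	})
	if err != nil {
		panic(err)
	}

	// The resulting blob is fed back through WithEncoderDict, exactly as
	// the debug path at the end of BuildDict does.
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dict))
	if err != nil {
		panic(err)
	}
	defer enc.Close()
	fmt.Println("dict:", len(dict), "bytes, sample:", len(enc.EncodeAll(samples[0], nil)), "bytes")
}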

vendor/github.com/klauspost/compress/zstd/encoder.go

@@ -227,10 +227,7 @@ func (e *Encoder) nextBlock(final bool) error {
 			DictID:      e.o.dict.ID(),
 		}
-		dst, err := fh.appendTo(tmp[:0])
-		if err != nil {
-			return err
-		}
+		dst := fh.appendTo(tmp[:0])
 		s.headerWritten = true
 		s.wWg.Wait()
 		var n2 int
@@ -483,7 +480,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
 			Checksum:      false,
 			DictID:        0,
 		}
-		dst, _ = fh.appendTo(dst)
+		dst = fh.appendTo(dst)
 
 		// Write raw block as last one only.
 		var blk blockHeader
@@ -518,10 +515,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
 	if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem {
 		dst = make([]byte, 0, len(src))
 	}
-	dst, err := fh.appendTo(dst)
-	if err != nil {
-		panic(err)
-	}
+	dst = fh.appendTo(dst)
 
 	// If we can do everything in one block, prefer that.
 	if len(src) <= e.o.blockSize {
@@ -581,6 +575,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
 	// Add padding with content from crypto/rand.Reader
 	if e.o.pad > 0 {
 		add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad))
+		var err error
 		dst, err = skippableFrame(dst, add, rand.Reader)
 		if err != nil {
 			panic(err)
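
The final hunk above is a knock-on effect of the earlier ones: dst, err := fh.appendTo(dst) used to declare err for the rest of EncodeAll, so once appendTo returns only the slice, the padding branch must declare its own err (writing dst, err := there would shadow dst). A self-contained sketch of the pattern, with hypothetical helper names:

package main

import (
	"crypto/rand"
	"fmt"
)

// appendHeader stands in for fh.appendTo: infallible, returns only bytes.
func appendHeader(dst []byte) []byte { return append(dst, 0x28, 0xb5, 0x2f, 0xfd) }

// addPadding stands in for skippableFrame: still fallible.
func addPadding(dst []byte, n int) ([]byte, error) {
	pad := make([]byte, n)
	if _, err := rand.Read(pad); err != nil {
		return nil, err
	}
	return append(dst, pad...), nil
}

func main() {
	dst := appendHeader(nil) // no err declared in this scope anymore
	if pad := 4; pad > 0 {
		var err error // declared locally; := here would create a new, shadowed dst
		dst, err = addPadding(dst, pad)
		if err != nil {
			panic(err)
		}
	}
	fmt.Printf("%x\n", dst)
}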

vendor/github.com/klauspost/compress/zstd/frameenc.go

@@ -22,7 +22,7 @@ type frameHeader struct {
 
 const maxHeaderSize = 14
 
-func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
+func (f frameHeader) appendTo(dst []byte) []byte {
 	dst = append(dst, frameMagic...)
 	var fhd uint8
 	if f.Checksum {
@@ -88,7 +88,7 @@ func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
 	default:
 		panic("invalid fcs")
 	}
-	return dst, nil
+	return dst
 }
 
 const skippableFrameHeader = 4 + 4
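
This signature change is the usual Go cleanup for a function whose only failure mode is a programming error: appendTo never returned a recoverable error (the invalid-fcs case panics, as the second hunk shows), so the error result is dropped and every caller sheds its dead error handling. A stand-alone illustration of the pattern with a hypothetical type, not the vendored frameHeader:

package main

import "fmt"

type header struct{ windowLog uint8 }

// appendTo writes a tiny mock header. Impossible states panic instead of
// returning an error that no caller could meaningfully handle.
func (h header) appendTo(dst []byte) []byte {
	if h.windowLog > 41 {
		panic("invalid window log")
	}
	return append(dst, 0x28, 0xb5, 0x2f, 0xfd, h.windowLog)
}

func main() {
	dst := header{windowLog: 20}.appendTo(nil) // no error to thread through
	fmt.Printf("%x\n", dst)
}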

vendor/github.com/klauspost/compress/zstd/seqdec.go

@@ -245,7 +245,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
 			return io.ErrUnexpectedEOF
 		}
 		var ll, mo, ml int
-		if br.off > 4+((maxOffsetBits+16+16)>>3) {
+		if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
 			// inlined function:
 			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
@@ -452,18 +452,13 @@ func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol)
 	// extra bits are stored in reverse order.
 	br.fill()
-	if s.maxBits <= 32 {
-		mo += br.getBits(moB)
-		ml += br.getBits(mlB)
-		ll += br.getBits(llB)
-	} else {
-		mo += br.getBits(moB)
-		br.fill()
-		// matchlength+literal length, max 32 bits
-		ml += br.getBits(mlB)
-		ll += br.getBits(llB)
-	}
+	mo += br.getBits(moB)
+	if s.maxBits > 32 {
+		br.fill()
+	}
+	// matchlength+literal length, max 32 bits
+	ml += br.getBits(mlB)
+	ll += br.getBits(llB)
 	mo = s.adjustOffset(mo, ll, moB)
 	return
 }
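
Both hunks in this file (and the matching one in the generic decoder below) track a change to the bitReader itself: the separate off field is gone, and the reader now truncates in as bytes are consumed, so len(br.in) is the count of unread bytes. A reconstructed before/after sketch of the struct, not the verbatim vendored source:

// Before: remaining input tracked with an explicit offset.
type bitReaderOld struct {
	in       []byte
	off      uint // next byte to read is at in[off-1]
	value    uint64
	bitsRead uint8
}

// After: in shrinks as it is read, so "enough bytes left" checks
// become len(br.in) > N.
type bitReaderNew struct {
	in       []byte
	value    uint64
	bitsRead uint8
}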

vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s

@@ -5,11 +5,11 @@
 // func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // Requires: CMOV
 TEXT ·sequenceDecs_decode_amd64(SB), $8-32
-	MOVQ    br+8(FP), AX
-	MOVQ    32(AX), DX
-	MOVBQZX 40(AX), BX
-	MOVQ    24(AX), SI
-	MOVQ    (AX), AX
+	MOVQ    br+8(FP), CX
+	MOVQ    24(CX), DX
+	MOVBQZX 32(CX), BX
+	MOVQ    (CX), AX
+	MOVQ    8(CX), SI
 	ADDQ    SI, AX
 	MOVQ    AX, (SP)
 	MOVQ    ctx+16(FP), AX
@@ -301,9 +301,9 @@ sequenceDecs_decode_amd64_match_len_ofs_ok:
 	MOVQ R12, 152(AX)
 	MOVQ R13, 160(AX)
 	MOVQ br+8(FP), AX
-	MOVQ DX, 32(AX)
-	MOVB BL, 40(AX)
-	MOVQ SI, 24(AX)
+	MOVQ DX, 24(AX)
+	MOVB BL, 32(AX)
+	MOVQ SI, 8(AX)
 
 	// Return success
 	MOVQ $0x00000000, ret+24(FP)
@@ -336,11 +336,11 @@ error_overread:
 // func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // Requires: CMOV
 TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
-	MOVQ    br+8(FP), AX
-	MOVQ    32(AX), DX
-	MOVBQZX 40(AX), BX
-	MOVQ    24(AX), SI
-	MOVQ    (AX), AX
+	MOVQ    br+8(FP), CX
+	MOVQ    24(CX), DX
+	MOVBQZX 32(CX), BX
+	MOVQ    (CX), AX
+	MOVQ    8(CX), SI
 	ADDQ    SI, AX
 	MOVQ    AX, (SP)
 	MOVQ    ctx+16(FP), AX
@@ -603,9 +603,9 @@ sequenceDecs_decode_56_amd64_match_len_ofs_ok:
 	MOVQ R12, 152(AX)
 	MOVQ R13, 160(AX)
 	MOVQ br+8(FP), AX
-	MOVQ DX, 32(AX)
-	MOVB BL, 40(AX)
-	MOVQ SI, 24(AX)
+	MOVQ DX, 24(AX)
+	MOVB BL, 32(AX)
+	MOVQ SI, 8(AX)
 
 	// Return success
 	MOVQ $0x00000000, ret+24(FP)
@@ -638,11 +638,11 @@ error_overread:
 // func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // Requires: BMI, BMI2, CMOV
 TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
-	MOVQ    br+8(FP), CX
-	MOVQ    32(CX), AX
-	MOVBQZX 40(CX), DX
-	MOVQ    24(CX), BX
-	MOVQ    (CX), CX
+	MOVQ    br+8(FP), BX
+	MOVQ    24(BX), AX
+	MOVBQZX 32(BX), DX
+	MOVQ    (BX), CX
+	MOVQ    8(BX), BX
 	ADDQ    BX, CX
 	MOVQ    CX, (SP)
 	MOVQ    ctx+16(FP), CX
@@ -892,9 +892,9 @@ sequenceDecs_decode_bmi2_match_len_ofs_ok:
 	MOVQ R11, 152(CX)
 	MOVQ R12, 160(CX)
 	MOVQ br+8(FP), CX
-	MOVQ AX, 32(CX)
-	MOVB DL, 40(CX)
-	MOVQ BX, 24(CX)
+	MOVQ AX, 24(CX)
+	MOVB DL, 32(CX)
+	MOVQ BX, 8(CX)
 
 	// Return success
 	MOVQ $0x00000000, ret+24(FP)
@@ -927,11 +927,11 @@ error_overread:
 // func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // Requires: BMI, BMI2, CMOV
 TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
-	MOVQ    br+8(FP), CX
-	MOVQ    32(CX), AX
-	MOVBQZX 40(CX), DX
-	MOVQ    24(CX), BX
-	MOVQ    (CX), CX
+	MOVQ    br+8(FP), BX
+	MOVQ    24(BX), AX
+	MOVBQZX 32(BX), DX
+	MOVQ    (BX), CX
+	MOVQ    8(BX), BX
 	ADDQ    BX, CX
 	MOVQ    CX, (SP)
 	MOVQ    ctx+16(FP), CX
@@ -1152,9 +1152,9 @@ sequenceDecs_decode_56_bmi2_match_len_ofs_ok:
 	MOVQ R11, 152(CX)
 	MOVQ R12, 160(CX)
 	MOVQ br+8(FP), CX
-	MOVQ AX, 32(CX)
-	MOVB DL, 40(CX)
-	MOVQ BX, 24(CX)
+	MOVQ AX, 24(CX)
+	MOVB DL, 32(CX)
+	MOVQ BX, 8(CX)
 
 	// Return success
 	MOVQ $0x00000000, ret+24(FP)
@@ -1797,11 +1797,11 @@ empty_seqs:
 // func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 // Requires: CMOV, SSE
 TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
-	MOVQ    br+8(FP), AX
-	MOVQ    32(AX), DX
-	MOVBQZX 40(AX), BX
-	MOVQ    24(AX), SI
-	MOVQ    (AX), AX
+	MOVQ    br+8(FP), CX
+	MOVQ    24(CX), DX
+	MOVBQZX 32(CX), BX
+	MOVQ    (CX), AX
+	MOVQ    8(CX), SI
 	ADDQ    SI, AX
 	MOVQ    AX, (SP)
 	MOVQ    ctx+16(FP), AX
@@ -2295,9 +2295,9 @@ handle_loop:
 loop_finished:
 	MOVQ br+8(FP), AX
-	MOVQ DX, 32(AX)
-	MOVB BL, 40(AX)
-	MOVQ SI, 24(AX)
+	MOVQ DX, 24(AX)
+	MOVB BL, 32(AX)
+	MOVQ SI, 8(AX)
 
 	// Update the context
 	MOVQ ctx+16(FP), AX
@@ -2362,11 +2362,11 @@ error_not_enough_space:
 // func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 // Requires: BMI, BMI2, CMOV, SSE
 TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
-	MOVQ    br+8(FP), CX
-	MOVQ    32(CX), AX
-	MOVBQZX 40(CX), DX
-	MOVQ    24(CX), BX
-	MOVQ    (CX), CX
+	MOVQ    br+8(FP), BX
+	MOVQ    24(BX), AX
+	MOVBQZX 32(BX), DX
+	MOVQ    (BX), CX
+	MOVQ    8(BX), BX
 	ADDQ    BX, CX
 	MOVQ    CX, (SP)
 	MOVQ    ctx+16(FP), CX
@@ -2818,9 +2818,9 @@ handle_loop:
 loop_finished:
 	MOVQ br+8(FP), CX
-	MOVQ AX, 32(CX)
-	MOVB DL, 40(CX)
-	MOVQ BX, 24(CX)
+	MOVQ AX, 24(CX)
+	MOVB DL, 32(CX)
+	MOVQ BX, 8(CX)
 
 	// Update the context
 	MOVQ ctx+16(FP), AX
@@ -2885,11 +2885,11 @@ error_not_enough_space:
 // func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 // Requires: CMOV, SSE
 TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
-	MOVQ    br+8(FP), AX
-	MOVQ    32(AX), DX
-	MOVBQZX 40(AX), BX
-	MOVQ    24(AX), SI
-	MOVQ    (AX), AX
+	MOVQ    br+8(FP), CX
+	MOVQ    24(CX), DX
+	MOVBQZX 32(CX), BX
+	MOVQ    (CX), AX
+	MOVQ    8(CX), SI
 	ADDQ    SI, AX
 	MOVQ    AX, (SP)
 	MOVQ    ctx+16(FP), AX
@@ -3485,9 +3485,9 @@ handle_loop:
 loop_finished:
 	MOVQ br+8(FP), AX
-	MOVQ DX, 32(AX)
-	MOVB BL, 40(AX)
-	MOVQ SI, 24(AX)
+	MOVQ DX, 24(AX)
+	MOVB BL, 32(AX)
+	MOVQ SI, 8(AX)
 
 	// Update the context
 	MOVQ ctx+16(FP), AX
@@ -3552,11 +3552,11 @@ error_not_enough_space:
 // func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 // Requires: BMI, BMI2, CMOV, SSE
 TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
-	MOVQ    br+8(FP), CX
-	MOVQ    32(CX), AX
-	MOVBQZX 40(CX), DX
-	MOVQ    24(CX), BX
-	MOVQ    (CX), CX
+	MOVQ    br+8(FP), BX
+	MOVQ    24(BX), AX
+	MOVBQZX 32(BX), DX
+	MOVQ    (BX), CX
+	MOVQ    8(BX), BX
 	ADDQ    BX, CX
 	MOVQ    CX, (SP)
 	MOVQ    ctx+16(FP), CX
@@ -4110,9 +4110,9 @@ handle_loop:
 loop_finished:
 	MOVQ br+8(FP), CX
-	MOVQ AX, 32(CX)
-	MOVB DL, 40(CX)
-	MOVQ BX, 24(CX)
+	MOVQ AX, 24(CX)
+	MOVB DL, 32(CX)
+	MOVQ BX, 8(CX)
 
 	// Update the context
 	MOVQ ctx+16(FP), AX
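
The register and offset churn above is mechanical fallout from that bitReader layout change: with the 8-byte off field (formerly at offset 24) removed, value moves from offset 32 to 24 and bitsRead from 40 to 32, and the saved read position is now the slice length at offset 8. A quick amd64 check of the assumed new layout:

package main

import (
	"fmt"
	"unsafe"
)

// Mirror of the assumed post-change bitReader layout (see the sketch in
// the seqdec.go section above).
type bitReader struct {
	in       []byte // slice header: data at +0, len at +8, cap at +16
	value    uint64
	bitsRead uint8
}

func main() {
	var br bitReader
	fmt.Println(unsafe.Offsetof(br.value))    // 24, matching MOVQ 24(CX), DX
	fmt.Println(unsafe.Offsetof(br.bitsRead)) // 32, matching MOVBQZX 32(CX), BX
}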

vendor/github.com/klauspost/compress/zstd/seqdec_generic.go

@@ -29,7 +29,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
 	}
 	for i := range seqs {
 		var ll, mo, ml int
-		if br.off > 4+((maxOffsetBits+16+16)>>3) {
+		if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
 			// inlined function:
 			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)

vendor/github.com/klauspost/compress/zstd/snappy.go

@@ -95,10 +95,9 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
 	var written int64
 	var readHeader bool
 	{
-		var header []byte
-		var n int
-		header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
+		header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
 
+		var n int
 		n, r.err = w.Write(header)
 		if r.err != nil {
 			return written, r.err

vendor/modules.txt

@@ -339,7 +339,7 @@ github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/host
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.50.2
+# github.com/containers/storage v1.50.3-0.20231005112617-44418abb2d89
 ## explicit; go 1.19
 github.com/containers/storage
 github.com/containers/storage/drivers
@@ -684,7 +684,7 @@ github.com/josharian/intern
 # github.com/json-iterator/go v1.1.12
 ## explicit; go 1.12
 github.com/json-iterator/go
-# github.com/klauspost/compress v1.16.7
+# github.com/klauspost/compress v1.17.0
 ## explicit; go 1.18
 github.com/klauspost/compress
 github.com/klauspost/compress/flate