diff --git a/go.mod b/go.mod
index b78bd83b3d..ada985908e 100644
--- a/go.mod
+++ b/go.mod
@@ -17,7 +17,7 @@ require (
 	github.com/containers/image/v5 v5.20.1-0.20220310094651-0d8056ee346f
 	github.com/containers/ocicrypt v1.1.2
 	github.com/containers/psgo v1.7.2
-	github.com/containers/storage v1.38.3-0.20220308085612-93ce26691863
+	github.com/containers/storage v1.38.3-0.20220321121613-8e565392dd91
 	github.com/coreos/go-systemd/v22 v22.3.2
 	github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3
 	github.com/cyphar/filepath-securejoin v0.2.3
diff --git a/go.sum b/go.sum
index ae45625bd9..19c6252889 100644
--- a/go.sum
+++ b/go.sum
@@ -324,8 +324,9 @@ github.com/containerd/stargz-snapshotter/estargz v0.9.0/go.mod h1:aE5PCyhFMwR8sb
 github.com/containerd/stargz-snapshotter/estargz v0.10.1/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
 github.com/containerd/stargz-snapshotter/estargz v0.11.0/go.mod h1:/KsZXsJRllMbTKFfG0miFQWViQKdI9+9aSXs+HN0+ac=
 github.com/containerd/stargz-snapshotter/estargz v0.11.1/go.mod h1:6VoPcf4M1wvnogWxqc4TqBWWErCS+R+ucnPZId2VbpQ=
-github.com/containerd/stargz-snapshotter/estargz v0.11.2 h1:0P0vWmfrEeTtZ4BBRrpuyu/HxR9HPBLfeljGOra5f6g=
 github.com/containerd/stargz-snapshotter/estargz v0.11.2/go.mod h1:rjbdAXaytDSIrAy2WAy2kUrJ4ehzDS0eUQLlIb5UCY0=
+github.com/containerd/stargz-snapshotter/estargz v0.11.3 h1:k2kN16Px6LYuv++qFqK+JTcYqc8bEVxzGpf8/gFBL5M=
+github.com/containerd/stargz-snapshotter/estargz v0.11.3/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
 github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -377,8 +378,9 @@ github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c
 github.com/containers/storage v1.38.0/go.mod h1:lBzt28gAk5ADZuRtwdndRJyqX22vnRaXmlF+7ktfMYc=
 github.com/containers/storage v1.38.2/go.mod h1:INP0RPLHWBxx+pTsO5uiHlDUGHDFvWZPWprAbAlQWPQ=
 github.com/containers/storage v1.38.3-0.20220301151551-d06b0f81c0aa/go.mod h1:LkkL34WRi4dI4jt9Cp+ImdZi/P5i36glSHimT5CP5zM=
-github.com/containers/storage v1.38.3-0.20220308085612-93ce26691863 h1:10k6Dl+Bm9zgsxP7qv0mnrhd7+XlCmgQWKgkydwZ7vQ=
 github.com/containers/storage v1.38.3-0.20220308085612-93ce26691863/go.mod h1:uhf9mPUP+uYajC2/S0A9NaCVa2JJ6+1C254ue4Edv2g=
+github.com/containers/storage v1.38.3-0.20220321121613-8e565392dd91 h1:gEbkqcBM3XFbIz6L9bpJyUEcuDd8vi8jzyrneVS8At4=
+github.com/containers/storage v1.38.3-0.20220321121613-8e565392dd91/go.mod h1:UAD0cKLouN4BOQRgZut/nMjrh/EnTCjSNPgp4ZuGWMs=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -876,8 +878,9 @@ github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e
 github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/compress v1.14.3/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.15.0 h1:xqfchp4whNFxn5A4XFyyYtitiWI8Hy5EW59jEwcyL6U=
 github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
+github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
 github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
index 1844213707..895d6645ab 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
@@ -3,7 +3,7 @@ module github.com/containerd/stargz-snapshotter/estargz
 go 1.16
 
 require (
-	github.com/klauspost/compress v1.15.0
+	github.com/klauspost/compress v1.15.1
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/vbatts/tar-split v0.11.2
 	golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
index 0eb0b7a101..8b44342da0 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
@@ -1,8 +1,8 @@
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/klauspost/compress v1.15.0 h1:xqfchp4whNFxn5A4XFyyYtitiWI8Hy5EW59jEwcyL6U=
-github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
+github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index 00a3b5e4d1..a780ef5da3 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -292,6 +292,31 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 		backingFs = fsName
 	}
 
+	runhome := filepath.Join(options.RunRoot, filepath.Base(home))
+	rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create the driver home dir
+	if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil {
+		return nil, err
+	}
+
+	if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil {
+		return nil, err
+	}
+
+	if opts.mountProgram == "" {
+		if supported, err := SupportsNativeOverlay(home, runhome); err != nil {
+			return nil, err
+		} else if !supported {
+			if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
+				opts.mountProgram = path
+			}
+		}
+	}
+
 	if opts.mountProgram != "" {
 		if unshare.IsRootless() && isNetworkFileSystem(fsMagic) && opts.forceMask == nil {
 			m := os.FileMode(0700)
@@ -316,20 +341,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 		}
 	}
 
-	rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
-	if err != nil {
-		return nil, err
-	}
-
-	// Create the driver home dir
-	if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil {
-		return nil, err
-	}
-	runhome := filepath.Join(options.RunRoot, filepath.Base(home))
-	if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil {
-		return nil, err
-	}
-
 	var usingMetacopy bool
 	var supportsDType bool
 	var supportsVolatile *bool
@@ -569,14 +580,11 @@ func cachedFeatureRecord(runhome, feature string, supported bool, text string) (
 	return err
 }
 
-func SupportsNativeOverlay(graphroot, rundir string) (bool, error) {
-	if os.Geteuid() != 0 || graphroot == "" || rundir == "" {
+func SupportsNativeOverlay(home, runhome string) (bool, error) {
+	if os.Geteuid() != 0 || home == "" || runhome == "" {
 		return false, nil
 	}
 
-	home := filepath.Join(graphroot, "overlay")
-	runhome := filepath.Join(rundir, "overlay")
-
 	var contents string
 	flagContent, err := ioutil.ReadFile(getMountProgramFlagFile(home))
 	if err == nil {
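The overlay.go hunk above moves the fuse-overlayfs fallback into the driver's Init and narrows SupportsNativeOverlay to take the driver's home and runhome directly. A minimal sketch of the resulting decision order, written as if it lived in the same overlay package (pickMountProgram is a hypothetical helper, not part of the vendored code):

```go
package overlay

import "os/exec"

// pickMountProgram illustrates the order Init now follows: an explicitly
// configured overlay.mount_program wins, otherwise native overlay support is
// probed, and fuse-overlayfs is used only when the probe fails and the
// binary is found on PATH.
func pickMountProgram(home, runhome, configured string) (string, error) {
	if configured != "" {
		return configured, nil
	}
	supported, err := SupportsNativeOverlay(home, runhome)
	if err != nil {
		return "", err
	}
	if supported {
		return "", nil // kernel overlayfs can be used directly
	}
	if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
		return path, nil // userspace fallback
	}
	return "", nil // neither found; later capability checks report the error
}
```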
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index 3e8d51f6a6..4da8384afe 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -6,13 +6,13 @@ require (
 	github.com/BurntSushi/toml v1.0.0
 	github.com/Microsoft/go-winio v0.5.2
 	github.com/Microsoft/hcsshim v0.9.2
-	github.com/containerd/stargz-snapshotter/estargz v0.11.2
+	github.com/containerd/stargz-snapshotter/estargz v0.11.3
 	github.com/cyphar/filepath-securejoin v0.2.3
 	github.com/docker/go-units v0.4.0
 	github.com/google/go-intervals v0.0.2
 	github.com/hashicorp/go-multierror v1.1.1
 	github.com/json-iterator/go v1.1.12
-	github.com/klauspost/compress v1.15.0
+	github.com/klauspost/compress v1.15.1
 	github.com/klauspost/pgzip v1.2.5
 	github.com/mattn/go-shellwords v1.0.12
 	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
@@ -23,7 +23,7 @@ require (
 	github.com/opencontainers/selinux v1.10.0
 	github.com/pkg/errors v0.9.1
 	github.com/sirupsen/logrus v1.8.1
-	github.com/stretchr/testify v1.7.0
+	github.com/stretchr/testify v1.7.1
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
 	github.com/tchap/go-patricia v2.3.0+incompatible
 	github.com/ulikunitz/xz v0.5.10
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index ef6b711cf5..b995da734f 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -176,8 +176,8 @@ github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFY
 github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
 github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
 github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
-github.com/containerd/stargz-snapshotter/estargz v0.11.2 h1:0P0vWmfrEeTtZ4BBRrpuyu/HxR9HPBLfeljGOra5f6g=
-github.com/containerd/stargz-snapshotter/estargz v0.11.2/go.mod h1:rjbdAXaytDSIrAy2WAy2kUrJ4ehzDS0eUQLlIb5UCY0=
+github.com/containerd/stargz-snapshotter/estargz v0.11.3 h1:k2kN16Px6LYuv++qFqK+JTcYqc8bEVxzGpf8/gFBL5M=
+github.com/containerd/stargz-snapshotter/estargz v0.11.3/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
 github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -424,8 +424,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.15.0 h1:xqfchp4whNFxn5A4XFyyYtitiWI8Hy5EW59jEwcyL6U=
-github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
+github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
 github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -622,8 +622,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go
index 567985b98f..a71c6d2efd 100644
--- a/vendor/github.com/containers/storage/types/options.go
+++ b/vendor/github.com/containers/storage/types/options.go
@@ -3,14 +3,12 @@ package types
 import (
 	"fmt"
 	"os"
-	"os/exec"
 	"path/filepath"
 	"strings"
 	"sync"
 	"time"
 
 	"github.com/BurntSushi/toml"
-	"github.com/containers/storage/drivers/overlay"
 	cfg "github.com/containers/storage/pkg/config"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/sirupsen/logrus"
@@ -225,25 +223,11 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti
 		opts.GraphDriverName = overlayDriver
 	}
 
-	if opts.GraphDriverName == "" || opts.GraphDriverName == overlayDriver {
-		supported, err := overlay.SupportsNativeOverlay(opts.GraphRoot, rootlessRuntime)
-		if err != nil {
-			return opts, err
-		}
-		if supported {
-			opts.GraphDriverName = overlayDriver
-		} else {
-			if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
-				opts.GraphDriverName = overlayDriver
-				opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)}
-			}
-		}
-		if opts.GraphDriverName == overlayDriver {
-			for _, o := range systemOpts.GraphDriverOptions {
-				if strings.Contains(o, "ignore_chown_errors") {
-					opts.GraphDriverOptions = append(opts.GraphDriverOptions, o)
-					break
-				}
+	if opts.GraphDriverName == overlayDriver {
+		for _, o := range systemOpts.GraphDriverOptions {
+			if strings.Contains(o, "ignore_chown_errors") {
+				opts.GraphDriverOptions = append(opts.GraphDriverOptions, o)
+				break
 			}
 		}
 	}
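With detection now inside the driver, getRootlessStorageOpts no longer imports the overlay driver or searches PATH itself; it only forwards ignore_chown_errors. Callers that still want to force the FUSE helper can keep doing so through graph driver options, as in this hedged sketch (forceFuseOverlayfs and the binary path are illustrative assumptions):

```go
package types

// forceFuseOverlayfs shows how a caller can still request the FUSE helper
// explicitly now that automatic detection lives in the overlay driver's Init.
// The option key matches the one the removed code used to inject.
func forceFuseOverlayfs(opts *StoreOptions) {
	opts.GraphDriverName = overlayDriver
	opts.GraphDriverOptions = append(opts.GraphDriverOptions,
		"overlay.mount_program=/usr/bin/fuse-overlayfs")
}
```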
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 9ddf39f6f3..0e2dc116ad 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -17,6 +17,23 @@ This package provides various compression algorithms.
 
 # changelog
 
+* Mar 3, 2022 (v1.15.0)
+	* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
+	* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
+	* huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507)
+	* flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
+	* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
+	* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
+
+<details>
+	<summary>See Details</summary>
+Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
+
+Asynchronous stream decompression is now faster, since the goroutine allocation splits the workload much more effectively. On typical streams this will use 2 cores fully for decompression. When a stream has finished decoding, no goroutines are left over, so decoders can now safely be pooled and still be garbage collected.
+
+While the release has been extensively tested, it is recommended to test when upgrading.
+</details>
+
 * Feb 22, 2022 (v1.14.4)
 	* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
 	* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
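The v1.15.0 entry above mentions synchronous stream operation whenever concurrency is set to 1. A minimal round-trip sketch using the zstd options that control this (error handling elided for brevity):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer

	// Concurrency 1 selects the synchronous stream paths described above:
	// this encoder/decoder pair spawns no worker goroutines.
	enc, _ := zstd.NewWriter(&buf, zstd.WithEncoderConcurrency(1))
	enc.Write([]byte("hello, zstd"))
	enc.Close()

	dec, _ := zstd.NewReader(&buf, zstd.WithDecoderConcurrency(1))
	defer dec.Close()

	out, _ := io.ReadAll(dec)
	fmt.Println(string(out)) // hello, zstd
}
```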
diff --git a/vendor/github.com/klauspost/compress/huff0/autogen.go b/vendor/github.com/klauspost/compress/huff0/autogen.go
new file mode 100644
index 0000000000..ff2c69d60c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/autogen.go
@@ -0,0 +1,5 @@
+package huff0
+
+//go:generate go run generate.go
+//go:generate asmfmt -w decompress_amd64.s
+//go:generate asmfmt -w decompress_8b_amd64.s
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
index 03562db16f..451160edda 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitreader.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -165,6 +165,11 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 {
 	return uint16(b.value >> ((64 - n) & 63))
 }
 
+// peekTopBits(n) is equivalent to peekBitsFast(64 - n)
+func (b *bitReaderShifted) peekTopBits(n uint8) uint16 {
+	return uint16(b.value >> n)
+}
+
 func (b *bitReaderShifted) advance(n uint8) {
 	b.bitsRead += n
 	b.value <<= n & 63
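peekTopBits lets the assembly precompute the shift amount (64 - tableLog) once and then do a single right shift per symbol. A tiny standalone check of the identity stated in the comment, using plain integer math:

```go
package main

import "fmt"

func main() {
	value := uint64(0xDEADBEEFCAFEBABE)
	tableLog := uint8(11)

	// peekBitsFast(n) returns the top n bits: value >> ((64 - n) & 63).
	fast := uint16(value >> ((64 - tableLog) & 63))

	// peekTopBits(m) returns value >> m, so with m = 64 - n it is identical.
	m := 64 - tableLog
	top := uint16(value >> m)

	fmt.Println(fast == top) // true
}
```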
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 3ae7d46771..04f6529955 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -725,189 +725,6 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
 	return dst, br.close()
 }
 
-// Decompress4X will decompress a 4X encoded stream.
-// The length of the supplied input must match the end of a block exactly.
-// The *capacity* of the dst slice must match the destination size of
-// the uncompressed data exactly.
-func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
-	if len(d.dt.single) == 0 {
-		return nil, errors.New("no table loaded")
-	}
-	if len(src) < 6+(4*1) {
-		return nil, errors.New("input too small")
-	}
-	if use8BitTables && d.actualTableLog <= 8 {
-		return d.decompress4X8bit(dst, src)
-	}
-
-	var br [4]bitReaderShifted
-	// Decode "jump table"
-	start := 6
-	for i := 0; i < 3; i++ {
-		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
-		if start+length >= len(src) {
-			return nil, errors.New("truncated input (or invalid offset)")
-		}
-		err := br[i].init(src[start : start+length])
-		if err != nil {
-			return nil, err
-		}
-		start += length
-	}
-	err := br[3].init(src[start:])
-	if err != nil {
-		return nil, err
-	}
-
-	// destination, offset to match first output
-	dstSize := cap(dst)
-	dst = dst[:dstSize]
-	out := dst
-	dstEvery := (dstSize + 3) / 4
-
-	const tlSize = 1 << tableLogMax
-	const tlMask = tlSize - 1
-	single := d.dt.single[:tlSize]
-
-	// Use temp table to avoid bound checks/append penalty.
-	buf := d.buffer()
-	var off uint8
-	var decoded int
-
-	// Decode 2 values from each decoder/loop.
-	const bufoff = 256
-	for {
-		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
-			break
-		}
-
-		{
-			const stream = 0
-			const stream2 = 1
-			br[stream].fillFast()
-			br[stream2].fillFast()
-
-			val := br[stream].peekBitsFast(d.actualTableLog)
-			val2 := br[stream2].peekBitsFast(d.actualTableLog)
-			v := single[val&tlMask]
-			v2 := single[val2&tlMask]
-			br[stream].advance(uint8(v.entry))
-			br[stream2].advance(uint8(v2.entry))
-			buf[stream][off] = uint8(v.entry >> 8)
-			buf[stream2][off] = uint8(v2.entry >> 8)
-
-			val = br[stream].peekBitsFast(d.actualTableLog)
-			val2 = br[stream2].peekBitsFast(d.actualTableLog)
-			v = single[val&tlMask]
-			v2 = single[val2&tlMask]
-			br[stream].advance(uint8(v.entry))
-			br[stream2].advance(uint8(v2.entry))
-			buf[stream][off+1] = uint8(v.entry >> 8)
-			buf[stream2][off+1] = uint8(v2.entry >> 8)
-		}
-
-		{
-			const stream = 2
-			const stream2 = 3
-			br[stream].fillFast()
-			br[stream2].fillFast()
-
-			val := br[stream].peekBitsFast(d.actualTableLog)
-			val2 := br[stream2].peekBitsFast(d.actualTableLog)
-			v := single[val&tlMask]
-			v2 := single[val2&tlMask]
-			br[stream].advance(uint8(v.entry))
-			br[stream2].advance(uint8(v2.entry))
-			buf[stream][off] = uint8(v.entry >> 8)
-			buf[stream2][off] = uint8(v2.entry >> 8)
-
-			val = br[stream].peekBitsFast(d.actualTableLog)
-			val2 = br[stream2].peekBitsFast(d.actualTableLog)
-			v = single[val&tlMask]
-			v2 = single[val2&tlMask]
-			br[stream].advance(uint8(v.entry))
-			br[stream2].advance(uint8(v2.entry))
-			buf[stream][off+1] = uint8(v.entry >> 8)
-			buf[stream2][off+1] = uint8(v2.entry >> 8)
-		}
-
-		off += 2
-
-		if off == 0 {
-			if bufoff > dstEvery {
-				d.bufs.Put(buf)
-				return nil, errors.New("corruption detected: stream overrun 1")
-			}
-			copy(out, buf[0][:])
-			copy(out[dstEvery:], buf[1][:])
-			copy(out[dstEvery*2:], buf[2][:])
-			copy(out[dstEvery*3:], buf[3][:])
-			out = out[bufoff:]
-			decoded += bufoff * 4
-			// There must at least be 3 buffers left.
-			if len(out) < dstEvery*3 {
-				d.bufs.Put(buf)
-				return nil, errors.New("corruption detected: stream overrun 2")
-			}
-		}
-	}
-	if off > 0 {
-		ioff := int(off)
-		if len(out) < dstEvery*3+ioff {
-			d.bufs.Put(buf)
-			return nil, errors.New("corruption detected: stream overrun 3")
-		}
-		copy(out, buf[0][:off])
-		copy(out[dstEvery:], buf[1][:off])
-		copy(out[dstEvery*2:], buf[2][:off])
-		copy(out[dstEvery*3:], buf[3][:off])
-		decoded += int(off) * 4
-		out = out[off:]
-	}
-
-	// Decode remaining.
-	remainBytes := dstEvery - (decoded / 4)
-	for i := range br {
-		offset := dstEvery * i
-		endsAt := offset + remainBytes
-		if endsAt > len(out) {
-			endsAt = len(out)
-		}
-		br := &br[i]
-		bitsLeft := br.remaining()
-		for bitsLeft > 0 {
-			br.fill()
-			if offset >= endsAt {
-				d.bufs.Put(buf)
-				return nil, errors.New("corruption detected: stream overrun 4")
-			}
-
-			// Read value and increment offset.
-			val := br.peekBitsFast(d.actualTableLog)
-			v := single[val&tlMask].entry
-			nBits := uint8(v)
-			br.advance(nBits)
-			bitsLeft -= uint(nBits)
-			out[offset] = uint8(v >> 8)
-			offset++
-		}
-		if offset != endsAt {
-			d.bufs.Put(buf)
-			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
-		}
-		decoded += offset - dstEvery*i
-		err = br.close()
-		if err != nil {
-			return nil, err
-		}
-	}
-	d.bufs.Put(buf)
-	if dstSize != decoded {
-		return nil, errors.New("corruption detected: short output block")
-	}
-	return dst, nil
-}
-
 // Decompress4X will decompress a 4X encoded stream.
 // The length of the supplied input must match the end of a block exactly.
 // The *capacity* of the dst slice must match the destination size of
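The generic Decompress4X loop is deleted here because the amd64-only file added further below reimplements it around an assembly inner loop. A sketch of the build-constraint split this relies on; the inverse-tagged pure-Go fallback file is an assumption, not part of this hunk:

```go
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc

// Hypothetical illustration: the assembly-backed Decompress4X added below
// builds only under these constraints, while other platforms are presumed to
// keep a pure-Go implementation behind the inverse constraint, so removing
// the generic loop from decompress.go does not break non-amd64 builds.
package huff0
```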
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s
new file mode 100644
index 0000000000..0d6cb1a962
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s
@@ -0,0 +1,488 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+#define bufoff      256 // see decompress.go, we're using [4][256]byte table
+
+// func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+//	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
+#define off             R8
+#define buffer          DI
+#define table           SI
+
+#define br_bits_read    R9
+#define br_value        R10
+#define br_offset       R11
+#define peek_bits       R12
+#define exhausted       DX
+
+#define br0             R13
+#define br1             R14
+#define br2             R15
+#define br3             BP
+
+	MOVQ BP, 0(SP)
+
+	XORQ exhausted, exhausted // exhausted = false
+	XORQ off, off             // off = 0
+
+	MOVBQZX peekBits+32(FP), peek_bits
+	MOVQ    buf+40(FP), buffer
+	MOVQ    tbl+48(FP), table
+
+	MOVQ pbr0+0(FP), br0
+	MOVQ pbr1+8(FP), br1
+	MOVQ pbr2+16(FP), br2
+	MOVQ pbr3+24(FP), br3
+
+main_loop:
+
+	// const stream = 0
+	// br0.fillFast()
+	MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
+	MOVQ    bitReaderShifted_value(br0), br_value
+	MOVQ    bitReaderShifted_off(br0), br_offset
+
+	// if b.bitsRead >= 32 {
+	CMPQ br_bits_read, $32
+	JB   skip_fill0
+
+	SUBQ $32, br_bits_read // b.bitsRead -= 32
+	SUBQ $4, br_offset     // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ bitReaderShifted_in(br0), AX
+	MOVL 0(br_offset)(AX*1), AX       // AX = uint32(b.in[b.off:b.off+4])
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+	MOVQ br_bits_read, CX
+	SHLQ CL, AX
+	ORQ  AX, br_value
+
+	// exhausted = exhausted || (br0.off < 4)
+	CMPQ  br_offset, $4
+	SETLT DL
+	ORB   DL, DH
+
+	// }
+skip_fill0:
+
+	// val0 := br0.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+	// v0 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX // AX - v0
+
+	// br0.advance(uint8(v0.entry))
+	MOVB    AH, BL           // BL = uint8(v0.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// val1 := br0.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+	// v1 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX // AX - v1
+
+	// br0.advance(uint8(v1.entry))
+	MOVB    AH, BH           // BH = uint8(v1.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CX, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW BX, 0(buffer)(off*1)
+
+	// SECOND PART:
+	// val2 := br0.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+	// v2 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX // AX - v0
+
+	// br0.advance(uint8(v0.entry))
+	MOVB    AH, BL           // BL = uint8(v0.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// val3 := br0.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+	// v3 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX // AX - v1
+
+	// br0.advance(uint8(v1.entry))
+	MOVB    AH, BH           // BH = uint8(v1.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CX, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off+2] = uint8(v2.entry >> 8)
+	// buf[stream][off+3] = uint8(v3.entry >> 8)
+	MOVW BX, 0+2(buffer)(off*1)
+
+	// update the bit reader structure
+	MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
+	MOVQ br_value, bitReaderShifted_value(br0)
+	MOVQ br_offset, bitReaderShifted_off(br0)
+
+	// const stream = 1
+	// br1.fillFast()
+	MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
+	MOVQ    bitReaderShifted_value(br1), br_value
+	MOVQ    bitReaderShifted_off(br1), br_offset
+
+	// if b.bitsRead >= 32 {
+	CMPQ br_bits_read, $32
+	JB   skip_fill1
+
+	SUBQ $32, br_bits_read // b.bitsRead -= 32
+	SUBQ $4, br_offset     // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ bitReaderShifted_in(br1), AX
+	MOVL 0(br_offset)(AX*1), AX       // AX = uint32(b.in[b.off:b.off+4])
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+	MOVQ br_bits_read, CX
+	SHLQ CL, AX
+	ORQ  AX, br_value
+
+	// exhausted = exhausted || (br1.off < 4)
+	CMPQ  br_offset, $4
+	SETLT DL
+	ORB   DL, DH
+
+	// }
+skip_fill1:
+
+	// val0 := br1.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+	// v0 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX // AX - v0
+
+	// br1.advance(uint8(v0.entry))
+	MOVB    AH, BL           // BL = uint8(v0.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// val1 := br1.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+	// v1 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX // AX - v1
+
+	// br1.advance(uint8(v1.entry))
+	MOVB    AH, BH           // BH = uint8(v1.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CX, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW BX, 256(buffer)(off*1)
+
+	// SECOND PART:
+	// val2 := br1.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+	// v2 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX // AX - v0
+
+	// br1.advance(uint8(v0.entry))
+	MOVB    AH, BL           // BL = uint8(v0.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// val3 := br1.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+	// v3 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX // AX - v1
+
+	// br1.advance(uint8(v1.entry))
+	MOVB    AH, BH           // BH = uint8(v1.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CX, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off+2] = uint8(v2.entry >> 8)
+	// buf[stream][off+3] = uint8(v3.entry >> 8)
+	MOVW BX, 256+2(buffer)(off*1)
+
+	// update the bit reader structure
+	MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
+	MOVQ br_value, bitReaderShifted_value(br1)
+	MOVQ br_offset, bitReaderShifted_off(br1)
+
+	// const stream = 2
+	// br2.fillFast()
+	MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
+	MOVQ    bitReaderShifted_value(br2), br_value
+	MOVQ    bitReaderShifted_off(br2), br_offset
+
+	// if b.bitsRead >= 32 {
+	CMPQ br_bits_read, $32
+	JB   skip_fill2
+
+	SUBQ $32, br_bits_read // b.bitsRead -= 32
+	SUBQ $4, br_offset     // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ bitReaderShifted_in(br2), AX
+	MOVL 0(br_offset)(AX*1), AX       // AX = uint32(b.in[b.off:b.off+4])
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+	MOVQ br_bits_read, CX
+	SHLQ CL, AX
+	ORQ  AX, br_value
+
+	// exhausted = exhausted || (br2.off < 4)
+	CMPQ  br_offset, $4
+	SETLT DL
+	ORB   DL, DH
+
+	// }
+skip_fill2:
+
+	// val0 := br2.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+	// v0 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX // AX - v0
+
+	// br2.advance(uint8(v0.entry))
+	MOVB    AH, BL           // BL = uint8(v0.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// val1 := br2.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+	// v1 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX // AX - v1
+
+	// br2.advance(uint8(v1.entry))
+	MOVB    AH, BH           // BH = uint8(v1.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CX, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW BX, 512(buffer)(off*1)
+
+	// SECOND PART:
+	// val2 := br2.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+	// v2 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX // AX - v0
+
+	// br2.advance(uint8(v0.entry))
+	MOVB    AH, BL           // BL = uint8(v0.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// val3 := br2.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+	// v3 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX // AX - v1
+
+	// br2.advance(uint8(v1.entry))
+	MOVB    AH, BH           // BH = uint8(v1.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CX, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off+2] = uint8(v2.entry >> 8)
+	// buf[stream][off+3] = uint8(v3.entry >> 8)
+	MOVW BX, 512+2(buffer)(off*1)
+
+	// update the bit reader structure
+	MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
+	MOVQ br_value, bitReaderShifted_value(br2)
+	MOVQ br_offset, bitReaderShifted_off(br2)
+
+	// const stream = 3
+	// br3.fillFast()
+	MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
+	MOVQ    bitReaderShifted_value(br3), br_value
+	MOVQ    bitReaderShifted_off(br3), br_offset
+
+	// if b.bitsRead >= 32 {
+	CMPQ br_bits_read, $32
+	JB   skip_fill3
+
+	SUBQ $32, br_bits_read // b.bitsRead -= 32
+	SUBQ $4, br_offset     // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ bitReaderShifted_in(br3), AX
+	MOVL 0(br_offset)(AX*1), AX       // AX = uint32(b.in[b.off:b.off+4])
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+	MOVQ br_bits_read, CX
+	SHLQ CL, AX
+	ORQ  AX, br_value
+
+	// exhausted = exhausted || (br3.off < 4)
+	CMPQ  br_offset, $4
+	SETLT DL
+	ORB   DL, DH
+
+	// }
+skip_fill3:
+
+	// val0 := br3.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+	// v0 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX // AX - v0
+
+	// br3.advance(uint8(v0.entry))
+	MOVB    AH, BL           // BL = uint8(v0.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// val1 := br3.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+	// v1 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX // AX - v1
+
+	// br3.advance(uint8(v1.entry))
+	MOVB    AH, BH           // BH = uint8(v1.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CX, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW BX, 768(buffer)(off*1)
+
+	// SECOND PART:
+	// val2 := br3.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+	// v2 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX // AX - v0
+
+	// br3.advance(uint8(v0.entry))
+	MOVB    AH, BL           // BL = uint8(v0.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// val3 := br3.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+	// v3 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX // AX - v1
+
+	// br3.advance(uint8(v1.entry))
+	MOVB    AH, BH           // BH = uint8(v1.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CX, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off+2] = uint8(v2.entry >> 8)
+	// buf[stream][off+3] = uint8(v3.entry >> 8)
+	MOVW BX, 768+2(buffer)(off*1)
+
+	// update the bit reader structure
+	MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
+	MOVQ br_value, bitReaderShifted_value(br3)
+	MOVQ br_offset, bitReaderShifted_off(br3)
+
+	ADDQ $4, off // off += 4
+
+	TESTB DH, DH // any br[i].off < 4?
+	JNZ   end
+
+	CMPQ off, $bufoff
+	JL   main_loop
+
+end:
+	MOVQ 0(SP), BP
+
+	MOVB off, ret+56(FP)
+	RET
+
+#undef off
+#undef buffer
+#undef table
+
+#undef br_bits_read
+#undef br_value
+#undef br_offset
+#undef peek_bits
+#undef exhausted
+
+#undef br0
+#undef br1
+#undef br2
+#undef br3
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in
new file mode 100644
index 0000000000..6d477a2c11
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in
@@ -0,0 +1,197 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+
+#define bufoff      256     // see decompress.go, we're using [4][256]byte table
+
+//func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+//	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
+#define off             R8
+#define buffer          DI
+#define table           SI
+
+#define br_bits_read    R9
+#define br_value        R10
+#define br_offset       R11
+#define peek_bits       R12
+#define exhausted       DX
+
+#define br0             R13
+#define br1             R14
+#define br2             R15
+#define br3             BP
+
+    MOVQ    BP, 0(SP)
+
+    XORQ    exhausted, exhausted    // exhausted = false
+    XORQ    off, off                // off = 0
+
+    MOVBQZX peekBits+32(FP), peek_bits
+    MOVQ    buf+40(FP), buffer
+    MOVQ    tbl+48(FP), table
+
+    MOVQ    pbr0+0(FP), br0
+    MOVQ    pbr1+8(FP), br1
+    MOVQ    pbr2+16(FP), br2
+    MOVQ    pbr3+24(FP), br3
+
+main_loop:
+{{ define "decode_2_values_x86" }}
+    // const stream = {{ var "id" }}
+    // br{{ var "id"}}.fillFast()
+    MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
+    MOVQ    bitReaderShifted_value(br{{ var "id" }}), br_value
+    MOVQ    bitReaderShifted_off(br{{ var "id" }}), br_offset
+
+	// if b.bitsRead >= 32 {
+    CMPQ    br_bits_read, $32
+    JB      skip_fill{{ var "id" }}
+
+    SUBQ    $32, br_bits_read       // b.bitsRead -= 32
+    SUBQ    $4, br_offset           // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+    MOVQ    bitReaderShifted_in(br{{ var "id" }}), AX
+    MOVL    0(br_offset)(AX*1), AX  // AX = uint32(b.in[b.off:b.off+4])
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+    MOVQ    br_bits_read, CX
+    SHLQ    CL, AX
+    ORQ     AX, br_value
+
+    // exhausted = exhausted || (br{{ var "id"}}.off < 4)
+    CMPQ    br_offset, $4
+    SETLT   DL
+    ORB     DL, DH
+    // }
+skip_fill{{ var "id" }}:
+
+    // val0 := br{{ var "id"}}.peekTopBits(peekBits)
+    MOVQ    br_value, AX
+    MOVQ    peek_bits, CX
+    SHRQ    CL, AX                  // AX = (value >> peek_bits) & mask
+
+    // v0 := table[val0&mask]
+    MOVW    0(table)(AX*2), AX      // AX - v0
+
+    // br{{ var "id"}}.advance(uint8(v0.entry))
+    MOVB    AH, BL                  // BL = uint8(v0.entry >> 8)
+    MOVBQZX AL, CX
+    SHLQ    CL, br_value            // value <<= n
+    ADDQ    CX, br_bits_read        // bits_read += n
+
+    // val1 := br{{ var "id"}}.peekTopBits(peekBits)
+    MOVQ    peek_bits, CX
+    MOVQ    br_value, AX
+    SHRQ    CL, AX                  // AX = (value >> peek_bits) & mask
+
+    // v1 := table[val1&mask]
+    MOVW    0(table)(AX*2), AX      // AX - v1
+
+    // br{{ var "id"}}.advance(uint8(v1.entry))
+    MOVB    AH, BH                  // BH = uint8(v1.entry >> 8)
+    MOVBQZX AL, CX
+    SHLQ    CX, br_value            // value <<= n
+    ADDQ    CX, br_bits_read        // bits_read += n
+
+
+    // these two writes get coalesced
+    // buf[stream][off] = uint8(v0.entry >> 8)
+    // buf[stream][off+1] = uint8(v1.entry >> 8)
+    MOVW    BX, {{ var "bufofs" }}(buffer)(off*1)
+
+    // SECOND PART:
+    // val2 := br{{ var "id"}}.peekTopBits(peekBits)
+    MOVQ    br_value, AX
+    MOVQ    peek_bits, CX
+    SHRQ    CL, AX                  // AX = (value >> peek_bits) & mask
+
+    // v2 := table[val0&mask]
+    MOVW    0(table)(AX*2), AX      // AX - v0
+
+    // br{{ var "id"}}.advance(uint8(v0.entry))
+    MOVB    AH, BL                  // BL = uint8(v0.entry >> 8)
+    MOVBQZX AL, CX
+    SHLQ    CL, br_value            // value <<= n
+    ADDQ    CX, br_bits_read        // bits_read += n
+
+    // val3 := br{{ var "id"}}.peekTopBits(peekBits)
+    MOVQ    peek_bits, CX
+    MOVQ    br_value, AX
+    SHRQ    CL, AX                  // AX = (value >> peek_bits) & mask
+
+    // v3 := table[val1&mask]
+    MOVW    0(table)(AX*2), AX      // AX - v1
+
+    // br{{ var "id"}}.advance(uint8(v1.entry))
+    MOVB    AH, BH                  // BH = uint8(v1.entry >> 8)
+    MOVBQZX AL, CX
+    SHLQ    CX, br_value            // value <<= n
+    ADDQ    CX, br_bits_read        // bits_read += n
+
+
+    // these two writes get coalesced
+    // buf[stream][off+2] = uint8(v2.entry >> 8)
+    // buf[stream][off+3] = uint8(v3.entry >> 8)
+    MOVW    BX, {{ var "bufofs" }}+2(buffer)(off*1)
+
+    // update the bit reader structure
+    MOVB    br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
+    MOVQ    br_value, bitReaderShifted_value(br{{ var "id" }})
+    MOVQ    br_offset, bitReaderShifted_off(br{{ var "id" }})
+{{ end }}
+
+    {{ set "id" "0" }}
+    {{ set "ofs" "0" }}
+    {{ set "bufofs" "0" }} {{/* id * bufoff */}}
+    {{ template "decode_2_values_x86" . }}
+
+    {{ set "id" "1" }}
+    {{ set "ofs" "8" }}
+    {{ set "bufofs" "256" }}
+    {{ template "decode_2_values_x86" . }}
+
+    {{ set "id" "2" }}
+    {{ set "ofs" "16" }}
+    {{ set "bufofs" "512" }}
+    {{ template "decode_2_values_x86" . }}
+
+    {{ set "id" "3" }}
+    {{ set "ofs" "24" }}
+    {{ set "bufofs" "768" }}
+    {{ template "decode_2_values_x86" . }}
+
+    ADDQ    $4, off     // off += 4
+
+    TESTB   DH, DH      // any br[i].off < 4?
+    JNZ     end
+
+    CMPQ    off, $bufoff
+    JL      main_loop
+end:
+    MOVQ    0(SP), BP
+
+    MOVB    off, ret+56(FP)
+    RET
+#undef  off
+#undef  buffer
+#undef  table
+
+#undef  br_bits_read
+#undef  br_value
+#undef  br_offset
+#undef  peek_bits
+#undef  exhausted
+
+#undef  br0
+#undef  br1
+#undef  br2
+#undef  br3
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
new file mode 100644
index 0000000000..d47f6644f3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -0,0 +1,181 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// This file contains the specialisation of Decoder.Decompress4X
+// that uses an asm implementation of its main loop.
+package huff0
+
+import (
+	"errors"
+	"fmt"
+)
+
+// decompress4x_main_loop_x86 is an x86 assembler implementation
+// of Decompress4X when tablelog > 8.
+// go:noescape
+func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+
+// decompress4x_8b_loop_x86 is an x86 assembler implementation
+// of Decompress4X when tablelog <= 8 which decodes 4 entries
+// per loop.
+// go:noescape
+func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+
+// fallback8BitSize is the size below which the pure Go version is faster.
+const fallback8BitSize = 800
+
+// Decompress4X will decompress a 4X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
+	if len(d.dt.single) == 0 {
+		return nil, errors.New("no table loaded")
+	}
+	if len(src) < 6+(4*1) {
+		return nil, errors.New("input too small")
+	}
+
+	use8BitTables := d.actualTableLog <= 8
+	if cap(dst) < fallback8BitSize && use8BitTables {
+		return d.decompress4X8bit(dst, src)
+	}
+	var br [4]bitReaderShifted
+	// Decode "jump table"
+	start := 6
+	for i := 0; i < 3; i++ {
+		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+		if start+length >= len(src) {
+			return nil, errors.New("truncated input (or invalid offset)")
+		}
+		err := br[i].init(src[start : start+length])
+		if err != nil {
+			return nil, err
+		}
+		start += length
+	}
+	err := br[3].init(src[start:])
+	if err != nil {
+		return nil, err
+	}
+
+	// destination, offset to match first output
+	dstSize := cap(dst)
+	dst = dst[:dstSize]
+	out := dst
+	dstEvery := (dstSize + 3) / 4
+
+	const tlSize = 1 << tableLogMax
+	const tlMask = tlSize - 1
+	single := d.dt.single[:tlSize]
+
+	// Use temp table to avoid bound checks/append penalty.
+	buf := d.buffer()
+	var off uint8
+	var decoded int
+
+	const debug = false
+
+	// see: bitReaderShifted.peekBitsFast()
+	peekBits := uint8((64 - d.actualTableLog) & 63)
+
+	// Decode 2 values from each decoder/loop.
+	const bufoff = 256
+	for {
+		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+			break
+		}
+
+		if use8BitTables {
+			off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
+		} else {
+			off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
+		}
+		if debug {
+			fmt.Print("DEBUG: ")
+			fmt.Printf("off=%d,", off)
+			for i := 0; i < 4; i++ {
+				fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}",
+					i, br[i].bitsRead, br[i].value, br[i].off)
+			}
+			fmt.Println("")
+		}
+
+		if off != 0 {
+			break
+		}
+
+		if bufoff > dstEvery {
+			d.bufs.Put(buf)
+			return nil, errors.New("corruption detected: stream overrun 1")
+		}
+		copy(out, buf[0][:])
+		copy(out[dstEvery:], buf[1][:])
+		copy(out[dstEvery*2:], buf[2][:])
+		copy(out[dstEvery*3:], buf[3][:])
+		out = out[bufoff:]
+		decoded += bufoff * 4
+		// There must at least be 3 buffers left.
+		if len(out) < dstEvery*3 {
+			d.bufs.Put(buf)
+			return nil, errors.New("corruption detected: stream overrun 2")
+		}
+	}
+	if off > 0 {
+		ioff := int(off)
+		if len(out) < dstEvery*3+ioff {
+			d.bufs.Put(buf)
+			return nil, errors.New("corruption detected: stream overrun 3")
+		}
+		copy(out, buf[0][:off])
+		copy(out[dstEvery:], buf[1][:off])
+		copy(out[dstEvery*2:], buf[2][:off])
+		copy(out[dstEvery*3:], buf[3][:off])
+		decoded += int(off) * 4
+		out = out[off:]
+	}
+
+	// Decode remaining.
+	remainBytes := dstEvery - (decoded / 4)
+	for i := range br {
+		offset := dstEvery * i
+		endsAt := offset + remainBytes
+		if endsAt > len(out) {
+			endsAt = len(out)
+		}
+		br := &br[i]
+		bitsLeft := br.remaining()
+		for bitsLeft > 0 {
+			br.fill()
+			if offset >= endsAt {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			val := br.peekBitsFast(d.actualTableLog)
+			v := single[val&tlMask].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= uint(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		if offset != endsAt {
+			d.bufs.Put(buf)
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			return nil, err
+		}
+	}
+	d.bufs.Put(buf)
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
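The public contract of Decompress4X is unchanged by the assembly rewrite: the serialized table is read first, and dst is sized by capacity. A hedged round-trip sketch against the huff0 API (function names recalled from the package's documentation; treat them as assumptions):

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	// Mildly skewed data so the block is actually compressible.
	data := make([]byte, 4096)
	for i := range data {
		data[i] = byte(i % 7)
	}

	var s huff0.Scratch
	comp, _, err := huff0.Compress4X(data, &s)
	if err != nil {
		panic(err) // ErrIncompressible / ErrUseRLE for degenerate inputs
	}

	// The output starts with the Huffman table; ReadTable consumes it and
	// returns the remaining 4X-encoded payload.
	s2, remain, err := huff0.ReadTable(comp, nil)
	if err != nil {
		panic(err)
	}

	// Per the doc comment: dst capacity must match the decoded size exactly.
	out, err := s2.Decoder().Decompress4X(make([]byte, 0, len(data)), remain)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(out) == len(data)) // true
}
```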
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
new file mode 100644
index 0000000000..2edad3ea5a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
@@ -0,0 +1,506 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+#ifdef GOAMD64_v4
+#ifndef GOAMD64_v3
+#define GOAMD64_v3
+#endif
+#endif
+
+#define bufoff      256 // see decompress.go, we're using [4][256]byte table
+
+// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+//	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
+#define off             R8
+#define buffer          DI
+#define table           SI
+
+#define br_bits_read    R9
+#define br_value        R10
+#define br_offset       R11
+#define peek_bits       R12
+#define exhausted       DX
+
+#define br0             R13
+#define br1             R14
+#define br2             R15
+#define br3             BP
+
+	MOVQ BP, 0(SP)
+
+	XORQ exhausted, exhausted // exhausted = false
+	XORQ off, off             // off = 0
+
+	MOVBQZX peekBits+32(FP), peek_bits
+	MOVQ    buf+40(FP), buffer
+	MOVQ    tbl+48(FP), table
+
+	MOVQ pbr0+0(FP), br0
+	MOVQ pbr1+8(FP), br1
+	MOVQ pbr2+16(FP), br2
+	MOVQ pbr3+24(FP), br3
+
+main_loop:
+
+	// const stream = 0
+	// br0.fillFast()
+	MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
+	MOVQ    bitReaderShifted_value(br0), br_value
+	MOVQ    bitReaderShifted_off(br0), br_offset
+
+	// We must have at least 2 * max tablelog left
+	CMPQ br_bits_read, $64-22
+	JBE  skip_fill0
+
+	SUBQ $32, br_bits_read // b.bitsRead -= 32
+	SUBQ $4, br_offset     // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ bitReaderShifted_in(br0), AX
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+	SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+
+#else
+	MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+	MOVQ br_bits_read, CX
+	SHLQ CL, AX
+
+#endif
+
+	ORQ AX, br_value
+
+	// exhausted = exhausted || (br0.off < 4)
+	CMPQ  br_offset, $4
+	SETLT DL
+	ORB   DL, DH
+
+	// }
+skip_fill0:
+
+	// val0 := br0.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+	SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+#endif
+
+	// v0 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX // AX - v0
+
+	// br0.advance(uint8(v0.entry))
+	MOVB AH, BL // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+	MOVBQZX AL, CX
+	SHLXQ   AX, br_value, br_value // value <<= n
+
+#else
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value // value <<= n
+
+#endif
+
+	ADDQ CX, br_bits_read // bits_read += n
+
+#ifdef GOAMD64_v3
+	SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+	// val1 := br0.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+#endif
+
+	// v1 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX // AX - v1
+
+	// br0.advance(uint8(v1.entry))
+	MOVB AH, BH // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+	MOVBQZX AL, CX
+	SHLXQ   AX, br_value, br_value // value <<= n
+
+#else
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value // value <<= n
+
+#endif
+
+	ADDQ CX, br_bits_read // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW BX, 0(buffer)(off*1)
+
+	// update the bit reader structure
+	MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
+	MOVQ br_value, bitReaderShifted_value(br0)
+	MOVQ br_offset, bitReaderShifted_off(br0)
+
+	// const stream = 1
+	// br1.fillFast()
+	MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
+	MOVQ    bitReaderShifted_value(br1), br_value
+	MOVQ    bitReaderShifted_off(br1), br_offset
+
+	// We must have at least 2 * max tablelog left
+	CMPQ br_bits_read, $64-22
+	JBE  skip_fill1
+
+	SUBQ $32, br_bits_read // b.bitsRead -= 32
+	SUBQ $4, br_offset     // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ bitReaderShifted_in(br1), AX
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+	SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+
+#else
+	MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+	MOVQ br_bits_read, CX
+	SHLQ CL, AX
+
+#endif
+
+	ORQ AX, br_value
+
+	// exhausted = exhausted || (br1.off < 4)
+	CMPQ  br_offset, $4
+	SETLT DL
+	ORB   DL, DH
+
+	// }
+skip_fill1:
+
+	// val0 := br1.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+	SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+#endif
+
+	// v0 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX // AX - v0
+
+	// br1.advance(uint8(v0.entry))
+	MOVB AH, BL // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+	MOVBQZX AL, CX
+	SHLXQ   AX, br_value, br_value // value <<= n
+
+#else
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value // value <<= n
+
+#endif
+
+	ADDQ CX, br_bits_read // bits_read += n
+
+#ifdef GOAMD64_v3
+	SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+	// val1 := br1.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+#endif
+
+	// v1 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX // AX - v1
+
+	// br1.advance(uint8(v1.entry))
+	MOVB AH, BH // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+	MOVBQZX AL, CX
+	SHLXQ   AX, br_value, br_value // value <<= n
+
+#else
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value // value <<= n
+
+#endif
+
+	ADDQ CX, br_bits_read // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW BX, 256(buffer)(off*1)
+
+	// update the bit reader structure
+	MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
+	MOVQ br_value, bitReaderShifted_value(br1)
+	MOVQ br_offset, bitReaderShifted_off(br1)
+
+	// const stream = 2
+	// br2.fillFast()
+	MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
+	MOVQ    bitReaderShifted_value(br2), br_value
+	MOVQ    bitReaderShifted_off(br2), br_offset
+
+	// We must have at least 2 * max tablelog left
+	CMPQ br_bits_read, $64-22
+	JBE  skip_fill2
+
+	SUBQ $32, br_bits_read // b.bitsRead -= 32
+	SUBQ $4, br_offset     // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ bitReaderShifted_in(br2), AX
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+	SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+
+#else
+	MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+	MOVQ br_bits_read, CX
+	SHLQ CL, AX
+
+#endif
+
+	ORQ AX, br_value
+
+	// exhausted = exhausted || (br2.off < 4)
+	CMPQ  br_offset, $4
+	SETLT DL
+	ORB   DL, DH
+
+	// }
+skip_fill2:
+
+	// val0 := br2.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+	SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+#endif
+
+	// v0 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX // AX - v0
+
+	// br2.advance(uint8(v0.entry))
+	MOVB AH, BL // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+	MOVBQZX AL, CX
+	SHLXQ   AX, br_value, br_value // value <<= n
+
+#else
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value // value <<= n
+
+#endif
+
+	ADDQ CX, br_bits_read // bits_read += n
+
+#ifdef GOAMD64_v3
+	SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+	// val1 := br2.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+#endif
+
+	// v1 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX // AX - v1
+
+	// br2.advance(uint8(v1.entry))
+	MOVB AH, BH // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+	MOVBQZX AL, CX
+	SHLXQ   AX, br_value, br_value // value <<= n
+
+#else
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value // value <<= n
+
+#endif
+
+	ADDQ CX, br_bits_read // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW BX, 512(buffer)(off*1)
+
+	// update the bit reader structure
+	MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
+	MOVQ br_value, bitReaderShifted_value(br2)
+	MOVQ br_offset, bitReaderShifted_off(br2)
+
+	// const stream = 3
+	// br3.fillFast()
+	MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
+	MOVQ    bitReaderShifted_value(br3), br_value
+	MOVQ    bitReaderShifted_off(br3), br_offset
+
+	// We must have at least 2 * max tablelog left
+	CMPQ br_bits_read, $64-22
+	JBE  skip_fill3
+
+	SUBQ $32, br_bits_read // b.bitsRead -= 32
+	SUBQ $4, br_offset     // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ bitReaderShifted_in(br3), AX
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+	SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+
+#else
+	MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+	MOVQ br_bits_read, CX
+	SHLQ CL, AX
+
+#endif
+
+	ORQ AX, br_value
+
+	// exhausted = exhausted || (br3.off < 4)
+	CMPQ  br_offset, $4
+	SETLT DL
+	ORB   DL, DH
+
+	// }
+skip_fill3:
+
+	// val0 := br3.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+	SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+#endif
+
+	// v0 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX // AX - v0
+
+	// br3.advance(uint8(v0.entry))
+	MOVB AH, BL // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+	MOVBQZX AL, CX
+	SHLXQ   AX, br_value, br_value // value <<= n
+
+#else
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value // value <<= n
+
+#endif
+
+	ADDQ CX, br_bits_read // bits_read += n
+
+#ifdef GOAMD64_v3
+	SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+	// val1 := br3.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
+
+#endif
+
+	// v1 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX // AX - v1
+
+	// br3.advance(uint8(v1.entry))
+	MOVB AH, BH // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+	MOVBQZX AL, CX
+	SHLXQ   AX, br_value, br_value // value <<= n
+
+#else
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value // value <<= n
+
+#endif
+
+	ADDQ CX, br_bits_read // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW BX, 768(buffer)(off*1)
+
+	// update the bit reader structure
+	MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
+	MOVQ br_value, bitReaderShifted_value(br3)
+	MOVQ br_offset, bitReaderShifted_off(br3)
+
+	ADDQ $2, off // off += 2
+
+	TESTB DH, DH // any br[i].off < 4?
+	JNZ   end
+
+	CMPQ off, $bufoff
+	JL   main_loop
+
+end:
+	MOVQ 0(SP), BP
+
+	MOVB off, ret+56(FP)
+	RET
+
+#undef off
+#undef buffer
+#undef table
+
+#undef br_bits_read
+#undef br_value
+#undef br_offset
+#undef peek_bits
+#undef exhausted
+
+#undef br0
+#undef br1
+#undef br2
+#undef br3
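For readers following the hand-written assembly above: each stream's guarded refill is the assembly form of the Go expression quoted in its comments, `b.value |= uint64(low) << (b.bitsRead & 63)`. Below is a minimal, self-contained Go sketch of that refill step; the `bitReader` type here is a simplified stand-in for the package's `bitReaderShifted`, not the real structure.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// bitReader is a simplified stand-in for bitReaderShifted, kept only to show
// the refill the assembly performs per stream before decoding two symbols.
type bitReader struct {
	in       []byte
	off      uint   // read position, counting down from the end of in
	value    uint64 // bits are consumed from the top of this word
	bitsRead uint8  // how many bits of value have been consumed
}

// fillFast mirrors the guarded block before each skip_fillN label: refill only
// when fewer than 2*tablelog (22) bits remain, matching
// "CMPQ br_bits_read, $64-22 / JBE skip_fillN" in the assembly.
func (b *bitReader) fillFast() {
	if b.bitsRead <= 64-22 {
		return
	}
	b.bitsRead -= 32
	b.off -= 4
	low := binary.LittleEndian.Uint32(b.in[b.off:])
	b.value |= uint64(low) << (b.bitsRead & 63) // as in the assembly comment
}

func main() {
	b := bitReader{in: []byte{1, 2, 3, 4, 5, 6, 7, 8}, off: 8, bitsRead: 50}
	b.fillFast()
	fmt.Printf("off=%d bitsRead=%d value=%#x\n", b.off, b.bitsRead, b.value)
}
```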
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in
new file mode 100644
index 0000000000..330d86ae15
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in
@@ -0,0 +1,195 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+#ifdef GOAMD64_v4
+#ifndef GOAMD64_v3
+#define GOAMD64_v3
+#endif
+#endif
+
+#define bufoff      256     // see decompress.go, we're using a [4][256]byte table
+
+//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+//	peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
+TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
+#define off             R8
+#define buffer          DI
+#define table           SI
+
+#define br_bits_read    R9
+#define br_value        R10
+#define br_offset       R11
+#define peek_bits       R12
+#define exhausted       DX
+
+#define br0             R13
+#define br1             R14
+#define br2             R15
+#define br3             BP
+
+    MOVQ    BP, 0(SP)
+
+    XORQ    exhausted, exhausted    // exhausted = false
+    XORQ    off, off                // off = 0
+
+    MOVBQZX peekBits+32(FP), peek_bits
+    MOVQ    buf+40(FP), buffer
+    MOVQ    tbl+48(FP), table
+
+    MOVQ    pbr0+0(FP), br0
+    MOVQ    pbr1+8(FP), br1
+    MOVQ    pbr2+16(FP), br2
+    MOVQ    pbr3+24(FP), br3
+
+main_loop:
+{{ define "decode_2_values_x86" }}
+    // const stream = {{ var "id" }}
+    // br{{ var "id"}}.fillFast()
+    MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
+    MOVQ    bitReaderShifted_value(br{{ var "id" }}), br_value
+    MOVQ    bitReaderShifted_off(br{{ var "id" }}), br_offset
+
+    // We must have at least 2 * max tablelog left
+    CMPQ    br_bits_read, $64-22
+    JBE     skip_fill{{ var "id" }}
+
+    SUBQ    $32, br_bits_read       // b.bitsRead -= 32
+    SUBQ    $4, br_offset           // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+    MOVQ    bitReaderShifted_in(br{{ var "id" }}), AX
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+    SHLXQ   br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+#else
+    MOVL    0(br_offset)(AX*1), AX  // AX = uint32(b.in[b.off:b.off+4])
+    MOVQ    br_bits_read, CX
+    SHLQ    CL, AX
+#endif
+
+    ORQ     AX, br_value
+
+    // exhausted = exhausted || (br{{ var "id"}}.off < 4)
+    CMPQ    br_offset, $4
+    SETLT   DL
+    ORB     DL, DH
+    // }
+skip_fill{{ var "id" }}:
+
+    // val0 := br{{ var "id"}}.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+    SHRXQ   peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+#else
+    MOVQ    br_value, AX
+    MOVQ    peek_bits, CX
+    SHRQ    CL, AX                  // AX = (value >> peek_bits) & mask
+#endif
+
+    // v0 := table[val0&mask]
+    MOVW    0(table)(AX*2), AX      // AX - v0
+
+    // br{{ var "id"}}.advance(uint8(v0.entry))
+    MOVB    AH, BL                  // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+    MOVBQZX AL, CX
+    SHLXQ   AX, br_value, br_value // value <<= n
+#else
+    MOVBQZX AL, CX
+    SHLQ    CL, br_value            // value <<= n
+#endif
+
+    ADDQ    CX, br_bits_read        // bits_read += n
+
+
+#ifdef GOAMD64_v3
+    SHRXQ    peek_bits, br_value, AX  // AX = (value >> peek_bits) & mask
+#else
+    // val1 := br{{ var "id"}}.peekTopBits(peekBits)
+    MOVQ    peek_bits, CX
+    MOVQ    br_value, AX
+    SHRQ    CL, AX                  // AX = (value >> peek_bits) & mask
+#endif
+
+    // v1 := table[val1&mask]
+    MOVW    0(table)(AX*2), AX      // AX - v1
+
+    // br{{ var "id"}}.advance(uint8(v1.entry))
+    MOVB    AH, BH                  // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+    MOVBQZX AL, CX
+    SHLXQ   AX, br_value, br_value // value <<= n
+#else
+    MOVBQZX AL, CX
+    SHLQ    CL, br_value            // value <<= n
+#endif
+
+    ADDQ    CX, br_bits_read        // bits_read += n
+
+
+    // these two writes get coalesced
+    // buf[stream][off] = uint8(v0.entry >> 8)
+    // buf[stream][off+1] = uint8(v1.entry >> 8)
+    MOVW    BX, {{ var "bufofs" }}(buffer)(off*1)
+
+    // update the bit reader structure
+    MOVB    br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
+    MOVQ    br_value, bitReaderShifted_value(br{{ var "id" }})
+    MOVQ    br_offset, bitReaderShifted_off(br{{ var "id" }})
+{{ end }}
+
+    {{ set "id" "0" }}
+    {{ set "ofs" "0" }}
+    {{ set "bufofs" "0" }} {{/* id * bufoff */}}
+    {{ template "decode_2_values_x86" . }}
+
+    {{ set "id" "1" }}
+    {{ set "ofs" "8" }}
+    {{ set "bufofs" "256" }}
+    {{ template "decode_2_values_x86" . }}
+
+    {{ set "id" "2" }}
+    {{ set "ofs" "16" }}
+    {{ set "bufofs" "512" }}
+    {{ template "decode_2_values_x86" . }}
+
+    {{ set "id" "3" }}
+    {{ set "ofs" "24" }}
+    {{ set "bufofs" "768" }}
+    {{ template "decode_2_values_x86" . }}
+
+    ADDQ    $2, off     // off += 2
+
+    TESTB   DH, DH      // any br[i].off < 4?
+    JNZ     end
+
+    CMPQ    off, $bufoff
+    JL      main_loop
+end:
+    MOVQ    0(SP), BP
+
+    MOVB    off, ret+56(FP)
+    RET
+#undef  off
+#undef  buffer
+#undef  table
+
+#undef  br_bits_read
+#undef  br_value
+#undef  br_offset
+#undef  peek_bits
+#undef  exhausted
+
+#undef  br0
+#undef  br1
+#undef  br2
+#undef  br3
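The `.s.in` file above is a text/template source for the assembly: `{{ set }}` and `{{ var }}` are not built-in template actions, so the generator presumably registers them as custom functions before expanding the per-stream block four times. A minimal sketch of such a generator, under that assumption (this is not the project's actual generator):

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// Shared variable store backing the template's {{ set }} / {{ var }} calls.
	vars := map[string]string{}
	funcs := template.FuncMap{
		"set": func(k, v string) string { vars[k] = v; return "" },
		"var": func(k string) string { return vars[k] },
	}

	src, err := os.ReadFile("decompress_amd64.s.in")
	if err != nil {
		panic(err)
	}

	// Funcs must be registered before Parse so the calls resolve.
	tmpl := template.Must(template.New("asm").Funcs(funcs).Parse(string(src)))
	if err := tmpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```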
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
new file mode 100644
index 0000000000..126b4d68a9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
@@ -0,0 +1,193 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+// This file contains a generic implementation of Decoder.Decompress4X.
+package huff0
+
+import (
+	"errors"
+	"fmt"
+)
+
+// Decompress4X will decompress a 4X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
+	if len(d.dt.single) == 0 {
+		return nil, errors.New("no table loaded")
+	}
+	if len(src) < 6+(4*1) {
+		return nil, errors.New("input too small")
+	}
+	if use8BitTables && d.actualTableLog <= 8 {
+		return d.decompress4X8bit(dst, src)
+	}
+
+	var br [4]bitReaderShifted
+	// Decode "jump table"
+	start := 6
+	for i := 0; i < 3; i++ {
+		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+		if start+length >= len(src) {
+			return nil, errors.New("truncated input (or invalid offset)")
+		}
+		err := br[i].init(src[start : start+length])
+		if err != nil {
+			return nil, err
+		}
+		start += length
+	}
+	err := br[3].init(src[start:])
+	if err != nil {
+		return nil, err
+	}
+
+	// destination, offset to match first output
+	dstSize := cap(dst)
+	dst = dst[:dstSize]
+	out := dst
+	dstEvery := (dstSize + 3) / 4
+
+	const tlSize = 1 << tableLogMax
+	const tlMask = tlSize - 1
+	single := d.dt.single[:tlSize]
+
+	// Use temp table to avoid bound checks/append penalty.
+	buf := d.buffer()
+	var off uint8
+	var decoded int
+
+	// Decode 2 values from each decoder/loop.
+	const bufoff = 256
+	for {
+		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+			break
+		}
+
+		{
+			const stream = 0
+			const stream2 = 1
+			br[stream].fillFast()
+			br[stream2].fillFast()
+
+			val := br[stream].peekBitsFast(d.actualTableLog)
+			val2 := br[stream2].peekBitsFast(d.actualTableLog)
+			v := single[val&tlMask]
+			v2 := single[val2&tlMask]
+			br[stream].advance(uint8(v.entry))
+			br[stream2].advance(uint8(v2.entry))
+			buf[stream][off] = uint8(v.entry >> 8)
+			buf[stream2][off] = uint8(v2.entry >> 8)
+
+			val = br[stream].peekBitsFast(d.actualTableLog)
+			val2 = br[stream2].peekBitsFast(d.actualTableLog)
+			v = single[val&tlMask]
+			v2 = single[val2&tlMask]
+			br[stream].advance(uint8(v.entry))
+			br[stream2].advance(uint8(v2.entry))
+			buf[stream][off+1] = uint8(v.entry >> 8)
+			buf[stream2][off+1] = uint8(v2.entry >> 8)
+		}
+
+		{
+			const stream = 2
+			const stream2 = 3
+			br[stream].fillFast()
+			br[stream2].fillFast()
+
+			val := br[stream].peekBitsFast(d.actualTableLog)
+			val2 := br[stream2].peekBitsFast(d.actualTableLog)
+			v := single[val&tlMask]
+			v2 := single[val2&tlMask]
+			br[stream].advance(uint8(v.entry))
+			br[stream2].advance(uint8(v2.entry))
+			buf[stream][off] = uint8(v.entry >> 8)
+			buf[stream2][off] = uint8(v2.entry >> 8)
+
+			val = br[stream].peekBitsFast(d.actualTableLog)
+			val2 = br[stream2].peekBitsFast(d.actualTableLog)
+			v = single[val&tlMask]
+			v2 = single[val2&tlMask]
+			br[stream].advance(uint8(v.entry))
+			br[stream2].advance(uint8(v2.entry))
+			buf[stream][off+1] = uint8(v.entry >> 8)
+			buf[stream2][off+1] = uint8(v2.entry >> 8)
+		}
+
+		off += 2
+
+		if off == 0 {
+			if bufoff > dstEvery {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 1")
+			}
+			copy(out, buf[0][:])
+			copy(out[dstEvery:], buf[1][:])
+			copy(out[dstEvery*2:], buf[2][:])
+			copy(out[dstEvery*3:], buf[3][:])
+			out = out[bufoff:]
+			decoded += bufoff * 4
+			// There must be at least 3 buffers left.
+			if len(out) < dstEvery*3 {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 2")
+			}
+		}
+	}
+	if off > 0 {
+		ioff := int(off)
+		if len(out) < dstEvery*3+ioff {
+			d.bufs.Put(buf)
+			return nil, errors.New("corruption detected: stream overrun 3")
+		}
+		copy(out, buf[0][:off])
+		copy(out[dstEvery:], buf[1][:off])
+		copy(out[dstEvery*2:], buf[2][:off])
+		copy(out[dstEvery*3:], buf[3][:off])
+		decoded += int(off) * 4
+		out = out[off:]
+	}
+
+	// Decode remaining.
+	remainBytes := dstEvery - (decoded / 4)
+	for i := range br {
+		offset := dstEvery * i
+		endsAt := offset + remainBytes
+		if endsAt > len(out) {
+			endsAt = len(out)
+		}
+		br := &br[i]
+		bitsLeft := br.remaining()
+		for bitsLeft > 0 {
+			br.fill()
+			if offset >= endsAt {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			val := br.peekBitsFast(d.actualTableLog)
+			v := single[val&tlMask].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= uint(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		if offset != endsAt {
+			d.bufs.Put(buf)
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			return nil, err
+		}
+	}
+	d.bufs.Put(buf)
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
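As a rough usage sketch of the Decompress4X path added above: compress a block with Compress4X, rebuild the table with ReadTable, then decode via Scratch.Decoder().Decompress4X. The exact signatures and nil-Scratch handling are assumed from the huff0 package docs; per the doc comment above, cap(dst) must equal the uncompressed size.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	// Compressible sample input.
	data := bytes.Repeat([]byte("hello huff0 "), 1000)

	// Compress as 4 interleaved streams; the table is emitted in front.
	comp, _, err := huff0.Compress4X(data, nil)
	if err != nil {
		panic(err)
	}

	// Rebuild the decoding table, then decode the remaining payload.
	s, remain, err := huff0.ReadTable(comp, nil)
	if err != nil {
		panic(err)
	}
	// Per the doc comment, cap(dst) must match the uncompressed size exactly.
	out, err := s.Decoder().Decompress4X(make([]byte, 0, len(data)), remain)
	if err != nil {
		panic(err)
	}
	fmt.Println("round-trip ok:", bytes.Equal(out, data))
}
```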
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index c876c591ac..e3445ac194 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -153,10 +153,10 @@ http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
 
 This package:
 file    out     level   insize      outsize     millis  mb/s
-silesia.tar zskp    1   211947520   73101992    643     313.87
-silesia.tar zskp    2   211947520   67504318    969     208.38
-silesia.tar zskp    3   211947520   64595893    2007    100.68
-silesia.tar zskp    4   211947520   60995370    8825    22.90
+silesia.tar zskp    1   211947520   73821326    634     318.47
+silesia.tar zskp    2   211947520   67655404    1508    133.96
+silesia.tar zskp    3   211947520   64746933    3000    67.37
+silesia.tar zskp    4   211947520   60073508    16926   11.94
 
 cgo zstd:
 silesia.tar zstd    1   211947520   73605392    543     371.56
@@ -165,94 +165,94 @@ silesia.tar zstd    6   211947520   62916450    1913    105.66
 silesia.tar zstd    9   211947520   60212393    5063    39.92
 
 gzip, stdlib/this package:
-silesia.tar gzstd   1   211947520   80007735    1654    122.21
-silesia.tar gzkp    1   211947520   80136201    1152    175.45
+silesia.tar gzstd   1   211947520   80007735    1498    134.87
+silesia.tar gzkp    1   211947520   80088272    1009    200.31
 
 GOB stream of binary data. Highly compressible.
 https://files.klauspost.com/compress/gob-stream.7z
 
 file        out     level   insize  outsize     millis  mb/s
-gob-stream  zskp    1   1911399616  235022249   3088    590.30
-gob-stream  zskp    2   1911399616  205669791   3786    481.34
-gob-stream  zskp    3   1911399616  175034659   9636    189.17
-gob-stream  zskp    4   1911399616  165609838   50369   36.19
+gob-stream  zskp    1   1911399616  233948096   3230    564.34
+gob-stream  zskp    2   1911399616  203997694   4997    364.73
+gob-stream  zskp    3   1911399616  173526523   13435   135.68
+gob-stream  zskp    4   1911399616  162195235   47559   38.33
 
 gob-stream  zstd    1   1911399616  249810424   2637    691.26
 gob-stream  zstd    3   1911399616  208192146   3490    522.31
 gob-stream  zstd    6   1911399616  193632038   6687    272.56
 gob-stream  zstd    9   1911399616  177620386   16175   112.70
 
-gob-stream  gzstd   1   1911399616  357382641   10251   177.82
-gob-stream  gzkp    1   1911399616  359753026   5438    335.20
+gob-stream  gzstd   1   1911399616  357382013   9046    201.49
+gob-stream  gzkp    1   1911399616  359136669   4885    373.08
 
 The test data for the Large Text Compression Benchmark is the first
 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
 http://mattmahoney.net/dc/textdata.html
 
 file    out level   insize      outsize     millis  mb/s
-enwik9  zskp    1   1000000000  343848582   3609    264.18
-enwik9  zskp    2   1000000000  317276632   5746    165.97
-enwik9  zskp    3   1000000000  292243069   12162   78.41
-enwik9  zskp    4   1000000000  262183768   82837   11.51
+enwik9  zskp    1   1000000000  343833605   3687    258.64
+enwik9  zskp    2   1000000000  317001237   7672    124.29
+enwik9  zskp    3   1000000000  291915823   15923   59.89
+enwik9  zskp    4   1000000000  261710291   77697   12.27
 
 enwik9  zstd    1   1000000000  358072021   3110    306.65
 enwik9  zstd    3   1000000000  313734672   4784    199.35
 enwik9  zstd    6   1000000000  295138875   10290   92.68
 enwik9  zstd    9   1000000000  278348700   28549   33.40
 
-enwik9  gzstd   1   1000000000  382578136   9604    99.30
-enwik9  gzkp    1   1000000000  383825945   6544    145.73
+enwik9  gzstd   1   1000000000  382578136   8608    110.78
+enwik9  gzkp    1   1000000000  382781160   5628    169.45
 
 Highly compressible JSON file.
 https://files.klauspost.com/compress/github-june-2days-2019.json.zst
 
 file                        out level   insize      outsize     millis  mb/s
-github-june-2days-2019.json zskp    1   6273951764  699045015   10620   563.40
-github-june-2days-2019.json zskp    2   6273951764  617881763   11687   511.96
-github-june-2days-2019.json zskp    3   6273951764  524340691   34043   175.75
-github-june-2days-2019.json zskp    4   6273951764  470320075   170190  35.16
+github-june-2days-2019.json zskp    1   6273951764  697439532   9789    611.17
+github-june-2days-2019.json zskp    2   6273951764  610876538   18553   322.49
+github-june-2days-2019.json zskp    3   6273951764  517662858   44186   135.41
+github-june-2days-2019.json zskp    4   6273951764  464617114   165373  36.18
 
 github-june-2days-2019.json zstd    1   6273951764  766284037   8450    708.00
 github-june-2days-2019.json zstd    3   6273951764  661889476   10927   547.57
 github-june-2days-2019.json zstd    6   6273951764  642756859   22996   260.18
 github-june-2days-2019.json zstd    9   6273951764  601974523   52413   114.16
 
-github-june-2days-2019.json gzstd   1   6273951764  1164400847  29948   199.79
-github-june-2days-2019.json gzkp    1   6273951764  1125417694  21788   274.61
+github-june-2days-2019.json gzstd   1   6273951764  1164397768  26793   223.32
+github-june-2days-2019.json gzkp    1   6273951764  1120631856  17693   338.16
 
 VM Image, Linux mint with a few installed applications:
 https://files.klauspost.com/compress/rawstudio-mint14.7z
 
 file                    out level   insize      outsize     millis  mb/s
-rawstudio-mint14.tar    zskp    1   8558382592  3667489370  20210   403.84
-rawstudio-mint14.tar    zskp    2   8558382592  3364592300  31873   256.07
-rawstudio-mint14.tar    zskp    3   8558382592  3158085214  77675   105.08
-rawstudio-mint14.tar    zskp    4   8558382592  2965110639  857750  9.52
+rawstudio-mint14.tar    zskp    1   8558382592  3718400221  18206   448.29
+rawstudio-mint14.tar    zskp    2   8558382592  3326118337  37074   220.15
+rawstudio-mint14.tar    zskp    3   8558382592  3163842361  87306   93.49
+rawstudio-mint14.tar    zskp    4   8558382592  2970480650  783862  10.41
 
 rawstudio-mint14.tar    zstd    1   8558382592  3609250104  17136   476.27
 rawstudio-mint14.tar    zstd    3   8558382592  3341679997  29262   278.92
 rawstudio-mint14.tar    zstd    6   8558382592  3235846406  77904   104.77
 rawstudio-mint14.tar    zstd    9   8558382592  3160778861  140946  57.91
 
-rawstudio-mint14.tar    gzstd   1   8558382592  3926257486  57722   141.40
-rawstudio-mint14.tar    gzkp    1   8558382592  3962605659  45113   180.92
+rawstudio-mint14.tar    gzstd   1   8558382592  3926234992  51345   158.96
+rawstudio-mint14.tar    gzkp    1   8558382592  3960117298  36722   222.26
 
 CSV data:
 https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
 
 file                    out level   insize      outsize     millis  mb/s
-nyc-taxi-data-10M.csv   zskp    1   3325605752  641339945   8925    355.35
-nyc-taxi-data-10M.csv   zskp    2   3325605752  591748091   11268   281.44
-nyc-taxi-data-10M.csv   zskp    3   3325605752  530289687   25239   125.66
-nyc-taxi-data-10M.csv   zskp    4   3325605752  476268884   135958  23.33
+nyc-taxi-data-10M.csv   zskp    1   3325605752  641319332   9462    335.17
+nyc-taxi-data-10M.csv   zskp    2   3325605752  588976126   17570   180.50
+nyc-taxi-data-10M.csv   zskp    3   3325605752  529329260   32432   97.79
+nyc-taxi-data-10M.csv   zskp    4   3325605752  474949772   138025  22.98
 
 nyc-taxi-data-10M.csv   zstd    1   3325605752  687399637   8233    385.18
 nyc-taxi-data-10M.csv   zstd    3   3325605752  598514411   10065   315.07
 nyc-taxi-data-10M.csv   zstd    6   3325605752  570522953   20038   158.27
 nyc-taxi-data-10M.csv   zstd    9   3325605752  517554797   64565   49.12
 
-nyc-taxi-data-10M.csv   gzstd   1   3325605752  928656485   23876   132.83
-nyc-taxi-data-10M.csv   gzkp    1   3325605752  922257165   16780   189.00
+nyc-taxi-data-10M.csv   gzstd   1   3325605752  928654908   21270   149.11
+nyc-taxi-data-10M.csv   gzkp    1   3325605752  922273214   13929   227.68
 ```
 
 ## Decompressor
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 607b62ee37..7d567a54a0 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -167,6 +167,11 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
 			}
 			return ErrCompressedSizeTooBig
 		}
+		// Even an empty compressed block must be at least 2 bytes:
+		// one for the Literals_Block_Type and one for the Sequences_Section_Header.
+		if cSize < 2 {
+			return ErrBlockTooSmall
+		}
 	case blockTypeRaw:
 		if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
 			if debugDecoder {
@@ -491,6 +496,9 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 }
 
 func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
+	if debugDecoder {
+		printf("prepareSequences: %d byte(s) input\n", len(in))
+	}
 	// Decode Sequences
 	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section
 	if len(in) < 1 {
@@ -499,8 +507,6 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
 	var nSeqs int
 	seqHeader := in[0]
 	switch {
-	case seqHeader == 0:
-		in = in[1:]
 	case seqHeader < 128:
 		nSeqs = int(seqHeader)
 		in = in[1:]
@@ -517,6 +523,13 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
 		nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8)
 		in = in[3:]
 	}
+	if nSeqs == 0 && len(in) != 0 {
+		// When there are no sequences, there should not be any more data...
+		if debugDecoder {
+			printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in))
+		}
+		return ErrUnexpectedBlockSize
+	}
 
 	var seqs = &hist.decoders
 	seqs.nSeqs = nSeqs
@@ -635,6 +648,7 @@ func (b *blockDec) decodeSequences(hist *history) error {
 		hist.decoders.seqSize = len(hist.decoders.literals)
 		return nil
 	}
+	hist.decoders.windowSize = hist.windowSize
 	hist.decoders.prevOffset = hist.recentOffsets
 	err := hist.decoders.decode(b.sequence)
 	hist.recentOffsets = hist.decoders.prevOffset
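The hunk above drops the special `seqHeader == 0` case (the `seqHeader < 128` branch already yields zero sequences) and adds a check that a zero-sequence block carries no trailing data. For reference, a small stand-alone sketch of the Sequences_Section_Header count parsing, based on the zstd format; the helper name is illustrative, not the decoder's internals.

```go
package main

import (
	"errors"
	"fmt"
)

// parseSeqCount sketches the sequence-count parsing that prepareSequences
// performs: a 1-, 2-, or 3-byte header depending on the first byte.
func parseSeqCount(in []byte) (nSeqs int, rest []byte, err error) {
	if len(in) < 1 {
		return 0, nil, errors.New("block too small")
	}
	b0 := in[0]
	switch {
	case b0 < 128:
		return int(b0), in[1:], nil
	case b0 < 255:
		if len(in) < 2 {
			return 0, nil, errors.New("block too small")
		}
		return (int(b0)-128)<<8 | int(in[1]), in[2:], nil
	default: // b0 == 255
		if len(in) < 3 {
			return 0, nil, errors.New("block too small")
		}
		return 0x7f00 + int(in[1]) + int(in[2])<<8, in[3:], nil
	}
}

func main() {
	n, rest, _ := parseSeqCount([]byte{255, 0x34, 0x12, 0xaa})
	fmt.Println(n, len(rest)) // 37172 1
}
```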
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index a93dfaf100..9fcdaac1dc 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -348,10 +348,10 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 			frame.history.setDict(&dict)
 		}
 
-		if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+		if frame.FrameContentSize != fcsUnknown && frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
 			return dst, ErrDecoderSizeExceeded
 		}
-		if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 {
+		if frame.FrameContentSize < 1<<30 {
 			// Never preallocate more than 1 GB up front.
 			if cap(dst)-len(dst) < int(frame.FrameContentSize) {
 				dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
@@ -514,7 +514,7 @@ func (d *Decoder) nextBlockSync() (ok bool) {
 
 		// Check frame size (before CRC)
 		d.syncStream.decodedFrame += uint64(len(d.current.b))
-		if d.frame.FrameContentSize > 0 && d.syncStream.decodedFrame > d.frame.FrameContentSize {
+		if d.syncStream.decodedFrame > d.frame.FrameContentSize {
 			if debugDecoder {
 				printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
 			}
@@ -523,7 +523,7 @@ func (d *Decoder) nextBlockSync() (ok bool) {
 		}
 
 		// Check FCS
-		if d.current.d.Last && d.frame.FrameContentSize > 0 && d.syncStream.decodedFrame != d.frame.FrameContentSize {
+		if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize {
 			if debugDecoder {
 				printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
 			}
@@ -700,6 +700,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
 				}
 				hist.decoders = block.async.newHist.decoders
 				hist.recentOffsets = block.async.newHist.recentOffsets
+				hist.windowSize = block.async.newHist.windowSize
 				if block.async.newHist.dict != nil {
 					hist.setDict(block.async.newHist.dict)
 				}
@@ -811,11 +812,11 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
 			}
 			if !hasErr {
 				decodedFrame += uint64(len(do.b))
-				if fcs > 0 && decodedFrame > fcs {
+				if decodedFrame > fcs {
 					println("fcs exceeded", block.Last, fcs, decodedFrame)
 					do.err = ErrFrameSizeExceeded
 					hasErr = true
-				} else if block.Last && fcs > 0 && decodedFrame != fcs {
+				} else if block.Last && fcs != fcsUnknown && decodedFrame != fcs {
 					do.err = ErrFrameSizeMismatch
 					hasErr = true
 				} else {
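The decoder.go changes follow from switching the "unknown frame content size" sentinel from 0 to fcsUnknown (math.MaxUint64): where the unknown size is the bound on the right of a `>`, the old `fcs > 0 &&` guard becomes unnecessary, but where the sentinel itself is tested against a limit or compared for equality, an explicit `!= fcsUnknown` guard is now required. A tiny illustration:

```go
package main

import (
	"fmt"
	"math"
)

const fcsUnknown = math.MaxUint64 // sentinel: frame content size not present

func main() {
	fcs := uint64(fcsUnknown)
	decoded := uint64(12345)
	maxDecodedSize := uint64(1 << 20)

	// fcs as the bound: nothing exceeds MaxUint64, so the old "fcs > 0 &&"
	// guard can be dropped from checks like decodedFrame > fcs.
	fmt.Println("frame size exceeded:", decoded > fcs) // false

	// fcs as the value under test: here the sentinel *would* trip the limit,
	// so an explicit guard is required (as in the maxDecodedSize check).
	fmt.Println("budget exceeded:", fcs != fcsUnknown && fcs > maxDecodedSize) // false

	// Exact-size checks likewise need the guard, or every unknown-size frame
	// would report a mismatch.
	fmt.Println("size mismatch:", fcs != fcsUnknown && decoded != fcs) // false
}
```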
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 29c3176b05..11089d2232 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -197,7 +197,7 @@ func (d *frameDec) reset(br byteBuffer) error {
 	default:
 		fcsSize = 1 << v
 	}
-	d.FrameContentSize = 0
+	d.FrameContentSize = fcsUnknown
 	if fcsSize > 0 {
 		b, err := br.readSmall(fcsSize)
 		if err != nil {
@@ -343,12 +343,7 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 			err = ErrDecoderSizeExceeded
 			break
 		}
-		if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize {
-			println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize)
-			err = ErrFrameSizeExceeded
-			break
-		}
-		if d.FrameContentSize > 0 && uint64(len(d.history.b)-crcStart) > d.FrameContentSize {
+		if uint64(len(d.history.b)-crcStart) > d.FrameContentSize {
 			println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize)
 			err = ErrFrameSizeExceeded
 			break
@@ -356,13 +351,13 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 		if dec.Last {
 			break
 		}
-		if debugDecoder && d.FrameContentSize > 0 {
+		if debugDecoder {
 			println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize)
 		}
 	}
 	dst = d.history.b
 	if err == nil {
-		if d.FrameContentSize > 0 && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
+		if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
 			err = ErrFrameSizeMismatch
 		} else if d.HasCheckSum {
 			var n int
diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz.go b/vendor/github.com/klauspost/compress/zstd/fuzz.go
index fda8a74228..7f2210e053 100644
--- a/vendor/github.com/klauspost/compress/zstd/fuzz.go
+++ b/vendor/github.com/klauspost/compress/zstd/fuzz.go
@@ -1,5 +1,5 @@
-//go:build gofuzz
-// +build gofuzz
+//go:build ignorecrc
+// +build ignorecrc
 
 // Copyright 2019+ Klaus Post. All rights reserved.
 // License information can be found in the LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz_none.go b/vendor/github.com/klauspost/compress/zstd/fuzz_none.go
index 0515b201cc..6811c68a89 100644
--- a/vendor/github.com/klauspost/compress/zstd/fuzz_none.go
+++ b/vendor/github.com/klauspost/compress/zstd/fuzz_none.go
@@ -1,5 +1,5 @@
-//go:build !gofuzz
-// +build !gofuzz
+//go:build !ignorecrc
+// +build !ignorecrc
 
 // Copyright 2019+ Klaus Post. All rights reserved.
 // License information can be found in the LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
index 213736ad77..819f1461b7 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -107,7 +107,10 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
 	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
 	s.seqSize = 0
 	litRemain := len(s.literals)
-
+	maxBlockSize := maxCompressedBlockSize
+	if s.windowSize < maxBlockSize {
+		maxBlockSize = s.windowSize
+	}
 	for i := range seqs {
 		var ll, mo, ml int
 		if br.off > 4+((maxOffsetBits+16+16)>>3) {
@@ -192,7 +195,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
 		}
 		s.seqSize += ll + ml
 		if s.seqSize > maxBlockSize {
-			return fmt.Errorf("output (%d) bigger than max block size", s.seqSize)
+			return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
 		}
 		litRemain -= ll
 		if litRemain < 0 {
@@ -230,7 +233,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
 	}
 	s.seqSize += litRemain
 	if s.seqSize > maxBlockSize {
-		return fmt.Errorf("output (%d) bigger than max block size", s.seqSize)
+		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
 	}
 	err := br.close()
 	if err != nil {
@@ -347,6 +350,10 @@ func (s *sequenceDecs) decodeSync(history *history) error {
 	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
 	hist := history.b[history.ignoreBuffer:]
 	out := s.out
+	maxBlockSize := maxCompressedBlockSize
+	if s.windowSize < maxBlockSize {
+		maxBlockSize = s.windowSize
+	}
 
 	for i := seqs - 1; i >= 0; i-- {
 		if br.overread() {
@@ -426,7 +433,7 @@ func (s *sequenceDecs) decodeSync(history *history) error {
 		}
 		size := ll + ml + len(out)
 		if size-startSize > maxBlockSize {
-			return fmt.Errorf("output (%d) bigger than max block size", size)
+			return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
 		}
 		if size > cap(out) {
 			// Not enough size, which can happen under high volume block streaming conditions
@@ -535,6 +542,11 @@ func (s *sequenceDecs) decodeSync(history *history) error {
 		}
 	}
 
+	// Check that there is space for the final literals
+	if len(s.literals)+len(s.out)-startSize > maxBlockSize {
+		return fmt.Errorf("output (%d) bigger than max block size (%d)", len(s.out), maxBlockSize)
+	}
+
 	// Add final literals
 	s.out = append(out, s.literals...)
 	return br.close()
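The seqdec.go changes clamp maxBlockSize to the window size and add a guard that appending the remaining literals cannot push the block past that limit. A small sketch with made-up numbers showing what the new guard rejects (the helper name and values are illustrative, not the decoder's internals):

```go
package main

import "fmt"

// literalsFit mirrors the added guard: after sequence execution, appending the
// remaining literals must not push the block output past maxBlockSize.
func literalsFit(literals, outLen, startSize, maxBlockSize int) bool {
	return literals+outLen-startSize <= maxBlockSize
}

func main() {
	const maxBlockSize = 128 << 10 // clamped to the window size when smaller
	fmt.Println(literalsFit(1<<10, 100<<10, 0, maxBlockSize))  // true: fits
	fmt.Println(literalsFit(40<<10, 100<<10, 0, maxBlockSize)) // false: would overrun
}
```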
diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go
index 967f29b312..ffffcbc254 100644
--- a/vendor/github.com/klauspost/compress/zstd/zip.go
+++ b/vendor/github.com/klauspost/compress/zstd/zip.go
@@ -20,7 +20,7 @@ const ZipMethodPKWare = 20
 
 var zipReaderPool sync.Pool
 
-// newZipReader cannot be used since we would leak goroutines...
+// newZipReader creates a pooled zip decompressor.
 func newZipReader(r io.Reader) io.ReadCloser {
 	dec, ok := zipReaderPool.Get().(*Decoder)
 	if ok {
@@ -44,10 +44,14 @@ func (r *pooledZipReader) Read(p []byte) (n int, err error) {
 	r.mu.Lock()
 	defer r.mu.Unlock()
 	if r.dec == nil {
-		return 0, errors.New("Read after Close")
+		return 0, errors.New("read after close or EOF")
 	}
 	dec, err := r.dec.Read(p)
-
+	if err == io.EOF {
+		err = r.dec.Reset(nil)
+		zipReaderPool.Put(r.dec)
+		r.dec = nil
+	}
 	return dec, err
 }
 
@@ -112,11 +116,5 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
 // ZipDecompressor returns a decompressor that can be registered with zip libraries.
 // See ZipCompressor for example.
 func ZipDecompressor() func(r io.Reader) io.ReadCloser {
-	return func(r io.Reader) io.ReadCloser {
-		d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
-		if err != nil {
-			panic(err)
-		}
-		return d.IOReadCloser()
-	}
+	return newZipReader
 }
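For context on the zip.go changes, a hedged sketch of how ZipCompressor/ZipDecompressor are typically wired into archive/zip; ZipMethodWinZip is assumed to be the package's zstd-in-zip method ID (only ZipMethodPKWare is visible in the hunk above):

```go
package main

import (
	"archive/zip"
	"bytes"
	"io"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Write a zip entry compressed with zstd.
	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)
	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())
	w, err := zw.CreateHeader(&zip.FileHeader{Name: "hello.txt", Method: zstd.ZipMethodWinZip})
	if err != nil {
		log.Fatal(err)
	}
	io.WriteString(w, "hello zstd-in-zip")
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// Read it back; the pooled reader returned by ZipDecompressor is
	// recycled automatically when it reaches EOF (see the change above).
	zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		log.Fatal(err)
	}
	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())
	rc, err := zr.File[0].Open()
	if err != nil {
		log.Fatal(err)
	}
	data, _ := io.ReadAll(rc)
	rc.Close()
	log.Printf("decoded: %s", data)
}
```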
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index 0b0c2571dd..c1c90b4a07 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -39,6 +39,9 @@ const zstdMinMatch = 3
 // Reset the buffer offset when reaching this.
 const bufferReset = math.MaxInt32 - MaxWindowSize
 
+// fcsUnknown is used for unknown frame content size.
+const fcsUnknown = math.MaxUint64
+
 var (
 	// ErrReservedBlockType is returned when a reserved block type is found.
 	// Typically this indicates wrong or corrupted input.
@@ -52,6 +55,10 @@ var (
 	// Typically returned on invalid input.
 	ErrBlockTooSmall = errors.New("block too small")
 
+	// ErrUnexpectedBlockSize is returned when a block has unexpected size.
+	// Typically returned on invalid input.
+	ErrUnexpectedBlockSize = errors.New("unexpected block size")
+
 	// ErrMagicMismatch is returned when a "magic" number isn't what is expected.
 	// Typically this indicates wrong or corrupted input.
 	ErrMagicMismatch = errors.New("invalid input: magic number mismatch")
diff --git a/vendor/modules.txt b/vendor/modules.txt
index c3dae287f2..90d924d8d2 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -69,7 +69,7 @@ github.com/containerd/containerd/log
 github.com/containerd/containerd/pkg/userns
 github.com/containerd/containerd/platforms
 github.com/containerd/containerd/sys
-# github.com/containerd/stargz-snapshotter/estargz v0.11.2
+# github.com/containerd/stargz-snapshotter/estargz v0.11.3
 github.com/containerd/stargz-snapshotter/estargz
 github.com/containerd/stargz-snapshotter/estargz/errorutil
 # github.com/containernetworking/cni v1.0.1
@@ -233,7 +233,7 @@ github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/host
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.38.3-0.20220308085612-93ce26691863
+# github.com/containers/storage v1.38.3-0.20220321121613-8e565392dd91
 ## explicit
 github.com/containers/storage
 github.com/containers/storage/drivers
@@ -455,7 +455,7 @@ github.com/jinzhu/copier
 # github.com/json-iterator/go v1.1.12
 ## explicit
 github.com/json-iterator/go
-# github.com/klauspost/compress v1.15.0
+# github.com/klauspost/compress v1.15.1
 github.com/klauspost/compress
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse