Merge pull request #9994 from containers/dependabot/go_modules/github.com/containers/image/v5-5.11.0
Bump github.com/containers/image/v5 from 5.10.5 to 5.11.0
go.mod (2 lines changed)
@@ -13,7 +13,7 @@ require (
 	github.com/containers/buildah v1.20.1-0.20210402144408-36a37402d0c8
 	github.com/containers/common v0.35.4
 	github.com/containers/conmon v2.0.20+incompatible
-	github.com/containers/image/v5 v5.10.5
+	github.com/containers/image/v5 v5.11.0
 	github.com/containers/ocicrypt v1.1.0
 	github.com/containers/psgo v1.5.2
 	github.com/containers/storage v1.28.1
go.sum (8 lines changed)
@@ -180,8 +180,9 @@ github.com/containers/common v0.35.4 h1:szyWRncsHkBwCVpu1dkEOXUjkwCetlfcLmKJTwo1
 github.com/containers/common v0.35.4/go.mod h1:rMzxgD7nMGw++cEbsp+NZv0UJO4rgXbm7F7IbJPTwIE=
 github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
 github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
-github.com/containers/image/v5 v5.10.5 h1:VK1UbsZMzjdw5Xqr3Im9h4iOqHWU0naFs+I78kavc7I=
 github.com/containers/image/v5 v5.10.5/go.mod h1:SgIbWEedCNBbn2FI5cH0/jed1Ecy2s8XK5zTxvJTzII=
+github.com/containers/image/v5 v5.11.0 h1:SwxGucW1AZ8H/5KH9jW70lo9WyuOrtxafutyQ9RPPLw=
+github.com/containers/image/v5 v5.11.0/go.mod h1:dCbUB4w6gmxIEOCsE0tZQppr8iBoXb4Evr74ZKlmwoI=
 github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
 github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -474,8 +475,9 @@ github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs
 github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.12 h1:famVnQVu7QwryBN4jNseQdUKES71ZAOnB6UQQJPZvqk=
 github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.13 h1:eSvu8Tmq6j2psUJqJrLcWH6K3w5Dwc+qipbaA6eVEN4=
+github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
 github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -491,6 +493,7 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a h1:weJVJJRzAJBFRlAiJQROKQs8oC9vOxvm4rZmBBk0ONw=
 github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
+github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
 github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -771,7 +774,6 @@ github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
 github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
 github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
 github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
-github.com/vbauerster/mpb/v5 v5.4.0 h1:n8JPunifvQvh6P1D1HAl2Ur9YcmKT1tpoUuiea5mlmg=
 github.com/vbauerster/mpb/v5 v5.4.0/go.mod h1:fi4wVo7BVQ22QcvFObm+VwliQXlV1eBT8JDaKXR4JGI=
 github.com/vbauerster/mpb/v6 v6.0.3 h1:j+twHHhSUe8aXWaT/27E98G5cSBeqEuJSVCMjmLg0PI=
 github.com/vbauerster/mpb/v6 v6.0.3/go.mod h1:5luBx4rDLWxpA4t6I5sdeeQuZhqDxc+wr5Nqf35+tnM=
vendor/github.com/containers/image/v5/copy/copy.go (192 lines changed; generated, vendored)
@@ -16,6 +16,7 @@ import (
 	"github.com/containers/image/v5/image"
 	internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
 	"github.com/containers/image/v5/internal/pkg/platform"
+	internalTypes "github.com/containers/image/v5/internal/types"
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/pkg/blobinfocache"
 	"github.com/containers/image/v5/pkg/compression"
@@ -28,8 +29,8 @@ import (
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
-	"github.com/vbauerster/mpb/v5"
-	"github.com/vbauerster/mpb/v5/decor"
+	"github.com/vbauerster/mpb/v6"
+	"github.com/vbauerster/mpb/v6/decor"
 	"golang.org/x/crypto/ssh/terminal"
 	"golang.org/x/sync/semaphore"
 )
@@ -46,7 +47,7 @@ var (
 	// ErrDecryptParamsMissing is returned if there is missing decryption parameters
 	ErrDecryptParamsMissing = errors.New("Necessary DecryptParameters not present")
 
-	// maxParallelDownloads is used to limit the maxmimum number of parallel
+	// maxParallelDownloads is used to limit the maximum number of parallel
 	// downloads. Let's follow Firefox by limiting it to 6.
 	maxParallelDownloads = uint(6)
 )
@@ -108,19 +109,20 @@ func (d *digestingReader) Read(p []byte) (int, error) {
 // copier allows us to keep track of diffID values for blobs, and other
 // data shared across one or more images in a possible manifest list.
 type copier struct {
-	dest                 types.ImageDestination
-	rawSource            types.ImageSource
-	reportWriter         io.Writer
-	progressOutput       io.Writer
-	progressInterval     time.Duration
-	progress             chan types.ProgressProperties
-	blobInfoCache        internalblobinfocache.BlobInfoCache2
-	copyInParallel       bool
-	compressionFormat    compression.Algorithm
-	compressionLevel     *int
-	ociDecryptConfig     *encconfig.DecryptConfig
-	ociEncryptConfig     *encconfig.EncryptConfig
-	maxParallelDownloads uint
+	dest                  types.ImageDestination
+	rawSource             types.ImageSource
+	reportWriter          io.Writer
+	progressOutput        io.Writer
+	progressInterval      time.Duration
+	progress              chan types.ProgressProperties
+	blobInfoCache         internalblobinfocache.BlobInfoCache2
+	copyInParallel        bool
+	compressionFormat     compression.Algorithm
+	compressionLevel      *int
+	ociDecryptConfig      *encconfig.DecryptConfig
+	ociEncryptConfig      *encconfig.EncryptConfig
+	maxParallelDownloads  uint
+	downloadForeignLayers bool
 }
 
 // imageCopier tracks state specific to a single image (possibly an item of a manifest list)
@@ -194,6 +196,13 @@ type Options struct {
 	OciDecryptConfig *encconfig.DecryptConfig
 	// MaxParallelDownloads indicates the maximum layers to pull at the same time. A reasonable default is used if this is left as 0.
 	MaxParallelDownloads uint
+	// When OptimizeDestinationImageAlreadyExists is set, optimize the copy assuming that the destination image already
+	// exists (and is equivalent). Making the eventual (no-op) copy more performant for this case. Enabling the option
+	// is slightly pessimistic if the destination image doesn't exist, or is not equivalent.
+	OptimizeDestinationImageAlreadyExists bool
+	// Download layer contents with "nondistributable" media types ("foreign" layers) and translate the layer media type
+	// to not indicate "nondistributable".
+	DownloadForeignLayers bool
 }
 
 // validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value
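(Editor's note: a minimal sketch of how a caller could exercise the new Options fields through the public copy API. The image references, transports, and the permissive policy here are placeholder assumptions for illustration, not taken from this PR.)

package main

import (
	"context"
	"log"
	"os"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/transports/alltransports"
)

func main() {
	srcRef, err := alltransports.ParseImageName("docker://quay.io/libpod/alpine:latest") // hypothetical source
	if err != nil {
		log.Fatal(err)
	}
	destRef, err := alltransports.ParseImageName("containers-storage:localhost/alpine:latest") // hypothetical destination
	if err != nil {
		log.Fatal(err)
	}

	// Accept any image; a real caller should load its configured signature policy instead.
	policyCtx, err := signature.NewPolicyContext(&signature.Policy{
		Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer policyCtx.Destroy()

	_, err = copy.Image(context.Background(), policyCtx, destRef, srcRef, &copy.Options{
		ReportWriter:                          os.Stdout,
		MaxParallelDownloads:                  6,    // 0 lets the library pick its default
		OptimizeDestinationImageAlreadyExists: true, // skip work when the destination manifest already matches
		DownloadForeignLayers:                 true, // fetch "nondistributable" layers instead of passing URLs through
	})
	if err != nil {
		log.Fatal(err)
	}
}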
@@ -269,10 +278,11 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 		// FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
 		// For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
 		// we might want to add a separate CommonCtx — or would that be too confusing?
-		blobInfoCache:        internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)),
-		ociDecryptConfig:     options.OciDecryptConfig,
-		ociEncryptConfig:     options.OciEncryptConfig,
-		maxParallelDownloads: options.MaxParallelDownloads,
+		blobInfoCache:         internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)),
+		ociDecryptConfig:      options.OciDecryptConfig,
+		ociEncryptConfig:      options.OciEncryptConfig,
+		maxParallelDownloads:  options.MaxParallelDownloads,
+		downloadForeignLayers: options.DownloadForeignLayers,
 	}
 	// Default to using gzip compression unless specified otherwise.
 	if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil {
@@ -361,6 +371,45 @@ func supportsMultipleImages(dest types.ImageDestination) bool {
 	return false
 }
 
+// compareImageDestinationManifestEqual compares the `src` and `dest` image manifests (reading the manifest from the
+// (possibly remote) destination). Returning true and the destination's manifest, type and digest if they compare equal.
+func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src types.Image, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) {
+	srcManifest, _, err := src.Manifest(ctx)
+	if err != nil {
+		return false, nil, "", "", errors.Wrapf(err, "Error reading manifest from image")
+	}
+
+	srcManifestDigest, err := manifest.Digest(srcManifest)
+	if err != nil {
+		return false, nil, "", "", errors.Wrapf(err, "Error calculating manifest digest")
+	}
+
+	destImageSource, err := dest.Reference().NewImageSource(ctx, options.DestinationCtx)
+	if err != nil {
+		logrus.Debugf("Unable to create destination image %s source: %v", dest.Reference(), err)
+		return false, nil, "", "", nil
+	}
+
+	destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance)
+	if err != nil {
+		logrus.Debugf("Unable to get destination image %s/%s manifest: %v", destImageSource, targetInstance, err)
+		return false, nil, "", "", nil
+	}
+
+	destManifestDigest, err := manifest.Digest(destManifest)
+	if err != nil {
+		return false, nil, "", "", errors.Wrapf(err, "Error calculating manifest digest")
+	}
+
+	logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest)
+	if srcManifestDigest != destManifestDigest {
+		return false, nil, "", "", nil
+	}
+
+	// Destination and source manifests, types and digests should all be equivalent
+	return true, destManifest, destManifestType, destManifestDigest, nil
+}
+
 // copyMultipleImages copies some or all of an image list's instances, using
 // policyContext to validate source image admissibility.
 func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, copiedManifestType string, retErr error) {
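(Editor's note: the short-circuit above rests on the fact that a manifest digest is a cryptographic hash of the raw manifest bytes, so equal digests imply byte-identical layer lists, configs, and annotations. A standalone sketch using the same go-digest module; the manifest JSON is made up.)

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	srcManifest := []byte(`{"schemaVersion":2,"config":{"digest":"sha256:aaa"}}`)
	destManifest := []byte(`{"schemaVersion":2,"config":{"digest":"sha256:aaa"}}`)

	// digest.FromBytes hashes the raw bytes (sha256 by default), so equal
	// digests mean the two manifests are identical and the copy can be skipped.
	if digest.FromBytes(srcManifest) == digest.FromBytes(destManifest) {
		fmt.Println("manifests are identical, the copy can be skipped")
	}
}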
@@ -646,6 +695,26 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
 	// If encrypted and decryption keys provided, we should try to decrypt
 	ic.diffIDsAreNeeded = ic.diffIDsAreNeeded || (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || ic.c.ociEncryptConfig != nil
 
+	// If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal
+	if options.OptimizeDestinationImageAlreadyExists {
+		shouldUpdateSigs := len(sigs) > 0 || options.SignBy != "" // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible
+		noPendingManifestUpdates := ic.noPendingManifestUpdates()
+
+		logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates)
+		if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates {
+			isSrcDestManifestEqual, retManifest, retManifestType, retManifestDigest, err := compareImageDestinationManifestEqual(ctx, options, src, targetInstance, c.dest)
+			if err != nil {
+				logrus.Warnf("Failed to compare destination image manifest: %v", err)
+				return nil, "", "", err
+			}
+
+			if isSrcDestManifestEqual {
+				c.Printf("Skipping: image already present at destination\n")
+				return retManifest, retManifestType, retManifestDigest, nil
+			}
+		}
+	}
+
 	if err := ic.copyLayers(ctx); err != nil {
 		return nil, "", "", err
 	}
@@ -702,6 +771,9 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
 			return nil, "", "", fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
 		}
 	}
+	if targetInstance != nil {
+		targetInstance = &retManifestDigest
+	}
 
 	if options.SignBy != "" {
 		newSig, err := c.createSignature(manifestBytes, options.SignBy)
@@ -781,6 +853,10 @@ func (ic *imageCopier) updateEmbeddedDockerReference() error {
 	return nil
 }
 
+func (ic *imageCopier) noPendingManifestUpdates() bool {
+	return reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly})
+}
+
 // isTTY returns true if the io.Writer is a file and a tty.
 func isTTY(w io.Writer) bool {
 	if f, ok := w.(*os.File); ok {
@@ -834,7 +910,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
 		defer copySemaphore.Release(1)
 		defer copyGroup.Done()
 		cld := copyLayerData{}
-		if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
+		if !ic.c.downloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
 			// DiffIDs are, currently, needed only when converting from schema1.
 			// In which case src.LayerInfos will not have URLs because schema1
 			// does not support them.
@@ -845,7 +921,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
 				logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
 			}
 		} else {
-			cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool)
+			cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool, index)
 		}
 		data[index] = cld
 	}
@@ -901,6 +977,8 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
 		diffIDs[i] = cld.diffID
 	}
 
+	// WARNING: If you are adding new reasons to change ic.manifestUpdates, also update the
+	// OptimizeDestinationImageAlreadyExists short-circuit conditions
 	ic.manifestUpdates.InformationOnly.LayerInfos = destInfos
 	if ic.diffIDsAreNeeded {
 		ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
@@ -929,7 +1007,7 @@ func layerDigestsDiffer(a, b []types.BlobInfo) bool {
 // and its digest.
 func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) {
 	pendingImage := ic.src
-	if !reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly}) {
+	if !ic.noPendingManifestUpdates() {
 		if !ic.canModifyManifest {
 			return nil, "", errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden")
 		}
@@ -1012,10 +1090,9 @@ func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind
 			),
 		)
 	} else {
-		bar = pool.AddSpinner(info.Size,
-			mpb.SpinnerOnLeft,
+		bar = pool.Add(0,
+			mpb.NewSpinnerFiller([]string{".", "..", "...", "....", ""}, mpb.SpinnerOnLeft),
 			mpb.BarFillerClearOnComplete(),
-			mpb.SpinnerStyle([]string{".", "..", "...", "....", ""}),
 			mpb.PrependDecorators(
 				decor.OnComplete(decor.Name(prefix), onComplete),
 			),
@@ -1040,7 +1117,7 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
 		progressPool, progressCleanup := c.newProgressPool(ctx)
 		defer progressCleanup()
 		bar := c.createProgressBar(progressPool, srcInfo, "config", "done")
-		destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar)
+		destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar, -1)
 		if err != nil {
 			return types.BlobInfo{}, err
 		}
@@ -1066,7 +1143,7 @@ type diffIDResult struct {
 
 // copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it,
 // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
-func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) {
+func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, layerIndex int) (types.BlobInfo, digest.Digest, error) {
 	// If the srcInfo doesn't contain compression information, try to compute it from the
 	// MediaType, which was either read from a manifest by way of LayerInfos() or constructed
 	// by LayerInfosForCopy(), if it was supplied at all. If we succeed in copying the blob,
@@ -1099,7 +1176,26 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
 	// a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob.
 	// Fixing that will probably require passing more information to TryReusingBlob() than the current version of
 	// the ImageDestination interface lets us pass in.
-	reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs)
+	var (
+		blobInfo types.BlobInfo
+		reused   bool
+		err      error
+	)
+	// Note: the storage destination optimizes the committing of
+	// layers which requires passing the index of the layer.
+	// Hence, we need to special case and cast.
+	dest, ok := ic.c.dest.(internalTypes.ImageDestinationWithOptions)
+	if ok {
+		options := internalTypes.TryReusingBlobOptions{
+			Cache:         ic.c.blobInfoCache,
+			CanSubstitute: ic.canSubstituteBlobs,
+			LayerIndex:    &layerIndex,
+		}
+		reused, blobInfo, err = dest.TryReusingBlobWithOptions(ctx, srcInfo, options)
+	} else {
+		reused, blobInfo, err = ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs)
+	}
+
 	if err != nil {
 		return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
 	}
@@ -1141,7 +1237,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
 
 	bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done")
 
-	blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar)
+	blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex)
 	if err != nil {
 		return types.BlobInfo{}, "", err
 	}
@@ -1172,7 +1268,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
 // perhaps (de/re/)compressing the stream,
 // and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
 func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
-	diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) {
+	diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar, layerIndex int) (types.BlobInfo, <-chan diffIDResult, error) {
 	var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil
 	var diffIDChan chan diffIDResult
 
@@ -1197,7 +1293,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
 		}
 	}
 
-	blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, toEncrypt, bar) // Sets err to nil on success
+	blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, toEncrypt, bar, layerIndex) // Sets err to nil on success
 	return blobInfo, diffIDChan, err
 	// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
 }
@@ -1249,7 +1345,7 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
 // and returns a complete blobInfo of the copied blob.
 func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
 	getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
-	canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar) (types.BlobInfo, error) {
+	canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar, layerIndex int) (types.BlobInfo, error) {
 	if isConfig { // This is guaranteed by the caller, but set it here to be explicit.
 		canModifyBlob = false
 	}
@@ -1267,8 +1363,9 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 	if err != nil {
 		return types.BlobInfo{}, errors.Wrapf(err, "Error preparing to verify blob %s", srcInfo.Digest)
 	}
+
 	var destStream io.Reader = digestingReader
 
 	// === Decrypt the stream, if required.
 	var decrypted bool
 	if isOciEncrypted(srcInfo.MediaType) && c.ociDecryptConfig != nil {
 		newDesc := imgspecv1.Descriptor{
@@ -1298,12 +1395,13 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 		return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
 	}
 	isCompressed := decompressor != nil
-	destStream = bar.ProxyReader(destStream)
-
 	if expectedCompressionFormat, known := expectedCompressionFormats[srcInfo.MediaType]; known && isCompressed && compressionFormat.Name() != expectedCompressionFormat.Name() {
 		logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedCompressionFormat.Name(), compressionFormat.Name())
 	}
 
+	// === Update progress bars
+	destStream = bar.ProxyReader(destStream)
+
 	// === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
 	var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
 	if getOriginalLayerCopyWriter != nil {
@@ -1312,6 +1410,8 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 	}
 
 	// === Deal with layer compression/decompression if necessary
+	// WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists
+	// short-circuit conditions
 	var inputInfo types.BlobInfo
 	var compressionOperation types.LayerCompression
 	uploadCompressionFormat := &c.compressionFormat
@@ -1393,7 +1493,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 		}
 	}
 
-	// Perform image encryption for valid mediatypes if ociEncryptConfig provided
+	// === Encrypt the stream for valid mediatypes if ociEncryptConfig provided
 	var (
 		encrypted bool
 		finalizer ocicrypt.EncryptLayerFinalizer
@@ -1441,7 +1541,23 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 	}
 
 	// === Finally, send the layer stream to dest.
-	uploadedInfo, err := c.dest.PutBlob(ctx, &errorAnnotationReader{destStream}, inputInfo, c.blobInfoCache, isConfig)
+	var uploadedInfo types.BlobInfo
+	// Note: the storage destination optimizes the committing of layers
+	// which requires passing the index of the layer. Hence, we need to
+	// special case and cast.
+	dest, ok := c.dest.(internalTypes.ImageDestinationWithOptions)
+	if ok {
+		options := internalTypes.PutBlobOptions{
+			Cache:    c.blobInfoCache,
+			IsConfig: isConfig,
+		}
+		if !isConfig {
+			options.LayerIndex = &layerIndex
+		}
+		uploadedInfo, err = dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options)
+	} else {
+		uploadedInfo, err = c.dest.PutBlob(ctx, &errorAnnotationReader{destStream}, inputInfo, c.blobInfoCache, isConfig)
+	}
 	if err != nil {
 		return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
 	}
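(Editor's note: the maxParallelDownloads cap that copy.go threads through this file is enforced in copyLayers with a weighted semaphore, the copySemaphore seen in the hunks above. A self-contained sketch of that pattern, assuming golang.org/x/sync/semaphore; the sleep stands in for a real layer download.)

package main

import (
	"context"
	"fmt"
	"sync"
	"time"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(6) // mirrors maxParallelDownloads = 6
	var wg sync.WaitGroup

	for i := 0; i < 20; i++ {
		// Acquire before spawning, so at most 6 downloads run at once.
		if err := sem.Acquire(ctx, 1); err != nil {
			break // context cancelled
		}
		wg.Add(1)
		go func(layer int) {
			defer sem.Release(1)
			defer wg.Done()
			time.Sleep(10 * time.Millisecond) // stand-in for copying one layer
			fmt.Println("copied layer", layer)
		}(i)
	}
	wg.Wait()
}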
vendor/github.com/containers/image/v5/docker/docker_client.go (2 lines changed; generated, vendored)

@@ -502,6 +502,8 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url
 			attempts == backoffNumIterations {
 			return res, err
 		}
+		// close response body before retry or context done
+		res.Body.Close()
 
 		delay = parseRetryAfter(res, delay)
 		if delay > backoffMaxDelay {
vendor/github.com/containers/image/v5/docker/docker_image.go (1 line changed; generated, vendored)

@@ -139,6 +139,7 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef
 		return "", err
 	}
 
+	defer res.Body.Close()
 	if res.StatusCode != http.StatusOK {
 		return "", errors.Wrapf(registryHTTPResponseToError(res), "Error reading digest %s in %s", tagOrDigest, dr.ref.Name())
 	}
vendor/github.com/containers/image/v5/docker/docker_image_dest.go (2 lines changed; generated, vendored)

@@ -445,7 +445,7 @@ func successStatus(status int) bool {
 	return status >= 200 && status <= 399
 }
 
-// isManifestInvalidError returns true iff err from client.HandleErrorReponse is a “manifest invalid” error.
+// isManifestInvalidError returns true iff err from client.HandleErrorResponse is a “manifest invalid” error.
 func isManifestInvalidError(err error) bool {
 	errors, ok := err.(errcode.Errors)
 	if !ok || len(errors) == 0 {
vendor/github.com/containers/image/v5/docker/docker_image_src.go (2 lines changed; generated, vendored)

@@ -251,6 +251,7 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string)
 		if resp.StatusCode != http.StatusOK {
 			err = errors.Errorf("error fetching external blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode))
 			logrus.Debug(err)
+			resp.Body.Close()
 			continue
 		}
 		break
@@ -290,6 +291,7 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
 		return nil, 0, err
 	}
 	if err := httpResponseToError(res, "Error fetching blob"); err != nil {
+		res.Body.Close()
 		return nil, 0, err
 	}
 	cache.RecordKnownLocation(s.physicalRef.Transport(), bicTransportScope(s.physicalRef), info.Digest, newBICLocationReference(s.physicalRef))
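(Editor's note: the three docker/*.go hunks above all plug the same leak: response bodies were not closed on early-return and retry paths, which pins the underlying connections. A hedged sketch of the general idiom with made-up URLs and helper names, not code from this PR.)

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

// fetchBlob tries each mirror URL in turn, closing the body on every
// non-success path so the retry loop does not leak one connection per attempt.
func fetchBlob(urls []string) (io.ReadCloser, error) {
	var lastErr error
	for _, u := range urls {
		resp, err := http.Get(u)
		if err != nil {
			lastErr = err
			continue
		}
		if resp.StatusCode != http.StatusOK {
			lastErr = fmt.Errorf("fetching %s: status %d", u, resp.StatusCode)
			resp.Body.Close() // the fix the hunks above apply
			continue
		}
		return resp.Body, nil // caller closes on the success path
	}
	return nil, lastErr
}

func main() {
	body, err := fetchBlob([]string{"https://example.com/blob"})
	if err != nil {
		fmt.Println(err)
		return
	}
	defer body.Close()
	io.Copy(ioutil.Discard, body) // drain so the connection can be reused
}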
vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go (20 lines changed; generated, vendored)

@@ -123,14 +123,6 @@ var compatibility = map[string][]string{
 	"arm64": {"v8"},
 }
 
-// baseVariants contains, for a specified architecture, a variant that is known to be
-// supported by _all_ machines using that architecture.
-// Architectures that don’t have variants, or where there are possible versions without
-// an established variant name, should not have an entry here.
-var baseVariants = map[string]string{
-	"arm64": "v8",
-}
-
 // WantedPlatforms returns all compatible platforms with the platform specifics possibly overridden by user,
 // the most compatible platform is first.
 // If some option (arch, os, variant) is not present, a value from current platform is detected.
@@ -158,6 +150,8 @@ func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
 
 	var variants []string = nil
 	if wantedVariant != "" {
+		// If the user requested a specific variant, we'll walk down
+		// the list from most to least compatible.
 		if compatibility[wantedArch] != nil {
 			variantOrder := compatibility[wantedArch]
 			for i, v := range variantOrder {
@@ -171,12 +165,14 @@ func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
 			// user wants a variant which we know nothing about - not even compatibility
 			variants = []string{wantedVariant}
 		}
+		// Make sure to have a candidate with an empty variant as well.
+		variants = append(variants, "")
 	} else {
-		variants = append(variants, "") // No variant specified, use a “no variant specified” image if present
-		if baseVariant, ok := baseVariants[wantedArch]; ok {
-			// But also accept an image with the “base” variant for the architecture, if it exists.
-			variants = append(variants, baseVariant)
+		// Make sure to have a candidate with an empty variant as well.
+		variants = append(variants, "")
+		// If available add the entire compatibility matrix for the specific architecture.
+		if possibleVariants, ok := compatibility[wantedArch]; ok {
+			variants = append(variants, possibleVariants...)
 		}
 	}
 
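(Editor's note: a simplified re-implementation of the new candidate ordering, to make the behavior change concrete. The table below is a small stand-in for the matcher's real compatibility map, and the function collapses logic that upstream spreads across WantedPlatforms.)

package main

import "fmt"

var compatibility = map[string][]string{
	"arm":   {"v7", "v6", "v5"},
	"arm64": {"v8"},
}

func candidateVariants(wantedArch, wantedVariant string) []string {
	var variants []string
	if wantedVariant != "" {
		// Walk the table from the requested variant down to less capable ones.
		order := compatibility[wantedArch]
		for i, v := range order {
			if wantedVariant == v {
				variants = order[i:]
				break
			}
		}
		if variants == nil {
			// Unknown variant: use it verbatim.
			variants = []string{wantedVariant}
		}
		// Also accept an image that declares no variant at all.
		variants = append(variants, "")
	} else {
		// No variant requested: prefer a variant-less image, then anything
		// the architecture's compatibility matrix allows.
		variants = append(variants, "")
		variants = append(variants, compatibility[wantedArch]...)
	}
	return variants
}

func main() {
	fmt.Println(candidateVariants("arm", "v7")) // [v7 v6 v5 ]
	fmt.Println(candidateVariants("arm", ""))   // [ v7 v6 v5]
}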
vendor/github.com/containers/image/v5/internal/types/types.go (53 lines added; generated, vendored, new file)

@@ -0,0 +1,53 @@
+package types
+
+import (
+	"context"
+	"io"
+
+	publicTypes "github.com/containers/image/v5/types"
+)
+
+// ImageDestinationWithOptions is an internal extension to the ImageDestination
+// interface.
+type ImageDestinationWithOptions interface {
+	publicTypes.ImageDestination
+
+	// PutBlobWithOptions is a wrapper around PutBlob. If
+	// options.LayerIndex is set, the blob will be committed directly.
+	// Either by the calling goroutine or by another goroutine already
+	// committing layers.
+	//
+	// Please note that TryReusingBlobWithOptions and PutBlobWithOptions
+	// *must* be used the together. Mixing the two with non "WithOptions"
+	// functions is not supported.
+	PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo publicTypes.BlobInfo, options PutBlobOptions) (publicTypes.BlobInfo, error)
+
+	// TryReusingBlobWithOptions is a wrapper around TryReusingBlob. If
+	// options.LayerIndex is set, the reused blob will be recoreded as
+	// already pulled.
+	//
+	// Please note that TryReusingBlobWithOptions and PutBlobWithOptions
+	// *must* be used the together. Mixing the two with non "WithOptions"
+	// functions is not supported.
+	TryReusingBlobWithOptions(ctx context.Context, blobinfo publicTypes.BlobInfo, options TryReusingBlobOptions) (bool, publicTypes.BlobInfo, error)
+}
+
+// PutBlobOptions are used in PutBlobWithOptions.
+type PutBlobOptions struct {
+	// Cache to look up blob infos.
+	Cache publicTypes.BlobInfoCache
+	// Denotes whether the blob is a config or not.
+	IsConfig bool
+	// The corresponding index in the layer slice.
+	LayerIndex *int
+}
+
+// TryReusingBlobOptions are used in TryReusingBlobWithOptions.
+type TryReusingBlobOptions struct {
+	// Cache to look up blob infos.
+	Cache publicTypes.BlobInfoCache
+	// Use an equivalent of the desired blob.
+	CanSubstitute bool
+	// The corresponding index in the layer slice.
+	LayerIndex *int
+}
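(Editor's note: copy.go consumes this interface through a type assertion, the "optional interface" upgrade pattern common in Go. A self-contained sketch with simplified stand-in types; the real interfaces carry many more methods and live in containers/image.)

package main

import "fmt"

// Simplified stand-ins for the public and internal interfaces above.
type imageDestination interface {
	PutBlob(data []byte) error
}

type putBlobOptions struct {
	isConfig   bool
	layerIndex *int
}

type imageDestinationWithOptions interface {
	imageDestination
	PutBlobWithOptions(data []byte, options putBlobOptions) error
}

// upload prefers the richer interface when the destination provides it, and
// falls back to the public method otherwise, the same cast the copy loop does.
func upload(dest imageDestination, data []byte, layerIndex int) error {
	if d, ok := dest.(imageDestinationWithOptions); ok {
		return d.PutBlobWithOptions(data, putBlobOptions{layerIndex: &layerIndex})
	}
	return dest.PutBlob(data)
}

type basicDest struct{}

func (basicDest) PutBlob(data []byte) error { return nil }

func main() {
	// basicDest does not implement the optional interface, so upload
	// transparently takes the fallback path.
	fmt.Println(upload(basicDest{}, []byte("layer"), 0))
}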
vendor/github.com/containers/image/v5/manifest/manifest.go (17 lines changed; generated, vendored)

@@ -30,7 +30,7 @@ const (
 	DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
 	// DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
 	DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar"
-	// DockerV2Schema2ForeignLayerMediaType is the MIME type used for gzippped schema 2 foreign layers.
+	// DockerV2Schema2ForeignLayerMediaType is the MIME type used for gzipped schema 2 foreign layers.
 	DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
 )
@@ -132,9 +132,16 @@ func GuessMIMEType(manifest []byte) string {
 		if err := json.Unmarshal(manifest, &ociMan); err != nil {
 			return ""
 		}
-		if ociMan.Config.MediaType == imgspecv1.MediaTypeImageConfig {
+		switch ociMan.Config.MediaType {
+		case imgspecv1.MediaTypeImageConfig:
 			return imgspecv1.MediaTypeImageManifest
+		case DockerV2Schema2ConfigMediaType:
+			// This case should not happen since a Docker image
+			// must declare a top-level media type and
+			// `meta.MediaType` has already been checked.
+			return DockerV2Schema2MediaType
 		}
+		// Maybe an image index or an OCI artifact.
 		ociIndex := struct {
 			Manifests []imgspecv1.Descriptor `json:"manifests"`
 		}{}
@@ -145,9 +152,13 @@ func GuessMIMEType(manifest []byte) string {
 			if ociMan.Config.MediaType == "" {
 				return imgspecv1.MediaTypeImageIndex
 			}
 			// FIXME: this is mixing media types of manifests and configs.
 			return ociMan.Config.MediaType
 		}
-		return DockerV2Schema2MediaType
+		// It's most likely an OCI artifact with a custom config media
+		// type which is not (and cannot) be covered by the media-type
+		// checks cabove.
+		return imgspecv1.MediaTypeImageManifest
 	}
 	return ""
 }
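(Editor's note: a quick sketch of the resulting behavior, calling the exported manifest.GuessMIMEType on a made-up OCI artifact manifest whose config carries a custom media type. The media type string and digest are illustrative only.)

package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
)

func main() {
	// No top-level mediaType, schemaVersion 2, custom config media type:
	// the OCI artifact case the new code handles.
	artifact := []byte(`{
		"schemaVersion": 2,
		"config": {
			"mediaType": "application/vnd.example.thing.v1+json",
			"digest": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
			"size": 2
		},
		"layers": []
	}`)
	fmt.Println(manifest.GuessMIMEType(artifact))
	// With v5.11.0 this should print "application/vnd.oci.image.manifest.v1+json"
	// instead of echoing the custom config media type.
}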
vendor/github.com/containers/image/v5/oci/layout/oci_src.go (2 lines changed; generated, vendored)

@@ -15,6 +15,7 @@ import (
 	"github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
 type ociImageSource struct {
@@ -94,6 +95,7 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest
 
 	m, err := ioutil.ReadFile(manifestPath)
 	if err != nil {
+		logrus.Errorf("Error HERE")
 		return nil, "", err
 	}
 	if mimeType == "" {
vendor/github.com/containers/image/v5/openshift/openshift-copies.go (24 lines changed; generated, vendored)

@@ -24,7 +24,7 @@ import (
 	"golang.org/x/net/http2"
 )
 
-// restTLSClientConfig is a modified copy of k8s.io/kubernets/pkg/client/restclient.TLSClientConfig.
+// restTLSClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.TLSClientConfig.
 // restTLSClientConfig contains settings to enable transport layer security
 type restTLSClientConfig struct {
 	// Server requires TLS client certificate authentication
@@ -45,7 +45,7 @@ type restTLSClientConfig struct {
 	CAData []byte
 }
 
-// restConfig is a modified copy of k8s.io/kubernets/pkg/client/restclient.Config.
+// restConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.Config.
 // Config holds the common attributes that can be passed to a Kubernetes client on
 // initialization.
 type restConfig struct {
@@ -254,7 +254,7 @@ func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, conf
 	// we want this order of precedence for user identification
 	// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
 	// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
-	// 3. if there is not enough information to idenfity the user, load try the ~/.kubernetes_auth file
+	// 3. if there is not enough information to identify the user, load try the ~/.kubernetes_auth file
 	// 4. if there is not enough information to identify the user, prompt if possible
 func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*restConfig, error) {
 	mergedConfig := &restConfig{}
@@ -538,7 +538,7 @@ func (e errConfigurationInvalid) Error() string {
 // ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config
 // Callers can put the chain together however they want, but we'd recommend:
 // EnvVarPathFiles if set (a list of files if set) OR the HomeDirectoryPath
-// ExplicitPath is special, because if a user specifically requests a certain file be used and error is reported if thie file is not present
+// ExplicitPath is special, because if a user specifically requests a certain file be used and error is reported if this file is not present
 type clientConfigLoadingRules struct {
 	Precedence []string
 }
@@ -741,7 +741,7 @@ func resolvePaths(refs []*string, base string) error {
 	return nil
 }
 
-// restClientFor is a modified copy of k8s.io/kubernets/pkg/client/restclient.RESTClientFor.
+// restClientFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.RESTClientFor.
 // RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config
 // object. Note that a RESTClient may require fields that are optional when initializing a Client.
 // A RESTClient created by this method is generic - it expects to operate on an API that follows
@@ -769,7 +769,7 @@ func restClientFor(config *restConfig) (*url.URL, *http.Client, error) {
 	return baseURL, httpClient, nil
 }
 
-// defaultServerURL is a modified copy of k8s.io/kubernets/pkg/client/restclient.DefaultServerURL.
+// defaultServerURL is a modified copy of k8s.io/kubernetes/pkg/client/restclient.DefaultServerURL.
 // DefaultServerURL converts a host, host:port, or URL string to the default base server API path
 // to use with a Client at a given API version following the standard conventions for a
 // Kubernetes API.
@@ -800,7 +800,7 @@ func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) {
 	return hostURL, nil
 }
 
-// defaultServerURLFor is a modified copy of k8s.io/kubernets/pkg/client/restclient.defaultServerURLFor.
+// defaultServerURLFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.defaultServerURLFor.
 // defaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It
 // requires Host and Version to be set prior to being called.
 func defaultServerURLFor(config *restConfig) (*url.URL, error) {
@@ -818,7 +818,7 @@ func defaultServerURLFor(config *restConfig) (*url.URL, error) {
 	return defaultServerURL(host, defaultTLS)
 }
 
-// transportFor is a modified copy of k8s.io/kubernets/pkg/client/restclient.transportFor.
+// transportFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.transportFor.
 // TransportFor returns an http.RoundTripper that will provide the authentication
 // or transport level security defined by the provided Config. Will return the
 // default http.DefaultTransport if no special case behavior is needed.
@@ -827,7 +827,7 @@ func transportFor(config *restConfig) (http.RoundTripper, error) {
 	return transportNew(config)
 }
 
-// isConfigTransportTLS is a modified copy of k8s.io/kubernets/pkg/client/restclient.IsConfigTransportTLS.
+// isConfigTransportTLS is a modified copy of k8s.io/kubernetes/pkg/client/restclient.IsConfigTransportTLS.
 // IsConfigTransportTLS returns true if and only if the provided
 // config will result in a protected connection to the server when it
 // is passed to restclient.RESTClientFor(). Use to determine when to
@@ -1055,11 +1055,11 @@ func (c *restConfig) HasCertAuth() bool {
 // Config holds the information needed to build connect to remote kubernetes clusters as a given user
 // IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
 type clientcmdConfig struct {
-	// Clusters is a map of referencable names to cluster configs
+	// Clusters is a map of referenceable names to cluster configs
 	Clusters clustersMap `json:"clusters"`
-	// AuthInfos is a map of referencable names to user configs
+	// AuthInfos is a map of referenceable names to user configs
 	AuthInfos authInfosMap `json:"users"`
-	// Contexts is a map of referencable names to context configs
+	// Contexts is a map of referenceable names to context configs
 	Contexts contextsMap `json:"contexts"`
 	// CurrentContext is the name of the context that you would like to use by default
 	CurrentContext string `json:"current-context"`
vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go (4 lines changed; generated, vendored)

@@ -282,7 +282,7 @@ func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope type
 	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
 }
 
-// appendReplacementCandiates creates prioritize.CandidateWithTime values for digest in scopeBucket with corresponding compression info from compressionBucket (if compressionBucket is not nil), and returns the result of appending them to candidates.
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket with corresponding compression info from compressionBucket (if compressionBucket is not nil), and returns the result of appending them to candidates.
 func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket, compressionBucket *bolt.Bucket, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime {
 	digestKey := []byte(digest.String())
 	b := scopeBucket.Bucket(digestKey)
@@ -321,7 +321,7 @@ func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
 // CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused
 // within the specified (transport scope) (if they still exist, which is not guaranteed).
 //
-// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
 // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
 // uncompressed digest.
 func (bdc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go (2 lines changed; generated, vendored)

@@ -20,7 +20,7 @@ const (
 	systemBlobInfoCacheDir = "/var/lib/containers/cache"
 )
 
-// blobInfoCacheDir returns a path to a blob info cache appropripate for sys and euid.
+// blobInfoCacheDir returns a path to a blob info cache appropriate for sys and euid.
 // euid is used so that (sudo …) does not write root-owned files into the unprivileged users’ home directory.
 func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) {
 	if sys != nil && sys.BlobInfoCacheDir != "" {
vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go (4 lines changed; generated, vendored)

@@ -120,7 +120,7 @@ func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compresso
 	mem.compressors[blobDigest] = compressorName
 }
 
-// appendReplacementCandiates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
 func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime {
 	locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
 	for l, t := range locations {
@@ -146,7 +146,7 @@ func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
 // CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
 // within the specified (transport scope) (if they still exist, which is not guaranteed).
 //
-// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
 // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
 // uncompressed digest.
 func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go (2 lines changed; generated, vendored)

@@ -42,7 +42,7 @@ func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.B
 // CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
 // within the specified (transport scope) (if they still exist, which is not guaranteed).
 //
-// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
 // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
 // uncompressed digest.
 func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
393
vendor/github.com/containers/image/v5/pkg/docker/config/config.go
generated
vendored
393
vendor/github.com/containers/image/v5/pkg/docker/config/config.go
generated
vendored
@ -6,14 +6,17 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/v5/pkg/sysregistriesv2"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage/pkg/homedir"
|
||||
helperclient "github.com/docker/docker-credential-helpers/client"
|
||||
"github.com/docker/docker-credential-helpers/credentials"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
@ -41,12 +44,6 @@ var (
|
||||
dockerLegacyHomePath = ".dockercfg"
|
||||
nonLinuxAuthFilePath = filepath.FromSlash(".config/containers/auth.json")
|
||||
|
||||
// Note that the keyring support has been disabled as it was causing
|
||||
// regressions. Before enabling, please revisit TODO(keyring) comments
|
||||
// which need to be addressed if the need remerged to support the
|
||||
// kernel keyring.
|
||||
enableKeyring = false
|
||||
|
||||
// ErrNotLoggedIn is returned for users not logged into a registry
|
||||
// that they are trying to logout of
|
||||
ErrNotLoggedIn = errors.New("not logged in")
|
||||
@ -54,72 +51,114 @@ var (
|
||||
ErrNotSupported = errors.New("not supported")
|
||||
)
|
||||
|
||||
// SetAuthentication stores the username and password in the auth.json file
|
||||
// SetAuthentication stores the username and password in the credential helper or file
|
||||
func SetAuthentication(sys *types.SystemContext, registry, username, password string) error {
|
||||
return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
|
||||
if ch, exists := auths.CredHelpers[registry]; exists {
|
||||
return false, setAuthToCredHelper(ch, registry, username, password)
|
||||
}
|
||||
helpers, err := sysregistriesv2.CredentialHelpers(sys)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set the credentials to kernel keyring if enableKeyring is true.
|
||||
// The keyring might not work in all environments (e.g., missing capability) and isn't supported on all platforms.
|
||||
// Hence, we want to fall-back to using the authfile in case the keyring failed.
|
||||
// However, if the enableKeyring is false, we want adhere to the user specification and not use the keyring.
|
||||
if enableKeyring {
|
||||
err := setAuthToKernelKeyring(registry, username, password)
|
||||
if err == nil {
|
||||
logrus.Debugf("credentials for (%s, %s) were stored in the kernel keyring\n", registry, username)
|
||||
return false, nil
|
||||
}
|
||||
logrus.Debugf("failed to authenticate with the kernel keyring, falling back to authfiles. %v", err)
|
||||
// Make sure to collect all errors.
|
||||
var multiErr error
|
||||
for _, helper := range helpers {
|
||||
var err error
|
||||
switch helper {
|
||||
// Special-case the built-in helpers for auth files.
|
||||
case sysregistriesv2.AuthenticationFileHelper:
|
||||
err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
|
||||
if ch, exists := auths.CredHelpers[registry]; exists {
return false, setAuthToCredHelper(ch, registry, username, password)
}
creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
newCreds := dockerAuthConfig{Auth: creds}
auths.AuthConfigs[registry] = newCreds
return true, nil
})
// External helpers.
default:
err = setAuthToCredHelper(helper, registry, username, password)
}
creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
newCreds := dockerAuthConfig{Auth: creds}
auths.AuthConfigs[registry] = newCreds
return true, nil
})
if err != nil {
multiErr = multierror.Append(multiErr, err)
logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", registry, helper, err)
continue
}
logrus.Debugf("Stored credentials for %s in credential helper %s", registry, helper)
return nil
}
return multiErr
}

// GetAllCredentials returns the registry credentials for all registries stored
// in either the auth.json file or the docker/config.json.
// in any of the configured credential helpers.
func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthConfig, error) {
// Note: we need to read the auth files in the inverse order to prevent
// a priority inversion when writing to the map.
authConfigs := make(map[string]types.DockerAuthConfig)
paths := getAuthFilePaths(sys, homedir.Get())
for i := len(paths) - 1; i >= 0; i-- {
path := paths[i]
// readJSONFile returns an empty map in case the path doesn't exist.
auths, err := readJSONFile(path.path, path.legacyFormat)
if err != nil {
return nil, errors.Wrapf(err, "error reading JSON file %q", path.path)
}
// To keep things simple, let's first extract all registries from all
// possible sources, and then call `GetCredentials` on them. That
// prevents us from having to reverse engineer the logic in
// `GetCredentials`.
allRegistries := make(map[string]bool)
addRegistry := func(s string) {
allRegistries[s] = true
}

for registry, data := range auths.AuthConfigs {
conf, err := decodeDockerAuth(data)
if err != nil {
return nil, err
}
authConfigs[normalizeRegistry(registry)] = conf
}

// Credential helpers may override credentials from the auth file.
for registry, credHelper := range auths.CredHelpers {
username, password, err := getAuthFromCredHelper(credHelper, registry)
if err != nil {
if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
continue
helpers, err := sysregistriesv2.CredentialHelpers(sys)
if err != nil {
return nil, err
}
for _, helper := range helpers {
switch helper {
// Special-case the built-in helper for auth files.
case sysregistriesv2.AuthenticationFileHelper:
for _, path := range getAuthFilePaths(sys, homedir.Get()) {
// readJSONFile returns an empty map in case the path doesn't exist.
auths, err := readJSONFile(path.path, path.legacyFormat)
if err != nil {
return nil, errors.Wrapf(err, "error reading JSON file %q", path.path)
}
// Credential helpers in the auth file have a
// direct mapping to a registry, so we can just
// walk the map.
for registry := range auths.CredHelpers {
addRegistry(registry)
}
for registry := range auths.AuthConfigs {
addRegistry(registry)
}
}
// External helpers.
default:
creds, err := listAuthsFromCredHelper(helper)
if err != nil {
logrus.Debugf("Error listing credentials stored in credential helper %s: %v", helper, err)
}
switch errors.Cause(err) {
case nil:
for registry := range creds {
addRegistry(registry)
}
case exec.ErrNotFound:
// It's okay if the helper doesn't exist.
default:
return nil, err
}

conf := types.DockerAuthConfig{Username: username, Password: password}
authConfigs[normalizeRegistry(registry)] = conf
}
}

// TODO(keyring): if we ever re-enable the keyring support, we would have to
// query all credentials from the keyring here.
// Now use `GetCredentials` to get the specific auth configs for each
// previously listed registry.
authConfigs := make(map[string]types.DockerAuthConfig)
for registry := range allRegistries {
authConf, err := GetCredentials(sys, registry)
if err != nil {
if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
// Ignore if the credentials could not be found (anymore).
continue
}
// Note: we rely on the logging in `GetCredentials`.
return nil, err
}
authConfigs[registry] = authConf
}

return authConfigs, nil
}
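For illustration only (not part of this commit): a minimal consumer sketch of the new GetAllCredentials API, assuming the v5.11.0 import paths shown in this diff; the main function and its error handling are hypothetical.

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/docker/config"
	"github.com/containers/image/v5/types"
)

func main() {
	sys := &types.SystemContext{} // defaults: standard auth file paths and configured helpers
	creds, err := config.GetAllCredentials(sys)
	if err != nil {
		panic(err)
	}
	for registry, auth := range creds {
		fmt.Printf("%s: user=%q\n", registry, auth.Username)
	}
}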
@ -159,7 +198,9 @@ func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath {
return paths
}

// GetCredentials returns the registry credentials stored in either auth.json
// GetCredentials returns the registry credentials stored in the
// registry-specific credential helpers or in the default global credentials
// helpers with falling back to using either auth.json
// file or .docker/config.json, including support for OAuth2 and IdentityToken.
// If an entry is not found, an empty struct is returned.
func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuthConfig, error) {
@ -170,41 +211,65 @@ func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuth
// it exists only to allow testing it with an artificial home directory.
func getCredentialsWithHomeDir(sys *types.SystemContext, registry, homeDir string) (types.DockerAuthConfig, error) {
if sys != nil && sys.DockerAuthConfig != nil {
logrus.Debug("Returning credentials from DockerAuthConfig")
logrus.Debugf("Returning credentials for %s from DockerAuthConfig", registry)
return *sys.DockerAuthConfig, nil
}

if enableKeyring {
username, password, err := getAuthFromKernelKeyring(registry)
if err == nil {
logrus.Debug("returning credentials from kernel keyring")
return types.DockerAuthConfig{
Username: username,
Password: password,
}, nil
// Anonymous function to query credentials from auth files.
getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, error) {
for _, path := range getAuthFilePaths(sys, homeDir) {
authConfig, err := findAuthentication(registry, path.path, path.legacyFormat)
if err != nil {
return types.DockerAuthConfig{}, err
}

if (authConfig.Username != "" && authConfig.Password != "") || authConfig.IdentityToken != "" {
return authConfig, nil
}
}
return types.DockerAuthConfig{}, nil
}

for _, path := range getAuthFilePaths(sys, homeDir) {
authConfig, err := findAuthentication(registry, path.path, path.legacyFormat)
helpers, err := sysregistriesv2.CredentialHelpers(sys)
if err != nil {
return types.DockerAuthConfig{}, err
}

var multiErr error
for _, helper := range helpers {
var creds types.DockerAuthConfig
var err error
switch helper {
// Special-case the built-in helper for auth files.
case sysregistriesv2.AuthenticationFileHelper:
creds, err = getCredentialsFromAuthFiles()
// External helpers.
default:
creds, err = getAuthFromCredHelper(helper, registry)
}
if err != nil {
logrus.Debugf("Credentials not found")
return types.DockerAuthConfig{}, err
logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", registry, helper, err)
multiErr = multierror.Append(multiErr, err)
continue
}

if (authConfig.Username != "" && authConfig.Password != "") || authConfig.IdentityToken != "" {
logrus.Debugf("Returning credentials from %s", path.path)
return authConfig, nil
if len(creds.Username)+len(creds.Password)+len(creds.IdentityToken) == 0 {
continue
}
logrus.Debugf("Found credentials for %s in credential helper %s", registry, helper)
return creds, nil
}
if multiErr != nil {
return types.DockerAuthConfig{}, multiErr
}

logrus.Debugf("Credentials not found")
logrus.Debugf("No credentials for %s found", registry)
return types.DockerAuthConfig{}, nil
}
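A hedged fragment of how a caller might use the rewritten lookup; the IdentityToken branch covers the OAuth2 case named in the doc comment above (assumes the config import from the earlier sketch; the registry name is hypothetical).

authConf, err := config.GetCredentials(nil, "quay.io") // nil SystemContext uses the defaults
if err != nil {
	return err
}
switch {
case authConf.IdentityToken != "":
	// OAuth2 / identity-token authentication
case authConf.Username != "" && authConf.Password != "":
	// basic username/password authentication
default:
	// no stored credentials; proceed anonymously
}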

// GetAuthentication returns the registry credentials stored in
// either auth.json file or .docker/config.json
// If an entry is not found empty strings are returned for the username and password
// GetAuthentication returns the registry credentials stored in the
// registry-specific credential helpers or in the default global credentials
// helpers with falling back to using either auth.json file or
// .docker/config.json
//
// Deprecated: This API only has support for username and password. To get the
// support for oauth2 in docker registry authentication, we added the new
@ -227,53 +292,132 @@ func getAuthenticationWithHomeDir(sys *types.SystemContext, registry, homeDir st
return auth.Username, auth.Password, nil
}

// RemoveAuthentication deletes the credentials stored in auth.json
// RemoveAuthentication removes credentials for `registry` from all possible
// sources such as credential helpers and auth files.
func RemoveAuthentication(sys *types.SystemContext, registry string) error {
return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
// First try cred helpers.
if ch, exists := auths.CredHelpers[registry]; exists {
return false, deleteAuthFromCredHelper(ch, registry)
}
helpers, err := sysregistriesv2.CredentialHelpers(sys)
if err != nil {
return err
}

// Next if keyring is enabled try kernel keyring
if enableKeyring {
err := deleteAuthFromKernelKeyring(registry)
if err == nil {
logrus.Debugf("credentials for %s were deleted from the kernel keyring", registry)
return false, nil
var multiErr error
isLoggedIn := false

removeFromCredHelper := func(helper string) {
err := deleteAuthFromCredHelper(helper, registry)
if err == nil {
logrus.Debugf("Credentials for %q were deleted from credential helper %s", registry, helper)
isLoggedIn = true
return
}
if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
logrus.Debugf("Not logged in to %s with credential helper %s", registry, helper)
return
}
multiErr = multierror.Append(multiErr, errors.Wrapf(err, "error removing credentials for %s from credential helper %s", registry, helper))
}

for _, helper := range helpers {
var err error
switch helper {
// Special-case the built-in helper for auth files.
case sysregistriesv2.AuthenticationFileHelper:
err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
if innerHelper, exists := auths.CredHelpers[registry]; exists {
removeFromCredHelper(innerHelper)
}
if _, ok := auths.AuthConfigs[registry]; ok {
isLoggedIn = true
delete(auths.AuthConfigs, registry)
} else if _, ok := auths.AuthConfigs[normalizeRegistry(registry)]; ok {
isLoggedIn = true
delete(auths.AuthConfigs, normalizeRegistry(registry))
}
return true, multiErr
})
if err != nil {
multiErr = multierror.Append(multiErr, err)
}
logrus.Debugf("failed to delete credentials from the kernel keyring, falling back to authfiles")
// External helpers.
default:
removeFromCredHelper(helper)
}
}

if _, ok := auths.AuthConfigs[registry]; ok {
delete(auths.AuthConfigs, registry)
} else if _, ok := auths.AuthConfigs[normalizeRegistry(registry)]; ok {
delete(auths.AuthConfigs, normalizeRegistry(registry))
} else {
return false, ErrNotLoggedIn
}
return true, nil
})
if multiErr != nil {
return multiErr
}
if !isLoggedIn {
return ErrNotLoggedIn
}

return nil
}
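A hedged logout sketch on top of the rewritten function (fragment, not from this commit): the new code returns ErrNotLoggedIn directly when nothing was removed from any source, so a plain equality check suffices.

var sys *types.SystemContext // nil uses the default configuration
if err := config.RemoveAuthentication(sys, "quay.io"); err != nil {
	if err == config.ErrNotLoggedIn {
		fmt.Println("not logged in to quay.io")
	} else {
		return err
	}
}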

// RemoveAllAuthentication deletes all the credentials stored in auth.json and kernel keyring
// RemoveAllAuthentication deletes all the credentials stored in credential
// helpers and auth files.
func RemoveAllAuthentication(sys *types.SystemContext) error {
return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
if enableKeyring {
err := removeAllAuthFromKernelKeyring()
if err == nil {
logrus.Debugf("removing all credentials from kernel keyring")
return false, nil
helpers, err := sysregistriesv2.CredentialHelpers(sys)
if err != nil {
return err
}

var multiErr error
for _, helper := range helpers {
var err error
switch helper {
// Special-case the built-in helper for auth files.
case sysregistriesv2.AuthenticationFileHelper:
err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
for registry, helper := range auths.CredHelpers {
// Helpers in auth files are expected
// to exist, so no special treatment
// for them.
if err := deleteAuthFromCredHelper(helper, registry); err != nil {
return false, err
}
}
auths.CredHelpers = make(map[string]string)
auths.AuthConfigs = make(map[string]dockerAuthConfig)
return true, nil
})
// External helpers.
default:
var creds map[string]string
creds, err = listAuthsFromCredHelper(helper)
switch errors.Cause(err) {
case nil:
for registry := range creds {
err = deleteAuthFromCredHelper(helper, registry)
if err != nil {
break
}
}
case exec.ErrNotFound:
// It's okay if the helper doesn't exist.
continue
default:
// fall through
}
logrus.Debugf("error removing credentials from kernel keyring")
}
auths.CredHelpers = make(map[string]string)
auths.AuthConfigs = make(map[string]dockerAuthConfig)
return true, nil
})
if err != nil {
logrus.Debugf("Error removing credentials from credential helper %s: %v", helper, err)
multiErr = multierror.Append(multiErr, err)
continue
}
logrus.Debugf("All credentials removed from credential helper %s", helper)
}

return multiErr
}

// getPathToAuth gets the path of the auth.json file used for reading and writing credentials
func listAuthsFromCredHelper(credHelper string) (map[string]string, error) {
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
p := helperclient.NewShellProgramFunc(helperName)
return helperclient.List(p)
}
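listAuthsFromCredHelper shells out to an external docker-credential-<name> binary through the docker-credential-helpers client package. A hedged fragment of the same protocol for a one-off enumeration, assuming a hypothetical helper named "pass" on $PATH:

p := helperclient.NewShellProgramFunc("docker-credential-pass")
list, err := helperclient.List(p) // map: server URL -> username
if err != nil {
	return err
}
for server := range list {
	creds, err := helperclient.Get(p, server) // fills creds.Username and creds.Secret
	if err != nil {
		return err
	}
	fmt.Printf("%s: user=%q, secret of %d bytes\n", server, creds.Username, len(creds.Secret))
}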

// getPathToAuth gets the path of the auth.json file used for reading and writting credentials
// returns the path, and a bool specifies whether the file is in legacy format
func getPathToAuth(sys *types.SystemContext) (string, bool, error) {
return getPathToAuthWithOS(sys, runtime.GOOS)
@ -387,14 +531,17 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (
return nil
}

func getAuthFromCredHelper(credHelper, registry string) (string, string, error) {
func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) {
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
p := helperclient.NewShellProgramFunc(helperName)
creds, err := helperclient.Get(p, registry)
if err != nil {
return "", "", err
return types.DockerAuthConfig{}, err
}
return creds.Username, creds.Secret, nil
return types.DockerAuthConfig{
Username: creds.Username,
Password: creds.Secret,
}, nil
}

func setAuthToCredHelper(credHelper, registry, username, password string) error {
@ -423,15 +570,7 @@ func findAuthentication(registry, path string, legacyFormat bool) (types.DockerA

// First try cred helpers. They should always be normalized.
if ch, exists := auths.CredHelpers[registry]; exists {
username, password, err := getAuthFromCredHelper(ch, registry)
if err != nil {
return types.DockerAuthConfig{}, err
}

return types.DockerAuthConfig{
Username: username,
Password: password,
}, nil
return getAuthFromCredHelper(ch, registry)
}

// I'm feeling lucky
16
vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go
generated
vendored
@ -9,9 +9,13 @@ import (
"github.com/sirupsen/logrus"
)

const keyDescribePrefix = "container-registry-login:"
// NOTE: none of the functions here are currently used. If we ever want to
// reenable keyring support, we should introduce a similar built-in credential
// helper as for `sysregistriesv2.AuthenticationFileHelper`.

func getAuthFromKernelKeyring(registry string) (string, string, error) {
const keyDescribePrefix = "container-registry-login:" // nolint

func getAuthFromKernelKeyring(registry string) (string, string, error) { // nolint
userkeyring, err := keyctl.UserKeyring()
if err != nil {
return "", "", err
@ -31,7 +35,7 @@ func getAuthFromKernelKeyring(registry string) (string, string, error) {
return parts[0], parts[1], nil
}

func deleteAuthFromKernelKeyring(registry string) error {
func deleteAuthFromKernelKeyring(registry string) error { // nolint
userkeyring, err := keyctl.UserKeyring()

if err != nil {
@ -44,7 +48,7 @@ func deleteAuthFromKernelKeyring(registry string) error {
return key.Unlink()
}

func removeAllAuthFromKernelKeyring() error {
func removeAllAuthFromKernelKeyring() error { // nolint
keys, err := keyctl.ReadUserKeyring()
if err != nil {
return err
@ -77,7 +81,7 @@ func removeAllAuthFromKernelKeyring() error {
return nil
}

func setAuthToKernelKeyring(registry, username, password string) error {
func setAuthToKernelKeyring(registry, username, password string) error { // nolint
keyring, err := keyctl.SessionKeyring()
if err != nil {
return err
@ -110,6 +114,6 @@ func setAuthToKernelKeyring(registry, username, password string) error {
return nil
}

func genDescription(registry string) string {
func genDescription(registry string) string { // nolint
return fmt.Sprintf("%s%s", keyDescribePrefix, registry)
}
5
vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go
generated
vendored
@ -313,7 +313,10 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
}
// Error out if there's no matching alias and no search registries.
if len(unqualifiedSearchRegistries) == 0 {
return nil, errors.Errorf("short-name %q did not resolve to an alias and no unqualified-search registries are defined in %q", name, usrConfig)
if usrConfig != "" {
return nil, errors.Errorf("short-name %q did not resolve to an alias and no unqualified-search registries are defined in %q", name, usrConfig)
}
return nil, errors.Errorf("short-name %q did not resolve to an alias and no containers-registries.conf(5) was found", name)
}
resolved.originDescription = usrConfig
178
vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
generated
vendored
@ -30,16 +30,24 @@ const builtinRegistriesConfPath = "/etc/containers/registries.conf"
// systemRegistriesConfDirPath is the path to the system-wide registry
// configuration directory and is used to add/subtract potential registries for
// obtaining images. You can override this at build time with
// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfDirecotyPath=$your_path'
// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfDirectoryPath=$your_path'
var systemRegistriesConfDirPath = builtinRegistriesConfDirPath

// builtinRegistriesConfDirPath is the path to the registry configuration directory.
// DO NOT change this, instead see systemRegistriesConfDirectoryPath above.
const builtinRegistriesConfDirPath = "/etc/containers/registries.conf.d"

// AuthenticationFileHelper is a special key for credential helpers indicating
// the usage of consulting containers-auth.json files instead of a credential
// helper.
const AuthenticationFileHelper = "containers-auth.json"

// Endpoint describes a remote location of a registry.
type Endpoint struct {
// The endpoint's remote location.
// The endpoint's remote location. Can be empty iff Prefix contains
// wildcard in the format: "*.example.com" for subdomain matching.
// Please refer to FindRegistry / PullSourcesFromReference instead
// of accessing/interpreting `Location` directly.
Location string `toml:"location,omitempty"`
// If true, certs verification will be skipped and HTTP (non-TLS)
// connections will be allowed.
@ -57,11 +65,26 @@ var userRegistriesDir = filepath.FromSlash(".config/containers/registries.conf.d
// The function errors if the newly created reference is not parsable.
func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (reference.Named, error) {
refString := ref.String()
if !refMatchesPrefix(refString, prefix) {
var newNamedRef string
// refMatchingPrefix returns the length of the match. Everything that
// follows the match gets appended to registries location.
prefixLen := refMatchingPrefix(refString, prefix)
if prefixLen == -1 {
return nil, fmt.Errorf("invalid prefix '%v' for reference '%v'", prefix, refString)
}

newNamedRef := strings.Replace(refString, prefix, e.Location, 1)
// In the case of an empty `location` field, simply return the original
// input ref as-is.
//
// FIXME: already validated in postProcessRegistries, so check can probably
// be dropped.
// https://github.com/containers/image/pull/1191#discussion_r610621608
if e.Location == "" {
if prefix[:2] != "*." {
return nil, fmt.Errorf("invalid prefix '%v' for empty location, should be in the format: *.example.com", prefix)
}
return ref, nil
}
newNamedRef = e.Location + refString[prefixLen:]
newParsedRef, err := reference.ParseNamed(newNamedRef)
if err != nil {
return nil, errors.Wrapf(err, "error rewriting reference")
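A worked illustration of the new rewrite rule with hypothetical values: the matched prefix length, not a string replacement, decides the split point, and everything past the match is appended to the mirror location.

prefix := "example.com/foo"             // Registry.Prefix
location := "mirror.local/bar"          // Endpoint.Location
ref := "example.com/foo/myimage:latest" // reference being pulled

prefixLen := len(prefix) // what refMatchingPrefix returns on an exact-prefix match
rewritten := location + ref[prefixLen:]
// rewritten == "mirror.local/bar/myimage:latest"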
@ -77,6 +100,11 @@ type Registry struct {
// and we pull from "example.com/bar/myimage:latest", the image will
// effectively be pulled from "example.com/foo/bar/myimage:latest".
// If no Prefix is specified, it defaults to the specified location.
// Prefix can also be in the format: "*.example.com" for matching
// subdomains. The wildcard should only be in the beginning and should also
// not contain any namespaces or special characters: "/", "@" or ":".
// Please refer to FindRegistry / PullSourcesFromReference instead
// of accessing/interpreting `Prefix` directly.
Prefix string `toml:"prefix"`
// A registry is an Endpoint too
Endpoint
@ -154,6 +182,14 @@ type V2RegistriesConf struct {
Registries []Registry `toml:"registry"`
// An array of host[:port] (not prefix!) entries to use for resolving unqualified image references
UnqualifiedSearchRegistries []string `toml:"unqualified-search-registries"`
// An array of global credential helpers to use for authentication
// (e.g., ["pass", "secretservice"]). The helpers are consulted in the
// specified order. Note that "containers-auth.json" is a reserved
// value for consulting auth files as specified in
// containers-auth.json(5).
//
// If empty, CredentialHelpers defaults to ["containers-auth.json"].
CredentialHelpers []string `toml:"credential-helpers"`

// ShortNameMode defines how short-name resolution should be handled by
// _consumers_ of this package. Depending on the mode, the user should
@ -177,7 +213,7 @@ func (config *V2RegistriesConf) Nonempty() bool {

// parsedConfig is the result of parsing, and possibly merging, configuration files;
// it is the boundary between the process of reading+ingesting the files, and
// later interpreting the configuraiton based on caller’s requests.
// later interpreting the configuration based on caller’s requests.
type parsedConfig struct {
// NOTE: Update also parsedConfig.updateWithConfigurationFrom!

@ -212,9 +248,15 @@ func (e *InvalidRegistries) Error() string {
func parseLocation(input string) (string, error) {
trimmed := strings.TrimRight(input, "/")

if trimmed == "" {
return "", &InvalidRegistries{s: "invalid location: cannot be empty"}
}
// FIXME: This check needs to exist but fails for empty Location field with
// wildcarded prefix. Removal of this check "only" allows invalid input in,
// and does not prevent correct operation.
// https://github.com/containers/image/pull/1191#discussion_r610122617
//
// if trimmed == "" {
// return "", &InvalidRegistries{s: "invalid location: cannot be empty"}
// }
//

if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") {
msg := fmt.Sprintf("invalid location '%s': URI schemes are not supported", input)
@ -293,12 +335,20 @@ func (config *V2RegistriesConf) postProcessRegistries() error {
}

if reg.Prefix == "" {
if reg.Location == "" {
return &InvalidRegistries{s: "invalid condition: both location and prefix are unset"}
}
reg.Prefix = reg.Location
} else {
reg.Prefix, err = parseLocation(reg.Prefix)
if err != nil {
return err
}
// FIXME: allow config authors to always use Prefix.
// https://github.com/containers/image/pull/1191#discussion_r610622495
if reg.Prefix[:2] != "*." && reg.Location == "" {
return &InvalidRegistries{s: "invalid condition: location is unset and prefix is not in the format: *.example.com"}
}
}

// make sure mirrors are valid
@ -307,8 +357,19 @@ func (config *V2RegistriesConf) postProcessRegistries() error {
if err != nil {
return err
}

//FIXME: unqualifiedSearchRegistries now also accepts empty values
//and shouldn't
// https://github.com/containers/image/pull/1191#discussion_r610623216
if mir.Location == "" {
return &InvalidRegistries{s: "invalid condition: mirror location is unset"}
}
}
if reg.Location == "" {
regMap[reg.Prefix] = append(regMap[reg.Prefix], reg)
} else {
regMap[reg.Location] = append(regMap[reg.Location], reg)
}
regMap[reg.Location] = append(regMap[reg.Location], reg)
}

// Given a registry can be mentioned multiple times (e.g., to have
@ -318,7 +379,13 @@ func (config *V2RegistriesConf) postProcessRegistries() error {
// Note: we need to iterate over the registries array to ensure a
// deterministic behavior which is not guaranteed by maps.
for _, reg := range config.Registries {
others, ok := regMap[reg.Location]
var others []*Registry
var ok bool
if reg.Location == "" {
others, ok = regMap[reg.Prefix]
} else {
others, ok = regMap[reg.Location]
}
if !ok {
return fmt.Errorf("Internal error in V2RegistriesConf.PostProcess: entry in regMap is missing")
}
@ -450,7 +517,7 @@ func newConfigWrapperWithHomeDir(ctx *types.SystemContext, homeDir string) confi
return wrapper
}

// ConfigurationSourceDescription returns a string containres paths of registries.conf and registries.conf.d
// ConfigurationSourceDescription returns a string containers paths of registries.conf and registries.conf.d
func ConfigurationSourceDescription(ctx *types.SystemContext) string {
wrapper := newConfigWrapper(ctx)
configSources := []string{wrapper.configPath}
@ -601,11 +668,17 @@ func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedC
config.shortNameMode = defaultShortNameMode
}

if len(config.partialV2.CredentialHelpers) == 0 {
config.partialV2.CredentialHelpers = []string{AuthenticationFileHelper}
}

// populate the cache
configCache[wrapper] = config
return config, nil
}

// GetRegistries has been deprecated. Use FindRegistry instead.
//
// GetRegistries loads and returns the registries specified in the config.
// Note the parsed content of registry config files is cached. For reloading,
// use `InvalidateCache` and re-call `GetRegistries`.
@ -663,27 +736,72 @@ func GetShortNameMode(ctx *types.SystemContext) (types.ShortNameMode, error) {
return config.shortNameMode, err
}

// refMatchesPrefix returns true iff ref,
// CredentialHelpers returns the global top-level credential helpers.
func CredentialHelpers(sys *types.SystemContext) ([]string, error) {
config, err := getConfig(sys)
if err != nil {
return nil, err
}
return config.partialV2.CredentialHelpers, nil
}
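A hedged consumer fragment for the new accessor; with no credential-helpers key in registries.conf, tryUpdatingCache (above) fills in the built-in default before this returns.

helpers, err := sysregistriesv2.CredentialHelpers(sys)
if err != nil {
	return err
}
// helpers == []string{sysregistriesv2.AuthenticationFileHelper} when the
// configuration sets no credential-helpers, i.e. auth files only.
for _, h := range helpers {
	fmt.Println("will consult credential helper:", h)
}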

// refMatchingSubdomainPrefix returns the length of ref
// iff ref, which is a registry, repository namespace, repository or image reference (as formatted by
// reference.Domain(), reference.Named.Name() or reference.Reference.String()
// — note that this requires the name to start with an explicit hostname!),
// matches a Registry.Prefix value containing wildcarded subdomains in the
// format: *.example.com. Wildcards are only accepted at the beginning, so
// other formats like example.*.com will not work. Wildcarded prefixes also
// cannot contain port numbers or namespaces in them.
func refMatchingSubdomainPrefix(ref, prefix string) int {
index := strings.Index(ref, prefix[1:])
if index == -1 {
return -1
}
if strings.Contains(ref[:index], "/") {
return -1
}
index += len(prefix[1:])
if index == len(ref) {
return index
}
switch ref[index] {
case ':', '/', '@':
return index
default:
return -1
}
}
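A hypothetical in-package test sketch (not in this commit) that pins down the documented wildcard semantics; the return value is the length of the matched host, or -1 on no match.

func TestRefMatchingSubdomainPrefix(t *testing.T) {
	for _, c := range []struct {
		ref, prefix string
		want        int
	}{
		{"sub.example.com/ns/img:latest", "*.example.com", len("sub.example.com")},
		{"sub.example.com:5000/img", "*.example.com", len("sub.example.com")},
		{"example.com/img", "*.example.com", -1},             // no subdomain present
		{"sub.example.com.evil.io/img", "*.example.com", -1}, // host continues past the match
	} {
		if got := refMatchingSubdomainPrefix(c.ref, c.prefix); got != c.want {
			t.Errorf("%q vs %q: got %d, want %d", c.ref, c.prefix, got, c.want)
		}
	}
}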

// refMatchingPrefix returns the length of the prefix iff ref,
// which is a registry, repository namespace, repository or image reference (as formatted by
// reference.Domain(), reference.Named.Name() or reference.Reference.String()
// — note that this requires the name to start with an explicit hostname!),
// matches a Registry.Prefix value.
// (This is split from the caller primarily to make testing easier.)
func refMatchesPrefix(ref, prefix string) bool {
func refMatchingPrefix(ref, prefix string) int {
switch {
case prefix[0:2] == "*.":
return refMatchingSubdomainPrefix(ref, prefix)
case len(ref) < len(prefix):
return false
return -1
case len(ref) == len(prefix):
return ref == prefix
if ref == prefix {
return len(prefix)
}
return -1
case len(ref) > len(prefix):
if !strings.HasPrefix(ref, prefix) {
return false
return -1
}
c := ref[len(prefix)]
// This allows "example.com:5000" to match "example.com",
// which is unintended; that will get fixed eventually, DON'T RELY
// ON THE CURRENT BEHAVIOR.
return c == ':' || c == '/' || c == '@'
if c == ':' || c == '/' || c == '@' {
return len(prefix)
}
return -1
default:
panic("Internal error: impossible comparison outcome")
}
@ -700,10 +818,16 @@ func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
return nil, err
}

return findRegistryWithParsedConfig(config, ref)
}

// findRegistryWithParsedConfig implements `FindRegistry` with a pre-loaded
// parseConfig.
func findRegistryWithParsedConfig(config *parsedConfig, ref string) (*Registry, error) {
reg := Registry{}
prefixLen := 0
for _, r := range config.partialV2.Registries {
if refMatchesPrefix(ref, r.Prefix) {
if refMatchingPrefix(ref, r.Prefix) != -1 {
length := len(r.Prefix)
if length > prefixLen {
reg = r
@ -772,6 +896,17 @@ func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) {
res.shortNameMode = types.ShortNameModeInvalid
}

// Valid wildcarded prefixes must be in the format: *.example.com
// FIXME: Move to postProcessRegistries
// https://github.com/containers/image/pull/1191#discussion_r610623829
for i := range res.partialV2.Registries {
prefix := res.partialV2.Registries[i].Prefix
if prefix[:2] == "*." && strings.ContainsAny(prefix, "/@:") {
msg := fmt.Sprintf("Wildcarded prefix should be in the format: *.example.com. Current prefix %q is incorrectly formatted", prefix)
return nil, &InvalidRegistries{s: msg}
}
}

// Parse and validate short-name aliases.
cache, err := newShortNameAliasCache(path, &res.partialV2.shortNameAliasConf)
if err != nil {
@ -825,6 +960,11 @@ func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) {
c.unqualifiedSearchRegistriesOrigin = updates.unqualifiedSearchRegistriesOrigin
}

// == Merge credential helpers:
if updates.partialV2.CredentialHelpers != nil {
c.partialV2.CredentialHelpers = updates.partialV2.CredentialHelpers
}

// == Merge shortNameMode:
// We don’t maintain c.partialV2.ShortNameMode.
if updates.shortNameMode != types.ShortNameModeInvalid {
427
vendor/github.com/containers/image/v5/storage/storage_image.go
generated
vendored
@ -18,6 +18,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/internal/tmpdir"
internalTypes "github.com/containers/image/v5/internal/types"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache/none"
"github.com/containers/image/v5/types"
@ -45,6 +46,7 @@ var (
type storageImageSource struct {
imageRef storageReference
image *storage.Image
systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files
layerPosition map[digest.Digest]int // Where we are in reading a blob's layers
cachedManifest []byte // A cached copy of the manifest, if already known, or nil
getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
@ -54,17 +56,31 @@ type storageImageSource struct {

type storageImageDestination struct {
imageRef storageReference
directory string // Temporary directory where we store blobs until Commit() time
nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs
manifest []byte // Manifest contents, temporary
signatures []byte // Signature contents, temporary
signatureses map[digest.Digest][]byte // Instance signature contents, temporary
putBlobMutex sync.Mutex // Mutex to sync state for parallel PutBlob executions
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice
directory string // Temporary directory where we store blobs until Commit() time
nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs
manifest []byte // Manifest contents, temporary
signatures []byte // Signature contents, temporary
signatureses map[digest.Digest][]byte // Instance signature contents, temporary
SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice

// A storage destination may be used concurrently. Accesses are
// serialized via a mutex. Please refer to the individual comments
// below for details.
lock sync.Mutex
// Mapping from layer (by index) to the associated ID in the storage.
// It's protected *implicitly* since `commitLayer()`, at any given
// time, can only be executed by *one* goroutine. Please refer to
// `queueOrCommit()` for further details on how the single-caller
// guarantee is implemented.
indexToStorageID map[int]*string
// All accesses to below data are protected by `lock` which is made
// *explicit* in the code.
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
indexToPulledBlob map[int]*types.BlobInfo // Mapping from layer (by index) to pulled down blob
}

type storageImageCloser struct {
@ -96,6 +112,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, imageRef stor
// Build the reader object.
image := &storageImageSource{
imageRef: imageRef,
systemContext: sys,
image: img,
layerPosition: make(map[digest.Digest]int),
SignatureSizes: []int{},
@ -131,8 +148,35 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c
if info.Digest == image.GzippedEmptyLayerDigest {
return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
}

// NOTE: the blob is first written to a temporary file and subsequently
// closed. The intention is to keep the time we own the storage lock
// as short as possible to allow other processes to access the storage.
rc, n, _, err = s.getBlobAndLayerID(info)
return rc, n, err
if err != nil {
return nil, 0, err
}
defer rc.Close()

tmpFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "")
if err != nil {
return nil, 0, err
}

if _, err := io.Copy(tmpFile, rc); err != nil {
return nil, 0, err
}

if _, err := tmpFile.Seek(0, 0); err != nil {
return nil, 0, err
}

wrapper := ioutils.NewReadCloserWrapper(tmpFile, func() error {
defer os.Remove(tmpFile.Name())
return tmpFile.Close()
})

return wrapper, n, err
}
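The GetBlob change drains the blob into a temporary file so the storage lock is held only for the copy, then hands the caller a ReadCloser that deletes the file on Close (via ioutils.NewReadCloserWrapper). A self-contained sketch of that pattern using only the standard library, under hypothetical names:

// Assumes: import ("io"; "io/ioutil"; "os")
func detachToTempFile(rc io.ReadCloser, dir string) (io.ReadCloser, int64, error) {
	defer rc.Close() // releases whatever lock backs the source reader

	tmp, err := ioutil.TempFile(dir, "blob-")
	if err != nil {
		return nil, 0, err
	}
	n, err := io.Copy(tmp, rc)
	if err == nil {
		_, err = tmp.Seek(0, io.SeekStart)
	}
	if err != nil {
		tmp.Close()
		os.Remove(tmp.Name())
		return nil, 0, err
	}
	return &selfDeletingFile{tmp}, n, nil
}

type selfDeletingFile struct{ *os.File }

func (f *selfDeletingFile) Close() error {
	defer os.Remove(f.Name()) // the temp file lives exactly as long as the reader
	return f.File.Close()
}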

// getBlobAndLayer reads the data blob or filesystem layer which matches the digest and size, if given.
@ -347,14 +391,16 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*
return nil, errors.Wrapf(err, "error creating a temporary directory")
}
image := &storageImageDestination{
imageRef: imageRef,
directory: directory,
signatureses: make(map[digest.Digest][]byte),
blobDiffIDs: make(map[digest.Digest]digest.Digest),
fileSizes: make(map[digest.Digest]int64),
filenames: make(map[digest.Digest]string),
SignatureSizes: []int{},
SignaturesSizes: make(map[digest.Digest][]int),
imageRef: imageRef,
directory: directory,
signatureses: make(map[digest.Digest][]byte),
blobDiffIDs: make(map[digest.Digest]digest.Digest),
fileSizes: make(map[digest.Digest]int64),
filenames: make(map[digest.Digest]string),
SignatureSizes: []int{},
SignaturesSizes: make(map[digest.Digest][]int),
indexToStorageID: make(map[int]*string),
indexToPulledBlob: make(map[int]*types.BlobInfo),
}
return image, nil
}
@ -381,6 +427,26 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string {
return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
}

// PutBlobWithOptions is a wrapper around PutBlob. If options.LayerIndex is
// set, the blob will be committed directly. Either by the calling goroutine
// or by another goroutine already committing layers.
//
// Please note that TryReusingBlobWithOptions and PutBlobWithOptions *must* be
// used together. Mixing the two with non "WithOptions" functions is not
// supported.
func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options internalTypes.PutBlobOptions) (types.BlobInfo, error) {
info, err := s.PutBlob(ctx, stream, blobinfo, options.Cache, options.IsConfig)
if err != nil {
return info, err
}

if options.IsConfig || options.LayerIndex == nil {
return info, nil
}

return info, s.queueOrCommit(ctx, info, *options.LayerIndex)
}

// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (s *storageImageDestination) HasThreadSafePutBlob() bool {
return true
@ -436,11 +502,11 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
return errorBlobInfo, errors.WithStack(ErrBlobSizeMismatch)
}
// Record information about the blob.
s.putBlobMutex.Lock()
s.lock.Lock()
s.blobDiffIDs[hasher.Digest()] = diffID.Digest()
s.fileSizes[hasher.Digest()] = counter.Count
s.filenames[hasher.Digest()] = filename
s.putBlobMutex.Unlock()
s.lock.Unlock()
blobDigest := blobinfo.Digest
if blobDigest.Validate() != nil {
blobDigest = hasher.Digest()
@ -458,6 +524,22 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}, nil
}

// TryReusingBlobWithOptions is a wrapper around TryReusingBlob. If
// options.LayerIndex is set, the reused blob will be recorded as already
// pulled.
//
// Please note that TryReusingBlobWithOptions and PutBlobWithOptions *must* be
// used together. Mixing the two with the non "WithOptions" functions
// is not supported.
func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options internalTypes.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
reused, info, err := s.TryReusingBlob(ctx, blobinfo, options.Cache, options.CanSubstitute)
if err != nil || !reused || options.LayerIndex == nil {
return reused, info, err
}

return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex)
}

// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
@ -469,8 +551,8 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// May use and/or update cache.
func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
// lock the entire method as it executes fairly quickly
s.putBlobMutex.Lock()
defer s.putBlobMutex.Unlock()
s.lock.Lock()
defer s.lock.Unlock()
if blobinfo.Digest == "" {
return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
}
@ -607,6 +689,192 @@ func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, er
return nil, errors.New("blob not found")
}

// queueOrCommit queues in the specified blob to be committed to the storage.
// If no other goroutine is already committing layers, the layer and all
// subsequent layers (if already queued) will be committed to the storage.
func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.BlobInfo, index int) error {
// NOTE: whenever the code below is touched, make sure that all code
// paths unlock the lock and to unlock it exactly once.
//
// Conceptually, the code is divided in two stages:
//
// 1) Queue in work by marking the layer as ready to be committed.
// If at least one previous/parent layer with a lower index has
// not yet been committed, return early.
//
// 2) Process the queued-in work by committing the "ready" layers
// in sequence. Make sure that more items can be queued-in
// during the comparatively I/O expensive task of committing a
// layer.
//
// The conceptual benefit of this design is that caller can continue
// pulling layers after an early return. At any given time, only one
// caller is the "worker" routine committing layers. All other routines
// can continue pulling and queuing in layers.
s.lock.Lock()
s.indexToPulledBlob[index] = &blob

// We're still waiting for at least one previous/parent layer to be
// committed, so there's nothing to do.
if index != s.currentIndex {
s.lock.Unlock()
return nil
}

for info := s.indexToPulledBlob[index]; info != nil; info = s.indexToPulledBlob[index] {
s.lock.Unlock()
layerInfo := manifest.LayerInfo{
BlobInfo: *info,
EmptyLayer: info.Digest == image.GzippedEmptyLayerDigest,
}
// Note: commitLayer locks on-demand.
if err := s.commitLayer(ctx, layerInfo, index); err != nil {
return err
}
s.lock.Lock()
index++
}

// Set the index at the very end to make sure that only one routine
// enters stage 2).
s.currentIndex = index
s.lock.Unlock()
return nil
}
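A simplified, self-contained sketch of the queue-or-commit idea under hypothetical names: whichever goroutine deposits the lowest missing index becomes the committer and drains the queue in order, unlocking around the expensive work exactly as the method above does.

// Assumes: import "sync"
type committer struct {
	mu      sync.Mutex
	next    int            // lowest index not yet committed
	pending map[int]func() // index -> commit action
}

func (c *committer) queueOrCommit(index int, commit func()) {
	c.mu.Lock()
	c.pending[index] = commit
	if index != c.next {
		c.mu.Unlock() // a lower index is still missing; its owner will drain
		return
	}
	for fn := c.pending[index]; fn != nil; fn = c.pending[index] {
		c.mu.Unlock()
		fn() // expensive I/O runs without the lock, as in commitLayer
		c.mu.Lock()
		index++
	}
	c.next = index // only the draining goroutine advances the cursor
	c.mu.Unlock()
}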

// commitLayer commits the specified blob with the given index to the storage.
// Note that the previous layer is expected to already be committed.
//
// Caution: this function must be called without holding `s.lock`. Callers
// must guarantee that, at any given time, at most one goroutine may execute
// `commitLayer()`.
func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest.LayerInfo, index int) error {
// Already committed? Return early.
if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
return nil
}

// Start with an empty string or the previous layer ID. Note that
// `s.indexToStorageID` can only be accessed by *one* goroutine at any
// given time. Hence, we don't need to lock accesses.
var lastLayer string
if prev := s.indexToStorageID[index-1]; prev != nil {
lastLayer = *prev
}

// Carry over the previous ID for empty non-base layers.
if blob.EmptyLayer && index > 0 {
s.indexToStorageID[index] = &lastLayer
return nil
}

// Check if there's already a layer with the ID that we'd give to the result of applying
// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
s.lock.Lock()
diffID, haveDiffID := s.blobDiffIDs[blob.Digest]
s.lock.Unlock()
if !haveDiffID {
// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
// or to even check if we had it.
// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
// that relies on using a blob digest that has never been seen by the store had better call
// TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
// so far we are going to accommodate that (if we should be doing that at all).
logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
// NOTE: use `TryReusingBlob` to prevent recursion.
has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false)
if err != nil {
return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
}
if !has {
return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String())
}
diffID, haveDiffID = s.blobDiffIDs[blob.Digest]
if !haveDiffID {
return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String())
}
}
id := diffID.Hex()
if lastLayer != "" {
id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
}
if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
// There's already a layer that should have the right contents, just reuse it.
lastLayer = layer.ID
s.indexToStorageID[index] = &lastLayer
return nil
}
// Check if we previously cached a file with that blob's contents. If we didn't,
// then we need to read the desired contents from a layer.
s.lock.Lock()
filename, ok := s.filenames[blob.Digest]
s.lock.Unlock()
if !ok {
// Try to find the layer with contents matching that blobsum.
layer := ""
layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID)
if err2 == nil && len(layers) > 0 {
layer = layers[0].ID
} else {
layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest)
if err2 == nil && len(layers) > 0 {
layer = layers[0].ID
}
}
if layer == "" {
return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest)
}
// Read the layer's contents.
noCompression := archive.Uncompressed
diffOptions := &storage.DiffOptions{
Compression: &noCompression,
}
diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
if err2 != nil {
return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest)
}
// Copy the layer diff to a file. Diff() takes a lock that it holds
// until the ReadCloser that it returns is closed, and PutLayer() wants
// the same lock, so the diff can't just be directly streamed from one
// to the other.
filename = s.computeNextBlobCacheFile()
file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
if err != nil {
diff.Close()
return errors.Wrapf(err, "error creating temporary file %q", filename)
}
// Copy the data to the file.
// TODO: This can take quite some time, and should ideally be cancellable using
// ctx.Done().
_, err = io.Copy(file, diff)
diff.Close()
file.Close()
if err != nil {
return errors.Wrapf(err, "error storing blob to file %q", filename)
}
// Make sure that we can find this file later, should we need the layer's
// contents again.
s.lock.Lock()
s.filenames[blob.Digest] = filename
s.lock.Unlock()
}
// Read the cached blob and use it as a diff.
file, err := os.Open(filename)
if err != nil {
return errors.Wrapf(err, "error opening file %q", filename)
}
defer file.Close()
// Build the new layer using the diff, regardless of where it came from.
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, nil, file)
if err != nil && errors.Cause(err) != storage.ErrDuplicateID {
return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest)
}

s.indexToStorageID[index] = &layer.ID
return nil
}
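The chained-ID scheme used above in one place, as a hedged helper (not in the source): a base layer is keyed by its DiffID hex, every later layer by the digest of "<parent ID>+<DiffID hex>", so identical layer stacks deterministically get identical storage IDs.

// Assumes: import "github.com/opencontainers/go-digest"
func chainedLayerID(lastLayer string, diffID digest.Digest) string {
	if lastLayer == "" {
		return diffID.Hex() // base layer: the DiffID alone
	}
	// non-base layer: hash the parent chain ID together with this DiffID
	return digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
}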
|
||||
|
||||
func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
|
||||
if len(s.manifest) == 0 {
|
||||
return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
|
||||
@ -644,108 +912,19 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
|
||||
return errors.Wrapf(err, "error parsing manifest")
|
||||
}
|
||||
layerBlobs := man.LayerInfos()
|
||||
// Extract or find the layers.
|
||||
lastLayer := ""
|
||||
for _, blob := range layerBlobs {
|
||||
if blob.EmptyLayer {
|
||||
continue
|
||||
// Extract, commit, or find the layers.
|
||||
for i, blob := range layerBlobs {
|
||||
if err := s.commitLayer(ctx, blob, i); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check if there's already a layer with the ID that we'd give to the result of applying
|
||||
// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
|
||||
diffID, haveDiffID := s.blobDiffIDs[blob.Digest]
|
||||
if !haveDiffID {
|
||||
// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
|
||||
// or to even check if we had it.
|
||||
// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
|
||||
// that relies on using a blob digest that has never been seen by the store had better call
|
||||
// TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
// so far we are going to accommodate that (if we should be doing that at all).
logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false)
if err != nil {
return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
}
if !has {
return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String())
}
diffID, haveDiffID = s.blobDiffIDs[blob.Digest]
if !haveDiffID {
return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String())
}
}
var lastLayer string
if len(layerBlobs) > 0 { // Can happen when using caches
prev := s.indexToStorageID[len(layerBlobs)-1]
if prev == nil {
return errors.Errorf("Internal error: StorageImageDestination.Commit(): previous layer %d hasn't been commited (lastLayer == nil)", len(layerBlobs)-1)
}
id := diffID.Hex()
if lastLayer != "" {
id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
}
if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
// There's already a layer that should have the right contents, just reuse it.
lastLayer = layer.ID
continue
}
// Check if we previously cached a file with that blob's contents. If we didn't,
// then we need to read the desired contents from a layer.
filename, ok := s.filenames[blob.Digest]
if !ok {
// Try to find the layer with contents matching that blobsum.
layer := ""
layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID)
if err2 == nil && len(layers) > 0 {
layer = layers[0].ID
} else {
layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest)
if err2 == nil && len(layers) > 0 {
layer = layers[0].ID
}
}
if layer == "" {
return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest)
}
// Read the layer's contents.
noCompression := archive.Uncompressed
diffOptions := &storage.DiffOptions{
Compression: &noCompression,
}
diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
if err2 != nil {
return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest)
}
// Copy the layer diff to a file. Diff() takes a lock that it holds
// until the ReadCloser that it returns is closed, and PutLayer() wants
// the same lock, so the diff can't just be directly streamed from one
// to the other.
filename = s.computeNextBlobCacheFile()
file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
if err != nil {
diff.Close()
return errors.Wrapf(err, "error creating temporary file %q", filename)
}
// Copy the data to the file.
// TODO: This can take quite some time, and should ideally be cancellable using
// ctx.Done().
_, err = io.Copy(file, diff)
diff.Close()
file.Close()
if err != nil {
return errors.Wrapf(err, "error storing blob to file %q", filename)
}
// Make sure that we can find this file later, should we need the layer's
// contents again.
s.filenames[blob.Digest] = filename
}
// Read the cached blob and use it as a diff.
file, err := os.Open(filename)
if err != nil {
return errors.Wrapf(err, "error opening file %q", filename)
}
defer file.Close()
// Build the new layer using the diff, regardless of where it came from.
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, nil, file)
if err != nil && errors.Cause(err) != storage.ErrDuplicateID {
return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest)
}
lastLayer = layer.ID
lastLayer = *prev
}

// If one of those blobs was a configuration blob, then we can try to dig out the date when the image

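Aside: the locking comment in the hunk above is why the layer diff is staged in a file — Diff() holds a store lock until its ReadCloser is closed, and PutLayer() wants the same lock. A minimal standalone sketch of that copy-then-reopen pattern (stageDiff is a made-up name for this illustration, not part of the diff):

    package main

    import (
        "io"
        "os"
        "strings"
    )

    // stageDiff copies a layer diff to a temporary file and closes the diff
    // reader (releasing the store lock) before the caller re-opens the file
    // and hands it to PutLayer.
    func stageDiff(diff io.ReadCloser, filename string) error {
        file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
        if err != nil {
            diff.Close()
            return err
        }
        _, err = io.Copy(file, diff)
        diff.Close() // lock released here, before PutLayer needs it
        file.Close()
        return err
    }

    func main() {
        src := io.NopCloser(strings.NewReader("diff bytes"))
        if err := stageDiff(src, "/tmp/blob-0"); err != nil {
            panic(err)
        }
    }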
10 vendor/github.com/containers/image/v5/types/types.go generated vendored
@ -147,7 +147,7 @@ type BlobInfo struct {
}

// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present.
// BlobInfocache.RecordKnownLocations / BlobInfocache.CandidateLocations record data aboud blobs keyed by (scope, digest).
// BlobInfocache.RecordKnownLocations / BlobInfocache.CandidateLocations record data about blobs keyed by (scope, digest).
// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable.
//
// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
@ -179,7 +179,7 @@ type BICReplacementCandidate struct {
// It records two kinds of data:
// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompresssion),
// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload)/
//
// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
@ -219,7 +219,7 @@ type BlobInfoCache interface {
// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate
@ -582,7 +582,7 @@ type SystemContext struct {

// === OCI.Transport overrides ===
// If not "", a directory containing a CA certificate (ending with ".crt"),
// a client certificate (ending with ".cert") and a client ceritificate key
// a client certificate (ending with ".cert") and a client certificate key
// (ending with ".key") used when downloading OCI image layers.
OCICertPath string
// Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
@ -594,7 +594,7 @@ type SystemContext struct {

// === docker.Transport overrides ===
// If not "", a directory containing a CA certificate (ending with ".crt"),
// a client certificate (ending with ".cert") and a client ceritificate key
// a client certificate (ending with ".cert") and a client certificate key
// (ending with ".key") used when talking to a Docker Registry.
DockerCertPath string
// If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above.

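Aside: the DiffID bookkeeping documented in this hunk can be pictured with a toy in-memory cache — an illustration only, not the real BlobInfoCache implementation; all names below are made up:

    package main

    import "fmt"

    // toyCache records (digest -> uncompressed digest) pairs and uses them to
    // look up substitutable variants of a blob, as the comments above describe.
    // The real interface also tracks known locations per (scope, digest).
    type toyCache struct {
        uncompressed map[string]string   // digest -> uncompressed digest
        variants     map[string][]string // uncompressed digest -> known digests
    }

    func (c *toyCache) record(digest, uncompressed string) {
        c.uncompressed[digest] = uncompressed
        c.variants[uncompressed] = append(c.variants[uncompressed], digest)
    }

    func (c *toyCache) candidates(digest string, canSubstitute bool) []string {
        if !canSubstitute {
            return []string{digest}
        }
        return c.variants[c.uncompressed[digest]]
    }

    func main() {
        c := &toyCache{uncompressed: map[string]string{}, variants: map[string][]string{}}
        c.record("sha256:gzipA", "sha256:diffid")
        c.record("sha256:zstdB", "sha256:diffid")
        fmt.Println(c.candidates("sha256:gzipA", true)) // both variants
    }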
4 vendor/github.com/containers/image/v5/version/version.go generated vendored
@ -6,9 +6,9 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 10
VersionMinor = 11
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 5
VersionPatch = 0

// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""

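Aside: with these constants the reported library version becomes "5.11.0". Assuming the usual Sprintf assembly (a sketch; not necessarily the exact code in version.go):

    package main

    import "fmt"

    const (
        VersionMajor = 5
        VersionMinor = 11
        VersionPatch = 0
        VersionDev   = ""
    )

    func main() {
        // Prints "5.11.0" after this bump.
        fmt.Printf("%d.%d.%d%s\n", VersionMajor, VersionMinor, VersionPatch, VersionDev)
    }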
6 vendor/github.com/klauspost/compress/flate/deflate.go generated vendored
@ -645,15 +645,15 @@ func (d *compressor) init(w io.Writer, level int) (err error) {
d.fill = (*compressor).fillBlock
d.step = (*compressor).store
case level == ConstantCompression:
d.w.logNewTablePenalty = 4
d.window = make([]byte, maxStoreBlockSize)
d.w.logNewTablePenalty = 8
d.window = make([]byte, 32<<10)
d.fill = (*compressor).fillBlock
d.step = (*compressor).storeHuff
case level == DefaultCompression:
level = 5
fallthrough
case level >= 1 && level <= 6:
d.w.logNewTablePenalty = 6
d.w.logNewTablePenalty = 8
d.fast = newFastEnc(level)
d.window = make([]byte, maxStoreBlockSize)
d.fill = (*compressor).fillBlock

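Aside: ConstantCompression is the Huffman-only mode retuned in this hunk (smaller 32 KiB window, logNewTablePenalty raised to 8). It can be exercised through the public API of the klauspost/compress drop-in flate package; a minimal sketch:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/klauspost/compress/flate"
    )

    func main() {
        var buf bytes.Buffer
        // ConstantCompression (-2) emits Huffman-only blocks, no LZ matching.
        w, err := flate.NewWriter(&buf, flate.ConstantCompression)
        if err != nil {
            panic(err)
        }
        w.Write(bytes.Repeat([]byte("abcd"), 1024))
        w.Close()
        fmt.Println("compressed size:", buf.Len())
    }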
26 vendor/github.com/klauspost/compress/flate/fast_encoder.go generated vendored
@ -6,6 +6,7 @@
package flate

import (
"encoding/binary"
"fmt"
"math/bits"
)
@ -65,26 +66,15 @@ func load32(b []byte, i int) uint32 {
}

func load64(b []byte, i int) uint64 {
// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
b = b[i:]
b = b[:8]
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
return binary.LittleEndian.Uint64(b[i:])
}

func load3232(b []byte, i int32) uint32 {
// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
b = b[i:]
b = b[:4]
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
return binary.LittleEndian.Uint32(b[i:])
}

func load6432(b []byte, i int32) uint64 {
// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
b = b[i:]
b = b[:8]
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
return binary.LittleEndian.Uint64(b[i:])
}

func hash(u uint32) uint32 {
@ -225,9 +215,9 @@ func (e *fastGen) Reset() {
func matchLen(a, b []byte) int {
b = b[:len(a)]
var checked int
if len(a) > 4 {
if len(a) >= 4 {
// Try 4 bytes first
if diff := load32(a, 0) ^ load32(b, 0); diff != 0 {
if diff := binary.LittleEndian.Uint32(a) ^ binary.LittleEndian.Uint32(b); diff != 0 {
return bits.TrailingZeros32(diff) >> 3
}
// Switch to 8 byte matching.
@ -236,7 +226,7 @@ func matchLen(a, b []byte) int {
b = b[4:]
for len(a) >= 8 {
b = b[:len(a)]
if diff := load64(a, 0) ^ load64(b, 0); diff != 0 {
if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
return checked + (bits.TrailingZeros64(diff) >> 3)
}
checked += 8
@ -247,7 +237,7 @@ func matchLen(a, b []byte) int {
b = b[:len(a)]
for i := range a {
if a[i] != b[i] {
return int(i) + checked
return i + checked
}
}
return len(a) + checked

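Aside: the rewritten loads lean on encoding/binary, which the compiler turns into single unaligned reads. The 4-byte fast path of matchLen can be reproduced in isolation (matchLen4 is a made-up name for this sketch):

    package main

    import (
        "encoding/binary"
        "fmt"
        "math/bits"
    )

    // matchLen4 mirrors the fast path above: XOR the two 32-bit words and count
    // trailing zero bytes to locate the first mismatching byte.
    func matchLen4(a, b []byte) int {
        if diff := binary.LittleEndian.Uint32(a) ^ binary.LittleEndian.Uint32(b); diff != 0 {
            return bits.TrailingZeros32(diff) >> 3
        }
        return 4
    }

    func main() {
        fmt.Println(matchLen4([]byte("abcd"), []byte("abxd"))) // 2
    }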
39 vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go generated vendored
@ -5,6 +5,7 @@
package flate

import (
"encoding/binary"
"io"
)

@ -206,7 +207,7 @@ func (w *huffmanBitWriter) write(b []byte) {
}

func (w *huffmanBitWriter) writeBits(b int32, nb uint16) {
w.bits |= uint64(b) << (w.nbits & reg16SizeMask64)
w.bits |= uint64(b) << w.nbits
w.nbits += nb
if w.nbits >= 48 {
w.writeOutBits()
@ -420,13 +421,11 @@ func (w *huffmanBitWriter) writeOutBits() {
w.bits >>= 48
w.nbits -= 48
n := w.nbytes
w.bytes[n] = byte(bits)
w.bytes[n+1] = byte(bits >> 8)
w.bytes[n+2] = byte(bits >> 16)
w.bytes[n+3] = byte(bits >> 24)
w.bytes[n+4] = byte(bits >> 32)
w.bytes[n+5] = byte(bits >> 40)

// We over-write, but faster...
binary.LittleEndian.PutUint64(w.bytes[n:], bits)
n += 6

if n >= bufferFlushSize {
if w.err != nil {
n = 0
@ -435,6 +434,7 @@ func (w *huffmanBitWriter) writeOutBits() {
w.write(w.bytes[:n])
n = 0
}

w.nbytes = n
}

@ -759,7 +759,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
} else {
// inlined
c := lengths[lengthCode&31]
w.bits |= uint64(c.code) << (w.nbits & reg16SizeMask64)
w.bits |= uint64(c.code) << w.nbits
w.nbits += c.len
if w.nbits >= 48 {
w.writeOutBits()
@ -779,7 +779,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
} else {
// inlined
c := offs[offsetCode&31]
w.bits |= uint64(c.code) << (w.nbits & reg16SizeMask64)
w.bits |= uint64(c.code) << w.nbits
w.nbits += c.len
if w.nbits >= 48 {
w.writeOutBits()
@ -830,8 +830,8 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
// Assume header is around 70 bytes:
// https://stackoverflow.com/a/25454430
const guessHeaderSizeBits = 70 * 8
estBits, estExtra := histogramSize(input, w.literalFreq[:], !eof && !sync)
estBits += w.lastHeader + 15
estBits := histogramSize(input, w.literalFreq[:], !eof && !sync)
estBits += w.lastHeader + len(input)/32
if w.lastHeader == 0 {
estBits += guessHeaderSizeBits
}
@ -845,9 +845,9 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
return
}

reuseSize := 0
if w.lastHeader > 0 {
reuseSize := w.literalEncoding.bitLength(w.literalFreq[:256])
estBits += estExtra
reuseSize = w.literalEncoding.bitLength(w.literalFreq[:256])

if estBits < reuseSize {
// We owe an EOB
@ -859,6 +859,10 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
const numLiterals = endBlockMarker + 1
const numOffsets = 1
if w.lastHeader == 0 {
if !eof && !sync {
// Generate a slightly suboptimal tree that can be used for all.
fillHist(w.literalFreq[:numLiterals])
}
w.literalFreq[endBlockMarker] = 1
w.literalEncoding.generate(w.literalFreq[:numLiterals], 15)

@ -878,19 +882,14 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
for _, t := range input {
// Bitwriting inlined, ~30% speedup
c := encoding[t]
w.bits |= uint64(c.code) << ((w.nbits) & reg16SizeMask64)
w.bits |= uint64(c.code) << w.nbits
w.nbits += c.len
if w.nbits >= 48 {
bits := w.bits
w.bits >>= 48
w.nbits -= 48
n := w.nbytes
w.bytes[n] = byte(bits)
w.bytes[n+1] = byte(bits >> 8)
w.bytes[n+2] = byte(bits >> 16)
w.bytes[n+3] = byte(bits >> 24)
w.bytes[n+4] = byte(bits >> 32)
w.bytes[n+5] = byte(bits >> 40)
binary.LittleEndian.PutUint64(w.bytes[n:], bits)
n += 6
if n >= bufferFlushSize {
if w.err != nil {

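Aside: the writeOutBits change swaps six single-byte stores for one 8-byte store that intentionally over-writes — only the byte count advances by 6, so the two extra bytes are clobbered by the next flush. A sketch of just that step (flush48 is a hypothetical helper, not the vendored function):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // flush48 writes the full 64-bit accumulator but only claims the six
    // valid bytes, mirroring the "We over-write, but faster..." comment above.
    func flush48(dst []byte, n int, bits uint64) int {
        binary.LittleEndian.PutUint64(dst[n:], bits) // needs 8 bytes of room
        return n + 6
    }

    func main() {
        buf := make([]byte, 16)
        n := flush48(buf, 0, 0x0000414243444546)
        fmt.Println(n, buf[:6]) // 6 [70 69 68 67 66 65]
    }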
55 vendor/github.com/klauspost/compress/flate/huffman_code.go generated vendored
@ -122,6 +122,16 @@ func (h *huffmanEncoder) bitLength(freq []uint16) int {
return total
}

func (h *huffmanEncoder) bitLengthRaw(b []byte) int {
var total int
for _, f := range b {
if f != 0 {
total += int(h.codes[f].len)
}
}
return total
}

// Return the number of literals assigned to each bit size in the Huffman encoding
//
// This method is only called when list.length >= 3
@ -327,37 +337,40 @@ func atLeastOne(v float32) float32 {
return v
}

// Unassigned values are assigned '1' in the histogram.
func fillHist(b []uint16) {
for i, v := range b {
if v == 0 {
b[i] = 1
}
}
}

// histogramSize accumulates a histogram of b in h.
// An estimated size in bits is returned.
// Unassigned values are assigned '1' in the histogram.
// len(h) must be >= 256, and h's elements must be all zeroes.
func histogramSize(b []byte, h []uint16, fill bool) (int, int) {
func histogramSize(b []byte, h []uint16, fill bool) (bits int) {
h = h[:256]
for _, t := range b {
h[t]++
}
invTotal := 1.0 / float32(len(b))
shannon := float32(0.0)
var extra float32
total := len(b)
if fill {
oneBits := atLeastOne(-mFastLog2(invTotal))
for i, v := range h[:] {
if v > 0 {
n := float32(v)
shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
} else {
h[i] = 1
extra += oneBits
}
}
} else {
for _, v := range h[:] {
if v > 0 {
n := float32(v)
shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
for _, v := range h {
if v == 0 {
total++
}
}
}

return int(shannon + 0.99), int(extra + 0.99)
invTotal := 1.0 / float32(total)
shannon := float32(0.0)
for _, v := range h {
if v > 0 {
n := float32(v)
shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
}
}

return int(shannon + 0.99)
}

2 vendor/github.com/klauspost/compress/flate/level2.go generated vendored
@ -155,7 +155,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {

// Store every second hash in-between, but offset by 1.
for i := s - l + 2; i < s-5; i += 7 {
x := load6432(src, int32(i))
x := load6432(src, i)
nextHash := hash4u(uint32(x), bTableBits)
e.table[nextHash] = tableEntry{offset: e.cur + i}
// Skip one

10 vendor/github.com/klauspost/compress/fse/compress.go generated vendored
@ -301,7 +301,7 @@ func (s *Scratch) writeCount() error {
out[outP+1] = byte(bitStream >> 8)
outP += (bitCount + 7) / 8

if uint16(charnum) > s.symbolLen {
if charnum > s.symbolLen {
return errors.New("internal error: charnum > s.symbolLen")
}
s.Out = out[:outP]
@ -331,7 +331,7 @@ type cTable struct {
func (s *Scratch) allocCtable() {
tableSize := 1 << s.actualTableLog
// get tableSymbol that is big enough.
if cap(s.ct.tableSymbol) < int(tableSize) {
if cap(s.ct.tableSymbol) < tableSize {
s.ct.tableSymbol = make([]byte, tableSize)
}
s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
@ -565,8 +565,8 @@ func (s *Scratch) normalizeCount2() error {
distributed uint32
total = uint32(s.br.remain())
tableLog = s.actualTableLog
lowThreshold = uint32(total >> tableLog)
lowOne = uint32((total * 3) >> (tableLog + 1))
lowThreshold = total >> tableLog
lowOne = (total * 3) >> (tableLog + 1)
)
for i, cnt := range s.count[:s.symbolLen] {
if cnt == 0 {
@ -591,7 +591,7 @@ func (s *Scratch) normalizeCount2() error {

if (total / toDistribute) > lowOne {
// risk of rounding to zero
lowOne = uint32((total * 3) / (toDistribute * 2))
lowOne = (total * 3) / (toDistribute * 2)
for i, cnt := range s.count[:s.symbolLen] {
if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
s.norm[i] = 1

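Aside: these fse hunks mostly drop redundant unsigned conversions; behavior is unchanged. For context, a minimal use of the package's public entry point (error handling shown because incompressible input is reported via an error):

    package main

    import (
        "fmt"

        "github.com/klauspost/compress/fse"
    )

    func main() {
        in := []byte("an example payload with repeated repeated repeated bytes")
        var s fse.Scratch
        out, err := fse.Compress(in, &s)
        if err != nil {
            fmt.Println("not compressible:", err)
            return
        }
        fmt.Println(len(in), "->", len(out))
    }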
4 vendor/github.com/klauspost/compress/fse/decompress.go generated vendored
@ -172,7 +172,7 @@ type decSymbol struct {
// allocDtable will allocate decoding tables if they are not big enough.
func (s *Scratch) allocDtable() {
tableSize := 1 << s.actualTableLog
if cap(s.decTable) < int(tableSize) {
if cap(s.decTable) < tableSize {
s.decTable = make([]decSymbol, tableSize)
}
s.decTable = s.decTable[:tableSize]
@ -340,7 +340,7 @@ type decoder struct {
func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) {
d.dt = dt
d.br = in
d.state = uint16(in.getBits(tableLog))
d.state = in.getBits(tableLog)
}

// next returns the next symbol and sets the next state.

4 vendor/github.com/klauspost/compress/huff0/compress.go generated vendored
@ -403,7 +403,7 @@ func (s *Scratch) buildCTable() error {
var startNode = int16(s.symbolLen)
nonNullRank := s.symbolLen - 1

nodeNb := int16(startNode)
nodeNb := startNode
huffNode := s.nodes[1 : huffNodesLen+1]

// This overlays the slice above, but allows "-1" index lookups.
@ -580,7 +580,7 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {

// Get pos of last (smallest) symbol per rank
{
currentNbBits := uint8(maxNbBits)
currentNbBits := maxNbBits
for pos := int(n); pos >= 0; pos-- {
if huffNode[pos].nbBits >= currentNbBits {
continue

2 vendor/github.com/klauspost/compress/snappy/snappy.go generated vendored
@ -94,5 +94,5 @@ var crcTable = crc32.MakeTable(crc32.Castagnoli)
// https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 {
c := crc32.Update(0, crcTable, b)
return uint32(c>>15|c<<17) + 0xa282ead8
return c>>15 | c<<17 + 0xa282ead8
}

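Aside: this is the masked CRC-32C from the snappy framing format linked above — rotate right by 15 bits, then add a constant, so that CRCs of data that itself contains CRCs stay well distributed. A standalone version with the rotation parenthesized explicitly, per the framing-format document:

    package main

    import (
        "fmt"
        "hash/crc32"
    )

    var crcTable = crc32.MakeTable(crc32.Castagnoli)

    // maskedCRC computes ((crc >> 15) | (crc << 17)) + 0xa282ead8 over CRC-32C.
    func maskedCRC(b []byte) uint32 {
        c := crc32.Update(0, crcTable, b)
        return ((c >> 15) | (c << 17)) + 0xa282ead8
    }

    func main() {
        fmt.Printf("%08x\n", maskedCRC([]byte("snappy")))
    }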
42 vendor/github.com/klauspost/compress/zstd/blockenc.go generated vendored
@ -22,28 +22,44 @@ type blockEnc struct {
dictLitEnc *huff0.Scratch
wr bitWriter

extraLits int
last bool

extraLits int
output []byte
recentOffsets [3]uint32
prevRecentOffsets [3]uint32

last bool
lowMem bool
}

// init should be used once the block has been created.
// If called more than once, the effect is the same as calling reset.
func (b *blockEnc) init() {
if cap(b.literals) < maxCompressedLiteralSize {
b.literals = make([]byte, 0, maxCompressedLiteralSize)
}
const defSeqs = 200
b.literals = b.literals[:0]
if cap(b.sequences) < defSeqs {
b.sequences = make([]seq, 0, defSeqs)
}
if cap(b.output) < maxCompressedBlockSize {
b.output = make([]byte, 0, maxCompressedBlockSize)
if b.lowMem {
// 1K literals
if cap(b.literals) < 1<<10 {
b.literals = make([]byte, 0, 1<<10)
}
const defSeqs = 20
if cap(b.sequences) < defSeqs {
b.sequences = make([]seq, 0, defSeqs)
}
// 1K
if cap(b.output) < 1<<10 {
b.output = make([]byte, 0, 1<<10)
}
} else {
if cap(b.literals) < maxCompressedBlockSize {
b.literals = make([]byte, 0, maxCompressedBlockSize)
}
const defSeqs = 200
if cap(b.sequences) < defSeqs {
b.sequences = make([]seq, 0, defSeqs)
}
if cap(b.output) < maxCompressedBlockSize {
b.output = make([]byte, 0, maxCompressedBlockSize)
}
}

if b.coders.mlEnc == nil {
b.coders.mlEnc = &fseEncoder{}
b.coders.mlPrev = &fseEncoder{}

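Aside: the new lowMem path starts the literals and output buffers at 1 KiB instead of pre-sizing them to the maximum block size, growing on demand. If I read this klauspost/compress release correctly, the flag is surfaced through an encoder option; a sketch under that assumption:

    package main

    import (
        "fmt"

        "github.com/klauspost/compress/zstd"
    )

    func main() {
        // WithLowerEncoderMem trades some speed for much smaller initial
        // buffer allocations (the lowMem flag threaded through blockEnc).
        enc, err := zstd.NewWriter(nil, zstd.WithLowerEncoderMem(true))
        if err != nil {
            panic(err)
        }
        defer enc.Close()
        out := enc.EncodeAll([]byte("hello hello hello"), nil)
        fmt.Println("encoded", len(out), "bytes")
    }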
40 vendor/github.com/klauspost/compress/zstd/enc_base.go generated vendored
@ -7,6 +7,10 @@ import (
"github.com/klauspost/compress/zstd/internal/xxhash"
)

const (
dictShardBits = 6
)

type fastBase struct {
// cur is the offset at the start of hist
cur int32
@ -17,6 +21,7 @@ type fastBase struct {
tmp [8]byte
blk *blockEnc
lastDictID uint32
lowMem bool
}

// CRC returns the underlying CRC writer.
@ -57,15 +62,10 @@ func (e *fastBase) addBlock(src []byte) int32 {
// check if we have space already
if len(e.hist)+len(src) > cap(e.hist) {
if cap(e.hist) == 0 {
l := e.maxMatchOff * 2
// Make it at least 1MB.
if l < 1<<20 {
l = 1 << 20
}
e.hist = make([]byte, 0, l)
e.ensureHist(len(src))
} else {
if cap(e.hist) < int(e.maxMatchOff*2) {
panic("unexpected buffer size")
if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) {
panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff))
}
// Move down
offset := int32(len(e.hist)) - e.maxMatchOff
@ -79,6 +79,28 @@ func (e *fastBase) addBlock(src []byte) int32 {
return s
}

// ensureHist will ensure that history can keep at least this many bytes.
func (e *fastBase) ensureHist(n int) {
if cap(e.hist) >= n {
return
}
l := e.maxMatchOff
if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize {
l += maxCompressedBlockSize
} else {
l += e.maxMatchOff
}
// Make it at least 1MB.
if l < 1<<20 && !e.lowMem {
l = 1 << 20
}
// Make it at least the requested size.
if l < int32(n) {
l = int32(n)
}
e.hist = make([]byte, 0, l)
}

// useBlock will replace the block with the provided one,
// but transfer recent offsets from the previous.
func (e *fastBase) UseBlock(enc *blockEnc) {
@ -117,7 +139,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
// Reset the encoding table.
func (e *fastBase) resetBase(d *dict, singleBlock bool) {
if e.blk == nil {
e.blk = &blockEnc{}
e.blk = &blockEnc{lowMem: e.lowMem}
e.blk.init()
} else {
e.blk.reset(nil)

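Aside: ensureHist's sizing rule in plain arithmetic — the history must hold the match window plus one full block (or two windows, for large windows when lowMem is off), at least 1 MiB unless lowMem, and at least the requested n. A standalone version (histSize is a made-up stand-in; maxBlock plays the role of maxCompressedBlockSize):

    package main

    import "fmt"

    // histSize reproduces the capacity computation in ensureHist above.
    func histSize(maxMatchOff, maxBlock, n int, lowMem bool) int {
        l := maxMatchOff
        if (lowMem && maxMatchOff > maxBlock) || maxMatchOff <= maxBlock {
            l += maxBlock
        } else {
            l += maxMatchOff
        }
        if l < 1<<20 && !lowMem {
            l = 1 << 20 // floor of 1 MiB unless lowMem
        }
        if l < n {
            l = n // never smaller than the request
        }
        return l
    }

    func main() {
        // 128 KiB window, 128 KiB block, small request, normal memory mode.
        fmt.Println(histSize(1<<17, 1<<17, 4096, false)) // 1048576
    }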
1 vendor/github.com/klauspost/compress/zstd/enc_best.go generated vendored
@ -407,6 +407,7 @@ encodeLoop:
// Most notable difference is that src will not be copied for history and
// we do not need to check for max match length.
func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
e.ensureHist(len(src))
e.Encode(blk, src)
}


591 vendor/github.com/klauspost/compress/zstd/enc_better.go generated vendored
@ -16,6 +16,12 @@ const (
// This greatly depends on the type of input.
betterShortTableBits = 13 // Bits used in the short match table
betterShortTableSize = 1 << betterShortTableBits // Size of the table

betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table
betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard

betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table
betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard
)

type prevEntry struct {
@ -31,10 +37,17 @@ type prevEntry struct {
// and that it is longer (lazy matching).
type betterFastEncoder struct {
fastBase
table [betterShortTableSize]tableEntry
longTable [betterLongTableSize]prevEntry
dictTable []tableEntry
dictLongTable []prevEntry
table [betterShortTableSize]tableEntry
longTable [betterLongTableSize]prevEntry
}

type betterFastEncoderDict struct {
betterFastEncoder
dictTable []tableEntry
dictLongTable []prevEntry
shortTableShardDirty [betterShortTableShardCnt]bool
longTableShardDirty [betterLongTableShardCnt]bool
allDirty bool
}

// Encode improves compression...
@ -516,11 +529,511 @@ encodeLoop:
// Most notable difference is that src will not be copied for history and
// we do not need to check for max match length.
func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
e.ensureHist(len(src))
e.Encode(blk, src)
}

// Encode improves compression...
func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) {
const (
// Input margin is the number of bytes we read (8)
// and the maximum we will read ahead (2)
inputMargin = 8 + 2
minNonLiteralBlockSize = 16
)

// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
for i := range e.longTable[:] {
e.longTable[i] = prevEntry{}
}
e.cur = e.maxMatchOff
e.allDirty = true
break
}
// Shift down everything in the table that isn't already too far away.
minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
for i := range e.table[:] {
v := e.table[i].offset
if v < minOff {
v = 0
} else {
v = v - e.cur + e.maxMatchOff
}
e.table[i].offset = v
}
for i := range e.longTable[:] {
v := e.longTable[i].offset
v2 := e.longTable[i].prev
if v < minOff {
v = 0
v2 = 0
} else {
v = v - e.cur + e.maxMatchOff
if v2 < minOff {
v2 = 0
} else {
v2 = v2 - e.cur + e.maxMatchOff
}
}
e.longTable[i] = prevEntry{
offset: v,
prev: v2,
}
}
e.allDirty = true
e.cur = e.maxMatchOff
break
}

s := e.addBlock(src)
blk.size = len(src)
if len(src) < minNonLiteralBlockSize {
blk.extraLits = len(src)
blk.literals = blk.literals[:len(src)]
copy(blk.literals, src)
return
}

// Override src
src = e.hist
sLimit := int32(len(src)) - inputMargin
// stepSize is the number of bytes to skip on every main loop iteration.
// It should be >= 1.
const stepSize = 1

const kSearchStrength = 9

// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s
cv := load6432(src, s)

// Relative offsets
offset1 := int32(blk.recentOffsets[0])
offset2 := int32(blk.recentOffsets[1])

addLiterals := func(s *seq, until int32) {
if until == nextEmit {
return
}
blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit)
}
if debug {
println("recent offsets:", blk.recentOffsets)
}

encodeLoop:
for {
var t int32
// We allow the encoder to optionally turn off repeat offsets across blocks
canRepeat := len(blk.sequences) > 2
var matched int32

for {
if debugAsserts && canRepeat && offset1 == 0 {
panic("offset0 was 0")
}

nextHashS := hash5(cv, betterShortTableBits)
nextHashL := hash8(cv, betterLongTableBits)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]

const repOff = 1
repIndex := s - offset1 + repOff
off := s + e.cur
e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
e.markLongShardDirty(nextHashL)
e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
e.markShortShardDirty(nextHashS)

if canRepeat {
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)

seq.matchLen = uint32(lenght - zstdMinMatch)

// We might be able to match backwards.
// Extend as long as we can.
start := s + repOff
// We end the search early, so we don't risk 0 literals
// and have to do special offset treatment.
startLimit := nextEmit + 1

tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
repIndex--
start--
seq.matchLen++
}
addLiterals(&seq, start)

// rep 0
seq.offset = 1
if debugSequences {
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)

// Index match start+1 (long) -> s - 1
index0 := s + repOff
s += lenght + repOff

nextEmit = s
if s >= sLimit {
if debug {
println("repeat ended", s, lenght)

}
break encodeLoop
}
// Index skipped...
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
h0 := hash8(cv0, betterLongTableBits)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.markLongShardDirty(h0)
h1 := hash5(cv1, betterShortTableBits)
e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
e.markShortShardDirty(h1)
index0 += 2
}
cv = load6432(src, s)
continue
}
const repOff2 = 1

// We deviate from the reference encoder and also check offset 2.
// Still slower and not much better, so disabled.
// repIndex = s - offset2 + repOff2
if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
// Consider history as well.
var seq seq
lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)

seq.matchLen = uint32(lenght - zstdMinMatch)

// We might be able to match backwards.
// Extend as long as we can.
start := s + repOff2
// We end the search early, so we don't risk 0 literals
// and have to do special offset treatment.
startLimit := nextEmit + 1

tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
repIndex--
start--
seq.matchLen++
}
addLiterals(&seq, start)

// rep 2
seq.offset = 2
if debugSequences {
println("repeat sequence 2", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)

index0 := s + repOff2
s += lenght + repOff2
nextEmit = s
if s >= sLimit {
if debug {
println("repeat ended", s, lenght)

}
break encodeLoop
}

// Index skipped...
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
h0 := hash8(cv0, betterLongTableBits)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.markLongShardDirty(h0)
h1 := hash5(cv1, betterShortTableBits)
e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
e.markShortShardDirty(h1)
index0 += 2
}
cv = load6432(src, s)
// Swap offsets
offset1, offset2 = offset2, offset1
continue
}
}
// Find the offsets of our two matches.
coffsetL := candidateL.offset - e.cur
coffsetLP := candidateL.prev - e.cur

// Check if we have a long match.
if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
// Found a long match, at least 8 bytes.
matched = e.matchlen(s+8, coffsetL+8, src) + 8
t = coffsetL
if debugAsserts && s <= t {
panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
}
if debugAsserts && s-t > e.maxMatchOff {
panic("s - t >e.maxMatchOff")
}
if debugMatches {
println("long match")
}

if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
// Found a long match, at least 8 bytes.
prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8
if prevMatch > matched {
matched = prevMatch
t = coffsetLP
}
if debugAsserts && s <= t {
panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
}
if debugAsserts && s-t > e.maxMatchOff {
panic("s - t >e.maxMatchOff")
}
if debugMatches {
println("long match")
}
}
break
}

// Check if we have a long match on prev.
if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
// Found a long match, at least 8 bytes.
matched = e.matchlen(s+8, coffsetLP+8, src) + 8
t = coffsetLP
if debugAsserts && s <= t {
panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
}
if debugAsserts && s-t > e.maxMatchOff {
panic("s - t >e.maxMatchOff")
}
if debugMatches {
println("long match")
}
break
}

coffsetS := candidateS.offset - e.cur

// Check if we have a short match.
if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
// found a regular match
matched = e.matchlen(s+4, coffsetS+4, src) + 4

// See if we can find a long match at s+1
const checkAt = 1
cv := load6432(src, s+checkAt)
nextHashL = hash8(cv, betterLongTableBits)
candidateL = e.longTable[nextHashL]
coffsetL = candidateL.offset - e.cur

// We can store it, since we have at least a 4 byte match.
e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset}
e.markLongShardDirty(nextHashL)
if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
// Found a long match, at least 8 bytes.
matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
if matchedNext > matched {
t = coffsetL
s += checkAt
matched = matchedNext
if debugMatches {
println("long match (after short)")
}
break
}
}

// Check prev long...
coffsetL = candidateL.prev - e.cur
if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
// Found a long match, at least 8 bytes.
matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
if matchedNext > matched {
t = coffsetL
s += checkAt
matched = matchedNext
if debugMatches {
println("prev long match (after short)")
}
break
}
}
t = coffsetS
if debugAsserts && s <= t {
panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
}
if debugAsserts && s-t > e.maxMatchOff {
panic("s - t >e.maxMatchOff")
}
if debugAsserts && t < 0 {
panic("t<0")
}
if debugMatches {
println("short match")
}
break
}

// No match found, move forward in input.
s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
if s >= sLimit {
break encodeLoop
}
cv = load6432(src, s)
}

// A 4-byte match has been found. Update recent offsets.
// We'll later see if more than 4 bytes.
offset2 = offset1
offset1 = s - t

if debugAsserts && s <= t {
panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
}

if debugAsserts && canRepeat && int(offset1) > len(src) {
panic("invalid offset")
}

// Extend the n-byte match as long as possible.
l := matched

// Extend backwards
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
s--
t--
l++
}

// Write our sequence
var seq seq
seq.litLen = uint32(s - nextEmit)
seq.matchLen = uint32(l - zstdMinMatch)
if seq.litLen > 0 {
blk.literals = append(blk.literals, src[nextEmit:s]...)
}
seq.offset = uint32(s-t) + 3
s += l
if debugSequences {
println("sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
nextEmit = s
if s >= sLimit {
break encodeLoop
}

// Index match start+1 (long) -> s - 1
index0 := s - l + 1
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
h0 := hash8(cv0, betterLongTableBits)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.markLongShardDirty(h0)
h1 := hash5(cv1, betterShortTableBits)
e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
e.markShortShardDirty(h1)
index0 += 2
}

cv = load6432(src, s)
if !canRepeat {
continue
}

// Check offset 2
for {
o2 := s - offset2
if load3232(src, o2) != uint32(cv) {
// Do regular search
break
}

// Store this, since we have it.
nextHashS := hash5(cv, betterShortTableBits)
nextHashL := hash8(cv, betterLongTableBits)

// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
l := 4 + e.matchlen(s+4, o2+4, src)

e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
e.markLongShardDirty(nextHashL)
e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)}
e.markShortShardDirty(nextHashS)
seq.matchLen = uint32(l) - zstdMinMatch
seq.litLen = 0

// Since litlen is always 0, this is offset 1.
seq.offset = 1
s += l
nextEmit = s
if debugSequences {
println("sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)

// Swap offset 1 and 2.
offset1, offset2 = offset2, offset1
if s >= sLimit {
// Finished
break encodeLoop
}
cv = load6432(src, s)
}
}

if int(nextEmit) < len(src) {
blk.literals = append(blk.literals, src[nextEmit:]...)
blk.extraLits = len(src) - int(nextEmit)
}
blk.recentOffsets[0] = uint32(offset1)
blk.recentOffsets[1] = uint32(offset2)
if debug {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
}

// ResetDict will reset and set a dictionary if not nil
func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) {
e.resetBase(d, singleBlock)
if d != nil {
panic("betterFastEncoder: Reset with dict")
}
}

// ResetDict will reset and set a dictionary if not nil
func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
e.resetBase(d, singleBlock)
if d == nil {
return
@ -557,6 +1070,7 @@ func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) {
}
}
e.lastDictID = d.id
e.allDirty = true
}

// Init or copy dict table
@ -585,11 +1099,72 @@ func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) {
}
}
e.lastDictID = d.id
e.allDirty = true
}
// Reset table to initial state
copy(e.longTable[:], e.dictLongTable)

e.cur = e.maxMatchOff
// Reset table to initial state
copy(e.table[:], e.dictTable)
{
dirtyShardCnt := 0
if !e.allDirty {
for i := range e.shortTableShardDirty {
if e.shortTableShardDirty[i] {
dirtyShardCnt++
}
}
}
const shardCnt = betterShortTableShardCnt
const shardSize = betterShortTableShardSize
if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
copy(e.table[:], e.dictTable)
for i := range e.shortTableShardDirty {
e.shortTableShardDirty[i] = false
}
} else {
for i := range e.shortTableShardDirty {
if !e.shortTableShardDirty[i] {
continue
}

copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
e.shortTableShardDirty[i] = false
}
}
}
{
dirtyShardCnt := 0
if !e.allDirty {
for i := range e.shortTableShardDirty {
if e.shortTableShardDirty[i] {
dirtyShardCnt++
}
}
}
const shardCnt = betterLongTableShardCnt
const shardSize = betterLongTableShardSize
if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
copy(e.longTable[:], e.dictLongTable)
for i := range e.longTableShardDirty {
e.longTableShardDirty[i] = false
}
} else {
for i := range e.longTableShardDirty {
if !e.longTableShardDirty[i] {
continue
}

copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize])
e.longTableShardDirty[i] = false
}
}
}
e.cur = e.maxMatchOff
e.allDirty = false
}

func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) {
e.longTableShardDirty[entryNum/betterLongTableShardSize] = true
}

func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) {
e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true
}

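Aside: the new *Dict encoders avoid re-copying the whole dictionary-initialized tables on every Reset by tracking which fixed-size shards were written since the last restore. The bookkeeping in miniature (a toy, not the vendored types):

    package main

    import "fmt"

    const (
        tableSize = 1 << 13
        shardCnt  = 1 << 6
        shardSize = tableSize / shardCnt
    )

    // shardedTable restores only dirty shards from the pristine dictionary
    // copy, like the shortTableShardDirty/longTableShardDirty logic above.
    type shardedTable struct {
        table [tableSize]uint32
        dict  []uint32 // pristine dictionary-initialized contents
        dirty [shardCnt]bool
    }

    func (t *shardedTable) set(i, v uint32) {
        t.table[i] = v
        t.dirty[i/shardSize] = true
    }

    func (t *shardedTable) reset() {
        for i, d := range t.dirty {
            if d {
                copy(t.table[i*shardSize:(i+1)*shardSize], t.dict[i*shardSize:(i+1)*shardSize])
                t.dirty[i] = false
            }
        }
    }

    func main() {
        t := &shardedTable{dict: make([]uint32, tableSize)}
        t.set(42, 7)
        t.reset()
        fmt.Println(t.table[42]) // 0 again, with only one shard copied
    }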
414 vendor/github.com/klauspost/compress/zstd/enc_dfast.go generated vendored
@ -11,6 +11,9 @@ const (
|
||||
dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
|
||||
dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
|
||||
|
||||
dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table
|
||||
dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard
|
||||
|
||||
dFastShortTableBits = tableBits // Bits used in the short match table
|
||||
dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
|
||||
dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
|
||||
@ -18,8 +21,14 @@ const (
|
||||
|
||||
type doubleFastEncoder struct {
|
||||
fastEncoder
|
||||
longTable [dFastLongTableSize]tableEntry
|
||||
dictLongTable []tableEntry
|
||||
longTable [dFastLongTableSize]tableEntry
|
||||
}
|
||||
|
||||
type doubleFastEncoderDict struct {
|
||||
fastEncoderDict
|
||||
longTable [dFastLongTableSize]tableEntry
|
||||
dictLongTable []tableEntry
|
||||
longTableShardDirty [dLongTableShardCnt]bool
|
||||
}
|
||||
|
||||
// Encode mimmics functionality in zstd_dfast.c
|
||||
@ -678,9 +687,379 @@ encodeLoop:
|
||||
}
|
||||
}
|
||||
|
||||
// Encode will encode the content, with a dictionary if initialized for it.
|
||||
func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) {
|
||||
const (
|
||||
// Input margin is the number of bytes we read (8)
|
||||
// and the maximum we will read ahead (2)
|
||||
inputMargin = 8 + 2
|
||||
minNonLiteralBlockSize = 16
|
||||
)
|
||||
|
||||
// Protect against e.cur wraparound.
|
||||
for e.cur >= bufferReset {
|
||||
if len(e.hist) == 0 {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = tableEntry{}
|
||||
}
|
||||
for i := range e.longTable[:] {
|
||||
e.longTable[i] = tableEntry{}
|
||||
}
|
||||
e.markAllShardsDirty()
|
||||
e.cur = e.maxMatchOff
|
||||
break
|
||||
}
|
||||
// Shift down everything in the table that isn't already too far away.
|
||||
minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
|
||||
for i := range e.table[:] {
|
||||
v := e.table[i].offset
|
||||
if v < minOff {
|
||||
v = 0
|
||||
} else {
|
||||
v = v - e.cur + e.maxMatchOff
|
||||
}
|
||||
e.table[i].offset = v
|
||||
}
|
||||
for i := range e.longTable[:] {
|
||||
v := e.longTable[i].offset
|
||||
if v < minOff {
|
||||
v = 0
|
||||
} else {
|
||||
v = v - e.cur + e.maxMatchOff
|
||||
}
|
||||
e.longTable[i].offset = v
|
||||
}
|
||||
e.markAllShardsDirty()
|
||||
e.cur = e.maxMatchOff
|
||||
break
|
||||
}
|
||||
|
||||
s := e.addBlock(src)
|
||||
blk.size = len(src)
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
blk.extraLits = len(src)
|
||||
blk.literals = blk.literals[:len(src)]
|
||||
copy(blk.literals, src)
|
||||
return
|
||||
}
|
||||
|
||||
// Override src
|
||||
src = e.hist
|
||||
sLimit := int32(len(src)) - inputMargin
|
||||
// stepSize is the number of bytes to skip on every main loop iteration.
|
||||
// It should be >= 1.
|
||||
const stepSize = 1
|
||||
|
||||
const kSearchStrength = 8
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
nextEmit := s
|
||||
cv := load6432(src, s)
|
||||
|
||||
// Relative offsets
|
||||
offset1 := int32(blk.recentOffsets[0])
|
||||
offset2 := int32(blk.recentOffsets[1])
|
||||
|
||||
addLiterals := func(s *seq, until int32) {
|
||||
if until == nextEmit {
|
||||
return
|
||||
}
|
||||
blk.literals = append(blk.literals, src[nextEmit:until]...)
|
||||
s.litLen = uint32(until - nextEmit)
|
||||
}
|
||||
if debug {
|
||||
println("recent offsets:", blk.recentOffsets)
|
||||
}
|
||||
|
||||
encodeLoop:
|
||||
for {
|
||||
var t int32
|
||||
// We allow the encoder to optionally turn off repeat offsets across blocks
|
||||
canRepeat := len(blk.sequences) > 2
|
||||
|
||||
for {
|
||||
if debugAsserts && canRepeat && offset1 == 0 {
|
||||
panic("offset0 was 0")
|
||||
}
|
||||
|
||||
nextHashS := hash5(cv, dFastShortTableBits)
|
||||
nextHashL := hash8(cv, dFastLongTableBits)
|
||||
candidateL := e.longTable[nextHashL]
|
||||
candidateS := e.table[nextHashS]
|
||||
|
||||
const repOff = 1
|
||||
repIndex := s - offset1 + repOff
|
||||
entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
|
||||
e.longTable[nextHashL] = entry
|
||||
e.markLongShardDirty(nextHashL)
|
||||
e.table[nextHashS] = entry
|
||||
e.markShardDirty(nextHashS)
|
||||
|
||||
if canRepeat {
|
||||
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
|
||||
// Consider history as well.
|
||||
var seq seq
|
||||
lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
|
||||
|
||||
seq.matchLen = uint32(lenght - zstdMinMatch)
|
||||
|
||||
// We might be able to match backwards.
|
||||
// Extend as long as we can.
|
||||
start := s + repOff
|
||||
// We end the search early, so we don't risk 0 literals
|
||||
// and have to do special offset treatment.
|
||||
startLimit := nextEmit + 1
|
||||
|
||||
tMin := s - e.maxMatchOff
|
||||
if tMin < 0 {
|
||||
tMin = 0
|
||||
}
|
||||
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
|
||||
repIndex--
|
||||
start--
|
||||
seq.matchLen++
|
||||
}
|
||||
addLiterals(&seq, start)
|
||||
|
||||
// rep 0
|
||||
seq.offset = 1
|
||||
if debugSequences {
|
||||
println("repeat sequence", seq, "next s:", s)
|
||||
}
|
||||
blk.sequences = append(blk.sequences, seq)
|
||||
s += lenght + repOff
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
if debug {
|
||||
println("repeat ended", s, lenght)
|
||||
|
||||
}
|
||||
break encodeLoop
|
||||
}
|
||||
cv = load6432(src, s)
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Find the offsets of our two matches.
|
||||
coffsetL := s - (candidateL.offset - e.cur)
|
||||
coffsetS := s - (candidateS.offset - e.cur)
|
||||
|
||||
// Check if we have a long match.
|
||||
if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
|
||||
// Found a long match, likely at least 8 bytes.
|
||||
// Reference encoder checks all 8 bytes, we only check 4,
|
||||
// but the likelihood of both the first 4 bytes and the hash matching should be enough.
|
||||
t = candidateL.offset - e.cur
|
||||
if debugAsserts && s <= t {
|
||||
panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
|
||||
}
|
||||
if debugAsserts && s-t > e.maxMatchOff {
|
||||
panic("s - t >e.maxMatchOff")
|
||||
}
|
||||
if debugMatches {
|
||||
println("long match")
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// Check if we have a short match.
|
||||
if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
|
||||
// found a regular match
|
||||
// See if we can find a long match at s+1
|
||||
const checkAt = 1
|
||||
cv := load6432(src, s+checkAt)
|
||||
nextHashL = hash8(cv, dFastLongTableBits)
|
||||
candidateL = e.longTable[nextHashL]
|
||||
coffsetL = s - (candidateL.offset - e.cur) + checkAt
|
||||
|
||||
// We can store it, since we have at least a 4 byte match.
|
||||
e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
|
||||
e.markLongShardDirty(nextHashL)
|
||||
if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
|
||||
// Found a long match, likely at least 8 bytes.
|
||||
// Reference encoder checks all 8 bytes, we only check 4,
|
||||
// but the likelihood of both the first 4 bytes and the hash matching should be enough.
|
||||
t = candidateL.offset - e.cur
|
||||
s += checkAt
|
||||
if debugMatches {
|
||||
println("long match (after short)")
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
t = candidateS.offset - e.cur
|
||||
if debugAsserts && s <= t {
|
||||
panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
|
||||
}
|
||||
if debugAsserts && s-t > e.maxMatchOff {
|
||||
panic("s - t >e.maxMatchOff")
|
||||
}
|
||||
if debugAsserts && t < 0 {
|
||||
panic("t<0")
|
||||
}
|
||||
if debugMatches {
|
||||
println("short match")
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// No match found, move forward in input.
|
||||
s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
|
||||
if s >= sLimit {
|
||||
break encodeLoop
|
||||
}
|
||||
cv = load6432(src, s)
|
||||
}
|
||||
|
||||
// A 4-byte match has been found. Update recent offsets.
|
||||
// We'll later see if more than 4 bytes.
|
||||
offset2 = offset1
|
||||
offset1 = s - t
|
||||
|
||||
if debugAsserts && s <= t {
|
||||
panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
|
||||
}
|
||||
|
||||
if debugAsserts && canRepeat && int(offset1) > len(src) {
|
||||
panic("invalid offset")
|
||||
}
|
||||
|
||||
// Extend the 4-byte match as long as possible.
|
||||
l := e.matchlen(s+4, t+4, src) + 4
|
||||
|
||||
// Extend backwards
|
||||
tMin := s - e.maxMatchOff
|
||||
if tMin < 0 {
|
||||
tMin = 0
|
||||
}
|
||||
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
|
||||
s--
|
||||
t--
|
||||
l++
|
||||
}
|
||||
|
||||
// Write our sequence
|
||||
var seq seq
|
||||
seq.litLen = uint32(s - nextEmit)
|
||||
seq.matchLen = uint32(l - zstdMinMatch)
|
||||
if seq.litLen > 0 {
|
||||
blk.literals = append(blk.literals, src[nextEmit:s]...)
|
||||
}
|
||||
seq.offset = uint32(s-t) + 3
|
||||
s += l
|
||||
if debugSequences {
|
||||
println("sequence", seq, "next s:", s)
|
||||
}
|
||||
blk.sequences = append(blk.sequences, seq)
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
break encodeLoop
|
||||
}
|
||||
|
||||
// Index match start+1 (long) and start+2 (short)
|
||||
index0 := s - l + 1
|
||||
// Index match end-2 (long) and end-1 (short)
|
||||
index1 := s - 2
|
||||
|
||||
cv0 := load6432(src, index0)
|
||||
cv1 := load6432(src, index1)
|
||||
te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
|
||||
te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
|
||||
longHash1 := hash8(cv0, dFastLongTableBits)
|
||||
longHash2 := hash8(cv0, dFastLongTableBits)
|
||||
e.longTable[longHash1] = te0
e.longTable[longHash2] = te1
e.markLongShardDirty(longHash1)
e.markLongShardDirty(longHash2)
cv0 >>= 8
cv1 >>= 8
te0.offset++
te1.offset++
te0.val = uint32(cv0)
te1.val = uint32(cv1)
hashVal1 := hash5(cv0, dFastShortTableBits)
hashVal2 := hash5(cv1, dFastShortTableBits)
e.table[hashVal1] = te0
e.markShardDirty(hashVal1)
e.table[hashVal2] = te1
e.markShardDirty(hashVal2)

cv = load6432(src, s)

if !canRepeat {
continue
}

// Check offset 2
for {
o2 := s - offset2
if load3232(src, o2) != uint32(cv) {
// Do regular search
break
}

// Store this, since we have it.
nextHashS := hash5(cv, dFastShortTableBits)
nextHashL := hash8(cv, dFastLongTableBits)

// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
l := 4 + e.matchlen(s+4, o2+4, src)

entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
e.longTable[nextHashL] = entry
e.markLongShardDirty(nextHashL)
e.table[nextHashS] = entry
e.markShardDirty(nextHashS)
seq.matchLen = uint32(l) - zstdMinMatch
seq.litLen = 0

// Since litlen is always 0, this is offset 1.
seq.offset = 1
s += l
nextEmit = s
if debugSequences {
println("sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)

// Swap offset 1 and 2.
offset1, offset2 = offset2, offset1
if s >= sLimit {
// Finished
break encodeLoop
}
cv = load6432(src, s)
}
}

if int(nextEmit) < len(src) {
blk.literals = append(blk.literals, src[nextEmit:]...)
blk.extraLits = len(src) - int(nextEmit)
}
blk.recentOffsets[0] = uint32(offset1)
blk.recentOffsets[1] = uint32(offset2)
if debug {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
// If we encoded more than 64K mark all dirty.
if len(src) > 64<<10 {
e.markAllShardsDirty()
}
}

// Reset will reset and set a dictionary if not nil
func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) {
e.fastEncoder.Reset(d, singleBlock)
if d != nil {
panic("doubleFastEncoder: Reset with dict not supported")
}
}

// Reset will reset and set a dictionary if not nil
func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
allDirty := e.allDirty
e.fastEncoderDict.Reset(d, singleBlock)
if d == nil {
return
}
@ -706,8 +1085,37 @@ func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) {
}
}
e.lastDictID = d.id
e.allDirty = true
}
// Reset table to initial state
e.cur = e.maxMatchOff
copy(e.longTable[:], e.dictLongTable)

dirtyShardCnt := 0
if !allDirty {
for i := range e.longTableShardDirty {
if e.longTableShardDirty[i] {
dirtyShardCnt++
}
}
}

if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
copy(e.longTable[:], e.dictLongTable)
for i := range e.longTableShardDirty {
e.longTableShardDirty[i] = false
}
return
}
for i := range e.longTableShardDirty {
if !e.longTableShardDirty[i] {
continue
}

copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
e.longTableShardDirty[i] = false
}
}

func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) {
e.longTableShardDirty[entryNum/dLongTableShardSize] = true
}
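Worth pausing on the dirty-shard bookkeeping this hunk introduces: rather than restoring the whole hash table from the dictionary snapshot on every Reset, the encoder records which shards were written and copies only those back, falling back to a full copy once more than half are dirty. A minimal, self-contained sketch of the idea (illustrative names, not the vendored API):

```go
package main

import "fmt"

// Sketch of sharded dirty tracking: the table is split into fixed-size
// shards, a write marks its shard dirty, and reset() restores only the
// dirty shards from the pristine dictionary copy.
const (
	shardCnt  = 8
	shardSize = 4
)

type shardedTable struct {
	table [shardCnt * shardSize]int
	dict  [shardCnt * shardSize]int // snapshot taken when the dict was loaded
	dirty [shardCnt]bool
}

// markDirty records which shard a table write touched.
func (t *shardedTable) markDirty(entry int) {
	t.dirty[entry/shardSize] = true
}

// reset restores only dirty shards, then clears the dirty flags.
func (t *shardedTable) reset() {
	for i, d := range t.dirty {
		if !d {
			continue
		}
		copy(t.table[i*shardSize:(i+1)*shardSize], t.dict[i*shardSize:(i+1)*shardSize])
		t.dirty[i] = false
	}
}

func main() {
	var t shardedTable
	t.table[5] = 42
	t.markDirty(5)
	t.reset()
	fmt.Println(t.table[5]) // 0 again; only shard 1 was copied back
}
```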
371
vendor/github.com/klauspost/compress/zstd/enc_fast.go
generated
vendored
@ -11,9 +11,11 @@ import (
)

const (
tableBits = 15 // Bits used in the table
tableSize = 1 << tableBits // Size of the table
tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
tableBits = 15 // Bits used in the table
tableSize = 1 << tableBits // Size of the table
tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table
tableShardSize = tableSize / tableShardCnt // Size of an individual shard
tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
maxMatchLength = 131074
)

@ -24,8 +26,14 @@ type tableEntry struct {

type fastEncoder struct {
fastBase
table [tableSize]tableEntry
dictTable []tableEntry
table [tableSize]tableEntry
}

type fastEncoderDict struct {
fastEncoder
dictTable []tableEntry
tableShardDirty [tableShardCnt]bool
allDirty bool
}

// Encode mimics functionality in zstd_fast.c
@ -617,8 +625,322 @@ encodeLoop:
}
}

// Encode will encode the content, with a dictionary if initialized for it.
func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) {
const (
inputMargin = 8
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
if e.allDirty || len(src) > 32<<10 {
e.fastEncoder.Encode(blk, src)
e.allDirty = true
return
}
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
e.cur = e.maxMatchOff
break
}
// Shift down everything in the table that isn't already too far away.
minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
for i := range e.table[:] {
v := e.table[i].offset
if v < minOff {
v = 0
} else {
v = v - e.cur + e.maxMatchOff
}
e.table[i].offset = v
}
e.cur = e.maxMatchOff
break
}

s := e.addBlock(src)
blk.size = len(src)
if len(src) < minNonLiteralBlockSize {
blk.extraLits = len(src)
blk.literals = blk.literals[:len(src)]
copy(blk.literals, src)
return
}

// Override src
src = e.hist
sLimit := int32(len(src)) - inputMargin
// stepSize is the number of bytes to skip on every main loop iteration.
// It should be >= 2.
const stepSize = 2

// TEMPLATE
const hashLog = tableBits
// seems global, but would be nice to tweak.
const kSearchStrength = 7

// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s
cv := load6432(src, s)

// Relative offsets
offset1 := int32(blk.recentOffsets[0])
offset2 := int32(blk.recentOffsets[1])

addLiterals := func(s *seq, until int32) {
if until == nextEmit {
return
}
blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit)
}
if debug {
println("recent offsets:", blk.recentOffsets)
}

encodeLoop:
for {
// t will contain the match offset when we find one.
// When exiting the search loop, we have already checked 4 bytes.
var t int32

// We will not use repeat offsets across blocks.
// By not using them for the first 3 matches
canRepeat := len(blk.sequences) > 2

for {
if debugAsserts && canRepeat && offset1 == 0 {
panic("offset0 was 0")
}

nextHash := hash6(cv, hashLog)
nextHash2 := hash6(cv>>8, hashLog)
candidate := e.table[nextHash]
candidate2 := e.table[nextHash2]
repIndex := s - offset1 + 2

e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
e.markShardDirty(nextHash)
e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
e.markShardDirty(nextHash2)

if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
// Consider history as well.
var seq seq
var length int32
// length = 4 + e.matchlen(s+6, repIndex+4, src)
{
a := src[s+6:]
b := src[repIndex+4:]
endI := len(a) & (math.MaxInt32 - 7)
length = int32(endI) + 4
for i := 0; i < endI; i += 8 {
if diff := load64(a, i) ^ load64(b, i); diff != 0 {
length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
break
}
}
}

seq.matchLen = uint32(length - zstdMinMatch)

// We might be able to match backwards.
// Extend as long as we can.
start := s + 2
// We end the search early, so we don't risk 0 literals
// and have to do special offset treatment.
startLimit := nextEmit + 1

sMin := s - e.maxMatchOff
if sMin < 0 {
sMin = 0
}
for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
repIndex--
start--
seq.matchLen++
}
addLiterals(&seq, start)

// rep 0
seq.offset = 1
if debugSequences {
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
s += length + 2
nextEmit = s
if s >= sLimit {
if debug {
println("repeat ended", s, length)

}
break encodeLoop
}
cv = load6432(src, s)
continue
}
coffset0 := s - (candidate.offset - e.cur)
coffset1 := s - (candidate2.offset - e.cur) + 1
if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
// found a regular match
t = candidate.offset - e.cur
if debugAsserts && s <= t {
panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
}
if debugAsserts && s-t > e.maxMatchOff {
panic("s - t >e.maxMatchOff")
}
break
}

if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
// found a regular match
t = candidate2.offset - e.cur
s++
if debugAsserts && s <= t {
panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
}
if debugAsserts && s-t > e.maxMatchOff {
panic("s - t >e.maxMatchOff")
}
if debugAsserts && t < 0 {
panic("t<0")
}
break
}
s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
if s >= sLimit {
break encodeLoop
}
cv = load6432(src, s)
}
// A 4-byte match has been found. We'll later see if more than 4 bytes.
offset2 = offset1
offset1 = s - t

if debugAsserts && s <= t {
panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
}

if debugAsserts && canRepeat && int(offset1) > len(src) {
panic("invalid offset")
}

// Extend the 4-byte match as long as possible.
//l := e.matchlen(s+4, t+4, src) + 4
var l int32
{
a := src[s+4:]
b := src[t+4:]
endI := len(a) & (math.MaxInt32 - 7)
l = int32(endI) + 4
for i := 0; i < endI; i += 8 {
if diff := load64(a, i) ^ load64(b, i); diff != 0 {
l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
break
}
}
}

// Extend backwards
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
s--
t--
l++
}

// Write our sequence.
var seq seq
seq.litLen = uint32(s - nextEmit)
seq.matchLen = uint32(l - zstdMinMatch)
if seq.litLen > 0 {
blk.literals = append(blk.literals, src[nextEmit:s]...)
}
// Don't use repeat offsets
seq.offset = uint32(s-t) + 3
s += l
if debugSequences {
println("sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
nextEmit = s
if s >= sLimit {
break encodeLoop
}
cv = load6432(src, s)

// Check offset 2
if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
//l := 4 + e.matchlen(s+4, o2+4, src)
var l int32
{
a := src[s+4:]
b := src[o2+4:]
endI := len(a) & (math.MaxInt32 - 7)
l = int32(endI) + 4
for i := 0; i < endI; i += 8 {
if diff := load64(a, i) ^ load64(b, i); diff != 0 {
l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
break
}
}
}

// Store this, since we have it.
nextHash := hash6(cv, hashLog)
e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
e.markShardDirty(nextHash)
seq.matchLen = uint32(l) - zstdMinMatch
seq.litLen = 0
// Since litlen is always 0, this is offset 1.
seq.offset = 1
s += l
nextEmit = s
if debugSequences {
println("sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)

// Swap offset 1 and 2.
offset1, offset2 = offset2, offset1
if s >= sLimit {
break encodeLoop
}
// Prepare next loop.
cv = load6432(src, s)
}
}

if int(nextEmit) < len(src) {
blk.literals = append(blk.literals, src[nextEmit:]...)
blk.extraLits = len(src) - int(nextEmit)
}
blk.recentOffsets[0] = uint32(offset1)
blk.recentOffsets[1] = uint32(offset2)
if debug {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
}

// Reset will reset and set a dictionary if not nil
func (e *fastEncoder) Reset(d *dict, singleBlock bool) {
e.resetBase(d, singleBlock)
if d != nil {
panic("fastEncoder: Reset with dict")
}
}

// Reset will reset and set a dictionary if not nil
func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
e.resetBase(d, singleBlock)
if d == nil {
return
@ -653,9 +975,44 @@ func (e *fastEncoder) Reset(d *dict, singleBlock bool) {
}
}
e.lastDictID = d.id
e.allDirty = true
}

e.cur = e.maxMatchOff
// Reset table to initial state
copy(e.table[:], e.dictTable)
dirtyShardCnt := 0
if !e.allDirty {
for i := range e.tableShardDirty {
if e.tableShardDirty[i] {
dirtyShardCnt++
}
}
}

const shardCnt = tableShardCnt
const shardSize = tableShardSize
if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
copy(e.table[:], e.dictTable)
for i := range e.tableShardDirty {
e.tableShardDirty[i] = false
}
e.allDirty = false
return
}
for i := range e.tableShardDirty {
if !e.tableShardDirty[i] {
continue
}

copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
e.tableShardDirty[i] = false
}
e.allDirty = false
}

func (e *fastEncoderDict) markAllShardsDirty() {
e.allDirty = true
}

func (e *fastEncoderDict) markShardDirty(entryNum uint32) {
e.tableShardDirty[entryNum/tableShardSize] = true
}
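All the inlined blocks above that replace `e.matchlen` calls use the same trick: XOR two 8-byte loads and let `bits.TrailingZeros64` locate the first differing byte, since each fully matching byte contributes 8 trailing zero bits. A standalone sketch of that technique (a hypothetical helper, not the vendored function):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen compares 8 bytes at a time; on the first mismatch, the XOR's
// trailing zero count (divided by 8) gives the exact matching byte count.
// This is illustrative; the vendored code also folds in bounds handling.
func matchLen(a, b []byte) int {
	n := 0
	for len(a) >= 8 && len(b) >= 8 {
		x := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
		if x != 0 {
			// Each fully matching byte contributes 8 trailing zero bits.
			return n + bits.TrailingZeros64(x)>>3
		}
		n += 8
		a, b = a[8:], b[8:]
	}
	for len(a) > 0 && len(b) > 0 && a[0] == b[0] {
		n++
		a, b = a[1:], b[1:]
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("hello, zstd!!"), []byte("hello, zstx!!"))) // 10
}
```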
10
vendor/github.com/klauspost/compress/zstd/encoder.go
generated
vendored
@ -106,7 +106,7 @@ func (e *Encoder) Reset(w io.Writer) {
s.encoder = e.o.encoder()
}
if s.writing == nil {
s.writing = &blockEnc{}
s.writing = &blockEnc{lowMem: e.o.lowMem}
s.writing.init()
}
s.writing.initNewEncode()
@ -176,6 +176,12 @@ func (e *Encoder) nextBlock(final bool) error {
}
if !s.headerWritten {
// If we have a single block encode, do a sync compression.
if final && len(s.filling) == 0 && !e.o.fullZero {
s.headerWritten = true
s.fullFrameWritten = true
s.eofWritten = true
return nil
}
if final && len(s.filling) > 0 {
s.current = e.EncodeAll(s.filling, s.current[:0])
var n2 int
@ -471,7 +477,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
}

// If less than 1MB, allocate a buffer up front.
if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 {
if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem {
dst = make([]byte, 0, len(src))
}
dst, err := fh.appendTo(dst)
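The nextBlock change routes a final, fully buffered block through EncodeAll instead of the streaming path, and EncodeAll now skips its up-front buffer when lowMem is set. For callers, EncodeAll is also the documented one-shot API; a minimal usage sketch:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// An encoder created once can be reused; passing nil as the writer is
	// fine when only EncodeAll is used.
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	src := []byte("some payload small enough for one-shot compression")
	// EncodeAll appends a complete compressed frame to dst (nil here).
	dst := enc.EncodeAll(src, nil)
	fmt.Println(len(src), "->", len(dst), "bytes")
}
```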
38
vendor/github.com/klauspost/compress/zstd/encoder_options.go
generated
vendored
@ -24,12 +24,12 @@ type encoderOptions struct {
allLitEntropy bool
customWindow bool
customALEntropy bool
lowMem bool
dict *dict
}

func (o *encoderOptions) setDefault() {
*o = encoderOptions{
// use less ram: true for now, but may change.
concurrent: runtime.GOMAXPROCS(0),
crc: true,
single: nil,
@ -37,20 +37,31 @@ func (o *encoderOptions) setDefault() {
windowSize: 8 << 20,
level: SpeedDefault,
allLitEntropy: true,
lowMem: false,
}
}

// encoder returns an encoder with the selected options.
func (o encoderOptions) encoder() encoder {
switch o.level {
case SpeedDefault:
return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}}}
case SpeedBetterCompression:
return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}}
case SpeedBestCompression:
return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}}
case SpeedFastest:
return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}}
if o.dict != nil {
return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
}
return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}

case SpeedDefault:
if o.dict != nil {
return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}}
}
return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
case SpeedBetterCompression:
if o.dict != nil {
return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
}
return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
case SpeedBestCompression:
return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
}
panic("unknown compression level")
}
@ -276,6 +287,17 @@ func WithSingleSegment(b bool) EOption {
}
}

// WithLowerEncoderMem will, in some cases, trade less memory usage for
// slower encoding speed.
// This will not change the window size which is the primary function for reducing
// memory usage. See WithWindowSize.
func WithLowerEncoderMem(b bool) EOption {
return func(o *encoderOptions) error {
o.lowMem = b
return nil
}
}

// WithEncoderDict allows registering a dictionary that will be used for the encode.
// The encoder *may* choose to use no dictionary instead for certain payloads.
func WithEncoderDict(dict []byte) EOption {
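Both options touched by this hunk are ordinary EOptions and compose with the existing ones. A hedged usage sketch — `dictionary.bin` is a stand-in path, and a real zstd dictionary blob is assumed:

```go
package main

import (
	"io/ioutil"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// dictBytes must be a real zstd dictionary; an invalid blob makes
	// NewWriter return an error, which we surface here.
	dictBytes, err := ioutil.ReadFile("dictionary.bin")
	if err != nil {
		panic(err)
	}
	enc, err := zstd.NewWriter(os.Stdout,
		zstd.WithEncoderLevel(zstd.SpeedFastest), // selects the dict-aware encoder when a dict is set
		zstd.WithEncoderDict(dictBytes),
		zstd.WithLowerEncoderMem(true), // the new lowMem knob
	)
	if err != nil {
		panic(err)
	}
	defer enc.Close()
	enc.Write([]byte("payload that resembles the dictionary's training data"))
}
```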
12
vendor/github.com/klauspost/compress/zstd/fse_encoder.go
generated
vendored
@ -97,7 +97,7 @@ func (s *fseEncoder) prepare() (*fseEncoder, error) {
func (s *fseEncoder) allocCtable() {
tableSize := 1 << s.actualTableLog
// get tableSymbol that is big enough.
if cap(s.ct.tableSymbol) < int(tableSize) {
if cap(s.ct.tableSymbol) < tableSize {
s.ct.tableSymbol = make([]byte, tableSize)
}
s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
@ -202,13 +202,13 @@ func (s *fseEncoder) buildCTable() error {
case 0:
case -1, 1:
symbolTT[i].deltaNbBits = tl
symbolTT[i].deltaFindState = int16(total - 1)
symbolTT[i].deltaFindState = total - 1
total++
default:
maxBitsOut := uint32(tableLog) - highBit(uint32(v-1))
minStatePlus := uint32(v) << maxBitsOut
symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
symbolTT[i].deltaFindState = int16(total - v)
symbolTT[i].deltaFindState = total - v
total += v
}
}
@ -353,8 +353,8 @@ func (s *fseEncoder) normalizeCount2(length int) error {
distributed uint32
total = uint32(length)
tableLog = s.actualTableLog
lowThreshold = uint32(total >> tableLog)
lowOne = uint32((total * 3) >> (tableLog + 1))
lowThreshold = total >> tableLog
lowOne = (total * 3) >> (tableLog + 1)
)
for i, cnt := range s.count[:s.symbolLen] {
if cnt == 0 {
@ -379,7 +379,7 @@ func (s *fseEncoder) normalizeCount2(length int) error {

if (total / toDistribute) > lowOne {
// risk of rounding to zero
lowOne = uint32((total * 3) / (toDistribute * 2))
lowOne = (total * 3) / (toDistribute * 2)
for i, cnt := range s.count[:s.symbolLen] {
if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
s.norm[i] = 1
2
vendor/github.com/klauspost/compress/zstd/snappy.go
generated
vendored
@ -417,7 +417,7 @@ var crcTable = crc32.MakeTable(crc32.Castagnoli)
// https://github.com/google/snappy/blob/master/framing_format.txt
func snappyCRC(b []byte) uint32 {
c := crc32.Update(0, crcTable, b)
return uint32(c>>15|c<<17) + 0xa282ead8
return c>>15 | c<<17 + 0xa282ead8
}

// snappyDecodedLen returns the length of the decoded block and the number of bytes
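The only change to snappy.go drops a redundant uint32 conversion; the checksum itself is the masked Castagnoli CRC from the snappy framing format (rotate right 15 bits, then add a constant). A standalone restatement, with explicit parentheses for readability:

```go
package main

import (
	"fmt"
	"hash/crc32"
)

var crcTable = crc32.MakeTable(crc32.Castagnoli)

// maskedCRC implements the checksum masking from the snappy framing
// format: c>>15 | c<<17 is a 32-bit rotate right by 15, and the magic
// constant 0xa282ead8 is added afterwards.
func maskedCRC(b []byte) uint32 {
	c := crc32.Update(0, crcTable, b)
	return (c>>15 | c<<17) + 0xa282ead8
}

func main() {
	fmt.Printf("%08x\n", maskedCRC([]byte("snappy framed data")))
}
```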
5
vendor/github.com/vbauerster/mpb/v5/.gitignore
generated
vendored
@ -1,5 +0,0 @@
# Test binary, build with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out
11
vendor/github.com/vbauerster/mpb/v5/.travis.yml
generated
vendored
@ -1,11 +0,0 @@
language: go
arch:
- amd64
- ppc64le

go:
- 1.14.x

script:
- go test -race ./...
- for i in _examples/*/; do go build $i/*.go || exit 1; done
118
vendor/github.com/vbauerster/mpb/v5/README.md
generated
vendored
@ -1,118 +0,0 @@
# Multi Progress Bar

[GoDoc](https://godoc.org/github.com/vbauerster/mpb)
[Build Status](https://travis-ci.org/vbauerster/mpb)
[Go Report Card](https://goreportcard.com/report/github.com/vbauerster/mpb)

**mpb** is a Go lib for rendering progress bars in terminal applications.

## Features

* __Multiple Bars__: Multiple progress bars are supported
* __Dynamic Total__: Set total while bar is running
* __Dynamic Add/Remove__: Dynamically add or remove bars
* __Cancellation__: Cancel whole rendering process
* __Predefined Decorators__: Elapsed time, [ewma](https://github.com/VividCortex/ewma) based ETA, Percentage, Bytes counter
* __Decorator's width sync__: Synchronized decorator's width among multiple bars

## Usage

#### [Rendering single bar](_examples/singleBar/main.go)
```go
package main

import (
"math/rand"
"time"

"github.com/vbauerster/mpb/v5"
"github.com/vbauerster/mpb/v5/decor"
)

func main() {
// initialize progress container, with custom width
p := mpb.New(mpb.WithWidth(64))

total := 100
name := "Single Bar:"
// adding a single bar, which will inherit container's width
bar := p.AddBar(int64(total),
// override DefaultBarStyle, which is "[=>-]<+"
mpb.BarStyle("╢▌▌░╟"),
mpb.PrependDecorators(
// display our name with one space on the right
decor.Name(name, decor.WC{W: len(name) + 1, C: decor.DidentRight}),
// replace ETA decorator with "done" message, OnComplete event
decor.OnComplete(
decor.AverageETA(decor.ET_STYLE_GO, decor.WC{W: 4}), "done",
),
),
mpb.AppendDecorators(decor.Percentage()),
)
// simulating some work
max := 100 * time.Millisecond
for i := 0; i < total; i++ {
time.Sleep(time.Duration(rand.Intn(10)+1) * max / 10)
bar.Increment()
}
// wait for our bar to complete and flush
p.Wait()
}
```

#### [Rendering multiple bars](_examples/multiBars/main.go)
```go
var wg sync.WaitGroup
// pass &wg (optional), so p will wait for it eventually
p := mpb.New(mpb.WithWaitGroup(&wg))
total, numBars := 100, 3
wg.Add(numBars)

for i := 0; i < numBars; i++ {
name := fmt.Sprintf("Bar#%d:", i)
bar := p.AddBar(int64(total),
mpb.PrependDecorators(
// simple name decorator
decor.Name(name),
// decor.DSyncWidth bit enables column width synchronization
decor.Percentage(decor.WCSyncSpace),
),
mpb.AppendDecorators(
// replace ETA decorator with "done" message, OnComplete event
decor.OnComplete(
// ETA decorator with ewma age of 60
decor.EwmaETA(decor.ET_STYLE_GO, 60), "done",
),
),
)
// simulating some work
go func() {
defer wg.Done()
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
max := 100 * time.Millisecond
for i := 0; i < total; i++ {
// start variable is solely for EWMA calculation
// EWMA's unit of measure is an iteration's duration
start := time.Now()
time.Sleep(time.Duration(rng.Intn(10)+1) * max / 10)
bar.Increment()
// we need to call DecoratorEwmaUpdate to fulfill ewma decorator's contract
bar.DecoratorEwmaUpdate(time.Since(start))
}
}()
}
// Waiting for passed &wg and for all bars to complete and flush
p.Wait()
```

#### [Dynamic total](_examples/dynTotal/main.go)

#### [Complex example](_examples/complex/main.go)

#### [Bytes counters](_examples/io/main.go)
24
vendor/github.com/vbauerster/mpb/v5/UNLICENSE
generated
vendored
@ -1,24 +0,0 @@
This is free and unencumbered software released into the public domain.

Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.

In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

For more information, please refer to <http://unlicense.org/>
490
vendor/github.com/vbauerster/mpb/v5/bar.go
generated
vendored
@ -1,490 +0,0 @@
package mpb

import (
"bytes"
"context"
"fmt"
"io"
"log"
"runtime/debug"
"strings"
"time"

"github.com/acarl005/stripansi"
"github.com/mattn/go-runewidth"
"github.com/vbauerster/mpb/v5/decor"
)

// Bar represents a progress Bar.
type Bar struct {
priority int // used by heap
index int // used by heap

extendedLines int
toShutdown bool
toDrop bool
noPop bool
hasEwmaDecorators bool
operateState chan func(*bState)
frameCh chan io.Reader
syncTableCh chan [][]chan int
completed chan bool

// cancel is called either by user or on complete event
cancel func()
// done is closed after cacheState is assigned
done chan struct{}
// cacheState is populated, right after close(shutdown)
cacheState *bState

container *Progress
dlogger *log.Logger
recoveredPanic interface{}
}

type extFunc func(in io.Reader, reqWidth int, st decor.Statistics) (out io.Reader, lines int)

type bState struct {
id int
priority int
reqWidth int
total int64
current int64
refill int64
lastN int64
iterated bool
trimSpace bool
toComplete bool
completeFlushed bool
ignoreComplete bool
dropOnComplete bool
noPop bool
aDecorators []decor.Decorator
pDecorators []decor.Decorator
averageDecorators []decor.AverageDecorator
ewmaDecorators []decor.EwmaDecorator
shutdownListeners []decor.ShutdownListener
bufP, bufB, bufA *bytes.Buffer
filler BarFiller
middleware func(BarFiller) BarFiller
extender extFunc

// runningBar is a key for *pState.parkedBars
runningBar *Bar

debugOut io.Writer
}

func newBar(container *Progress, bs *bState) *Bar {
logPrefix := fmt.Sprintf("%sbar#%02d ", container.dlogger.Prefix(), bs.id)
ctx, cancel := context.WithCancel(container.ctx)

bar := &Bar{
container: container,
priority: bs.priority,
toDrop: bs.dropOnComplete,
noPop: bs.noPop,
operateState: make(chan func(*bState)),
frameCh: make(chan io.Reader, 1),
syncTableCh: make(chan [][]chan int, 1),
completed: make(chan bool, 1),
done: make(chan struct{}),
cancel: cancel,
dlogger: log.New(bs.debugOut, logPrefix, log.Lshortfile),
}

go bar.serve(ctx, bs)
return bar
}

// ProxyReader wraps r with metrics required for progress tracking.
// Panics if r is nil.
func (b *Bar) ProxyReader(r io.Reader) io.ReadCloser {
if r == nil {
panic("expected non nil io.Reader")
}
return newProxyReader(r, b)
}

// ID returns id of the bar.
func (b *Bar) ID() int {
result := make(chan int)
select {
case b.operateState <- func(s *bState) { result <- s.id }:
return <-result
case <-b.done:
return b.cacheState.id
}
}

// Current returns bar's current number, in other words sum of all increments.
func (b *Bar) Current() int64 {
result := make(chan int64)
select {
case b.operateState <- func(s *bState) { result <- s.current }:
return <-result
case <-b.done:
return b.cacheState.current
}
}

// SetRefill fills bar with refill rune up to amount argument.
// Given default bar style is "[=>-]<+", refill rune is '+'.
// To set bar style use mpb.BarStyle(string) BarOption.
func (b *Bar) SetRefill(amount int64) {
select {
case b.operateState <- func(s *bState) {
s.refill = amount
}:
case <-b.done:
}
}

// TraverseDecorators traverses all available decorators and calls cb func on each.
func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) {
select {
case b.operateState <- func(s *bState) {
for _, decorators := range [...][]decor.Decorator{
s.pDecorators,
s.aDecorators,
} {
for _, d := range decorators {
cb(extractBaseDecorator(d))
}
}
}:
case <-b.done:
}
}

// SetTotal sets total dynamically.
// If total is less than or equal to zero it takes progress' current value.
// A complete flag enables or disables complete event on `current >= total`.
func (b *Bar) SetTotal(total int64, complete bool) {
select {
case b.operateState <- func(s *bState) {
s.ignoreComplete = !complete
if total <= 0 {
s.total = s.current
} else {
s.total = total
}
if !s.ignoreComplete && !s.toComplete {
s.current = s.total
s.toComplete = true
go b.refreshTillShutdown()
}
}:
case <-b.done:
}
}

// SetCurrent sets progress' current to an arbitrary value.
// Setting a negative value will cause a panic.
func (b *Bar) SetCurrent(current int64) {
select {
case b.operateState <- func(s *bState) {
s.iterated = true
s.lastN = current - s.current
s.current = current
if !s.ignoreComplete && s.current >= s.total {
s.current = s.total
s.toComplete = true
go b.refreshTillShutdown()
}
}:
case <-b.done:
}
}

// Increment is a shorthand for b.IncrInt64(1).
func (b *Bar) Increment() {
b.IncrInt64(1)
}

// IncrBy is a shorthand for b.IncrInt64(int64(n)).
func (b *Bar) IncrBy(n int) {
b.IncrInt64(int64(n))
}

// IncrInt64 increments progress by amount of n.
func (b *Bar) IncrInt64(n int64) {
select {
case b.operateState <- func(s *bState) {
s.iterated = true
s.lastN = n
s.current += n
if !s.ignoreComplete && s.current >= s.total {
s.current = s.total
s.toComplete = true
go b.refreshTillShutdown()
}
}:
case <-b.done:
}
}

// DecoratorEwmaUpdate updates all EWMA based decorators. Should be
// called on each iteration, because EWMA's unit of measure is an
// iteration's duration. Panics if called before *Bar.Incr... family
// methods.
func (b *Bar) DecoratorEwmaUpdate(dur time.Duration) {
select {
case b.operateState <- func(s *bState) {
ewmaIterationUpdate(false, s, dur)
}:
case <-b.done:
ewmaIterationUpdate(true, b.cacheState, dur)
}
}

// DecoratorAverageAdjust adjusts all average based decorators. Call
// if you need to adjust start time of all average based decorators
// or after progress resume.
func (b *Bar) DecoratorAverageAdjust(start time.Time) {
select {
case b.operateState <- func(s *bState) {
for _, d := range s.averageDecorators {
d.AverageAdjust(start)
}
}:
case <-b.done:
}
}

// SetPriority changes bar's order among multiple bars. Zero is highest
// priority, i.e. bar will be on top. If you don't need to set priority
// dynamically, better use BarPriority option.
func (b *Bar) SetPriority(priority int) {
select {
case <-b.done:
default:
b.container.setBarPriority(b, priority)
}
}

// Abort interrupts bar's running goroutine. Call this, if you'd like
// to stop/remove bar before completion event. It has no effect after
// completion event. If drop is true bar will be removed as well.
func (b *Bar) Abort(drop bool) {
select {
case <-b.done:
default:
if drop {
b.container.dropBar(b)
}
b.cancel()
}
}

// Completed reports whether the bar is in completed state.
func (b *Bar) Completed() bool {
select {
case b.operateState <- func(s *bState) { b.completed <- s.toComplete }:
return <-b.completed
case <-b.done:
return true
}
}

func (b *Bar) serve(ctx context.Context, s *bState) {
defer b.container.bwg.Done()
for {
select {
case op := <-b.operateState:
op(s)
case <-ctx.Done():
b.cacheState = s
close(b.done)
// Notifying decorators about shutdown event
for _, sl := range s.shutdownListeners {
sl.Shutdown()
}
return
}
}
}

func (b *Bar) render(tw int) {
select {
case b.operateState <- func(s *bState) {
stat := newStatistics(tw, s)
defer func() {
// recovering if user defined decorator panics for example
if p := recover(); p != nil {
if b.recoveredPanic == nil {
s.extender = makePanicExtender(p)
b.toShutdown = !b.toShutdown
b.recoveredPanic = p
}
frame, lines := s.extender(nil, s.reqWidth, stat)
b.extendedLines = lines
b.frameCh <- frame
b.dlogger.Println(p)
}
s.completeFlushed = s.toComplete
}()
frame, lines := s.extender(s.draw(stat), s.reqWidth, stat)
b.extendedLines = lines
b.toShutdown = s.toComplete && !s.completeFlushed
b.frameCh <- frame
}:
case <-b.done:
s := b.cacheState
stat := newStatistics(tw, s)
var r io.Reader
if b.recoveredPanic == nil {
r = s.draw(stat)
}
frame, lines := s.extender(r, s.reqWidth, stat)
b.extendedLines = lines
b.frameCh <- frame
}
}

func (b *Bar) subscribeDecorators() {
var averageDecorators []decor.AverageDecorator
var ewmaDecorators []decor.EwmaDecorator
var shutdownListeners []decor.ShutdownListener
b.TraverseDecorators(func(d decor.Decorator) {
if d, ok := d.(decor.AverageDecorator); ok {
averageDecorators = append(averageDecorators, d)
}
if d, ok := d.(decor.EwmaDecorator); ok {
ewmaDecorators = append(ewmaDecorators, d)
}
if d, ok := d.(decor.ShutdownListener); ok {
shutdownListeners = append(shutdownListeners, d)
}
})
select {
case b.operateState <- func(s *bState) {
s.averageDecorators = averageDecorators
s.ewmaDecorators = ewmaDecorators
s.shutdownListeners = shutdownListeners
}:
b.hasEwmaDecorators = len(ewmaDecorators) != 0
case <-b.done:
}
}

func (b *Bar) refreshTillShutdown() {
for {
select {
case b.container.refreshCh <- time.Now():
case <-b.done:
return
}
}
}

func (b *Bar) wSyncTable() [][]chan int {
select {
case b.operateState <- func(s *bState) { b.syncTableCh <- s.wSyncTable() }:
return <-b.syncTableCh
case <-b.done:
return b.cacheState.wSyncTable()
}
}

func (s *bState) draw(stat decor.Statistics) io.Reader {
if !s.trimSpace {
stat.AvailableWidth -= 2
s.bufB.WriteByte(' ')
defer s.bufB.WriteByte(' ')
}

nlr := strings.NewReader("\n")
tw := stat.AvailableWidth
for _, d := range s.pDecorators {
str := d.Decor(stat)
stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str))
s.bufP.WriteString(str)
}
if stat.AvailableWidth <= 0 {
trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(s.bufP.String()), tw, "…"))
s.bufP.Reset()
return io.MultiReader(trunc, s.bufB, nlr)
}

tw = stat.AvailableWidth
for _, d := range s.aDecorators {
str := d.Decor(stat)
stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str))
s.bufA.WriteString(str)
}
if stat.AvailableWidth <= 0 {
trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(s.bufA.String()), tw, "…"))
s.bufA.Reset()
return io.MultiReader(s.bufP, s.bufB, trunc, nlr)
}

s.filler.Fill(s.bufB, s.reqWidth, stat)

return io.MultiReader(s.bufP, s.bufB, s.bufA, nlr)
}

func (s *bState) wSyncTable() [][]chan int {
columns := make([]chan int, 0, len(s.pDecorators)+len(s.aDecorators))
var pCount int
for _, d := range s.pDecorators {
if ch, ok := d.Sync(); ok {
columns = append(columns, ch)
pCount++
}
}
var aCount int
for _, d := range s.aDecorators {
if ch, ok := d.Sync(); ok {
columns = append(columns, ch)
aCount++
}
}
table := make([][]chan int, 2)
table[0] = columns[0:pCount]
table[1] = columns[pCount : pCount+aCount : pCount+aCount]
return table
}

func newStatistics(tw int, s *bState) decor.Statistics {
return decor.Statistics{
ID: s.id,
AvailableWidth: tw,
Total: s.total,
Current: s.current,
Refill: s.refill,
Completed: s.completeFlushed,
}
}

func extractBaseDecorator(d decor.Decorator) decor.Decorator {
if d, ok := d.(decor.Wrapper); ok {
return extractBaseDecorator(d.Base())
}
return d
}

func ewmaIterationUpdate(done bool, s *bState, dur time.Duration) {
if !done && !s.iterated {
panic("increment required before ewma iteration update")
} else {
s.iterated = false
}
for _, d := range s.ewmaDecorators {
d.EwmaUpdate(s.lastN, dur)
}
}

func makePanicExtender(p interface{}) extFunc {
pstr := fmt.Sprint(p)
stack := debug.Stack()
stackLines := bytes.Count(stack, []byte("\n"))
return func(_ io.Reader, _ int, st decor.Statistics) (io.Reader, int) {
mr := io.MultiReader(
strings.NewReader(runewidth.Truncate(pstr, st.AvailableWidth, "…")),
strings.NewReader(fmt.Sprintf("\n%#v\n", st)),
bytes.NewReader(stack),
)
return mr, stackLines + 1
}
}
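Nearly every exported method on Bar follows one pattern: send a closure over operateState to the serving goroutine, or fall back to the immutable cacheState once done is closed. A reduced sketch of that actor-style pattern (illustrative names, not mpb's API):

```go
package main

import "fmt"

type counter struct {
	ops   chan func(*int) // closures executed by the serving goroutine
	quit  chan struct{}   // asks the goroutine to stop
	done  chan struct{}   // closed by the goroutine after caching state
	cache int             // final state, readable once done is closed
}

func newCounter() *counter {
	c := &counter{
		ops:  make(chan func(*int)),
		quit: make(chan struct{}),
		done: make(chan struct{}),
	}
	go func() {
		state := 0
		for {
			select {
			case op := <-c.ops:
				op(&state)
			case <-c.quit:
				c.cache = state // publish final state...
				close(c.done)   // ...before signalling done
				return
			}
		}
	}()
	return c
}

func (c *counter) Add(n int) {
	select {
	case c.ops <- func(s *int) { *s += n }:
	case <-c.done: // after shutdown, mutations are silently dropped
	}
}

func (c *counter) Value() int {
	res := make(chan int)
	select {
	case c.ops <- func(s *int) { res <- *s }:
		return <-res
	case <-c.done:
		return c.cache
	}
}

func main() {
	c := newCounter()
	c.Add(3)
	fmt.Println(c.Value()) // 3
	close(c.quit)
}
```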
30
vendor/github.com/vbauerster/mpb/v5/bar_filler.go
generated
vendored
@ -1,30 +0,0 @@
package mpb

import (
"io"

"github.com/vbauerster/mpb/v5/decor"
)

// BarFiller interface.
// Bar (without decorators) renders itself by calling BarFiller's Fill method.
//
// `reqWidth` is requested width, which is set via:
// func WithWidth(width int) ContainerOption
// func BarWidth(width int) BarOption
//
// Default implementations can be obtained via:
//
// func NewBarFiller(style string, reverse bool) BarFiller
// func NewSpinnerFiller(style []string, alignment SpinnerAlignment) BarFiller
//
type BarFiller interface {
Fill(w io.Writer, reqWidth int, stat decor.Statistics)
}

// BarFillerFunc is function type adapter to convert function into BarFiller.
type BarFillerFunc func(w io.Writer, reqWidth int, stat decor.Statistics)

func (f BarFillerFunc) Fill(w io.Writer, reqWidth int, stat decor.Statistics) {
f(w, reqWidth, stat)
}
173
vendor/github.com/vbauerster/mpb/v5/bar_filler_bar.go
generated
vendored
@ -1,173 +0,0 @@
package mpb

import (
"bytes"
"io"
"unicode/utf8"

"github.com/mattn/go-runewidth"
"github.com/vbauerster/mpb/v5/decor"
"github.com/vbauerster/mpb/v5/internal"
)

const (
rLeft = iota
rFill
rTip
rSpace
rRight
rRevTip
rRefill
)

// DefaultBarStyle is a string containing 7 runes.
// Each rune is a building block of a progress bar.
//
// '1st rune' stands for left boundary rune
//
// '2nd rune' stands for fill rune
//
// '3rd rune' stands for tip rune
//
// '4th rune' stands for space rune
//
// '5th rune' stands for right boundary rune
//
// '6th rune' stands for reverse tip rune
//
// '7th rune' stands for refill rune
//
const DefaultBarStyle string = "[=>-]<+"

type barFiller struct {
format [][]byte
rwidth []int
tip []byte
refill int64
reverse bool
flush func(io.Writer, *space, [][]byte)
}

type space struct {
space []byte
rwidth int
count int
}

// NewBarFiller constructs mpb.BarFiller, to be used with *Progress.Add(...) *Bar method.
func NewBarFiller(style string, reverse bool) BarFiller {
bf := &barFiller{
format: make([][]byte, len(DefaultBarStyle)),
rwidth: make([]int, len(DefaultBarStyle)),
reverse: reverse,
}
bf.SetStyle(style)
return bf
}

func (s *barFiller) SetStyle(style string) {
if !utf8.ValidString(style) {
panic("invalid bar style")
}
if style == "" {
style = DefaultBarStyle
}
src := make([][]byte, utf8.RuneCountInString(style))
i := 0
for _, r := range style {
s.rwidth[i] = runewidth.RuneWidth(r)
src[i] = []byte(string(r))
i++
}
copy(s.format, src)
s.SetReverse(s.reverse)
}

func (s *barFiller) SetReverse(reverse bool) {
if reverse {
s.tip = s.format[rRevTip]
s.flush = reverseFlush
} else {
s.tip = s.format[rTip]
s.flush = regularFlush
}
s.reverse = reverse
}

func (s *barFiller) Fill(w io.Writer, reqWidth int, stat decor.Statistics) {
width := internal.WidthForBarFiller(reqWidth, stat.AvailableWidth)

if brackets := s.rwidth[rLeft] + s.rwidth[rRight]; width < brackets {
return
} else {
// don't count brackets as progress
width -= brackets
}
w.Write(s.format[rLeft])
defer w.Write(s.format[rRight])

cwidth := int(internal.PercentageRound(stat.Total, stat.Current, width))
space := &space{
space: s.format[rSpace],
rwidth: s.rwidth[rSpace],
count: width - cwidth,
}

index, refill := 0, 0
bb := make([][]byte, cwidth)

if cwidth > 0 && cwidth != width {
bb[index] = s.tip
cwidth -= s.rwidth[rTip]
index++
}

if stat.Refill > 0 {
refill = int(internal.PercentageRound(stat.Total, int64(stat.Refill), width))
if refill > cwidth {
refill = cwidth
}
cwidth -= refill
}

for cwidth > 0 {
bb[index] = s.format[rFill]
cwidth -= s.rwidth[rFill]
index++
}

for refill > 0 {
bb[index] = s.format[rRefill]
refill -= s.rwidth[rRefill]
index++
}

if cwidth+refill < 0 || space.rwidth > 1 {
buf := new(bytes.Buffer)
s.flush(buf, space, bb[:index])
io.WriteString(w, runewidth.Truncate(buf.String(), width, "…"))
return
}

s.flush(w, space, bb)
}

func regularFlush(w io.Writer, space *space, bb [][]byte) {
for i := len(bb) - 1; i >= 0; i-- {
w.Write(bb[i])
}
for space.count > 0 {
w.Write(space.space)
space.count -= space.rwidth
}
}

func reverseFlush(w io.Writer, space *space, bb [][]byte) {
for space.count > 0 {
w.Write(space.space)
space.count -= space.rwidth
}
for i := 0; i < len(bb); i++ {
w.Write(bb[i])
}
}
63
vendor/github.com/vbauerster/mpb/v5/bar_filler_spinner.go
generated
vendored
@ -1,63 +0,0 @@
package mpb

import (
"io"
"strings"
"unicode/utf8"

"github.com/vbauerster/mpb/v5/decor"
"github.com/vbauerster/mpb/v5/internal"
)

// SpinnerAlignment enum.
type SpinnerAlignment int

// SpinnerAlignment kinds.
const (
SpinnerOnLeft SpinnerAlignment = iota
SpinnerOnMiddle
SpinnerOnRight
)

// DefaultSpinnerStyle is a slice of strings, which makes a spinner.
var DefaultSpinnerStyle = []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}

type spinnerFiller struct {
frames []string
count uint
alignment SpinnerAlignment
}

// NewSpinnerFiller constructs mpb.BarFiller, to be used with *Progress.Add(...) *Bar method.
func NewSpinnerFiller(style []string, alignment SpinnerAlignment) BarFiller {
if len(style) == 0 {
style = DefaultSpinnerStyle
}
filler := &spinnerFiller{
frames: style,
alignment: alignment,
}
return filler
}

func (s *spinnerFiller) Fill(w io.Writer, reqWidth int, stat decor.Statistics) {
width := internal.WidthForBarFiller(reqWidth, stat.AvailableWidth)

frame := s.frames[s.count%uint(len(s.frames))]
frameWidth := utf8.RuneCountInString(frame)

if width < frameWidth {
return
}

switch rest := width - frameWidth; s.alignment {
case SpinnerOnLeft:
io.WriteString(w, frame+strings.Repeat(" ", rest))
case SpinnerOnMiddle:
str := strings.Repeat(" ", rest/2) + frame + strings.Repeat(" ", rest/2+rest%2)
io.WriteString(w, str)
case SpinnerOnRight:
io.WriteString(w, strings.Repeat(" ", rest)+frame)
}
s.count++
}
213
vendor/github.com/vbauerster/mpb/v5/bar_option.go
generated
vendored
@ -1,213 +0,0 @@
package mpb

import (
"bytes"
"io"

"github.com/vbauerster/mpb/v5/decor"
)

// BarOption is a function option which changes the default behavior of a bar.
type BarOption func(*bState)

func (s *bState) addDecorators(dest *[]decor.Decorator, decorators ...decor.Decorator) {
type mergeWrapper interface {
MergeUnwrap() []decor.Decorator
}
for _, decorator := range decorators {
if mw, ok := decorator.(mergeWrapper); ok {
*dest = append(*dest, mw.MergeUnwrap()...)
}
*dest = append(*dest, decorator)
}
}

// AppendDecorators let you inject decorators to the bar's right side.
func AppendDecorators(decorators ...decor.Decorator) BarOption {
return func(s *bState) {
s.addDecorators(&s.aDecorators, decorators...)
}
}

// PrependDecorators let you inject decorators to the bar's left side.
func PrependDecorators(decorators ...decor.Decorator) BarOption {
return func(s *bState) {
s.addDecorators(&s.pDecorators, decorators...)
}
}

// BarID sets bar id.
func BarID(id int) BarOption {
return func(s *bState) {
s.id = id
}
}

// BarWidth sets bar width independent of the container.
func BarWidth(width int) BarOption {
return func(s *bState) {
s.reqWidth = width
}
}

// BarQueueAfter queues this (being constructed) bar to replace
// runningBar after it has been completed.
func BarQueueAfter(runningBar *Bar) BarOption {
if runningBar == nil {
return nil
}
return func(s *bState) {
s.runningBar = runningBar
}
}

// BarRemoveOnComplete removes both bar's filler and its decorators
// on complete event.
func BarRemoveOnComplete() BarOption {
return func(s *bState) {
s.dropOnComplete = true
}
}

// BarFillerClearOnComplete clears bar's filler on complete event.
// It's shortcut for BarFillerOnComplete("").
func BarFillerClearOnComplete() BarOption {
return BarFillerOnComplete("")
}

// BarFillerOnComplete replaces bar's filler with message, on complete event.
func BarFillerOnComplete(message string) BarOption {
return BarFillerMiddleware(func(base BarFiller) BarFiller {
return BarFillerFunc(func(w io.Writer, reqWidth int, st decor.Statistics) {
if st.Completed {
io.WriteString(w, message)
} else {
base.Fill(w, reqWidth, st)
}
})
})
}

// BarFillerMiddleware provides a way to augment default BarFiller.
func BarFillerMiddleware(middle func(BarFiller) BarFiller) BarOption {
return func(s *bState) {
s.middleware = middle
}
}

// BarPriority sets bar's priority. Zero is highest priority, i.e. bar
// will be on top. If `BarReplaceOnComplete` option is supplied, this
// option is ignored.
func BarPriority(priority int) BarOption {
return func(s *bState) {
s.priority = priority
}
}

// BarExtender is an option to extend bar to the next new line, with
// arbitrary output.
func BarExtender(filler BarFiller) BarOption {
if filler == nil {
return nil
}
return func(s *bState) {
s.extender = makeExtFunc(filler)
}
}

func makeExtFunc(filler BarFiller) extFunc {
buf := new(bytes.Buffer)
return func(r io.Reader, reqWidth int, st decor.Statistics) (io.Reader, int) {
filler.Fill(buf, reqWidth, st)
return io.MultiReader(r, buf), bytes.Count(buf.Bytes(), []byte("\n"))
}
}

// BarFillerTrim bar filler is rendered with leading and trailing space
// like ' [===] ' by default. With this option leading and trailing
// space will be removed.
func BarFillerTrim() BarOption {
return func(s *bState) {
s.trimSpace = true
}
}

// TrimSpace is an alias to BarFillerTrim.
func TrimSpace() BarOption {
return BarFillerTrim()
}

// BarStyle overrides mpb.DefaultBarStyle which is "[=>-]<+".
// It's ok to pass string containing just 5 runes, for example "╢▌▌░╟",
// if you don't need to override '<' (reverse tip) and '+' (refill rune).
func BarStyle(style string) BarOption {
if style == "" {
return nil
}
type styleSetter interface {
SetStyle(string)
}
return func(s *bState) {
if t, ok := s.filler.(styleSetter); ok {
t.SetStyle(style)
}
}
}

// BarNoPop disables bar pop out of container. Effective when
// PopCompletedMode of container is enabled.
func BarNoPop() BarOption {
return func(s *bState) {
s.noPop = true
}
}

// BarReverse reverse mode, bar will progress from right to left.
func BarReverse() BarOption {
type revSetter interface {
SetReverse(bool)
}
return func(s *bState) {
if t, ok := s.filler.(revSetter); ok {
t.SetReverse(true)
}
}
}

// SpinnerStyle sets custom spinner style.
// Effective when Filler type is spinner.
func SpinnerStyle(frames []string) BarOption {
if len(frames) == 0 {
return nil
}
chk := func(filler BarFiller) (interface{}, bool) {
t, ok := filler.(*spinnerFiller)
return t, ok
}
cb := func(t interface{}) {
t.(*spinnerFiller).frames = frames
}
return MakeFillerTypeSpecificBarOption(chk, cb)
}

// MakeFillerTypeSpecificBarOption makes BarOption specific to Filler's
// actual type. If you implement your own Filler, so most probably
// you'll need this. See BarStyle or SpinnerStyle for example.
func MakeFillerTypeSpecificBarOption(
typeChecker func(BarFiller) (interface{}, bool),
cb func(interface{}),
) BarOption {
return func(s *bState) {
if t, ok := typeChecker(s.filler); ok {
cb(t)
}
}
}

// BarOptOn returns option when condition evaluates to true.
func BarOptOn(option BarOption, condition func() bool) BarOption {
if condition() {
return option
}
return nil
}
102
vendor/github.com/vbauerster/mpb/v5/container_option.go
generated
vendored
@ -1,102 +0,0 @@
|
||||
package mpb
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ContainerOption is a function option which changes the default
|
||||
// behavior of progress container, if passed to mpb.New(...ContainerOption).
|
||||
type ContainerOption func(*pState)
|
||||
|
||||
// WithWaitGroup provides means to have a single joint point. If
|
||||
// *sync.WaitGroup is provided, you can safely call just p.Wait()
|
||||
// without calling Wait() on provided *sync.WaitGroup. Makes sense
|
||||
// when there are more than one bar to render.
|
||||
func WithWaitGroup(wg *sync.WaitGroup) ContainerOption {
|
||||
return func(s *pState) {
|
||||
s.uwg = wg
|
||||
}
|
||||
}
|
||||
|
||||
// WithWidth sets container width. If not set underlying bars will
|
||||
// occupy whole term width.
|
||||
func WithWidth(width int) ContainerOption {
|
||||
return func(s *pState) {
|
||||
s.reqWidth = width
|
||||
}
|
||||
}
|
||||
|
||||
// WithRefreshRate overrides default 120ms refresh rate.
|
||||
func WithRefreshRate(d time.Duration) ContainerOption {
|
||||
return func(s *pState) {
|
||||
s.rr = d
|
||||
}
|
||||
}
|
||||
|
||||
// WithManualRefresh disables internal auto refresh time.Ticker.
|
||||
// Refresh will occur upon receive value from provided ch.
|
||||
func WithManualRefresh(ch <-chan time.Time) ContainerOption {
|
||||
return func(s *pState) {
|
||||
s.refreshSrc = ch
|
||||
}
|
||||
}
|
||||
|
||||
// WithRenderDelay delays rendering. By default rendering starts as
|
||||
// soon as bar is added, with this option it's possible to delay
|
||||
// rendering process by keeping provided chan unclosed. In other words
|
||||
// rendering will start as soon as provided chan is closed.
|
||||
func WithRenderDelay(ch <-chan struct{}) ContainerOption {
|
||||
return func(s *pState) {
|
||||
s.renderDelay = ch
|
||||
}
|
||||
}
|
||||
|
||||
// WithShutdownNotifier provided chanel will be closed, after all bars
|
||||
// have been rendered.
|
||||
func WithShutdownNotifier(ch chan struct{}) ContainerOption {
|
||||
return func(s *pState) {
|
||||
s.shutdownNotifier = ch
|
||||
}
|
||||
}
|
||||
|
||||
// WithOutput overrides default os.Stdout output. Setting it to nil
|
||||
// will effectively disable auto refresh rate and discard any output,
|
||||
// useful if you want to disable progress bars with little overhead.
|
||||
func WithOutput(w io.Writer) ContainerOption {
|
||||
return func(s *pState) {
|
||||
if w == nil {
|
||||
s.refreshSrc = make(chan time.Time)
|
||||
s.output = ioutil.Discard
|
||||
return
|
||||
}
|
||||
s.output = w
|
||||
}
|
||||
}
|
||||
|
||||
// WithDebugOutput sets debug output.
|
||||
func WithDebugOutput(w io.Writer) ContainerOption {
|
||||
if w == nil {
|
||||
return nil
|
||||
}
|
||||
return func(s *pState) {
|
||||
s.debugOut = w
|
||||
}
|
||||
}
|
||||
|
||||
// PopCompletedMode will pop and stop rendering completed bars.
|
||||
func PopCompletedMode() ContainerOption {
|
||||
return func(s *pState) {
|
||||
s.popCompleted = true
|
||||
}
|
||||
}
|
||||
|
||||
// ContainerOptOn returns option when condition evaluates to true.
|
||||
func ContainerOptOn(option ContainerOption, condition func() bool) ContainerOption {
|
||||
if condition() {
|
||||
return option
|
||||
}
|
||||
return nil
|
||||
}
|
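
Likewise, a hedged sketch of the ContainerOption side; the width, refresh rate, and total below are arbitrary illustration values:

package main

import (
    "time"

    "github.com/vbauerster/mpb/v5"
)

func main() {
    p := mpb.New(
        mpb.WithWidth(64),                         // cap rendering width
        mpb.WithRefreshRate(180*time.Millisecond), // override the 120ms default
        mpb.PopCompletedMode(),                    // pop bars once they complete
    )
    bar := p.AddBar(3)
    for i := 0; i < 3; i++ {
        time.Sleep(200 * time.Millisecond)
        bar.Increment()
    }
    p.Wait()
}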
7
vendor/github.com/vbauerster/mpb/v5/cwriter/util_bsd.go
generated
vendored
@ -1,7 +0,0 @@

// +build darwin dragonfly freebsd netbsd openbsd

package cwriter

import "golang.org/x/sys/unix"

const ioctlReadTermios = unix.TIOCGETA

7
vendor/github.com/vbauerster/mpb/v5/cwriter/util_linux.go
generated
vendored
@ -1,7 +0,0 @@

// +build aix linux

package cwriter

import "golang.org/x/sys/unix"

const ioctlReadTermios = unix.TCGETS

7
vendor/github.com/vbauerster/mpb/v5/cwriter/util_solaris.go
generated
vendored
@ -1,7 +0,0 @@

// +build solaris

package cwriter

import "golang.org/x/sys/unix"

const ioctlReadTermios = unix.TCGETA
84
vendor/github.com/vbauerster/mpb/v5/cwriter/writer.go
generated
vendored
@ -1,84 +0,0 @@

package cwriter

import (
    "bytes"
    "errors"
    "io"
    "os"
    "strconv"
)

// NotATTY not a TeleTYpewriter error.
var NotATTY = errors.New("not a terminal")

// http://ascii-table.com/ansi-escape-sequences.php
const (
    escOpen  = "\x1b["
    cuuAndEd = "A\x1b[J"
)

// Writer is a buffered writer that updates the terminal. The
// contents of the writer will be flushed when Flush is called.
type Writer struct {
    out        io.Writer
    buf        bytes.Buffer
    lineCount  int
    fd         int
    isTerminal bool
}

// New returns a new Writer with defaults.
func New(out io.Writer) *Writer {
    w := &Writer{out: out}
    if f, ok := out.(*os.File); ok {
        w.fd = int(f.Fd())
        w.isTerminal = IsTerminal(w.fd)
    }
    return w
}

// Flush flushes the underlying buffer.
func (w *Writer) Flush(lineCount int) (err error) {
    // some terminals interpret clear 0 lines as clear 1
    if w.lineCount > 0 {
        err = w.clearLines()
        if err != nil {
            return
        }
    }
    w.lineCount = lineCount
    _, err = w.buf.WriteTo(w.out)
    return
}

// Write appends the contents of p to the underlying buffer.
func (w *Writer) Write(p []byte) (n int, err error) {
    return w.buf.Write(p)
}

// WriteString writes string to the underlying buffer.
func (w *Writer) WriteString(s string) (n int, err error) {
    return w.buf.WriteString(s)
}

// ReadFrom reads from the provided io.Reader and writes to the
// underlying buffer.
func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
    return w.buf.ReadFrom(r)
}

// GetWidth returns width of underlying terminal.
func (w *Writer) GetWidth() (int, error) {
    if !w.isTerminal {
        return -1, NotATTY
    }
    tw, _, err := GetSize(w.fd)
    return tw, err
}

func (w *Writer) ansiCuuAndEd() (err error) {
    buf := make([]byte, 8)
    buf = strconv.AppendInt(buf[:copy(buf, escOpen)], int64(w.lineCount), 10)
    _, err = w.out.Write(append(buf, cuuAndEd...))
    return
}
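
The escape sequence assembled by ansiCuuAndEd above is plain ANSI: ESC[<n>A moves the cursor up n lines (CUU) and ESC[J erases from the cursor to the end of the screen (ED). A standalone sketch, assuming an ANSI-capable terminal:

package main

import "fmt"

func main() {
    fmt.Println("line one")
    fmt.Println("line two")
    // Move the cursor up 2 lines (CUU), then erase to end of screen (ED),
    // which is exactly what Writer.Flush does before redrawing a frame.
    fmt.Print("\x1b[2A\x1b[J")
    fmt.Println("redrawn")
}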
26
vendor/github.com/vbauerster/mpb/v5/cwriter/writer_posix.go
generated
vendored
@ -1,26 +0,0 @@

// +build !windows

package cwriter

import (
    "golang.org/x/sys/unix"
)

func (w *Writer) clearLines() error {
    return w.ansiCuuAndEd()
}

// GetSize returns the dimensions of the given terminal.
func GetSize(fd int) (width, height int, err error) {
    ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
    if err != nil {
        return -1, -1, err
    }
    return int(ws.Col), int(ws.Row), nil
}

// IsTerminal returns whether the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
    _, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
    return err == nil
}
73
vendor/github.com/vbauerster/mpb/v5/cwriter/writer_windows.go
generated
vendored
@ -1,73 +0,0 @@

// +build windows

package cwriter

import (
    "unsafe"

    "golang.org/x/sys/windows"
)

var kernel32 = windows.NewLazySystemDLL("kernel32.dll")

var (
    procSetConsoleCursorPosition   = kernel32.NewProc("SetConsoleCursorPosition")
    procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
)

func (w *Writer) clearLines() error {
    if !w.isTerminal {
        // hope it's cygwin or similar
        return w.ansiCuuAndEd()
    }

    var info windows.ConsoleScreenBufferInfo
    if err := windows.GetConsoleScreenBufferInfo(windows.Handle(w.fd), &info); err != nil {
        return err
    }

    info.CursorPosition.Y -= int16(w.lineCount)
    if info.CursorPosition.Y < 0 {
        info.CursorPosition.Y = 0
    }
    _, _, _ = procSetConsoleCursorPosition.Call(
        uintptr(w.fd),
        uintptr(uint32(uint16(info.CursorPosition.Y))<<16|uint32(uint16(info.CursorPosition.X))),
    )

    // clear the lines
    cursor := &windows.Coord{
        X: info.Window.Left,
        Y: info.CursorPosition.Y,
    }
    count := uint32(info.Size.X) * uint32(w.lineCount)
    _, _, _ = procFillConsoleOutputCharacter.Call(
        uintptr(w.fd),
        uintptr(' '),
        uintptr(count),
        *(*uintptr)(unsafe.Pointer(cursor)),
        uintptr(unsafe.Pointer(new(uint32))),
    )
    return nil
}

// GetSize returns the visible dimensions of the given terminal.
//
// These dimensions don't include any scrollback buffer height.
func GetSize(fd int) (width, height int, err error) {
    var info windows.ConsoleScreenBufferInfo
    if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil {
        return 0, 0, err
    }
    // terminal.GetSize from crypto/ssh adds "+ 1" to both width and height:
    // https://go.googlesource.com/crypto/+/refs/heads/release-branch.go1.14/ssh/terminal/util_windows.go#75
    // but it looks like that was the root cause of issue #66, so removing both "+ 1" fixed it.
    return int(info.Window.Right - info.Window.Left), int(info.Window.Bottom - info.Window.Top), nil
}

// IsTerminal returns whether the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
    var st uint32
    err := windows.GetConsoleMode(windows.Handle(fd), &st)
    return err == nil
}
21
vendor/github.com/vbauerster/mpb/v5/decor/any.go
generated
vendored
@ -1,21 +0,0 @@

package decor

// Any decorator displays text that can be changed during the decorator's
// lifetime via the provided DecorFunc.
//
// `fn` DecorFunc callback
//
// `wcc` optional WC config
//
func Any(fn DecorFunc, wcc ...WC) Decorator {
    return &any{initWC(wcc...), fn}
}

type any struct {
    WC
    fn DecorFunc
}

func (d *any) Decor(s Statistics) string {
    return d.FormatMsg(d.fn(s))
}
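
A small sketch of decor.Any in use; the message format is made up for illustration:

package main

import (
    "fmt"
    "time"

    "github.com/vbauerster/mpb/v5"
    "github.com/vbauerster/mpb/v5/decor"
)

func main() {
    p := mpb.New()
    start := time.Now()
    // The DecorFunc is re-evaluated on every render, so the text may
    // change over the decorator's lifetime.
    d := decor.Any(func(s decor.Statistics) string {
        return fmt.Sprintf("%d/%d in %s", s.Current, s.Total, time.Since(start).Round(time.Second))
    })
    bar := p.AddBar(5, mpb.PrependDecorators(d))
    for i := 0; i < 5; i++ {
        time.Sleep(300 * time.Millisecond)
        bar.Increment()
    }
    p.Wait()
}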
243
vendor/github.com/vbauerster/mpb/v5/decor/counters.go
generated
vendored
@ -1,243 +0,0 @@

package decor

import (
    "fmt"
    "strings"
)

const (
    _ = iota
    UnitKiB
    UnitKB
)

// CountersNoUnit is a wrapper around Counters with no unit param.
func CountersNoUnit(pairFmt string, wcc ...WC) Decorator {
    return Counters(0, pairFmt, wcc...)
}

// CountersKibiByte is a wrapper around Counters with predefined unit
// UnitKiB (bytes/1024).
func CountersKibiByte(pairFmt string, wcc ...WC) Decorator {
    return Counters(UnitKiB, pairFmt, wcc...)
}

// CountersKiloByte is a wrapper around Counters with predefined unit
// UnitKB (bytes/1000).
func CountersKiloByte(pairFmt string, wcc ...WC) Decorator {
    return Counters(UnitKB, pairFmt, wcc...)
}

// Counters decorator with dynamic unit measure adjustment.
//
// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
//
// `pairFmt` printf compatible verbs for current and total pair
//
// `wcc` optional WC config
//
// pairFmt example if unit=UnitKB:
//
//    pairFmt="%.1f / %.1f"   output: "1.0MB / 12.0MB"
//    pairFmt="% .1f / % .1f" output: "1.0 MB / 12.0 MB"
//    pairFmt="%d / %d"       output: "1MB / 12MB"
//    pairFmt="% d / % d"     output: "1 MB / 12 MB"
//
func Counters(unit int, pairFmt string, wcc ...WC) Decorator {
    producer := func(unit int, pairFmt string) DecorFunc {
        if pairFmt == "" {
            pairFmt = "%d / %d"
        } else if strings.Count(pairFmt, "%") != 2 {
            panic("expected pairFmt with exactly 2 verbs")
        }
        switch unit {
        case UnitKiB:
            return func(s Statistics) string {
                return fmt.Sprintf(pairFmt, SizeB1024(s.Current), SizeB1024(s.Total))
            }
        case UnitKB:
            return func(s Statistics) string {
                return fmt.Sprintf(pairFmt, SizeB1000(s.Current), SizeB1000(s.Total))
            }
        default:
            return func(s Statistics) string {
                return fmt.Sprintf(pairFmt, s.Current, s.Total)
            }
        }
    }
    return Any(producer(unit, pairFmt), wcc...)
}

// TotalNoUnit is a wrapper around Total with no unit param.
func TotalNoUnit(format string, wcc ...WC) Decorator {
    return Total(0, format, wcc...)
}

// TotalKibiByte is a wrapper around Total with predefined unit
// UnitKiB (bytes/1024).
func TotalKibiByte(format string, wcc ...WC) Decorator {
    return Total(UnitKiB, format, wcc...)
}

// TotalKiloByte is a wrapper around Total with predefined unit
// UnitKB (bytes/1000).
func TotalKiloByte(format string, wcc ...WC) Decorator {
    return Total(UnitKB, format, wcc...)
}

// Total decorator with dynamic unit measure adjustment.
//
// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
//
// `format` printf compatible verb for Total
//
// `wcc` optional WC config
//
// format example if unit=UnitKiB:
//
//    format="%.1f"  output: "12.0MiB"
//    format="% .1f" output: "12.0 MiB"
//    format="%d"    output: "12MiB"
//    format="% d"   output: "12 MiB"
//
func Total(unit int, format string, wcc ...WC) Decorator {
    producer := func(unit int, format string) DecorFunc {
        if format == "" {
            format = "%d"
        } else if strings.Count(format, "%") != 1 {
            panic("expected format with exactly 1 verb")
        }

        switch unit {
        case UnitKiB:
            return func(s Statistics) string {
                return fmt.Sprintf(format, SizeB1024(s.Total))
            }
        case UnitKB:
            return func(s Statistics) string {
                return fmt.Sprintf(format, SizeB1000(s.Total))
            }
        default:
            return func(s Statistics) string {
                return fmt.Sprintf(format, s.Total)
            }
        }
    }
    return Any(producer(unit, format), wcc...)
}

// CurrentNoUnit is a wrapper around Current with no unit param.
func CurrentNoUnit(format string, wcc ...WC) Decorator {
    return Current(0, format, wcc...)
}

// CurrentKibiByte is a wrapper around Current with predefined unit
// UnitKiB (bytes/1024).
func CurrentKibiByte(format string, wcc ...WC) Decorator {
    return Current(UnitKiB, format, wcc...)
}

// CurrentKiloByte is a wrapper around Current with predefined unit
// UnitKB (bytes/1000).
func CurrentKiloByte(format string, wcc ...WC) Decorator {
    return Current(UnitKB, format, wcc...)
}

// Current decorator with dynamic unit measure adjustment.
//
// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
//
// `format` printf compatible verb for Current
//
// `wcc` optional WC config
//
// format example if unit=UnitKiB:
//
//    format="%.1f"  output: "12.0MiB"
//    format="% .1f" output: "12.0 MiB"
//    format="%d"    output: "12MiB"
//    format="% d"   output: "12 MiB"
//
func Current(unit int, format string, wcc ...WC) Decorator {
    producer := func(unit int, format string) DecorFunc {
        if format == "" {
            format = "%d"
        } else if strings.Count(format, "%") != 1 {
            panic("expected format with exactly 1 verb")
        }

        switch unit {
        case UnitKiB:
            return func(s Statistics) string {
                return fmt.Sprintf(format, SizeB1024(s.Current))
            }
        case UnitKB:
            return func(s Statistics) string {
                return fmt.Sprintf(format, SizeB1000(s.Current))
            }
        default:
            return func(s Statistics) string {
                return fmt.Sprintf(format, s.Current)
            }
        }
    }
    return Any(producer(unit, format), wcc...)
}

// InvertedCurrentNoUnit is a wrapper around InvertedCurrent with no unit param.
func InvertedCurrentNoUnit(format string, wcc ...WC) Decorator {
    return InvertedCurrent(0, format, wcc...)
}

// InvertedCurrentKibiByte is a wrapper around InvertedCurrent with predefined unit
// UnitKiB (bytes/1024).
func InvertedCurrentKibiByte(format string, wcc ...WC) Decorator {
    return InvertedCurrent(UnitKiB, format, wcc...)
}

// InvertedCurrentKiloByte is a wrapper around InvertedCurrent with predefined unit
// UnitKB (bytes/1000).
func InvertedCurrentKiloByte(format string, wcc ...WC) Decorator {
    return InvertedCurrent(UnitKB, format, wcc...)
}

// InvertedCurrent decorator with dynamic unit measure adjustment.
//
// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
//
// `format` printf compatible verb for InvertedCurrent
//
// `wcc` optional WC config
//
// format example if unit=UnitKiB:
//
//    format="%.1f"  output: "12.0MiB"
//    format="% .1f" output: "12.0 MiB"
//    format="%d"    output: "12MiB"
//    format="% d"   output: "12 MiB"
//
func InvertedCurrent(unit int, format string, wcc ...WC) Decorator {
    producer := func(unit int, format string) DecorFunc {
        if format == "" {
            format = "%d"
        } else if strings.Count(format, "%") != 1 {
            panic("expected format with exactly 1 verb")
        }

        switch unit {
        case UnitKiB:
            return func(s Statistics) string {
                return fmt.Sprintf(format, SizeB1024(s.Total-s.Current))
            }
        case UnitKB:
            return func(s Statistics) string {
                return fmt.Sprintf(format, SizeB1000(s.Total-s.Current))
            }
        default:
            return func(s Statistics) string {
                return fmt.Sprintf(format, s.Total-s.Current)
            }
        }
    }
    return Any(producer(unit, format), wcc...)
}
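
A hedged usage sketch for the counters family; the 10 MiB total and 1 MiB chunk size are illustrative:

package main

import (
    "time"

    "github.com/vbauerster/mpb/v5"
    "github.com/vbauerster/mpb/v5/decor"
)

func main() {
    total := int64(10 * 1024 * 1024) // 10 MiB of hypothetical work
    p := mpb.New()
    bar := p.AddBar(total,
        mpb.AppendDecorators(decor.CountersKibiByte("% .1f / % .1f")), // e.g. "1.0 MiB / 10.0 MiB"
    )
    for done := int64(0); done < total; done += 1024 * 1024 {
        time.Sleep(50 * time.Millisecond)
        bar.IncrInt64(1024 * 1024)
    }
    p.Wait()
}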
191
vendor/github.com/vbauerster/mpb/v5/decor/decorator.go
generated
vendored
@ -1,191 +0,0 @@

package decor

import (
    "fmt"
    "time"

    "github.com/acarl005/stripansi"
    "github.com/mattn/go-runewidth"
)

const (
    // DidentRight bit specifies indentation direction.
    // |foo   |b     | With DidentRight
    // |   foo|     b| Without DidentRight
    DidentRight = 1 << iota

    // DextraSpace bit adds extra space, makes sense with DSyncWidth only.
    // When DidentRight bit set, the space will be added to the right,
    // otherwise to the left.
    DextraSpace

    // DSyncWidth bit enables same column width synchronization.
    // Effective with multiple bars only.
    DSyncWidth

    // DSyncWidthR is shortcut for DSyncWidth|DidentRight
    DSyncWidthR = DSyncWidth | DidentRight

    // DSyncSpace is shortcut for DSyncWidth|DextraSpace
    DSyncSpace = DSyncWidth | DextraSpace

    // DSyncSpaceR is shortcut for DSyncWidth|DextraSpace|DidentRight
    DSyncSpaceR = DSyncWidth | DextraSpace | DidentRight
)

// TimeStyle enum.
type TimeStyle int

// TimeStyle kinds.
const (
    ET_STYLE_GO TimeStyle = iota
    ET_STYLE_HHMMSS
    ET_STYLE_HHMM
    ET_STYLE_MMSS
)

// Statistics consists of progress related statistics that a Decorator
// may need.
type Statistics struct {
    ID             int
    AvailableWidth int
    Total          int64
    Current        int64
    Refill         int64
    Completed      bool
}

// Decorator interface.
// Most of the time there is no need to implement this interface
// manually, as the decor package already provides a wide range of
// decorators which implement it. If however the built-in decorators
// don't meet your needs, you're free to implement your own by
// implementing this particular interface. The easy way to go is to
// convert a `DecorFunc` into a `Decorator` interface by using the
// provided `func Any(DecorFunc, ...WC) Decorator`.
type Decorator interface {
    Configurator
    Synchronizer
    Decor(Statistics) string
}

// DecorFunc func type.
// To be used with `func Any(DecorFunc, ...WC) Decorator`.
type DecorFunc func(Statistics) string

// Synchronizer interface.
// All decorators implement this interface implicitly. Its Sync
// method exposes the width sync channel, if the DSyncWidth bit is set.
type Synchronizer interface {
    Sync() (chan int, bool)
}

// Configurator interface.
type Configurator interface {
    GetConf() WC
    SetConf(WC)
}

// Wrapper interface.
// If you're implementing a custom Decorator by wrapping a built-in one,
// it is necessary to implement this interface to retain the functionality
// of the built-in Decorator.
type Wrapper interface {
    Base() Decorator
}

// EwmaDecorator interface.
// EWMA based decorators should implement this one.
type EwmaDecorator interface {
    EwmaUpdate(int64, time.Duration)
}

// AverageDecorator interface.
// Average decorators should implement this interface to provide a start
// time adjustment facility, for resumable tasks.
type AverageDecorator interface {
    AverageAdjust(time.Time)
}

// ShutdownListener interface.
// If a decorator needs to be notified once upon the bar shutdown event,
// this is the right interface to implement.
type ShutdownListener interface {
    Shutdown()
}

// Global convenience instances of WC with the sync width bit set.
// To be used with multiple bars only, i.e. not effective for single bar usage.
var (
    WCSyncWidth  = WC{C: DSyncWidth}
    WCSyncWidthR = WC{C: DSyncWidthR}
    WCSyncSpace  = WC{C: DSyncSpace}
    WCSyncSpaceR = WC{C: DSyncSpaceR}
)

// WC is a struct with two public fields W and C, both of int type.
// W represents width and C represents a bit set of width related config.
// A decorator should embed WC to enable width synchronization.
type WC struct {
    W     int
    C     int
    fill  func(s string, w int) string
    wsync chan int
}

// FormatMsg formats the final message according to WC.W and WC.C.
// Should be called by any Decorator implementation.
func (wc *WC) FormatMsg(msg string) string {
    pureWidth := runewidth.StringWidth(msg)
    stripWidth := runewidth.StringWidth(stripansi.Strip(msg))
    maxCell := wc.W
    if (wc.C & DSyncWidth) != 0 {
        cellCount := stripWidth
        if (wc.C & DextraSpace) != 0 {
            cellCount++
        }
        wc.wsync <- cellCount
        maxCell = <-wc.wsync
    }
    return wc.fill(msg, maxCell+(pureWidth-stripWidth))
}

// Init initializes width related config.
func (wc *WC) Init() WC {
    wc.fill = runewidth.FillLeft
    if (wc.C & DidentRight) != 0 {
        wc.fill = runewidth.FillRight
    }
    if (wc.C & DSyncWidth) != 0 {
        // it's a deliberate choice to override wsync on each Init() call,
        // this way globals like WCSyncSpace can be reused
        wc.wsync = make(chan int)
    }
    return *wc
}

// Sync is an implementation of the Synchronizer interface.
func (wc *WC) Sync() (chan int, bool) {
    if (wc.C&DSyncWidth) != 0 && wc.wsync == nil {
        panic(fmt.Sprintf("%T is not initialized", wc))
    }
    return wc.wsync, (wc.C & DSyncWidth) != 0
}

// GetConf is an implementation of the Configurator interface.
func (wc *WC) GetConf() WC {
    return *wc
}

// SetConf is an implementation of the Configurator interface.
func (wc *WC) SetConf(conf WC) {
    *wc = conf.Init()
}

func initWC(wcc ...WC) WC {
    var wc WC
    for _, nwc := range wcc {
        wc = nwc
    }
    return wc.Init()
}
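
A minimal sketch of a custom decorator built the way this interface intends: embed WC for the Configurator/Synchronizer half and implement Decor. The ratio type and its text format are hypothetical:

package main

import (
    "fmt"

    "github.com/vbauerster/mpb/v5"
    "github.com/vbauerster/mpb/v5/decor"
)

// ratio is a hypothetical custom decorator; embedding decor.WC provides
// GetConf/SetConf and Sync, so only Decor remains to implement.
type ratio struct {
    decor.WC
}

func (d *ratio) Decor(s decor.Statistics) string {
    // FormatMsg applies the width and sync configuration to the final text.
    return d.FormatMsg(fmt.Sprintf("%d of %d", s.Current, s.Total))
}

func newRatio(wc decor.WC) decor.Decorator {
    return &ratio{WC: wc.Init()} // Init wires up the width-sync internals
}

func main() {
    p := mpb.New()
    bar := p.AddBar(4, mpb.PrependDecorators(newRatio(decor.WCSyncSpace)))
    for i := 0; i < 4; i++ {
        bar.Increment()
    }
    p.Wait()
}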
21
vendor/github.com/vbauerster/mpb/v5/decor/doc.go
generated
vendored
@ -1,21 +0,0 @@

/*
Package decor provides common decorators for "github.com/vbauerster/mpb/v5" module.

Some decorators returned by this package might have a closure state. It is ok to use
decorators concurrently, unless you share the same decorator among multiple
*mpb.Bar instances. To avoid data races, create a new decorator per *mpb.Bar instance.

Don't:

    p := mpb.New()
    name := decor.Name("bar")
    p.AddBar(100, mpb.AppendDecorators(name))
    p.AddBar(100, mpb.AppendDecorators(name))

Do:

    p := mpb.New()
    p.AddBar(100, mpb.AppendDecorators(decor.Name("bar1")))
    p.AddBar(100, mpb.AppendDecorators(decor.Name("bar2")))
*/
package decor
35
vendor/github.com/vbauerster/mpb/v5/decor/elapsed.go
generated
vendored
@ -1,35 +0,0 @@

package decor

import (
    "time"
)

// Elapsed decorator. It's a wrapper of NewElapsed.
//
// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS]
//
// `wcc` optional WC config
//
func Elapsed(style TimeStyle, wcc ...WC) Decorator {
    return NewElapsed(style, time.Now(), wcc...)
}

// NewElapsed returns elapsed time decorator.
//
// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS]
//
// `startTime` start time
//
// `wcc` optional WC config
//
func NewElapsed(style TimeStyle, startTime time.Time, wcc ...WC) Decorator {
    var msg string
    producer := chooseTimeProducer(style)
    fn := func(s Statistics) string {
        if !s.Completed {
            msg = producer(time.Since(startTime))
        }
        return msg
    }
    return Any(fn, wcc...)
}
203
vendor/github.com/vbauerster/mpb/v5/decor/eta.go
generated
vendored
@ -1,203 +0,0 @@

package decor

import (
    "fmt"
    "math"
    "time"

    "github.com/VividCortex/ewma"
)

// TimeNormalizer interface. Implementations can be passed into
// MovingAverageETA in order to adjust, i.e. normalize, its output.
type TimeNormalizer interface {
    Normalize(time.Duration) time.Duration
}

// TimeNormalizerFunc is a function type adapter to convert a function
// into a TimeNormalizer.
type TimeNormalizerFunc func(time.Duration) time.Duration

func (f TimeNormalizerFunc) Normalize(src time.Duration) time.Duration {
    return f(src)
}

// EwmaETA is an exponential-weighted-moving-average based ETA decorator.
// For this decorator to work correctly you have to measure each
// iteration's duration and pass it to the
// *Bar.DecoratorEwmaUpdate(time.Duration) method after each increment.
func EwmaETA(style TimeStyle, age float64, wcc ...WC) Decorator {
    var average ewma.MovingAverage
    if age == 0 {
        average = ewma.NewMovingAverage()
    } else {
        average = ewma.NewMovingAverage(age)
    }
    return MovingAverageETA(style, NewThreadSafeMovingAverage(average), nil, wcc...)
}

// MovingAverageETA decorator relies on a MovingAverage implementation to calculate its average.
//
// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS]
//
// `average` implementation of MovingAverage interface
//
// `normalizer` available implementations are [FixedIntervalTimeNormalizer|MaxTolerateTimeNormalizer]
//
// `wcc` optional WC config
//
func MovingAverageETA(style TimeStyle, average ewma.MovingAverage, normalizer TimeNormalizer, wcc ...WC) Decorator {
    d := &movingAverageETA{
        WC:         initWC(wcc...),
        average:    average,
        normalizer: normalizer,
        producer:   chooseTimeProducer(style),
    }
    return d
}

type movingAverageETA struct {
    WC
    average    ewma.MovingAverage
    normalizer TimeNormalizer
    producer   func(time.Duration) string
}

func (d *movingAverageETA) Decor(s Statistics) string {
    v := math.Round(d.average.Value())
    remaining := time.Duration((s.Total - s.Current) * int64(v))
    if d.normalizer != nil {
        remaining = d.normalizer.Normalize(remaining)
    }
    return d.FormatMsg(d.producer(remaining))
}

func (d *movingAverageETA) EwmaUpdate(n int64, dur time.Duration) {
    durPerItem := float64(dur) / float64(n)
    if math.IsInf(durPerItem, 0) || math.IsNaN(durPerItem) {
        return
    }
    d.average.Add(durPerItem)
}

// AverageETA decorator. It's a wrapper of NewAverageETA.
//
// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS]
//
// `wcc` optional WC config
//
func AverageETA(style TimeStyle, wcc ...WC) Decorator {
    return NewAverageETA(style, time.Now(), nil, wcc...)
}

// NewAverageETA decorator with user provided start time.
//
// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS]
//
// `startTime` start time
//
// `normalizer` available implementations are [FixedIntervalTimeNormalizer|MaxTolerateTimeNormalizer]
//
// `wcc` optional WC config
//
func NewAverageETA(style TimeStyle, startTime time.Time, normalizer TimeNormalizer, wcc ...WC) Decorator {
    d := &averageETA{
        WC:         initWC(wcc...),
        startTime:  startTime,
        normalizer: normalizer,
        producer:   chooseTimeProducer(style),
    }
    return d
}

type averageETA struct {
    WC
    startTime  time.Time
    normalizer TimeNormalizer
    producer   func(time.Duration) string
}

func (d *averageETA) Decor(s Statistics) string {
    var remaining time.Duration
    if s.Current != 0 {
        durPerItem := float64(time.Since(d.startTime)) / float64(s.Current)
        durPerItem = math.Round(durPerItem)
        remaining = time.Duration((s.Total - s.Current) * int64(durPerItem))
        if d.normalizer != nil {
            remaining = d.normalizer.Normalize(remaining)
        }
    }
    return d.FormatMsg(d.producer(remaining))
}

func (d *averageETA) AverageAdjust(startTime time.Time) {
    d.startTime = startTime
}

// MaxTolerateTimeNormalizer returns an implementation of TimeNormalizer.
func MaxTolerateTimeNormalizer(maxTolerate time.Duration) TimeNormalizer {
    var normalized time.Duration
    var lastCall time.Time
    return TimeNormalizerFunc(func(remaining time.Duration) time.Duration {
        if diff := normalized - remaining; diff <= 0 || diff > maxTolerate || remaining < time.Minute {
            normalized = remaining
            lastCall = time.Now()
            return remaining
        }
        normalized -= time.Since(lastCall)
        lastCall = time.Now()
        return normalized
    })
}

// FixedIntervalTimeNormalizer returns an implementation of TimeNormalizer.
func FixedIntervalTimeNormalizer(updInterval int) TimeNormalizer {
    var normalized time.Duration
    var lastCall time.Time
    var count int
    return TimeNormalizerFunc(func(remaining time.Duration) time.Duration {
        if count == 0 || remaining < time.Minute {
            count = updInterval
            normalized = remaining
            lastCall = time.Now()
            return remaining
        }
        count--
        normalized -= time.Since(lastCall)
        lastCall = time.Now()
        return normalized
    })
}

func chooseTimeProducer(style TimeStyle) func(time.Duration) string {
    switch style {
    case ET_STYLE_HHMMSS:
        return func(remaining time.Duration) string {
            hours := int64(remaining/time.Hour) % 60
            minutes := int64(remaining/time.Minute) % 60
            seconds := int64(remaining/time.Second) % 60
            return fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
        }
    case ET_STYLE_HHMM:
        return func(remaining time.Duration) string {
            hours := int64(remaining/time.Hour) % 60
            minutes := int64(remaining/time.Minute) % 60
            return fmt.Sprintf("%02d:%02d", hours, minutes)
        }
    case ET_STYLE_MMSS:
        return func(remaining time.Duration) string {
            hours := int64(remaining/time.Hour) % 60
            minutes := int64(remaining/time.Minute) % 60
            seconds := int64(remaining/time.Second) % 60
            if hours > 0 {
                return fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
            }
            return fmt.Sprintf("%02d:%02d", minutes, seconds)
        }
    default:
        return func(remaining time.Duration) string {
            // strip off nanoseconds
            return ((remaining / time.Second) * time.Second).String()
        }
    }
}
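
A usage sketch matching the EwmaETA doc comment above: measure each iteration and feed the duration to *Bar.DecoratorEwmaUpdate. The random sleeps stand in for real work:

package main

import (
    "math/rand"
    "time"

    "github.com/vbauerster/mpb/v5"
    "github.com/vbauerster/mpb/v5/decor"
)

func main() {
    p := mpb.New()
    bar := p.AddBar(50,
        mpb.AppendDecorators(decor.EwmaETA(decor.ET_STYLE_GO, 60)),
    )
    for i := 0; i < 50; i++ {
        start := time.Now()
        time.Sleep(time.Duration(rand.Intn(40)+10) * time.Millisecond) // one unit of work
        bar.Increment()
        // feed the measured iteration duration into the EWMA
        bar.DecoratorEwmaUpdate(time.Since(start))
    }
    p.Wait()
}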
107
vendor/github.com/vbauerster/mpb/v5/decor/merge.go
generated
vendored
@ -1,107 +0,0 @@

package decor

import (
    "strings"

    "github.com/acarl005/stripansi"
    "github.com/mattn/go-runewidth"
)

// Merge wraps its decorator argument with the intention to sync width
// with several decorators of another bar. Visual example:
//
//    +----+--------+---------+--------+
//    | B1 |      MERGE(D, P1, Pn)     |
//    +----+--------+---------+--------+
//    | B2 |   D0   |    D1   |   Dn   |
//    +----+--------+---------+--------+
//
func Merge(decorator Decorator, placeholders ...WC) Decorator {
    if _, ok := decorator.Sync(); !ok || len(placeholders) == 0 {
        return decorator
    }
    md := &mergeDecorator{
        Decorator:    decorator,
        wc:           decorator.GetConf(),
        placeHolders: make([]*placeHolderDecorator, len(placeholders)),
    }
    decorator.SetConf(WC{})
    for i, wc := range placeholders {
        if (wc.C & DSyncWidth) == 0 {
            return decorator
        }
        md.placeHolders[i] = &placeHolderDecorator{wc.Init()}
    }
    return md
}

type mergeDecorator struct {
    Decorator
    wc           WC
    placeHolders []*placeHolderDecorator
}

func (d *mergeDecorator) GetConf() WC {
    return d.wc
}

func (d *mergeDecorator) SetConf(conf WC) {
    d.wc = conf.Init()
}

func (d *mergeDecorator) MergeUnwrap() []Decorator {
    decorators := make([]Decorator, len(d.placeHolders))
    for i, ph := range d.placeHolders {
        decorators[i] = ph
    }
    return decorators
}

func (d *mergeDecorator) Sync() (chan int, bool) {
    return d.wc.Sync()
}

func (d *mergeDecorator) Base() Decorator {
    return d.Decorator
}

func (d *mergeDecorator) Decor(s Statistics) string {
    msg := d.Decorator.Decor(s)
    pureWidth := runewidth.StringWidth(msg)
    stripWidth := runewidth.StringWidth(stripansi.Strip(msg))
    cellCount := stripWidth
    if (d.wc.C & DextraSpace) != 0 {
        cellCount++
    }

    total := runewidth.StringWidth(d.placeHolders[0].FormatMsg(""))
    pw := (cellCount - total) / len(d.placeHolders)
    rem := (cellCount - total) % len(d.placeHolders)

    var diff int
    for i := 1; i < len(d.placeHolders); i++ {
        ph := d.placeHolders[i]
        width := pw - diff
        if (ph.WC.C & DextraSpace) != 0 {
            width--
            if width < 0 {
                width = 0
            }
        }
        max := runewidth.StringWidth(ph.FormatMsg(strings.Repeat(" ", width)))
        total += max
        diff = max - pw
    }

    d.wc.wsync <- pw + rem
    max := <-d.wc.wsync
    return d.wc.fill(msg, max+total+(pureWidth-stripWidth))
}

type placeHolderDecorator struct {
    WC
}

func (d *placeHolderDecorator) Decor(Statistics) string {
    return ""
}
68
vendor/github.com/vbauerster/mpb/v5/decor/moving_average.go
generated
vendored
@ -1,68 +0,0 @@

package decor

import (
    "sort"
    "sync"

    "github.com/VividCortex/ewma"
)

type threadSafeMovingAverage struct {
    ewma.MovingAverage
    mu sync.Mutex
}

func (s *threadSafeMovingAverage) Add(value float64) {
    s.mu.Lock()
    s.MovingAverage.Add(value)
    s.mu.Unlock()
}

func (s *threadSafeMovingAverage) Value() float64 {
    s.mu.Lock()
    defer s.mu.Unlock()
    return s.MovingAverage.Value()
}

func (s *threadSafeMovingAverage) Set(value float64) {
    s.mu.Lock()
    s.MovingAverage.Set(value)
    s.mu.Unlock()
}

// NewThreadSafeMovingAverage converts a provided ewma.MovingAverage
// into a thread safe ewma.MovingAverage.
func NewThreadSafeMovingAverage(average ewma.MovingAverage) ewma.MovingAverage {
    if tsma, ok := average.(*threadSafeMovingAverage); ok {
        return tsma
    }
    return &threadSafeMovingAverage{MovingAverage: average}
}

type medianWindow [3]float64

func (s *medianWindow) Len() int           { return len(s) }
func (s *medianWindow) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s *medianWindow) Less(i, j int) bool { return s[i] < s[j] }

func (s *medianWindow) Add(value float64) {
    s[0], s[1] = s[1], s[2]
    s[2] = value
}

func (s *medianWindow) Value() float64 {
    tmp := *s
    sort.Sort(&tmp)
    return tmp[1]
}

func (s *medianWindow) Set(value float64) {
    for i := 0; i < len(s); i++ {
        s[i] = value
    }
}

// NewMedian is a fixed, last-3-samples median MovingAverage.
func NewMedian() ewma.MovingAverage {
    return NewThreadSafeMovingAverage(new(medianWindow))
}
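
A tiny sketch of NewMedian; with samples 3, 1, 9 the reported value is the median of the last three, i.e. 3:

package main

import (
    "fmt"

    "github.com/vbauerster/mpb/v5/decor"
)

func main() {
    m := decor.NewMedian()
    for _, v := range []float64{3, 1, 9} {
        m.Add(v)
    }
    fmt.Println(m.Value()) // 3: the median of the last three samples
}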
12
vendor/github.com/vbauerster/mpb/v5/decor/name.go
generated
vendored
@ -1,12 +0,0 @@

package decor

// Name decorator displays text that is set once and can't be changed
// during decorator's lifetime.
//
// `str` string to display
//
// `wcc` optional WC config
//
func Name(str string, wcc ...WC) Decorator {
    return Any(func(Statistics) string { return str }, wcc...)
}
37
vendor/github.com/vbauerster/mpb/v5/decor/on_complete.go
generated
vendored
@ -1,37 +0,0 @@

package decor

// OnComplete returns a decorator which wraps the provided decorator with
// the sole purpose of displaying the provided message on the complete event.
//
// `decorator` Decorator to wrap
//
// `message` message to display on complete event
//
func OnComplete(decorator Decorator, message string) Decorator {
    d := &onCompleteWrapper{
        Decorator: decorator,
        msg:       message,
    }
    if md, ok := decorator.(*mergeDecorator); ok {
        d.Decorator, md.Decorator = md.Decorator, d
        return md
    }
    return d
}

type onCompleteWrapper struct {
    Decorator
    msg string
}

func (d *onCompleteWrapper) Decor(s Statistics) string {
    if s.Completed {
        wc := d.GetConf()
        return wc.FormatMsg(d.msg)
    }
    return d.Decorator.Decor(s)
}

func (d *onCompleteWrapper) Base() Decorator {
    return d.Decorator
}
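
A short sketch of OnComplete wrapping an ETA decorator; once Statistics.Completed is true the wrapped output is replaced by the fixed message:

package main

import (
    "time"

    "github.com/vbauerster/mpb/v5"
    "github.com/vbauerster/mpb/v5/decor"
)

func main() {
    p := mpb.New()
    bar := p.AddBar(10, mpb.AppendDecorators(
        // shows the ETA while running, then "done" once the bar completes
        decor.OnComplete(decor.EwmaETA(decor.ET_STYLE_GO, 60), "done"),
    ))
    for i := 0; i < 10; i++ {
        start := time.Now()
        time.Sleep(100 * time.Millisecond)
        bar.Increment()
        bar.DecoratorEwmaUpdate(time.Since(start))
    }
    p.Wait()
}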
58
vendor/github.com/vbauerster/mpb/v5/decor/percentage.go
generated
vendored
@ -1,58 +0,0 @@

package decor

import (
    "fmt"
    "io"
    "strconv"

    "github.com/vbauerster/mpb/v5/internal"
)

type percentageType float64

func (s percentageType) Format(st fmt.State, verb rune) {
    var prec int
    switch verb {
    case 'd':
    case 's':
        prec = -1
    default:
        if p, ok := st.Precision(); ok {
            prec = p
        } else {
            prec = 6
        }
    }

    io.WriteString(st, strconv.FormatFloat(float64(s), 'f', prec, 64))

    if st.Flag(' ') {
        io.WriteString(st, " ")
    }
    io.WriteString(st, "%")
}

// Percentage returns percentage decorator. It's a wrapper of NewPercentage.
func Percentage(wcc ...WC) Decorator {
    return NewPercentage("% d", wcc...)
}

// NewPercentage percentage decorator with custom format string.
//
// format examples:
//
//    format="%.1f"  output: "1.0%"
//    format="% .1f" output: "1.0 %"
//    format="%d"    output: "1%"
//    format="% d"   output: "1 %"
//
func NewPercentage(format string, wcc ...WC) Decorator {
    if format == "" {
        format = "% d"
    }
    f := func(s Statistics) string {
        p := internal.Percentage(s.Total, s.Current, 100)
        return fmt.Sprintf(format, percentageType(p))
    }
    return Any(f, wcc...)
}
109
vendor/github.com/vbauerster/mpb/v5/decor/size_type.go
generated
vendored
@ -1,109 +0,0 @@

package decor

import (
    "fmt"
    "io"
    "math"
    "strconv"
)

//go:generate stringer -type=SizeB1024 -trimprefix=_i
//go:generate stringer -type=SizeB1000 -trimprefix=_

const (
    _ib   SizeB1024 = iota + 1
    _iKiB SizeB1024 = 1 << (iota * 10)
    _iMiB
    _iGiB
    _iTiB
)

// SizeB1024 is a named type which implements the fmt.Formatter interface.
// It adjusts its value according to the byte size multiple of 1024 and
// appends the appropriate size marker (KiB, MiB, GiB, TiB).
type SizeB1024 int64

func (self SizeB1024) Format(st fmt.State, verb rune) {
    var prec int
    switch verb {
    case 'd':
    case 's':
        prec = -1
    default:
        if p, ok := st.Precision(); ok {
            prec = p
        } else {
            prec = 6
        }
    }

    var unit SizeB1024
    switch {
    case self < _iKiB:
        unit = _ib
    case self < _iMiB:
        unit = _iKiB
    case self < _iGiB:
        unit = _iMiB
    case self < _iTiB:
        unit = _iGiB
    case self <= math.MaxInt64:
        unit = _iTiB
    }

    io.WriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64))

    if st.Flag(' ') {
        io.WriteString(st, " ")
    }
    io.WriteString(st, unit.String())
}

const (
    _b  SizeB1000 = 1
    _KB SizeB1000 = _b * 1000
    _MB SizeB1000 = _KB * 1000
    _GB SizeB1000 = _MB * 1000
    _TB SizeB1000 = _GB * 1000
)

// SizeB1000 is a named type which implements the fmt.Formatter interface.
// It adjusts its value according to the byte size multiple of 1000 and
// appends the appropriate size marker (KB, MB, GB, TB).
type SizeB1000 int64

func (self SizeB1000) Format(st fmt.State, verb rune) {
    var prec int
    switch verb {
    case 'd':
    case 's':
        prec = -1
    default:
        if p, ok := st.Precision(); ok {
            prec = p
        } else {
            prec = 6
        }
    }

    var unit SizeB1000
    switch {
    case self < _KB:
        unit = _b
    case self < _MB:
        unit = _KB
    case self < _GB:
        unit = _MB
    case self < _TB:
        unit = _GB
    case self <= math.MaxInt64:
        unit = _TB
    }

    io.WriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64))

    if st.Flag(' ') {
        io.WriteString(st, " ")
    }
    io.WriteString(st, unit.String())
}
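
A quick sketch of the two size types' fmt.Formatter behavior; expected outputs are given in the comments:

package main

import (
    "fmt"

    "github.com/vbauerster/mpb/v5/decor"
)

func main() {
    fmt.Printf("%.1f\n", decor.SizeB1024(2048))  // "2.0KiB" (1024 multiples)
    fmt.Printf("% .2f\n", decor.SizeB1000(2048)) // "2.05 KB" (1000 multiples; ' ' flag adds a space)
    fmt.Printf("%d\n", decor.SizeB1024(512))     // "512b" ('d' verb means zero precision)
}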
41
vendor/github.com/vbauerster/mpb/v5/decor/sizeb1000_string.go
generated
vendored
@ -1,41 +0,0 @@

// Code generated by "stringer -type=SizeB1000 -trimprefix=_"; DO NOT EDIT.

package decor

import "strconv"

func _() {
    // An "invalid array index" compiler error signifies that the constant values have changed.
    // Re-run the stringer command to generate them again.
    var x [1]struct{}
    _ = x[_b-1]
    _ = x[_KB-1000]
    _ = x[_MB-1000000]
    _ = x[_GB-1000000000]
    _ = x[_TB-1000000000000]
}

const (
    _SizeB1000_name_0 = "b"
    _SizeB1000_name_1 = "KB"
    _SizeB1000_name_2 = "MB"
    _SizeB1000_name_3 = "GB"
    _SizeB1000_name_4 = "TB"
)

func (i SizeB1000) String() string {
    switch {
    case i == 1:
        return _SizeB1000_name_0
    case i == 1000:
        return _SizeB1000_name_1
    case i == 1000000:
        return _SizeB1000_name_2
    case i == 1000000000:
        return _SizeB1000_name_3
    case i == 1000000000000:
        return _SizeB1000_name_4
    default:
        return "SizeB1000(" + strconv.FormatInt(int64(i), 10) + ")"
    }
}

41
vendor/github.com/vbauerster/mpb/v5/decor/sizeb1024_string.go
generated
vendored
@ -1,41 +0,0 @@

// Code generated by "stringer -type=SizeB1024 -trimprefix=_i"; DO NOT EDIT.

package decor

import "strconv"

func _() {
    // An "invalid array index" compiler error signifies that the constant values have changed.
    // Re-run the stringer command to generate them again.
    var x [1]struct{}
    _ = x[_ib-1]
    _ = x[_iKiB-1024]
    _ = x[_iMiB-1048576]
    _ = x[_iGiB-1073741824]
    _ = x[_iTiB-1099511627776]
}

const (
    _SizeB1024_name_0 = "b"
    _SizeB1024_name_1 = "KiB"
    _SizeB1024_name_2 = "MiB"
    _SizeB1024_name_3 = "GiB"
    _SizeB1024_name_4 = "TiB"
)

func (i SizeB1024) String() string {
    switch {
    case i == 1:
        return _SizeB1024_name_0
    case i == 1024:
        return _SizeB1024_name_1
    case i == 1048576:
        return _SizeB1024_name_2
    case i == 1073741824:
        return _SizeB1024_name_3
    case i == 1099511627776:
        return _SizeB1024_name_4
    default:
        return "SizeB1024(" + strconv.FormatInt(int64(i), 10) + ")"
    }
}
171
vendor/github.com/vbauerster/mpb/v5/decor/speed.go
generated
vendored
@ -1,171 +0,0 @@

package decor

import (
    "fmt"
    "io"
    "math"
    "time"

    "github.com/VividCortex/ewma"
)

// FmtAsSpeed adds "/s" to the end of the input formatter. To be
// used with SizeB1000 or SizeB1024 types, for example:
//
//    fmt.Printf("%.1f", FmtAsSpeed(SizeB1024(2048)))
//
func FmtAsSpeed(input fmt.Formatter) fmt.Formatter {
    return &speedFormatter{input}
}

type speedFormatter struct {
    fmt.Formatter
}

func (self *speedFormatter) Format(st fmt.State, verb rune) {
    self.Formatter.Format(st, verb)
    io.WriteString(st, "/s")
}

// EwmaSpeed is an exponential-weighted-moving-average based speed decorator.
// For this decorator to work correctly you have to measure each
// iteration's duration and pass it to the
// *Bar.DecoratorEwmaUpdate(time.Duration) method after each increment.
func EwmaSpeed(unit int, format string, age float64, wcc ...WC) Decorator {
    var average ewma.MovingAverage
    if age == 0 {
        average = ewma.NewMovingAverage()
    } else {
        average = ewma.NewMovingAverage(age)
    }
    return MovingAverageSpeed(unit, format, NewThreadSafeMovingAverage(average), wcc...)
}

// MovingAverageSpeed decorator relies on a MovingAverage implementation
// to calculate its average.
//
// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
//
// `format` printf compatible verb for value, like "%f" or "%d"
//
// `average` MovingAverage implementation
//
// `wcc` optional WC config
//
// format examples:
//
//    unit=UnitKiB, format="%.1f"  output: "1.0MiB/s"
//    unit=UnitKiB, format="% .1f" output: "1.0 MiB/s"
//    unit=UnitKB,  format="%.1f"  output: "1.0MB/s"
//    unit=UnitKB,  format="% .1f" output: "1.0 MB/s"
//
func MovingAverageSpeed(unit int, format string, average ewma.MovingAverage, wcc ...WC) Decorator {
    if format == "" {
        format = "%.0f"
    }
    d := &movingAverageSpeed{
        WC:       initWC(wcc...),
        average:  average,
        producer: chooseSpeedProducer(unit, format),
    }
    return d
}

type movingAverageSpeed struct {
    WC
    producer func(float64) string
    average  ewma.MovingAverage
    msg      string
}

func (d *movingAverageSpeed) Decor(s Statistics) string {
    if !s.Completed {
        var speed float64
        if v := d.average.Value(); v > 0 {
            speed = 1 / v
        }
        d.msg = d.producer(speed * 1e9)
    }
    return d.FormatMsg(d.msg)
}

func (d *movingAverageSpeed) EwmaUpdate(n int64, dur time.Duration) {
    durPerByte := float64(dur) / float64(n)
    if math.IsInf(durPerByte, 0) || math.IsNaN(durPerByte) {
        return
    }
    d.average.Add(durPerByte)
}

// AverageSpeed decorator with dynamic unit measure adjustment. It's
// a wrapper of NewAverageSpeed.
func AverageSpeed(unit int, format string, wcc ...WC) Decorator {
    return NewAverageSpeed(unit, format, time.Now(), wcc...)
}

// NewAverageSpeed decorator with dynamic unit measure adjustment and
// user provided start time.
//
// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
//
// `format` printf compatible verb for value, like "%f" or "%d"
//
// `startTime` start time
//
// `wcc` optional WC config
//
// format examples:
//
//    unit=UnitKiB, format="%.1f"  output: "1.0MiB/s"
//    unit=UnitKiB, format="% .1f" output: "1.0 MiB/s"
//    unit=UnitKB,  format="%.1f"  output: "1.0MB/s"
//    unit=UnitKB,  format="% .1f" output: "1.0 MB/s"
//
func NewAverageSpeed(unit int, format string, startTime time.Time, wcc ...WC) Decorator {
    if format == "" {
        format = "%.0f"
    }
    d := &averageSpeed{
        WC:        initWC(wcc...),
        startTime: startTime,
        producer:  chooseSpeedProducer(unit, format),
    }
    return d
}

type averageSpeed struct {
    WC
    startTime time.Time
    producer  func(float64) string
    msg       string
}

func (d *averageSpeed) Decor(s Statistics) string {
    if !s.Completed {
        speed := float64(s.Current) / float64(time.Since(d.startTime))
        d.msg = d.producer(speed * 1e9)
    }

    return d.FormatMsg(d.msg)
}

func (d *averageSpeed) AverageAdjust(startTime time.Time) {
    d.startTime = startTime
}

func chooseSpeedProducer(unit int, format string) func(float64) string {
    switch unit {
    case UnitKiB:
        return func(speed float64) string {
            return fmt.Sprintf(format, FmtAsSpeed(SizeB1024(math.Round(speed))))
        }
    case UnitKB:
        return func(speed float64) string {
            return fmt.Sprintf(format, FmtAsSpeed(SizeB1000(math.Round(speed))))
        }
    default:
        return func(speed float64) string {
            return fmt.Sprintf(format, speed)
        }
    }
}
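
A one-line sketch of FmtAsSpeed, mirroring the doc comment above:

package main

import (
    "fmt"

    "github.com/vbauerster/mpb/v5/decor"
)

func main() {
    // FmtAsSpeed simply appends "/s" after the wrapped formatter's output.
    fmt.Printf("%.1f\n", decor.FmtAsSpeed(decor.SizeB1024(2048))) // "2.0KiB/s"
}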
21
vendor/github.com/vbauerster/mpb/v5/decor/spinner.go
generated
vendored
@ -1,21 +0,0 @@

package decor

var defaultSpinnerStyle = []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}

// Spinner returns spinner decorator.
//
// `frames` spinner frames, if nil or len==0, default is used
//
// `wcc` optional WC config
func Spinner(frames []string, wcc ...WC) Decorator {
    if len(frames) == 0 {
        frames = defaultSpinnerStyle
    }
    var count uint
    f := func(s Statistics) string {
        frame := frames[count%uint(len(frames))]
        count++
        return frame
    }
    return Any(f, wcc...)
}
2
vendor/github.com/vbauerster/mpb/v5/doc.go
generated
vendored
@ -1,2 +0,0 @@

// Package mpb is a library for rendering progress bars in terminal applications.
package mpb

10
vendor/github.com/vbauerster/mpb/v5/go.mod
generated
vendored
@ -1,10 +0,0 @@

module github.com/vbauerster/mpb/v5

require (
    github.com/VividCortex/ewma v1.1.1
    github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
    github.com/mattn/go-runewidth v0.0.9
    golang.org/x/sys v0.0.0-20201218084310-7d0127a74742
)

go 1.14

8
vendor/github.com/vbauerster/mpb/v5/go.sum
generated
vendored
@ -1,8 +0,0 @@

github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
golang.org/x/sys v0.0.0-20201218084310-7d0127a74742 h1:+CBz4km/0KPU3RGTwARGh/noP3bEwtHcq+0YcBQM2JQ=
golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
18 vendor/github.com/vbauerster/mpb/v5/internal/percentage.go generated vendored
@ -1,18 +0,0 @@
package internal

import "math"

// Percentage is a helper function to calculate percentage.
func Percentage(total, current int64, width int) float64 {
	if total <= 0 {
		return 0
	}
	if current >= total {
		return float64(width)
	}
	return float64(int64(width)*current) / float64(total)
}

// PercentageRound is like Percentage, rounded to the nearest integer.
func PercentageRound(total, current int64, width int) float64 {
	return math.Round(Percentage(total, current, width))
}
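Since this package is internal and not importable, here is the arithmetic inlined for illustration (the values are made up):

// total=200, current=50, width=80:
width, total, current := 80, int64(200), int64(50)
cells := float64(int64(width)*current) / float64(total) // 20.0 of 80 cells filled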
8 vendor/github.com/vbauerster/mpb/v5/internal/width.go generated vendored
@ -1,8 +0,0 @@
package internal

// WidthForBarFiller returns reqWidth if it is positive and fits within
// available, otherwise available.
func WidthForBarFiller(reqWidth, available int) int {
	if reqWidth <= 0 || reqWidth >= available {
		return available
	}
	return reqWidth
}
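The clamping behavior at a glance (illustrative calls only, since the package is internal):

WidthForBarFiller(0, 80)   // 80: no explicit request, use all available
WidthForBarFiller(100, 80) // 80: request exceeds available, clamp
WidthForBarFiller(40, 80)  // 40: request fits, honor it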
32 vendor/github.com/vbauerster/mpb/v5/priority_queue.go generated vendored
@ -1,32 +0,0 @@
package mpb

// A priorityQueue implements heap.Interface
type priorityQueue []*Bar

func (pq priorityQueue) Len() int { return len(pq) }

func (pq priorityQueue) Less(i, j int) bool {
	return pq[i].priority < pq[j].priority
}

func (pq priorityQueue) Swap(i, j int) {
	pq[i], pq[j] = pq[j], pq[i]
	pq[i].index = i
	pq[j].index = j
}

func (pq *priorityQueue) Push(x interface{}) {
	s := *pq
	bar := x.(*Bar)
	bar.index = len(s)
	s = append(s, bar)
	*pq = s
}

func (pq *priorityQueue) Pop() interface{} {
	s := *pq
	*pq = s[0 : len(s)-1]
	bar := s[len(s)-1]
	bar.index = -1 // for safety
	return bar
}
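The same container/heap pattern on a self-contained type, since *Bar carries unexported state (a sketch; item stands in for *Bar):

package main

import (
	"container/heap"
	"fmt"
)

type item struct {
	priority int
	index    int
}

type pq []*item

func (q pq) Len() int           { return len(q) }
func (q pq) Less(i, j int) bool { return q[i].priority < q[j].priority }
func (q pq) Swap(i, j int)      { q[i], q[j] = q[j], q[i]; q[i].index = i; q[j].index = j }

func (q *pq) Push(x interface{}) {
	it := x.(*item)
	it.index = len(*q)
	*q = append(*q, it)
}

func (q *pq) Pop() interface{} {
	s := *q
	it := s[len(s)-1]
	it.index = -1 // mark as removed, mirroring the index guard in dropBar
	*q = s[:len(s)-1]
	return it
}

func main() {
	q := &pq{}
	heap.Push(q, &item{priority: 2})
	heap.Push(q, &item{priority: 1})
	fmt.Println(heap.Pop(q).(*item).priority) // 1: lowest priority pops first
}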
390 vendor/github.com/vbauerster/mpb/v5/progress.go generated vendored
@ -1,390 +0,0 @@
package mpb

import (
	"bytes"
	"container/heap"
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"math"
	"os"
	"sync"
	"time"

	"github.com/vbauerster/mpb/v5/cwriter"
	"github.com/vbauerster/mpb/v5/decor"
)

const (
	// default RefreshRate
	prr = 120 * time.Millisecond
)

// Progress represents the container that renders progress bars
type Progress struct {
	ctx          context.Context
	uwg          *sync.WaitGroup
	cwg          *sync.WaitGroup
	bwg          *sync.WaitGroup
	operateState chan func(*pState)
	done         chan struct{}
	refreshCh    chan time.Time
	once         sync.Once
	dlogger      *log.Logger
}

type pState struct {
	bHeap            priorityQueue
	heapUpdated      bool
	pMatrix          map[int][]chan int
	aMatrix          map[int][]chan int
	barShutdownQueue []*Bar

	// following are provided/overridden by user
	idCount          int
	reqWidth         int
	popCompleted     bool
	rr               time.Duration
	uwg              *sync.WaitGroup
	refreshSrc       <-chan time.Time
	renderDelay      <-chan struct{}
	shutdownNotifier chan struct{}
	parkedBars       map[*Bar]*Bar
	output           io.Writer
	debugOut         io.Writer
}

// New creates new Progress container instance. It's not possible to
// reuse instance after *Progress.Wait() method has been called.
func New(options ...ContainerOption) *Progress {
	return NewWithContext(context.Background(), options...)
}

// NewWithContext creates new Progress container instance with provided
// context. It's not possible to reuse instance after *Progress.Wait()
// method has been called.
func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress {
	s := &pState{
		bHeap:      priorityQueue{},
		rr:         prr,
		parkedBars: make(map[*Bar]*Bar),
		output:     os.Stdout,
		debugOut:   ioutil.Discard,
	}

	for _, opt := range options {
		if opt != nil {
			opt(s)
		}
	}

	p := &Progress{
		ctx:          ctx,
		uwg:          s.uwg,
		cwg:          new(sync.WaitGroup),
		bwg:          new(sync.WaitGroup),
		operateState: make(chan func(*pState)),
		done:         make(chan struct{}),
		dlogger:      log.New(s.debugOut, "[mpb] ", log.Lshortfile),
	}

	p.cwg.Add(1)
	go p.serve(s, cwriter.New(s.output))
	return p
}

// AddBar creates a new progress bar and adds it to the rendering queue.
func (p *Progress) AddBar(total int64, options ...BarOption) *Bar {
	return p.Add(total, NewBarFiller(DefaultBarStyle, false), options...)
}

// AddSpinner creates a new spinner bar and adds it to the rendering queue.
func (p *Progress) AddSpinner(total int64, alignment SpinnerAlignment, options ...BarOption) *Bar {
	return p.Add(total, NewSpinnerFiller(DefaultSpinnerStyle, alignment), options...)
}

// Add creates a bar which renders itself by provided filler.
// Set total to 0, if you plan to update it later.
// Panics if *Progress instance is done, i.e. called after *Progress.Wait().
func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) *Bar {
	if filler == nil {
		filler = BarFillerFunc(func(io.Writer, int, decor.Statistics) {})
	}
	p.bwg.Add(1)
	result := make(chan *Bar)
	select {
	case p.operateState <- func(ps *pState) {
		bs := ps.makeBarState(total, filler, options...)
		bar := newBar(p, bs)
		if bs.runningBar != nil {
			bs.runningBar.noPop = true
			ps.parkedBars[bs.runningBar] = bar
		} else {
			heap.Push(&ps.bHeap, bar)
			ps.heapUpdated = true
		}
		ps.idCount++
		result <- bar
	}:
		bar := <-result
		bar.subscribeDecorators()
		return bar
	case <-p.done:
		p.bwg.Done()
		panic(fmt.Sprintf("%T instance can't be reused after it's done!", p))
	}
}

func (p *Progress) dropBar(b *Bar) {
	select {
	case p.operateState <- func(s *pState) {
		if b.index < 0 {
			return
		}
		heap.Remove(&s.bHeap, b.index)
		s.heapUpdated = true
	}:
	case <-p.done:
	}
}

func (p *Progress) setBarPriority(b *Bar, priority int) {
	select {
	case p.operateState <- func(s *pState) {
		if b.index < 0 {
			return
		}
		b.priority = priority
		heap.Fix(&s.bHeap, b.index)
	}:
	case <-p.done:
	}
}

// UpdateBarPriority is the same as *Bar.SetPriority(int).
func (p *Progress) UpdateBarPriority(b *Bar, priority int) {
	p.setBarPriority(b, priority)
}

// BarCount returns the number of bars.
func (p *Progress) BarCount() int {
	result := make(chan int, 1)
	select {
	case p.operateState <- func(s *pState) { result <- s.bHeap.Len() }:
		return <-result
	case <-p.done:
		return 0
	}
}

// Wait waits for all bars to complete and finally shuts down the container.
// After this method has been called, there is no way to reuse *Progress
// instance.
func (p *Progress) Wait() {
	if p.uwg != nil {
		// wait for user wg
		p.uwg.Wait()
	}

	// wait for bars to quit, if any
	p.bwg.Wait()

	p.once.Do(p.shutdown)

	// wait for container to quit
	p.cwg.Wait()
}

func (p *Progress) shutdown() {
	close(p.done)
}

func (p *Progress) serve(s *pState, cw *cwriter.Writer) {
	defer p.cwg.Done()

	p.refreshCh = s.newTicker(p.done)

	for {
		select {
		case op := <-p.operateState:
			op(s)
		case <-p.refreshCh:
			if err := s.render(cw); err != nil {
				p.dlogger.Println(err)
			}
		case <-s.shutdownNotifier:
			if s.heapUpdated {
				if err := s.render(cw); err != nil {
					p.dlogger.Println(err)
				}
			}
			return
		}
	}
}

func (s *pState) newTicker(done <-chan struct{}) chan time.Time {
	ch := make(chan time.Time)
	if s.shutdownNotifier == nil {
		s.shutdownNotifier = make(chan struct{})
	}
	go func() {
		if s.renderDelay != nil {
			<-s.renderDelay
		}
		if s.refreshSrc == nil {
			ticker := time.NewTicker(s.rr)
			defer ticker.Stop()
			s.refreshSrc = ticker.C
		}
		for {
			select {
			case tick := <-s.refreshSrc:
				ch <- tick
			case <-done:
				close(s.shutdownNotifier)
				return
			}
		}
	}()
	return ch
}

func (s *pState) render(cw *cwriter.Writer) error {
	if s.heapUpdated {
		s.updateSyncMatrix()
		s.heapUpdated = false
	}
	syncWidth(s.pMatrix)
	syncWidth(s.aMatrix)

	tw, err := cw.GetWidth()
	if err != nil {
		tw = s.reqWidth
	}
	for i := 0; i < s.bHeap.Len(); i++ {
		bar := s.bHeap[i]
		go bar.render(tw)
	}

	return s.flush(cw)
}

func (s *pState) flush(cw *cwriter.Writer) error {
	var lineCount int
	bm := make(map[*Bar]struct{}, s.bHeap.Len())
	for s.bHeap.Len() > 0 {
		b := heap.Pop(&s.bHeap).(*Bar)
		cw.ReadFrom(<-b.frameCh)
		if b.toShutdown {
			if b.recoveredPanic != nil {
				s.barShutdownQueue = append(s.barShutdownQueue, b)
				b.toShutdown = false
			} else {
				// shutdown at next flush
				// this ensures no bar ends up with less than 100% rendered
				defer func() {
					s.barShutdownQueue = append(s.barShutdownQueue, b)
				}()
			}
		}
		lineCount += b.extendedLines + 1
		bm[b] = struct{}{}
	}

	for _, b := range s.barShutdownQueue {
		if parkedBar := s.parkedBars[b]; parkedBar != nil {
			parkedBar.priority = b.priority
			heap.Push(&s.bHeap, parkedBar)
			delete(s.parkedBars, b)
			b.toDrop = true
		}
		if s.popCompleted && !b.noPop {
			lineCount -= b.extendedLines + 1
			b.toDrop = true
		}
		if b.toDrop {
			delete(bm, b)
			s.heapUpdated = true
		}
		b.cancel()
	}
	s.barShutdownQueue = s.barShutdownQueue[0:0]

	for b := range bm {
		heap.Push(&s.bHeap, b)
	}

	return cw.Flush(lineCount)
}

func (s *pState) updateSyncMatrix() {
	s.pMatrix = make(map[int][]chan int)
	s.aMatrix = make(map[int][]chan int)
	for i := 0; i < s.bHeap.Len(); i++ {
		bar := s.bHeap[i]
		table := bar.wSyncTable()
		pRow, aRow := table[0], table[1]

		for i, ch := range pRow {
			s.pMatrix[i] = append(s.pMatrix[i], ch)
		}

		for i, ch := range aRow {
			s.aMatrix[i] = append(s.aMatrix[i], ch)
		}
	}
}

func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOption) *bState {
	bs := &bState{
		id:       s.idCount,
		priority: s.idCount,
		reqWidth: s.reqWidth,
		total:    total,
		filler:   filler,
		extender: func(r io.Reader, _ int, _ decor.Statistics) (io.Reader, int) { return r, 0 },
		debugOut: s.debugOut,
	}

	for _, opt := range options {
		if opt != nil {
			opt(bs)
		}
	}

	if bs.middleware != nil {
		bs.filler = bs.middleware(filler)
		bs.middleware = nil
	}

	if s.popCompleted && !bs.noPop {
		bs.priority = -(math.MaxInt32 - s.idCount)
	}

	bs.bufP = bytes.NewBuffer(make([]byte, 0, 128))
	bs.bufB = bytes.NewBuffer(make([]byte, 0, 256))
	bs.bufA = bytes.NewBuffer(make([]byte, 0, 128))

	return bs
}

func syncWidth(matrix map[int][]chan int) {
	for _, column := range matrix {
		go maxWidthDistributor(column)
	}
}

var maxWidthDistributor = func(column []chan int) {
	var maxWidth int
	for _, ch := range column {
		if w := <-ch; w > maxWidth {
			maxWidth = w
		}
	}
	for _, ch := range column {
		ch <- maxWidth
	}
}
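Putting the container API together, a minimal runnable sketch against the vendored mpb/v5 API (the total, refresh rate, and sleep are placeholder values):

package main

import (
	"time"

	"github.com/vbauerster/mpb/v5"
	"github.com/vbauerster/mpb/v5/decor"
)

func main() {
	p := mpb.New(mpb.WithRefreshRate(180 * time.Millisecond))
	bar := p.AddBar(100,
		mpb.PrependDecorators(decor.Name("copying:")),
		mpb.AppendDecorators(decor.Percentage()),
	)
	for i := 0; i < 100; i++ {
		time.Sleep(10 * time.Millisecond) // placeholder for real work
		bar.Increment()
	}
	// Wait blocks until all bars complete; the instance can't be reused afterwards.
	p.Wait()
}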
90 vendor/github.com/vbauerster/mpb/v5/proxyreader.go generated vendored
@ -1,90 +0,0 @@
package mpb

import (
	"io"
	"io/ioutil"
	"time"
)

type proxyReader struct {
	io.ReadCloser
	bar *Bar
}

func (x *proxyReader) Read(p []byte) (int, error) {
	n, err := x.ReadCloser.Read(p)
	x.bar.IncrBy(n)
	if err == io.EOF {
		go x.bar.SetTotal(0, true)
	}
	return n, err
}

type proxyWriterTo struct {
	io.ReadCloser // *proxyReader
	wt            io.WriterTo
	bar           *Bar
}

func (x *proxyWriterTo) WriteTo(w io.Writer) (int64, error) {
	n, err := x.wt.WriteTo(w)
	x.bar.IncrInt64(n)
	if err == io.EOF {
		go x.bar.SetTotal(0, true)
	}
	return n, err
}

type ewmaProxyReader struct {
	io.ReadCloser // *proxyReader
	bar           *Bar
	iT            time.Time
}

func (x *ewmaProxyReader) Read(p []byte) (int, error) {
	n, err := x.ReadCloser.Read(p)
	if n > 0 {
		x.bar.DecoratorEwmaUpdate(time.Since(x.iT))
		x.iT = time.Now()
	}
	return n, err
}

type ewmaProxyWriterTo struct {
	io.ReadCloser // *ewmaProxyReader
	wt            io.WriterTo // *proxyWriterTo
	bar           *Bar
	iT            time.Time
}

func (x *ewmaProxyWriterTo) WriteTo(w io.Writer) (int64, error) {
	n, err := x.wt.WriteTo(w)
	if n > 0 {
		x.bar.DecoratorEwmaUpdate(time.Since(x.iT))
		x.iT = time.Now()
	}
	return n, err
}

func newProxyReader(r io.Reader, bar *Bar) io.ReadCloser {
	rc := toReadCloser(r)
	rc = &proxyReader{rc, bar}

	if wt, isWriterTo := r.(io.WriterTo); bar.hasEwmaDecorators {
		now := time.Now()
		rc = &ewmaProxyReader{rc, bar, now}
		if isWriterTo {
			rc = &ewmaProxyWriterTo{rc, wt, bar, now}
		}
	} else if isWriterTo {
		rc = &proxyWriterTo{rc, wt, bar}
	}
	return rc
}

func toReadCloser(r io.Reader) io.ReadCloser {
	if rc, ok := r.(io.ReadCloser); ok {
		return rc
	}
	return ioutil.NopCloser(r)
}
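newProxyReader itself is unexported; callers reach it through Bar.ProxyReader, roughly like this sketch (url, dst, and p are placeholders for a real URL, an io.Writer, and a *mpb.Progress):

resp, err := http.Get(url)
if err != nil {
	log.Fatal(err)
}
defer resp.Body.Close()

bar := p.AddBar(resp.ContentLength)
proxy := bar.ProxyReader(resp.Body) // increments the bar as bytes are read
defer proxy.Close()

if _, err := io.Copy(dst, proxy); err != nil {
	log.Fatal(err)
}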
10 vendor/modules.txt vendored
@ -115,7 +115,7 @@ github.com/containers/common/pkg/umask
github.com/containers/common/version
# github.com/containers/conmon v2.0.20+incompatible
github.com/containers/conmon/runner/config
# github.com/containers/image/v5 v5.10.5
# github.com/containers/image/v5 v5.11.0
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
github.com/containers/image/v5/directory/explicitfilepath
@ -133,6 +133,7 @@ github.com/containers/image/v5/internal/pkg/keyctl
github.com/containers/image/v5/internal/pkg/platform
github.com/containers/image/v5/internal/rootless
github.com/containers/image/v5/internal/tmpdir
github.com/containers/image/v5/internal/types
github.com/containers/image/v5/internal/uploadreader
github.com/containers/image/v5/manifest
github.com/containers/image/v5/oci/archive
@ -381,7 +382,7 @@ github.com/json-iterator/go
# github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a
github.com/juju/ansiterm
github.com/juju/ansiterm/tabwriter
# github.com/klauspost/compress v1.11.12
# github.com/klauspost/compress v1.11.13
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
@ -588,11 +589,6 @@ github.com/ulikunitz/xz/lzma
github.com/vbatts/tar-split/archive/tar
github.com/vbatts/tar-split/tar/asm
github.com/vbatts/tar-split/tar/storage
# github.com/vbauerster/mpb/v5 v5.4.0
github.com/vbauerster/mpb/v5
github.com/vbauerster/mpb/v5/cwriter
github.com/vbauerster/mpb/v5/decor
github.com/vbauerster/mpb/v5/internal
# github.com/vbauerster/mpb/v6 v6.0.3
github.com/vbauerster/mpb/v6
github.com/vbauerster/mpb/v6/cwriter