Merge pull request from rhatdan/buildah

Vendor in latest containers/buildah code
This commit is contained in:
OpenShift Merge Robot
2019-01-06 17:15:10 -08:00
committed by GitHub
55 changed files with 942 additions and 350 deletions

@ -11,8 +11,8 @@ github.com/containerd/cgroups 58556f5ad8448d99a6f7bea69ea4bdb7747cfeb0
github.com/containerd/continuity master github.com/containerd/continuity master
github.com/containernetworking/cni v0.7.0-alpha1 github.com/containernetworking/cni v0.7.0-alpha1
github.com/containernetworking/plugins 1562a1e60ed101aacc5e08ed9dbeba8e9f3d4ec1 github.com/containernetworking/plugins 1562a1e60ed101aacc5e08ed9dbeba8e9f3d4ec1
github.com/containers/image d53afe179b381fafb427e6b9cf9b1996a98c1067 github.com/containers/image f0cbc16b444d729362c78244c715b45bf4019b71
github.com/containers/storage c044256cbfbae8a18cfc76319e11fcf4f98843b9 github.com/containers/storage v1.4
github.com/containers/psgo dc0bc9fac5b715034c4310ed4d795b3182360842 github.com/containers/psgo dc0bc9fac5b715034c4310ed4d795b3182360842
github.com/coreos/go-systemd v14 github.com/coreos/go-systemd v14
github.com/cri-o/ocicni 2d2983e40c242322a56c22a903785e7f83eb378c github.com/cri-o/ocicni 2d2983e40c242322a56c22a903785e7f83eb378c
@ -46,7 +46,7 @@ github.com/kr/pty v1.0.0
github.com/mattn/go-runewidth v0.0.1 github.com/mattn/go-runewidth v0.0.1
github.com/mistifyio/go-zfs v2.1.1 github.com/mistifyio/go-zfs v2.1.1
github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9 github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
github.com/opencontainers/go-digest v1.0.0-rc0 github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/opencontainers/image-spec v1.0.0 github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runc b4e2ecb452d9ee4381137cc0a7e6715b96bed6de github.com/opencontainers/runc b4e2ecb452d9ee4381137cc0a7e6715b96bed6de
github.com/opencontainers/runtime-spec d810dbc60d8c5aeeb3d054bd1132fab2121968ce github.com/opencontainers/runtime-spec d810dbc60d8c5aeeb3d054bd1132fab2121968ce
@ -78,7 +78,7 @@ golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756
golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631 golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
golang.org/x/sync master golang.org/x/sync master
google.golang.org/grpc v1.0.4 https://github.com/grpc/grpc-go google.golang.org/grpc v1.0.4 https://github.com/grpc/grpc-go
gopkg.in/cheggaaa/pb.v1 v1.0.7 gopkg.in/cheggaaa/pb.v1 v1.0.27
gopkg.in/inf.v0 v0.9.0 gopkg.in/inf.v0 v0.9.0
gopkg.in/mgo.v2 v2 gopkg.in/mgo.v2 v2
gopkg.in/square/go-jose.v2 v2.1.3 gopkg.in/square/go-jose.v2 v2.1.3
@ -92,7 +92,7 @@ k8s.io/kube-openapi 275e2ce91dec4c05a4094a7b1daee5560b555ac9 https://github.com/
k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e https://github.com/kubernetes/utils k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e https://github.com/kubernetes/utils
github.com/mrunalp/fileutils master github.com/mrunalp/fileutils master
github.com/varlink/go master github.com/varlink/go master
github.com/containers/buildah dd0f4f1b1eb49b841179049ac498e4b0f874b462 github.com/containers/buildah bb710f39d01868e47224f35f48a128fbea6539c4
github.com/Nvveen/Gotty master github.com/Nvveen/Gotty master
github.com/fsouza/go-dockerclient master github.com/fsouza/go-dockerclient master
github.com/openshift/imagebuilder master github.com/openshift/imagebuilder master

@ -474,9 +474,6 @@ func (b *Builder) Hostname() string {
// Note: this setting is not present in the OCIv1 image format, so it is // Note: this setting is not present in the OCIv1 image format, so it is
// discarded when writing images using OCIv1 formats. // discarded when writing images using OCIv1 formats.
func (b *Builder) SetHostname(name string) { func (b *Builder) SetHostname(name string) {
if name != "" && b.Format != Dockerv2ImageManifest {
logrus.Errorf("HOSTNAME is not supported for OCI image format, hostname %s will be ignored. Must use `docker` format", name)
}
b.Docker.Config.Hostname = name b.Docker.Config.Hostname = name
} }

@ -557,6 +557,10 @@ func (i *containerImageSource) LayerInfosForCopy(ctx context.Context) ([]types.B
return nil, nil return nil, nil
} }
func (i *containerImageSource) HasThreadSafeGetBlob() bool {
return false
}
func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, cache types.BlobInfoCache) (reader io.ReadCloser, size int64, err error) { func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, cache types.BlobInfoCache) (reader io.ReadCloser, size int64, err error) {
if blob.Digest == i.configDigest { if blob.Digest == i.configDigest {
logrus.Debugf("start reading config") logrus.Debugf("start reading config")

@ -1307,7 +1307,12 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (strin
var imageRef reference.Canonical var imageRef reference.Canonical
imageID := "" imageID := ""
if !b.layers && !b.noCache {
// Check if we have a one line Dockerfile making layers irrelevant
// or the user told us to ignore layers.
ignoreLayers := (len(stages) < 2 && len(stages[0].Node.Children) < 2) || (!b.layers && !b.noCache)
if ignoreLayers {
imgID, ref, err := stageExecutor.Commit(ctx, stages[len(stages)-1].Builder, "") imgID, ref, err := stageExecutor.Commit(ctx, stages[len(stages)-1].Builder, "")
if err != nil { if err != nil {
return "", nil, err return "", nil, err

@ -131,6 +131,11 @@ func resolveModifiedTime(rootdir, filename, historyTime string) (bool, error) {
func modTimeIsGreater(rootdir, path string, historyTime string) (bool, error) { func modTimeIsGreater(rootdir, path string, historyTime string) (bool, error) {
var timeIsGreater bool var timeIsGreater bool
// the Walk below doesn't work if rootdir and path are equal
if rootdir == path {
return false, nil
}
// Convert historyTime from string to time.Time for comparison // Convert historyTime from string to time.Time for comparison
histTime, err := time.Parse(time.RFC3339Nano, historyTime) histTime, err := time.Parse(time.RFC3339Nano, historyTime)
if err != nil { if err != nil {

@ -218,6 +218,10 @@ func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *diges
return s.source.GetManifest(ctx, instanceDigest) return s.source.GetManifest(ctx, instanceDigest)
} }
func (s *blobCacheSource) HasThreadSafeGetBlob() bool {
return false
}
func (s *blobCacheSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { func (s *blobCacheSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
present, size, err := s.reference.HasBlob(blobinfo) present, size, err := s.reference.HasBlob(blobinfo)
if err != nil { if err != nil {
@ -398,6 +402,10 @@ func saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os
} }
} }
func (s *blobCacheDestination) HasThreadSafePutBlob() bool {
return false
}
func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
var tempfile *os.File var tempfile *os.File
var err error var err error

@ -148,6 +148,10 @@ var (
Name: "loglevel", Name: "loglevel",
Usage: "adjust logging level (range from -2 to 3)", Usage: "adjust logging level (range from -2 to 3)",
}, },
cli.StringFlag{
Name: "platform",
Usage: "CLI compatibility: no action or effect",
},
cli.BoolTFlag{ cli.BoolTFlag{
Name: "pull", Name: "pull",
Usage: "pull the image if not present", Usage: "pull the image if not present",

@ -26,6 +26,7 @@ import (
"github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/reexec" "github.com/containers/storage/pkg/reexec"
"github.com/containers/storage/pkg/stringid"
units "github.com/docker/go-units" units "github.com/docker/go-units"
digest "github.com/opencontainers/go-digest" digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-spec/specs-go"
@ -944,10 +945,25 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options RunOptions)
g.SetHostname(options.Hostname) g.SetHostname(options.Hostname)
} else if b.Hostname() != "" { } else if b.Hostname() != "" {
g.SetHostname(b.Hostname()) g.SetHostname(b.Hostname())
} else {
g.SetHostname(stringid.TruncateID(b.ContainerID))
} }
} else { } else {
g.SetHostname("") g.SetHostname("")
} }
found := false
spec := g.Spec()
for i := range spec.Process.Env {
if strings.HasPrefix(spec.Process.Env[i], "HOSTNAME=") {
found = true
break
}
}
if !found {
spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("HOSTNAME=%s", spec.Hostname))
}
return configureNetwork, configureNetworks, nil return configureNetwork, configureNetworks, nil
} }

@ -79,7 +79,8 @@ void _buildah_unshare(void)
pidfd = _buildah_unshare_parse_envint("_Buildah-pid-pipe"); pidfd = _buildah_unshare_parse_envint("_Buildah-pid-pipe");
if (pidfd != -1) { if (pidfd != -1) {
snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid()); snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid());
if (write(pidfd, buf, strlen(buf)) != strlen(buf)) { size_t size = write(pidfd, buf, strlen(buf));
if (size != strlen(buf)) {
fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd); fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd);
_exit(1); _exit(1);
} }

@ -395,57 +395,11 @@ func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.Linux
// ParseIDMappings parses mapping triples. // ParseIDMappings parses mapping triples.
func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) { func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
nonDigitsToWhitespace := func(r rune) rune { uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map")
if strings.IndexRune("0123456789", r) == -1 {
return ' '
} else {
return r
}
}
parseTriple := func(spec []string) (container, host, size uint32, err error) {
cid, err := strconv.ParseUint(spec[0], 10, 32)
if err != nil {
return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[0], err)
}
hid, err := strconv.ParseUint(spec[1], 10, 32)
if err != nil {
return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[1], err)
}
sz, err := strconv.ParseUint(spec[2], 10, 32)
if err != nil {
return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[2], err)
}
return uint32(cid), uint32(hid), uint32(sz), nil
}
parseIDMap := func(mapSpec []string, mapSetting string) (idmap []idtools.IDMap, err error) {
for _, idMapSpec := range mapSpec {
idSpec := strings.Fields(strings.Map(nonDigitsToWhitespace, idMapSpec))
if len(idSpec)%3 != 0 {
return nil, errors.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting)
}
for i := range idSpec {
if i%3 != 0 {
continue
}
cid, hid, size, err := parseTriple(idSpec[i : i+3])
if err != nil {
return nil, errors.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting)
}
mapping := idtools.IDMap{
ContainerID: int(cid),
HostID: int(hid),
Size: int(size),
}
idmap = append(idmap, mapping)
}
}
return idmap, nil
}
uid, err := parseIDMap(uidmap, "userns-uid-map")
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
gid, err := parseIDMap(gidmap, "userns-gid-map") gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map")
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }

@ -3,10 +3,10 @@ github.com/blang/semver master
github.com/BurntSushi/toml master github.com/BurntSushi/toml master
github.com/containerd/continuity master github.com/containerd/continuity master
github.com/containernetworking/cni v0.7.0-alpha1 github.com/containernetworking/cni v0.7.0-alpha1
github.com/containers/image d53afe179b381fafb427e6b9cf9b1996a98c1067 github.com/containers/image 0c6cc8e1420001ae39fa89d308b3b3bc5ee81c57
github.com/boltdb/bolt master github.com/boltdb/bolt master
github.com/containers/libpod fe4f09493f41f675d24c969d1b60d1a6a45ddb9e github.com/containers/libpod c8eaf59d5f4bec249db8134c6a9fcfbcac792519
github.com/containers/storage db40f96d853dfced60c563e61fb66ba231ce7c8d github.com/containers/storage 60a692f7ce891feb91ce0eda87bd06bfd5651dff
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00 github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1 github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
@ -32,7 +32,7 @@ github.com/mistifyio/go-zfs master
github.com/moby/moby f8806b18b4b92c5e1980f6e11c917fad201cd73c github.com/moby/moby f8806b18b4b92c5e1980f6e11c917fad201cd73c
github.com/mtrmac/gpgme master github.com/mtrmac/gpgme master
github.com/Nvveen/Gotty master github.com/Nvveen/Gotty master
github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/opencontainers/image-spec v1.0.0 github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runc master github.com/opencontainers/runc master
github.com/opencontainers/runtime-spec v1.0.0 github.com/opencontainers/runtime-spec v1.0.0
@ -55,10 +55,14 @@ github.com/xeipuuv/gojsonreference master
github.com/xeipuuv/gojsonschema master github.com/xeipuuv/gojsonschema master
golang.org/x/crypto master golang.org/x/crypto master
golang.org/x/net master golang.org/x/net master
golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
golang.org/x/sys master golang.org/x/sys master
golang.org/x/text master golang.org/x/text master
gopkg.in/cheggaaa/pb.v1 v1.0.13 gopkg.in/cheggaaa/pb.v1 v1.0.27
gopkg.in/yaml.v2 cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b gopkg.in/yaml.v2 cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b
k8s.io/apimachinery master k8s.io/apimachinery master
k8s.io/client-go master k8s.io/client-go master
k8s.io/kubernetes master k8s.io/kubernetes master
github.com/klauspost/pgzip v1.2.1
github.com/klauspost/compress v1.4.1
github.com/klauspost/cpuid v1.2.0

@ -2,7 +2,6 @@ package copy
import ( import (
"bytes" "bytes"
"compress/gzip"
"context" "context"
"fmt" "fmt"
"io" "io"
@ -10,6 +9,7 @@ import (
"reflect" "reflect"
"runtime" "runtime"
"strings" "strings"
"sync"
"time" "time"
"github.com/containers/image/image" "github.com/containers/image/image"
@ -18,9 +18,11 @@ import (
"github.com/containers/image/signature" "github.com/containers/image/signature"
"github.com/containers/image/transports" "github.com/containers/image/transports"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/klauspost/pgzip"
"github.com/opencontainers/go-digest" "github.com/opencontainers/go-digest"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/sync/semaphore"
pb "gopkg.in/cheggaaa/pb.v1" pb "gopkg.in/cheggaaa/pb.v1"
) )
@ -32,6 +34,10 @@ type digestingReader struct {
validationSucceeded bool validationSucceeded bool
} }
// maxParallelDownloads is used to limit the maxmimum number of parallel
// downloads. Let's follow Firefox by limiting it to 6.
var maxParallelDownloads = 6
// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error // newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest. // or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
// (neither is set if EOF is never reached). // (neither is set if EOF is never reached).
@ -81,6 +87,7 @@ type copier struct {
progressInterval time.Duration progressInterval time.Duration
progress chan types.ProgressProperties progress chan types.ProgressProperties
blobInfoCache types.BlobInfoCache blobInfoCache types.BlobInfoCache
copyInParallel bool
} }
// imageCopier tracks state specific to a single image (possibly an item of a manifest list) // imageCopier tracks state specific to a single image (possibly an item of a manifest list)
@ -145,12 +152,14 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
} }
}() }()
copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob()
c := &copier{ c := &copier{
dest: dest, dest: dest,
rawSource: rawSource, rawSource: rawSource,
reportWriter: reportWriter, reportWriter: reportWriter,
progressInterval: options.ProgressInterval, progressInterval: options.ProgressInterval,
progress: options.Progress, progress: options.Progress,
copyInParallel: copyInParallel,
// FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx. // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
// For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
// we might want to add a separate CommonCtx — or would that be too confusing? // we might want to add a separate CommonCtx — or would that be too confusing?
@ -380,11 +389,26 @@ func (ic *imageCopier) updateEmbeddedDockerReference() error {
return nil return nil
} }
// shortDigest returns the first 12 characters of the digest.
func shortDigest(d digest.Digest) string {
return d.Encoded()[:12]
}
// createProgressBar creates a pb.ProgressBar.
func createProgressBar(srcInfo types.BlobInfo, kind string, writer io.Writer) *pb.ProgressBar {
bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES)
bar.SetMaxWidth(80)
bar.ShowTimeLeft = false
bar.ShowPercent = false
bar.Prefix(fmt.Sprintf("Copying %s %s:", kind, shortDigest(srcInfo.Digest)))
bar.Output = writer
return bar
}
// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. // copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest.
func (ic *imageCopier) copyLayers(ctx context.Context) error { func (ic *imageCopier) copyLayers(ctx context.Context) error {
srcInfos := ic.src.LayerInfos() srcInfos := ic.src.LayerInfos()
destInfos := []types.BlobInfo{} numLayers := len(srcInfos)
diffIDs := []digest.Digest{}
updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx) updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx)
if err != nil { if err != nil {
return err return err
@ -397,30 +421,83 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
srcInfos = updatedSrcInfos srcInfos = updatedSrcInfos
srcInfosUpdated = true srcInfosUpdated = true
} }
for _, srcLayer := range srcInfos {
var ( type copyLayerData struct {
destInfo types.BlobInfo destInfo types.BlobInfo
diffID digest.Digest diffID digest.Digest
err error err error
) }
// copyGroup is used to determine if all layers are copied
copyGroup := sync.WaitGroup{}
copyGroup.Add(numLayers)
// copySemaphore is used to limit the number of parallel downloads to
// avoid malicious images causing troubles and to be nice to servers.
var copySemaphore *semaphore.Weighted
if ic.c.copyInParallel {
copySemaphore = semaphore.NewWeighted(int64(maxParallelDownloads))
} else {
copySemaphore = semaphore.NewWeighted(int64(1))
}
data := make([]copyLayerData, numLayers)
copyLayerHelper := func(index int, srcLayer types.BlobInfo, bar *pb.ProgressBar) {
defer bar.Finish()
defer copySemaphore.Release(1)
defer copyGroup.Done()
cld := copyLayerData{}
if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
// DiffIDs are, currently, needed only when converting from schema1. // DiffIDs are, currently, needed only when converting from schema1.
// In which case src.LayerInfos will not have URLs because schema1 // In which case src.LayerInfos will not have URLs because schema1
// does not support them. // does not support them.
if ic.diffIDsAreNeeded { if ic.diffIDsAreNeeded {
return errors.New("getting DiffID for foreign layers is unimplemented") cld.err = errors.New("getting DiffID for foreign layers is unimplemented")
bar.Prefix(fmt.Sprintf("Skipping blob %s (DiffID foreign layer unimplemented):", shortDigest(srcLayer.Digest)))
bar.Finish()
} else {
cld.destInfo = srcLayer
logrus.Debugf("Skipping foreign layer %q copy to %s\n", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
bar.Prefix(fmt.Sprintf("Skipping blob %s (foreign layer):", shortDigest(srcLayer.Digest)))
bar.Add64(bar.Total)
bar.Finish()
} }
destInfo = srcLayer
ic.c.Printf("Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.c.dest.Reference().Transport().Name())
} else { } else {
destInfo, diffID, err = ic.copyLayer(ctx, srcLayer) cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, bar)
if err != nil {
return err
}
} }
destInfos = append(destInfos, destInfo) data[index] = cld
diffIDs = append(diffIDs, diffID)
} }
progressBars := make([]*pb.ProgressBar, numLayers)
for i, srcInfo := range srcInfos {
bar := createProgressBar(srcInfo, "blob", nil)
progressBars[i] = bar
}
progressPool := pb.NewPool(progressBars...)
progressPool.Output = ic.c.reportWriter
if err := progressPool.Start(); err != nil {
return errors.Wrapf(err, "error creating progress-bar pool")
}
for i, srcLayer := range srcInfos {
copySemaphore.Acquire(ctx, 1)
go copyLayerHelper(i, srcLayer, progressBars[i])
}
destInfos := make([]types.BlobInfo, numLayers)
diffIDs := make([]digest.Digest, numLayers)
copyGroup.Wait()
progressPool.Stop()
for i, cld := range data {
if cld.err != nil {
return cld.err
}
destInfos[i] = cld.destInfo
diffIDs[i] = cld.diffID
}
ic.manifestUpdates.InformationOnly.LayerInfos = destInfos ic.manifestUpdates.InformationOnly.LayerInfos = destInfos
if ic.diffIDsAreNeeded { if ic.diffIDsAreNeeded {
ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
@ -487,12 +564,14 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte
func (c *copier) copyConfig(ctx context.Context, src types.Image) error { func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
srcInfo := src.ConfigInfo() srcInfo := src.ConfigInfo()
if srcInfo.Digest != "" { if srcInfo.Digest != "" {
c.Printf("Copying config %s\n", srcInfo.Digest)
configBlob, err := src.ConfigBlob(ctx) configBlob, err := src.ConfigBlob(ctx)
if err != nil { if err != nil {
return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest) return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
} }
destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true) bar := createProgressBar(srcInfo, "config", c.reportWriter)
defer bar.Finish()
bar.Start()
destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
if err != nil { if err != nil {
return err return err
} }
@ -512,7 +591,7 @@ type diffIDResult struct {
// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress, // copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) { func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, bar *pb.ProgressBar) (types.BlobInfo, digest.Digest, error) {
cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
@ -523,13 +602,14 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (t
return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest) return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
} }
if reused { if reused {
ic.c.Printf("Skipping fetch of repeat blob %s\n", srcInfo.Digest) bar.Prefix(fmt.Sprintf("Skipping blob %s (already present):", shortDigest(srcInfo.Digest)))
bar.Add64(bar.Total)
bar.Finish()
return blobInfo, cachedDiffID, nil return blobInfo, cachedDiffID, nil
} }
} }
// Fallback: copy the layer, computing the diffID if we need to do so // Fallback: copy the layer, computing the diffID if we need to do so
ic.c.Printf("Copying blob %s\n", srcInfo.Digest)
srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
if err != nil { if err != nil {
return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
@ -537,7 +617,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (t
defer srcStream.Close() defer srcStream.Close()
blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize},
diffIDIsNeeded) diffIDIsNeeded, bar)
if err != nil { if err != nil {
return types.BlobInfo{}, "", err return types.BlobInfo{}, "", err
} }
@ -565,7 +645,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (t
// perhaps compressing the stream if canCompress, // perhaps compressing the stream if canCompress,
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. // and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
diffIDIsNeeded bool) (types.BlobInfo, <-chan diffIDResult, error) { diffIDIsNeeded bool, bar *pb.ProgressBar) (types.BlobInfo, <-chan diffIDResult, error) {
var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil
var diffIDChan chan diffIDResult var diffIDChan chan diffIDResult
@ -589,7 +669,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
return pipeWriter return pipeWriter
} }
} }
blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false) // Sets err to nil on success blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, bar) // Sets err to nil on success
return blobInfo, diffIDChan, err return blobInfo, diffIDChan, err
// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
} }
@ -626,7 +706,7 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc)
// and returns a complete blobInfo of the copied blob. // and returns a complete blobInfo of the copied blob.
func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer, getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
canModifyBlob bool, isConfig bool) (types.BlobInfo, error) { canModifyBlob bool, isConfig bool, bar *pb.ProgressBar) (types.BlobInfo, error) {
// The copying happens through a pipeline of connected io.Readers. // The copying happens through a pipeline of connected io.Readers.
// === Input: srcStream // === Input: srcStream
@ -649,16 +729,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
} }
isCompressed := decompressor != nil isCompressed := decompressor != nil
// === Report progress using a pb.Reader.
bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES)
bar.Output = c.reportWriter
bar.SetMaxWidth(80)
bar.ShowTimeLeft = false
bar.ShowPercent = false
bar.Start()
destStream = bar.NewProxyReader(destStream) destStream = bar.NewProxyReader(destStream)
defer bar.Finish()
// === Send a copy of the original, uncompressed, stream, to a separate path if necessary. // === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
@ -761,7 +832,7 @@ func compressGoroutine(dest *io.PipeWriter, src io.Reader) {
dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close() dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
}() }()
zipper := gzip.NewWriter(dest) zipper := pgzip.NewWriter(dest)
defer zipper.Close() defer zipper.Close()
_, err = io.Copy(zipper, src) // Sets err to nil, i.e. causes dest.Close() _, err = io.Copy(zipper, src) // Sets err to nil, i.e. causes dest.Close()

@ -124,6 +124,11 @@ func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, DockerReference() returns nil. return false // N/A, DockerReference() returns nil.
} }
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *dirImageDestination) HasThreadSafePutBlob() bool {
return false
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in). // PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known. // inputInfo.Size is the expected length of stream, if known.

@ -48,6 +48,11 @@ func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest
return m, manifest.GuessMIMEType(m), err return m, manifest.GuessMIMEType(m), err
} }
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *dirImageSource) HasThreadSafeGetBlob() bool {
return false
}
// GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown). // GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.

@ -13,6 +13,7 @@ import (
"path/filepath" "path/filepath"
"strconv" "strconv"
"strings" "strings"
"sync"
"time" "time"
"github.com/containers/image/docker/reference" "github.com/containers/image/docker/reference"
@ -84,6 +85,7 @@ type dockerClient struct {
registry string registry string
client *http.Client client *http.Client
insecureSkipTLSVerify bool insecureSkipTLSVerify bool
// The following members are not set by newDockerClient and must be set by callers if needed. // The following members are not set by newDockerClient and must be set by callers if needed.
username string username string
password string password string
@ -95,8 +97,13 @@ type dockerClient struct {
scheme string // Empty value also used to indicate detectProperties() has not yet succeeded. scheme string // Empty value also used to indicate detectProperties() has not yet succeeded.
challenges []challenge challenges []challenge
supportsSignatures bool supportsSignatures bool
// Private state for setupRequestAuth
tokenCache map[string]bearerToken // Private state for setupRequestAuth (key: string, value: bearerToken)
tokenCache sync.Map
// detectPropertiesError caches the initial error.
detectPropertiesError error
// detectPropertiesOnce is used to execuute detectProperties() at most once in in makeRequest().
detectPropertiesOnce sync.Once
} }
type authScope struct { type authScope struct {
@ -262,7 +269,6 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
registry: registry, registry: registry,
client: &http.Client{Transport: tr}, client: &http.Client{Transport: tr},
insecureSkipTLSVerify: skipVerify, insecureSkipTLSVerify: skipVerify,
tokenCache: map[string]bearerToken{},
}, nil }, nil
} }
@ -473,14 +479,18 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
cacheKey = fmt.Sprintf("%s:%s", c.extraScope.remoteName, c.extraScope.actions) cacheKey = fmt.Sprintf("%s:%s", c.extraScope.remoteName, c.extraScope.actions)
scopes = append(scopes, *c.extraScope) scopes = append(scopes, *c.extraScope)
} }
token, ok := c.tokenCache[cacheKey] var token bearerToken
if !ok || time.Now().After(token.expirationTime) { t, inCache := c.tokenCache.Load(cacheKey)
if inCache {
token = t.(bearerToken)
}
if !inCache || time.Now().After(token.expirationTime) {
t, err := c.getBearerToken(req.Context(), challenge, scopes) t, err := c.getBearerToken(req.Context(), challenge, scopes)
if err != nil { if err != nil {
return err return err
} }
token = *t token = *t
c.tokenCache[cacheKey] = token c.tokenCache.Store(cacheKey, token)
} }
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.Token)) req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.Token))
return nil return nil
@ -545,9 +555,9 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
return newBearerTokenFromJSONBlob(tokenBlob) return newBearerTokenFromJSONBlob(tokenBlob)
} }
// detectProperties detects various properties of the registry. // detectPropertiesHelper performs the work of detectProperties which executes
// See the dockerClient documentation for members which are affected by this. // it at most once.
func (c *dockerClient) detectProperties(ctx context.Context) error { func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
if c.scheme != "" { if c.scheme != "" {
return nil return nil
} }
@ -604,6 +614,13 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
return err return err
} }
// detectProperties detects various properties of the registry.
// See the dockerClient documentation for members which are affected by this.
func (c *dockerClient) detectProperties(ctx context.Context) error {
c.detectPropertiesOnce.Do(func() { c.detectPropertiesError = c.detectPropertiesHelper(ctx) })
return c.detectPropertiesError
}
// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension, // getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
// using the original data structures. // using the original data structures.
func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) { func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {

@ -111,6 +111,11 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {
return len(p), nil return len(p), nil
} }
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *dockerImageDestination) HasThreadSafePutBlob() bool {
return false
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in). // PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known. // inputInfo.Size is the expected length of stream, if known.

@ -161,6 +161,11 @@ func getBlobSize(resp *http.Response) int64 {
return size return size
} }
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *dockerImageSource) HasThreadSafeGetBlob() bool {
return true
}
// GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown). // GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.

@ -82,6 +82,11 @@ func (d *Destination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false. return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
} }
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *Destination) HasThreadSafePutBlob() bool {
return false
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in). // PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known. // inputInfo.Size is the expected length of stream, if known.

@ -397,6 +397,11 @@ func (r uncompressedReadCloser) Close() error {
return res return res
} }
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *Source) HasThreadSafeGetBlob() bool {
return false
}
// GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown). // GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.

@ -77,6 +77,11 @@ func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool {
return d.unpackedDest.IgnoresEmbeddedDockerReference() return d.unpackedDest.IgnoresEmbeddedDockerReference()
} }
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool {
return false
}
// PutBlob writes contents of stream and returns data representing the result. // PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known. // inputInfo.Size is the expected length of stream, if known.

@ -76,6 +76,11 @@ func (s *ociArchiveImageSource) GetManifest(ctx context.Context, instanceDigest
return s.unpackedSrc.GetManifest(ctx, instanceDigest) return s.unpackedSrc.GetManifest(ctx, instanceDigest)
} }
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *ociArchiveImageSource) HasThreadSafeGetBlob() bool {
return false
}
// GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown). // GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.

@ -107,6 +107,11 @@ func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, DockerReference() returns nil. return false // N/A, DockerReference() returns nil.
} }
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *ociImageDestination) HasThreadSafePutBlob() bool {
return false
}
// PutBlob writes contents of stream and returns data representing the result. // PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known. // inputInfo.Size is the expected length of stream, if known.

@ -92,6 +92,11 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest
return m, mimeType, nil return m, mimeType, nil
} }
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *ociImageSource) HasThreadSafeGetBlob() bool {
return false
}
// GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown). // GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.

@ -211,6 +211,11 @@ func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *
return s.docker.GetManifest(ctx, instanceDigest) return s.docker.GetManifest(ctx, instanceDigest)
} }
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *openshiftImageSource) HasThreadSafeGetBlob() bool {
return false
}
// GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown). // GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
@ -378,6 +383,11 @@ func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
return d.docker.IgnoresEmbeddedDockerReference() return d.docker.IgnoresEmbeddedDockerReference()
} }
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *openshiftImageDestination) HasThreadSafePutBlob() bool {
return false
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in). // PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known. // inputInfo.Size is the expected length of stream, if known.

@ -4,7 +4,6 @@ package ostree
import ( import (
"bytes" "bytes"
"compress/gzip"
"context" "context"
"encoding/base64" "encoding/base64"
"encoding/json" "encoding/json"
@ -24,6 +23,7 @@ import (
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/archive"
"github.com/klauspost/pgzip"
"github.com/opencontainers/go-digest" "github.com/opencontainers/go-digest"
selinux "github.com/opencontainers/selinux/go-selinux" selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/ostreedev/ostree-go/pkg/otbuiltin" "github.com/ostreedev/ostree-go/pkg/otbuiltin"
@ -132,6 +132,11 @@ func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, DockerReference() returns nil. return false // N/A, DockerReference() returns nil.
} }
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *ostreeImageDestination) HasThreadSafePutBlob() bool {
return false
}
// PutBlob writes contents of stream and returns data representing the result. // PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known. // inputInfo.Size is the expected length of stream, if known.
@ -249,7 +254,7 @@ func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch strin
} }
func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) { func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) {
mfz := gzip.NewWriter(output) mfz := pgzip.NewWriter(output)
defer mfz.Close() defer mfz.Close()
metaPacker := storage.NewJSONPacker(mfz) metaPacker := storage.NewJSONPacker(mfz)

@ -4,7 +4,6 @@ package ostree
import ( import (
"bytes" "bytes"
"compress/gzip"
"context" "context"
"encoding/base64" "encoding/base64"
"fmt" "fmt"
@ -17,6 +16,7 @@ import (
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/ioutils"
"github.com/klauspost/pgzip"
"github.com/opencontainers/go-digest" "github.com/opencontainers/go-digest"
glib "github.com/ostreedev/ostree-go/pkg/glibobject" glib "github.com/ostreedev/ostree-go/pkg/glibobject"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -255,6 +255,11 @@ func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser,
return getter.Get(path) return getter.Get(path)
} }
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *ostreeImageSource) HasThreadSafeGetBlob() bool {
return false
}
// GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown). // GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
@ -304,7 +309,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
} }
mf := bytes.NewReader(tarsplit) mf := bytes.NewReader(tarsplit)
mfz, err := gzip.NewReader(mf) mfz, err := pgzip.NewReader(mf)
if err != nil { if err != nil {
return nil, 0, err return nil, 0, err
} }

@ -54,7 +54,7 @@ func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
} }
path := filepath.Join(dir, blobInfoCacheFilename) path := filepath.Join(dir, blobInfoCacheFilename)
if err := os.MkdirAll(dir, 0700); err != nil { if err := os.MkdirAll(dir, 0700); err != nil {
logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", err) logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err)
return NewMemoryCache() return NewMemoryCache()
} }

@ -3,10 +3,10 @@ package compression
import ( import (
"bytes" "bytes"
"compress/bzip2" "compress/bzip2"
"compress/gzip"
"io" "io"
"io/ioutil" "io/ioutil"
"github.com/klauspost/pgzip"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/ulikunitz/xz" "github.com/ulikunitz/xz"
@ -18,7 +18,7 @@ type DecompressorFunc func(io.Reader) (io.ReadCloser, error)
// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm. // GzipDecompressor is a DecompressorFunc for the gzip compression algorithm.
func GzipDecompressor(r io.Reader) (io.ReadCloser, error) { func GzipDecompressor(r io.Reader) (io.ReadCloser, error) {
return gzip.NewReader(r) return pgzip.NewReader(r)
} }
// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm. // Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.

@ -318,8 +318,37 @@ func FindUnqualifiedSearchRegistries(ctx *types.SystemContext) ([]Registry, erro
return unqualified, nil return unqualified, nil
} }
// FindRegistry returns the Registry with the longest prefix for ref. If no // refMatchesPrefix returns true iff ref,
// Registry prefixes the image, nil is returned. // which is a registry, repository namespace, repository or image reference (as formatted by
// reference.Domain(), reference.Named.Name() or reference.Reference.String()
// — note that this requires the name to start with an explicit hostname!),
// matches a Registry.Prefix value.
// (This is split from the caller primarily to make testing easier.)
func refMatchesPrefix(ref, prefix string) bool {
switch {
case len(ref) < len(prefix):
return false
case len(ref) == len(prefix):
return ref == prefix
case len(ref) > len(prefix):
if !strings.HasPrefix(ref, prefix) {
return false
}
c := ref[len(prefix)]
// This allows "example.com:5000" to match "example.com",
// which is unintended; that will get fixed eventually, DON'T RELY
// ON THE CURRENT BEHAVIOR.
return c == ':' || c == '/' || c == '@'
default:
panic("Internal error: impossible comparison outcome")
}
}
// FindRegistry returns the Registry with the longest prefix for ref,
// which is a registry, repository namespace repository or image reference (as formatted by
// reference.Domain(), reference.Named.Name() or reference.Reference.String()
// — note that this requires the name to start with an explicit hostname!).
// If no Registry prefixes the image, nil is returned.
func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) { func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
registries, err := GetRegistries(ctx) registries, err := GetRegistries(ctx)
if err != nil { if err != nil {
@ -329,7 +358,7 @@ func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
reg := Registry{} reg := Registry{}
prefixLen := 0 prefixLen := 0
for _, r := range registries { for _, r := range registries {
if strings.HasPrefix(ref, r.Prefix+"/") || ref == r.Prefix { if refMatchesPrefix(ref, r.Prefix) {
length := len(r.Prefix) length := len(r.Prefix)
if length > prefixLen { if length > prefixLen {
reg = r reg = r

@ -11,6 +11,7 @@ import (
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"sync"
"sync/atomic" "sync/atomic"
"github.com/containers/image/image" "github.com/containers/image/image"
@ -47,6 +48,7 @@ type storageImageSource struct {
image *storage.Image image *storage.Image
layerPosition map[digest.Digest]int // Where we are in reading a blob's layers layerPosition map[digest.Digest]int // Where we are in reading a blob's layers
cachedManifest []byte // A cached copy of the manifest, if already known, or nil cachedManifest []byte // A cached copy of the manifest, if already known, or nil
getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
} }
@ -56,6 +58,7 @@ type storageImageDestination struct {
nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs
manifest []byte // Manifest contents, temporary manifest []byte // Manifest contents, temporary
signatures []byte // Signature contents, temporary signatures []byte // Signature contents, temporary
putBlobMutex sync.Mutex // Mutex to sync state for parallel PutBlob executions
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
@ -91,15 +94,20 @@ func newImageSource(imageRef storageReference) (*storageImageSource, error) {
} }
// Reference returns the image reference that we used to find this image. // Reference returns the image reference that we used to find this image.
func (s storageImageSource) Reference() types.ImageReference { func (s *storageImageSource) Reference() types.ImageReference {
return s.imageRef return s.imageRef
} }
// Close cleans up any resources we tied up while reading the image. // Close cleans up any resources we tied up while reading the image.
func (s storageImageSource) Close() error { func (s *storageImageSource) Close() error {
return nil return nil
} }
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *storageImageSource) HasThreadSafeGetBlob() bool {
return true
}
// GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown). // GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
@ -137,8 +145,10 @@ func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadC
// Step through the list of matching layers. Tests may want to verify that if we have multiple layers // Step through the list of matching layers. Tests may want to verify that if we have multiple layers
// which claim to have the same contents, that we actually do have multiple layers, otherwise we could // which claim to have the same contents, that we actually do have multiple layers, otherwise we could
// just go ahead and use the first one every time. // just go ahead and use the first one every time.
s.getBlobMutex.Lock()
i := s.layerPosition[info.Digest] i := s.layerPosition[info.Digest]
s.layerPosition[info.Digest] = i + 1 s.layerPosition[info.Digest] = i + 1
s.getBlobMutex.Unlock()
if len(layers) > 0 { if len(layers) > 0 {
layer = layers[i%len(layers)] layer = layers[i%len(layers)]
} }
@ -300,7 +310,7 @@ func newImageDestination(imageRef storageReference) (*storageImageDestination, e
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (s storageImageDestination) Reference() types.ImageReference { func (s *storageImageDestination) Reference() types.ImageReference {
return s.imageRef return s.imageRef
} }
@ -309,7 +319,7 @@ func (s *storageImageDestination) Close() error {
return os.RemoveAll(s.directory) return os.RemoveAll(s.directory)
} }
func (s storageImageDestination) DesiredLayerCompression() types.LayerCompression { func (s *storageImageDestination) DesiredLayerCompression() types.LayerCompression {
// We ultimately have to decompress layers to populate trees on disk, // We ultimately have to decompress layers to populate trees on disk,
// so callers shouldn't bother compressing them before handing them to // so callers shouldn't bother compressing them before handing them to
// us, if they're not already compressed. // us, if they're not already compressed.
@ -320,6 +330,11 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string {
return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1))) return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
} }
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (s *storageImageDestination) HasThreadSafePutBlob() bool {
return true
}
// PutBlob writes contents of stream and returns data representing the result. // PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known. // inputInfo.Size is the expected length of stream, if known.
@ -370,9 +385,11 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
return errorBlobInfo, ErrBlobSizeMismatch return errorBlobInfo, ErrBlobSizeMismatch
} }
// Record information about the blob. // Record information about the blob.
s.putBlobMutex.Lock()
s.blobDiffIDs[hasher.Digest()] = diffID.Digest() s.blobDiffIDs[hasher.Digest()] = diffID.Digest()
s.fileSizes[hasher.Digest()] = counter.Count s.fileSizes[hasher.Digest()] = counter.Count
s.filenames[hasher.Digest()] = filename s.filenames[hasher.Digest()] = filename
s.putBlobMutex.Unlock()
blobDigest := blobinfo.Digest blobDigest := blobinfo.Digest
if blobDigest.Validate() != nil { if blobDigest.Validate() != nil {
blobDigest = hasher.Digest() blobDigest = hasher.Digest()
@ -398,6 +415,9 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache. // May use and/or update cache.
func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
// lock the entire method as it executes fairly quickly
s.putBlobMutex.Lock()
defer s.putBlobMutex.Unlock()
if blobinfo.Digest == "" { if blobinfo.Digest == "" {
return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`) return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
} }

@ -2,7 +2,6 @@ package tarball
import ( import (
"bytes" "bytes"
"compress/gzip"
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
@ -14,7 +13,7 @@ import (
"time" "time"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest" digest "github.com/opencontainers/go-digest"
imgspecs "github.com/opencontainers/image-spec/specs-go" imgspecs "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@ -77,7 +76,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
// Set up to digest the file after we maybe decompress it. // Set up to digest the file after we maybe decompress it.
diffIDdigester := digest.Canonical.Digester() diffIDdigester := digest.Canonical.Digester()
uncompressed, err := gzip.NewReader(reader) uncompressed, err := pgzip.NewReader(reader)
if err == nil { if err == nil {
// It is compressed, so the diffID is the digest of the uncompressed version // It is compressed, so the diffID is the digest of the uncompressed version
reader = io.TeeReader(uncompressed, diffIDdigester.Hash()) reader = io.TeeReader(uncompressed, diffIDdigester.Hash())
@ -207,6 +206,11 @@ func (is *tarballImageSource) Close() error {
return nil return nil
} }
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
// The tarball transport reports false; presumably because its blobs are read
// from local tarball/config files whose readers are not safe for concurrent
// use — NOTE(review): confirm against the GetBlob implementation.
func (is *tarballImageSource) HasThreadSafeGetBlob() bool {
	return false
}
// GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown). // GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.

@ -198,6 +198,8 @@ type ImageSource interface {
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error) GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error)
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
HasThreadSafeGetBlob() bool
// GetSignatures returns the image's signatures. It may use a remote (= slow) service. // GetSignatures returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
@ -264,6 +266,8 @@ type ImageDestination interface {
// to any other readers for download using the supplied digest. // to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error) PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error)
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
HasThreadSafePutBlob() bool
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty. // info.Digest must not be empty.

@ -16,7 +16,7 @@ github.com/imdario/mergo 50d4dbd4eb0e84778abe37cefef140271d96fade
github.com/mattn/go-runewidth 14207d285c6c197daabb5c9793d63e7af9ab2d50 github.com/mattn/go-runewidth 14207d285c6c197daabb5c9793d63e7af9ab2d50
github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062 github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9 github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/opencontainers/image-spec v1.0.0 github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runc 6b1d0e76f239ffb435445e5ae316d2676c07c6e3 github.com/opencontainers/runc 6b1d0e76f239ffb435445e5ae316d2676c07c6e3
github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9 github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
@ -26,8 +26,9 @@ github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
github.com/vbatts/tar-split v0.10.2 github.com/vbatts/tar-split v0.10.2
golang.org/x/crypto 453249f01cfeb54c3d549ddb75ff152ca243f9d8 golang.org/x/crypto 453249f01cfeb54c3d549ddb75ff152ca243f9d8
golang.org/x/net 6b27048ae5e6ad1ef927e72e437531493de612fe golang.org/x/net 6b27048ae5e6ad1ef927e72e437531493de612fe
golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08 golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08
gopkg.in/cheggaaa/pb.v1 d7e6ca3010b6f084d8056847f55d7f572f180678 gopkg.in/cheggaaa/pb.v1 v1.0.27
gopkg.in/yaml.v2 a3f3340b5840cee44f372bddb5880fcbc419b46a gopkg.in/yaml.v2 a3f3340b5840cee44f372bddb5880fcbc419b46a
k8s.io/client-go bcde30fb7eaed76fd98a36b4120321b94995ffb6 k8s.io/client-go bcde30fb7eaed76fd98a36b4120321b94995ffb6
github.com/xeipuuv/gojsonschema master github.com/xeipuuv/gojsonschema master
@ -36,7 +37,7 @@ github.com/xeipuuv/gojsonpointer master
github.com/tchap/go-patricia v2.2.6 github.com/tchap/go-patricia v2.2.6
github.com/opencontainers/selinux 077c8b6d1c18456fb7c792bc0de52295a0d1900e github.com/opencontainers/selinux 077c8b6d1c18456fb7c792bc0de52295a0d1900e
github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0 github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0
github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460 github.com/ostreedev/ostree-go 56f3a639dbc0f2f5051c6d52dade28a882ba78ce
github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8 github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8
github.com/pquerna/ffjson master github.com/pquerna/ffjson master
github.com/syndtr/gocapability master github.com/syndtr/gocapability master
@ -44,3 +45,6 @@ github.com/Microsoft/go-winio ab35fc04b6365e8fcb18e6e9e41ea4a02b10b175
github.com/Microsoft/hcsshim eca7177590cdcbd25bbc5df27e3b693a54b53a6a github.com/Microsoft/hcsshim eca7177590cdcbd25bbc5df27e3b693a54b53a6a
github.com/ulikunitz/xz v0.5.4 github.com/ulikunitz/xz v0.5.4
github.com/boltdb/bolt master github.com/boltdb/bolt master
github.com/klauspost/pgzip v1.2.1
github.com/klauspost/compress v1.4.1
github.com/klauspost/cpuid v1.2.0

@ -101,4 +101,4 @@ the various OCI projects).
# Copyright and license # Copyright and license
Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE.code). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/. Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.

@ -19,6 +19,7 @@ import (
"fmt" "fmt"
"hash" "hash"
"io" "io"
"regexp"
) )
// Algorithm identifies and implementation of a digester by an identifier. // Algorithm identifies and implementation of a digester by an identifier.
@ -28,9 +29,9 @@ type Algorithm string
// supported digest types // supported digest types
const ( const (
SHA256 Algorithm = "sha256" // sha256 with hex encoding SHA256 Algorithm = "sha256" // sha256 with hex encoding (lower case only)
SHA384 Algorithm = "sha384" // sha384 with hex encoding SHA384 Algorithm = "sha384" // sha384 with hex encoding (lower case only)
SHA512 Algorithm = "sha512" // sha512 with hex encoding SHA512 Algorithm = "sha512" // sha512 with hex encoding (lower case only)
// Canonical is the primary digest algorithm used with the distribution // Canonical is the primary digest algorithm used with the distribution
// project. Other digests may be used but this one is the primary storage // project. Other digests may be used but this one is the primary storage
@ -50,6 +51,14 @@ var (
SHA384: crypto.SHA384, SHA384: crypto.SHA384,
SHA512: crypto.SHA512, SHA512: crypto.SHA512,
} }
// anchoredEncodedRegexps contains anchored regular expressions for hex-encoded digests.
// Note that upper-case hex characters (A-F) are disallowed; encodings are lower-case only.
anchoredEncodedRegexps = map[Algorithm]*regexp.Regexp{
SHA256: regexp.MustCompile(`^[a-f0-9]{64}$`),
SHA384: regexp.MustCompile(`^[a-f0-9]{96}$`),
SHA512: regexp.MustCompile(`^[a-f0-9]{128}$`),
}
) )
// Available returns true if the digest type is available for use. If this // Available returns true if the digest type is available for use. If this
@ -125,6 +134,14 @@ func (a Algorithm) Hash() hash.Hash {
return algorithms[a].New() return algorithms[a].New()
} }
// Encode encodes the raw bytes of a digest, typically from a hash.Hash, into
// the encoded portion of the digest.
func (a Algorithm) Encode(d []byte) string {
	// All registered algorithms currently use a lower-case hex encoding; this
	// will need to become algorithm-dependent once back registration of
	// algorithms is supported (TODO stevvooe).
	encoded := fmt.Sprintf("%x", d)
	return encoded
}
// FromReader returns the digest of the reader using the algorithm. // FromReader returns the digest of the reader using the algorithm.
func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
digester := a.Digester() digester := a.Digester()
@ -156,3 +173,20 @@ func (a Algorithm) FromBytes(p []byte) Digest {
func (a Algorithm) FromString(s string) Digest { func (a Algorithm) FromString(s string) Digest {
return a.FromBytes([]byte(s)) return a.FromBytes([]byte(s))
} }
// Validate validates the encoded portion string, i.e. the part after the ":"
// in a digest, for algorithm a. It returns ErrDigestUnsupported when a has no
// registered encoding regexp, ErrDigestInvalidLength when the length does not
// match the algorithm's digest size, and ErrDigestInvalidFormat when the
// content is not lower-case hex.
func (a Algorithm) Validate(encoded string) error {
	r, ok := anchoredEncodedRegexps[a]
	if !ok {
		return ErrDigestUnsupported
	}
	// Digests must always be hex-encoded, ensuring that their hex portion will
	// always be size*2
	if a.Size()*2 != len(encoded) {
		return ErrDigestInvalidLength
	}
	if r.MatchString(encoded) {
		return nil
	}
	return ErrDigestInvalidFormat
}

@ -45,16 +45,21 @@ func NewDigest(alg Algorithm, h hash.Hash) Digest {
// functions. This is also useful for rebuilding digests from binary // functions. This is also useful for rebuilding digests from binary
// serializations. // serializations.
func NewDigestFromBytes(alg Algorithm, p []byte) Digest { func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
return Digest(fmt.Sprintf("%s:%x", alg, p)) return NewDigestFromEncoded(alg, alg.Encode(p))
} }
// NewDigestFromHex returns a Digest from alg and a the hex encoded digest. // NewDigestFromHex is deprecated. Please use NewDigestFromEncoded.
func NewDigestFromHex(alg, hex string) Digest { func NewDigestFromHex(alg, hex string) Digest {
return Digest(fmt.Sprintf("%s:%s", alg, hex)) return NewDigestFromEncoded(Algorithm(alg), hex)
}
// NewDigestFromEncoded returns a Digest from alg and the encoded digest.
func NewDigestFromEncoded(alg Algorithm, encoded string) Digest {
return Digest(fmt.Sprintf("%s:%s", alg, encoded))
} }
// DigestRegexp matches valid digest types. // DigestRegexp matches valid digest types.
var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`) var DigestRegexp = regexp.MustCompile(`[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+`)
// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. // DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
@ -96,26 +101,18 @@ func FromString(s string) Digest {
// error if not. // error if not.
func (d Digest) Validate() error { func (d Digest) Validate() error {
s := string(d) s := string(d)
i := strings.Index(s, ":") i := strings.Index(s, ":")
if i <= 0 || i+1 == len(s) {
// validate i then run through regexp
if i < 0 || i+1 == len(s) || !DigestRegexpAnchored.MatchString(s) {
return ErrDigestInvalidFormat return ErrDigestInvalidFormat
} }
algorithm, encoded := Algorithm(s[:i]), s[i+1:]
algorithm := Algorithm(s[:i])
if !algorithm.Available() { if !algorithm.Available() {
if !DigestRegexpAnchored.MatchString(s) {
return ErrDigestInvalidFormat
}
return ErrDigestUnsupported return ErrDigestUnsupported
} }
return algorithm.Validate(encoded)
// Digests much always be hex-encoded, ensuring that their hex portion will
// always be size*2
if algorithm.Size()*2 != len(s[i+1:]) {
return ErrDigestInvalidLength
}
return nil
} }
// Algorithm returns the algorithm portion of the digest. This will panic if // Algorithm returns the algorithm portion of the digest. This will panic if
@ -133,12 +130,17 @@ func (d Digest) Verifier() Verifier {
} }
} }
// Hex returns the hex digest portion of the digest. This will panic if the // Encoded returns the encoded portion of the digest. This will panic if the
// underlying digest is not in a valid format. // underlying digest is not in a valid format.
func (d Digest) Hex() string { func (d Digest) Encoded() string {
return string(d[d.sepIndex()+1:]) return string(d[d.sepIndex()+1:])
} }
// Hex returns the encoded portion of the digest.
//
// Deprecated: use Digest.Encoded instead.
func (d Digest) Hex() string {
	return d.Encoded()
}
func (d Digest) String() string { func (d Digest) String() string {
return string(d) return string(d)
} }

27
vendor/golang.org/x/sync/LICENSE generated vendored Normal file

@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/sync/PATENTS generated vendored Normal file

@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

18
vendor/golang.org/x/sync/README.md generated vendored Normal file

@ -0,0 +1,18 @@
# Go Sync
This repository provides Go concurrency primitives in addition to the
ones provided by the language and "sync" and "sync/atomic" packages.
## Download/Install
The easiest way to install is to run `go get -u golang.org/x/sync`. You can
also manually git clone the repository to `$GOPATH/src/golang.org/x/sync`.
## Report Issues / Send Patches
This repository uses Gerrit for code changes. To learn how to submit changes to
this repository, see https://golang.org/doc/contribute.html.
The main issue tracker for the sync repository is located at
https://github.com/golang/go/issues. Prefix your issue with "x/sync:" in the
subject line, so it is easy to find.

127
vendor/golang.org/x/sync/semaphore/semaphore.go generated vendored Normal file

@ -0,0 +1,127 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package semaphore provides a weighted semaphore implementation.
package semaphore // import "golang.org/x/sync/semaphore"
import (
"container/list"
"context"
"sync"
)
// waiter records one blocked Acquire call: the weight it is waiting for and
// the channel Release closes once that weight has been granted.
type waiter struct {
	n     int64
	ready chan<- struct{} // Closed when semaphore acquired.
}
// NewWeighted creates a new weighted semaphore with the given
// maximum combined weight for concurrent access.
func NewWeighted(n int64) *Weighted {
	return &Weighted{size: n}
}
// Weighted provides a way to bound concurrent access to a resource.
// The callers can request access with a given weight.
type Weighted struct {
	size    int64      // maximum combined weight that may be held at once
	cur     int64      // weight currently held; guarded by mu
	mu      sync.Mutex
	waiters list.List  // FIFO queue of waiter values blocked in Acquire
}
// Acquire acquires the semaphore with a weight of n, blocking until resources
// are available or ctx is done. On success, returns nil. On failure, returns
// ctx.Err() and leaves the semaphore unchanged.
//
// If ctx is already done, Acquire may still succeed without blocking.
func (s *Weighted) Acquire(ctx context.Context, n int64) error {
	s.mu.Lock()
	// Fast path: capacity is available and nobody is queued ahead of us.
	if s.size-s.cur >= n && s.waiters.Len() == 0 {
		s.cur += n
		s.mu.Unlock()
		return nil
	}
	if n > s.size {
		// Don't make other Acquire calls block on one that's doomed to fail.
		s.mu.Unlock()
		<-ctx.Done()
		return ctx.Err()
	}
	// Slow path: enqueue ourselves and wait for Release to close ready.
	ready := make(chan struct{})
	w := waiter{n: n, ready: ready}
	elem := s.waiters.PushBack(w)
	s.mu.Unlock()
	select {
	case <-ctx.Done():
		err := ctx.Err()
		s.mu.Lock()
		// Re-check under the lock: Release may have granted us the semaphore
		// between ctx.Done() firing and us reacquiring mu.
		select {
		case <-ready:
			// Acquired the semaphore after we were canceled. Rather than trying to
			// fix up the queue, just pretend we didn't notice the cancelation.
			err = nil
		default:
			s.waiters.Remove(elem)
		}
		s.mu.Unlock()
		return err
	case <-ready:
		return nil
	}
}
// TryAcquire acquires the semaphore with a weight of n without blocking.
// On success, returns true. On failure, returns false and leaves the semaphore unchanged.
func (s *Weighted) TryAcquire(n int64) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	// Succeed only when capacity is available AND no waiters are queued, so
	// blocked Acquire calls keep their FIFO position.
	if s.size-s.cur < n || s.waiters.Len() != 0 {
		return false
	}
	s.cur += n
	return true
}
// Release releases the semaphore with a weight of n, returning that weight to
// the pool and waking as many queued waiters (in FIFO order) as the freed
// capacity allows. Releasing more than is currently held panics.
func (s *Weighted) Release(n int64) {
	s.mu.Lock()
	s.cur -= n
	if s.cur < 0 {
		s.mu.Unlock()
		panic("semaphore: bad release")
	}
	for {
		next := s.waiters.Front()
		if next == nil {
			break // No more waiters blocked.
		}
		w := next.Value.(waiter)
		if s.size-s.cur < w.n {
			// Not enough tokens for the next waiter. We could keep going (to try to
			// find a waiter with a smaller request), but under load that could cause
			// starvation for large requests; instead, we leave all remaining waiters
			// blocked.
			//
			// Consider a semaphore used as a read-write lock, with N tokens, N
			// readers, and one writer. Each reader can Acquire(1) to obtain a read
			// lock. The writer can Acquire(N) to obtain a write lock, excluding all
			// of the readers. If we allow the readers to jump ahead in the queue,
			// the writer will starve — there is always one token available for every
			// reader.
			break
		}
		s.cur += w.n
		s.waiters.Remove(next)
		close(w.ready)
	}
	s.mu.Unlock()
}

@ -2,6 +2,7 @@
Simple progress bar for console programs. Simple progress bar for console programs.
Please check the new version https://github.com/cheggaaa/pb/tree/v2 (currently, it's beta)
## Installation ## Installation
@ -170,7 +171,7 @@ The result will be as follows:
``` ```
$ go run example/multiple.go $ go run example/multiple.go
First 141 / 1000 [===============>---------------------------------------] 14.10 % 44s First 34 / 200 [=========>---------------------------------------------] 17.00% 00m08s
Second 139 / 1000 [==============>---------------------------------------] 13.90 % 44s Second 42 / 200 [===========>------------------------------------------] 21.00% 00m06s
Third 152 / 1000 [================>--------------------------------------] 15.20 % 40s Third 36 / 200 [=========>---------------------------------------------] 18.00% 00m08s
``` ```

@ -2,7 +2,6 @@ package pb
import ( import (
"fmt" "fmt"
"strings"
"time" "time"
) )
@ -11,12 +10,26 @@ type Units int
const ( const (
// U_NO are default units, they represent a simple value and are not formatted at all. // U_NO are default units, they represent a simple value and are not formatted at all.
U_NO Units = iota U_NO Units = iota
// U_BYTES units are formatted in a human readable way (b, Bb, Mb, ...) // U_BYTES units are formatted in a human readable way (B, KiB, MiB, ...)
U_BYTES U_BYTES
// U_BYTES_DEC units are like U_BYTES, but base 10 (B, KB, MB, ...)
U_BYTES_DEC
// U_DURATION units are formatted in a human readable way (3h14m15s) // U_DURATION units are formatted in a human readable way (3h14m15s)
U_DURATION U_DURATION
) )
const (
	// Binary (base-2) byte-unit sizes, used by the U_BYTES formatter.
	KiB = 1024
	MiB = 1048576
	GiB = 1073741824
	TiB = 1099511627776
	// Decimal (base-10) byte-unit sizes, used by the U_BYTES_DEC formatter.
	KB = 1e3
	MB = 1e6
	GB = 1e9
	TB = 1e12
)
func Format(i int64) *formatter { func Format(i int64) *formatter {
return &formatter{n: i} return &formatter{n: i}
} }
@ -28,11 +41,6 @@ type formatter struct {
perSec bool perSec bool
} }
func (f *formatter) Value(n int64) *formatter {
f.n = n
return f
}
func (f *formatter) To(unit Units) *formatter { func (f *formatter) To(unit Units) *formatter {
f.unit = unit f.unit = unit
return f return f
@ -52,13 +60,10 @@ func (f *formatter) String() (out string) {
switch f.unit { switch f.unit {
case U_BYTES: case U_BYTES:
out = formatBytes(f.n) out = formatBytes(f.n)
case U_BYTES_DEC:
out = formatBytesDec(f.n)
case U_DURATION: case U_DURATION:
d := time.Duration(f.n) out = formatDuration(f.n)
if d > time.Hour*24 {
out = fmt.Sprintf("%dd", d/24/time.Hour)
d -= (d / time.Hour / 24) * (time.Hour * 24)
}
out = fmt.Sprintf("%s%v", out, d)
default: default:
out = fmt.Sprintf(fmt.Sprintf("%%%dd", f.width), f.n) out = fmt.Sprintf(fmt.Sprintf("%%%dd", f.width), f.n)
} }
@ -68,20 +73,53 @@ func (f *formatter) String() (out string) {
return return
} }
// Convert bytes to human readable string. Like a 2 MB, 64.2 KB, 52 B // Convert bytes to human readable string. Like 2 MiB, 64.2 KiB, 52 B
func formatBytes(i int64) (result string) { func formatBytes(i int64) (result string) {
switch { switch {
case i > (1024 * 1024 * 1024 * 1024): case i >= TiB:
result = fmt.Sprintf("%.02f TB", float64(i)/1024/1024/1024/1024) result = fmt.Sprintf("%.02f TiB", float64(i)/TiB)
case i > (1024 * 1024 * 1024): case i >= GiB:
result = fmt.Sprintf("%.02f GB", float64(i)/1024/1024/1024) result = fmt.Sprintf("%.02f GiB", float64(i)/GiB)
case i > (1024 * 1024): case i >= MiB:
result = fmt.Sprintf("%.02f MB", float64(i)/1024/1024) result = fmt.Sprintf("%.02f MiB", float64(i)/MiB)
case i > 1024: case i >= KiB:
result = fmt.Sprintf("%.02f KB", float64(i)/1024) result = fmt.Sprintf("%.02f KiB", float64(i)/KiB)
default: default:
result = fmt.Sprintf("%d B", i) result = fmt.Sprintf("%d B", i)
} }
result = strings.Trim(result, " ") return
}
// formatBytesDec renders i as a base-10, human-readable byte count,
// e.g. "2.00 TB", "64.20 KB", "52 B".
func formatBytesDec(i int64) (result string) {
	if i >= TB {
		return fmt.Sprintf("%.02f TB", float64(i)/TB)
	}
	if i >= GB {
		return fmt.Sprintf("%.02f GB", float64(i)/GB)
	}
	if i >= MB {
		return fmt.Sprintf("%.02f MB", float64(i)/MB)
	}
	if i >= KB {
		return fmt.Sprintf("%.02f KB", float64(i)/KB)
	}
	return fmt.Sprintf("%d B", i)
}
func formatDuration(n int64) (result string) {
d := time.Duration(n)
if d > time.Hour*24 {
result = fmt.Sprintf("%dd", d/24/time.Hour)
d -= (d / time.Hour / 24) * (time.Hour * 24)
}
if d > time.Hour {
result = fmt.Sprintf("%s%dh", result, d/time.Hour)
d -= d / time.Hour * time.Hour
}
m := d / time.Minute
d -= m * time.Minute
s := d / time.Second
result = fmt.Sprintf("%s%02dm%02ds", result, m, s)
return return
} }

193
vendor/gopkg.in/cheggaaa/pb.v1/pb.go generated vendored

@ -13,7 +13,7 @@ import (
) )
// Current version // Current version
const Version = "1.0.6" const Version = "1.0.27"
const ( const (
// Default refresh rate - 200ms // Default refresh rate - 200ms
@ -37,18 +37,17 @@ func New(total int) *ProgressBar {
// Create new progress bar object using int64 as total // Create new progress bar object using int64 as total
func New64(total int64) *ProgressBar { func New64(total int64) *ProgressBar {
pb := &ProgressBar{ pb := &ProgressBar{
Total: total, Total: total,
RefreshRate: DEFAULT_REFRESH_RATE, RefreshRate: DEFAULT_REFRESH_RATE,
ShowPercent: true, ShowPercent: true,
ShowCounters: true, ShowCounters: true,
ShowBar: true, ShowBar: true,
ShowTimeLeft: true, ShowTimeLeft: true,
ShowFinalTime: true, ShowElapsedTime: false,
Units: U_NO, ShowFinalTime: true,
ManualUpdate: false, Units: U_NO,
finish: make(chan struct{}), ManualUpdate: false,
currentValue: -1, finish: make(chan struct{}),
mu: new(sync.Mutex),
} }
return pb.Format(FORMAT) return pb.Format(FORMAT)
} }
@ -67,13 +66,14 @@ func StartNew(total int) *ProgressBar {
type Callback func(out string) type Callback func(out string)
type ProgressBar struct { type ProgressBar struct {
current int64 // current must be first member of struct (https://code.google.com/p/go/issues/detail?id=5278) current int64 // current must be first member of struct (https://code.google.com/p/go/issues/detail?id=5278)
previous int64
Total int64 Total int64
RefreshRate time.Duration RefreshRate time.Duration
ShowPercent, ShowCounters bool ShowPercent, ShowCounters bool
ShowSpeed, ShowTimeLeft, ShowBar bool ShowSpeed, ShowTimeLeft, ShowBar bool
ShowFinalTime bool ShowFinalTime, ShowElapsedTime bool
Output io.Writer Output io.Writer
Callback Callback Callback Callback
NotPrint bool NotPrint bool
@ -91,13 +91,14 @@ type ProgressBar struct {
finish chan struct{} finish chan struct{}
isFinish bool isFinish bool
startTime time.Time startTime time.Time
startValue int64 startValue int64
currentValue int64
changeTime time.Time
prefix, postfix string prefix, postfix string
mu *sync.Mutex mu sync.Mutex
lastPrint string lastPrint string
BarStart string BarStart string
@ -112,8 +113,8 @@ type ProgressBar struct {
// Start print // Start print
func (pb *ProgressBar) Start() *ProgressBar { func (pb *ProgressBar) Start() *ProgressBar {
pb.startTime = time.Now() pb.startTime = time.Now()
pb.startValue = pb.current pb.startValue = atomic.LoadInt64(&pb.current)
if pb.Total == 0 { if atomic.LoadInt64(&pb.Total) == 0 {
pb.ShowTimeLeft = false pb.ShowTimeLeft = false
pb.ShowPercent = false pb.ShowPercent = false
pb.AutoStat = false pb.AutoStat = false
@ -158,12 +159,16 @@ func (pb *ProgressBar) Add64(add int64) int64 {
// Set prefix string // Set prefix string
func (pb *ProgressBar) Prefix(prefix string) *ProgressBar { func (pb *ProgressBar) Prefix(prefix string) *ProgressBar {
pb.mu.Lock()
defer pb.mu.Unlock()
pb.prefix = prefix pb.prefix = prefix
return pb return pb
} }
// Set postfix string // Set postfix string
func (pb *ProgressBar) Postfix(postfix string) *ProgressBar { func (pb *ProgressBar) Postfix(postfix string) *ProgressBar {
pb.mu.Lock()
defer pb.mu.Unlock()
pb.postfix = postfix pb.postfix = postfix
return pb return pb
} }
@ -173,7 +178,7 @@ func (pb *ProgressBar) Postfix(postfix string) *ProgressBar {
// Example: bar.Format("[\x00=\x00>\x00-\x00]") // \x00 is the delimiter // Example: bar.Format("[\x00=\x00>\x00-\x00]") // \x00 is the delimiter
func (pb *ProgressBar) Format(format string) *ProgressBar { func (pb *ProgressBar) Format(format string) *ProgressBar {
var formatEntries []string var formatEntries []string
if len(format) == 5 { if utf8.RuneCountInString(format) == 5 {
formatEntries = strings.Split(format, "") formatEntries = strings.Split(format, "")
} else { } else {
formatEntries = strings.Split(format, "\x00") formatEntries = strings.Split(format, "\x00")
@ -221,7 +226,9 @@ func (pb *ProgressBar) Finish() {
//Protect multiple calls //Protect multiple calls
pb.finishOnce.Do(func() { pb.finishOnce.Do(func() {
close(pb.finish) close(pb.finish)
pb.write(atomic.LoadInt64(&pb.current)) pb.write(atomic.LoadInt64(&pb.Total), atomic.LoadInt64(&pb.current))
pb.mu.Lock()
defer pb.mu.Unlock()
switch { switch {
case pb.Output != nil: case pb.Output != nil:
fmt.Fprintln(pb.Output) fmt.Fprintln(pb.Output)
@ -232,6 +239,13 @@ func (pb *ProgressBar) Finish() {
}) })
} }
// IsFinished reports whether Finish has already been called on this bar.
func (pb *ProgressBar) IsFinished() bool {
	pb.mu.Lock()
	defer pb.mu.Unlock()
	return pb.isFinish
}
// End print and write string 'str' // End print and write string 'str'
func (pb *ProgressBar) FinishPrint(str string) { func (pb *ProgressBar) FinishPrint(str string) {
pb.Finish() pb.Finish()
@ -262,16 +276,18 @@ func (pb *ProgressBar) NewProxyReader(r io.Reader) *Reader {
return &Reader{r, pb} return &Reader{r, pb}
} }
func (pb *ProgressBar) write(current int64) { func (pb *ProgressBar) write(total, current int64) {
pb.mu.Lock()
defer pb.mu.Unlock()
width := pb.GetWidth() width := pb.GetWidth()
var percentBox, countersBox, timeLeftBox, speedBox, barBox, end, out string var percentBox, countersBox, timeLeftBox, timeSpentBox, speedBox, barBox, end, out string
// percents // percents
if pb.ShowPercent { if pb.ShowPercent {
var percent float64 var percent float64
if pb.Total > 0 { if total > 0 {
percent = float64(current) / (float64(pb.Total) / float64(100)) percent = float64(current) / (float64(total) / float64(100))
} else { } else {
percent = float64(current) / float64(100) percent = float64(current) / float64(100)
} }
@ -281,17 +297,24 @@ func (pb *ProgressBar) write(current int64) {
// counters // counters
if pb.ShowCounters { if pb.ShowCounters {
current := Format(current).To(pb.Units).Width(pb.UnitsWidth) current := Format(current).To(pb.Units).Width(pb.UnitsWidth)
if pb.Total > 0 { if total > 0 {
total := Format(pb.Total).To(pb.Units).Width(pb.UnitsWidth) totalS := Format(total).To(pb.Units).Width(pb.UnitsWidth)
countersBox = fmt.Sprintf(" %s / %s ", current, total) countersBox = fmt.Sprintf(" %s / %s ", current, totalS)
} else { } else {
countersBox = fmt.Sprintf(" %s / ? ", current) countersBox = fmt.Sprintf(" %s / ? ", current)
} }
} }
// time left // time left
fromStart := time.Now().Sub(pb.startTime)
currentFromStart := current - pb.startValue currentFromStart := current - pb.startValue
fromStart := time.Now().Sub(pb.startTime)
lastChangeTime := pb.changeTime
fromChange := lastChangeTime.Sub(pb.startTime)
if pb.ShowElapsedTime {
timeSpentBox = fmt.Sprintf(" %s ", (fromStart/time.Second)*time.Second)
}
select { select {
case <-pb.finish: case <-pb.finish:
if pb.ShowFinalTime { if pb.ShowFinalTime {
@ -301,17 +324,20 @@ func (pb *ProgressBar) write(current int64) {
} }
default: default:
if pb.ShowTimeLeft && currentFromStart > 0 { if pb.ShowTimeLeft && currentFromStart > 0 {
perEntry := fromStart / time.Duration(currentFromStart) perEntry := fromChange / time.Duration(currentFromStart)
var left time.Duration var left time.Duration
if pb.Total > 0 { if total > 0 {
left = time.Duration(pb.Total-currentFromStart) * perEntry left = time.Duration(total-currentFromStart) * perEntry
left -= time.Since(lastChangeTime)
left = (left / time.Second) * time.Second left = (left / time.Second) * time.Second
} else { } else {
left = time.Duration(currentFromStart) * perEntry left = time.Duration(currentFromStart) * perEntry
left = (left / time.Second) * time.Second left = (left / time.Second) * time.Second
} }
timeLeft := Format(int64(left)).To(U_DURATION).String() if left > 0 {
timeLeftBox = fmt.Sprintf(" %s", timeLeft) timeLeft := Format(int64(left)).To(U_DURATION).String()
timeLeftBox = fmt.Sprintf(" %s", timeLeft)
}
} }
} }
@ -326,30 +352,38 @@ func (pb *ProgressBar) write(current int64) {
speedBox = " " + Format(int64(speed)).To(pb.Units).Width(pb.UnitsWidth).PerSec().String() speedBox = " " + Format(int64(speed)).To(pb.Units).Width(pb.UnitsWidth).PerSec().String()
} }
barWidth := escapeAwareRuneCountInString(countersBox + pb.BarStart + pb.BarEnd + percentBox + timeLeftBox + speedBox + pb.prefix + pb.postfix) barWidth := escapeAwareRuneCountInString(countersBox + pb.BarStart + pb.BarEnd + percentBox + timeSpentBox + timeLeftBox + speedBox + pb.prefix + pb.postfix)
// bar // bar
if pb.ShowBar { if pb.ShowBar {
size := width - barWidth size := width - barWidth
if size > 0 { if size > 0 {
if pb.Total > 0 { if total > 0 {
curCount := int(math.Ceil((float64(current) / float64(pb.Total)) * float64(size))) curSize := int(math.Ceil((float64(current) / float64(total)) * float64(size)))
emptCount := size - curCount emptySize := size - curSize
barBox = pb.BarStart barBox = pb.BarStart
if emptCount < 0 { if emptySize < 0 {
emptCount = 0 emptySize = 0
} }
if curCount > size { if curSize > size {
curCount = size curSize = size
} }
if emptCount <= 0 {
barBox += strings.Repeat(pb.Current, curCount) cursorLen := escapeAwareRuneCountInString(pb.Current)
} else if curCount > 0 { if emptySize <= 0 {
barBox += strings.Repeat(pb.Current, curCount-1) + pb.CurrentN barBox += strings.Repeat(pb.Current, curSize/cursorLen)
} else if curSize > 0 {
cursorEndLen := escapeAwareRuneCountInString(pb.CurrentN)
cursorRepetitions := (curSize - cursorEndLen) / cursorLen
barBox += strings.Repeat(pb.Current, cursorRepetitions)
barBox += pb.CurrentN
} }
barBox += strings.Repeat(pb.Empty, emptCount) + pb.BarEnd
emptyLen := escapeAwareRuneCountInString(pb.Empty)
barBox += strings.Repeat(pb.Empty, emptySize/emptyLen)
barBox += pb.BarEnd
} else { } else {
barBox = pb.BarStart
pos := size - int(current)%int(size) pos := size - int(current)%int(size)
barBox = pb.BarStart
if pos-1 > 0 { if pos-1 > 0 {
barBox += strings.Repeat(pb.Empty, pos-1) barBox += strings.Repeat(pb.Empty, pos-1)
} }
@ -363,17 +397,18 @@ func (pb *ProgressBar) write(current int64) {
} }
// check len // check len
out = pb.prefix + countersBox + barBox + percentBox + speedBox + timeLeftBox + pb.postfix out = pb.prefix + timeSpentBox + countersBox + barBox + percentBox + speedBox + timeLeftBox + pb.postfix
if escapeAwareRuneCountInString(out) < width {
end = strings.Repeat(" ", width-utf8.RuneCountInString(out)) if cl := escapeAwareRuneCountInString(out); cl < width {
end = strings.Repeat(" ", width-cl)
} }
// and print! // and print!
pb.mu.Lock()
pb.lastPrint = out + end pb.lastPrint = out + end
pb.mu.Unlock() isFinish := pb.isFinish
switch { switch {
case pb.isFinish: case isFinish:
return return
case pb.Output != nil: case pb.Output != nil:
fmt.Fprint(pb.Output, "\r"+out+end) fmt.Fprint(pb.Output, "\r"+out+end)
@ -406,24 +441,55 @@ func (pb *ProgressBar) GetWidth() int {
// Write the current state of the progressbar // Write the current state of the progressbar
func (pb *ProgressBar) Update() { func (pb *ProgressBar) Update() {
c := atomic.LoadInt64(&pb.current) c := atomic.LoadInt64(&pb.current)
if pb.AlwaysUpdate || c != pb.currentValue { p := atomic.LoadInt64(&pb.previous)
pb.write(c) t := atomic.LoadInt64(&pb.Total)
pb.currentValue = c if p != c {
pb.mu.Lock()
pb.changeTime = time.Now()
pb.mu.Unlock()
atomic.StoreInt64(&pb.previous, c)
} }
pb.write(t, c)
if pb.AutoStat { if pb.AutoStat {
if c == 0 { if c == 0 {
pb.startTime = time.Now() pb.startTime = time.Now()
pb.startValue = 0 pb.startValue = 0
} else if c >= pb.Total && pb.isFinish != true { } else if c >= t && pb.isFinish != true {
pb.Finish() pb.Finish()
} }
} }
} }
// String return the last bar print
func (pb *ProgressBar) String() string { func (pb *ProgressBar) String() string {
pb.mu.Lock()
defer pb.mu.Unlock()
return pb.lastPrint return pb.lastPrint
} }
// SetTotal atomically sets new total count
func (pb *ProgressBar) SetTotal(total int) *ProgressBar {
return pb.SetTotal64(int64(total))
}
// SetTotal64 atomically sets new total count
func (pb *ProgressBar) SetTotal64(total int64) *ProgressBar {
atomic.StoreInt64(&pb.Total, total)
return pb
}
// Reset bar and set new total count
// Does effect only on finished bar
func (pb *ProgressBar) Reset(total int) *ProgressBar {
pb.mu.Lock()
defer pb.mu.Unlock()
if pb.isFinish {
pb.SetTotal(total).Set(0)
atomic.StoreInt64(&pb.previous, 0)
}
return pb
}
// Internal loop for refreshing the progressbar // Internal loop for refreshing the progressbar
func (pb *ProgressBar) refresher() { func (pb *ProgressBar) refresher() {
for { for {
@ -435,10 +501,3 @@ func (pb *ProgressBar) refresher() {
} }
} }
} }
type window struct {
Row uint16
Col uint16
Xpixel uint16
Ypixel uint16
}

@ -1,4 +1,4 @@
// +build appengine // +build appengine js
package pb package pb

@ -1,8 +0,0 @@
// +build linux darwin freebsd netbsd openbsd dragonfly
// +build !appengine
package pb
import "syscall"
const sysIoctl = syscall.SYS_IOCTL

@ -1,6 +0,0 @@
// +build solaris
// +build !appengine
package pb
const sysIoctl = 54

@ -102,7 +102,7 @@ var echoLockMutex sync.Mutex
var oldState word var oldState word
func lockEcho() (quit chan int, err error) { func lockEcho() (shutdownCh chan struct{}, err error) {
echoLockMutex.Lock() echoLockMutex.Lock()
defer echoLockMutex.Unlock() defer echoLockMutex.Unlock()
if echoLocked { if echoLocked {
@ -124,6 +124,8 @@ func lockEcho() (quit chan int, err error) {
err = fmt.Errorf("Can't set terminal settings: %v", e) err = fmt.Errorf("Can't set terminal settings: %v", e)
return return
} }
shutdownCh = make(chan struct{})
return return
} }

@ -1,5 +1,5 @@
// +build linux darwin freebsd netbsd openbsd solaris dragonfly // +build linux darwin freebsd netbsd openbsd solaris dragonfly
// +build !appengine // +build !appengine !js
package pb package pb
@ -8,101 +8,109 @@ import (
"fmt" "fmt"
"os" "os"
"os/signal" "os/signal"
"runtime"
"sync" "sync"
"syscall" "syscall"
"unsafe"
)
const ( "golang.org/x/sys/unix"
TIOCGWINSZ = 0x5413
TIOCGWINSZ_OSX = 1074295912
) )
var tty *os.File
var ErrPoolWasStarted = errors.New("Bar pool was started") var ErrPoolWasStarted = errors.New("Bar pool was started")
var echoLocked bool var (
var echoLockMutex sync.Mutex echoLockMutex sync.Mutex
origTermStatePtr *unix.Termios
tty *os.File
istty bool
)
func init() { func init() {
echoLockMutex.Lock()
defer echoLockMutex.Unlock()
var err error var err error
tty, err = os.Open("/dev/tty") tty, err = os.Open("/dev/tty")
istty = true
if err != nil { if err != nil {
tty = os.Stdin tty = os.Stdin
istty = false
} }
} }
// terminalWidth returns width of the terminal. // terminalWidth returns width of the terminal.
func terminalWidth() (int, error) { func terminalWidth() (int, error) {
w := new(window) if !istty {
tio := syscall.TIOCGWINSZ return 0, errors.New("Not Supported")
if runtime.GOOS == "darwin" {
tio = TIOCGWINSZ_OSX
} }
res, _, err := syscall.Syscall(sysIoctl, echoLockMutex.Lock()
tty.Fd(), defer echoLockMutex.Unlock()
uintptr(tio),
uintptr(unsafe.Pointer(w)), fd := int(tty.Fd())
)
if int(res) == -1 { ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
if err != nil {
return 0, err return 0, err
} }
return int(w.Col), nil
return int(ws.Col), nil
} }
var oldState syscall.Termios func lockEcho() (shutdownCh chan struct{}, err error) {
func lockEcho() (quit chan int, err error) {
echoLockMutex.Lock() echoLockMutex.Lock()
defer echoLockMutex.Unlock() defer echoLockMutex.Unlock()
if echoLocked { if istty {
err = ErrPoolWasStarted if origTermStatePtr != nil {
return return shutdownCh, ErrPoolWasStarted
} }
echoLocked = true
fd := tty.Fd() fd := int(tty.Fd())
if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 {
err = fmt.Errorf("Can't get terminal settings: %v", e) origTermStatePtr, err = unix.IoctlGetTermios(fd, ioctlReadTermios)
return if err != nil {
} return nil, fmt.Errorf("Can't get terminal settings: %v", err)
}
oldTermios := *origTermStatePtr
newTermios := oldTermios
newTermios.Lflag &^= syscall.ECHO
newTermios.Lflag |= syscall.ICANON | syscall.ISIG
newTermios.Iflag |= syscall.ICRNL
if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, &newTermios); err != nil {
return nil, fmt.Errorf("Can't set terminal settings: %v", err)
}
newState := oldState
newState.Lflag &^= syscall.ECHO
newState.Lflag |= syscall.ICANON | syscall.ISIG
newState.Iflag |= syscall.ICRNL
if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); e != 0 {
err = fmt.Errorf("Can't set terminal settings: %v", e)
return
} }
quit = make(chan int, 1) shutdownCh = make(chan struct{})
go catchTerminate(quit) go catchTerminate(shutdownCh)
return return
} }
func unlockEcho() (err error) { func unlockEcho() error {
echoLockMutex.Lock() echoLockMutex.Lock()
defer echoLockMutex.Unlock() defer echoLockMutex.Unlock()
if !echoLocked { if istty {
return if origTermStatePtr == nil {
return nil
}
fd := int(tty.Fd())
if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, origTermStatePtr); err != nil {
return fmt.Errorf("Can't set terminal settings: %v", err)
}
} }
echoLocked = false origTermStatePtr = nil
fd := tty.Fd()
if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 { return nil
err = fmt.Errorf("Can't set terminal settings")
}
return
} }
// listen exit signals and restore terminal state // listen exit signals and restore terminal state
func catchTerminate(quit chan int) { func catchTerminate(shutdownCh chan struct{}) {
sig := make(chan os.Signal, 1) sig := make(chan os.Signal, 1)
signal.Notify(sig, os.Interrupt, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGKILL) signal.Notify(sig, os.Interrupt, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGKILL)
defer signal.Stop(sig) defer signal.Stop(sig)
select { select {
case <-quit: case <-shutdownCh:
unlockEcho() unlockEcho()
case <-sig: case <-sig:
unlockEcho() unlockEcho()

@ -3,6 +3,7 @@
package pb package pb
import ( import (
"io"
"sync" "sync"
"time" "time"
) )
@ -11,22 +12,36 @@ import (
// You need call pool.Stop() after work // You need call pool.Stop() after work
func StartPool(pbs ...*ProgressBar) (pool *Pool, err error) { func StartPool(pbs ...*ProgressBar) (pool *Pool, err error) {
pool = new(Pool) pool = new(Pool)
if err = pool.start(); err != nil { if err = pool.Start(); err != nil {
return return
} }
pool.Add(pbs...) pool.Add(pbs...)
return return
} }
// NewPool initialises a pool with progress bars, but
// doesn't start it. You need to call Start manually
func NewPool(pbs ...*ProgressBar) (pool *Pool) {
pool = new(Pool)
pool.Add(pbs...)
return
}
type Pool struct { type Pool struct {
RefreshRate time.Duration Output io.Writer
bars []*ProgressBar RefreshRate time.Duration
quit chan int bars []*ProgressBar
finishOnce sync.Once lastBarsCount int
shutdownCh chan struct{}
workerCh chan struct{}
m sync.Mutex
finishOnce sync.Once
} }
// Add progress bars. // Add progress bars.
func (p *Pool) Add(pbs ...*ProgressBar) { func (p *Pool) Add(pbs ...*ProgressBar) {
p.m.Lock()
defer p.m.Unlock()
for _, bar := range pbs { for _, bar := range pbs {
bar.ManualUpdate = true bar.ManualUpdate = true
bar.NotPrint = true bar.NotPrint = true
@ -35,30 +50,38 @@ func (p *Pool) Add(pbs ...*ProgressBar) {
} }
} }
func (p *Pool) start() (err error) { func (p *Pool) Start() (err error) {
p.RefreshRate = DefaultRefreshRate p.RefreshRate = DefaultRefreshRate
quit, err := lockEcho() p.shutdownCh, err = lockEcho()
if err != nil { if err != nil {
return return
} }
p.quit = make(chan int) p.workerCh = make(chan struct{})
go p.writer(quit) go p.writer()
return return
} }
func (p *Pool) writer(finish chan int) { func (p *Pool) writer() {
var first = true var first = true
defer func() {
if first == false {
p.print(false)
} else {
p.print(true)
p.print(false)
}
close(p.workerCh)
}()
for { for {
select { select {
case <-time.After(p.RefreshRate): case <-time.After(p.RefreshRate):
if p.print(first) { if p.print(first) {
p.print(false) p.print(false)
finish <- 1
return return
} }
first = false first = false
case <-p.quit: case <-p.shutdownCh:
finish <- 1
return return
} }
} }
@ -66,11 +89,16 @@ func (p *Pool) writer(finish chan int) {
// Restore terminal state and close pool // Restore terminal state and close pool
func (p *Pool) Stop() error { func (p *Pool) Stop() error {
// Wait until one final refresh has passed.
time.Sleep(p.RefreshRate)
p.finishOnce.Do(func() { p.finishOnce.Do(func() {
close(p.quit) if p.shutdownCh != nil {
close(p.shutdownCh)
}
}) })
// Wait for the worker to complete
select {
case <-p.workerCh:
}
return unlockEcho() return unlockEcho()
} }

@ -8,13 +8,18 @@ import (
) )
func (p *Pool) print(first bool) bool { func (p *Pool) print(first bool) bool {
p.m.Lock()
defer p.m.Unlock()
var out string var out string
if !first { if !first {
coords, err := getCursorPos() coords, err := getCursorPos()
if err != nil { if err != nil {
log.Panic(err) log.Panic(err)
} }
coords.Y -= int16(len(p.bars)) coords.Y -= int16(p.lastBarsCount)
if coords.Y < 0 {
coords.Y = 0
}
coords.X = 0 coords.X = 0
err = setCursorPos(coords) err = setCursorPos(coords)
@ -24,12 +29,17 @@ func (p *Pool) print(first bool) bool {
} }
isFinished := true isFinished := true
for _, bar := range p.bars { for _, bar := range p.bars {
if !bar.isFinish { if !bar.IsFinished() {
isFinished = false isFinished = false
} }
bar.Update() bar.Update()
out += fmt.Sprintf("\r%s\n", bar.String()) out += fmt.Sprintf("\r%s\n", bar.String())
} }
fmt.Print(out) if p.Output != nil {
fmt.Fprint(p.Output, out)
} else {
fmt.Print(out)
}
p.lastBarsCount = len(p.bars)
return isFinished return isFinished
} }

@ -5,18 +5,25 @@ package pb
import "fmt" import "fmt"
func (p *Pool) print(first bool) bool { func (p *Pool) print(first bool) bool {
p.m.Lock()
defer p.m.Unlock()
var out string var out string
if !first { if !first {
out = fmt.Sprintf("\033[%dA", len(p.bars)) out = fmt.Sprintf("\033[%dA", p.lastBarsCount)
} }
isFinished := true isFinished := true
for _, bar := range p.bars { for _, bar := range p.bars {
if !bar.isFinish { if !bar.IsFinished() {
isFinished = false isFinished = false
} }
bar.Update() bar.Update()
out += fmt.Sprintf("\r%s\n", bar.String()) out += fmt.Sprintf("\r%s\n", bar.String())
} }
fmt.Print(out) if p.Output != nil {
fmt.Fprint(p.Output, out)
} else {
fmt.Print(out)
}
p.lastBarsCount = len(p.bars)
return isFinished return isFinished
} }

@ -11,7 +11,7 @@ var ctrlFinder = regexp.MustCompile("\x1b\x5b[0-9]+\x6d")
func escapeAwareRuneCountInString(s string) int { func escapeAwareRuneCountInString(s string) int {
n := runewidth.StringWidth(s) n := runewidth.StringWidth(s)
for _, sm := range ctrlFinder.FindAllString(s, -1) { for _, sm := range ctrlFinder.FindAllString(s, -1) {
n -= len(sm) n -= runewidth.StringWidth(sm)
} }
return n return n
} }

@ -1,7 +0,0 @@
// +build linux solaris
// +build !appengine
package pb
const ioctlReadTermios = 0x5401 // syscall.TCGETS
const ioctlWriteTermios = 0x5402 // syscall.TCSETS

13
vendor/gopkg.in/cheggaaa/pb.v1/termios_sysv.go generated vendored Normal file

@ -0,0 +1,13 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux solaris
// +build !appengine
package pb
import "golang.org/x/sys/unix"
const ioctlReadTermios = unix.TCGETS
const ioctlWriteTermios = unix.TCSETS