Bump github.com/containers/image/v5 from 5.9.0 to 5.10.0

Bumps [github.com/containers/image/v5](https://github.com/containers/image) from 5.9.0 to 5.10.0.
- [Release notes](https://github.com/containers/image/releases)
- [Commits](https://github.com/containers/image/compare/v5.9.0...v5.10.0)
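
The go.mod hunk itself is not shown in this view; the bump amounts to updating the require line (a sketch, with the bulk of the 63 changed files presumably being the vendored copies refreshed by `go mod vendor`):

```
require github.com/containers/image/v5 v5.10.0 // previously v5.9.0
```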

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
dependabot-preview[bot] authored on 2021-01-28 09:17:40 +00:00; committed by Daniel J Walsh
commit 75c3b33899 (parent 9d59daa7cc)
63 changed files with 693 additions and 655 deletions


@@ -23,6 +23,7 @@ import (
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/pkg/tlsclientconfig"
"github.com/containers/image/v5/types"
"github.com/containers/image/v5/version"
"github.com/containers/storage/pkg/homedir"
clientLib "github.com/docker/distribution/registry/client"
"github.com/docker/go-connections/tlsconfig"
@@ -65,6 +66,8 @@ var (
{path: "/etc/containers/certs.d", absolute: true},
{path: "/etc/docker/certs.d", absolute: true},
}
defaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)"
)
// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go:
@@ -92,8 +95,9 @@ type bearerToken struct {
// dockerClient is configuration for dealing with a single Docker registry.
type dockerClient struct {
// The following members are set by newDockerClient and do not change afterwards.
- sys      *types.SystemContext
- registry string
+ sys       *types.SystemContext
+ registry  string
+ userAgent string
// tlsClientConfig is setup by newDockerClient and will be used and updated
// by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime.
@@ -200,9 +204,7 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
logrus.Debugf("error accessing certs directory due to permissions: %v", err)
continue
}
- if err != nil {
- return "", err
- }
+ return "", err
}
return fullCertDirPath, nil
}
@@ -277,9 +279,15 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
}
tlsClientConfig.InsecureSkipVerify = skipVerify
userAgent := defaultUserAgent
if sys != nil && sys.DockerRegistryUserAgent != "" {
userAgent = sys.DockerRegistryUserAgent
}
return &dockerClient{
sys: sys,
registry: registry,
userAgent: userAgent,
tlsClientConfig: tlsClientConfig,
}, nil
}
@@ -529,9 +537,7 @@ func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method,
req.Header.Add(n, hh)
}
}
- if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
- req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
- }
+ req.Header.Add("User-Agent", c.userAgent)
if auth == v2Auth {
if err := c.setupRequestAuth(req, extraScope); err != nil {
return nil, err
@@ -637,9 +643,7 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
params.Add("client_id", "containers/image")
authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode()))
- if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
- authReq.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
- }
+ authReq.Header.Add("User-Agent", c.userAgent)
authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
res, err := c.client.Do(authReq)
@@ -692,9 +696,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
if c.auth.Username != "" && c.auth.Password != "" {
authReq.SetBasicAuth(c.auth.Username, c.auth.Password)
}
- if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
- authReq.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
- }
+ authReq.Header.Add("User-Agent", c.userAgent)
logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
res, err := c.client.Do(authReq)
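
The hunks above replace the scattered `c.sys.DockerRegistryUserAgent` checks with a single `userAgent` field that `newDockerClient` resolves once, falling back to the new `defaultUserAgent`. A minimal sketch of how a caller might override it (not part of this commit; the registry name and agent string are placeholders, and creating the image source contacts the registry):

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/types"
)

func main() {
	// When DockerRegistryUserAgent is empty, requests are sent with
	// defaultUserAgent ("containers/<version> (github.com/containers/image)").
	sys := &types.SystemContext{
		DockerRegistryUserAgent: "my-tool/1.2.3", // hypothetical override
	}

	ref, err := docker.ParseReference("//registry.example.com/library/busybox:latest")
	if err != nil {
		panic(err)
	}
	// NewImageSource talks to the registry; registry.example.com is a placeholder.
	src, err := ref.NewImageSource(context.Background(), sys)
	if err != nil {
		panic(err)
	}
	defer src.Close()
	fmt.Println("opened", ref.DockerReference().String(), "with a custom User-Agent")
}
```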


@@ -15,6 +15,7 @@ import (
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/internal/uploadreader"
"github.com/containers/image/v5/manifest"
@@ -284,7 +285,9 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
- // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+ // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+ // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+ // reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
@@ -299,17 +302,23 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
}
if exists {
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
- return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
+ return true, types.BlobInfo{Digest: info.Digest, MediaType: info.MediaType, Size: size}, nil
}
// Then try reusing blobs from other locations.
- for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) {
+ bic := blobinfocache.FromBlobInfoCache(cache)
+ candidates := bic.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute)
+ for _, candidate := range candidates {
candidateRepo, err := parseBICLocationReference(candidate.Location)
if err != nil {
logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
continue
}
logrus.Debugf("Trying to reuse cached location %s in %s", candidate.Digest.String(), candidateRepo.Name())
if candidate.CompressorName != blobinfocache.Uncompressed {
logrus.Debugf("Trying to reuse cached location %s compressed with %s in %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name())
} else {
logrus.Debugf("Trying to reuse cached location %s with no compression in %s", candidate.Digest.String(), candidateRepo.Name())
}
// Sanity checks:
if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
@@ -351,8 +360,16 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
continue
}
}
- cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
- return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil
+ bic.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
+ compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
+ if err != nil {
+ logrus.Debugf("... Failed: %v", err)
+ continue
+ }
+ return true, types.BlobInfo{Digest: candidate.Digest, MediaType: info.MediaType, Size: size, CompressionOperation: compressionOperation, CompressionAlgorithm: compressionAlgorithm}, nil
}
return false, types.BlobInfo{}, nil
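
With `CandidateLocations2`, the compression state of a reused blob is reported back through `CompressionOperation`/`CompressionAlgorithm`, so the manifest being written can be adjusted. A rough sketch of how a caller might interpret those fields (`describeReuse` is a hypothetical helper, not library API):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/types"
)

// describeReuse shows how the CompressionOperation / CompressionAlgorithm
// fields returned by TryReusingBlob could be interpreted by a caller.
func describeReuse(reused bool, info types.BlobInfo) string {
	if !reused {
		return "blob not present; it has to be uploaded"
	}
	switch info.CompressionOperation {
	case types.Compress:
		name := "unknown"
		if info.CompressionAlgorithm != nil {
			name = info.CompressionAlgorithm.Name()
		}
		return fmt.Sprintf("reused %s (%d bytes); record it as compressed with %s in the manifest", info.Digest, info.Size, name)
	case types.Decompress:
		return fmt.Sprintf("reused %s (%d bytes); record it as uncompressed in the manifest", info.Digest, info.Size)
	default: // types.PreserveOriginal
		return fmt.Sprintf("reused %s (%d bytes); keep the original media type", info.Digest, info.Size)
	}
}

func main() {
	fmt.Println(describeReuse(false, types.BlobInfo{}))
}
```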


@@ -64,6 +64,11 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
}
attempts := []attempt{}
for _, pullSource := range pullSources {
- logrus.Debugf("Trying to access %q", pullSource.Reference)
+ if sys.DockerLogMirrorChoice {
+ logrus.Infof("Trying to access %q", pullSource.Reference)
+ } else {
+ logrus.Debugf("Trying to access %q", pullSource.Reference)
+ }
s, err := newImageSourceAttempt(ctx, sys, ref, pullSource)
if err == nil {
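
The new `DockerLogMirrorChoice` flag on `types.SystemContext` promotes the "Trying to access" message from Debug to Info, making the chosen mirror (or primary registry) visible without debug logging. A minimal sketch of turning it on (assuming the SystemContext is then passed to the usual image-source or copy calls):

```go
package main

import (
	"github.com/containers/image/v5/types"
	"github.com/sirupsen/logrus"
)

func main() {
	// Show Info-level messages so the mirror choice is visible.
	logrus.SetLevel(logrus.InfoLevel)

	// With DockerLogMirrorChoice set, newImageSource logs
	// "Trying to access ..." at Info level for every pull source it tries.
	sys := &types.SystemContext{
		DockerLogMirrorChoice: true,
	}
	_ = sys // pass this SystemContext to NewImageSource / copy calls
}
```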


@@ -159,7 +159,9 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
- // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+ // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+ // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+ // reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {


@@ -96,10 +96,16 @@ func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference,
// registriesDirPath returns a path to registries.d
func registriesDirPath(sys *types.SystemContext) string {
return registriesDirPathWithHomeDir(sys, homedir.Get())
}
// registriesDirPathWithHomeDir is an internal implementation detail of registriesDirPath,
// it exists only to allow testing it with an artificial home directory.
func registriesDirPathWithHomeDir(sys *types.SystemContext, homeDir string) string {
if sys != nil && sys.RegistriesDirPath != "" {
return sys.RegistriesDirPath
}
- userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir)
+ userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir)
if _, err := os.Stat(userRegistriesDirPath); err == nil {
return userRegistriesDirPath
}


@@ -52,5 +52,26 @@ func DockerReferenceNamespaces(ref reference.Named) []string {
}
name = name[:lastSlash]
}
// Strip port number if any, before appending to res slice.
// Currently, the most compatible behavior is to return
// example.com:8443/ns, example.com:8443, *.com.
// If a port number is not specified, the expected behavior would be
// example.com/ns, example.com, *.com
portNumColon := strings.Index(name, ":")
if portNumColon != -1 {
name = name[:portNumColon]
}
// Append wildcarded domains to res slice
for {
firstDot := strings.Index(name, ".")
if firstDot == -1 {
break
}
name = name[firstDot+1:]
res = append(res, "*."+name)
}
return res
}
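
The added lines strip any port before building the wildcard entries, so policy namespaces expand exactly as the new comment describes. A small sketch of the expected expansion (assuming the function shown here is `docker/policyconfiguration.DockerReferenceNamespaces`; the file name is not visible in this view):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/policyconfiguration"
	"github.com/containers/image/v5/docker/reference"
)

func main() {
	ref, err := reference.ParseNormalizedNamed("example.com:8443/ns/repo")
	if err != nil {
		panic(err)
	}
	// Expected output with the logic above:
	//   example.com:8443/ns/repo
	//   example.com:8443/ns
	//   example.com:8443
	//   *.com
	for _, ns := range policyconfiguration.DockerReferenceNamespaces(ref) {
		fmt.Println(ns)
	}
}
```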


@@ -86,7 +86,9 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
- // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+ // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+ // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+ // reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {