Bump github.com/containers/image/v5 from 5.14.0 to 5.15.0

Bumps [github.com/containers/image/v5](https://github.com/containers/image) from 5.14.0 to 5.15.0.
- [Release notes](https://github.com/containers/image/releases)
- [Commits](https://github.com/containers/image/compare/v5.14.0...v5.15.0)

---
updated-dependencies:
- dependency-name: github.com/containers/image/v5
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
This commit is contained in:
dependabot[bot]
2021-08-01 11:33:58 +00:00
committed by GitHub
parent 4244288716
commit 58672847ee
22 changed files with 142 additions and 115 deletions

2
go.mod
View File

@@ -14,7 +14,7 @@ require (
github.com/containers/buildah v1.21.1-0.20210721171232-54cafea4c933 github.com/containers/buildah v1.21.1-0.20210721171232-54cafea4c933
github.com/containers/common v0.41.1-0.20210730122913-cd6c45fd20e3 github.com/containers/common v0.41.1-0.20210730122913-cd6c45fd20e3
github.com/containers/conmon v2.0.20+incompatible github.com/containers/conmon v2.0.20+incompatible
github.com/containers/image/v5 v5.14.0 github.com/containers/image/v5 v5.15.0
github.com/containers/ocicrypt v1.1.2 github.com/containers/ocicrypt v1.1.2
github.com/containers/psgo v1.5.2 github.com/containers/psgo v1.5.2
github.com/containers/storage v1.33.0 github.com/containers/storage v1.33.0

3
go.sum
View File

@@ -247,8 +247,9 @@ github.com/containers/common v0.41.1-0.20210730122913-cd6c45fd20e3/go.mod h1:UzA
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg= github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I= github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.13.2/go.mod h1:GkWursKDlDcUIT7L7vZf70tADvZCk/Ga0wgS0MuF0ag= github.com/containers/image/v5 v5.13.2/go.mod h1:GkWursKDlDcUIT7L7vZf70tADvZCk/Ga0wgS0MuF0ag=
github.com/containers/image/v5 v5.14.0 h1:ORaFZ/NwFjkSunMhxg9I8fQod8pgXkrYNiZzit/VqOE=
github.com/containers/image/v5 v5.14.0/go.mod h1:SxiBKOcKuT+4yTjD0AskjO+UwFvNcVOJ9qlAw1HNSPU= github.com/containers/image/v5 v5.14.0/go.mod h1:SxiBKOcKuT+4yTjD0AskjO+UwFvNcVOJ9qlAw1HNSPU=
github.com/containers/image/v5 v5.15.0 h1:NduhN20ptHNlf0uRny5iTJa2OodB9SLMEB4hKKbzBBs=
github.com/containers/image/v5 v5.15.0/go.mod h1:gzdBcooi6AFdiqfzirUqv90hUyHyI0MMdaqKzACKr2s=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=

View File

@@ -20,6 +20,7 @@ import (
"github.com/containers/image/v5/manifest" "github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache" "github.com/containers/image/v5/pkg/blobinfocache"
"github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/pkg/compression"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/signature" "github.com/containers/image/v5/signature"
"github.com/containers/image/v5/transports" "github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types" "github.com/containers/image/v5/types"
@@ -57,7 +58,7 @@ var compressionBufferSize = 1048576
// expectedCompressionFormats is used to check if a blob with a specified media type is compressed // expectedCompressionFormats is used to check if a blob with a specified media type is compressed
// using the algorithm that the media type says it should be compressed with // using the algorithm that the media type says it should be compressed with
var expectedCompressionFormats = map[string]*compression.Algorithm{ var expectedCompressionFormats = map[string]*compressiontypes.Algorithm{
imgspecv1.MediaTypeImageLayerGzip: &compression.Gzip, imgspecv1.MediaTypeImageLayerGzip: &compression.Gzip,
imgspecv1.MediaTypeImageLayerZstd: &compression.Zstd, imgspecv1.MediaTypeImageLayerZstd: &compression.Zstd,
manifest.DockerV2Schema2LayerMediaType: &compression.Gzip, manifest.DockerV2Schema2LayerMediaType: &compression.Gzip,
@@ -117,13 +118,12 @@ type copier struct {
progress chan types.ProgressProperties progress chan types.ProgressProperties
blobInfoCache internalblobinfocache.BlobInfoCache2 blobInfoCache internalblobinfocache.BlobInfoCache2
copyInParallel bool copyInParallel bool
compressionFormat compression.Algorithm compressionFormat compressiontypes.Algorithm
compressionLevel *int compressionLevel *int
ociDecryptConfig *encconfig.DecryptConfig ociDecryptConfig *encconfig.DecryptConfig
ociEncryptConfig *encconfig.EncryptConfig ociEncryptConfig *encconfig.EncryptConfig
maxParallelDownloads uint maxParallelDownloads uint
downloadForeignLayers bool downloadForeignLayers bool
fetchPartialBlobs bool
} }
// imageCopier tracks state specific to a single image (possibly an item of a manifest list) // imageCopier tracks state specific to a single image (possibly an item of a manifest list)
@@ -207,9 +207,6 @@ type Options struct {
// Download layer contents with "nondistributable" media types ("foreign" layers) and translate the layer media type // Download layer contents with "nondistributable" media types ("foreign" layers) and translate the layer media type
// to not indicate "nondistributable". // to not indicate "nondistributable".
DownloadForeignLayers bool DownloadForeignLayers bool
// FetchPartialBlobs indicates whether to attempt to fetch the blob partially. Experimental.
FetchPartialBlobs bool
} }
// validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value // validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value
@@ -290,15 +287,10 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
ociEncryptConfig: options.OciEncryptConfig, ociEncryptConfig: options.OciEncryptConfig,
maxParallelDownloads: options.MaxParallelDownloads, maxParallelDownloads: options.MaxParallelDownloads,
downloadForeignLayers: options.DownloadForeignLayers, downloadForeignLayers: options.DownloadForeignLayers,
fetchPartialBlobs: options.FetchPartialBlobs,
} }
// Default to using gzip compression unless specified otherwise. // Default to using gzip compression unless specified otherwise.
if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil { if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil {
algo, err := compression.AlgorithmByName("gzip") c.compressionFormat = compression.Gzip
if err != nil {
return nil, err
}
c.compressionFormat = algo
} else { } else {
c.compressionFormat = *options.DestinationCtx.CompressionFormat c.compressionFormat = *options.DestinationCtx.CompressionFormat
} }
@@ -1286,7 +1278,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// the destination has support for it. // the destination has support for it.
imgSource, okSource := ic.c.rawSource.(internalTypes.ImageSourceSeekable) imgSource, okSource := ic.c.rawSource.(internalTypes.ImageSourceSeekable)
imgDest, okDest := ic.c.dest.(internalTypes.ImageDestinationPartial) imgDest, okDest := ic.c.dest.(internalTypes.ImageDestinationPartial)
if ic.c.fetchPartialBlobs && okSource && okDest && !diffIDIsNeeded { if okSource && okDest && !diffIDIsNeeded {
bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done") bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
progress := make(chan int64) progress := make(chan int64)
@@ -1320,7 +1312,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
return info, cachedDiffID, nil return info, cachedDiffID, nil
} }
bar.Abort(true) bar.Abort(true)
logrus.Errorf("Failed to retrieve partial blob: %v", err) logrus.Debugf("Failed to retrieve partial blob: %v", err)
} }
// Fallback: copy the layer, computing the diffID if we need to do so // Fallback: copy the layer, computing the diffID if we need to do so
@@ -1364,7 +1356,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. // and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) { diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil
var diffIDChan chan diffIDResult var diffIDChan chan diffIDResult
err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithbelow err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithbelow
@@ -1375,7 +1367,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
_ = pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil _ = pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
}() }()
getDiffIDRecorder = func(decompressor compression.DecompressorFunc) io.Writer { getDiffIDRecorder = func(decompressor compressiontypes.DecompressorFunc) io.Writer {
// If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further // If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further
// reading from the pipe has failed, we dont really care. // reading from the pipe has failed, we dont really care.
// We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it, // We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
@@ -1394,7 +1386,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
} }
// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest. // diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest.
func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compression.DecompressorFunc) { func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compressiontypes.DecompressorFunc) {
result := diffIDResult{ result := diffIDResult{
digest: "", digest: "",
err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"), err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"),
@@ -1406,7 +1398,7 @@ func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadClo
} }
// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest. // computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest.
func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) (digest.Digest, error) { func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorFunc) (digest.Digest, error) {
if decompressor != nil { if decompressor != nil {
s, err := decompressor(stream) s, err := decompressor(stream)
if err != nil { if err != nil {
@@ -1439,7 +1431,7 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
// perhaps (de/re/)compressing it if canModifyBlob, // perhaps (de/re/)compressing it if canModifyBlob,
// and returns a complete blobInfo of the copied blob. // and returns a complete blobInfo of the copied blob.
func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer, getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer,
canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) { canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) {
if isConfig { // This is guaranteed by the caller, but set it here to be explicit. if isConfig { // This is guaranteed by the caller, but set it here to be explicit.
canModifyBlob = false canModifyBlob = false
@@ -1733,7 +1725,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
} }
// doCompression reads all input from src and writes its compressed equivalent to dest. // doCompression reads all input from src and writes its compressed equivalent to dest.
func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compression.Algorithm, compressionLevel *int) error { func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error {
compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel) compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel)
if err != nil { if err != nil {
return err return err
@@ -1751,7 +1743,7 @@ func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, co
} }
// compressGoroutine reads all input from src and writes its compressed equivalent to dest. // compressGoroutine reads all input from src and writes its compressed equivalent to dest.
func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compression.Algorithm) { func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
err := errors.New("Internal error: unexpected panic in compressGoroutine") err := errors.New("Internal error: unexpected panic in compressGoroutine")
defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily. defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
_ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil _ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil

View File

@@ -21,13 +21,26 @@ const version = "Directory Transport Version: 1.1\n"
var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data") var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data")
type dirImageDestination struct { type dirImageDestination struct {
ref dirReference ref dirReference
compress bool desiredLayerCompression types.LayerCompression
} }
// newImageDestination returns an ImageDestination for writing to a directory. // newImageDestination returns an ImageDestination for writing to a directory.
func newImageDestination(ref dirReference, compress bool) (types.ImageDestination, error) { func newImageDestination(sys *types.SystemContext, ref dirReference) (types.ImageDestination, error) {
d := &dirImageDestination{ref: ref, compress: compress} desiredLayerCompression := types.PreserveOriginal
if sys != nil {
if sys.DirForceCompress {
desiredLayerCompression = types.Compress
if sys.DirForceDecompress {
return nil, errors.Errorf("Cannot compress and decompress at the same time")
}
}
if sys.DirForceDecompress {
desiredLayerCompression = types.Decompress
}
}
d := &dirImageDestination{ref: ref, desiredLayerCompression: desiredLayerCompression}
// If directory exists check if it is empty // If directory exists check if it is empty
// if not empty, check whether the contents match that of a container image directory and overwrite the contents // if not empty, check whether the contents match that of a container image directory and overwrite the contents
@@ -101,10 +114,7 @@ func (d *dirImageDestination) SupportsSignatures(ctx context.Context) error {
} }
func (d *dirImageDestination) DesiredLayerCompression() types.LayerCompression { func (d *dirImageDestination) DesiredLayerCompression() types.LayerCompression {
if d.compress { return d.desiredLayerCompression
return types.Compress
}
return types.PreserveOriginal
} }
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually // AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually

View File

@@ -153,11 +153,7 @@ func (ref dirReference) NewImageSource(ctx context.Context, sys *types.SystemCon
// NewImageDestination returns a types.ImageDestination for this reference. // NewImageDestination returns a types.ImageDestination for this reference.
// The caller must call .Close() on the returned ImageDestination. // The caller must call .Close() on the returned ImageDestination.
func (ref dirReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { func (ref dirReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
compress := false return newImageDestination(sys, ref)
if sys != nil {
compress = sys.DirForceCompress
}
return newImageDestination(ref, compress)
} }
// DeleteImage deletes the named image from the registry, if supported. // DeleteImage deletes the named image from the registry, if supported.

View File

@@ -304,7 +304,7 @@ func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password
Password: password, Password: password,
} }
resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth, nil) resp, err := client.makeRequest(ctx, http.MethodGet, "/v2/", nil, nil, v2Auth, nil)
if err != nil { if err != nil {
return err return err
} }
@@ -343,8 +343,8 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
v1Res := &V1Results{} v1Res := &V1Results{}
// Get credentials from authfile for the underlying hostname // Get credentials from authfile for the underlying hostname
// lint:ignore SA1019 We can't use GetCredentialsForRef because we want to search the whole registry. // We can't use GetCredentialsForRef here because we want to search the whole registry.
auth, err := config.GetCredentials(sys, registry) // nolint:staticcheck // https://github.com/golangci/golangci-lint/issues/741 auth, err := config.GetCredentials(sys, registry)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "getting username and password") return nil, errors.Wrapf(err, "getting username and password")
} }
@@ -380,7 +380,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
u.RawQuery = q.Encode() u.RawQuery = q.Encode()
logrus.Debugf("trying to talk to v1 search endpoint") logrus.Debugf("trying to talk to v1 search endpoint")
resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth, nil) resp, err := client.makeRequest(ctx, http.MethodGet, u.String(), nil, nil, noAuth, nil)
if err != nil { if err != nil {
logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err) logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
} else { } else {
@@ -400,14 +400,15 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
searchRes := []SearchResult{} searchRes := []SearchResult{}
path := "/v2/_catalog" path := "/v2/_catalog"
for len(searchRes) < limit { for len(searchRes) < limit {
resp, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil) resp, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil { if err != nil {
logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err) logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
return nil, errors.Wrapf(err, "couldn't search registry %q", registry) return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
} }
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, httpResponseToError(resp, "")) err := httpResponseToError(resp, "")
logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, err)
return nil, errors.Wrapf(err, "couldn't search registry %q", registry) return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
} }
v2Res := &V2Results{} v2Res := &V2Results{}
@@ -533,11 +534,10 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url
// makeRequest should generally be preferred. // makeRequest should generally be preferred.
// Note that no exponential back off is performed when receiving an http 429 status code. // Note that no exponential back off is performed when receiving an http 429 status code.
func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
req, err := http.NewRequest(method, url, stream) req, err := http.NewRequestWithContext(ctx, method, url, stream)
if err != nil { if err != nil {
return nil, err return nil, err
} }
req = req.WithContext(ctx)
if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it. if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it.
req.ContentLength = streamLen req.ContentLength = streamLen
} }
@@ -630,13 +630,11 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
return nil, errors.Errorf("missing realm in bearer auth challenge") return nil, errors.Errorf("missing realm in bearer auth challenge")
} }
authReq, err := http.NewRequest(http.MethodPost, realm, nil) authReq, err := http.NewRequestWithContext(ctx, http.MethodPost, realm, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
authReq = authReq.WithContext(ctx)
// Make the form data required against the oauth2 authentication // Make the form data required against the oauth2 authentication
// More details here: https://docs.docker.com/registry/spec/auth/oauth/ // More details here: https://docs.docker.com/registry/spec/auth/oauth/
params := authReq.URL.Query() params := authReq.URL.Query()
@@ -680,12 +678,11 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
return nil, errors.Errorf("missing realm in bearer auth challenge") return nil, errors.Errorf("missing realm in bearer auth challenge")
} }
authReq, err := http.NewRequest(http.MethodGet, realm, nil) authReq, err := http.NewRequestWithContext(ctx, http.MethodGet, realm, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
authReq = authReq.WithContext(ctx)
params := authReq.URL.Query() params := authReq.URL.Query()
if c.auth.Username != "" { if c.auth.Username != "" {
params.Add("account", c.auth.Username) params.Add("account", c.auth.Username)
@@ -739,7 +736,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
ping := func(scheme string) error { ping := func(scheme string) error {
url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry) url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil) resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
if err != nil { if err != nil {
logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
return err return err
@@ -766,7 +763,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
// best effort to understand if we're talking to a V1 registry // best effort to understand if we're talking to a V1 registry
pingV1 := func(scheme string) bool { pingV1 := func(scheme string) bool {
url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry) url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil) resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
if err != nil { if err != nil {
logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
return false return false
@@ -800,7 +797,7 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
// using the original data structures. // using the original data structures.
func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) { func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest) path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil) res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@@ -68,7 +68,7 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
tags := make([]string, 0) tags := make([]string, 0)
for { for {
res, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil) res, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -134,7 +134,7 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef
"Accept": manifest.DefaultRequestedManifestMIMETypes, "Accept": manifest.DefaultRequestedManifestMIMETypes,
} }
res, err := client.makeRequest(ctx, "HEAD", path, headers, nil, v2Auth, nil) res, err := client.makeRequest(ctx, http.MethodHead, path, headers, nil, v2Auth, nil)
if err != nil { if err != nil {
return "", err return "", err
} }

View File

@@ -147,7 +147,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// FIXME? Chunked upload, progress reporting, etc. // FIXME? Chunked upload, progress reporting, etc.
uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)) uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
logrus.Debugf("Uploading %s", uploadPath) logrus.Debugf("Uploading %s", uploadPath)
res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth, nil) res, err := d.c.makeRequest(ctx, http.MethodPost, uploadPath, nil, nil, v2Auth, nil)
if err != nil { if err != nil {
return types.BlobInfo{}, err return types.BlobInfo{}, err
} }
@@ -168,7 +168,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// This error text should never be user-visible, we terminate only after makeRequestToResolvedURL // This error text should never be user-visible, we terminate only after makeRequestToResolvedURL
// returns, so there isnt a way for the error text to be provided to any of our callers. // returns, so there isnt a way for the error text to be provided to any of our callers.
defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload")) defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload"))
res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil) res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil)
if err != nil { if err != nil {
logrus.Debugf("Error uploading layer chunked %v", err) logrus.Debugf("Error uploading layer chunked %v", err)
return nil, err return nil, err
@@ -194,7 +194,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717 // TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
locationQuery.Set("digest", computedDigest.String()) locationQuery.Set("digest", computedDigest.String())
uploadLocation.RawQuery = locationQuery.Encode() uploadLocation.RawQuery = locationQuery.Encode()
res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil) res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
if err != nil { if err != nil {
return types.BlobInfo{}, err return types.BlobInfo{}, err
} }
@@ -215,7 +215,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) { func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) {
checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String()) checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
logrus.Debugf("Checking %s", checkPath) logrus.Debugf("Checking %s", checkPath)
res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth, extraScope) res, err := d.c.makeRequest(ctx, http.MethodHead, checkPath, nil, nil, v2Auth, extraScope)
if err != nil { if err != nil {
return false, -1, err return false, -1, err
} }
@ -246,7 +246,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
} }
mountPath := u.String() mountPath := u.String()
logrus.Debugf("Trying to mount %s", mountPath) logrus.Debugf("Trying to mount %s", mountPath)
res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth, extraScope) res, err := d.c.makeRequest(ctx, http.MethodPost, mountPath, nil, nil, v2Auth, extraScope)
if err != nil { if err != nil {
return err return err
} }
@ -264,7 +264,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
return errors.Wrap(err, "determining upload URL after a mount attempt") return errors.Wrap(err, "determining upload URL after a mount attempt")
} }
logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String()) logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth, extraScope) res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation.String(), nil, nil, -1, v2Auth, extraScope)
if err != nil { if err != nil {
logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err) logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
} else { } else {
@ -424,7 +424,7 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst
if mimeType != "" { if mimeType != "" {
headers["Content-Type"] = []string{mimeType} headers["Content-Type"] = []string{mimeType}
} }
res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth, nil) res, err := d.c.makeRequest(ctx, http.MethodPut, path, headers, bytes.NewReader(m), v2Auth, nil)
if err != nil { if err != nil {
return err return err
} }
@ -640,7 +640,7 @@ sigExists:
} }
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), manifestDigest.String()) path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), manifestDigest.String())
res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth, nil) res, err := d.c.makeRequest(ctx, http.MethodPut, path, nil, bytes.NewReader(body), v2Auth, nil)
if err != nil { if err != nil {
return err return err
} }

View File

@ -192,7 +192,7 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
headers := map[string][]string{ headers := map[string][]string{
"Accept": manifest.DefaultRequestedManifestMIMETypes, "Accept": manifest.DefaultRequestedManifestMIMETypes,
} }
res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth, nil) res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
if err != nil { if err != nil {
return nil, "", err return nil, "", err
} }
@ -248,7 +248,7 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string)
// NOTE: we must not authenticate on additional URLs as those // NOTE: we must not authenticate on additional URLs as those
// can be abused to leak credentials or tokens. Please // can be abused to leak credentials or tokens. Please
// refer to CVE-2020-15157 for more information. // refer to CVE-2020-15157 for more information.
resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil) resp, err = s.c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
if err == nil { if err == nil {
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
err = errors.Errorf("error fetching external blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode)) err = errors.Errorf("error fetching external blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode))
@ -295,7 +295,7 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo,
path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String()) path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
logrus.Debugf("Downloading %s", path) logrus.Debugf("Downloading %s", path)
res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth, nil) res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -364,7 +364,7 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String()) path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
logrus.Debugf("Downloading %s", path) logrus.Debugf("Downloading %s", path)
res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil) res, err := s.c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil { if err != nil {
return nil, 0, err return nil, 0, err
} }
@ -454,11 +454,10 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (
case "http", "https": case "http", "https":
logrus.Debugf("GET %s", url) logrus.Debugf("GET %s", url)
req, err := http.NewRequest("GET", url.String(), nil) req, err := http.NewRequestWithContext(ctx, http.MethodGet, url.String(), nil)
if err != nil { if err != nil {
return nil, false, err return nil, false, err
} }
req = req.WithContext(ctx)
res, err := s.c.client.Do(req) res, err := s.c.client.Do(req)
if err != nil { if err != nil {
return nil, false, err return nil, false, err
@ -523,7 +522,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
return err return err
} }
getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail) getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail)
get, err := c.makeRequest(ctx, "GET", getPath, headers, nil, v2Auth, nil) get, err := c.makeRequest(ctx, http.MethodGet, getPath, headers, nil, v2Auth, nil)
if err != nil { if err != nil {
return err return err
} }
@ -545,7 +544,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
// When retrieving the digest from a registry >= 2.3 use the following header: // When retrieving the digest from a registry >= 2.3 use the following header:
// "Accept": "application/vnd.docker.distribution.manifest.v2+json" // "Accept": "application/vnd.docker.distribution.manifest.v2+json"
delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil, v2Auth, nil) delete, err := c.makeRequest(ctx, http.MethodDelete, deletePath, headers, nil, v2Auth, nil)
if err != nil { if err != nil {
return err return err
} }

View File

@ -2,6 +2,7 @@ package blobinfocache
import ( import (
"github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/pkg/compression"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types" "github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest" digest "github.com/opencontainers/go-digest"
) )
@ -47,7 +48,7 @@ func CandidateLocationsFromV2(v2candidates []BICReplacementCandidate2) []types.B
// compression algorithm, or Uncompressed, or UnknownCompression. This is typically used by // compression algorithm, or Uncompressed, or UnknownCompression. This is typically used by
// TryReusingBlob() implementations to set values in the BlobInfo structure that they return // TryReusingBlob() implementations to set values in the BlobInfo structure that they return
// upon success. // upon success.
func OperationAndAlgorithmForCompressor(compressorName string) (types.LayerCompression, *compression.Algorithm, error) { func OperationAndAlgorithmForCompressor(compressorName string) (types.LayerCompression, *compressiontypes.Algorithm, error) {
switch compressorName { switch compressorName {
case Uncompressed: case Uncompressed:
return types.Decompress, nil, nil return types.Decompress, nil, nil

View File

@ -3,7 +3,7 @@ package manifest
import ( import (
"fmt" "fmt"
"github.com/containers/image/v5/pkg/compression" compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types" "github.com/containers/image/v5/types"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )
@ -44,7 +44,7 @@ func layerInfosToStrings(infos []LayerInfo) []string {
// compressionMIMETypeSet describes a set of MIME type “variants” that represent differently-compressed // compressionMIMETypeSet describes a set of MIME type “variants” that represent differently-compressed
// versions of “the same kind of content”. // versions of “the same kind of content”.
// The map key is the return value of compression.Algorithm.Name(), or mtsUncompressed; // The map key is the return value of compressiontypes.Algorithm.Name(), or mtsUncompressed;
// the map value is a MIME type, or mtsUnsupportedMIMEType to mean "recognized but unsupported". // the map value is a MIME type, or mtsUnsupportedMIMEType to mean "recognized but unsupported".
type compressionMIMETypeSet map[string]string type compressionMIMETypeSet map[string]string
@ -59,7 +59,7 @@ const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that mean
// If the compression algorithm is unrecognized, or mimeType is not known to have variants that // If the compression algorithm is unrecognized, or mimeType is not known to have variants that
// differ from it only in what type of compression has been applied, the returned error will not be // differ from it only in what type of compression has been applied, the returned error will not be
// a ManifestLayerCompressionIncompatibilityError. // a ManifestLayerCompressionIncompatibilityError.
func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType string, algorithm *compression.Algorithm) (string, error) { func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType string, algorithm *compressiontypes.Algorithm) (string, error) {
if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries
return "", fmt.Errorf("cannot update unknown MIME type") return "", fmt.Errorf("cannot update unknown MIME type")
} }

View File

@ -5,7 +5,7 @@ import (
"fmt" "fmt"
"time" "time"
"github.com/containers/image/v5/pkg/compression" compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/pkg/strslice" "github.com/containers/image/v5/pkg/strslice"
"github.com/containers/image/v5/types" "github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest" "github.com/opencontainers/go-digest"
@ -214,14 +214,14 @@ func (m *Schema2) LayerInfos() []LayerInfo {
var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{ var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{
{ {
mtsUncompressed: DockerV2Schema2ForeignLayerMediaType, mtsUncompressed: DockerV2Schema2ForeignLayerMediaType,
compression.Gzip.Name(): DockerV2Schema2ForeignLayerMediaTypeGzip, compressiontypes.GzipAlgorithmName: DockerV2Schema2ForeignLayerMediaTypeGzip,
compression.Zstd.Name(): mtsUnsupportedMIMEType, compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
}, },
{ {
mtsUncompressed: DockerV2SchemaLayerMediaTypeUncompressed, mtsUncompressed: DockerV2SchemaLayerMediaTypeUncompressed,
compression.Gzip.Name(): DockerV2Schema2LayerMediaType, compressiontypes.GzipAlgorithmName: DockerV2Schema2LayerMediaType,
compression.Zstd.Name(): mtsUnsupportedMIMEType, compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
}, },
} }

View File

@ -5,7 +5,7 @@ import (
"fmt" "fmt"
"strings" "strings"
"github.com/containers/image/v5/pkg/compression" compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types" "github.com/containers/image/v5/types"
ociencspec "github.com/containers/ocicrypt/spec" ociencspec "github.com/containers/ocicrypt/spec"
"github.com/opencontainers/go-digest" "github.com/opencontainers/go-digest"
@ -96,14 +96,14 @@ func (m *OCI1) LayerInfos() []LayerInfo {
var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{ var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{
{ {
mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable, mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable,
compression.Gzip.Name(): imgspecv1.MediaTypeImageLayerNonDistributableGzip, compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableGzip,
compression.Zstd.Name(): imgspecv1.MediaTypeImageLayerNonDistributableZstd, compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableZstd,
}, },
{ {
mtsUncompressed: imgspecv1.MediaTypeImageLayer, mtsUncompressed: imgspecv1.MediaTypeImageLayer,
compression.Gzip.Name(): imgspecv1.MediaTypeImageLayerGzip, compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerGzip,
compression.Zstd.Name(): imgspecv1.MediaTypeImageLayerZstd, compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerZstd,
}, },
} }

View File

@ -148,13 +148,13 @@ func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io
errWrap := errors.New("failed fetching external blob from all urls") errWrap := errors.New("failed fetching external blob from all urls")
for _, url := range urls { for _, url := range urls {
req, err := http.NewRequest("GET", url, nil) req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil { if err != nil {
errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error()) errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error())
continue continue
} }
resp, err := s.client.Do(req.WithContext(ctx)) resp, err := s.client.Do(req)
if err != nil { if err != nil {
errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error()) errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error())
continue continue

View File

@ -79,11 +79,10 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re
logrus.Debugf("Will send body: %s", requestBody) logrus.Debugf("Will send body: %s", requestBody)
requestBodyReader = bytes.NewReader(requestBody) requestBodyReader = bytes.NewReader(requestBody)
} }
req, err := http.NewRequest(method, url.String(), requestBodyReader) req, err := http.NewRequestWithContext(ctx, method, url.String(), requestBodyReader)
if err != nil { if err != nil {
return nil, err return nil, err
} }
req = req.WithContext(ctx)
if len(c.bearerToken) != 0 { if len(c.bearerToken) != 0 {
req.Header.Set("Authorization", "Bearer "+c.bearerToken) req.Header.Set("Authorization", "Bearer "+c.bearerToken)
@ -137,7 +136,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re
func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName string) (*image, error) { func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName string) (*image, error) {
// FIXME: validate components per validation.IsValidPathSegmentName? // FIXME: validate components per validation.IsValidPathSegmentName?
path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreamimages/%s@%s", c.ref.namespace, c.ref.stream, imageStreamImageName) path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreamimages/%s@%s", c.ref.namespace, c.ref.stream, imageStreamImageName)
body, err := c.doRequest(ctx, "GET", path, nil) body, err := c.doRequest(ctx, http.MethodGet, path, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -273,7 +272,7 @@ func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error
// FIXME: validate components per validation.IsValidPathSegmentName? // FIXME: validate components per validation.IsValidPathSegmentName?
path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream) path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream)
body, err := s.client.doRequest(ctx, "GET", path, nil) body, err := s.client.doRequest(ctx, http.MethodGet, path, nil)
if err != nil { if err != nil {
return err return err
} }
@ -496,7 +495,7 @@ sigExists:
if err != nil { if err != nil {
return err return err
} }
_, err = d.client.doRequest(ctx, "POST", "/oapi/v1/imagesignatures", body) _, err = d.client.doRequest(ctx, http.MethodPost, "/oapi/v1/imagesignatures", body)
if err != nil { if err != nil {
return err return err
} }

View File

@ -9,7 +9,7 @@ import (
"github.com/containers/image/v5/pkg/compression/internal" "github.com/containers/image/v5/pkg/compression/internal"
"github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/storage/pkg/chunked" "github.com/containers/storage/pkg/chunked/compressor"
"github.com/klauspost/pgzip" "github.com/klauspost/pgzip"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
@ -21,15 +21,20 @@ type Algorithm = types.Algorithm
var ( var (
// Gzip compression. // Gzip compression.
Gzip = internal.NewAlgorithm("gzip", "gzip", []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor) Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, types.GzipAlgorithmName,
[]byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor)
// Bzip2 compression. // Bzip2 compression.
Bzip2 = internal.NewAlgorithm("bzip2", "bzip2", []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor) Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, types.Bzip2AlgorithmName,
[]byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor)
// Xz compression. // Xz compression.
Xz = internal.NewAlgorithm("Xz", "xz", []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor) Xz = internal.NewAlgorithm(types.XzAlgorithmName, types.XzAlgorithmName,
[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor)
// Zstd compression. // Zstd compression.
Zstd = internal.NewAlgorithm("zstd", "zstd", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor) Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, types.ZstdAlgorithmName,
[]byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
// Zstd:chunked compression. // Zstd:chunked compression.
ZstdChunked = internal.NewAlgorithm("zstd:chunked", "zstd", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, chunked.ZstdCompressor) ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, /* Note: InternalUnstableUndocumentedMIMEQuestionMark is not ZstdChunkedAlgorithmName */
nil, ZstdDecompressor, compressor.ZstdCompressor)
compressionAlgorithms = map[string]Algorithm{ compressionAlgorithms = map[string]Algorithm{
Gzip.Name(): Gzip, Gzip.Name(): Gzip,
@ -118,7 +123,8 @@ func DetectCompressionFormat(input io.Reader) (Algorithm, DecompressorFunc, io.R
var retAlgo Algorithm var retAlgo Algorithm
var decompressor DecompressorFunc var decompressor DecompressorFunc
for _, algo := range compressionAlgorithms { for _, algo := range compressionAlgorithms {
if bytes.HasPrefix(buffer[:n], internal.AlgorithmPrefix(algo)) { prefix := internal.AlgorithmPrefix(algo)
if len(prefix) > 0 && bytes.HasPrefix(buffer[:n], prefix) {
logrus.Debugf("Detected compression format %s", algo.Name()) logrus.Debugf("Detected compression format %s", algo.Name())
retAlgo = algo retAlgo = algo
decompressor = internal.AlgorithmDecompressor(algo) decompressor = internal.AlgorithmDecompressor(algo)

View File

@ -14,7 +14,7 @@ type DecompressorFunc func(io.Reader) (io.ReadCloser, error)
type Algorithm struct { type Algorithm struct {
name string name string
mime string mime string
prefix []byte prefix []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection.
decompressor DecompressorFunc decompressor DecompressorFunc
compressor CompressorFunc compressor CompressorFunc
} }

View File

@ -11,3 +11,31 @@ type DecompressorFunc = internal.DecompressorFunc
// Algorithm is a compression algorithm provided and supported by pkg/compression. // Algorithm is a compression algorithm provided and supported by pkg/compression.
// It cant be supplied from the outside. // It cant be supplied from the outside.
type Algorithm = internal.Algorithm type Algorithm = internal.Algorithm
const (
// GzipAlgorithmName is the name used by pkg/compression.Gzip.
// NOTE: Importing only this /types package does not inherently guarantee a Gzip algorithm
// will actually be available. (In fact it is intended for this types package not to depend
// on any of the implementations.)
GzipAlgorithmName = "gzip"
// Bzip2AlgorithmName is the name used by pkg/compression.Bzip2.
// NOTE: Importing only this /types package does not inherently guarantee a Bzip2 algorithm
// will actually be available. (In fact it is intended for this types package not to depend
// on any of the implementations.)
Bzip2AlgorithmName = "bzip2"
// XzAlgorithmName is the name used by pkg/compression.Xz.
// NOTE: Importing only this /types package does not inherently guarantee a Xz algorithm
// will actually be available. (In fact it is intended for this types package not to depend
// on any of the implementations.)
XzAlgorithmName = "Xz"
// ZstdAlgorithmName is the name used by pkg/compression.Zstd.
// NOTE: Importing only this /types package does not inherently guarantee a Zstd algorithm
// will actually be available. (In fact it is intended for this types package not to depend
// on any of the implementations.)
ZstdAlgorithmName = "zstd"
// ZstdChunkedAlgorithmName is the name used by pkg/compression.ZstdChunked.
// NOTE: Importing only this /types package does not inherently guarantee a ZstdChunked algorithm
// will actually be available. (In fact it is intended for this types package not to depend
// on any of the implementations.)
ZstdChunkedAlgorithmName = "zstd:chunked"
)

View File

@ -236,9 +236,8 @@ func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath {
// file or .docker/config.json, including support for OAuth2 and IdentityToken. // file or .docker/config.json, including support for OAuth2 and IdentityToken.
// If an entry is not found, an empty struct is returned. // If an entry is not found, an empty struct is returned.
// //
// Deprecated: GetCredentialsForRef should be used in favor of this API // GetCredentialsForRef should almost always be used in favor of this API to
// because it allows different credentials for different repositories on the // allow different credentials for different repositories on the same registry.
// same registry.
func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuthConfig, error) { func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuthConfig, error) {
return getCredentialsWithHomeDir(sys, nil, registry, homedir.Get()) return getCredentialsWithHomeDir(sys, nil, registry, homedir.Get())
} }
@ -665,14 +664,11 @@ func findAuthentication(ref reference.Named, registry, path string, legacyFormat
// those entries even in non-legacyFormat ~/.docker/config.json. // those entries even in non-legacyFormat ~/.docker/config.json.
// The docker.io registry still uses the /v1/ key with a special host name, // The docker.io registry still uses the /v1/ key with a special host name,
// so account for that as well. // so account for that as well.
registry = normalizeAuthFileKey(registry, legacyFormat) registry = normalizeRegistry(registry)
normalizedAuths := map[string]dockerAuthConfig{}
for k, v := range auths.AuthConfigs { for k, v := range auths.AuthConfigs {
normalizedAuths[normalizeAuthFileKey(k, legacyFormat)] = v if normalizeAuthFileKey(k, legacyFormat) == registry {
} return decodeDockerAuth(v)
}
if val, exists := normalizedAuths[registry]; exists {
return decodeDockerAuth(val)
} }
return types.DockerAuthConfig{}, nil return types.DockerAuthConfig{}, nil

View File

@ -636,6 +636,8 @@ type SystemContext struct {
// === dir.Transport overrides === // === dir.Transport overrides ===
// DirForceCompress compresses the image layers if set to true // DirForceCompress compresses the image layers if set to true
DirForceCompress bool DirForceCompress bool
// DirForceDecompress decompresses the image layers if set to true
DirForceDecompress bool
// CompressionFormat is the format to use for the compression of the blobs // CompressionFormat is the format to use for the compression of the blobs
CompressionFormat *compression.Algorithm CompressionFormat *compression.Algorithm

View File

@ -6,7 +6,7 @@ const (
// VersionMajor is for an API incompatible changes // VersionMajor is for an API incompatible changes
VersionMajor = 5 VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner // VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 14 VersionMinor = 15
// VersionPatch is for backwards-compatible bug fixes // VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0 VersionPatch = 0

2
vendor/modules.txt vendored
View File

@ -125,7 +125,7 @@ github.com/containers/common/pkg/umask
github.com/containers/common/version github.com/containers/common/version
# github.com/containers/conmon v2.0.20+incompatible # github.com/containers/conmon v2.0.20+incompatible
github.com/containers/conmon/runner/config github.com/containers/conmon/runner/config
# github.com/containers/image/v5 v5.14.0 # github.com/containers/image/v5 v5.15.0
github.com/containers/image/v5/copy github.com/containers/image/v5/copy
github.com/containers/image/v5/directory github.com/containers/image/v5/directory
github.com/containers/image/v5/directory/explicitfilepath github.com/containers/image/v5/directory/explicitfilepath