podman/pkg/api/handlers/compat/images_push.go
Matej Vasek ad182976b6 Use request context instead of background
This prevents a goroutine leak:
if the background context were used, the push operation would continue even
after the client aborted the request by closing the connection.

[NO TESTS NEEDED]

Signed-off-by: Matej Vasek <mvasek@redhat.com>
2021-06-01 15:25:20 +02:00
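The patch swaps context.Background() for the request's context in the push goroutine (the go func near the bottom of the file). As a rough standalone sketch of why that matters, not taken from podman: a handler that hands a request-scoped context to a long-running worker sees ctx.Done() fire as soon as the client hangs up, while a background context is never canceled and the worker goroutine lives on. The names longRunningPush and handler, and the :8080 address, are made up for illustration.

package main

import (
    "context"
    "fmt"
    "net/http"
    "time"
)

// longRunningPush stands in for a long operation such as imageEngine.Push.
// It checks ctx.Done() between units of work, so cancellation is observed.
func longRunningPush(ctx context.Context) error {
    for i := 0; i < 50; i++ {
        select {
        case <-ctx.Done():
            // With r.Context() this fires when the client closes the connection;
            // with context.Background() it never fires and the goroutine keeps going.
            return ctx.Err()
        case <-time.After(200 * time.Millisecond):
            // pretend one chunk of a layer was pushed
        }
    }
    return nil
}

func handler(w http.ResponseWriter, r *http.Request) {
    errChan := make(chan error, 1)
    go func() {
        // Request-scoped context, as in the patched PushImage handler.
        errChan <- longRunningPush(r.Context())
    }()
    if err := <-errChan; err != nil {
        fmt.Fprintln(w, "push aborted:", err)
        return
    }
    fmt.Fprintln(w, "push complete")
}

func main() {
    http.HandleFunc("/push", handler)
    _ = http.ListenAndServe(":8080", nil)
}

The buffered channel only keeps this toy worker from blocking if nobody reads the result; the real handler reads from its (unbuffered) pushErrChan inside the select loop instead.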


package compat

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "net/http"
    "strings"

    "github.com/containers/image/v5/types"
    "github.com/containers/podman/v3/libpod"
    "github.com/containers/podman/v3/pkg/api/handlers/utils"
    "github.com/containers/podman/v3/pkg/auth"
    "github.com/containers/podman/v3/pkg/domain/entities"
    "github.com/containers/podman/v3/pkg/domain/infra/abi"
    "github.com/containers/storage"
    "github.com/docker/docker/pkg/jsonmessage"
    "github.com/gorilla/schema"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)
// PushImage is the handler for the compat http endpoint for pushing images.
func PushImage(w http.ResponseWriter, r *http.Request) {
    decoder := r.Context().Value("decoder").(*schema.Decoder)
    runtime := r.Context().Value("runtime").(*libpod.Runtime)
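    // The push writes the resulting manifest digest into this temporary file;
    // it is read back after a successful push to build the final
    // "<tag>: digest: <digest>" status message.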
    digestFile, err := ioutil.TempFile("", "digest.txt")
    if err != nil {
        utils.Error(w, "unable to create digest tempfile", http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
        return
    }
    defer digestFile.Close()

    // Now use the ABI implementation to prevent us from having duplicate
    // code.
    imageEngine := abi.ImageEngine{Libpod: runtime}
    query := struct {
        All         bool   `schema:"all"`
        Compress    bool   `schema:"compress"`
        Destination string `schema:"destination"`
        Format      string `schema:"format"`
        TLSVerify   bool   `schema:"tlsVerify"`
        Tag         string `schema:"tag"`
    }{
        // This is where you can override the golang default value for one of the fields.
        TLSVerify: true,
    }
    if err := decoder.Decode(&query, r.URL.Query()); err != nil {
        utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
        return
    }

    // Note that Docker's docs state "Image name or ID" to be in the path
    // parameter but it really must be a name as Docker does not allow for
    // pushing an image by ID.
    imageName := strings.TrimSuffix(utils.GetName(r), "/push") // GetName returns the entire path
    if query.Tag != "" {
        imageName += ":" + query.Tag
    }
    if _, err := utils.ParseStorageReference(imageName); err != nil {
        utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
            errors.Wrapf(err, "image source %q is not a containers-storage-transport reference", imageName))
        return
    }
    authconf, authfile, key, err := auth.GetCredentials(r)
    if err != nil {
        utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "failed to parse %q header for %s", key, r.URL.String()))
        return
    }
    defer auth.RemoveAuthfile(authfile)

    var username, password string
    if authconf != nil {
        username = authconf.Username
        password = authconf.Password
    }

    options := entities.ImagePushOptions{
        All:        query.All,
        Authfile:   authfile,
        Compress:   query.Compress,
        Format:     query.Format,
        Password:   password,
        Username:   username,
        DigestFile: digestFile.Name(),
        Quiet:      true,
        Progress:   make(chan types.ProgressProperties),
    }
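    // Only override SkipTLSVerify when the client actually sent a tlsVerify
    // parameter; otherwise the OptionalBool stays undefined and the engine's
    // default applies.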
if _, found := r.URL.Query()["tlsVerify"]; found {
options.SkipTLSVerify = types.NewOptionalBool(!query.TLSVerify)
}
var destination string
if _, found := r.URL.Query()["destination"]; found {
destination = query.Destination
} else {
destination = imageName
}
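    // From here on the response is streamed: progress is written as a sequence of
    // JSON messages and flushed after each one so the client sees output while the
    // push is still running.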
    flush := func() {}
    if flusher, ok := w.(http.Flusher); ok {
        flush = flusher.Flush
    }

    // Headers must be set before WriteHeader sends the status line; adding
    // Content-Type afterwards would be silently ignored.
    w.Header().Add("Content-Type", "application/json")
    w.WriteHeader(http.StatusOK)
    flush()

    var report jsonmessage.JSONMessage
    enc := json.NewEncoder(w)
    enc.SetEscapeHTML(true)
    report.Status = fmt.Sprintf("The push refers to repository [%s]", imageName)
    if err := enc.Encode(report); err != nil {
        logrus.Warnf("Failed to json encode error %q", err.Error())
    }
    flush()
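    // Run the push in its own goroutine so progress events can be forwarded to the
    // client while it runs. Passing the request context (rather than
    // context.Background()) ties the push to the client connection: an aborted
    // request cancels the operation instead of leaking the goroutine.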
    pushErrChan := make(chan error)
    go func() {
        pushErrChan <- imageEngine.Push(r.Context(), imageName, destination, options)
    }()
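    // Translate the engine's progress events into Docker-compatible jsonmessage
    // records until the push either fails or completes.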
loop: // break out of for/select infinite loop
    for {
        report = jsonmessage.JSONMessage{}
        select {
        case e := <-options.Progress:
            switch e.Event {
            case types.ProgressEventNewArtifact:
                report.Status = "Preparing"
            case types.ProgressEventRead:
                report.Status = "Pushing"
                report.Progress = &jsonmessage.JSONProgress{
                    Current: int64(e.Offset),
                    Total:   e.Artifact.Size,
                }
            case types.ProgressEventSkipped:
                report.Status = "Layer already exists"
            case types.ProgressEventDone:
                report.Status = "Pushed"
            }
            report.ID = e.Artifact.Digest.Encoded()[0:12]
            if err := enc.Encode(report); err != nil {
                logrus.Warnf("Failed to json encode error %q", err.Error())
            }
            flush()
        case err := <-pushErrChan:
            if err != nil {
                var msg string
                // Map a missing local image to Docker's wording; report anything else verbatim.
                if errors.Cause(err) == storage.ErrImageUnknown {
                    msg = "An image does not exist locally with the tag: " + imageName
                } else {
                    msg = err.Error()
                }
                report.Error = &jsonmessage.JSONError{
                    Message: msg,
                }
                report.ErrorMessage = msg
                if err := enc.Encode(report); err != nil {
                    logrus.Warnf("Failed to json encode error %q", err.Error())
                }
                flush()
                break loop
            }
            digestBytes, err := ioutil.ReadAll(digestFile)
            if err != nil {
                report.Error = &jsonmessage.JSONError{
                    Message: err.Error(),
                }
                report.ErrorMessage = err.Error()
                if err := enc.Encode(report); err != nil {
                    logrus.Warnf("Failed to json encode error %q", err.Error())
                }
                flush()
                break loop
            }

            tag := query.Tag
            if tag == "" {
                tag = "latest"
            }
            report.Status = fmt.Sprintf("%s: digest: %s", tag, string(digestBytes))
            if err := enc.Encode(report); err != nil {
                logrus.Warnf("Failed to json encode error %q", err.Error())
            }
            flush()
            break loop // break out of for/select infinite loop
        }
    }
}