pkg/autoupdate: update unit-by-unit

Change the auto-update logic to update unit-by-unit rather than by policy.
This allows, in theory now and in practice later, for multiple
containers to run in a single systemd unit and to be updated in
sequence before the unit is restarted.

[NO NEW TESTS NEEDED] - should not change behavior.

Signed-off-by: Valentin Rothberg <vrothberg@redhat.com>
This commit is contained in:
Valentin Rothberg
2022-08-02 16:18:57 +02:00
parent f8b6a81ae4
commit 2c999f1ecb

View File

@ -50,14 +50,11 @@ var supportedPolicies = map[string]Policy{
"local": PolicyLocalImage, "local": PolicyLocalImage,
} }
// policyMappers assembles update tasks by policy
type policyMapper map[Policy][]*task
// updater includes shared state for auto-updating one or more containers. // updater includes shared state for auto-updating one or more containers.
type updater struct { type updater struct {
conn *dbus.Conn conn *dbus.Conn
idToImage map[string]*libimage.Image idToImage map[string]*libimage.Image
imageToPolicyMapper map[string]policyMapper unitToTasks map[string][]*task
options *entities.AutoUpdateOptions options *entities.AutoUpdateOptions
updatedRawImages map[string]bool updatedRawImages map[string]bool
runtime *libpod.Runtime runtime *libpod.Runtime
@ -152,26 +149,22 @@ func (u *updater) assembleImageMap(ctx context.Context) error {
// It returns a slice of successfully restarted systemd units and a slice of // It returns a slice of successfully restarted systemd units and a slice of
// errors encountered during auto update. // errors encountered during auto update.
func AutoUpdate(ctx context.Context, runtime *libpod.Runtime, options entities.AutoUpdateOptions) ([]*entities.AutoUpdateReport, []error) { func AutoUpdate(ctx context.Context, runtime *libpod.Runtime, options entities.AutoUpdateOptions) ([]*entities.AutoUpdateReport, []error) {
// Note that (most) errors are non-fatal such that a single
// misconfigured container does not prevent others from being updated
// (which could be a security threat).
auto := updater{ auto := updater{
options: &options, options: &options,
runtime: runtime, runtime: runtime,
updatedRawImages: make(map[string]bool), updatedRawImages: make(map[string]bool),
} }
// Assemble a map `image ID -> *libimage.Image` that we can consult // Find auto-update tasks and assemble them by unit.
// later on for lookups. errors := auto.assembleTasks(ctx)
if err := auto.assembleImageMap(ctx); err != nil {
return nil, []error{err}
}
// Create a map from `image ID -> []*Container`.
if errs := auto.imageContainersMap(); len(errs) > 0 {
return nil, errs
}
// Nothing to do. // Nothing to do.
if len(auto.imageToPolicyMapper) == 0 { if len(auto.unitToTasks) == 0 {
return nil, nil return nil, errors
} }
// Connect to DBUS. // Connect to DBUS.
@ -187,27 +180,28 @@ func AutoUpdate(ctx context.Context, runtime *libpod.Runtime, options entities.A
// Update all images/container according to their auto-update policy. // Update all images/container according to their auto-update policy.
var allReports []*entities.AutoUpdateReport var allReports []*entities.AutoUpdateReport
var errs []error for unit, tasks := range auto.unitToTasks {
for imageID, policyMapper := range auto.imageToPolicyMapper { // Sanity check: we'll support that in the future.
if _, exists := auto.idToImage[imageID]; !exists { if len(tasks) != 1 {
errs = append(errs, fmt.Errorf("container image ID %q not found in local storage", imageID)) errors = append(errors, fmt.Errorf("only 1 task per unit supported but unit %s has %d", unit, len(tasks)))
return nil, errs return nil, errors
} }
for _, task := range policyMapper[PolicyRegistryImage] { for _, task := range tasks {
report, err := auto.updateRegistry(ctx, task) var report *entities.AutoUpdateReport
if err != nil { var reportError error
errs = append(errs, err)
} switch task.policy {
if report != nil { case PolicyRegistryImage:
allReports = append(allReports, report) report, reportError = auto.updateRegistry(ctx, task)
} case PolicyLocalImage:
report, reportError = auto.updateLocally(ctx, task)
default:
reportError = fmt.Errorf("unexpected auto-update policy %s for container %s", task.policy, task.container.ID())
} }
for _, task := range policyMapper[PolicyLocalImage] { if reportError != nil {
report, err := auto.updateLocally(ctx, task) errors = append(errors, reportError)
if err != nil {
errs = append(errs, err)
} }
if report != nil { if report != nil {
allReports = append(allReports, report) allReports = append(allReports, report)
@ -215,7 +209,7 @@ func AutoUpdate(ctx context.Context, runtime *libpod.Runtime, options entities.A
} }
} }
return allReports, errs return allReports, errors
} }
// updateRegistry updates the image/container according to the "registry" policy. // updateRegistry updates the image/container according to the "registry" policy.
@ -372,15 +366,21 @@ func (u *updater) restartSystemdUnit(ctx context.Context, ctr *libpod.Container,
} }
} }
// imageContainersMap generates a map[image ID] -> [containers using the image] // assembleTasks assembles update tasks per unit and populates a mapping from
// of all containers with a valid auto-update policy. // `unit -> []*task` such that multiple containers _can_ run in a single unit.
func (u *updater) imageContainersMap() []error { func (u *updater) assembleTasks(ctx context.Context) []error {
// Assemble a map `image ID -> *libimage.Image` that we can consult
// later on for lookups.
if err := u.assembleImageMap(ctx); err != nil {
return []error{err}
}
allContainers, err := u.runtime.GetAllContainers() allContainers, err := u.runtime.GetAllContainers()
if err != nil { if err != nil {
return []error{err} return []error{err}
} }
u.imageToPolicyMapper = make(map[string]policyMapper) u.unitToTasks = make(map[string][]*task)
errors := []error{} errors := []error{}
for _, c := range allContainers { for _, c := range allContainers {
@ -395,6 +395,8 @@ func (u *updater) imageContainersMap() []error {
continue continue
} }
// Check the container's auto-update policy which is configured
// as a label.
labels := ctr.Labels() labels := ctr.Labels()
value, exists := labels[Label] value, exists := labels[Label]
if !exists { if !exists {
@ -405,18 +407,19 @@ func (u *updater) imageContainersMap() []error {
errors = append(errors, err) errors = append(errors, err)
continue continue
} }
// Skip labels not related to autoupdate
if policy == PolicyDefault { if policy == PolicyDefault {
continue continue
} }
id, _ := ctr.Image() // Make sure the container runs in a systemd unit which is
policyMap, exists := u.imageToPolicyMapper[id] // stored as a label at container creation.
unit, exists := labels[systemdDefine.EnvVariable]
if !exists { if !exists {
policyMap = make(map[Policy][]*task) errors = append(errors, fmt.Errorf("auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable))
continue
} }
id, _ := ctr.Image()
image, exists := u.idToImage[id] image, exists := u.idToImage[id]
if !exists { if !exists {
err := fmt.Errorf("internal error: no image found for ID %s", id) err := fmt.Errorf("internal error: no image found for ID %s", id)
@ -424,16 +427,15 @@ func (u *updater) imageContainersMap() []error {
continue continue
} }
unit, _ := labels[systemdDefine.EnvVariable]
t := task{ t := task{
container: ctr, container: ctr,
policy: policy, policy: policy,
image: image, image: image,
unit: unit, unit: unit,
} }
policyMap[policy] = append(policyMap[policy], &t)
u.imageToPolicyMapper[id] = policyMap // Add the task to the unit.
u.unitToTasks[unit] = append(u.unitToTasks[unit], &t)
} }
return errors return errors