fd1d951262
We used to use ignition to perform any customization required for podman machine because our input was a generic FCOS image. Now that we are building our own images, some of this customization can be migrated to the Containerfile itself and be less of a burden in our code at boot up.

At the time of this PR, the Containerfile can be found at https://github.com/baude/podman-machine-images/tree/main. It is only present for a so-called daily image. There is little likelihood that this would be the final location for the Containerfile, so consider it a working version only.

Split WSL and the rest apart in the e2e tests so we no longer pull the generic FCOS image for testing.

Note: the change to the pull image name is so PRs already in the queue are not immediately broken.

[NO NEW TESTS REQUIRED]

Signed-off-by: Brent Baude <bbaude@redhat.com>
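One common way to express this kind of WSL/non-WSL split in Go is through build constraints, so each test binary only compiles the image selection for its own provider. The sketch below is a hypothetical illustration of that pattern only, not the actual podman e2e code; the package name, helper, and image reference are placeholders.

//go:build !windows

// Hypothetical sketch of a build-constraint split (not the podman e2e code):
// this file compiles on every platform except Windows, while a sibling file
// guarded by "//go:build windows" would return the WSL artifact instead.
package e2e

// defaultTestImage names the machine image the non-WSL e2e tests would pull.
// The reference is a placeholder, not a real image location.
func defaultTestImage() string {
	return "quay.io/example/podman-machine-daily:latest"
}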
75 lines | 1.8 KiB | Go
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd

package machine

import (
	"errors"
	"fmt"
	"syscall"
	"time"

	psutil "github.com/shirou/gopsutil/v3/process"
	"github.com/sirupsen/logrus"
	"golang.org/x/sys/unix"
)

// loops and sleepTime define the backoff schedule: up to loops checks,
// starting at sleepTime and doubling the wait after each check.
const (
	loops     = 8
	sleepTime = time.Millisecond * 1
)

// backoffForProcess checks whether the process still exists, e.g. after a
// signal such as sigterm has been sent. If the process still exists after
// the loops and sleep time are exhausted, an error is returned.
func backoffForProcess(p *psutil.Process) error {
	sleepInterval := sleepTime
	for i := 0; i < loops; i++ {
		running, err := p.IsRunning()
		if err != nil {
			// It is possible that while in our loop, the PID vaporizes, triggering
			// an input/output error (#21845)
			if errors.Is(err, unix.EIO) {
				return nil
			}
			return fmt.Errorf("checking if process running: %w", err)
		}
		if !running {
			return nil
		}

		time.Sleep(sleepInterval)
		// double the time
		sleepInterval += sleepInterval
	}
	return fmt.Errorf("process %d has not ended", p.Pid)
}

// waitOnProcess takes a pid and sends a kill signal to it. It then waits,
// backing off between checks, for the process to no longer exist.
func waitOnProcess(processID int) error {
	logrus.Infof("Going to stop gvproxy (PID %d)", processID)

	p, err := psutil.NewProcess(int32(processID))
	if err != nil {
		return fmt.Errorf("looking up PID %d: %w", processID, err)
	}

	running, err := p.IsRunning()
	if err != nil {
		return fmt.Errorf("checking if gvproxy is running: %w", err)
	}
	if !running {
		return nil
	}

	if err := p.Kill(); err != nil {
		if errors.Is(err, syscall.ESRCH) {
			logrus.Debugf("Gvproxy already dead, exiting cleanly")
			return nil
		}
		return err
	}
	return backoffForProcess(p)
}
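For context, here is a minimal sketch of how waitOnProcess might be driven by a caller elsewhere in the machine package, assuming the gvproxy PID was written to a pid file when the VM started. The helper name stopGvproxyFromPidfile and the pid-file convention are assumptions for illustration only, not part of the file above or of podman's actual cleanup path.

// Hypothetical companion file in package machine (illustration only): read the
// gvproxy PID from a pid file and delegate the kill-and-wait logic to
// waitOnProcess, which backs off until the process is gone.
package machine

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// stopGvproxyFromPidfile parses a PID out of pidfilePath and stops that
// process via waitOnProcess.
func stopGvproxyFromPidfile(pidfilePath string) error {
	data, err := os.ReadFile(pidfilePath)
	if err != nil {
		return fmt.Errorf("reading gvproxy pid file: %w", err)
	}
	pid, err := strconv.Atoi(strings.TrimSpace(string(data)))
	if err != nil {
		return fmt.Errorf("parsing gvproxy pid: %w", err)
	}
	return waitOnProcess(pid)
}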