Mirror of https://github.com/containers/podman.git (synced 2025-09-15 04:34:45 +08:00)
Update vendor containers/(common,storage,buildah,image)
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
vendor/github.com/containers/storage/containers.go (generated, vendored): 252 lines changed
@@ -68,11 +68,24 @@ type Container struct {
 
 // rwContainerStore provides bookkeeping for information about Containers.
 type rwContainerStore interface {
     fileBasedStore
     metadataStore
     containerBigDataStore
     flaggableStore
 
+    // startWriting makes sure the store is fresh, and locks it for writing.
+    // If this succeeds, the caller MUST call stopWriting().
+    startWriting() error
+
+    // stopWriting releases locks obtained by startWriting.
+    stopWriting()
+
+    // startReading makes sure the store is fresh, and locks it for reading.
+    // If this succeeds, the caller MUST call stopReading().
+    startReading() error
+
+    // stopReading releases locks obtained by startReading.
+    stopReading()
+
     // Create creates a container that has a specified ID (or generates a
     // random one if an empty value is supplied) and optional names,
     // based on the specified image, using the specified layer as its
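
Illustrative only, not part of this commit: the comments above define a calling convention in which every successful startReading()/startWriting() is paired with the matching stop call, typically via defer. A minimal in-package sketch (readLocker and withReadLock are invented names for the example):

// readLocker is just the subset of the new interface this sketch needs.
type readLocker interface {
    startReading() error
    stopReading()
}

// withReadLock locks the store for reading, runs fn, and always releases the
// lock afterwards; if startReading fails there is nothing to release.
func withReadLock(store readLocker, fn func() error) error {
    if err := store.startReading(); err != nil {
        return err
    }
    defer store.stopReading()
    return fn()
}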
@@ -147,12 +160,12 @@ func (c *Container) ProcessLabel() string {
 }
 
 func (c *Container) MountOpts() []string {
-    switch c.Flags[mountOptsFlag].(type) {
+    switch value := c.Flags[mountOptsFlag].(type) {
     case []string:
-        return c.Flags[mountOptsFlag].([]string)
+        return value
     case []interface{}:
         var mountOpts []string
-        for _, v := range c.Flags[mountOptsFlag].([]interface{}) {
+        for _, v := range value {
             if flag, ok := v.(string); ok {
                 mountOpts = append(mountOpts, flag)
             }
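
The MountOpts change above is Go's switch value := x.(type) form, which binds the already-asserted value once instead of repeating the map lookup and type assertion in each case. A small self-contained illustration of the same idiom (the flags map and key are invented for the example):

package main

import "fmt"

// mountOptions mirrors the shape of the code above: a value that may arrive
// either as []string or, after a JSON round trip, as []interface{}.
func mountOptions(flags map[string]interface{}, key string) []string {
    switch value := flags[key].(type) {
    case []string:
        return value // already asserted; no second lookup needed
    case []interface{}:
        var opts []string
        for _, v := range value {
            if s, ok := v.(string); ok {
                opts = append(opts, s)
            }
        }
        return opts
    default:
        return nil
    }
}

func main() {
    flags := map[string]interface{}{"MountOpts": []interface{}{"nodev", "ro"}}
    fmt.Println(mountOptions(flags, "MountOpts")) // [nodev ro]
}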
@@ -163,6 +176,80 @@ func (c *Container) MountOpts() []string {
     }
 }
 
+// startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing.
+// If this succeeds, the caller MUST call stopWriting().
+//
+// This is an internal implementation detail of containerStore construction, every other caller
+// should use startWriting() instead.
+func (r *containerStore) startWritingWithReload(canReload bool) error {
+    r.lockfile.Lock()
+    succeeded := false
+    defer func() {
+        if !succeeded {
+            r.lockfile.Unlock()
+        }
+    }()
+
+    if canReload {
+        if err := r.reloadIfChanged(true); err != nil {
+            return err
+        }
+    }
+
+    succeeded = true
+    return nil
+}
+
+// startWriting makes sure the store is fresh, and locks it for writing.
+// If this succeeds, the caller MUST call stopWriting().
+func (r *containerStore) startWriting() error {
+    return r.startWritingWithReload(true)
+}
+
+// stopWriting releases locks obtained by startWriting.
+func (r *containerStore) stopWriting() {
+    r.lockfile.Unlock()
+}
+
+// startReading makes sure the store is fresh, and locks it for reading.
+// If this succeeds, the caller MUST call stopReading().
+func (r *containerStore) startReading() error {
+    r.lockfile.RLock()
+    succeeded := false
+    defer func() {
+        if !succeeded {
+            r.lockfile.Unlock()
+        }
+    }()
+
+    if err := r.reloadIfChanged(false); err != nil {
+        return err
+    }
+
+    succeeded = true
+    return nil
+}
+
+// stopReading releases locks obtained by startReading.
+func (r *containerStore) stopReading() {
+    r.lockfile.Unlock()
+}
+
+// reloadIfChanged reloads the contents of the store from disk if it is changed.
+//
+// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
+// if it is held for writing.
+func (r *containerStore) reloadIfChanged(lockedForWriting bool) error {
+    r.loadMut.Lock()
+    defer r.loadMut.Unlock()
+
+    modified, err := r.lockfile.Modified()
+    if err == nil && modified {
+        return r.load(lockedForWriting)
+    }
+    return err
+}
+
 func (r *containerStore) Containers() ([]Container, error) {
     containers := make([]Container, len(r.containers))
     for i := range r.containers {
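
startWritingWithReload and startReading above share a small reusable shape: a succeeded flag plus a deferred unlock, so the lock is released automatically on any early error return and kept only when the function runs to completion. A generic sketch of the same pattern outside this codebase (acquireAndRefresh and its arguments are made up for illustration):

package main

import (
    "fmt"
    "sync"
)

// acquireAndRefresh locks mu, runs refresh, and keeps the lock only if
// refresh succeeds; otherwise the deferred function releases it, so callers
// never have to clean up after a failed acquisition.
func acquireAndRefresh(mu *sync.Mutex, refresh func() error) error {
    mu.Lock()
    succeeded := false
    defer func() {
        if !succeeded {
            mu.Unlock()
        }
    }()

    if err := refresh(); err != nil {
        return err // the deferred func unlocks mu
    }

    succeeded = true // from here on the caller owns the lock
    return nil
}

func main() {
    var mu sync.Mutex
    if err := acquireAndRefresh(&mu, func() error { return nil }); err == nil {
        fmt.Println("lock held, state is fresh")
        mu.Unlock()
    }
}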
@@ -183,48 +270,60 @@ func (r *containerStore) datapath(id, key string) string {
     return filepath.Join(r.datadir(id), makeBigDataBaseName(key))
 }
 
-func (r *containerStore) Load() error {
+// load reloads the contents of the store from disk.
+//
+// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
+// if it is held for writing.
+func (r *containerStore) load(lockedForWriting bool) error {
     needSave := false
     rpath := r.containerspath()
     data, err := os.ReadFile(rpath)
     if err != nil && !os.IsNotExist(err) {
         return err
     }
 
     containers := []*Container{}
-    layers := make(map[string]*Container)
-    idlist := []string{}
-    ids := make(map[string]*Container)
-    names := make(map[string]*Container)
-    if err = json.Unmarshal(data, &containers); len(data) == 0 || err == nil {
-        idlist = make([]string, 0, len(containers))
-        for n, container := range containers {
-            idlist = append(idlist, container.ID)
-            ids[container.ID] = containers[n]
-            layers[container.LayerID] = containers[n]
-            for _, name := range container.Names {
-                if conflict, ok := names[name]; ok {
-                    r.removeName(conflict, name)
-                    needSave = true
-                }
-                names[name] = containers[n]
-            }
+    if len(data) != 0 {
+        if err := json.Unmarshal(data, &containers); err != nil {
+            return fmt.Errorf("loading %q: %w", rpath, err)
+        }
+    }
+    idlist := make([]string, 0, len(containers))
+    layers := make(map[string]*Container)
+    ids := make(map[string]*Container)
+    names := make(map[string]*Container)
+    for n, container := range containers {
+        idlist = append(idlist, container.ID)
+        ids[container.ID] = containers[n]
+        layers[container.LayerID] = containers[n]
+        for _, name := range container.Names {
+            if conflict, ok := names[name]; ok {
+                r.removeName(conflict, name)
+                needSave = true
+            }
+            names[name] = containers[n]
         }
     }
 
     r.containers = containers
     r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store.
     r.byid = ids
     r.bylayer = layers
     r.byname = names
     if needSave {
+        if !lockedForWriting {
+            // Eventually, the callers should be modified to retry with a write lock, instead.
+            return errors.New("container store is inconsistent and the current caller does not hold a write lock")
+        }
         return r.Save()
     }
     return nil
 }
 
 // Save saves the contents of the store to disk. It should be called with
 // the lock held, locked for writing.
 func (r *containerStore) Save() error {
-    if !r.Locked() {
-        return errors.New("container store is not locked")
-    }
+    r.lockfile.AssertLockedForWriting()
     rpath := r.containerspath()
     if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
         return err
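
The old Load silently treated an unparsable containers.json as an empty store (the len(data) == 0 || err == nil condition); the new load keeps a missing or empty file as a valid empty store, turns a present-but-undecodable file into a hard error wrapped with %w, and refuses to rewrite an inconsistent store unless the caller holds the write lock. A self-contained sketch of the read-and-decode split, with an invented record type and path:

package main

import (
    "encoding/json"
    "fmt"
    "os"
)

// readRecords returns an empty slice when path does not exist, and a wrapped
// error when the file exists but cannot be parsed, mirroring the new load().
func readRecords(path string) ([]map[string]interface{}, error) {
    data, err := os.ReadFile(path)
    if err != nil && !os.IsNotExist(err) {
        return nil, err
    }
    records := []map[string]interface{}{}
    if len(data) != 0 {
        if err := json.Unmarshal(data, &records); err != nil {
            return nil, fmt.Errorf("loading %q: %w", path, err)
        }
    }
    return records, nil
}

func main() {
    recs, err := readRecords("/nonexistent/containers.json")
    fmt.Println(len(recs), err) // 0 <nil>: a missing file is not an error
}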
@@ -236,7 +335,7 @@ func (r *containerStore) Save() error {
     if err := ioutils.AtomicWriteFile(rpath, jdata, 0600); err != nil {
         return err
     }
-    return r.Touch()
+    return r.lockfile.Touch()
 }
 
 func newContainerStore(dir string) (rwContainerStore, error) {
@@ -255,9 +354,11 @@ func newContainerStore(dir string) (rwContainerStore, error) {
         bylayer: make(map[string]*Container),
         byname: make(map[string]*Container),
     }
-    cstore.Lock()
-    defer cstore.Unlock()
-    if err := cstore.Load(); err != nil {
+    if err := cstore.startWritingWithReload(false); err != nil {
+        return nil, err
+    }
+    defer cstore.stopWriting()
+    if err := cstore.load(true); err != nil {
         return nil, err
     }
     return &cstore, nil
@@ -329,33 +430,31 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
     if err := hasOverlappingRanges(options.GIDMap); err != nil {
         return nil, err
     }
-    if err == nil {
-        container = &Container{
-            ID:             id,
-            Names:          names,
-            ImageID:        image,
-            LayerID:        layer,
-            Metadata:       metadata,
-            BigDataNames:   []string{},
-            BigDataSizes:   make(map[string]int64),
-            BigDataDigests: make(map[string]digest.Digest),
-            Created:        time.Now().UTC(),
-            Flags:          copyStringInterfaceMap(options.Flags),
-            UIDMap:         copyIDMap(options.UIDMap),
-            GIDMap:         copyIDMap(options.GIDMap),
-        }
-        r.containers = append(r.containers, container)
-        r.byid[id] = container
-        // This can only fail on duplicate IDs, which shouldn’t happen — and in that case the index is already in the desired state anyway.
-        // Implementing recovery from an unlikely and unimportant failure here would be too risky.
-        _ = r.idindex.Add(id)
-        r.bylayer[layer] = container
-        for _, name := range names {
-            r.byname[name] = container
-        }
-        err = r.Save()
-        container = copyContainer(container)
-    }
+    container = &Container{
+        ID:             id,
+        Names:          names,
+        ImageID:        image,
+        LayerID:        layer,
+        Metadata:       metadata,
+        BigDataNames:   []string{},
+        BigDataSizes:   make(map[string]int64),
+        BigDataDigests: make(map[string]digest.Digest),
+        Created:        time.Now().UTC(),
+        Flags:          copyStringInterfaceMap(options.Flags),
+        UIDMap:         copyIDMap(options.UIDMap),
+        GIDMap:         copyIDMap(options.GIDMap),
+    }
+    r.containers = append(r.containers, container)
+    r.byid[id] = container
+    // This can only fail on duplicate IDs, which shouldn’t happen — and in that case the index is already in the desired state anyway.
+    // Implementing recovery from an unlikely and unimportant failure here would be too risky.
+    _ = r.idindex.Add(id)
+    r.bylayer[layer] = container
+    for _, name := range names {
+        r.byname[name] = container
+    }
+    err = r.Save()
+    container = copyContainer(container)
     return container, err
 }
 
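
The Create hunk is mostly re-indentation: the body is unchanged except that it no longer sits inside an if err == nil block, since the failure paths ahead of it (such as the hasOverlappingRanges checks in the context lines) already return directly. A minimal illustration of the same early-return restructuring (validate and build are invented names):

// Before: the success path was nested under a condition.
//	if err == nil {
//		result = build()
//	}
//	return result, err
//
// After: failures return early, so the success path reads straight down.
func create(validate func() error, build func() string) (string, error) {
    if err := validate(); err != nil {
        return "", err
    }
    return build(), nil
}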
@@ -595,46 +694,3 @@ func (r *containerStore) Wipe() error {
     }
     return nil
 }
-
-func (r *containerStore) Lock() {
-    r.lockfile.Lock()
-}
-
-func (r *containerStore) RLock() {
-    r.lockfile.RLock()
-}
-
-func (r *containerStore) Unlock() {
-    r.lockfile.Unlock()
-}
-
-func (r *containerStore) Touch() error {
-    return r.lockfile.Touch()
-}
-
-func (r *containerStore) Modified() (bool, error) {
-    return r.lockfile.Modified()
-}
-
-func (r *containerStore) IsReadWrite() bool {
-    return r.lockfile.IsReadWrite()
-}
-
-func (r *containerStore) TouchedSince(when time.Time) bool {
-    return r.lockfile.TouchedSince(when)
-}
-
-func (r *containerStore) Locked() bool {
-    return r.lockfile.Locked()
-}
-
-func (r *containerStore) ReloadIfChanged() error {
-    r.loadMut.Lock()
-    defer r.loadMut.Unlock()
-
-    modified, err := r.Modified()
-    if err == nil && modified {
-        return r.Load()
-    }
-    return err
-}
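
The final hunk removes the old externally driven protocol, in which each caller invoked Lock/RLock, Unlock, Touch, Modified, Load and ReloadIfChanged itself and had to get the order right. Callers now go through the paired start/stop calls added earlier in this file. A before/after sketch of a typical writer (writeLocker, withWriteLock and the mutate callback are invented for the example):

// Before (old protocol, methods removed by this hunk):
//	store.Lock()
//	defer store.Unlock()
//	if err := store.ReloadIfChanged(); err != nil { ... }
//	... mutate the store ...
//
// writeLocker is the subset of the new interface this sketch needs.
type writeLocker interface {
    startWriting() error
    stopWriting()
}

// withWriteLock locks the store for writing and refreshes it in one call,
// runs mutate, and releases the lock afterwards.
func withWriteLock(store writeLocker, mutate func() error) error {
    if err := store.startWriting(); err != nil {
        return err
    }
    defer store.stopWriting()
    return mutate()
}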