mirror of
				https://github.com/containers/podman.git
				synced 2025-11-01 02:42:11 +08:00 
			
		
		
		
	
		
			
				
	
	
		
			1968 lines
		
	
	
		
			59 KiB
		
	
	
	
		
			Go
		
	
	
	
	
	
			
		
		
	
	
			1968 lines
		
	
	
		
			59 KiB
		
	
	
	
		
			Go
		
	
	
	
	
	
| package storage
 | ||
| 
 | ||
| import (
 | ||
| 	"bytes"
 | ||
| 	"fmt"
 | ||
| 	"io"
 | ||
| 	"io/ioutil"
 | ||
| 	"os"
 | ||
| 	"path"
 | ||
| 	"path/filepath"
 | ||
| 	"reflect"
 | ||
| 	"sort"
 | ||
| 	"strings"
 | ||
| 	"sync"
 | ||
| 	"time"
 | ||
| 
 | ||
| 	drivers "github.com/containers/storage/drivers"
 | ||
| 	"github.com/containers/storage/pkg/archive"
 | ||
| 	"github.com/containers/storage/pkg/idtools"
 | ||
| 	"github.com/containers/storage/pkg/ioutils"
 | ||
| 	"github.com/containers/storage/pkg/mount"
 | ||
| 	"github.com/containers/storage/pkg/stringid"
 | ||
| 	"github.com/containers/storage/pkg/system"
 | ||
| 	"github.com/containers/storage/pkg/tarlog"
 | ||
| 	"github.com/containers/storage/pkg/truncindex"
 | ||
| 	multierror "github.com/hashicorp/go-multierror"
 | ||
| 	"github.com/klauspost/pgzip"
 | ||
| 	digest "github.com/opencontainers/go-digest"
 | ||
| 	"github.com/opencontainers/selinux/go-selinux/label"
 | ||
| 	"github.com/pkg/errors"
 | ||
| 	"github.com/sirupsen/logrus"
 | ||
| 	"github.com/vbatts/tar-split/archive/tar"
 | ||
| 	"github.com/vbatts/tar-split/tar/asm"
 | ||
| 	"github.com/vbatts/tar-split/tar/storage"
 | ||
| )
 | ||
| 
 | ||
const (
	// tarSplitSuffix is the suffix of the per-layer file that records the
	// tar-split metadata needed to reproduce a layer's diff byte-for-byte.
	tarSplitSuffix = ".tar-split.gz"
	// incompleteFlag marks a layer whose creation has started but not yet
	// finished; Load() deletes layers that still carry it.
	incompleteFlag = "incomplete"
)
 | ||
| 
 | ||
// A Layer is a record of a copy-on-write layer that's stored by the lower
// level graph driver.
type Layer struct {
	// ID is either one which was specified at create-time, or a random
	// value which was generated by the library.
	ID string `json:"id"`

	// Names is an optional set of user-defined convenience values.  The
	// layer can be referred to by its ID or any of its names.  Names are
	// unique among layers.
	Names []string `json:"names,omitempty"`

	// Parent is the ID of a layer from which this layer inherits data.
	Parent string `json:"parent,omitempty"`

	// Metadata is data we keep for the convenience of the caller.  It is not
	// expected to be large, since it is kept in memory.
	Metadata string `json:"metadata,omitempty"`

	// MountLabel is an SELinux label which should be used when attempting to mount
	// the layer.
	MountLabel string `json:"mountlabel,omitempty"`

	// MountPoint is the path where the layer is mounted, or where it was most
	// recently mounted.  This can change between subsequent Unmount() and
	// Mount() calls, so the caller should consult this value after Mount()
	// succeeds to find the location of the container's root filesystem.
	// Not serialized into layers.json; it is tracked via mountpoints.json.
	MountPoint string `json:"-"`

	// MountCount is used as a reference count for the container's layer being
	// mounted at the mount point.  Not serialized into layers.json.
	MountCount int `json:"-"`

	// Created is the datestamp for when this layer was created.  Older
	// versions of the library did not track this information, so callers
	// will likely want to use the IsZero() method to verify that a value
	// is set before using it.
	Created time.Time `json:"created,omitempty"`

	// CompressedDigest is the digest of the blob that was last passed to
	// ApplyDiff() or Put(), as it was presented to us.
	CompressedDigest digest.Digest `json:"compressed-diff-digest,omitempty"`

	// CompressedSize is the length of the blob that was last passed to
	// ApplyDiff() or Put(), as it was presented to us.  If
	// CompressedDigest is not set, this should be treated as if it were an
	// uninitialized value.
	CompressedSize int64 `json:"compressed-size,omitempty"`

	// UncompressedDigest is the digest of the blob that was last passed to
	// ApplyDiff() or Put(), after we decompressed it.  Often referred to
	// as a DiffID.
	UncompressedDigest digest.Digest `json:"diff-digest,omitempty"`

	// UncompressedSize is the length of the blob that was last passed to
	// ApplyDiff() or Put(), after we decompressed it.  If
	// UncompressedDigest is not set, this should be treated as if it were
	// an uninitialized value.
	UncompressedSize int64 `json:"diff-size,omitempty"`

	// CompressionType is the type of compression which we detected on the blob
	// that was last passed to ApplyDiff() or Put().
	CompressionType archive.Compression `json:"compression,omitempty"`

	// UIDs and GIDs are lists of UIDs and GIDs used in the layer.  This
	// field is only populated (i.e., will only contain one or more
	// entries) if the layer was created using ApplyDiff() or Put().
	UIDs []uint32 `json:"uidset,omitempty"`
	GIDs []uint32 `json:"gidset,omitempty"`

	// Flags is arbitrary data about the layer.  The library itself uses it
	// to record the incompleteFlag while a layer is being created.
	Flags map[string]interface{} `json:"flags,omitempty"`

	// UIDMap and GIDMap are used for setting up a layer's contents
	// for use inside of a user namespace where UID mapping is being used.
	UIDMap []idtools.IDMap `json:"uidmap,omitempty"`
	GIDMap []idtools.IDMap `json:"gidmap,omitempty"`

	// ReadOnly is true if this layer resides in a read-only layer store.
	// Derived at load time; never serialized.
	ReadOnly bool `json:"-"`

	// BigDataNames is a list of names of data items that we keep for the
	// convenience of the caller.  They can be large, and are only in
	// memory when being read from or written to disk.
	BigDataNames []string `json:"big-data-names,omitempty"`
}
 | ||
| 
 | ||
// layerMountPoint is the JSON record stored in mountpoints.json for each
// mounted layer: which layer, where it is mounted, and how many active
// mount references it has.  Written by saveMounts() and read by loadMounts().
type layerMountPoint struct {
	ID         string `json:"id"`
	MountPoint string `json:"path"`
	MountCount int    `json:"count"`
}
 | ||
| 
 | ||
// DiffOptions override the default behavior of Diff() methods.
type DiffOptions struct {
	// Compression, if set, overrides the default compressor when generating a diff.
	Compression *archive.Compression
}
 | ||
| 
 | ||
// ROLayerStore wraps a graph driver, adding the ability to refer to layers by
// name, and keeping track of parent-child relationships, along with a list of
// all known layers.  It exposes only read operations; LayerStore extends it
// with the writing half.
type ROLayerStore interface {
	ROFileBasedStore
	ROMetadataStore
	ROLayerBigDataStore

	// Exists checks if a layer with the specified name or ID is known.
	Exists(id string) bool

	// Get retrieves information about a layer given an ID or name.
	Get(id string) (*Layer, error)

	// Status returns a slice of key-value pairs, suitable for human consumption,
	// relaying whatever status information the underlying driver can share.
	Status() ([][2]string, error)

	// Changes returns a slice of Change structures, which contain a pathname
	// (Path) and a description of what sort of change (Kind) was made by the
	// layer (either ChangeModify, ChangeAdd, or ChangeDelete), relative to a
	// specified layer.  By default, the layer's parent is used as a reference.
	Changes(from, to string) ([]archive.Change, error)

	// Diff produces a tarstream which can be applied to a layer with the contents
	// of the first layer to produce a layer with the contents of the second layer.
	// By default, the parent of the second layer is used as the first
	// layer, so it need not be specified.  Options can be used to override
	// default behavior, but are also not required.
	Diff(from, to string, options *DiffOptions) (io.ReadCloser, error)

	// DiffSize produces an estimate of the length of the tarstream which would be
	// produced by Diff.
	DiffSize(from, to string) (int64, error)

	// Size produces a cached value for the uncompressed size of the layer,
	// if one is known, or -1 if it is not known.  If the layer can not be
	// found, it returns an error.
	Size(name string) (int64, error)

	// Lookup attempts to translate a name to an ID.  Most methods do this
	// implicitly.
	Lookup(name string) (string, error)

	// LayersByCompressedDigest returns a slice of the layers with the
	// specified compressed digest value recorded for them.
	LayersByCompressedDigest(d digest.Digest) ([]Layer, error)

	// LayersByUncompressedDigest returns a slice of the layers with the
	// specified uncompressed digest value recorded for them.
	LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)

	// Layers returns a slice of the known layers.
	Layers() ([]Layer, error)
}
 | ||
| 
 | ||
// LayerStore wraps a graph driver, adding the ability to refer to layers by
// name, and keeping track of parent-child relationships, along with a list of
// all known layers.  It extends ROLayerStore with the operations that modify
// layers or the store's records.
type LayerStore interface {
	ROLayerStore
	RWFileBasedStore
	RWMetadataStore
	FlaggableStore
	RWLayerBigDataStore

	// Create creates a new layer, optionally giving it a specified ID rather than
	// a randomly-generated one, either inheriting data from another specified
	// layer or the empty base layer.  The new layer can optionally be given names
	// and have an SELinux label specified for use when mounting it.  Some
	// underlying drivers can accept a "size" option.  At this time, most
	// underlying drivers do not themselves distinguish between writeable
	// and read-only layers.
	Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (*Layer, error)

	// CreateWithFlags combines the functions of Create and SetFlag.
	CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error)

	// Put combines the functions of CreateWithFlags and ApplyDiff.
	Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error)

	// SetNames replaces the list of names associated with a layer with the
	// supplied values.
	// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
	SetNames(id string, names []string) error

	// AddNames adds the supplied values to the list of names associated with the layer with the
	// specified id.
	AddNames(id string, names []string) error

	// RemoveNames removes the supplied values from the list of names associated with the layer with the
	// specified id.
	RemoveNames(id string, names []string) error

	// Delete deletes a layer with the specified name or ID.
	Delete(id string) error

	// Wipe deletes all layers.
	Wipe() error

	// Mount mounts a layer for use.  If the specified layer is the parent of other
	// layers, it should not be written to.  An SELinux label to be applied to the
	// mount can be specified to override the one configured for the layer.
	// The mappings used by the container can be specified.
	Mount(id string, options drivers.MountOpts) (string, error)

	// Unmount unmounts a layer when it is no longer in use.
	Unmount(id string, force bool) (bool, error)

	// Mounted returns number of times the layer has been mounted.
	Mounted(id string) (int, error)

	// ParentOwners returns the UIDs and GIDs of parents of the layer's mountpoint
	// for which the layer's UID and GID maps don't contain corresponding entries.
	ParentOwners(id string) (uids, gids []int, err error)

	// ApplyDiff reads a tarstream which was created by a previous call to Diff and
	// applies its changes to a specified layer.
	ApplyDiff(to string, diff io.Reader) (int64, error)

	// ApplyDiffWithDiffer applies the changes through the differ callback function.
	// If to is the empty string, then a staging directory is created by the driver.
	ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)

	// CleanupStagingDirectory cleans up the staging directory.  It can be used to cleanup the staging directory on errors
	CleanupStagingDirectory(stagingDirectory string) error

	// ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
	ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error

	// DifferTarget gets the location where files are stored for the layer.
	DifferTarget(id string) (string, error)

	// LoadLocked wraps Load in a locked state. This means it loads the store
	// and cleans-up invalid layers if needed.
	LoadLocked() error

	// PutAdditionalLayer creates a layer using the diff contained in the additional layer
	// store.
	// This API is experimental and can be changed without bumping the major version number.
	PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error)
}
 | ||
| 
 | ||
// layerStore is the concrete implementation behind ROLayerStore/LayerStore.
// It keeps its persistent records in layers.json under layerdir, tracks mount
// state in mountpoints.json under rundir, and maintains several in-memory
// indexes (by ID, name, mount point, and digest) that Load() rebuilds from
// disk.
type layerStore struct {
	// lockfile guards layers.json; mountsLockfile guards mountpoints.json.
	lockfile           Locker
	mountsLockfile     Locker
	rundir             string
	driver             drivers.Driver
	layerdir           string
	layers             []*Layer
	// idindex allows looking layers up by unambiguous ID prefixes.
	idindex            *truncindex.TruncIndex
	byid               map[string]*Layer
	byname             map[string]*Layer
	bymount            map[string]*Layer
	bycompressedsum    map[digest.Digest][]string
	byuncompressedsum  map[digest.Digest][]string
	uidMap             []idtools.IDMap
	gidMap             []idtools.IDMap
	loadMut            sync.Mutex
	layerspathModified time.Time
}
 | ||
| 
 | ||
| func copyLayer(l *Layer) *Layer {
 | ||
| 	return &Layer{
 | ||
| 		ID:                 l.ID,
 | ||
| 		Names:              copyStringSlice(l.Names),
 | ||
| 		Parent:             l.Parent,
 | ||
| 		Metadata:           l.Metadata,
 | ||
| 		MountLabel:         l.MountLabel,
 | ||
| 		MountPoint:         l.MountPoint,
 | ||
| 		MountCount:         l.MountCount,
 | ||
| 		Created:            l.Created,
 | ||
| 		CompressedDigest:   l.CompressedDigest,
 | ||
| 		CompressedSize:     l.CompressedSize,
 | ||
| 		UncompressedDigest: l.UncompressedDigest,
 | ||
| 		UncompressedSize:   l.UncompressedSize,
 | ||
| 		CompressionType:    l.CompressionType,
 | ||
| 		ReadOnly:           l.ReadOnly,
 | ||
| 		BigDataNames:       copyStringSlice(l.BigDataNames),
 | ||
| 		Flags:              copyStringInterfaceMap(l.Flags),
 | ||
| 		UIDMap:             copyIDMap(l.UIDMap),
 | ||
| 		GIDMap:             copyIDMap(l.GIDMap),
 | ||
| 		UIDs:               copyUint32Slice(l.UIDs),
 | ||
| 		GIDs:               copyUint32Slice(l.GIDs),
 | ||
| 	}
 | ||
| }
 | ||
| 
 | ||
| func (r *layerStore) Layers() ([]Layer, error) {
 | ||
| 	layers := make([]Layer, len(r.layers))
 | ||
| 	for i := range r.layers {
 | ||
| 		layers[i] = *copyLayer(r.layers[i])
 | ||
| 	}
 | ||
| 	return layers, nil
 | ||
| }
 | ||
| 
 | ||
| func (r *layerStore) mountspath() string {
 | ||
| 	return filepath.Join(r.rundir, "mountpoints.json")
 | ||
| }
 | ||
| 
 | ||
| func (r *layerStore) layerspath() string {
 | ||
| 	return filepath.Join(r.layerdir, "layers.json")
 | ||
| }
 | ||
| 
 | ||
// Load reads layers.json and rebuilds all of the store's in-memory state:
// the layer list, the ID/name indexes, and the by-digest maps.  For
// read-write stores it also merges in current mount information and, when
// the store is locked for writing, deletes layers that a previous process
// marked incomplete but failed to remove.
func (r *layerStore) Load() error {
	shouldSave := false
	rpath := r.layerspath()
	data, err := ioutil.ReadFile(rpath)
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	layers := []*Layer{}
	idlist := []string{}
	ids := make(map[string]*Layer)
	names := make(map[string]*Layer)
	compressedsums := make(map[digest.Digest][]string)
	uncompressedsums := make(map[digest.Digest][]string)
	if r.IsReadWrite() {
		label.ClearLabels()
	}
	// A missing file leaves data empty, which is treated like an empty list.
	if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil {
		idlist = make([]string, 0, len(layers))
		for n, layer := range layers {
			ids[layer.ID] = layers[n]
			idlist = append(idlist, layer.ID)
			for _, name := range layer.Names {
				// Names must be unique: if an earlier layer already claimed
				// this name, strip it from that layer and remember to save.
				if conflict, ok := names[name]; ok {
					r.removeName(conflict, name)
					shouldSave = true
				}
				names[name] = layers[n]
			}
			if layer.CompressedDigest != "" {
				compressedsums[layer.CompressedDigest] = append(compressedsums[layer.CompressedDigest], layer.ID)
			}
			if layer.UncompressedDigest != "" {
				uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID)
			}
			if layer.MountLabel != "" {
				label.ReserveLabel(layer.MountLabel)
			}
			layer.ReadOnly = !r.IsReadWrite()
		}
		err = nil
	}
	// We found duplicate names but can't rewrite the file (read-only store,
	// or not holding the write lock), so report the inconsistency instead.
	if shouldSave && (!r.IsReadWrite() || !r.Locked()) {
		return ErrDuplicateLayerNames
	}
	r.layers = layers
	r.idindex = truncindex.NewTruncIndex(idlist)
	r.byid = ids
	r.byname = names
	r.bycompressedsum = compressedsums
	r.byuncompressedsum = uncompressedsums

	// Load and merge information about which layers are mounted, and where.
	if r.IsReadWrite() {
		r.mountsLockfile.RLock()
		defer r.mountsLockfile.Unlock()
		if err = r.loadMounts(); err != nil {
			return err
		}

		// Last step: as we’re writable, try to remove anything that a previous
		// user of this storage area marked for deletion but didn't manage to
		// actually delete.
		if r.Locked() {
			for _, layer := range r.layers {
				if layer.Flags == nil {
					layer.Flags = make(map[string]interface{})
				}
				if layerHasIncompleteFlag(layer) {
					logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
					err = r.deleteInternal(layer.ID)
					if err != nil {
						break
					}
					shouldSave = true
				}
			}
		}
		// Persist any repairs (duplicate-name removal, incomplete-layer
		// deletion) now that we know we hold the write lock.
		if shouldSave {
			return r.saveLayers()
		}
	}

	return err
}
 | ||
| 
 | ||
// LoadLocked acquires the store's write lock for the duration of a Load(),
// which allows Load() to clean up invalid/incomplete layers as it reloads.
func (r *layerStore) LoadLocked() error {
	r.lockfile.Lock()
	defer r.lockfile.Unlock()
	return r.Load()
}
 | ||
| 
 | ||
// loadMounts reads mountpoints.json and resets every layer's MountPoint and
// MountCount to match it, rebuilding the bymount index.  The caller is
// expected to hold the mounts lock.
func (r *layerStore) loadMounts() error {
	mounts := make(map[string]*Layer)
	mpath := r.mountspath()
	data, err := ioutil.ReadFile(mpath)
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	layerMounts := []layerMountPoint{}
	if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil {
		// Clear all of our mount information.  If another process
		// unmounted something, it (along with its zero count) won't
		// have been encoded into the version of mountpoints.json that
		// we're loading, so our count could fall out of sync with it
		// if we don't, and if we subsequently change something else,
		// we'd pass that error along to other process that reloaded
		// the data after we saved it.
		for _, layer := range r.layers {
			layer.MountPoint = ""
			layer.MountCount = 0
		}
		// All of the non-zero count values will have been encoded, so
		// we reset the still-mounted ones based on the contents.
		for _, mount := range layerMounts {
			if mount.MountPoint != "" {
				if layer, ok := r.lookup(mount.ID); ok {
					mounts[mount.MountPoint] = layer
					layer.MountPoint = mount.MountPoint
					layer.MountCount = mount.MountCount
				}
			}
		}
		err = nil
	}
	r.bymount = mounts
	return err
}
 | ||
| 
 | ||
// Save persists both halves of the store's state: the layer records
// (layers.json) and the mount bookkeeping (mountpoints.json).
// Note the defer order is deliberate: defers run LIFO, so Touch() on the
// mounts lockfile executes before Unlock().
func (r *layerStore) Save() error {
	r.mountsLockfile.Lock()
	defer r.mountsLockfile.Unlock()
	defer r.mountsLockfile.Touch()
	if err := r.saveLayers(); err != nil {
		return err
	}
	return r.saveMounts()
}
 | ||
| 
 | ||
// saveLayers atomically writes the in-memory layer records to layers.json.
// It refuses to run on a read-only store or without the write lock held.
func (r *layerStore) saveLayers() error {
	if !r.IsReadWrite() {
		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
	}
	if !r.Locked() {
		return errors.New("layer store is not locked for writing")
	}
	rpath := r.layerspath()
	if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
		return err
	}
	jldata, err := json.Marshal(&r.layers)
	if err != nil {
		return err
	}
	// Touch the lockfile only once we know the write is going to happen,
	// so other readers notice the change.
	defer r.Touch()
	return ioutils.AtomicWriteFile(rpath, jldata, 0600)
}
 | ||
| 
 | ||
// saveMounts atomically writes the current mount state to mountpoints.json
// (recording only layers that are actually mounted, i.e. have a non-zero
// count) and then re-reads it so the in-memory bymount index matches what
// was persisted.  The caller must hold the mounts write lock.
func (r *layerStore) saveMounts() error {
	if !r.IsReadWrite() {
		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
	}
	if !r.mountsLockfile.Locked() {
		return errors.New("layer store mount information is not locked for writing")
	}
	mpath := r.mountspath()
	if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil {
		return err
	}
	mounts := make([]layerMountPoint, 0, len(r.layers))
	for _, layer := range r.layers {
		// Only persist live mounts; zero-count entries are dropped.
		if layer.MountPoint != "" && layer.MountCount > 0 {
			mounts = append(mounts, layerMountPoint{
				ID:         layer.ID,
				MountPoint: layer.MountPoint,
				MountCount: layer.MountCount,
			})
		}
	}
	jmdata, err := json.Marshal(&mounts)
	if err != nil {
		return err
	}
	if err = ioutils.AtomicWriteFile(mpath, jmdata, 0600); err != nil {
		return err
	}
	return r.loadMounts()
}
 | ||
| 
 | ||
// newLayerStore creates a read-write layer store rooted at layerdir (for the
// persistent layers.json) and rundir (for per-boot mount state), inheriting
// the parent store's ID mappings, and performs an initial Load().
func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver) (LayerStore, error) {
	if err := os.MkdirAll(rundir, 0700); err != nil {
		return nil, err
	}
	if err := os.MkdirAll(layerdir, 0700); err != nil {
		return nil, err
	}
	lockfile, err := GetLockfile(filepath.Join(layerdir, "layers.lock"))
	if err != nil {
		return nil, err
	}
	mountsLockfile, err := GetLockfile(filepath.Join(rundir, "mountpoints.lock"))
	if err != nil {
		return nil, err
	}
	rlstore := layerStore{
		lockfile:       lockfile,
		mountsLockfile: mountsLockfile,
		driver:         driver,
		rundir:         rundir,
		layerdir:       layerdir,
		byid:           make(map[string]*Layer),
		bymount:        make(map[string]*Layer),
		byname:         make(map[string]*Layer),
		uidMap:         copyIDMap(s.uidMap),
		gidMap:         copyIDMap(s.gidMap),
	}
	if err := rlstore.Load(); err != nil {
		return nil, err
	}
	return &rlstore, nil
}
 | ||
| 
 | ||
// newROLayerStore creates a read-only layer store for layerdir.  It takes a
// read-only lock, keeps no mounts lockfile (mount state is never written),
// and performs an initial Load().
func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROLayerStore, error) {
	lockfile, err := GetROLockfile(filepath.Join(layerdir, "layers.lock"))
	if err != nil {
		return nil, err
	}
	rlstore := layerStore{
		lockfile:       lockfile,
		mountsLockfile: nil,
		driver:         driver,
		rundir:         rundir,
		layerdir:       layerdir,
		byid:           make(map[string]*Layer),
		bymount:        make(map[string]*Layer),
		byname:         make(map[string]*Layer),
	}
	if err := rlstore.Load(); err != nil {
		return nil, err
	}
	return &rlstore, nil
}
 | ||
| 
 | ||
| func (r *layerStore) lookup(id string) (*Layer, bool) {
 | ||
| 	if layer, ok := r.byid[id]; ok {
 | ||
| 		return layer, ok
 | ||
| 	} else if layer, ok := r.byname[id]; ok {
 | ||
| 		return layer, ok
 | ||
| 	} else if longid, err := r.idindex.Get(id); err == nil {
 | ||
| 		layer, ok := r.byid[longid]
 | ||
| 		return layer, ok
 | ||
| 	}
 | ||
| 	return nil, false
 | ||
| }
 | ||
| 
 | ||
| func (r *layerStore) Size(name string) (int64, error) {
 | ||
| 	layer, ok := r.lookup(name)
 | ||
| 	if !ok {
 | ||
| 		return -1, ErrLayerUnknown
 | ||
| 	}
 | ||
| 	// We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that
 | ||
| 	// a zero value is not just present because it was never set to anything else (which can happen if the layer was
 | ||
| 	// created by a version of this library that didn't keep track of digest and size information).
 | ||
| 	if layer.UncompressedDigest != "" {
 | ||
| 		return layer.UncompressedSize, nil
 | ||
| 	}
 | ||
| 	return -1, nil
 | ||
| }
 | ||
| 
 | ||
| func (r *layerStore) ClearFlag(id string, flag string) error {
 | ||
| 	if !r.IsReadWrite() {
 | ||
| 		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on layers at %q", r.layerspath())
 | ||
| 	}
 | ||
| 	layer, ok := r.lookup(id)
 | ||
| 	if !ok {
 | ||
| 		return ErrLayerUnknown
 | ||
| 	}
 | ||
| 	delete(layer.Flags, flag)
 | ||
| 	return r.Save()
 | ||
| }
 | ||
| 
 | ||
| func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
 | ||
| 	if !r.IsReadWrite() {
 | ||
| 		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on layers at %q", r.layerspath())
 | ||
| 	}
 | ||
| 	layer, ok := r.lookup(id)
 | ||
| 	if !ok {
 | ||
| 		return ErrLayerUnknown
 | ||
| 	}
 | ||
| 	if layer.Flags == nil {
 | ||
| 		layer.Flags = make(map[string]interface{})
 | ||
| 	}
 | ||
| 	layer.Flags[flag] = value
 | ||
| 	return r.Save()
 | ||
| }
 | ||
| 
 | ||
// Status passes through whatever status information the underlying graph
// driver reports; it never fails.
func (r *layerStore) Status() ([][2]string, error) {
	return r.driver.Status(), nil
}
 | ||
| 
 | ||
// PutAdditionalLayer registers a layer whose diff already lives in an
// additional layer store: it decodes the layer record from the additional
// store's info blob, asks the store to materialize the layer under our id,
// then wires the new record into all in-memory indexes and saves.
// This API is experimental and can be changed without bumping the major version number.
func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error) {
	if duplicateLayer, idInUse := r.byid[id]; idInUse {
		return duplicateLayer, ErrDuplicateID
	}
	for _, name := range names {
		if _, nameInUse := r.byname[name]; nameInUse {
			return nil, ErrDuplicateName
		}
	}

	parent := ""
	if parentLayer != nil {
		parent = parentLayer.ID
	}

	// Decode the layer record that the additional store provides, then
	// override its identity fields with the ones the caller asked for.
	info, err := aLayer.Info()
	if err != nil {
		return nil, err
	}
	defer info.Close()
	layer = &Layer{}
	if err := json.NewDecoder(info).Decode(layer); err != nil {
		return nil, err
	}
	layer.ID = id
	layer.Parent = parent
	layer.Created = time.Now().UTC()

	if err := aLayer.CreateAs(id, parent); err != nil {
		return nil, err
	}

	// TODO: check if necessary fields are filled
	r.layers = append(r.layers, layer)
	r.idindex.Add(id)
	r.byid[id] = layer
	for _, name := range names { // names got from the additional layer store won't be used
		r.byname[name] = layer
	}
	if layer.CompressedDigest != "" {
		r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
	}
	if layer.UncompressedDigest != "" {
		r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
	}
	// If we can't persist the record, undo the driver-side creation so we
	// don't leave an orphaned layer behind.
	if err := r.Save(); err != nil {
		r.driver.Remove(id)
		return nil, err
	}
	return copyLayer(layer), nil
}
 | ||
| 
 | ||
| func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) {
 | ||
| 	if !r.IsReadWrite() {
 | ||
| 		return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath())
 | ||
| 	}
 | ||
| 	if err := os.MkdirAll(r.rundir, 0700); err != nil {
 | ||
| 		return nil, -1, err
 | ||
| 	}
 | ||
| 	if err := os.MkdirAll(r.layerdir, 0700); err != nil {
 | ||
| 		return nil, -1, err
 | ||
| 	}
 | ||
| 	if id == "" {
 | ||
| 		id = stringid.GenerateRandomID()
 | ||
| 		_, idInUse := r.byid[id]
 | ||
| 		for idInUse {
 | ||
| 			id = stringid.GenerateRandomID()
 | ||
| 			_, idInUse = r.byid[id]
 | ||
| 		}
 | ||
| 	}
 | ||
| 	if duplicateLayer, idInUse := r.byid[id]; idInUse {
 | ||
| 		return duplicateLayer, -1, ErrDuplicateID
 | ||
| 	}
 | ||
| 	names = dedupeNames(names)
 | ||
| 	for _, name := range names {
 | ||
| 		if _, nameInUse := r.byname[name]; nameInUse {
 | ||
| 			return nil, -1, ErrDuplicateName
 | ||
| 		}
 | ||
| 	}
 | ||
| 	parent := ""
 | ||
| 	if parentLayer != nil {
 | ||
| 		parent = parentLayer.ID
 | ||
| 	}
 | ||
| 	var parentMappings, templateIDMappings, oldMappings *idtools.IDMappings
 | ||
| 	var (
 | ||
| 		templateMetadata           string
 | ||
| 		templateCompressedDigest   digest.Digest
 | ||
| 		templateCompressedSize     int64
 | ||
| 		templateUncompressedDigest digest.Digest
 | ||
| 		templateUncompressedSize   int64
 | ||
| 		templateCompressionType    archive.Compression
 | ||
| 		templateUIDs, templateGIDs []uint32
 | ||
| 		templateTSdata             []byte
 | ||
| 	)
 | ||
| 	if moreOptions.TemplateLayer != "" {
 | ||
| 		var tserr error
 | ||
| 		templateLayer, ok := r.lookup(moreOptions.TemplateLayer)
 | ||
| 		if !ok {
 | ||
| 			return nil, -1, ErrLayerUnknown
 | ||
| 		}
 | ||
| 		templateMetadata = templateLayer.Metadata
 | ||
| 		templateIDMappings = idtools.NewIDMappingsFromMaps(templateLayer.UIDMap, templateLayer.GIDMap)
 | ||
| 		templateCompressedDigest, templateCompressedSize = templateLayer.CompressedDigest, templateLayer.CompressedSize
 | ||
| 		templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize
 | ||
| 		templateCompressionType = templateLayer.CompressionType
 | ||
| 		templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...)
 | ||
| 		templateTSdata, tserr = ioutil.ReadFile(r.tspath(templateLayer.ID))
 | ||
| 		if tserr != nil && !os.IsNotExist(tserr) {
 | ||
| 			return nil, -1, tserr
 | ||
| 		}
 | ||
| 	} else {
 | ||
| 		templateIDMappings = &idtools.IDMappings{}
 | ||
| 	}
 | ||
| 	if parentLayer != nil {
 | ||
| 		parentMappings = idtools.NewIDMappingsFromMaps(parentLayer.UIDMap, parentLayer.GIDMap)
 | ||
| 	} else {
 | ||
| 		parentMappings = &idtools.IDMappings{}
 | ||
| 	}
 | ||
| 	if mountLabel != "" {
 | ||
| 		label.ReserveLabel(mountLabel)
 | ||
| 	}
 | ||
| 
 | ||
| 	// Before actually creating the layer, make a persistent record of it with incompleteFlag,
 | ||
| 	// so that future processes have a chance to delete it.
 | ||
| 	layer := &Layer{
 | ||
| 		ID:                 id,
 | ||
| 		Parent:             parent,
 | ||
| 		Names:              names,
 | ||
| 		MountLabel:         mountLabel,
 | ||
| 		Metadata:           templateMetadata,
 | ||
| 		Created:            time.Now().UTC(),
 | ||
| 		CompressedDigest:   templateCompressedDigest,
 | ||
| 		CompressedSize:     templateCompressedSize,
 | ||
| 		UncompressedDigest: templateUncompressedDigest,
 | ||
| 		UncompressedSize:   templateUncompressedSize,
 | ||
| 		CompressionType:    templateCompressionType,
 | ||
| 		UIDs:               templateUIDs,
 | ||
| 		GIDs:               templateGIDs,
 | ||
| 		Flags:              make(map[string]interface{}),
 | ||
| 		UIDMap:             copyIDMap(moreOptions.UIDMap),
 | ||
| 		GIDMap:             copyIDMap(moreOptions.GIDMap),
 | ||
| 		BigDataNames:       []string{},
 | ||
| 	}
 | ||
| 	r.layers = append(r.layers, layer)
 | ||
| 	r.idindex.Add(id)
 | ||
| 	r.byid[id] = layer
 | ||
| 	for _, name := range names {
 | ||
| 		r.byname[name] = layer
 | ||
| 	}
 | ||
| 	for flag, value := range flags {
 | ||
| 		layer.Flags[flag] = value
 | ||
| 	}
 | ||
| 	layer.Flags[incompleteFlag] = true
 | ||
| 
 | ||
| 	succeeded := false
 | ||
| 	cleanupFailureContext := ""
 | ||
| 	defer func() {
 | ||
| 		if !succeeded {
 | ||
| 			// On any error, try both removing the driver's data as well
 | ||
| 			// as the in-memory layer record.
 | ||
| 			if err2 := r.Delete(layer.ID); err2 != nil {
 | ||
| 				if cleanupFailureContext == "" {
 | ||
| 					cleanupFailureContext = "unknown: cleanupFailureContext not set at the failure site"
 | ||
| 				}
 | ||
| 				logrus.Errorf("While recovering from a failure (%s), error deleting layer %#v: %v", cleanupFailureContext, layer.ID, err2)
 | ||
| 			}
 | ||
| 		}
 | ||
| 	}()
 | ||
| 
 | ||
| 	err := r.Save()
 | ||
| 	if err != nil {
 | ||
| 		cleanupFailureContext = "saving incomplete layer metadata"
 | ||
| 		return nil, -1, err
 | ||
| 	}
 | ||
| 
 | ||
| 	idMappings := idtools.NewIDMappingsFromMaps(moreOptions.UIDMap, moreOptions.GIDMap)
 | ||
| 	opts := drivers.CreateOpts{
 | ||
| 		MountLabel: mountLabel,
 | ||
| 		StorageOpt: options,
 | ||
| 		IDMappings: idMappings,
 | ||
| 	}
 | ||
| 	if moreOptions.TemplateLayer != "" {
 | ||
| 		if err := r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil {
 | ||
| 			cleanupFailureContext = "creating a layer from template"
 | ||
| 			return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id)
 | ||
| 		}
 | ||
| 		oldMappings = templateIDMappings
 | ||
| 	} else {
 | ||
| 		if writeable {
 | ||
| 			if err := r.driver.CreateReadWrite(id, parent, &opts); err != nil {
 | ||
| 				cleanupFailureContext = "creating a read-write layer"
 | ||
| 				return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id)
 | ||
| 			}
 | ||
| 		} else {
 | ||
| 			if err := r.driver.Create(id, parent, &opts); err != nil {
 | ||
| 				cleanupFailureContext = "creating a read-only layer"
 | ||
| 				return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id)
 | ||
| 			}
 | ||
| 		}
 | ||
| 		oldMappings = parentMappings
 | ||
| 	}
 | ||
| 	if !reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs()) {
 | ||
| 		if err := r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil {
 | ||
| 			cleanupFailureContext = "in UpdateLayerIDMap"
 | ||
| 			return nil, -1, err
 | ||
| 		}
 | ||
| 	}
 | ||
| 	if len(templateTSdata) > 0 {
 | ||
| 		if err := os.MkdirAll(filepath.Dir(r.tspath(id)), 0o700); err != nil {
 | ||
| 			cleanupFailureContext = "creating tar-split parent directory for a copy from template"
 | ||
| 			return nil, -1, err
 | ||
| 		}
 | ||
| 		if err := ioutils.AtomicWriteFile(r.tspath(id), templateTSdata, 0o600); err != nil {
 | ||
| 			cleanupFailureContext = "creating a tar-split copy from template"
 | ||
| 			return nil, -1, err
 | ||
| 		}
 | ||
| 	}
 | ||
| 
 | ||
| 	var size int64 = -1
 | ||
| 	if diff != nil {
 | ||
| 		size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff)
 | ||
| 		if err != nil {
 | ||
| 			cleanupFailureContext = "applying layer diff"
 | ||
| 			return nil, -1, err
 | ||
| 		}
 | ||
| 	} else {
 | ||
| 		// applyDiffWithOptions in the `diff != nil` case handles this bit for us
 | ||
| 		if layer.CompressedDigest != "" {
 | ||
| 			r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
 | ||
| 		}
 | ||
| 		if layer.UncompressedDigest != "" {
 | ||
| 			r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
 | ||
| 		}
 | ||
| 	}
 | ||
| 	delete(layer.Flags, incompleteFlag)
 | ||
| 	err = r.Save()
 | ||
| 	if err != nil {
 | ||
| 		cleanupFailureContext = "saving finished layer metadata"
 | ||
| 		return nil, -1, err
 | ||
| 	}
 | ||
| 
 | ||
| 	layer = copyLayer(layer)
 | ||
| 	succeeded = true
 | ||
| 	return layer, size, err
 | ||
| }
 | ||
| 
 | ||
| func (r *layerStore) CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error) {
 | ||
| 	layer, _, err = r.Put(id, parent, names, mountLabel, options, moreOptions, writeable, flags, nil)
 | ||
| 	return layer, err
 | ||
| }
 | ||
| 
 | ||
| func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (layer *Layer, err error) {
 | ||
| 	return r.CreateWithFlags(id, parent, names, mountLabel, options, moreOptions, writeable, nil)
 | ||
| }
 | ||
| 
 | ||
// Mounted returns the current mount count recorded for the layer identified
// by id.  It fails for read-only stores (which keep no mount information) and
// for unknown layers.
func (r *layerStore) Mounted(id string) (int, error) {
	if !r.IsReadWrite() {
		return 0, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath())
	}
	r.mountsLockfile.RLock()
	// NOTE: the lockfile's single Unlock releases read locks as well.
	defer r.mountsLockfile.Unlock()
	// Reload the mount records if another process changed them on disk.
	if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
		if err = r.loadMounts(); err != nil {
			return 0, err
		}
	}
	layer, ok := r.lookup(id)
	if !ok {
		return 0, ErrLayerUnknown
	}
	return layer.MountCount, nil
}
 | ||
| 
 | ||
// Mount mounts the layer identified by id via the graph driver and returns
// the mountpoint.  If the layer is already mounted, the recorded mount count
// is incremented and the existing mountpoint is reused.
func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) {
	// check whether options include ro option
	hasReadOnlyOpt := func(opts []string) bool {
		for _, item := range opts {
			if item == "ro" {
				return true
			}
		}
		return false
	}

	// You are not allowed to mount layers from readonly stores if they
	// are not mounted read/only.
	if !r.IsReadWrite() && !hasReadOnlyOpt(options.Options) {
		return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
	}
	r.mountsLockfile.Lock()
	defer r.mountsLockfile.Unlock()
	// Reload the mount records if another process changed them on disk.
	if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
		if err = r.loadMounts(); err != nil {
			return "", err
		}
	}
	// We are going to modify the mount records; flag the file as changed so
	// other processes reload it.  The Touch error is deliberately ignored.
	defer r.mountsLockfile.Touch()
	layer, ok := r.lookup(id)
	if !ok {
		return "", ErrLayerUnknown
	}
	if layer.MountCount > 0 {
		mounted, err := mount.Mounted(layer.MountPoint)
		if err != nil {
			return "", err
		}
		// If the container is not mounted then we have a condition
		// where the kernel umounted the mount point. This means
		// that the mount count never got decremented.
		if mounted {
			layer.MountCount++
			return layer.MountPoint, r.saveMounts()
		}
	}
	if options.MountLabel == "" {
		// Fall back to the label the layer was created with.
		options.MountLabel = layer.MountLabel
	}

	// Without driver-side ID shifting, mounting with mappings different from
	// the ones the layer was created with cannot be honored.
	if (options.UidMaps != nil || options.GidMaps != nil) && !r.driver.SupportsShifting() {
		if !reflect.DeepEqual(options.UidMaps, layer.UIDMap) || !reflect.DeepEqual(options.GidMaps, layer.GIDMap) {
			return "", fmt.Errorf("cannot mount layer %v: shifting not enabled", layer.ID)
		}
	}
	mountpoint, err := r.driver.Get(id, options)
	if mountpoint != "" && err == nil {
		if layer.MountPoint != "" {
			// Replacing a previously recorded mountpoint; drop the stale index entry.
			delete(r.bymount, layer.MountPoint)
		}
		layer.MountPoint = filepath.Clean(mountpoint)
		layer.MountCount++
		r.bymount[layer.MountPoint] = layer
		err = r.saveMounts()
	}
	return mountpoint, err
}
 | ||
| 
 | ||
// Unmount decrements the mount count of the layer identified by id (which may
// also be a mountpoint path), unmounting it in the driver once the count
// reaches one, or immediately when force is true.  It returns true if the
// layer is still mounted afterwards.
func (r *layerStore) Unmount(id string, force bool) (bool, error) {
	if !r.IsReadWrite() {
		return false, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
	}
	r.mountsLockfile.Lock()
	defer r.mountsLockfile.Unlock()
	// Reload the mount records if another process changed them on disk.
	if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
		if err = r.loadMounts(); err != nil {
			return false, err
		}
	}
	// We are going to modify the mount records; flag the file as changed.
	defer r.mountsLockfile.Touch()
	layer, ok := r.lookup(id)
	if !ok {
		// id is not a layer ID or name; try treating it as a mountpoint path.
		layerByMount, ok := r.bymount[filepath.Clean(id)]
		if !ok {
			return false, ErrLayerUnknown
		}
		layer = layerByMount
	}
	if force {
		// Pretend a single reference remains so the branch below really unmounts.
		layer.MountCount = 1
	}
	if layer.MountCount > 1 {
		layer.MountCount--
		return true, r.saveMounts()
	}
	err := r.driver.Put(id)
	// Treat "already gone" the same as a successful unmount.
	if err == nil || os.IsNotExist(err) {
		if layer.MountPoint != "" {
			delete(r.bymount, layer.MountPoint)
		}
		layer.MountCount--
		layer.MountPoint = ""
		return false, r.saveMounts()
	}
	return true, err
}
 | ||
| 
 | ||
// ParentOwners returns the host UIDs and GIDs that own ancestor directories
// of the layer's mountpoint but have no counterpart in the layer's ID
// mappings — i.e. IDs a mapped container could not express.  The layer must
// be mounted unless it uses no mappings at all.
func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
	if !r.IsReadWrite() {
		return nil, nil, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath())
	}
	r.mountsLockfile.RLock()
	// NOTE: the lockfile's single Unlock releases read locks as well.
	defer r.mountsLockfile.Unlock()
	// Reload the mount records if another process changed them on disk.
	if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
		if err = r.loadMounts(); err != nil {
			return nil, nil, err
		}
	}
	layer, ok := r.lookup(id)
	if !ok {
		return nil, nil, ErrLayerUnknown
	}
	if len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 {
		// We're not using any mappings, so there aren't any unmapped IDs on parent directories.
		return nil, nil, nil
	}
	if layer.MountPoint == "" {
		// We don't know which directories to examine.
		return nil, nil, ErrLayerNotMounted
	}
	rootuid, rootgid, err := idtools.GetRootUIDGID(layer.UIDMap, layer.GIDMap)
	if err != nil {
		return nil, nil, errors.Wrapf(err, "error reading root ID values for layer %q", layer.ID)
	}
	m := idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap)
	fsuids := make(map[int]struct{})
	fsgids := make(map[int]struct{})
	// Walk every ancestor directory of the mountpoint, stopping before the
	// filesystem root itself.
	for dir := filepath.Dir(layer.MountPoint); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) {
		st, err := system.Stat(dir)
		if err != nil {
			return nil, nil, errors.Wrap(err, "read directory ownership")
		}
		lst, err := system.Lstat(dir)
		if err != nil {
			return nil, nil, err
		}
		fsuid := int(st.UID())
		fsgid := int(st.GID())
		// A ToContainer error means the host ID has no mapping into the
		// container; record it as unmapped.  UID and GID are probed
		// independently by pairing each with the known-mapped root ID.
		if _, _, err := m.ToContainer(idtools.IDPair{UID: fsuid, GID: rootgid}); err != nil {
			fsuids[fsuid] = struct{}{}
		}
		if _, _, err := m.ToContainer(idtools.IDPair{UID: rootuid, GID: fsgid}); err != nil {
			fsgids[fsgid] = struct{}{}
		}
		// Repeat with the lstat results — presumably to cover the case where
		// dir is a symlink whose own ownership differs from its target's.
		fsuid = int(lst.UID())
		fsgid = int(lst.GID())
		if _, _, err := m.ToContainer(idtools.IDPair{UID: fsuid, GID: rootgid}); err != nil {
			fsuids[fsuid] = struct{}{}
		}
		if _, _, err := m.ToContainer(idtools.IDPair{UID: rootuid, GID: fsgid}); err != nil {
			fsgids[fsgid] = struct{}{}
		}
	}
	// Flatten the sets into sorted slices.
	for uid := range fsuids {
		uids = append(uids, uid)
	}
	for gid := range fsgids {
		gids = append(gids, gid)
	}
	if len(uids) > 1 {
		sort.Ints(uids)
	}
	if len(gids) > 1 {
		sort.Ints(gids)
	}
	return uids, gids, nil
}
 | ||
| 
 | ||
// removeName removes a single name from the layer's in-memory Names list.
// The caller is responsible for updating r.byname and persisting the change.
func (r *layerStore) removeName(layer *Layer, name string) {
	layer.Names = stringSliceWithoutValue(layer.Names, name)
}
 | ||
| 
 | ||
// SetNames replaces the layer's name list with the given names.
//
// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
func (r *layerStore) SetNames(id string, names []string) error {
	return r.updateNames(id, names, setNames)
}
 | ||
| 
 | ||
// AddNames adds the given names to the layer's existing name list.
func (r *layerStore) AddNames(id string, names []string) error {
	return r.updateNames(id, names, addNames)
}
 | ||
| 
 | ||
// RemoveNames removes the given names from the layer's name list.
func (r *layerStore) RemoveNames(id string, names []string) error {
	return r.updateNames(id, names, removeNames)
}
 | ||
| 
 | ||
| func (r *layerStore) updateNames(id string, names []string, op updateNameOperation) error {
 | ||
| 	if !r.IsReadWrite() {
 | ||
| 		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath())
 | ||
| 	}
 | ||
| 	layer, ok := r.lookup(id)
 | ||
| 	if !ok {
 | ||
| 		return ErrLayerUnknown
 | ||
| 	}
 | ||
| 	oldNames := layer.Names
 | ||
| 	names, err := applyNameOperation(oldNames, names, op)
 | ||
| 	if err != nil {
 | ||
| 		return err
 | ||
| 	}
 | ||
| 	for _, name := range oldNames {
 | ||
| 		delete(r.byname, name)
 | ||
| 	}
 | ||
| 	for _, name := range names {
 | ||
| 		if otherLayer, ok := r.byname[name]; ok {
 | ||
| 			r.removeName(otherLayer, name)
 | ||
| 		}
 | ||
| 		r.byname[name] = layer
 | ||
| 	}
 | ||
| 	layer.Names = names
 | ||
| 	return r.Save()
 | ||
| }
 | ||
| 
 | ||
// datadir returns the directory in which the layer's big-data items are stored.
func (r *layerStore) datadir(id string) string {
	return filepath.Join(r.layerdir, id)
}
 | ||
| 
 | ||
// datapath returns the path of the big-data item named key for the given layer.
func (r *layerStore) datapath(id, key string) string {
	return filepath.Join(r.datadir(id), makeBigDataBaseName(key))
}
 | ||
| 
 | ||
| func (r *layerStore) BigData(id, key string) (io.ReadCloser, error) {
 | ||
| 	if key == "" {
 | ||
| 		return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve layer big data value for empty name")
 | ||
| 	}
 | ||
| 	layer, ok := r.lookup(id)
 | ||
| 	if !ok {
 | ||
| 		return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id)
 | ||
| 	}
 | ||
| 	return os.Open(r.datapath(layer.ID, key))
 | ||
| }
 | ||
| 
 | ||
| func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
 | ||
| 	if key == "" {
 | ||
| 		return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for layer big data item")
 | ||
| 	}
 | ||
| 	if !r.IsReadWrite() {
 | ||
| 		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with layers at %q", r.layerspath())
 | ||
| 	}
 | ||
| 	layer, ok := r.lookup(id)
 | ||
| 	if !ok {
 | ||
| 		return errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q to write bigdata", id)
 | ||
| 	}
 | ||
| 	err := os.MkdirAll(r.datadir(layer.ID), 0700)
 | ||
| 	if err != nil {
 | ||
| 		return err
 | ||
| 	}
 | ||
| 
 | ||
| 	// NewAtomicFileWriter doesn't overwrite/truncate the existing inode.
 | ||
| 	// BigData() relies on this behaviour when opening the file for read
 | ||
| 	// so that it is either accessing the old data or the new one.
 | ||
| 	writer, err := ioutils.NewAtomicFileWriter(r.datapath(layer.ID, key), 0600)
 | ||
| 	if err != nil {
 | ||
| 		return errors.Wrapf(err, "error opening bigdata file")
 | ||
| 	}
 | ||
| 
 | ||
| 	if _, err := io.Copy(writer, data); err != nil {
 | ||
| 		writer.Close()
 | ||
| 		return errors.Wrapf(err, "error copying bigdata for the layer")
 | ||
| 
 | ||
| 	}
 | ||
| 	if err := writer.Close(); err != nil {
 | ||
| 		return errors.Wrapf(err, "error closing bigdata file for the layer")
 | ||
| 	}
 | ||
| 
 | ||
| 	addName := true
 | ||
| 	for _, name := range layer.BigDataNames {
 | ||
| 		if name == key {
 | ||
| 			addName = false
 | ||
| 			break
 | ||
| 		}
 | ||
| 	}
 | ||
| 	if addName {
 | ||
| 		layer.BigDataNames = append(layer.BigDataNames, key)
 | ||
| 		return r.Save()
 | ||
| 	}
 | ||
| 	return nil
 | ||
| }
 | ||
| 
 | ||
| func (r *layerStore) BigDataNames(id string) ([]string, error) {
 | ||
| 	layer, ok := r.lookup(id)
 | ||
| 	if !ok {
 | ||
| 		return nil, errors.Wrapf(ErrImageUnknown, "error locating layer with ID %q to retrieve bigdata names", id)
 | ||
| 	}
 | ||
| 	return copyStringSlice(layer.BigDataNames), nil
 | ||
| }
 | ||
| 
 | ||
| func (r *layerStore) Metadata(id string) (string, error) {
 | ||
| 	if layer, ok := r.lookup(id); ok {
 | ||
| 		return layer.Metadata, nil
 | ||
| 	}
 | ||
| 	return "", ErrLayerUnknown
 | ||
| }
 | ||
| 
 | ||
| func (r *layerStore) SetMetadata(id, metadata string) error {
 | ||
| 	if !r.IsReadWrite() {
 | ||
| 		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer metadata at %q", r.layerspath())
 | ||
| 	}
 | ||
| 	if layer, ok := r.lookup(id); ok {
 | ||
| 		layer.Metadata = metadata
 | ||
| 		return r.Save()
 | ||
| 	}
 | ||
| 	return ErrLayerUnknown
 | ||
| }
 | ||
| 
 | ||
// tspath returns the path of the layer's gzipped tar-split metadata file.
func (r *layerStore) tspath(id string) string {
	return filepath.Join(r.layerdir, id+tarSplitSuffix)
}
 | ||
| 
 | ||
| // layerHasIncompleteFlag returns true if layer.Flags contains an incompleteFlag set to true
 | ||
| func layerHasIncompleteFlag(layer *Layer) bool {
 | ||
| 	// layer.Flags[…] is defined to succeed and return ok == false if Flags == nil
 | ||
| 	if flagValue, ok := layer.Flags[incompleteFlag]; ok {
 | ||
| 		if b, ok := flagValue.(bool); ok && b {
 | ||
| 			return true
 | ||
| 		}
 | ||
| 	}
 | ||
| 	return false
 | ||
| }
 | ||
| 
 | ||
// deleteInternal removes the layer's data from the graph driver and its
// auxiliary files (tar-split, big-data), and unregisters it from every
// in-memory index.  The caller is expected to call r.Save() afterwards.
func (r *layerStore) deleteInternal(id string) error {
	if !r.IsReadWrite() {
		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
	}
	layer, ok := r.lookup(id)
	if !ok {
		return ErrLayerUnknown
	}
	// Ensure that if we are interrupted, the layer will be cleaned up.
	if !layerHasIncompleteFlag(layer) {
		if layer.Flags == nil {
			layer.Flags = make(map[string]interface{})
		}
		layer.Flags[incompleteFlag] = true
		if err := r.Save(); err != nil {
			return err
		}
	}
	// We never unset incompleteFlag; below, we remove the entire object from r.layers.

	id = layer.ID
	err := r.driver.Remove(id)
	if err != nil {
		return err
	}

	// Best-effort removal of auxiliary files; errors are deliberately ignored
	// since the files may legitimately not exist.
	os.Remove(r.tspath(id))
	os.RemoveAll(r.datadir(id))
	delete(r.byid, id)
	for _, name := range layer.Names {
		delete(r.byname, name)
	}
	r.idindex.Delete(id)
	mountLabel := layer.MountLabel
	if layer.MountPoint != "" {
		delete(r.bymount, layer.MountPoint)
	}
	r.deleteInDigestMap(id)
	// Remove the layer from the ordered r.layers slice.
	toDeleteIndex := -1
	for i, candidate := range r.layers {
		if candidate.ID == id {
			toDeleteIndex = i
			break
		}
	}
	if toDeleteIndex != -1 {
		// delete the layer at toDeleteIndex
		if toDeleteIndex == len(r.layers)-1 {
			r.layers = r.layers[:len(r.layers)-1]
		} else {
			r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...)
		}
	}
	// Release the SELinux mount label only when no remaining layer uses it.
	if mountLabel != "" {
		var found bool
		for _, candidate := range r.layers {
			if candidate.MountLabel == mountLabel {
				found = true
				break
			}
		}
		if !found {
			label.ReleaseLabel(mountLabel)
		}
	}

	return nil
}
 | ||
| 
 | ||
| func (r *layerStore) deleteInDigestMap(id string) {
 | ||
| 	for digest, layers := range r.bycompressedsum {
 | ||
| 		for i, layerID := range layers {
 | ||
| 			if layerID == id {
 | ||
| 				layers = append(layers[:i], layers[i+1:]...)
 | ||
| 				r.bycompressedsum[digest] = layers
 | ||
| 				break
 | ||
| 			}
 | ||
| 		}
 | ||
| 	}
 | ||
| 	for digest, layers := range r.byuncompressedsum {
 | ||
| 		for i, layerID := range layers {
 | ||
| 			if layerID == id {
 | ||
| 				layers = append(layers[:i], layers[i+1:]...)
 | ||
| 				r.byuncompressedsum[digest] = layers
 | ||
| 				break
 | ||
| 			}
 | ||
| 		}
 | ||
| 	}
 | ||
| }
 | ||
| 
 | ||
// Delete fully unmounts the layer (repeatedly, until its mount count reaches
// zero), removes it via deleteInternal, and persists the updated layer list.
func (r *layerStore) Delete(id string) error {
	layer, ok := r.lookup(id)
	if !ok {
		return ErrLayerUnknown
	}
	// Normalize to the canonical layer ID in case a name was passed in.
	id = layer.ID
	// The layer may already have been explicitly unmounted, but if not, we
	// should try to clean that up before we start deleting anything at the
	// driver level.
	mountCount, err := r.Mounted(id)
	if err != nil {
		return errors.Wrapf(err, "error checking if layer %q is still mounted", id)
	}
	for mountCount > 0 {
		if _, err := r.Unmount(id, false); err != nil {
			return err
		}
		// Re-read the count rather than decrementing locally; Unmount may
		// drop it by more than one if the kernel already unmounted it.
		mountCount, err = r.Mounted(id)
		if err != nil {
			return errors.Wrapf(err, "error checking if layer %q is still mounted", id)
		}
	}
	if err := r.deleteInternal(id); err != nil {
		return err
	}
	return r.Save()
}
 | ||
| 
 | ||
| func (r *layerStore) Lookup(name string) (id string, err error) {
 | ||
| 	if layer, ok := r.lookup(name); ok {
 | ||
| 		return layer.ID, nil
 | ||
| 	}
 | ||
| 	return "", ErrLayerUnknown
 | ||
| }
 | ||
| 
 | ||
| func (r *layerStore) Exists(id string) bool {
 | ||
| 	_, ok := r.lookup(id)
 | ||
| 	return ok
 | ||
| }
 | ||
| 
 | ||
| func (r *layerStore) Get(id string) (*Layer, error) {
 | ||
| 	if layer, ok := r.lookup(id); ok {
 | ||
| 		return copyLayer(layer), nil
 | ||
| 	}
 | ||
| 	return nil, ErrLayerUnknown
 | ||
| }
 | ||
| 
 | ||
| func (r *layerStore) Wipe() error {
 | ||
| 	if !r.IsReadWrite() {
 | ||
| 		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
 | ||
| 	}
 | ||
| 	ids := make([]string, 0, len(r.byid))
 | ||
| 	for id := range r.byid {
 | ||
| 		ids = append(ids, id)
 | ||
| 	}
 | ||
| 	for _, id := range ids {
 | ||
| 		if err := r.Delete(id); err != nil {
 | ||
| 			return err
 | ||
| 		}
 | ||
| 	}
 | ||
| 	return nil
 | ||
| }
 | ||
| 
 | ||
| func (r *layerStore) findParentAndLayer(from, to string) (fromID string, toID string, fromLayer, toLayer *Layer, err error) {
 | ||
| 	var ok bool
 | ||
| 	toLayer, ok = r.lookup(to)
 | ||
| 	if !ok {
 | ||
| 		return "", "", nil, nil, ErrLayerUnknown
 | ||
| 	}
 | ||
| 	to = toLayer.ID
 | ||
| 	if from == "" {
 | ||
| 		from = toLayer.Parent
 | ||
| 	}
 | ||
| 	if from != "" {
 | ||
| 		fromLayer, ok = r.lookup(from)
 | ||
| 		if ok {
 | ||
| 			from = fromLayer.ID
 | ||
| 		} else {
 | ||
| 			fromLayer, ok = r.lookup(toLayer.Parent)
 | ||
| 			if ok {
 | ||
| 				from = fromLayer.ID
 | ||
| 			}
 | ||
| 		}
 | ||
| 	}
 | ||
| 	return from, to, fromLayer, toLayer, nil
 | ||
| }
 | ||
| 
 | ||
| func (r *layerStore) layerMappings(layer *Layer) *idtools.IDMappings {
 | ||
| 	if layer == nil {
 | ||
| 		return &idtools.IDMappings{}
 | ||
| 	}
 | ||
| 	return idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap)
 | ||
| }
 | ||
| 
 | ||
| func (r *layerStore) Changes(from, to string) ([]archive.Change, error) {
 | ||
| 	from, to, fromLayer, toLayer, err := r.findParentAndLayer(from, to)
 | ||
| 	if err != nil {
 | ||
| 		return nil, ErrLayerUnknown
 | ||
| 	}
 | ||
| 	return r.driver.Changes(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel)
 | ||
| }
 | ||
| 
 | ||
// simpleGetCloser implements drivers.FileGetCloser on top of a mounted
// layer: Get opens files relative to the mountpoint, and Close unmounts it.
type simpleGetCloser struct {
	r    *layerStore
	path string // the layer's mountpoint
	id   string // ID of the mounted layer
}
 | ||
| 
 | ||
| func (s *simpleGetCloser) Get(path string) (io.ReadCloser, error) {
 | ||
| 	return os.Open(filepath.Join(s.path, path))
 | ||
| }
 | ||
| 
 | ||
| func (s *simpleGetCloser) Close() error {
 | ||
| 	_, err := s.r.Unmount(s.id, false)
 | ||
| 	return err
 | ||
| }
 | ||
| 
 | ||
| func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) {
 | ||
| 	if getter, ok := r.driver.(drivers.DiffGetterDriver); ok {
 | ||
| 		return getter.DiffGetter(id)
 | ||
| 	}
 | ||
| 	path, err := r.Mount(id, drivers.MountOpts{})
 | ||
| 	if err != nil {
 | ||
| 		return nil, err
 | ||
| 	}
 | ||
| 	return &simpleGetCloser{
 | ||
| 		r:    r,
 | ||
| 		path: path,
 | ||
| 		id:   id,
 | ||
| 	}, nil
 | ||
| }
 | ||
| 
 | ||
// Diff returns a tar stream of the changes between the "from" layer
// (defaulting to the parent of "to") and the "to" layer, optionally
// recompressed per options.Compression.  Reconstruction prefers, in order:
// a driver diff (for non-parent bases), an additional-layer-store blob, a
// stored tar-split stream, and finally a driver diff again.
func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) {
	var metadata storage.Unpacker

	from, to, fromLayer, toLayer, err := r.findParentAndLayer(from, to)
	if err != nil {
		return nil, ErrLayerUnknown
	}
	// Default to applying the type of compression that we noted was used
	// for the layerdiff when it was applied.
	compression := toLayer.CompressionType
	// If a particular compression type (or no compression) was selected,
	// use that instead.
	if options != nil && options.Compression != nil {
		compression = *options.Compression
	}
	maybeCompressReadCloser := func(rc io.ReadCloser) (io.ReadCloser, error) {
		// Depending on whether or not compression is desired, return either the
		// passed-in ReadCloser, or a new one that provides its readers with a
		// compressed version of the data that the original would have provided
		// to its readers.
		if compression == archive.Uncompressed {
			return rc, nil
		}
		preader, pwriter := io.Pipe()
		compressor, err := archive.CompressStream(pwriter, compression)
		if err != nil {
			rc.Close()
			pwriter.Close()
			preader.Close()
			return nil, err
		}
		// Pump the uncompressed stream through the compressor in the
		// background; closing the pipe writer signals EOF to the reader.
		go func() {
			defer pwriter.Close()
			defer compressor.Close()
			defer rc.Close()
			io.Copy(compressor, rc)
		}()
		return preader, nil
	}

	// A non-parent base can't use the stored tar-split; ask the driver.
	if from != toLayer.Parent {
		diff, err := r.driver.Diff(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel)
		if err != nil {
			return nil, err
		}
		return maybeCompressReadCloser(diff)
	}

	if ad, ok := r.driver.(drivers.AdditionalLayerStoreDriver); ok {
		if aLayer, err := ad.LookupAdditionalLayerByID(to); err == nil {
			// This is an additional layer. We leverage blob API for acquiring the reproduced raw blob.
			info, err := aLayer.Info()
			if err != nil {
				aLayer.Release()
				return nil, err
			}
			defer info.Close()
			layer := &Layer{}
			if err := json.NewDecoder(info).Decode(layer); err != nil {
				aLayer.Release()
				return nil, err
			}
			blob, err := aLayer.Blob()
			if err != nil {
				aLayer.Release()
				return nil, err
			}
			// If layer compression type is different from the expected one, decompress and convert it.
			if compression != layer.CompressionType {
				diff, err := archive.DecompressStream(blob)
				if err != nil {
					if err2 := blob.Close(); err2 != nil {
						err = errors.Wrapf(err, "failed to close blob file: %v", err2)
					}
					aLayer.Release()
					return nil, err
				}
				rc, err := maybeCompressReadCloser(diff)
				if err != nil {
					if err2 := closeAll(blob.Close, diff.Close); err2 != nil {
						err = errors.Wrapf(err, "failed to cleanup: %v", err2)
					}
					aLayer.Release()
					return nil, err
				}
				// The wrapper's closer tears down the whole chain and then
				// releases the additional-layer reference.
				return ioutils.NewReadCloserWrapper(rc, func() error {
					defer aLayer.Release()
					return closeAll(blob.Close, rc.Close)
				}), nil
			}
			return ioutils.NewReadCloserWrapper(blob, func() error { defer aLayer.Release(); return blob.Close() }), nil
		}
	}

	tsfile, err := os.Open(r.tspath(to))
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
		// No stored tar-split metadata; fall back to a driver-generated diff.
		diff, err := r.driver.Diff(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel)
		if err != nil {
			return nil, err
		}
		return maybeCompressReadCloser(diff)
	}

	decompressor, err := pgzip.NewReader(tsfile)
	if err != nil {
		if e := tsfile.Close(); e != nil {
			logrus.Debug(e)
		}
		return nil, err
	}

	metadata = storage.NewJSONUnpacker(decompressor)

	// The file getter supplies file contents; the tar-split metadata supplies
	// headers and ordering, letting us reproduce the original tar stream.
	fgetter, err := r.newFileGetter(to)
	if err != nil {
		errs := multierror.Append(nil, errors.Wrapf(err, "creating file-getter"))
		if err := decompressor.Close(); err != nil {
			errs = multierror.Append(errs, errors.Wrapf(err, "closing decompressor"))
		}
		if err := tsfile.Close(); err != nil {
			errs = multierror.Append(errs, errors.Wrapf(err, "closing tarstream headers"))
		}
		return nil, errs.ErrorOrNil()
	}

	tarstream := asm.NewOutputTarStream(fgetter, metadata)
	rc := ioutils.NewReadCloserWrapper(tarstream, func() error {
		var errs *multierror.Error
		if err := decompressor.Close(); err != nil {
			errs = multierror.Append(errs, errors.Wrapf(err, "closing decompressor"))
		}
		if err := tsfile.Close(); err != nil {
			errs = multierror.Append(errs, errors.Wrapf(err, "closing tarstream headers"))
		}
		if err := tarstream.Close(); err != nil {
			errs = multierror.Append(errs, errors.Wrapf(err, "closing reconstructed tarstream"))
		}
		if err := fgetter.Close(); err != nil {
			errs = multierror.Append(errs, errors.Wrapf(err, "closing file-getter"))
		}
		if errs != nil {
			return errs.ErrorOrNil()
		}
		return nil
	})
	return maybeCompressReadCloser(rc)
}
 | ||
| 
 | ||
| func (r *layerStore) DiffSize(from, to string) (size int64, err error) {
 | ||
| 	var fromLayer, toLayer *Layer
 | ||
| 	from, to, fromLayer, toLayer, err = r.findParentAndLayer(from, to)
 | ||
| 	if err != nil {
 | ||
| 		return -1, ErrLayerUnknown
 | ||
| 	}
 | ||
| 	return r.driver.DiffSize(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel)
 | ||
| }
 | ||
| 
 | ||
// ApplyDiff reads a layer diff from the given reader and applies it to the
// layer named by "to".  It delegates to applyDiffWithOptions with no layer
// options, so all digests are computed from the stream itself.
func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) {
	return r.applyDiffWithOptions(to, nil, diff)
}
 | ||
| 
 | ||
| func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, diff io.Reader) (size int64, err error) {
 | ||
| 	if !r.IsReadWrite() {
 | ||
| 		return -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer contents at %q", r.layerspath())
 | ||
| 	}
 | ||
| 
 | ||
| 	layer, ok := r.lookup(to)
 | ||
| 	if !ok {
 | ||
| 		return -1, ErrLayerUnknown
 | ||
| 	}
 | ||
| 
 | ||
| 	header := make([]byte, 10240)
 | ||
| 	n, err := diff.Read(header)
 | ||
| 	if err != nil && err != io.EOF {
 | ||
| 		return -1, err
 | ||
| 	}
 | ||
| 	compression := archive.DetectCompression(header[:n])
 | ||
| 	defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), diff)
 | ||
| 
 | ||
| 	// Decide if we need to compute digests
 | ||
| 	var compressedDigest, uncompressedDigest digest.Digest       // = ""
 | ||
| 	var compressedDigester, uncompressedDigester digest.Digester // = nil
 | ||
| 	if layerOptions != nil && layerOptions.OriginalDigest != "" &&
 | ||
| 		layerOptions.OriginalDigest.Algorithm() == digest.Canonical {
 | ||
| 		compressedDigest = layerOptions.OriginalDigest
 | ||
| 	} else {
 | ||
| 		compressedDigester = digest.Canonical.Digester()
 | ||
| 	}
 | ||
| 	if layerOptions != nil && layerOptions.UncompressedDigest != "" &&
 | ||
| 		layerOptions.UncompressedDigest.Algorithm() == digest.Canonical {
 | ||
| 		uncompressedDigest = layerOptions.UncompressedDigest
 | ||
| 	} else {
 | ||
| 		uncompressedDigester = digest.Canonical.Digester()
 | ||
| 	}
 | ||
| 
 | ||
| 	var compressedWriter io.Writer
 | ||
| 	if compressedDigester != nil {
 | ||
| 		compressedWriter = compressedDigester.Hash()
 | ||
| 	} else {
 | ||
| 		compressedWriter = ioutil.Discard
 | ||
| 	}
 | ||
| 	compressedCounter := ioutils.NewWriteCounter(compressedWriter)
 | ||
| 	defragmented = io.TeeReader(defragmented, compressedCounter)
 | ||
| 
 | ||
| 	tsdata := bytes.Buffer{}
 | ||
| 	compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
 | ||
| 	if err != nil {
 | ||
| 		compressor = pgzip.NewWriter(&tsdata)
 | ||
| 	}
 | ||
| 	if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that
 | ||
| 		logrus.Infof("Error setting compression concurrency threads to 1: %v; ignoring", err)
 | ||
| 	}
 | ||
| 	metadata := storage.NewJSONPacker(compressor)
 | ||
| 	uncompressed, err := archive.DecompressStream(defragmented)
 | ||
| 	if err != nil {
 | ||
| 		return -1, err
 | ||
| 	}
 | ||
| 	defer uncompressed.Close()
 | ||
| 	uidLog := make(map[uint32]struct{})
 | ||
| 	gidLog := make(map[uint32]struct{})
 | ||
| 	idLogger, err := tarlog.NewLogger(func(h *tar.Header) {
 | ||
| 		if !strings.HasPrefix(path.Base(h.Name), archive.WhiteoutPrefix) {
 | ||
| 			uidLog[uint32(h.Uid)] = struct{}{}
 | ||
| 			gidLog[uint32(h.Gid)] = struct{}{}
 | ||
| 		}
 | ||
| 	})
 | ||
| 	if err != nil {
 | ||
| 		return -1, err
 | ||
| 	}
 | ||
| 	defer idLogger.Close()
 | ||
| 	uncompressedCounter := ioutils.NewWriteCounter(idLogger)
 | ||
| 	uncompressedWriter := (io.Writer)(uncompressedCounter)
 | ||
| 	if uncompressedDigester != nil {
 | ||
| 		uncompressedWriter = io.MultiWriter(uncompressedWriter, uncompressedDigester.Hash())
 | ||
| 	}
 | ||
| 	payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, uncompressedWriter), metadata, storage.NewDiscardFilePutter())
 | ||
| 	if err != nil {
 | ||
| 		return -1, err
 | ||
| 	}
 | ||
| 	options := drivers.ApplyDiffOpts{
 | ||
| 		Diff:       payload,
 | ||
| 		Mappings:   r.layerMappings(layer),
 | ||
| 		MountLabel: layer.MountLabel,
 | ||
| 	}
 | ||
| 	size, err = r.driver.ApplyDiff(layer.ID, layer.Parent, options)
 | ||
| 	if err != nil {
 | ||
| 		return -1, err
 | ||
| 	}
 | ||
| 	compressor.Close()
 | ||
| 	if err == nil {
 | ||
| 		if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil {
 | ||
| 			return -1, err
 | ||
| 		}
 | ||
| 		if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0600); err != nil {
 | ||
| 			return -1, err
 | ||
| 		}
 | ||
| 	}
 | ||
| 	if compressedDigester != nil {
 | ||
| 		compressedDigest = compressedDigester.Digest()
 | ||
| 	}
 | ||
| 	if uncompressedDigester != nil {
 | ||
| 		uncompressedDigest = uncompressedDigester.Digest()
 | ||
| 	}
 | ||
| 
 | ||
| 	updateDigestMap := func(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) {
 | ||
| 		var newList []string
 | ||
| 		if oldvalue != "" {
 | ||
| 			for _, value := range (*m)[oldvalue] {
 | ||
| 				if value != id {
 | ||
| 					newList = append(newList, value)
 | ||
| 				}
 | ||
| 			}
 | ||
| 			if len(newList) > 0 {
 | ||
| 				(*m)[oldvalue] = newList
 | ||
| 			} else {
 | ||
| 				delete(*m, oldvalue)
 | ||
| 			}
 | ||
| 		}
 | ||
| 		if newvalue != "" {
 | ||
| 			(*m)[newvalue] = append((*m)[newvalue], id)
 | ||
| 		}
 | ||
| 	}
 | ||
| 	updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest, layer.ID)
 | ||
| 	layer.CompressedDigest = compressedDigest
 | ||
| 	layer.CompressedSize = compressedCounter.Count
 | ||
| 	updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest, layer.ID)
 | ||
| 	layer.UncompressedDigest = uncompressedDigest
 | ||
| 	layer.UncompressedSize = uncompressedCounter.Count
 | ||
| 	layer.CompressionType = compression
 | ||
| 	layer.UIDs = make([]uint32, 0, len(uidLog))
 | ||
| 	for uid := range uidLog {
 | ||
| 		layer.UIDs = append(layer.UIDs, uid)
 | ||
| 	}
 | ||
| 	sort.Slice(layer.UIDs, func(i, j int) bool {
 | ||
| 		return layer.UIDs[i] < layer.UIDs[j]
 | ||
| 	})
 | ||
| 	layer.GIDs = make([]uint32, 0, len(gidLog))
 | ||
| 	for gid := range gidLog {
 | ||
| 		layer.GIDs = append(layer.GIDs, gid)
 | ||
| 	}
 | ||
| 	sort.Slice(layer.GIDs, func(i, j int) bool {
 | ||
| 		return layer.GIDs[i] < layer.GIDs[j]
 | ||
| 	})
 | ||
| 
 | ||
| 	err = r.Save()
 | ||
| 
 | ||
| 	return size, err
 | ||
| }
 | ||
| 
 | ||
| func (r *layerStore) DifferTarget(id string) (string, error) {
 | ||
| 	ddriver, ok := r.driver.(drivers.DriverWithDiffer)
 | ||
| 	if !ok {
 | ||
| 		return "", ErrNotSupported
 | ||
| 	}
 | ||
| 	layer, ok := r.lookup(id)
 | ||
| 	if !ok {
 | ||
| 		return "", ErrLayerUnknown
 | ||
| 	}
 | ||
| 	return ddriver.DifferTarget(layer.ID)
 | ||
| }
 | ||
| 
 | ||
| func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error {
 | ||
| 	ddriver, ok := r.driver.(drivers.DriverWithDiffer)
 | ||
| 	if !ok {
 | ||
| 		return ErrNotSupported
 | ||
| 	}
 | ||
| 	layer, ok := r.lookup(id)
 | ||
| 	if !ok {
 | ||
| 		return ErrLayerUnknown
 | ||
| 	}
 | ||
| 	if options == nil {
 | ||
| 		options = &drivers.ApplyDiffOpts{
 | ||
| 			Mappings:   r.layerMappings(layer),
 | ||
| 			MountLabel: layer.MountLabel,
 | ||
| 		}
 | ||
| 	}
 | ||
| 	err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, stagingDirectory, diffOutput, options)
 | ||
| 	if err != nil {
 | ||
| 		return err
 | ||
| 	}
 | ||
| 	layer.UIDs = diffOutput.UIDs
 | ||
| 	layer.GIDs = diffOutput.GIDs
 | ||
| 	layer.UncompressedDigest = diffOutput.UncompressedDigest
 | ||
| 	layer.UncompressedSize = diffOutput.Size
 | ||
| 	layer.Metadata = diffOutput.Metadata
 | ||
| 	if err = r.Save(); err != nil {
 | ||
| 		return err
 | ||
| 	}
 | ||
| 	for k, v := range diffOutput.BigData {
 | ||
| 		if err := r.SetBigData(id, k, bytes.NewReader(v)); err != nil {
 | ||
| 			r.Delete(id)
 | ||
| 			return err
 | ||
| 		}
 | ||
| 	}
 | ||
| 	return err
 | ||
| }
 | ||
| 
 | ||
| func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
 | ||
| 	ddriver, ok := r.driver.(drivers.DriverWithDiffer)
 | ||
| 	if !ok {
 | ||
| 		return nil, ErrNotSupported
 | ||
| 	}
 | ||
| 
 | ||
| 	if to == "" {
 | ||
| 		output, err := ddriver.ApplyDiffWithDiffer("", "", options, differ)
 | ||
| 		return &output, err
 | ||
| 	}
 | ||
| 
 | ||
| 	layer, ok := r.lookup(to)
 | ||
| 	if !ok {
 | ||
| 		return nil, ErrLayerUnknown
 | ||
| 	}
 | ||
| 	if options == nil {
 | ||
| 		options = &drivers.ApplyDiffOpts{
 | ||
| 			Mappings:   r.layerMappings(layer),
 | ||
| 			MountLabel: layer.MountLabel,
 | ||
| 		}
 | ||
| 	}
 | ||
| 	output, err := ddriver.ApplyDiffWithDiffer(layer.ID, layer.Parent, options, differ)
 | ||
| 	if err != nil {
 | ||
| 		return nil, err
 | ||
| 	}
 | ||
| 	layer.UIDs = output.UIDs
 | ||
| 	layer.GIDs = output.GIDs
 | ||
| 	err = r.Save()
 | ||
| 	return &output, err
 | ||
| }
 | ||
| 
 | ||
// CleanupStagingDirectory removes a staging directory created by a graph
// driver that supports the differ interface.  Returns ErrNotSupported when
// the driver does not implement drivers.DriverWithDiffer.
func (r *layerStore) CleanupStagingDirectory(stagingDirectory string) error {
	ddriver, ok := r.driver.(drivers.DriverWithDiffer)
	if !ok {
		return ErrNotSupported
	}
	return ddriver.CleanupStagingDirectory(stagingDirectory)
}
 | ||
| 
 | ||
| func (r *layerStore) layersByDigestMap(m map[digest.Digest][]string, d digest.Digest) ([]Layer, error) {
 | ||
| 	var layers []Layer
 | ||
| 	for _, layerID := range m[d] {
 | ||
| 		layer, ok := r.lookup(layerID)
 | ||
| 		if !ok {
 | ||
| 			return nil, ErrLayerUnknown
 | ||
| 		}
 | ||
| 		layers = append(layers, *copyLayer(layer))
 | ||
| 	}
 | ||
| 	return layers, nil
 | ||
| }
 | ||
| 
 | ||
// LayersByCompressedDigest returns the layers whose compressed content has
// the given digest, as recorded in the by-compressed-sum index.
func (r *layerStore) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) {
	return r.layersByDigestMap(r.bycompressedsum, d)
}
 | ||
| 
 | ||
// LayersByUncompressedDigest returns the layers whose uncompressed content
// has the given digest, as recorded in the by-uncompressed-sum index.
func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) {
	return r.layersByDigestMap(r.byuncompressedsum, d)
}
 | ||
| 
 | ||
// Lock acquires the store's primary lock for writing.
func (r *layerStore) Lock() {
	r.lockfile.Lock()
}
 | ||
| 
 | ||
// RecursiveLock acquires the store's primary lock for writing; it delegates
// to the lockfile's RecursiveLock, which by its name tolerates reacquisition
// by the current holder.
func (r *layerStore) RecursiveLock() {
	r.lockfile.RecursiveLock()
}
 | ||
| 
 | ||
// RLock acquires the store's primary lock for reading.
func (r *layerStore) RLock() {
	r.lockfile.RLock()
}
 | ||
| 
 | ||
// Unlock releases the store's primary lock.
func (r *layerStore) Unlock() {
	r.lockfile.Unlock()
}
 | ||
| 
 | ||
// Touch delegates to the primary lockfile's Touch, recording that this
// process has modified the store so other users notice the change.
func (r *layerStore) Touch() error {
	return r.lockfile.Touch()
}
 | ||
| 
 | ||
// Modified reports whether the layer store's on-disk state appears to have
// changed since this process last observed it.  Three signals are checked,
// in order: the primary lockfile, the mounts lockfile (read-write stores
// only), and finally the modification time of the layers file, which
// catches manual edits to layers.json.
func (r *layerStore) Modified() (bool, error) {
	var mmodified, tmodified bool
	lmodified, err := r.lockfile.Modified()
	if err != nil {
		return lmodified, err
	}
	if r.IsReadWrite() {
		// Mount state lives behind a separate lockfile that only
		// read-write stores maintain.  NOTE(review): Unlock (not
		// RUnlock) pairing with RLock matches this codebase's lockfile
		// API, where Unlock releases either kind of lock.
		r.mountsLockfile.RLock()
		defer r.mountsLockfile.Unlock()
		mmodified, err = r.mountsLockfile.Modified()
		if err != nil {
			return lmodified, err
		}
	}

	if lmodified || mmodified {
		return true, nil
	}

	// If the layers.json file has been modified manually, then we have to
	// reload the storage in any case.
	info, err := os.Stat(r.layerspath())
	if err != nil && !os.IsNotExist(err) {
		return false, errors.Wrap(err, "stat layers file")
	}
	if info != nil {
		// Side effect: remember the new mtime, so a given manual
		// change is reported as a modification only once.
		tmodified = info.ModTime() != r.layerspathModified
		r.layerspathModified = info.ModTime()
	}

	return tmodified, nil
}
 | ||
| 
 | ||
// IsReadWrite reports whether the store can be modified, as determined by
// its primary lockfile.
func (r *layerStore) IsReadWrite() bool {
	return r.lockfile.IsReadWrite()
}
 | ||
| 
 | ||
// TouchedSince reports whether the primary lockfile records a modification
// after the given time.
func (r *layerStore) TouchedSince(when time.Time) bool {
	return r.lockfile.TouchedSince(when)
}
 | ||
| 
 | ||
// Locked reports whether the store's primary lock is currently held.
func (r *layerStore) Locked() bool {
	return r.lockfile.Locked()
}
 | ||
| 
 | ||
| func (r *layerStore) ReloadIfChanged() error {
 | ||
| 	r.loadMut.Lock()
 | ||
| 	defer r.loadMut.Unlock()
 | ||
| 
 | ||
| 	modified, err := r.Modified()
 | ||
| 	if err == nil && modified {
 | ||
| 		return r.Load()
 | ||
| 	}
 | ||
| 	return err
 | ||
| }
 | ||
| 
 | ||
| func closeAll(closes ...func() error) (rErr error) {
 | ||
| 	for _, f := range closes {
 | ||
| 		if err := f(); err != nil {
 | ||
| 			if rErr == nil {
 | ||
| 				rErr = errors.Wrapf(err, "close error")
 | ||
| 				continue
 | ||
| 			}
 | ||
| 			rErr = errors.Wrapf(rErr, "%v", err)
 | ||
| 		}
 | ||
| 	}
 | ||
| 	return
 | ||
| }
 | 
