Enable hyperv image downloads

Now that we have public hyperv FCOS artifacts, we can download them
instead of requiring a special build.

Signed-off-by: Brent Baude <bbaude@redhat.com>

[NO NEW TESTS NEEDED]
Brent Baude
2023-08-07 13:46:04 -05:00
parent 3e8f19be9d
commit 9e14e3ebe5
21 changed files with 745 additions and 301 deletions

go.mod

@ -13,9 +13,9 @@ require (
github.com/containernetworking/cni v1.1.2
github.com/containernetworking/plugins v1.3.0
github.com/containers/buildah v1.31.1-0.20230722114901-5ece066f82c6
github.com/containers/common v0.55.1-0.20230808082859-b13df748a2b8
github.com/containers/common v0.55.1-0.20230811093040-524b4d5c12f9
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/image/v5 v5.26.1-0.20230801083106-fcf7f0e1712a
github.com/containers/image/v5 v5.26.1-0.20230807184415-3fb422379cfa
github.com/containers/libhvee v0.4.0
github.com/containers/ocicrypt v1.1.7
github.com/containers/psgo v1.8.0
@ -62,7 +62,7 @@ require (
github.com/vbauerster/mpb/v8 v8.5.2
github.com/vishvananda/netlink v1.2.1-beta.2
go.etcd.io/bbolt v1.3.7
golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df
golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b
golang.org/x/net v0.14.0
golang.org/x/sync v0.3.0
golang.org/x/sys v0.11.0
@ -185,7 +185,7 @@ require (
golang.org/x/arch v0.3.0 // indirect
golang.org/x/crypto v0.12.0 // indirect
golang.org/x/mod v0.11.0 // indirect
golang.org/x/oauth2 v0.10.0 // indirect
golang.org/x/oauth2 v0.11.0 // indirect
golang.org/x/tools v0.9.3 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect

go.sum

@ -246,12 +246,12 @@ github.com/containernetworking/plugins v1.3.0 h1:QVNXMT6XloyMUoO2wUOqWTC1hWFV62Q
github.com/containernetworking/plugins v1.3.0/go.mod h1:Pc2wcedTQQCVuROOOaLBPPxrEXqqXBFt3cZ+/yVg6l0=
github.com/containers/buildah v1.31.1-0.20230722114901-5ece066f82c6 h1:K/S8SFQsnnNTF0Ws58SrBD9L0EuClzAG8Zp08d7+6AA=
github.com/containers/buildah v1.31.1-0.20230722114901-5ece066f82c6/go.mod h1:0sptTFBBtSznLqoTh80DfvMOCNbdRsNRgVOKhBhrupA=
github.com/containers/common v0.55.1-0.20230808082859-b13df748a2b8 h1:14hfjsnD/mQV6k86uQy99pwZZ3nu/fBWAI/++tJc5YM=
github.com/containers/common v0.55.1-0.20230808082859-b13df748a2b8/go.mod h1:ahyNZvF+l2DrlfqVH7Hxj6iR0o6rQ8u6ebUVxtFGVE4=
github.com/containers/common v0.55.1-0.20230811093040-524b4d5c12f9 h1:TJIOB2FmgB0YpBby30WmnocbvfCeiJkRORFZjyrkso8=
github.com/containers/common v0.55.1-0.20230811093040-524b4d5c12f9/go.mod h1:P80FfWkQ7oITVaBAHF50gZVzsj198bJ2IyQjHVsdBVk=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.26.1-0.20230801083106-fcf7f0e1712a h1:ZK6GNc7wWP9/CTQySx0TM9VN9p+og4Pfd3Y5aAHrwLk=
github.com/containers/image/v5 v5.26.1-0.20230801083106-fcf7f0e1712a/go.mod h1:vsetwKSm1kQayKIWlN7SdGNu/KwcVCgnrhh4Z6Yb75s=
github.com/containers/image/v5 v5.26.1-0.20230807184415-3fb422379cfa h1:wDfVQtc6ik2MvsUmu/YRSyBAE5YUxdjcEDtuT1q2KDo=
github.com/containers/image/v5 v5.26.1-0.20230807184415-3fb422379cfa/go.mod h1:apL4qwq31NV0gsSZQJPxYyTH0yzWavmMCjT8vsQaXSk=
github.com/containers/libhvee v0.4.0 h1:HGHIIExgP2PjwjHKKoQM3B+3qakNIZcmmkiAO4luAZE=
github.com/containers/libhvee v0.4.0/go.mod h1:fyWDxNQccveTdE3Oe+QRuLbwF/iyV0hDxXqRX5Svlic=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
@ -1117,8 +1117,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df h1:UA2aFVmmsIlefxMk29Dp2juaUSth8Pyn3Tq5Y5mJGME=
golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b h1:r+vk0EmXNmekl0S0BascoeeoHk/L7wmaW2QF90K+kYI=
golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -1192,8 +1192,8 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8=
golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=


@ -126,27 +126,15 @@ func (v HyperVVirtualization) NewMachine(opts machine.InitOptions) (machine.VM,
// Set creation time
m.Created = time.Now()
dataDir, err := machine.GetDataDir(machine.HyperVVirt)
if err != nil {
return nil, err
}
// Acquire the image
// Until we are producing vhdx images in fcos, all images must be fed to us
// with --image-path. We should, however, accept both a file or url
g, err := machine.NewGenericDownloader(machine.HyperVVirt, opts.Name, opts.ImagePath)
imagePath, imageStream, err := v.acquireVMImage(opts)
if err != nil {
return nil, err
}
imagePath, err := machine.NewMachineFile(g.Get().GetLocalUncompressedFile(dataDir), nil)
if err != nil {
return nil, err
}
// assign values to machine
m.ImagePath = *imagePath
if err := machine.DownloadImage(g); err != nil {
return nil, err
}
m.ImageStream = imageStream
config := hypervctl.HardwareConfig{
CPUs: uint16(opts.CPUS),
@ -172,6 +160,44 @@ func (v HyperVVirtualization) NewMachine(opts machine.InitOptions) (machine.VM,
return v.LoadVMByName(opts.Name)
}
// acquireVMImage determines whether the requested image refers to an FCOS stream. If so,
// it downloads the stream artifact and returns the path of the uncompressed file. Otherwise,
// the user has provided an alternative image (a file path or URL), which is acquired instead.
func (v HyperVVirtualization) acquireVMImage(opts machine.InitOptions) (*machine.VMFile, string, error) {
imageStream := opts.ImagePath
var imagePath *machine.VMFile
switch opts.ImagePath {
// TODO these need to be re-typed as FCOSStreams
case machine.Testing.String(), machine.Next.String(), machine.Stable.String(), "":
// Get image as usual
vp := VirtualizationProvider()
dd, err := machine.NewFcosDownloader(machine.HyperVVirt, opts.Name, machine.FCOSStreamFromString(imageStream), vp)
if err != nil {
return nil, "", err
}
uncompressedFile, err := machine.NewMachineFile(dd.Get().LocalUncompressedFile, nil)
if err != nil {
return nil, "", err
}
imagePath = uncompressedFile
if err := machine.DownloadImage(dd); err != nil {
return nil, "", err
}
default:
// The user has provided an alternate image which can be a file path
// or URL.
imageStream = "custom"
altImagePath, err := machine.AcquireAlternateImage(opts.Name, vmtype, opts)
if err != nil {
return nil, "", err
}
imagePath = altImagePath
}
return imagePath, imageStream, nil
}
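The switch above only recognizes the three FCOS stream names (plus the empty default); everything else falls through to the custom-image path. Below is a minimal, self-contained sketch of that decision using a hypothetical classifyImageInput helper (not part of the commit), since acquireVMImage itself is unexported.

package main

import "fmt"

// classifyImageInput is a hypothetical stand-in that mirrors the switch in
// acquireVMImage: known FCOS stream names (or an empty value) mean "download
// the stream artifact"; anything else is treated as a user-supplied file or URL.
func classifyImageInput(imagePath string) string {
	switch imagePath {
	case "testing", "next", "stable", "":
		return imagePath // FCOS stream; the artifact is downloaded
	default:
		return "custom" // file path or URL provided by the user
	}
}

func main() {
	fmt.Println(classifyImageInput("stable"))                // stable
	fmt.Println(classifyImageInput(`C:\images\custom.vhdx`)) // custom
}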
func (v HyperVVirtualization) RemoveAndCleanMachines() error {
// Error handling used here is following what qemu did
var (


@ -203,9 +203,6 @@ func (m *HyperVMachine) Init(opts machine.InitOptions) (bool, error) {
m.IdentityPath = util.GetIdentityPath(m.Name)
// TODO This needs to be fixed in c-common
m.RemoteUsername = "core"
if m.UID == 0 {
m.UID = 1000
}


@ -37,7 +37,10 @@ func (p *WSLVirtualization) NewMachine(opts machine.InitOptions) (machine.VM, er
vm.ConfigPath = configPath
vm.ImagePath = opts.ImagePath
vm.RemoteUsername = opts.Username
// WSL uses a different username; here we hardcode it.
vm.RemoteUsername = "user"
vm.Created = time.Now()
vm.LastUp = vm.Created


@ -49,6 +49,10 @@ type CopyOptions struct {
CompressionFormat *compression.Algorithm
// CompressionLevel specifies what compression level is used
CompressionLevel *int
// ForceCompressionFormat ensures that the compression algorithm set in
// CompressionFormat is used exclusively, and blobs of other compression
// algorithms are not reused.
ForceCompressionFormat bool
// containers-auth.json(5) file to use when authenticating against
// container registries.
@ -294,6 +298,7 @@ func (r *Runtime) newCopier(options *CopyOptions) (*copier, error) {
c.imageCopyOptions.ProgressInterval = time.Second
}
c.imageCopyOptions.ForceCompressionFormat = options.ForceCompressionFormat
c.imageCopyOptions.ForceManifestMIMEType = options.ManifestMIMEType
c.imageCopyOptions.SourceCtx = c.systemContext
c.imageCopyOptions.DestinationCtx = c.systemContext
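For callers of libimage, the new field pairs with the existing CompressionFormat knob. A hedged sketch (not taken from this commit) of how a push path might request zstd and forbid reuse of differently compressed blobs:

package main

import (
	"fmt"

	"github.com/containers/common/libimage"
	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	// With ForceCompressionFormat set, the copier re-compresses to the requested
	// algorithm instead of reusing already-present blobs with other compressions.
	opts := &libimage.CopyOptions{
		CompressionFormat:      &compression.Zstd,
		ForceCompressionFormat: true,
	}
	fmt.Println("force compression:", opts.ForceCompressionFormat)
}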


@ -461,6 +461,7 @@ func (m *ManifestList) Push(ctx context.Context, destination string, options *Ma
SignSigstorePrivateKeyPassphrase: options.SignSigstorePrivateKeyPassphrase,
RemoveSignatures: options.RemoveSignatures,
ManifestType: options.ManifestMIMEType,
ForceCompressionFormat: options.ForceCompressionFormat,
}
_, d, err := m.list.Push(ctx, dest, pushOptions)


@ -72,6 +72,7 @@ type PushOptions struct {
ManifestType string // the format to use when saving the list - possible options are oci, v2s1, and v2s2
SourceFilter LookupReferenceFunc // filter the list source
AddCompression []string // add existing instances with requested compression algorithms to manifest list
ForceCompressionFormat bool // force push with requested compression ignoring the blobs which can be reused.
}
// Create creates a new list containing information about the specified image,
@ -259,6 +260,7 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push
SignSigstorePrivateKeyPassphrase: options.SignSigstorePrivateKeyPassphrase,
ForceManifestMIMEType: singleImageManifestType,
EnsureCompressionVariantsExist: compressionVariants,
ForceCompressionFormat: options.ForceCompressionFormat,
}
// Copy whatever we were asked to copy.


@ -5,12 +5,14 @@ import "os"
// getDefaultImage returns the default machine image stream
// On Windows this refers to the Fedora major release number
func getDefaultMachineImage() string {
return "35"
return "testing"
}
// getDefaultMachineUser returns the user to use for rootless podman
// This is only for the hyperv and qemu implementations. WSL's user
// will be hardcoded in podman to "user"
func getDefaultMachineUser() string {
return "user"
return "core"
}
// isCgroup2UnifiedMode returns whether we are running in cgroup2 mode.


@ -133,6 +133,10 @@ type Options struct {
// Invalid when copying a non-multi-architecture image. That will probably
// change in the future.
EnsureCompressionVariantsExist []OptionCompressionVariant
// ForceCompressionFormat ensures that the compression algorithm set in
// DestinationCtx.CompressionFormat is used exclusively, and blobs of other
// compression algorithms are not reused.
ForceCompressionFormat bool
}
// OptionCompressionVariant allows to supply information about
@ -163,6 +167,14 @@ type copier struct {
signersToClose []*signer.Signer // Signers that should be closed when this copier is destroyed.
}
// Internal function to validate `requireCompressionFormatMatch` for copySingleImageOptions
func shouldRequireCompressionFormatMatch(options *Options) (bool, error) {
if options.ForceCompressionFormat && (options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil) {
return false, fmt.Errorf("cannot use ForceCompressionFormat with undefined default compression format")
}
return options.ForceCompressionFormat, nil
}
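In other words, the option is rejected unless the destination context names a concrete algorithm. A small sketch of a valid Options value, assuming the usual c/image types; if CompressionFormat were left nil, copy.Image would fail with the error text above.

package main

import (
	"fmt"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/pkg/compression"
	"github.com/containers/image/v5/types"
)

func main() {
	opts := copy.Options{
		ForceCompressionFormat: true,
		// Required: without a destination CompressionFormat, copy.Image returns
		// "cannot use ForceCompressionFormat with undefined default compression format".
		DestinationCtx: &types.SystemContext{CompressionFormat: &compression.Zstd},
	}
	fmt.Println(opts.ForceCompressionFormat)
}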
// Image copies image from srcRef to destRef, using policyContext to validate
// source image admissibility. It returns the manifest which was written to
// the new copy of the image.
@ -269,8 +281,12 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
if len(options.EnsureCompressionVariantsExist) > 0 {
return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image")
}
requireCompressionFormatMatch, err := shouldRequireCompressionFormatMatch(options)
if err != nil {
return nil, err
}
// The simple case: just copy a single image.
single, err := c.copySingleImage(ctx, c.unparsedToplevel, nil, copySingleImageOptions{requireCompressionFormatMatch: false})
single, err := c.copySingleImage(ctx, c.unparsedToplevel, nil, copySingleImageOptions{requireCompressionFormatMatch: requireCompressionFormatMatch})
if err != nil {
return nil, err
}
@ -279,6 +295,10 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
if len(options.EnsureCompressionVariantsExist) > 0 {
return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image")
}
requireCompressionFormatMatch, err := shouldRequireCompressionFormatMatch(options)
if err != nil {
return nil, err
}
// This is a manifest list, and we weren't asked to copy multiple images. Choose a single image that
// matches the current system to copy, and copy it.
mfest, manifestType, err := c.unparsedToplevel.Manifest(ctx)
@ -295,7 +315,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
}
logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest)
unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)
single, err := c.copySingleImage(ctx, unparsedInstance, nil, copySingleImageOptions{requireCompressionFormatMatch: false})
single, err := c.copySingleImage(ctx, unparsedInstance, nil, copySingleImageOptions{requireCompressionFormatMatch: requireCompressionFormatMatch})
if err != nil {
return nil, fmt.Errorf("copying system image from manifest list: %w", err)
}


@ -32,6 +32,10 @@ type instanceCopy struct {
op instanceCopyKind
sourceDigest digest.Digest
// Fields which can be used by callers when operation
// is `instanceCopyCopy`
copyForceCompressionFormat bool
// Fields which can be used by callers when operation
// is `instanceCopyClone`
cloneCompressionVariant OptionCompressionVariant
@ -122,9 +126,14 @@ func prepareInstanceCopies(list internalManifest.List, instanceDigests []digest.
if err != nil {
return res, fmt.Errorf("getting details for instance %s: %w", instanceDigest, err)
}
forceCompressionFormat, err := shouldRequireCompressionFormatMatch(options)
if err != nil {
return nil, err
}
res = append(res, instanceCopy{
op: instanceCopyCopy,
sourceDigest: instanceDigest,
op: instanceCopyCopy,
sourceDigest: instanceDigest,
copyForceCompressionFormat: forceCompressionFormat,
})
platform := platformV1ToPlatformComparable(instanceDetails.ReadOnly.Platform)
compressionList := compressionsByPlatform[platform]
@ -230,7 +239,7 @@ func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte,
logrus.Debugf("Copying instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList))
c.Printf("Copying image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList))
unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest)
updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{requireCompressionFormatMatch: false})
updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{requireCompressionFormatMatch: instance.copyForceCompressionFormat})
if err != nil {
return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err)
}


@ -170,8 +170,19 @@ func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error {
index.Manifests = append(index.Manifests, addedEntries...)
}
if len(addedEntries) != 0 || updatedAnnotations {
slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) bool {
return !instanceIsZstd(a) && instanceIsZstd(b)
slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) int {
// FIXME? With Go 1.21 and cmp.Compare available, turn instanceIsZstd into an integer score that can be compared, and generalizes
// into more algorithms?
aZstd := instanceIsZstd(a)
bZstd := instanceIsZstd(b)
switch {
case aZstd == bZstd:
return 0
case !aZstd: // Implies bZstd
return -1
default: // aZstd && !bZstd
return 1
}
})
}
return nil


@ -82,6 +82,7 @@ func (css *candidateSortState) Swap(i, j int) {
func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []blobinfocache.BICReplacementCandidate2 {
// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
// compare equal.
// FIXME: Use slices.SortFunc after we update to Go 1.20 (Go 1.21?) and Time.Compare and cmp.Compare are available.
sort.Sort(&candidateSortState{
cs: cs,
primaryDigest: primaryDigest,


@ -57,7 +57,7 @@ type storageImageDestination struct {
imageRef storageReference
directory string // Temporary directory where we store blobs until Commit() time
nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs
nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs
manifest []byte // Manifest contents, temporary
manifestDigest digest.Digest // Valid if len(manifest) != 0
signatures []byte // Signature contents, temporary
@ -154,7 +154,7 @@ func (s *storageImageDestination) Close() error {
}
func (s *storageImageDestination) computeNextBlobCacheFile() string {
return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
return filepath.Join(s.directory, fmt.Sprintf("%d", s.nextTempFileID.Add(1)))
}
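The switch from a plain int32 plus atomic.AddInt32 to the typed sync/atomic.Int32 (available since Go 1.19) keeps the counter behavior identical while ruling out accidental non-atomic access. A standalone before/after sketch:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	// Old style: a bare int32 field, incremented with the package-level helper.
	var legacy int32
	fmt.Println(atomic.AddInt32(&legacy, 1)) // 1

	// New style: the typed counter; Add returns the new value, as above.
	var next atomic.Int32
	fmt.Println(next.Add(1)) // 1
}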
// PutBlobWithOptions writes contents of stream and returns data representing the result.
@ -763,7 +763,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
if len(layerBlobs) > 0 { // Can happen when using caches
prev := s.indexToStorageID[len(layerBlobs)-1]
if prev == nil {
return fmt.Errorf("Internal error: StorageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1)
return fmt.Errorf("Internal error: storageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1)
}
lastLayer = *prev
}
@ -775,6 +775,78 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
logrus.Debugf("setting image creation date to %s", inspect.Created)
options.CreationDate = *inspect.Created
}
// Set up to save the non-layer blobs as data items. Since we only share layers, they should all be in files, so
// we just need to screen out the ones that are actually layers to get the list of non-layers.
dataBlobs := set.New[digest.Digest]()
for blob := range s.filenames {
dataBlobs.Add(blob)
}
for _, layerBlob := range layerBlobs {
dataBlobs.Delete(layerBlob.Digest)
}
for _, blob := range dataBlobs.Values() {
v, err := os.ReadFile(s.filenames[blob])
if err != nil {
return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err)
}
options.BigData = append(options.BigData, storage.ImageBigDataOption{
Key: blob.String(),
Data: v,
Digest: digest.Canonical.FromBytes(v),
})
}
// Set up to save the unparsedToplevel's manifest if it differs from
// the per-platform one, which is saved below.
if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) {
manifestDigest, err := manifest.Digest(toplevelManifest)
if err != nil {
return fmt.Errorf("digesting top-level manifest: %w", err)
}
options.BigData = append(options.BigData, storage.ImageBigDataOption{
Key: manifestBigDataKey(manifestDigest),
Data: toplevelManifest,
Digest: manifestDigest,
})
}
// Set up to save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store.
// Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
// and using storage.ImageDigestBigDataKey for future users that don't specify any digest and for compatibility with older readers.
options.BigData = append(options.BigData, storage.ImageBigDataOption{
Key: manifestBigDataKey(s.manifestDigest),
Data: s.manifest,
Digest: s.manifestDigest,
})
options.BigData = append(options.BigData, storage.ImageBigDataOption{
Key: storage.ImageDigestBigDataKey,
Data: s.manifest,
Digest: s.manifestDigest,
})
// Set up to save the signatures, if we have any.
if len(s.signatures) > 0 {
options.BigData = append(options.BigData, storage.ImageBigDataOption{
Key: "signatures",
Data: s.signatures,
Digest: digest.Canonical.FromBytes(s.signatures),
})
}
for instanceDigest, signatures := range s.signatureses {
options.BigData = append(options.BigData, storage.ImageBigDataOption{
Key: signatureBigDataKey(instanceDigest),
Data: signatures,
Digest: digest.Canonical.FromBytes(signatures),
})
}
// Set up to save our metadata.
metadata, err := json.Marshal(s)
if err != nil {
return fmt.Errorf("encoding metadata for image: %w", err)
}
if len(metadata) != 0 {
options.Metadata = string(metadata)
}
// Create the image record, pointing to the most-recently added layer.
intendedID := s.imageRef.id
if intendedID == "" {
@ -797,8 +869,26 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
}
logrus.Debugf("reusing image ID %q", img.ID)
oldNames = append(oldNames, img.Names...)
// set the data items and metadata on the already-present image
// FIXME: this _replaces_ any "signatures" blobs and their
// sizes (tracked in the metadata) which might have already
// been present with new values, when ideally we'd find a way
// to merge them since they all apply to the same image
for _, data := range options.BigData {
if err := s.imageRef.transport.store.SetImageBigData(img.ID, data.Key, data.Data, manifest.Digest); err != nil {
logrus.Debugf("error saving big data %q for image %q: %v", data.Key, img.ID, err)
return fmt.Errorf("saving big data %q for image %q: %w", data.Key, img.ID, err)
}
}
if options.Metadata != "" {
if err := s.imageRef.transport.store.SetMetadata(img.ID, options.Metadata); err != nil {
logrus.Debugf("error saving metadata for image %q: %v", img.ID, err)
return fmt.Errorf("saving metadata for image %q: %w", img.ID, err)
}
logrus.Debugf("saved image metadata %q", options.Metadata)
}
} else {
logrus.Debugf("created new image ID %q", img.ID)
logrus.Debugf("created new image ID %q with metadata %q", img.ID, options.Metadata)
}
// Clean up the unfinished image on any error.
@ -813,78 +903,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
}
}()
// Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so
// we just need to screen out the ones that are actually layers to get the list of non-layers.
dataBlobs := set.New[digest.Digest]()
for blob := range s.filenames {
dataBlobs.Add(blob)
}
for _, layerBlob := range layerBlobs {
dataBlobs.Delete(layerBlob.Digest)
}
for _, blob := range dataBlobs.Values() {
v, err := os.ReadFile(s.filenames[blob])
if err != nil {
return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err)
}
if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil {
logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err)
return fmt.Errorf("saving big data %q for image %q: %w", blob.String(), img.ID, err)
}
}
// Save the unparsedToplevel's manifest if it differs from the per-platform one, which is saved below.
if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) {
manifestDigest, err := manifest.Digest(toplevelManifest)
if err != nil {
return fmt.Errorf("digesting top-level manifest: %w", err)
}
key := manifestBigDataKey(manifestDigest)
if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, toplevelManifest, manifest.Digest); err != nil {
logrus.Debugf("error saving top-level manifest for image %q: %v", img.ID, err)
return fmt.Errorf("saving top-level manifest for image %q: %w", img.ID, err)
}
}
// Save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store.
// Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
// and using storage.ImageDigestBigDataKey for future users that don't specify any digest and for compatibility with older readers.
key := manifestBigDataKey(s.manifestDigest)
if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
return fmt.Errorf("saving manifest for image %q: %w", img.ID, err)
}
key = storage.ImageDigestBigDataKey
if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
return fmt.Errorf("saving manifest for image %q: %w", img.ID, err)
}
// Save the signatures, if we have any.
if len(s.signatures) > 0 {
if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil {
logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
return fmt.Errorf("saving signatures for image %q: %w", img.ID, err)
}
}
for instanceDigest, signatures := range s.signatureses {
key := signatureBigDataKey(instanceDigest)
if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, signatures, manifest.Digest); err != nil {
logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
return fmt.Errorf("saving signatures for image %q: %w", img.ID, err)
}
}
// Save our metadata.
metadata, err := json.Marshal(s)
if err != nil {
logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err)
return fmt.Errorf("encoding metadata for image %q: %w", img.ID, err)
}
if len(metadata) != 0 {
if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil {
logrus.Debugf("error saving metadata for image %q: %v", img.ID, err)
return fmt.Errorf("saving metadata for image %q: %w", img.ID, err)
}
logrus.Debugf("saved image metadata %q", string(metadata))
}
// Adds the reference's name on the image. We don't need to worry about avoiding duplicate
// Add the reference's name on the image. We don't need to worry about avoiding duplicate
// values because AddNames() will deduplicate the list that we pass to it.
if name := s.imageRef.DockerReference(); name != nil {
if err := s.imageRef.transport.store.AddNames(img.ID, []string{name.String()}); err != nil {
@ -921,10 +940,7 @@ func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, s
return err
}
sizes = append(sizes, len(sig))
newblob := make([]byte, len(sigblob)+len(sig))
copy(newblob, sigblob)
copy(newblob[len(sigblob):], sig)
sigblob = newblob
sigblob = append(sigblob, sig...)
}
if instanceDigest == nil {
s.signatures = sigblob

vendor/golang.org/x/exp/slices/cmp.go (new vendored file)

@ -0,0 +1,44 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package slices
import "golang.org/x/exp/constraints"
// min is a version of the predeclared function from the Go 1.21 release.
func min[T constraints.Ordered](a, b T) T {
if a < b || isNaN(a) {
return a
}
return b
}
// max is a version of the predeclared function from the Go 1.21 release.
func max[T constraints.Ordered](a, b T) T {
if a > b || isNaN(a) {
return a
}
return b
}
// cmpLess is a copy of cmp.Less from the Go 1.21 release.
func cmpLess[T constraints.Ordered](x, y T) bool {
return (isNaN(x) && !isNaN(y)) || x < y
}
// cmpCompare is a copy of cmp.Compare from the Go 1.21 release.
func cmpCompare[T constraints.Ordered](x, y T) int {
xNaN := isNaN(x)
yNaN := isNaN(y)
if xNaN && yNaN {
return 0
}
if xNaN || x < y {
return -1
}
if yNaN || x > y {
return +1
}
return 0
}
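These helpers backport the Go 1.21 comparison semantics, in which NaN orders before every other value and two NaNs compare equal. On Go 1.21 itself the standard library behaves the same way, for example:

package main

import (
	"cmp"
	"fmt"
	"math"
)

func main() {
	fmt.Println(cmp.Compare(1.0, 2.0))               // -1
	fmt.Println(cmp.Compare(math.NaN(), 1.0))        // -1 (NaN sorts first)
	fmt.Println(cmp.Compare(math.NaN(), math.NaN())) // 0
}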


@ -3,23 +3,20 @@
// license that can be found in the LICENSE file.
// Package slices defines various functions useful with slices of any type.
// Unless otherwise specified, these functions all apply to the elements
// of a slice at index 0 <= i < len(s).
//
// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a
// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings),
// or the sorting may fail to sort correctly. A common case is when sorting slices of
// floating-point numbers containing NaN values.
package slices
import "golang.org/x/exp/constraints"
import (
"unsafe"
"golang.org/x/exp/constraints"
)
// Equal reports whether two slices are equal: the same length and all
// elements equal. If the lengths are different, Equal returns false.
// Otherwise, the elements are compared in increasing index order, and the
// comparison stops at the first unequal pair.
// Floating point NaNs are not considered equal.
func Equal[E comparable](s1, s2 []E) bool {
func Equal[S ~[]E, E comparable](s1, s2 S) bool {
if len(s1) != len(s2) {
return false
}
@ -31,12 +28,12 @@ func Equal[E comparable](s1, s2 []E) bool {
return true
}
// EqualFunc reports whether two slices are equal using a comparison
// EqualFunc reports whether two slices are equal using an equality
// function on each pair of elements. If the lengths are different,
// EqualFunc returns false. Otherwise, the elements are compared in
// increasing index order, and the comparison stops at the first index
// for which eq returns false.
func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool {
func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool {
if len(s1) != len(s2) {
return false
}
@ -49,45 +46,37 @@ func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool {
return true
}
// Compare compares the elements of s1 and s2.
// The elements are compared sequentially, starting at index 0,
// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair
// of elements. The elements are compared sequentially, starting at index 0,
// until one element is not equal to the other.
// The result of comparing the first non-matching elements is returned.
// If both slices are equal until one of them ends, the shorter slice is
// considered less than the longer one.
// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
// Comparisons involving floating point NaNs are ignored.
func Compare[E constraints.Ordered](s1, s2 []E) int {
s2len := len(s2)
func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
for i, v1 := range s1 {
if i >= s2len {
if i >= len(s2) {
return +1
}
v2 := s2[i]
switch {
case v1 < v2:
return -1
case v1 > v2:
return +1
if c := cmpCompare(v1, v2); c != 0 {
return c
}
}
if len(s1) < s2len {
if len(s1) < len(s2) {
return -1
}
return 0
}
// CompareFunc is like Compare but uses a comparison function
// on each pair of elements. The elements are compared in increasing
// index order, and the comparisons stop after the first time cmp
// returns non-zero.
// CompareFunc is like [Compare] but uses a custom comparison function on each
// pair of elements.
// The result is the first non-zero result of cmp; if cmp always
// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
// and +1 if len(s1) > len(s2).
func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int {
s2len := len(s2)
func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
for i, v1 := range s1 {
if i >= s2len {
if i >= len(s2) {
return +1
}
v2 := s2[i]
@ -95,7 +84,7 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int {
return c
}
}
if len(s1) < s2len {
if len(s1) < len(s2) {
return -1
}
return 0
@ -103,7 +92,7 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int {
// Index returns the index of the first occurrence of v in s,
// or -1 if not present.
func Index[E comparable](s []E, v E) int {
func Index[S ~[]E, E comparable](s S, v E) int {
for i := range s {
if v == s[i] {
return i
@ -114,7 +103,7 @@ func Index[E comparable](s []E, v E) int {
// IndexFunc returns the first index i satisfying f(s[i]),
// or -1 if none do.
func IndexFunc[E any](s []E, f func(E) bool) int {
func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
for i := range s {
if f(s[i]) {
return i
@ -124,39 +113,104 @@ func IndexFunc[E any](s []E, f func(E) bool) int {
}
// Contains reports whether v is present in s.
func Contains[E comparable](s []E, v E) bool {
func Contains[S ~[]E, E comparable](s S, v E) bool {
return Index(s, v) >= 0
}
// ContainsFunc reports whether at least one
// element e of s satisfies f(e).
func ContainsFunc[E any](s []E, f func(E) bool) bool {
func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
return IndexFunc(s, f) >= 0
}
// Insert inserts the values v... into s at index i,
// returning the modified slice.
// In the returned slice r, r[i] == v[0].
// The elements at s[i:] are shifted up to make room.
// In the returned slice r, r[i] == v[0],
// and r[i+len(v)] == value originally at r[i].
// Insert panics if i is out of range.
// This function is O(len(s) + len(v)).
func Insert[S ~[]E, E any](s S, i int, v ...E) S {
tot := len(s) + len(v)
if tot <= cap(s) {
s2 := s[:tot]
copy(s2[i+len(v):], s[i:])
m := len(v)
if m == 0 {
return s
}
n := len(s)
if i == n {
return append(s, v...)
}
if n+m > cap(s) {
// Use append rather than make so that we bump the size of
// the slice up to the next storage class.
// This is what Grow does but we don't call Grow because
// that might copy the values twice.
s2 := append(s[:i], make(S, n+m-i)...)
copy(s2[i:], v)
copy(s2[i+m:], s[i:])
return s2
}
s2 := make(S, tot)
copy(s2, s[:i])
copy(s2[i:], v)
copy(s2[i+len(v):], s[i:])
return s2
s = s[:n+m]
// before:
// s: aaaaaaaabbbbccccccccdddd
// ^ ^ ^ ^
// i i+m n n+m
// after:
// s: aaaaaaaavvvvbbbbcccccccc
// ^ ^ ^ ^
// i i+m n n+m
//
// a are the values that don't move in s.
// v are the values copied in from v.
// b and c are the values from s that are shifted up in index.
// d are the values that get overwritten, never to be seen again.
if !overlaps(v, s[i+m:]) {
// Easy case - v does not overlap either the c or d regions.
// (It might be in some of a or b, or elsewhere entirely.)
// The data we copy up doesn't write to v at all, so just do it.
copy(s[i+m:], s[i:])
// Now we have
// s: aaaaaaaabbbbbbbbcccccccc
// ^ ^ ^ ^
// i i+m n n+m
// Note the b values are duplicated.
copy(s[i:], v)
// Now we have
// s: aaaaaaaavvvvbbbbcccccccc
// ^ ^ ^ ^
// i i+m n n+m
// That's the result we want.
return s
}
// The hard case - v overlaps c or d. We can't just shift up
// the data because we'd move or clobber the values we're trying
// to insert.
// So instead, write v on top of d, then rotate.
copy(s[n:], v)
// Now we have
// s: aaaaaaaabbbbccccccccvvvv
// ^ ^ ^ ^
// i i+m n n+m
rotateRight(s[i:], m)
// Now we have
// s: aaaaaaaavvvvbbbbcccccccc
// ^ ^ ^ ^
// i i+m n n+m
// That's the result we want.
return s
}
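The contract spelled out in the doc comment (r[i] == v[0], and the element previously at index i moves to r[i+len(v)]) in a short usage example:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []int{1, 2, 5, 6}
	s = slices.Insert(s, 2, 3, 4) // s[2] == 3; the old s[2] (5) moves to s[4]
	fmt.Println(s)                // [1 2 3 4 5 6]
}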
// Delete removes the elements s[i:j] from s, returning the modified slice.
// Delete panics if s[i:j] is not a valid slice of s.
// Delete modifies the contents of the slice s; it does not create a new slice.
// Delete is O(len(s)-j), so if many items must be deleted, it is better to
// make a single call deleting them all together than to delete one at a time.
// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those
@ -168,22 +222,113 @@ func Delete[S ~[]E, E any](s S, i, j int) S {
return append(s[:i], s[j:]...)
}
// DeleteFunc removes any elements from s for which del returns true,
// returning the modified slice.
// When DeleteFunc removes m elements, it might not modify the elements
// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
// zeroing those elements so that objects they reference can be garbage
// collected.
func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
i := IndexFunc(s, del)
if i == -1 {
return s
}
// Don't start copying elements until we find one to delete.
for j := i + 1; j < len(s); j++ {
if v := s[j]; !del(v) {
s[i] = v
i++
}
}
return s[:i]
}
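DeleteFunc is new in this update of x/exp/slices; it compacts the kept elements in place and returns the shortened slice:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []int{1, 2, 3, 4, 5}
	s = slices.DeleteFunc(s, func(x int) bool { return x%2 == 0 }) // drop even values
	fmt.Println(s)                                                 // [1 3 5]
}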
// Replace replaces the elements s[i:j] by the given v, and returns the
// modified slice. Replace panics if s[i:j] is not a valid slice of s.
func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
_ = s[i:j] // verify that i:j is a valid subslice
if i == j {
return Insert(s, i, v...)
}
if j == len(s) {
return append(s[:i], v...)
}
tot := len(s[:i]) + len(v) + len(s[j:])
if tot <= cap(s) {
s2 := s[:tot]
copy(s2[i+len(v):], s[j:])
if tot > cap(s) {
// Too big to fit, allocate and copy over.
s2 := append(s[:i], make(S, tot-i)...) // See Insert
copy(s2[i:], v)
copy(s2[i+len(v):], s[j:])
return s2
}
s2 := make(S, tot)
copy(s2, s[:i])
copy(s2[i:], v)
copy(s2[i+len(v):], s[j:])
return s2
r := s[:tot]
if i+len(v) <= j {
// Easy, as v fits in the deleted portion.
copy(r[i:], v)
if i+len(v) != j {
copy(r[i+len(v):], s[j:])
}
return r
}
// We are expanding (v is bigger than j-i).
// The situation is something like this:
// (example has i=4,j=8,len(s)=16,len(v)=6)
// s: aaaaxxxxbbbbbbbbyy
// ^ ^ ^ ^
// i j len(s) tot
// a: prefix of s
// x: deleted range
// b: more of s
// y: area to expand into
if !overlaps(r[i+len(v):], v) {
// Easy, as v is not clobbered by the first copy.
copy(r[i+len(v):], s[j:])
copy(r[i:], v)
return r
}
// This is a situation where we don't have a single place to which
// we can copy v. Parts of it need to go to two different places.
// We want to copy the prefix of v into y and the suffix into x, then
// rotate |y| spots to the right.
//
// v[2:] v[:2]
// | |
// s: aaaavvvvbbbbbbbbvv
// ^ ^ ^ ^
// i j len(s) tot
//
// If either of those two destinations don't alias v, then we're good.
y := len(v) - (j - i) // length of y portion
if !overlaps(r[i:j], v) {
copy(r[i:j], v[y:])
copy(r[len(s):], v[:y])
rotateRight(r[i:], y)
return r
}
if !overlaps(r[len(s):], v) {
copy(r[len(s):], v[:y])
copy(r[i:j], v[y:])
rotateRight(r[i:], y)
return r
}
// Now we know that v overlaps both x and y.
// That means that the entirety of b is *inside* v.
// So we don't need to preserve b at all; instead we
// can copy v first, then copy the b part of v out of
// v to the right destination.
k := startIdx(v, s[j:])
copy(r[i:], v)
copy(r[i+len(v):], r[i+k:])
return r
}
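Despite the overlap handling above, the caller-visible behavior is simple: s[i:j] is swapped for v, growing or shrinking the slice as needed. For example:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []string{"a", "x", "x", "d"}
	s = slices.Replace(s, 1, 3, "b", "c") // same length: s[1:3] becomes ["b" "c"]
	fmt.Println(s)                        // [a b c d]

	s = slices.Replace(s, 1, 2, "b1", "b2", "b3") // expanding replacement
	fmt.Println(s)                                // [a b1 b2 b3 c d]
}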
// Clone returns a copy of the slice.
@ -198,7 +343,8 @@ func Clone[S ~[]E, E any](s S) S {
// Compact replaces consecutive runs of equal elements with a single copy.
// This is like the uniq command found on Unix.
// Compact modifies the contents of the slice s; it does not create a new slice.
// Compact modifies the contents of the slice s and returns the modified slice,
// which may have a smaller length.
// When Compact discards m elements in total, it might not modify the elements
// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
// zeroing those elements so that objects they reference can be garbage collected.
@ -218,7 +364,8 @@ func Compact[S ~[]E, E comparable](s S) S {
return s[:i]
}
// CompactFunc is like Compact but uses a comparison function.
// CompactFunc is like [Compact] but uses an equality function to compare elements.
// For runs of elements that compare equal, CompactFunc keeps the first one.
func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
if len(s) < 2 {
return s
@ -256,3 +403,97 @@ func Grow[S ~[]E, E any](s S, n int) S {
func Clip[S ~[]E, E any](s S) S {
return s[:len(s):len(s)]
}
// Rotation algorithm explanation:
//
// rotate left by 2
// start with
// 0123456789
// split up like this
// 01 234567 89
// swap first 2 and last 2
// 89 234567 01
// join first parts
// 89234567 01
// recursively rotate first left part by 2
// 23456789 01
// join at the end
// 2345678901
//
// rotate left by 8
// start with
// 0123456789
// split up like this
// 01 234567 89
// swap first 2 and last 2
// 89 234567 01
// join last parts
// 89 23456701
// recursively rotate second part left by 6
// 89 01234567
// join at the end
// 8901234567
// TODO: There are other rotate algorithms.
// This algorithm has the desirable property that it moves each element exactly twice.
// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes.
// The follow-cycles algorithm can be 1-write but it is not very cache friendly.
// rotateLeft rotates b left by n spaces.
// s_final[i] = s_orig[i+r], wrapping around.
func rotateLeft[E any](s []E, r int) {
for r != 0 && r != len(s) {
if r*2 <= len(s) {
swap(s[:r], s[len(s)-r:])
s = s[:len(s)-r]
} else {
swap(s[:len(s)-r], s[r:])
s, r = s[len(s)-r:], r*2-len(s)
}
}
}
func rotateRight[E any](s []E, r int) {
rotateLeft(s, len(s)-r)
}
// swap swaps the contents of x and y. x and y must be equal length and disjoint.
func swap[E any](x, y []E) {
for i := 0; i < len(x); i++ {
x[i], y[i] = y[i], x[i]
}
}
// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap.
func overlaps[E any](a, b []E) bool {
if len(a) == 0 || len(b) == 0 {
return false
}
elemSize := unsafe.Sizeof(a[0])
if elemSize == 0 {
return false
}
// TODO: use a runtime/unsafe facility once one becomes available. See issue 12445.
// Also see crypto/internal/alias/alias.go:AnyOverlap
return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) &&
uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1)
}
// startIdx returns the index in haystack where the needle starts.
// prerequisite: the needle must be aliased entirely inside the haystack.
func startIdx[E any](haystack, needle []E) int {
p := &needle[0]
for i := range haystack {
if p == &haystack[i] {
return i
}
}
// TODO: what if the overlap is by a non-integral number of Es?
panic("needle not found")
}
// Reverse reverses the elements of the slice in place.
func Reverse[S ~[]E, E any](s S) {
for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
s[i], s[j] = s[j], s[i]
}
}
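Reverse is also new in this revision and, unlike the rotation helpers above, is exported:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []int{1, 2, 3}
	slices.Reverse(s) // reverses in place
	fmt.Println(s)    // [3 2 1]
}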


@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp
package slices
import (
@ -11,57 +13,116 @@ import (
)
// Sort sorts a slice of any ordered type in ascending order.
// Sort may fail to sort correctly when sorting slices of floating-point
// numbers containing Not-a-number (NaN) values.
// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))})
// instead if the input may contain NaNs.
func Sort[E constraints.Ordered](x []E) {
// When sorting floating-point numbers, NaNs are ordered before other values.
func Sort[S ~[]E, E constraints.Ordered](x S) {
n := len(x)
pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
}
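The new NaN guarantee means Sort no longer mis-sorts float slices containing NaN; NaNs simply end up at the front:

package main

import (
	"fmt"
	"math"

	"golang.org/x/exp/slices"
)

func main() {
	xs := []float64{3, math.NaN(), 1, 2}
	slices.Sort(xs)
	fmt.Println(xs) // [NaN 1 2 3]
}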
// SortFunc sorts the slice x in ascending order as determined by the less function.
// This sort is not guaranteed to be stable.
// SortFunc sorts the slice x in ascending order as determined by the cmp
// function. This sort is not guaranteed to be stable.
// cmp(a, b) should return a negative number when a < b, a positive number when
// a > b and zero when a == b.
//
// SortFunc requires that less is a strict weak ordering.
// SortFunc requires that cmp is a strict weak ordering.
// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
func SortFunc[E any](x []E, less func(a, b E) bool) {
func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
n := len(x)
pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less)
pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp)
}
// SortStableFunc sorts the slice x while keeping the original order of equal
// elements, using less to compare elements.
func SortStableFunc[E any](x []E, less func(a, b E) bool) {
stableLessFunc(x, len(x), less)
// elements, using cmp to compare elements in the same way as [SortFunc].
func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
stableCmpFunc(x, len(x), cmp)
}
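Callers migrating from the old bool less function now return an int instead; a comparator that returns a negative, zero, or positive value is all that is needed, and stability keeps equal keys in their input order:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type layer struct {
	name string
	size int
}

func main() {
	ls := []layer{{"b", 10}, {"a", 10}, {"c", 3}}
	// Subtraction is fine for these small values; equal sizes keep their
	// original order ("b" stays before "a").
	slices.SortStableFunc(ls, func(x, y layer) int { return x.size - y.size })
	fmt.Println(ls) // [{c 3} {b 10} {a 10}]
}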
// IsSorted reports whether x is sorted in ascending order.
func IsSorted[E constraints.Ordered](x []E) bool {
func IsSorted[S ~[]E, E constraints.Ordered](x S) bool {
for i := len(x) - 1; i > 0; i-- {
if x[i] < x[i-1] {
if cmpLess(x[i], x[i-1]) {
return false
}
}
return true
}
// IsSortedFunc reports whether x is sorted in ascending order, with less as the
// comparison function.
func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool {
// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the
// comparison function as defined by [SortFunc].
func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool {
for i := len(x) - 1; i > 0; i-- {
if less(x[i], x[i-1]) {
if cmp(x[i], x[i-1]) < 0 {
return false
}
}
return true
}
// Min returns the minimal value in x. It panics if x is empty.
// For floating-point numbers, Min propagates NaNs (any NaN value in x
// forces the output to be NaN).
func Min[S ~[]E, E constraints.Ordered](x S) E {
if len(x) < 1 {
panic("slices.Min: empty list")
}
m := x[0]
for i := 1; i < len(x); i++ {
m = min(m, x[i])
}
return m
}
// MinFunc returns the minimal value in x, using cmp to compare elements.
// It panics if x is empty. If there is more than one minimal element
// according to the cmp function, MinFunc returns the first one.
func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
if len(x) < 1 {
panic("slices.MinFunc: empty list")
}
m := x[0]
for i := 1; i < len(x); i++ {
if cmp(x[i], m) < 0 {
m = x[i]
}
}
return m
}
// Max returns the maximal value in x. It panics if x is empty.
// For floating-point E, Max propagates NaNs (any NaN value in x
// forces the output to be NaN).
func Max[S ~[]E, E constraints.Ordered](x S) E {
if len(x) < 1 {
panic("slices.Max: empty list")
}
m := x[0]
for i := 1; i < len(x); i++ {
m = max(m, x[i])
}
return m
}
// MaxFunc returns the maximal value in x, using cmp to compare elements.
// It panics if x is empty. If there is more than one maximal element
// according to the cmp function, MaxFunc returns the first one.
func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
if len(x) < 1 {
panic("slices.MaxFunc: empty list")
}
m := x[0]
for i := 1; i < len(x); i++ {
if cmp(x[i], m) > 0 {
m = x[i]
}
}
return m
}
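Min, Max, and their Func variants are new exported helpers in this version; MaxFunc returns the first maximal element when several compare equal:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	xs := []int{3, 1, 4, 1, 5}
	fmt.Println(slices.Min(xs), slices.Max(xs)) // 1 5

	words := []string{"fig", "kiwi", "banana"}
	longest := slices.MaxFunc(words, func(a, b string) int { return len(a) - len(b) })
	fmt.Println(longest) // banana
}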
// BinarySearch searches for target in a sorted slice and returns the position
// where target is found, or the position where target would appear in the
// sort order; it also returns a bool saying whether the target is really found
// in the slice. The slice must be sorted in increasing order.
func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) {
func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
// Inlining is faster than calling BinarySearchFunc with a lambda.
n := len(x)
// Define x[-1] < target and x[n] >= target.
@ -70,24 +131,24 @@ func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) {
for i < j {
h := int(uint(i+j) >> 1) // avoid overflow when computing h
// i ≤ h < j
if x[h] < target {
if cmpLess(x[h], target) {
i = h + 1 // preserves x[i-1] < target
} else {
j = h // preserves x[j] >= target
}
}
// i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
return i, i < n && x[i] == target
return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target)))
}
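BinarySearch returns both the index (or insertion point) and whether the target was actually present; the NaN clause added above only matters for float slices:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	xs := []int{1, 3, 5, 7}
	i, found := slices.BinarySearch(xs, 5)
	fmt.Println(i, found) // 2 true

	i, found = slices.BinarySearch(xs, 4)
	fmt.Println(i, found) // 2 false (insertion index for 4)
}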
// BinarySearchFunc works like BinarySearch, but uses a custom comparison
// BinarySearchFunc works like [BinarySearch], but uses a custom comparison
// function. The slice must be sorted in increasing order, where "increasing"
// is defined by cmp. cmp should return 0 if the slice element matches
// the target, a negative number if the slice element precedes the target,
// or a positive number if the slice element follows the target.
// cmp must implement the same ordering as the slice, such that if
// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) {
func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) {
n := len(x)
// Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
// Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
@ -126,3 +187,9 @@ func (r *xorshift) Next() uint64 {
func nextPowerOfTwo(length int) uint {
return 1 << bits.Len(uint(length))
}
// isNaN reports whether x is a NaN without requiring the math package.
// This will always return false if T is not floating-point.
func isNaN[T constraints.Ordered](x T) bool {
return x != x
}


@ -6,28 +6,28 @@
package slices
// insertionSortLessFunc sorts data[a:b] using insertion sort.
func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
// insertionSortCmpFunc sorts data[a:b] using insertion sort.
func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
for i := a + 1; i < b; i++ {
for j := i; j > a && less(data[j], data[j-1]); j-- {
for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- {
data[j], data[j-1] = data[j-1], data[j]
}
}
}
// siftDownLessFunc implements the heap property on data[lo:hi].
// siftDownCmpFunc implements the heap property on data[lo:hi].
// first is an offset into the array where the root of the heap lies.
func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) {
func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) {
root := lo
for {
child := 2*root + 1
if child >= hi {
break
}
if child+1 < hi && less(data[first+child], data[first+child+1]) {
if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) {
child++
}
if !less(data[first+root], data[first+child]) {
if !(cmp(data[first+root], data[first+child]) < 0) {
return
}
data[first+root], data[first+child] = data[first+child], data[first+root]
@ -35,30 +35,30 @@ func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool
}
}
func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
first := a
lo := 0
hi := b - a
// Build heap with greatest element at top.
for i := (hi - 1) / 2; i >= 0; i-- {
siftDownLessFunc(data, i, hi, first, less)
siftDownCmpFunc(data, i, hi, first, cmp)
}
// Pop elements, largest first, into end of data.
for i := hi - 1; i >= 0; i-- {
data[first], data[first+i] = data[first+i], data[first]
siftDownLessFunc(data, lo, i, first, less)
siftDownCmpFunc(data, lo, i, first, cmp)
}
}
// pdqsortLessFunc sorts data[a:b].
// pdqsortCmpFunc sorts data[a:b].
// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
// C++ implementation: https://github.com/orlp/pdqsort
// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) {
func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) {
const maxInsertion = 12
var (
@ -70,25 +70,25 @@ func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) {
length := b - a
if length <= maxInsertion {
insertionSortLessFunc(data, a, b, less)
insertionSortCmpFunc(data, a, b, cmp)
return
}
// Fall back to heapsort if too many bad choices were made.
if limit == 0 {
heapSortLessFunc(data, a, b, less)
heapSortCmpFunc(data, a, b, cmp)
return
}
// If the last partitioning was imbalanced, we need to breaking patterns.
if !wasBalanced {
breakPatternsLessFunc(data, a, b, less)
breakPatternsCmpFunc(data, a, b, cmp)
limit--
}
pivot, hint := choosePivotLessFunc(data, a, b, less)
pivot, hint := choosePivotCmpFunc(data, a, b, cmp)
if hint == decreasingHint {
reverseRangeLessFunc(data, a, b, less)
reverseRangeCmpFunc(data, a, b, cmp)
// The chosen pivot was pivot-a elements after the start of the array.
// After reversing it is pivot-a elements before the end of the array.
// The idea came from Rust's implementation.
@ -98,48 +98,48 @@ func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) {
// The slice is likely already sorted.
if wasBalanced && wasPartitioned && hint == increasingHint {
if partialInsertionSortLessFunc(data, a, b, less) {
if partialInsertionSortCmpFunc(data, a, b, cmp) {
return
}
}
// Probably the slice contains many duplicate elements, partition the slice into
// elements equal to and elements greater than the pivot.
if a > 0 && !less(data[a-1], data[pivot]) {
mid := partitionEqualLessFunc(data, a, b, pivot, less)
if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) {
mid := partitionEqualCmpFunc(data, a, b, pivot, cmp)
a = mid
continue
}
mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less)
mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp)
wasPartitioned = alreadyPartitioned
leftLen, rightLen := mid-a, b-mid
balanceThreshold := length / 8
if leftLen < rightLen {
wasBalanced = leftLen >= balanceThreshold
pdqsortLessFunc(data, a, mid, limit, less)
pdqsortCmpFunc(data, a, mid, limit, cmp)
a = mid + 1
} else {
wasBalanced = rightLen >= balanceThreshold
pdqsortLessFunc(data, mid+1, b, limit, less)
pdqsortCmpFunc(data, mid+1, b, limit, cmp)
b = mid
}
}
}
// partitionLessFunc does one quicksort partition.
// partitionCmpFunc does one quicksort partition.
// Let p = data[pivot]
// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
// On return, data[newpivot] = p
func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) {
func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
for i <= j && less(data[i], data[a]) {
for i <= j && (cmp(data[i], data[a]) < 0) {
i++
}
for i <= j && !less(data[j], data[a]) {
for i <= j && !(cmp(data[j], data[a]) < 0) {
j--
}
if i > j {
@ -151,10 +151,10 @@ func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool)
j--
for {
for i <= j && less(data[i], data[a]) {
for i <= j && (cmp(data[i], data[a]) < 0) {
i++
}
for i <= j && !less(data[j], data[a]) {
for i <= j && !(cmp(data[j], data[a]) < 0) {
j--
}
if i > j {
@ -168,17 +168,17 @@ func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool)
return j, false
}
// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) {
func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
for {
for i <= j && !less(data[a], data[i]) {
for i <= j && !(cmp(data[a], data[i]) < 0) {
i++
}
for i <= j && less(data[a], data[j]) {
for i <= j && (cmp(data[a], data[j]) < 0) {
j--
}
if i > j {
@ -191,15 +191,15 @@ func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E)
return i
}
// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end.
func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool {
// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end.
func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
const (
maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
shortestShifting = 50 // don't shift any elements on short arrays
)
i := a + 1
for j := 0; j < maxSteps; j++ {
for i < b && !less(data[i], data[i-1]) {
for i < b && !(cmp(data[i], data[i-1]) < 0) {
i++
}
@ -216,7 +216,7 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b
// Shift the smaller one to the left.
if i-a >= 2 {
for j := i - 1; j >= 1; j-- {
if !less(data[j], data[j-1]) {
if !(cmp(data[j], data[j-1]) < 0) {
break
}
data[j], data[j-1] = data[j-1], data[j]
@ -225,7 +225,7 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b
// Shift the greater one to the right.
if b-i >= 2 {
for j := i + 1; j < b; j++ {
if !less(data[j], data[j-1]) {
if !(cmp(data[j], data[j-1]) < 0) {
break
}
data[j], data[j-1] = data[j-1], data[j]
@ -235,9 +235,9 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b
return false
}
// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns
// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
// that might cause imbalanced partitions in quicksort.
func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
length := b - a
if length >= 8 {
random := xorshift(length)
@ -253,12 +253,12 @@ func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
}
}
// choosePivotLessFunc chooses a pivot in data[a:b].
// choosePivotCmpFunc chooses a pivot in data[a:b].
//
// [0,8): chooses a static pivot.
// [8,shortestNinther): uses the simple median-of-three method.
// [shortestNinther,∞): uses the Tukey ninther method.
func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) {
func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) {
const (
shortestNinther = 50
maxSwaps = 4 * 3
@ -276,12 +276,12 @@ func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (piv
if l >= 8 {
if l >= shortestNinther {
// Tukey ninther method, the idea came from Rust's implementation.
i = medianAdjacentLessFunc(data, i, &swaps, less)
j = medianAdjacentLessFunc(data, j, &swaps, less)
k = medianAdjacentLessFunc(data, k, &swaps, less)
i = medianAdjacentCmpFunc(data, i, &swaps, cmp)
j = medianAdjacentCmpFunc(data, j, &swaps, cmp)
k = medianAdjacentCmpFunc(data, k, &swaps, cmp)
}
// Find the median among i, j, k and store it into j.
j = medianLessFunc(data, i, j, k, &swaps, less)
j = medianCmpFunc(data, i, j, k, &swaps, cmp)
}
switch swaps {
@ -294,29 +294,29 @@ func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (piv
}
}
// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) {
if less(data[b], data[a]) {
// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) {
if cmp(data[b], data[a]) < 0 {
*swaps++
return b, a
}
return a, b
}
// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int {
a, b = order2LessFunc(data, a, b, swaps, less)
b, c = order2LessFunc(data, b, c, swaps, less)
a, b = order2LessFunc(data, a, b, swaps, less)
// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int {
a, b = order2CmpFunc(data, a, b, swaps, cmp)
b, c = order2CmpFunc(data, b, c, swaps, cmp)
a, b = order2CmpFunc(data, a, b, swaps, cmp)
return b
}
// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int {
return medianLessFunc(data, a-1, a, a+1, swaps, less)
// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int {
return medianCmpFunc(data, a-1, a, a+1, swaps, cmp)
}
func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
i := a
j := b - 1
for i < j {
@ -326,37 +326,37 @@ func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
}
}
func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) {
func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) {
for i := 0; i < n; i++ {
data[a+i], data[b+i] = data[b+i], data[a+i]
}
}
func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) {
func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) {
blockSize := 20 // must be > 0
a, b := 0, blockSize
for b <= n {
insertionSortLessFunc(data, a, b, less)
insertionSortCmpFunc(data, a, b, cmp)
a = b
b += blockSize
}
insertionSortLessFunc(data, a, n, less)
insertionSortCmpFunc(data, a, n, cmp)
for blockSize < n {
a, b = 0, 2*blockSize
for b <= n {
symMergeLessFunc(data, a, a+blockSize, b, less)
symMergeCmpFunc(data, a, a+blockSize, b, cmp)
a = b
b += 2 * blockSize
}
if m := a + blockSize; m < n {
symMergeLessFunc(data, a, m, n, less)
symMergeCmpFunc(data, a, m, n, cmp)
}
blockSize *= 2
}
}
// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using
// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using
// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
@ -375,7 +375,7 @@ func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) {
// symMerge assumes non-degenerate arguments: a < m && m < b.
// Having the caller check this condition eliminates many leaf recursion calls,
// which improves performance.
func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
// Avoid unnecessary recursions of symMerge
// by direct insertion of data[a] into data[m:b]
// if data[a:m] only contains one element.
@ -387,7 +387,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
j := b
for i < j {
h := int(uint(i+j) >> 1)
if less(data[h], data[a]) {
if cmp(data[h], data[a]) < 0 {
i = h + 1
} else {
j = h
@ -411,7 +411,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
j := m
for i < j {
h := int(uint(i+j) >> 1)
if !less(data[m], data[h]) {
if !(cmp(data[m], data[h]) < 0) {
i = h + 1
} else {
j = h
@ -438,7 +438,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
for start < r {
c := int(uint(start+r) >> 1)
if !less(data[p-c], data[c]) {
if !(cmp(data[p-c], data[c]) < 0) {
start = c + 1
} else {
r = c
@ -447,33 +447,33 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
end := n - start
if start < m && m < end {
rotateLessFunc(data, start, m, end, less)
rotateCmpFunc(data, start, m, end, cmp)
}
if a < start && start < mid {
symMergeLessFunc(data, a, start, mid, less)
symMergeCmpFunc(data, a, start, mid, cmp)
}
if mid < end && end < b {
symMergeLessFunc(data, mid, end, b, less)
symMergeCmpFunc(data, mid, end, b, cmp)
}
}
// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
// Data of the form 'x u v y' is changed to 'x v u y'.
// rotate performs at most b-a many calls to data.Swap,
// and it assumes non-degenerate arguments: a < m && m < b.
func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
i := m - a
j := b - m
for i != j {
if i > j {
swapRangeLessFunc(data, m-i, m, j, less)
swapRangeCmpFunc(data, m-i, m, j, cmp)
i -= j
} else {
swapRangeLessFunc(data, m-i, m+j-i, i, less)
swapRangeCmpFunc(data, m-i, m+j-i, i, cmp)
j -= i
}
}
// i == j
swapRangeLessFunc(data, m-i, m, i, less)
swapRangeCmpFunc(data, m-i, m, i, cmp)
}
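Taken together, the hunks above are a mechanical comparator migration: every generated *LessFunc sorter becomes a *CmpFunc sorter, and each boolean less(x, y) call is rewritten as cmp(x, y) < 0. A minimal sketch of how the two styles relate is shown below; lessToCmp is an illustrative helper invented for this document, not an identifier from the vendored package.

package main

import "fmt"

// lessToCmp adapts a boolean less(a, b) comparator to the three-way
// cmp(a, b) int form the *CmpFunc sorters above consume: negative when
// a sorts before b, positive when a sorts after b, zero when neither
// orders before the other.
func lessToCmp[E any](less func(a, b E) bool) func(a, b E) int {
	return func(a, b E) int {
		switch {
		case less(a, b):
			return -1
		case less(b, a):
			return 1
		default:
			return 0
		}
	}
}

func main() {
	cmp := lessToCmp(func(a, b string) bool { return len(a) < len(b) })
	fmt.Println(cmp("go", "rust"), cmp("rust", "go"), cmp("ab", "cd")) // -1 1 0
}

The reverse direction is what the diff applies throughout this file: wherever the old code evaluated less(x, y), the new code tests cmp(x, y) < 0, and !less(x, y) becomes !(cmp(x, y) < 0), so the sorting behavior is unchanged.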


@ -11,7 +11,7 @@ import "golang.org/x/exp/constraints"
// insertionSortOrdered sorts data[a:b] using insertion sort.
func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
for i := a + 1; i < b; i++ {
for j := i; j > a && (data[j] < data[j-1]); j-- {
for j := i; j > a && cmpLess(data[j], data[j-1]); j-- {
data[j], data[j-1] = data[j-1], data[j]
}
}
@ -26,10 +26,10 @@ func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
if child >= hi {
break
}
if child+1 < hi && (data[first+child] < data[first+child+1]) {
if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
child++
}
if !(data[first+root] < data[first+child]) {
if !cmpLess(data[first+root], data[first+child]) {
return
}
data[first+root], data[first+child] = data[first+child], data[first+root]
@ -107,7 +107,7 @@ func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
// Probably the slice contains many duplicate elements, partition the slice into
// elements equal to and elements greater than the pivot.
if a > 0 && !(data[a-1] < data[pivot]) {
if a > 0 && !cmpLess(data[a-1], data[pivot]) {
mid := partitionEqualOrdered(data, a, b, pivot)
a = mid
continue
@ -138,10 +138,10 @@ func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivo
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
for i <= j && (data[i] < data[a]) {
for i <= j && cmpLess(data[i], data[a]) {
i++
}
for i <= j && !(data[j] < data[a]) {
for i <= j && !cmpLess(data[j], data[a]) {
j--
}
if i > j {
@ -153,10 +153,10 @@ func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivo
j--
for {
for i <= j && (data[i] < data[a]) {
for i <= j && cmpLess(data[i], data[a]) {
i++
}
for i <= j && !(data[j] < data[a]) {
for i <= j && !cmpLess(data[j], data[a]) {
j--
}
if i > j {
@ -177,10 +177,10 @@ func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (ne
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
for {
for i <= j && !(data[a] < data[i]) {
for i <= j && !cmpLess(data[a], data[i]) {
i++
}
for i <= j && (data[a] < data[j]) {
for i <= j && cmpLess(data[a], data[j]) {
j--
}
if i > j {
@ -201,7 +201,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool
)
i := a + 1
for j := 0; j < maxSteps; j++ {
for i < b && !(data[i] < data[i-1]) {
for i < b && !cmpLess(data[i], data[i-1]) {
i++
}
@ -218,7 +218,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool
// Shift the smaller one to the left.
if i-a >= 2 {
for j := i - 1; j >= 1; j-- {
if !(data[j] < data[j-1]) {
if !cmpLess(data[j], data[j-1]) {
break
}
data[j], data[j-1] = data[j-1], data[j]
@ -227,7 +227,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool
// Shift the greater one to the right.
if b-i >= 2 {
for j := i + 1; j < b; j++ {
if !(data[j] < data[j-1]) {
if !cmpLess(data[j], data[j-1]) {
break
}
data[j], data[j-1] = data[j-1], data[j]
@ -298,7 +298,7 @@ func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, h
// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
if data[b] < data[a] {
if cmpLess(data[b], data[a]) {
*swaps++
return b, a
}
@ -389,7 +389,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
j := b
for i < j {
h := int(uint(i+j) >> 1)
if data[h] < data[a] {
if cmpLess(data[h], data[a]) {
i = h + 1
} else {
j = h
@ -413,7 +413,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
j := m
for i < j {
h := int(uint(i+j) >> 1)
if !(data[m] < data[h]) {
if !cmpLess(data[m], data[h]) {
i = h + 1
} else {
j = h
@ -440,7 +440,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
for start < r {
c := int(uint(start+r) >> 1)
if !(data[p-c] < data[c]) {
if !cmpLess(data[p-c], data[c]) {
start = c + 1
} else {
r = c
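The Ordered variants make the matching change: raw x < y comparisons are routed through a cmpLess helper whose definition is not part of these hunks. Assuming it mirrors the standard library's cmp.Less (NaN sorts before every non-NaN value, so float slices containing NaN still sort deterministically), a sketch might look like the following; ordered, isNaN, and cmpLessSketch are illustrative names chosen here, not the vendored identifiers.

package main

import (
	"fmt"
	"math"
)

// ordered approximates constraints.Ordered for this sketch.
type ordered interface {
	~int | ~int8 | ~int16 | ~int32 | ~int64 |
		~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
		~float32 | ~float64 | ~string
}

// isNaN reports whether x is a NaN: NaN is the only value that is not
// equal to itself, so the check is valid for every ordered type.
func isNaN[T ordered](x T) bool { return x != x }

// cmpLessSketch is an assumed stand-in for the vendored cmpLess helper:
// an ordinary < comparison, except that NaN sorts before any non-NaN value.
func cmpLessSketch[T ordered](x, y T) bool {
	return (isNaN(x) && !isNaN(y)) || x < y
}

func main() {
	fmt.Println(cmpLessSketch(1, 2))            // true
	fmt.Println(cmpLessSketch(math.NaN(), 1.0)) // true: NaN sorts first
	fmt.Println(cmpLessSketch(1.0, math.NaN())) // false
}

In the hunks above, every plain x < y from the previous revision becomes cmpLess(x, y), presumably to keep the Ordered sorters consistent with the comparator-based ones when floating-point NaNs are present.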


@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build appengine
// +build appengine
package internal
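The last source change appears to drop only the legacy // +build line: since Go 1.17 the //go:build form is the canonical build constraint, and the old comment is needed only for toolchains older than 1.17. A minimal file gated the modern way would look like this; it is a sketch of the syntax only, not the vendored file's contents.

//go:build appengine

// Package internal compiles only when the "appengine" build tag is set,
// e.g. via: go build -tags appengine ./...
package internal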

vendor/modules.txt

@ -156,7 +156,7 @@ github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/pkg/sshagent
github.com/containers/buildah/pkg/util
github.com/containers/buildah/util
# github.com/containers/common v0.55.1-0.20230808082859-b13df748a2b8
# github.com/containers/common v0.55.1-0.20230811093040-524b4d5c12f9
## explicit; go 1.18
github.com/containers/common/libimage
github.com/containers/common/libimage/define
@ -214,8 +214,8 @@ github.com/containers/common/version
# github.com/containers/conmon v2.0.20+incompatible
## explicit
github.com/containers/conmon/runner/config
# github.com/containers/image/v5 v5.26.1-0.20230801083106-fcf7f0e1712a
## explicit; go 1.18
# github.com/containers/image/v5 v5.26.1-0.20230807184415-3fb422379cfa
## explicit; go 1.19
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
github.com/containers/image/v5/directory/explicitfilepath
@ -1059,7 +1059,7 @@ golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/ssh/knownhosts
# golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df
# golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b
## explicit; go 1.20
golang.org/x/exp/constraints
golang.org/x/exp/maps
@ -1083,8 +1083,8 @@ golang.org/x/net/internal/socks
golang.org/x/net/internal/timeseries
golang.org/x/net/proxy
golang.org/x/net/trace
# golang.org/x/oauth2 v0.10.0
## explicit; go 1.17
# golang.org/x/oauth2 v0.11.0
## explicit; go 1.18
golang.org/x/oauth2
golang.org/x/oauth2/internal
# golang.org/x/sync v0.3.0