vendor: bump buildah to v1.32.1-0.20231012130144-244170240d85
Signed-off-by: Aditya R <arajan@redhat.com>
vendor/github.com/containers/luksy/.cirrus.yml (generated, vendored): 3 lines changed
@@ -3,13 +3,14 @@ docker_builder:
   env:
     HOME: /root
     DEBIAN_FRONTEND: noninteractive
     CIRRUS_LOG_TIMESTAMP: true
   setup_script: |
     apt-get -q update
     apt-get -q install -y bats cryptsetup golang
     go version
     make
   unit_test_script:
-    go test -v -cover
+    go test -timeout 45m -v -cover
   defaults_script: |
     bats -f defaults ./tests
   aes_script: |
vendor/github.com/containers/luksy/Makefile (generated, vendored): 6 lines changed
@@ -4,11 +4,11 @@ BATS = bats
 all: luksy
 
 luksy: cmd/luksy/*.go *.go
-	$(GO) build -o luksy ./cmd/luksy
+	$(GO) build -o luksy$(shell go env GOEXE) ./cmd/luksy
 
 clean:
-	$(RM) luksy luksy.test
+	$(RM) luksy$(shell go env GOEXE) luksy.test
 
 test:
-	$(GO) test
+	$(GO) test -timeout 45m -v -cover
 	$(BATS) ./tests
vendor/github.com/containers/luksy/OWNERS (generated, vendored, new file): 4 lines changed
@@ -0,0 +1,4 @@
+approvers:
+  - nalind
+reviewers:
+  - nalind
vendor/github.com/containers/luksy/decrypt.go (generated, vendored): 27 lines changed
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
+	"io"
 	"os"
 	"strconv"
@@ -11,14 +12,23 @@ import (
 	"golang.org/x/crypto/pbkdf2"
 )
 
+// ReaderAtSeekCloser is a combination of io.ReaderAt, io.Seeker, and io.Closer,
+// which is all we really need from an encrypted file.
+type ReaderAtSeekCloser interface {
+	io.ReaderAt
+	io.Seeker
+	io.Closer
+}
+
 // Decrypt attempts to verify the specified password using information from the
 // header and read from the specified file.
 //
 // Returns a function which will decrypt payload blocks in succession, the size
 // of chunks of data that the function expects, the offset in the file where
-// the payload begins, and the size of the payload.
-func (h V1Header) Decrypt(password string, f *os.File) (func([]byte) ([]byte, error), int, int64, int64, error) {
-	st, err := f.Stat()
+// the payload begins, and the size of the payload, assuming the payload runs
+// to the end of the file.
+func (h V1Header) Decrypt(password string, f ReaderAtSeekCloser) (func([]byte) ([]byte, error), int, int64, int64, error) {
+	size, err := f.Seek(0, io.SeekEnd)
 	if err != nil {
 		return nil, -1, -1, -1, err
 	}
@@ -70,7 +80,7 @@ func (h V1Header) Decrypt(password string, f *os.File) (func([]byte) ([]byte, er
 		}
 		if bytes.Equal(mkcandidateDerived, h.MKDigest()) {
 			payloadOffset := int64(h.PayloadOffset() * V1SectorSize)
-			return decryptStream, V1SectorSize, payloadOffset, st.Size() - payloadOffset, nil
+			return decryptStream, V1SectorSize, payloadOffset, size - payloadOffset, nil
 		}
 	}
 	if activeKeys == 0 {
@@ -84,8 +94,9 @@ func (h V1Header) Decrypt(password string, f *os.File) (func([]byte) ([]byte, er
 //
 // Returns a function which will decrypt payload blocks in succession, the size
 // of chunks of data that the function expects, the offset in the file where
-// the payload begins, and the size of the payload.
-func (h V2Header) Decrypt(password string, f *os.File, j V2JSON) (func([]byte) ([]byte, error), int, int64, int64, error) {
+// the payload begins, and the size of the payload, assuming the payload runs
+// to the end of the file.
+func (h V2Header) Decrypt(password string, f ReaderAtSeekCloser, j V2JSON) (func([]byte) ([]byte, error), int, int64, int64, error) {
 	foundDigests := 0
 	for d, digest := range j.Digests {
 		if digest.Type != "pbkdf2" {
@@ -117,11 +128,11 @@ func (h V2Header) Decrypt(password string, f *os.File, j V2JSON) (func([]byte) (
 			}
 			payloadOffset = tmp
 			if segment.Size == "dynamic" {
-				st, err := f.Stat()
+				size, err := f.Seek(0, io.SeekEnd)
 				if err != nil {
 					continue
 				}
-				payloadSize = st.Size() - payloadOffset
+				payloadSize = size - payloadOffset
 			} else {
 				payloadSize, err = strconv.ParseInt(segment.Size, 10, 64)
 				if err != nil {
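With the change above, V1Header.Decrypt and V2Header.Decrypt accept any ReaderAtSeekCloser rather than an *os.File (an *os.File still satisfies the interface). The following is a minimal sketch of how a caller might drive the updated API; it is not part of this commit, it assumes the vendored package is imported as github.com/containers/luksy, the image path and passphrase are placeholders, the LUKSv2 branch is omitted, and panics stand in for real error handling.

package main

import (
	"fmt"
	"os"

	"github.com/containers/luksy"
)

func main() {
	// *os.File satisfies the new ReaderAtSeekCloser interface
	// (io.ReaderAt + io.Seeker + io.Closer), so existing callers keep working.
	f, err := os.Open("/path/to/encrypted.img") // placeholder path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	v1, _, _, _, err := luksy.ReadHeaders(f, luksy.ReadHeaderOptions{})
	if err != nil {
		panic(err)
	}
	if v1 == nil {
		panic("not a LUKSv1 image; the LUKSv2 path is omitted in this sketch")
	}

	// Decrypt returns a per-block decryption function plus the block size,
	// payload offset, and payload size described in the comments above.
	decrypt, blockSize, payloadOffset, payloadSize, err := v1.Decrypt("passphrase", f)
	if err != nil {
		panic(err)
	}

	// Decrypt just the first payload block.
	buf := make([]byte, blockSize)
	if _, err := f.ReadAt(buf, payloadOffset); err != nil {
		panic(err)
	}
	plaintext, err := decrypt(buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("payload: %d bytes; decrypted first block of %d bytes\n", payloadSize, len(plaintext))
}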
vendor/github.com/containers/luksy/encrypt.go (generated, vendored): 4 lines changed
@@ -246,8 +246,8 @@ func EncryptV2(password []string, cipher string, payloadSectorSize int) ([]byte,
 		return nil, nil, -1, errors.New("internal error")
 	}
 	iterations := IterationsPBKDF2(tuningSalt, len(mkey), hasher)
-	timeCost := 1
-	threadsCost := 4
+	timeCost := 16
+	threadsCost := 16
 	memoryCost := MemoryCostArgon2(tuningSalt, len(mkey), timeCost, threadsCost)
 	priority := V2JSONKeyslotPriorityNormal
 	var stripes [][]byte
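For context on the tuning above: timeCost, threadsCost, and memoryCost are standard Argon2 cost parameters, and raising them makes passphrase brute-forcing more expensive (and the test suite slower, which is why the 45m timeouts appear elsewhere in this bump). The sketch below only illustrates how such values plug into golang.org/x/crypto/argon2; the salt, passphrase, memory figure, and choice of the argon2i variant are assumptions for the example and do not reproduce luksy's keyslot handling, which derives memoryCost via MemoryCostArgon2 and splits the key across stripes.

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/argon2"
)

func main() {
	// Hypothetical values standing in for the tuned costs above.
	timeCost, threadsCost := 16, 16
	memoryCost := 64 * 1024 // KiB; luksy computes this via MemoryCostArgon2 instead

	salt := make([]byte, 32)
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}

	// argon2i key derivation; higher time/threads/memory costs slow each guess.
	key := argon2.Key([]byte("passphrase"), salt,
		uint32(timeCost), uint32(memoryCost), uint8(threadsCost), 32)
	fmt.Printf("derived %d-byte key\n", len(key))
}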
vendor/github.com/containers/luksy/encryption.go (generated, vendored): 149 lines changed
@@ -417,9 +417,22 @@ func roundUpToMultiple(i, factor int) int {
 	if i < 0 {
 		return 0
 	}
+	if factor < 1 {
+		return i
+	}
 	return i + ((factor - (i % factor)) % factor)
 }
 
+func roundDownToMultiple(i, factor int) int {
+	if i < 0 {
+		return 0
+	}
+	if factor < 1 {
+		return i
+	}
+	return i - (i % factor)
+}
+
 func hasherByName(name string) (func() hash.Hash, error) {
 	switch name {
 	case "sha1":
@@ -436,13 +449,39 @@ func hasherByName(name string) (func() hash.Hash, error) {
 }
 
 type wrapper struct {
-	fn                 func(plaintext []byte) ([]byte, error)
-	blockSize          int
-	buf                []byte
-	buffered, consumed int
-	reader             io.Reader
-	eof                bool
-	writer             io.Writer
+	fn        func(plaintext []byte) ([]byte, error)
+	blockSize int
+	buf       []byte
+	buffered  int
+	processed int
+	reader    io.Reader
+	eof       bool
+	writer    io.Writer
 }
 
+func (w *wrapper) partialWrite() error {
+	if w.buffered-w.processed >= w.blockSize {
+		toProcess := roundDownToMultiple(w.buffered-w.processed, w.blockSize)
+		processed, err := w.fn(w.buf[w.processed : w.processed+toProcess])
+		if err != nil {
+			return err
+		}
+		nProcessed := copy(w.buf[w.processed:], processed)
+		w.processed += nProcessed
+	}
+	if w.processed >= w.blockSize {
+		nWritten, err := w.writer.Write(w.buf[:w.processed])
+		if err != nil {
+			return err
+		}
+		copy(w.buf, w.buf[nWritten:w.buffered])
+		w.buffered -= nWritten
+		w.processed -= nWritten
+		if w.processed != 0 {
+			return fmt.Errorf("short write: %d != %d", nWritten, nWritten+w.processed)
+		}
+	}
+	return nil
+}
+
 func (w *wrapper) Write(buf []byte) (int, error) {
@@ -451,19 +490,8 @@ func (w *wrapper) Write(buf []byte) (int, error) {
 		nBuffered := copy(w.buf[w.buffered:], buf[n:])
 		w.buffered += nBuffered
 		n += nBuffered
-		if w.buffered == len(w.buf) {
-			processed, err := w.fn(w.buf)
-			if err != nil {
-				return n, err
-			}
-			nWritten, err := w.writer.Write(processed)
-			if err != nil {
-				return n, err
-			}
-			w.buffered -= nWritten
-			if nWritten != len(processed) {
-				return n, fmt.Errorf("short write: %d != %d", nWritten, len(processed))
-			}
+		if err := w.partialWrite(); err != nil {
+			return n, err
 		}
 	}
 	return n, nil
@@ -472,66 +500,73 @@ func (w *wrapper) Write(buf []byte) (int, error) {
 func (w *wrapper) Read(buf []byte) (int, error) {
 	n := 0
 	for n < len(buf) {
-		nRead := copy(buf[n:], w.buf[w.consumed:])
-		w.consumed += nRead
-		n += nRead
-		if w.consumed == len(w.buf) && !w.eof {
-			nRead, err := w.reader.Read(w.buf)
-			w.eof = errors.Is(err, io.EOF)
-			if err != nil && !w.eof {
-				return n, err
+		if !w.eof {
+			nRead, err := w.reader.Read(w.buf[w.buffered:])
+			if err != nil {
+				if !errors.Is(err, io.EOF) {
+					w.buffered += nRead
+					return n, err
+				}
+				w.eof = true
 			}
-			if nRead != len(w.buf) && !w.eof {
-				return n, fmt.Errorf("short read: %d != %d", nRead, len(w.buf))
-			}
-			processed, err := w.fn(w.buf[:nRead])
+			w.buffered += nRead
+		}
+		if w.buffered == 0 && w.eof {
+			return n, io.EOF
+		}
+		if w.buffered-w.processed >= w.blockSize {
+			toProcess := roundDownToMultiple(w.buffered-w.processed, w.blockSize)
+			processed, err := w.fn(w.buf[w.processed : w.processed+toProcess])
 			if err != nil {
 				return n, err
 			}
-			w.buf = processed
-			w.consumed = 0
+			nProcessed := copy(w.buf[w.processed:], processed)
+			w.processed += nProcessed
 		}
+		nRead := copy(buf[n:], w.buf[:w.processed])
+		n += nRead
+		copy(w.buf, w.buf[nRead:w.buffered])
+		w.processed -= nRead
+		w.buffered -= nRead
+		if w.buffered-w.processed < w.blockSize {
+			break
+		}
 	}
-	var eof error
-	if w.consumed == len(w.buf) && w.eof {
-		eof = io.EOF
-	}
-	return n, eof
+	return n, nil
 }
 
 func (w *wrapper) Close() error {
 	if w.writer != nil {
 		if w.buffered%w.blockSize != 0 {
-			w.buffered += copy(w.buf[w.buffered:], make([]byte, roundUpToMultiple(w.buffered%w.blockSize, w.blockSize)))
+			nPadding := w.blockSize - w.buffered%w.blockSize
+			nWritten, err := w.Write(make([]byte, nPadding))
+			if err != nil {
+				return fmt.Errorf("flushing write: %v", err)
+			}
+			if nWritten < nPadding {
+				return fmt.Errorf("flushing write: %d != %d", nPadding, nWritten)
+			}
 		}
-		processed, err := w.fn(w.buf[:w.buffered])
-		if err != nil {
-			return err
-		}
-		nWritten, err := w.writer.Write(processed)
-		if err != nil {
-			return err
-		}
-		if nWritten != len(processed) {
-			return fmt.Errorf("short write: %d != %d", nWritten, len(processed))
-		}
-		w.buffered = 0
 	}
 	return nil
 }
 
 // EncryptWriter creates an io.WriteCloser which buffers writes through an
-// encryption function. After writing a final block, the returned writer
-// should be closed.
+// encryption function, transforming and writing multiples of the blockSize.
+// After writing a final block, the returned writer should be closed.
+// If only a partial block has been written when Close() is called, a final
+// block with its length padded with zero bytes will be transformed and
+// written.
 func EncryptWriter(fn func(plaintext []byte) ([]byte, error), writer io.Writer, blockSize int) io.WriteCloser {
 	bufferSize := roundUpToMultiple(1024*1024, blockSize)
 	return &wrapper{fn: fn, blockSize: blockSize, buf: make([]byte, bufferSize), writer: writer}
 }
 
 // DecryptReader creates an io.ReadCloser which buffers reads through a
-// decryption function. When data will no longer be read, the returned reader
-// should be closed.
+// decryption function, decrypting and returning multiples of the blockSize
+// until it reaches the end of the file. When data will no longer be read, the
+// returned reader should be closed.
 func DecryptReader(fn func(ciphertext []byte) ([]byte, error), reader io.Reader, blockSize int) io.ReadCloser {
 	bufferSize := roundUpToMultiple(1024*1024, blockSize)
-	return &wrapper{fn: fn, blockSize: blockSize, buf: make([]byte, bufferSize), consumed: bufferSize, reader: reader}
+	return &wrapper{fn: fn, blockSize: blockSize, buf: make([]byte, bufferSize), reader: reader}
 }
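The updated doc comments describe block-granular buffering: EncryptWriter transforms and flushes whole blockSize multiples and zero-pads a final partial block on Close, while DecryptReader hands back blockSize multiples until EOF. A small sketch of that behavior follows; it is not from this commit, it assumes the package is imported as github.com/containers/luksy, and the XOR transform is a toy stand-in for the real per-sector encrypt/decrypt closures.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/containers/luksy"
)

func main() {
	const blockSize = 512

	// Toy "cipher": XOR every byte with 0xAA. It stands in for the
	// block-granular encrypt/decrypt functions that luksy's header code
	// would normally supply; it is emphatically not real encryption.
	xor := func(data []byte) ([]byte, error) {
		out := make([]byte, len(data))
		for i, b := range data {
			out[i] = b ^ 0xAA
		}
		return out, nil
	}

	// Encrypt: writes are buffered and flushed in blockSize multiples;
	// Close() pads and flushes any final partial block.
	var ciphertext bytes.Buffer
	w := luksy.EncryptWriter(xor, &ciphertext, blockSize)
	if _, err := w.Write([]byte("hello, luks")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Println("ciphertext length:", ciphertext.Len()) // padded up to 512

	// Decrypt: reads come back in blockSize multiples until EOF.
	r := luksy.DecryptReader(xor, &ciphertext, blockSize)
	defer r.Close()
	plaintext, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", plaintext[:11]) // "hello, luks", followed by zero padding
}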
vendor/github.com/containers/luksy/luks.go (generated, vendored): 4 lines changed
@@ -4,7 +4,7 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
-	"os"
+	"io"
 )
 
 // ReadHeaderOptions can control some of what ReadHeaders() does.
@@ -13,7 +13,7 @@ type ReadHeaderOptions struct{}
 // ReadHeaders reads LUKS headers from the specified file, returning either a
 // LUKSv1 header, or two LUKSv2 headers and a LUKSv2 JSON block, depending on
 // which format is detected.
-func ReadHeaders(f *os.File, options ReadHeaderOptions) (*V1Header, *V2Header, *V2Header, *V2JSON, error) {
+func ReadHeaders(f io.ReaderAt, options ReadHeaderOptions) (*V1Header, *V2Header, *V2Header, *V2JSON, error) {
 	var v1 V1Header
 	var v2a, v2b V2Header
 	n, err := f.ReadAt(v2a[:], 0)
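ReadHeaders now needs only an io.ReaderAt, so headers can be parsed from sources other than an open *os.File, for example an image that has already been read into memory. A hedged sketch follows, assuming the package is imported as github.com/containers/luksy; the file path is a placeholder and this code is not part of the commit.

package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/containers/luksy"
)

func main() {
	// The whole image is loaded into memory here only to show that any
	// io.ReaderAt works; a *bytes.Reader satisfies the relaxed signature.
	data, err := os.ReadFile("/path/to/encrypted.img") // placeholder path
	if err != nil {
		panic(err)
	}

	v1, v2a, v2b, v2json, err := luksy.ReadHeaders(bytes.NewReader(data), luksy.ReadHeaderOptions{})
	if err != nil {
		panic(err)
	}
	switch {
	case v1 != nil:
		fmt.Println("found a LUKSv1 header")
	case v2a != nil && v2b != nil && v2json != nil:
		fmt.Println("found LUKSv2 headers and JSON metadata")
	}
}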