Mirror of https://github.com/containers/podman.git
Synced 2025-06-22 01:48:54 +08:00
Vendor Buildah 1.10.1
As the title says, vendor Buildah v1.10.1.

Signed-off-by: TomSweeneyRedHat <tsweeney@redhat.com>
go.mod | 7

@@ -13,11 +13,11 @@ require (
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc // indirect
 github.com/containernetworking/cni v0.7.1
 github.com/containernetworking/plugins v0.8.1
-github.com/containers/buildah v1.9.2
+github.com/containers/buildah v1.10.1
 github.com/containers/conmon v0.3.0 // indirect
-github.com/containers/image v2.0.1+incompatible
+github.com/containers/image v3.0.2+incompatible
 github.com/containers/psgo v1.3.1
-github.com/containers/storage v1.12.16
+github.com/containers/storage v1.13.1
 github.com/coreos/bbolt v1.3.3 // indirect
 github.com/coreos/etcd v3.3.13+incompatible // indirect
 github.com/coreos/go-iptables v0.4.1
@@ -91,7 +91,6 @@ require (
 github.com/ugorji/go v1.1.5-pre // indirect
 github.com/ulikunitz/xz v0.5.6 // indirect
 github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b
-github.com/vbauerster/mpb v3.4.0+incompatible // indirect
 github.com/vishvananda/netlink v1.0.0
 github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f // indirect
 go.etcd.io/bbolt v1.3.3 // indirect
go.sum | 6

@@ -71,12 +71,16 @@ github.com/containers/buildah v1.9.0 h1:ktVRCGNoVfW8PlTuCKUeh+zGdqn1Nik80DSWvGX+
 github.com/containers/buildah v1.9.0/go.mod h1:1CsiLJvyU+h+wOjnqJJOWuJCVcMxZOr5HN/gHGdzJxY=
 github.com/containers/buildah v1.9.2 h1:dg87r1W1poWVQE0lTmP3BzaqgEI5IRudZ3jKjNIZ3xQ=
 github.com/containers/buildah v1.9.2/go.mod h1:UFq7EQtnDEEZv42AE7ZbmQMN+mSWSg1JIMwjYW1bn48=
+github.com/containers/buildah v1.10.1 h1:YBFHZkpbWCxUR/gjRAZrRzs2E0DfdUe3+/8OA9filWY=
+github.com/containers/buildah v1.10.1/go.mod h1:ZTyMFo3IQlu9tYndtnAf0Tjf2NdeUL6bY2/TpP9uIuU=
 github.com/containers/conmon v0.3.0 h1:NDkYcQAu1BDZSVLh6xrY9jh/WmiDaUloKzRM16237XM=
 github.com/containers/conmon v0.3.0/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
 github.com/containers/image v2.0.0+incompatible h1:FTr6Br7jlIKNCKMjSOMbAxKp2keQ0//jzJaYNTVhauk=
 github.com/containers/image v2.0.0+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M=
 github.com/containers/image v2.0.1+incompatible h1:w39mlElA/aSFZ6moFa5N+A4MWu9c8hgdMiMMYnH94Hs=
 github.com/containers/image v2.0.1+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M=
+github.com/containers/image v3.0.2+incompatible h1:B1lqAE8MUPCrsBLE86J0gnXleeRq8zJnQryhiiGQNyE=
+github.com/containers/image v3.0.2+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M=
 github.com/containers/psgo v1.3.0 h1:kDhiA4gNNyJ2qCzmOuBf6AmrF/Pp+6Jo98P68R7fB8I=
 github.com/containers/psgo v1.3.0/go.mod h1:7MELvPTW1fj6yMrwD9I1Iasx1vU+hKlRkHXAJ51sFtU=
 github.com/containers/psgo v1.3.1-0.20190626112706-fbef66e4ce92 h1:aVJs/Av0Yc9uNoWnIwmG+6Z+XozuRXFwvLwAOVmwlvI=
@@ -93,6 +97,8 @@ github.com/containers/storage v1.12.13 h1:GtaLCY8p1Drlk1Oew581jGvB137UaO+kpz0HII
 github.com/containers/storage v1.12.13/go.mod h1:+RirK6VQAqskQlaTBrOG6ulDvn4si2QjFE1NZCn06MM=
 github.com/containers/storage v1.12.16 h1:zePYS1GiG8CuRqLCeA0ufx4X27K06HcJLV50DdojL+Y=
 github.com/containers/storage v1.12.16/go.mod h1:QsZp4XMJjyPNNbQHZeyNW3OmhwsWviI+7S6iOcu6a4c=
+github.com/containers/storage v1.13.1 h1:rjVirLS9fCGkUFlLDZEoGDDUugtIf46DufWvJu08wxQ=
+github.com/containers/storage v1.13.1/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
vendor/github.com/containers/buildah/.golangci.yml | 2 (generated, vendored)

@@ -17,6 +17,7 @@ linters:
 - errcheck
 - gofmt
 - goimports
+- golint
 - gosimple
 - govet
 - ineffassign
@@ -35,7 +36,6 @@ linters:
 # - goconst
 # - gocritic
 # - gocyclo
-# - golint
 # - gosec
 # - interfacer
 # - lll
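Enabling golint here is what drives the identifier renames further down in chroot/run.go (rootUid becomes rootUID). A minimal sketch of the naming rule golint enforces, with made-up variable names rather than anything taken from Buildah:

```go
package main

import "fmt"

func main() {
	// golint flags mixed-case initialisms: "rootUid"/"rootGid" would be
	// reported with "should be rootUID"/"should be rootGID".
	rootUID, rootGID := 0, 0 // initialisms spelled in all caps, lint clean
	fmt.Printf("container stdio will be owned by %d/%d\n", rootUID, rootGID)
}
```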
vendor/github.com/containers/buildah/CHANGELOG.md | 191 (generated, vendored)

@@ -2,6 +2,111 @@
 
 # Changelog
 
+## v1.10.0 (2019-08-02)
+vendor github.com/containers/image@v3.0.0
+Remove GO111MODULE in favor of `-mod=vendor`
+Vendor in containers/storage v1.12.16
+Add '-' minus syntax for removal of config values
+tests: enable overlay tests for rootless
+rootless, overlay: use fuse-overlayfs
+vendor github.com/containers/image@v2.0.1
+Added '-' syntax to remove volume config option
+delete `successfully pushed` message
+Add golint linter and apply fixes
+vendor github.com/containers/storage@v1.12.15
+Change wait to sleep in buildahimage readme
+Handle ReadOnly images when deleting images
+Add support for listing read/only images
+
+## v1.9.2 (2019-07-19)
+from/import: record the base image's digest, if it has one
+Fix CNI version retrieval to not require network connection
+Add misspell linter and apply fixes
+Add goimports linter and apply fixes
+Add stylecheck linter and apply fixes
+Add unconvert linter and apply fixes
+image: make sure we don't try to use zstd compression
+run.bats: skip the "z" flag when testing --mount
+Update to runc v1.0.0-rc8
+Update to match updated runtime-tools API
+bump github.com/opencontainers/runtime-tools to v0.9.0
+Build e2e tests using the proper build tags
+Add unparam linter and apply fixes
+Run: correct a typo in the --cap-add help text
+unshare: add a --mount flag
+fix push check image name is not empty
+Bump to v1.9.2-dev
+
+## v1.9.1 (2019-07-12)
+add: fix slow copy with no excludes
+Add errcheck linter and fix missing error check
+Improve tests/tools/Makefile parallelism and abstraction
+Fix response body not closed resource leak
+Switch to golangci-lint
+Add gomod instructions and mailing list links
+On Masked path, check if /dev/null already mounted before mounting
+Update to containers/storage v1.12.13
+Refactor code in package imagebuildah
+Add rootless podman with NFS issue in documentation
+Add --mount for buildah run
+import method ValidateVolumeOpts from libpod
+Fix typo
+Makefile: set GO111MODULE=off
+rootless: add the built-in slirp DNS server
+Update docker/libnetwork to get rid of outdated sctp package
+Update buildah-login.md
+migrate to go modules
+install.md: mention go modules
+tests/tools: go module for test binaries
+fix --volume splits comma delimited option
+Add bud test for RUN with a priv'd command
+vendor logrus v1.4.2
+pkg/cli: panic when flags can't be hidden
+pkg/unshare: check all errors
+pull: check error during report write
+run_linux.go: ignore unchecked errors
+conformance test: catch copy error
+chroot/run_test.go: export funcs to actually be executed
+tests/imgtype: ignore error when shutting down the store
+testreport: check json error
+bind/util.go: remove unused func
+rm chroot/util.go
+imagebuildah: remove unused `dedupeStringSlice`
+StageExecutor: EnsureContainerPath: catch error from SecureJoin()
+imagebuildah/build.go: return <expr> instead of branching
+rmi: avoid redundant branching
+conformance tests: nilness: allocate map
+imagebuildah/build.go: avoid redundant `filepath.Join()`
+imagebuildah/build.go: avoid redundant `os.Stat()`
+imagebuildah: omit comparison to bool
+fix "ineffectual assignment" lint errors
+docker: ignore "repeats json tag" lint error
+pkg/unshare: use `...` instead of iterating a slice
+conformance: bud test: use raw strings for regexes
+conformance suite: remove unused func/var
+buildah test suite: remove unused vars/funcs
+testreport: fix golangci-lint errors
+util: remove redundant `return` statement
+chroot: only log clean-up errors
+images_test: ignore golangci-lint error
+blobcache: log error when draining the pipe
+imagebuildah: check errors in deferred calls
+chroot: fix error handling in deferred funcs
+cmd: check all errors
+chroot/run_test.go: check errors
+chroot/run.go: check errors in deferred calls
+imagebuildah.Executor: remove unused onbuild field
+docker/types.go: remove unused struct fields
+util: use strings.ContainsRune instead of index check
+Cirrus: Initial implementation
+Bump to v1.9.1-dev
+
+## v1.9.0 (2019-06-15)
+buildah-run: fix-out-of-range panic (2)
+Bump back to v1.9.0-dev
+
 ## v1.8.4 (2019-06-13)
 Update containers/image to v2.0.0
 run: fix hang with run and --isolation=chroot
@@ -32,49 +137,49 @@
 Cleanup Overlay Mounts content
 
 ## v1.8.3 (2019-06-04)
-* Add support for file secret mounts
+Add support for file secret mounts
-* Add ability to skip secrets in mounts file
+Add ability to skip secrets in mounts file
-* allow 32bit builds
+allow 32bit builds
-* fix tutorial instructions
+fix tutorial instructions
-* imagebuilder: pass the right contextDir to Add()
+imagebuilder: pass the right contextDir to Add()
-* add: use fileutils.PatternMatcher for .dockerignore
+add: use fileutils.PatternMatcher for .dockerignore
-* bud.bats: add another .dockerignore test
+bud.bats: add another .dockerignore test
-* unshare: fallback to single usermapping
+unshare: fallback to single usermapping
-* addHelperSymlink: clear the destination on os.IsExist errors
+addHelperSymlink: clear the destination on os.IsExist errors
-* bud.bats: test replacing symbolic links
+bud.bats: test replacing symbolic links
-* imagebuildah: fix handling of destinations that end with '/'
+imagebuildah: fix handling of destinations that end with '/'
-* bud.bats: test COPY with a final "/" in the destination
+bud.bats: test COPY with a final "/" in the destination
-* linux: add check for sysctl before using it
+linux: add check for sysctl before using it
-* unshare: set _CONTAINERS_ROOTLESS_GID
+unshare: set _CONTAINERS_ROOTLESS_GID
-* Rework buildahimamges
+Rework buildahimamges
-* build context: support https git repos
+build context: support https git repos
-* Add a test for ENV special chars behaviour
+Add a test for ENV special chars behaviour
-* Check in new Dockerfiles
+Check in new Dockerfiles
-* Apply custom SHELL during build time
+Apply custom SHELL during build time
-* config: expand variables only at the command line
+config: expand variables only at the command line
-* SetEnv: we only need to expand v once
+SetEnv: we only need to expand v once
-* Add default /root if empty on chroot iso
+Add default /root if empty on chroot iso
-* Add support for Overlay volumes into the container.
+Add support for Overlay volumes into the container.
-* Export buildah validate volume functions so it can share code with libpod
+Export buildah validate volume functions so it can share code with libpod
-* Bump baseline test to F30
+Bump baseline test to F30
-* Fix rootless handling of /dev/shm size
+Fix rootless handling of /dev/shm size
-* Avoid fmt.Printf() in the library
+Avoid fmt.Printf() in the library
-* imagebuildah: tighten cache checking back up
+imagebuildah: tighten cache checking back up
-* Handle WORKDIR with dangling target
+Handle WORKDIR with dangling target
-* Default Authfile to proper path
+Default Authfile to proper path
-* Make buildah run --isolation follow BUILDAH_ISOLATION environment
+Make buildah run --isolation follow BUILDAH_ISOLATION environment
-* Vendor in latest containers/storage and containers/image
+Vendor in latest containers/storage and containers/image
-* getParent/getChildren: handle layerless images
+getParent/getChildren: handle layerless images
-* imagebuildah: recognize cache images for layerless images
+imagebuildah: recognize cache images for layerless images
-* bud.bats: test scratch images with --layers caching
+bud.bats: test scratch images with --layers caching
-* Get CHANGELOG.md updates
+Get CHANGELOG.md updates
-* Add some symlinks to test our .dockerignore logic
+Add some symlinks to test our .dockerignore logic
-* imagebuildah: addHelper: handle symbolic links
+imagebuildah: addHelper: handle symbolic links
-* commit/push: use an everything-allowed policy
+commit/push: use an everything-allowed policy
-* Correct manpage formatting in files section
+Correct manpage formatting in files section
-* Remove must be root statement from buildah doc
+Remove must be root statement from buildah doc
-* Change image names to stable, testing and upstream
+Change image names to stable, testing and upstream
-* Bump back to v1.9.0-dev
+Bump back to v1.9.0-dev
 
 ## v1.8.2 (2019-05-02)
 Vendor Storage 1.12.6
vendor/github.com/containers/buildah/Makefile | 21 (generated, vendored)

@@ -1,8 +1,7 @@
-export GO111MODULE=off
-
 SELINUXTAG := $(shell ./selinux_tag.sh)
+APPARMORTAG := $(shell hack/apparmor_tag.sh)
 STORAGETAGS := $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./libdm_tag.sh) $(shell ./ostree_tag.sh)
-SECURITYTAGS ?= seccomp $(SELINUXTAG)
+SECURITYTAGS ?= seccomp $(SELINUXTAG) $(APPARMORTAG)
 TAGS ?= $(SECURITYTAGS) $(STORAGETAGS)
 BUILDTAGS += $(TAGS)
 PREFIX := /usr/local
@@ -10,9 +9,17 @@ BINDIR := $(PREFIX)/bin
 BASHINSTALLDIR = $(PREFIX)/share/bash-completion/completions
 BUILDFLAGS := -tags "$(BUILDTAGS)"
 BUILDAH := buildah
 
 GO := go
 GO110 := 1.10
 GOVERSION := $(findstring $(GO110),$(shell go version))
+# test for go module support
+ifeq ($(shell go help mod >/dev/null 2>&1 && echo true), true)
+export GO_BUILD=GO111MODULE=on $(GO) build -mod=vendor
+else
+export GO_BUILD=$(GO) build
+endif
+
 GIT_COMMIT ?= $(if $(shell git rev-parse --short HEAD),$(shell git rev-parse --short HEAD),$(error "git failed"))
 BUILD_INFO := $(if $(shell date +%s),$(shell date +%s),$(error "date failed"))
 STATIC_STORAGETAGS = "containers_image_ostree_stub containers_image_openpgp exclude_graphdriver_devicemapper $(STORAGE_TAGS)"
@@ -33,15 +40,15 @@ static: $(SOURCES)
 
 .PHONY: binary
 binary: $(SOURCES)
-	$(GO) build $(LDFLAGS) -o $(BUILDAH) $(BUILDFLAGS) ./cmd/buildah
+	$(GO_BUILD) $(LDFLAGS) -o $(BUILDAH) $(BUILDFLAGS) ./cmd/buildah
 
 buildah: binary
 
 darwin:
-	GOOS=darwin $(GO) build $(LDFLAGS) -o buildah.darwin -tags "containers_image_openpgp" ./cmd/buildah
+	GOOS=darwin $(GO_BUILD) $(LDFLAGS) -o buildah.darwin -tags "containers_image_openpgp" ./cmd/buildah
 
 imgtype: *.go docker/*.go util/*.go tests/imgtype/imgtype.go
-	$(GO) build $(LDFLAGS) -o imgtype $(BUILDFLAGS) ./tests/imgtype/imgtype.go
+	$(GO_BUILD) $(LDFLAGS) -o imgtype $(BUILDFLAGS) ./tests/imgtype/imgtype.go
 
 .PHONY: clean
 clean:
@@ -121,7 +128,7 @@ test-integration: install.tools
 	cd tests; ./test_runner.sh
 
 tests/testreport/testreport: tests/testreport/testreport.go
-	$(GO) build -ldflags "-linkmode external -extldflags -static" -tags "$(STORAGETAGS) $(SECURITYTAGS)" -o tests/testreport/testreport ./tests/testreport
+	$(GO_BUILD) -ldflags "-linkmode external -extldflags -static" -tags "$(STORAGETAGS) $(SECURITYTAGS)" -o tests/testreport/testreport ./tests/testreport
 
 .PHONY: test-unit
 test-unit: tests/testreport/testreport
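The new Makefile block feature-detects go module support by running `go help mod` and, only when it succeeds, builds with `GO111MODULE=on` and `-mod=vendor`. A rough Go translation of that probe, offered purely as an illustration of the detection logic (the helper name is made up, nothing here is part of Buildah's build system):

```go
package main

import (
	"fmt"
	"os/exec"
)

// hasModuleSupport mirrors the Makefile's `go help mod >/dev/null 2>&1` probe:
// if the subcommand exists, the toolchain understands modules and -mod=vendor.
func hasModuleSupport() bool {
	return exec.Command("go", "help", "mod").Run() == nil
}

func main() {
	if hasModuleSupport() {
		fmt.Println("build with: GO111MODULE=on go build -mod=vendor ./cmd/buildah")
	} else {
		fmt.Println("build with: go build ./cmd/buildah")
	}
}
```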
vendor/github.com/containers/buildah/buildah.go | 2 (generated, vendored)

@@ -26,7 +26,7 @@ const (
 Package = "buildah"
 // Version for the Package. Bump version in contrib/rpm/buildah.spec
 // too.
-Version = "1.9.2"
+Version = "1.10.1"
 // The value we use to identify what type of information, currently a
 // serialized Builder structure, we are using as per-container state.
 // This should only be changed when we make incompatible changes to
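Because podman vendors this package, any code in the embedding project can read the bumped constant directly to report which Buildah it was built against. A minimal sketch of doing so (the program itself is illustrative and not part of this commit):

```go
package main

import (
	"fmt"

	"github.com/containers/buildah"
)

func main() {
	// buildah.Version is the constant bumped in this diff ("1.10.1").
	fmt.Println("vendored buildah version:", buildah.Version)
}
```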
vendor/github.com/containers/buildah/changelog.txt | 26 (generated, vendored)

@@ -1,3 +1,29 @@
+- Changelog for v1.10.1 (2019-08-08)
+* Bump containers/image to v3.0.2 to fix keyring issue
+* Bug fix for volume minus syntax
+* Bump container/storage v1.13.1 and containers/image v3.0.1
+* bump github.com/containernetworking/cni to v0.7.1
+* Add overlayfs to fuse-overlayfs tip
+* Add automatic apparmor tag discovery
+* Fix bug whereby --get-login has no effect
+* Bump to v1.11.0-dev
+
+- Changelog for v1.10.0 (2019-08-02)
+* vendor github.com/containers/image@v3.0.0
+* Remove GO111MODULE in favor of `-mod=vendor`
+* Vendor in containers/storage v1.12.16
+* Add '-' minus syntax for removal of config values
+* tests: enable overlay tests for rootless
+* rootless, overlay: use fuse-overlayfs
+* vendor github.com/containers/image@v2.0.1
+* Added '-' syntax to remove volume config option
+* delete `successfully pushed` message
+* Add golint linter and apply fixes
+* vendor github.com/containers/storage@v1.12.15
+* Change wait to sleep in buildahimage readme
+* Handle ReadOnly images when deleting images
+* Add support for listing read/only images
+
 - Changelog for v1.9.2 (2019-07-19)
 * from/import: record the base image's digest, if it has one
 * Fix CNI version retrieval to not require network connection
vendor/github.com/containers/buildah/chroot/run.go | 16 (generated, vendored)

@@ -205,13 +205,13 @@ func runUsingChrootMain() {
 }
 
 // Prepare to shuttle stdio back and forth.
-rootUid32, rootGid32, err := util.GetHostRootIDs(options.Spec)
+rootUID32, rootGID32, err := util.GetHostRootIDs(options.Spec)
 if err != nil {
 logrus.Errorf("error determining ownership for container stdio")
 os.Exit(1)
 }
-rootUid := int(rootUid32)
+rootUID := int(rootUID32)
-rootGid := int(rootGid32)
+rootGID := int(rootGID32)
 relays := make(map[int]int)
 closeOnceRunning := []*os.File{}
 var ctty *os.File
@@ -288,7 +288,7 @@ func runUsingChrootMain() {
 // Open an *os.File object that we can pass to our child.
 ctty = os.NewFile(ptyFd, "/dev/tty")
 // Set ownership for the PTY.
-if err = ctty.Chown(rootUid, rootGid); err != nil {
+if err = ctty.Chown(rootUID, rootGID); err != nil {
 var cttyInfo unix.Stat_t
 err2 := unix.Fstat(int(ptyFd), &cttyInfo)
 from := ""
@@ -297,7 +297,7 @@ func runUsingChrootMain() {
 op = "changing"
 from = fmt.Sprintf("from %d/%d ", cttyInfo.Uid, cttyInfo.Gid)
 }
-logrus.Warnf("error %s ownership of container PTY %sto %d/%d: %v", op, from, rootUid, rootGid, err)
+logrus.Warnf("error %s ownership of container PTY %sto %d/%d: %v", op, from, rootUID, rootGID, err)
 }
 // Set permissions on the PTY.
 if err = ctty.Chmod(0620); err != nil {
@@ -336,15 +336,15 @@ func runUsingChrootMain() {
 fdDesc[unix.Stdout] = "stdout"
 fdDesc[unix.Stderr] = "stderr"
 // Set ownership for the pipes.
-if err = stdinRead.Chown(rootUid, rootGid); err != nil {
+if err = stdinRead.Chown(rootUID, rootGID); err != nil {
 logrus.Errorf("error setting ownership of container stdin pipe: %v", err)
 os.Exit(1)
 }
-if err = stdoutWrite.Chown(rootUid, rootGid); err != nil {
+if err = stdoutWrite.Chown(rootUID, rootGID); err != nil {
 logrus.Errorf("error setting ownership of container stdout pipe: %v", err)
 os.Exit(1)
 }
-if err = stderrWrite.Chown(rootUid, rootGid); err != nil {
+if err = stderrWrite.Chown(rootUID, rootGID); err != nil {
 logrus.Errorf("error setting ownership of container stderr pipe: %v", err)
 os.Exit(1)
 }
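The hunks above only rename identifiers to satisfy golint, but the surrounding pattern, chown-ing each stdio pipe end to the container's root IDs and bailing out on failure, is worth seeing in isolation. A self-contained sketch of that pattern with hypothetical names; it is not the Buildah code itself:

```go
package main

import (
	"log"
	"os"
)

// chownPipe creates a pipe and hands ownership of both ends to uid/gid,
// mirroring how runUsingChrootMain prepares the container stdio relays.
func chownPipe(uid, gid int) (*os.File, *os.File, error) {
	r, w, err := os.Pipe()
	if err != nil {
		return nil, nil, err
	}
	for _, f := range []*os.File{r, w} {
		if err := f.Chown(uid, gid); err != nil {
			r.Close()
			w.Close()
			return nil, nil, err
		}
	}
	return r, w, nil
}

func main() {
	// Chown to our own IDs is always permitted, so this runs unprivileged too.
	if _, _, err := chownPipe(os.Getuid(), os.Getgid()); err != nil {
		log.Fatalf("error setting ownership of pipe: %v", err)
	}
	log.Println("pipe ownership set")
}
```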
vendor/github.com/containers/buildah/config.go | 10 (generated, vendored)

@@ -422,6 +422,16 @@ func (b *Builder) Volumes() []string {
 return nil
 }
 
+// CheckVolume returns True if the location exists in the image's list of locations
+// which should be mounted from outside of the container when a container
+// based on an image built from this container is run
+
+func (b *Builder) CheckVolume(v string) bool {
+_, OCIv1Volume := b.OCIv1.Config.Volumes[v]
+_, DockerVolume := b.Docker.Config.Volumes[v]
+return OCIv1Volume || DockerVolume
+}
+
 // AddVolume adds a location to the image's list of locations which should be
 // mounted from outside of the container when a container based on an image
 // built from this container is run.
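CheckVolume is the helper behind the '-' volume-removal syntax mentioned in the changelog: callers can test whether the image configuration already records a mount point before adding or dropping it. A hypothetical usage sketch against a *buildah.Builder (ensureVolume is an illustrative name, not a Buildah API):

```go
package example

import "github.com/containers/buildah"

// ensureVolume records the mount point only when the image configuration
// (OCI or Docker) does not already carry it; purely illustrative.
func ensureVolume(b *buildah.Builder, location string) {
	if b.CheckVolume(location) {
		return // already present in the OCIv1 or Docker config
	}
	b.AddVolume(location)
}
```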
vendor/github.com/containers/buildah/go.mod | 28 (generated, vendored)

@@ -3,19 +3,16 @@ module github.com/containers/buildah
 go 1.12
 
 require (
-github.com/BurntSushi/toml v0.2.0 // indirect
-github.com/DataDog/zstd v1.4.0 // indirect
-github.com/Microsoft/hcsshim v0.8.3 // indirect
 github.com/VividCortex/ewma v1.1.1 // indirect
 github.com/blang/semver v3.5.0+incompatible // indirect
 github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 // indirect
-github.com/containernetworking/cni v0.7.0-rc2
+github.com/containernetworking/cni v0.7.1
-github.com/containers/image v2.0.0+incompatible
+github.com/containers/image v3.0.2+incompatible
-github.com/containers/storage v1.12.13
+github.com/containers/storage v1.13.1
 github.com/cyphar/filepath-securejoin v0.2.1
 github.com/docker/distribution v0.0.0-20170817175659-5f6282db7d65
 github.com/docker/docker-credential-helpers v0.6.1 // indirect
-github.com/docker/go-units v0.3.3
+github.com/docker/go-units v0.4.0
 github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316
 github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
 github.com/etcd-io/bbolt v1.3.2
@@ -26,12 +23,8 @@ require (
 github.com/imdario/mergo v0.3.6 // indirect
 github.com/inconshreveable/mousetrap v1.0.0 // indirect
 github.com/ishidawataru/sctp v0.0.0-20180918013207-6e2cb1366111 // indirect
-github.com/klauspost/compress v1.4.1 // indirect
-github.com/klauspost/cpuid v1.2.0 // indirect
-github.com/klauspost/pgzip v1.2.1 // indirect
 github.com/mattn/go-isatty v0.0.4 // indirect
-github.com/mattn/go-shellwords v1.0.3
+github.com/mattn/go-shellwords v1.0.5
-github.com/mistifyio/go-zfs v2.1.1+incompatible // indirect
 github.com/moby/moby v0.0.0-20171005181806-f8806b18b4b9 // indirect
 github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c // indirect
 github.com/onsi/ginkgo v1.6.0
@@ -43,26 +36,21 @@ require (
 github.com/opencontainers/runtime-tools v0.9.0
 github.com/opencontainers/selinux v1.2.2
 github.com/openshift/imagebuilder v1.1.0
-github.com/ostreedev/ostree-go v0.0.0-20181112201119-9ab99253d365 // indirect
 github.com/pkg/errors v0.8.1
-github.com/pquerna/ffjson v0.0.0-20171002144729-d49c2bc1aa13 // indirect
 github.com/seccomp/containers-golang v0.0.0-20180629143253-cdfdaa7543f4
 github.com/seccomp/libseccomp-golang v0.9.0
 github.com/sirupsen/logrus v1.4.2
 github.com/spf13/cobra v0.0.3
 github.com/spf13/pflag v1.0.3
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
-github.com/tchap/go-patricia v2.2.6+incompatible // indirect
 github.com/ulikunitz/xz v0.5.5 // indirect
-github.com/vbatts/tar-split v0.10.2 // indirect
-github.com/vbauerster/mpb v3.3.4+incompatible // indirect
+github.com/vbauerster/mpb v3.4.0+incompatible // indirect
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 github.com/xeipuuv/gojsonschema v1.1.0 // indirect
-golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
-golang.org/x/net v0.0.0-20190107210223-45ffb0cd1ba0 // indirect
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb
 gopkg.in/yaml.v2 v2.2.2 // indirect
 k8s.io/client-go v0.0.0-20181219152756-3dd551c0f083 // indirect
 )
59
vendor/github.com/containers/buildah/go.sum
generated
vendored
59
vendor/github.com/containers/buildah/go.sum
generated
vendored
@ -2,12 +2,18 @@ github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7O
|
|||||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||||
github.com/BurntSushi/toml v0.2.0 h1:OthAm9ZSUx4uAmn3WbPwc06nowWrByRwBsYRhbmFjBs=
|
github.com/BurntSushi/toml v0.2.0 h1:OthAm9ZSUx4uAmn3WbPwc06nowWrByRwBsYRhbmFjBs=
|
||||||
github.com/BurntSushi/toml v0.2.0/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
github.com/BurntSushi/toml v0.2.0/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
|
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||||
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
github.com/DataDog/zstd v1.4.0 h1:vhoV+DUHnRZdKW1i5UMjAk2G4JY8wN4ayRfYDNdEhwo=
|
github.com/DataDog/zstd v1.4.0 h1:vhoV+DUHnRZdKW1i5UMjAk2G4JY8wN4ayRfYDNdEhwo=
|
||||||
github.com/DataDog/zstd v1.4.0/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
|
github.com/DataDog/zstd v1.4.0/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
|
||||||
github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q=
|
github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q=
|
||||||
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
||||||
|
github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc=
|
||||||
|
github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
||||||
github.com/Microsoft/hcsshim v0.8.3 h1:KWCdVGOju81E0RL4ndn9/E6I4qMBi6kuPw1W4yBYlCw=
|
github.com/Microsoft/hcsshim v0.8.3 h1:KWCdVGOju81E0RL4ndn9/E6I4qMBi6kuPw1W4yBYlCw=
|
||||||
github.com/Microsoft/hcsshim v0.8.3/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
|
github.com/Microsoft/hcsshim v0.8.3/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
|
||||||
|
github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA=
|
||||||
|
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
|
||||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
|
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
|
||||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
||||||
github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
|
github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
|
||||||
@ -19,15 +25,37 @@ github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 h1:4BX8f882b
|
|||||||
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||||
github.com/containernetworking/cni v0.7.0-rc2 h1:2GGDhbwdWPY53iT7LXy+LBP76Ch2D/hnw1U2zVFfGbk=
|
github.com/containernetworking/cni v0.7.0-rc2 h1:2GGDhbwdWPY53iT7LXy+LBP76Ch2D/hnw1U2zVFfGbk=
|
||||||
github.com/containernetworking/cni v0.7.0-rc2/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
|
github.com/containernetworking/cni v0.7.0-rc2/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
|
||||||
|
github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK31EJ9FzE=
|
||||||
|
github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
|
||||||
github.com/containers/image v2.0.0+incompatible h1:FTr6Br7jlIKNCKMjSOMbAxKp2keQ0//jzJaYNTVhauk=
|
github.com/containers/image v2.0.0+incompatible h1:FTr6Br7jlIKNCKMjSOMbAxKp2keQ0//jzJaYNTVhauk=
|
||||||
github.com/containers/image v2.0.0+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M=
|
github.com/containers/image v2.0.0+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M=
|
||||||
|
github.com/containers/image v2.0.1+incompatible h1:w39mlElA/aSFZ6moFa5N+A4MWu9c8hgdMiMMYnH94Hs=
|
||||||
|
github.com/containers/image v2.0.1+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M=
|
||||||
|
github.com/containers/image v3.0.0+incompatible h1:pdUHY//H+3jYNnoTt+rqY8NsStX4ZBLKzPTlMC+XvnU=
|
||||||
|
github.com/containers/image v3.0.0+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M=
|
||||||
|
github.com/containers/image v3.0.1+incompatible h1:VlNEQUI1JHa1SJfJ4jz/GBt7gpk+aRYGR6TUKsxXMkU=
|
||||||
|
github.com/containers/image v3.0.1+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M=
|
||||||
|
github.com/containers/image v3.0.2+incompatible h1:B1lqAE8MUPCrsBLE86J0gnXleeRq8zJnQryhiiGQNyE=
|
||||||
|
github.com/containers/image v3.0.2+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M=
|
||||||
|
github.com/containers/storage v1.12.10-0.20190725063046-8038df61d6f6 h1:c7Fq9bbRl0Ua6swRHAH8rkrK2fSt6K+ZBrXHD50kDR4=
|
||||||
|
github.com/containers/storage v1.12.10-0.20190725063046-8038df61d6f6/go.mod h1:QsZp4XMJjyPNNbQHZeyNW3OmhwsWviI+7S6iOcu6a4c=
|
||||||
github.com/containers/storage v1.12.13 h1:GtaLCY8p1Drlk1Oew581jGvB137UaO+kpz0HII67T0A=
|
github.com/containers/storage v1.12.13 h1:GtaLCY8p1Drlk1Oew581jGvB137UaO+kpz0HII67T0A=
|
||||||
github.com/containers/storage v1.12.13/go.mod h1:+RirK6VQAqskQlaTBrOG6ulDvn4si2QjFE1NZCn06MM=
|
github.com/containers/storage v1.12.13/go.mod h1:+RirK6VQAqskQlaTBrOG6ulDvn4si2QjFE1NZCn06MM=
|
||||||
|
github.com/containers/storage v1.12.14 h1:S1QGlC15gj5JOvB73W5tpVBApS4I7b/6rvxfflBAg+Q=
|
||||||
|
github.com/containers/storage v1.12.14/go.mod h1:QsZp4XMJjyPNNbQHZeyNW3OmhwsWviI+7S6iOcu6a4c=
|
||||||
|
github.com/containers/storage v1.12.15 h1:nN/RxtEe4ejasGVJqzy+y5++pIYp54XPXzRO46xXnns=
|
||||||
|
github.com/containers/storage v1.12.15/go.mod h1:QsZp4XMJjyPNNbQHZeyNW3OmhwsWviI+7S6iOcu6a4c=
|
||||||
|
github.com/containers/storage v1.12.16 h1:zePYS1GiG8CuRqLCeA0ufx4X27K06HcJLV50DdojL+Y=
|
||||||
|
github.com/containers/storage v1.12.16/go.mod h1:QsZp4XMJjyPNNbQHZeyNW3OmhwsWviI+7S6iOcu6a4c=
|
||||||
|
github.com/containers/storage v1.13.1 h1:rjVirLS9fCGkUFlLDZEoGDDUugtIf46DufWvJu08wxQ=
|
||||||
|
github.com/containers/storage v1.13.1/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA=
|
||||||
github.com/cyphar/filepath-securejoin v0.2.1 h1:5DPkzz/0MwUpvR4fxASKzgApeq2OMFY5FfYtrX28Coo=
|
github.com/cyphar/filepath-securejoin v0.2.1 h1:5DPkzz/0MwUpvR4fxASKzgApeq2OMFY5FfYtrX28Coo=
|
||||||
github.com/cyphar/filepath-securejoin v0.2.1/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
|
github.com/cyphar/filepath-securejoin v0.2.1/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
|
||||||
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/docker/distribution v0.0.0-20170817175659-5f6282db7d65 h1:4zlOyrJUbYnrvlzChJ+jP2J3i77Jbhm336NEuCv7kZo=
|
github.com/docker/distribution v0.0.0-20170817175659-5f6282db7d65 h1:4zlOyrJUbYnrvlzChJ+jP2J3i77Jbhm336NEuCv7kZo=
|
||||||
github.com/docker/distribution v0.0.0-20170817175659-5f6282db7d65/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
github.com/docker/distribution v0.0.0-20170817175659-5f6282db7d65/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||||
|
github.com/docker/docker v0.0.0-20171019062838-86f080cff091/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||||
github.com/docker/docker v0.7.3-0.20180827131323-0c5f8d2b9b23 h1:mJtkfC9RUrUWHMk0cFDNhVoc9U3k2FRAzEZ+5pqSIHo=
|
github.com/docker/docker v0.7.3-0.20180827131323-0c5f8d2b9b23 h1:mJtkfC9RUrUWHMk0cFDNhVoc9U3k2FRAzEZ+5pqSIHo=
|
||||||
github.com/docker/docker v0.7.3-0.20180827131323-0c5f8d2b9b23/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
github.com/docker/docker v0.7.3-0.20180827131323-0c5f8d2b9b23/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||||
github.com/docker/docker-credential-helpers v0.6.1 h1:Dq4iIfcM7cNtddhLVWe9h4QDjsi4OER3Z8voPu/I52g=
|
github.com/docker/docker-credential-helpers v0.6.1 h1:Dq4iIfcM7cNtddhLVWe9h4QDjsi4OER3Z8voPu/I52g=
|
||||||
@ -36,6 +64,8 @@ github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKoh
|
|||||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||||
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
|
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
|
||||||
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||||
|
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
|
||||||
|
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||||
github.com/docker/libnetwork v0.8.0-dev.2.0.20180608203834-19279f049241/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
|
github.com/docker/libnetwork v0.8.0-dev.2.0.20180608203834-19279f049241/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
|
||||||
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 h1:moehPjPiGUaWdwgOl92xRyFHJyaqXDHcCyW9M6nmCK4=
|
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 h1:moehPjPiGUaWdwgOl92xRyFHJyaqXDHcCyW9M6nmCK4=
|
||||||
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
|
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
|
||||||
@ -71,8 +101,12 @@ github.com/ishidawataru/sctp v0.0.0-20180918013207-6e2cb1366111 h1:NAAiV9ass6VRe
|
|||||||
github.com/ishidawataru/sctp v0.0.0-20180918013207-6e2cb1366111/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8=
|
github.com/ishidawataru/sctp v0.0.0-20180918013207-6e2cb1366111/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8=
|
||||||
github.com/klauspost/compress v1.4.1 h1:8VMb5+0wMgdBykOV96DwNwKFQ+WTI4pzYURP99CcB9E=
|
github.com/klauspost/compress v1.4.1 h1:8VMb5+0wMgdBykOV96DwNwKFQ+WTI4pzYURP99CcB9E=
|
||||||
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||||
|
github.com/klauspost/compress v1.7.2 h1:liMOoeIvFpr9kEvalrZ7VVBA4wGf7zfOgwBjzz/5g2Y=
|
||||||
|
github.com/klauspost/compress v1.7.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||||
github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE=
|
github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE=
|
||||||
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||||
|
github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
|
||||||
|
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||||
github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
|
github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
|
||||||
github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
||||||
@ -81,6 +115,8 @@ github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs
|
|||||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||||
github.com/mattn/go-shellwords v1.0.3 h1:K/VxK7SZ+cvuPgFSLKi5QPI9Vr/ipOf4C1gN+ntueUk=
|
github.com/mattn/go-shellwords v1.0.3 h1:K/VxK7SZ+cvuPgFSLKi5QPI9Vr/ipOf4C1gN+ntueUk=
|
||||||
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
|
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
|
||||||
|
github.com/mattn/go-shellwords v1.0.5 h1:JhhFTIOslh5ZsPrpa3Wdg8bF0WI3b44EMblmU9wIsXc=
|
||||||
|
github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
|
||||||
github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
|
github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
|
||||||
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
|
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
|
||||||
github.com/moby/moby v0.0.0-20171005181806-f8806b18b4b9/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc=
|
github.com/moby/moby v0.0.0-20171005181806-f8806b18b4b9/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc=
|
||||||
@ -108,12 +144,16 @@ github.com/openshift/imagebuilder v1.1.0 h1:oT704SkwMEzmIMU/+Uv1Wmvt+p10q3v2WuYM
|
|||||||
github.com/openshift/imagebuilder v1.1.0/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo=
|
github.com/openshift/imagebuilder v1.1.0/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo=
|
||||||
github.com/ostreedev/ostree-go v0.0.0-20181112201119-9ab99253d365 h1:5DKEDlc/DLftia3h4tk5K0KBiqBXogCc6EarWTlD3fM=
|
github.com/ostreedev/ostree-go v0.0.0-20181112201119-9ab99253d365 h1:5DKEDlc/DLftia3h4tk5K0KBiqBXogCc6EarWTlD3fM=
|
||||||
github.com/ostreedev/ostree-go v0.0.0-20181112201119-9ab99253d365/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
|
github.com/ostreedev/ostree-go v0.0.0-20181112201119-9ab99253d365/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
|
||||||
|
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw=
|
||||||
|
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
|
||||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/pquerna/ffjson v0.0.0-20171002144729-d49c2bc1aa13 h1:AUK/hm/tPsiNNASdb3J8fySVRZoI7fnK5mlOvdFD43o=
|
github.com/pquerna/ffjson v0.0.0-20171002144729-d49c2bc1aa13 h1:AUK/hm/tPsiNNASdb3J8fySVRZoI7fnK5mlOvdFD43o=
|
||||||
github.com/pquerna/ffjson v0.0.0-20171002144729-d49c2bc1aa13/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
|
github.com/pquerna/ffjson v0.0.0-20171002144729-d49c2bc1aa13/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
|
||||||
|
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7 h1:gGBSHPOU7g8YjTbhwn+lvFm2VDEhhA+PwDIlstkgSxE=
|
||||||
|
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
|
||||||
github.com/seccomp/containers-golang v0.0.0-20180629143253-cdfdaa7543f4 h1:rOG9oHVIndNR14f3HRyBy9UPQYmIPniWqTU1TDdHhq4=
|
github.com/seccomp/containers-golang v0.0.0-20180629143253-cdfdaa7543f4 h1:rOG9oHVIndNR14f3HRyBy9UPQYmIPniWqTU1TDdHhq4=
|
||||||
github.com/seccomp/containers-golang v0.0.0-20180629143253-cdfdaa7543f4/go.mod h1:f/98/SnvAzhAEFQJ3u836FePXvcbE8BS0YGMQNn4mhA=
|
github.com/seccomp/containers-golang v0.0.0-20180629143253-cdfdaa7543f4/go.mod h1:f/98/SnvAzhAEFQJ3u836FePXvcbE8BS0YGMQNn4mhA=
|
||||||
github.com/seccomp/libseccomp-golang v0.9.0 h1:S1pmhdFh5spQtVojA+4GUdWBqvI8ydYHxrx8iR6xN8o=
|
github.com/seccomp/libseccomp-golang v0.9.0 h1:S1pmhdFh5spQtVojA+4GUdWBqvI8ydYHxrx8iR6xN8o=
|
||||||
@ -125,20 +165,30 @@ github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
|
|||||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||||
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
||||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||||
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia v2.2.6+incompatible h1:JvoDL7JSoIP2HDE8AbDH3zC8QBPxmzYe32HHy5yQ+Ck=
github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
+github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok=
github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
github.com/vbatts/tar-split v0.10.2 h1:CXd7HEKGkTLjBMinpObcJZU5Hm8EKlor2a1JtX6msXQ=
github.com/vbatts/tar-split v0.10.2/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
+github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
+github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
github.com/vbauerster/mpb v3.3.4+incompatible h1:DDIhnwmgTQIDZo+SWlEr5d6mJBxkOLBwCXPzunhEfJ4=
github.com/vbauerster/mpb v3.3.4+incompatible/go.mod h1:zAHG26FUhVKETRu+MWqYXcI70POlC6N8up9p1dID7SU=
+github.com/vbauerster/mpb v3.4.0+incompatible h1:mfiiYw87ARaeRW6x5gWwYRUawxaW1tLAD8IceomUCNw=
+github.com/vbauerster/mpb v3.4.0+incompatible/go.mod h1:zAHG26FUhVKETRu+MWqYXcI70POlC6N8up9p1dID7SU=
github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
+github.com/vrothberg/storage v0.0.0-20190724065215-a1e42fd78930 h1:/LeIxi2kj5UYTJR9W35t5Pq2gqz03ZNoTURchTH3vc0=
+github.com/vrothberg/storage v0.0.0-20190724065215-a1e42fd78930/go.mod h1:QsZp4XMJjyPNNbQHZeyNW3OmhwsWviI+7S6iOcu6a4c=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
@ -148,19 +198,27 @@ github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4m
golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc h1:F5tKCVGp+MUAHhKp5MZtGqAlGX3+oCsiL1Q629FL90M=
golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190107210223-45ffb0cd1ba0 h1:1DW40AJQ7AP4nY6ORUGUdkpXyEC9W2GAXcOPaMZK0K8=
golang.org/x/net v0.0.0-20190107210223-45ffb0cd1ba0/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180824143301-4910a1d54f87/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k=
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20180810170437-e96c4e24768d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
@ -171,6 +229,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gotest.tools v0.0.0-20190624233834-05ebafbffc79/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90=
gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
k8s.io/client-go v0.0.0-20181219152756-3dd551c0f083 h1:+Qf/nITucAbm09aIdxvoA+7X0BwaXmQGVoR8k7Ynk9o=
k8s.io/client-go v0.0.0-20181219152756-3dd551c0f083/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
vendor/github.com/containers/buildah/install.md (2 changes, generated, vendored)

@ -378,7 +378,7 @@ cat /etc/containers/policy.json
## Vendoring

Buildah uses Go Modules for vendoring purposes. If you need to update or add a vendored package into Buildah, please follow this proceedure:
-* Enter into your sandbox `src/github.com/containers/buildah` and ensure that he GOPATH variable is set to the directory prior as noted above.
+* Enter into your sandbox `src/github.com/containers/buildah` and ensure that the GOPATH variable is set to the directory prior as noted above.
* `export GO111MODULE=on`
* Assuming you want to 'bump' the `github.com/containers/storage` package to version 1.12.13, use this command: `go get github.com/containers/storage@v1.12.13`
* `make vendor`
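For reference, the vendoring steps listed above boil down to a short shell session; the storage version shown is only the example used in the text, and the sandbox path assumes the GOPATH layout the document describes:

```console
$ cd $GOPATH/src/github.com/containers/buildah
$ export GO111MODULE=on
$ go get github.com/containers/storage@v1.12.13
$ make vendor
```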
vendor/github.com/containers/buildah/new.go (4 changes, generated, vendored)

@ -8,7 +8,7 @@ import (

"github.com/containers/buildah/util"
"github.com/containers/image/manifest"
-"github.com/containers/image/pkg/sysregistries"
+"github.com/containers/image/pkg/sysregistriesv2"
is "github.com/containers/image/storage"
"github.com/containers/image/transports"
"github.com/containers/image/transports/alltransports"
@ -186,7 +186,7 @@ func resolveImage(ctx context.Context, systemContext *types.SystemContext, store
return nil, "", nil, fmt.Errorf("internal error: %d candidates (%#v) vs. %d failures (%#v)", len(candidates), candidates, len(failures), failures)
}

-registriesConfPath := sysregistries.RegistriesConfPath(systemContext)
+registriesConfPath := sysregistriesv2.ConfigPath(systemContext)
switch len(failures) {
case 0:
if searchRegistriesWereUsedButEmpty {
vendor/github.com/containers/buildah/ostree_tag.sh (2 changes, generated, vendored)

@ -1,6 +1,6 @@
#!/bin/bash
if pkg-config ostree-1 2> /dev/null ; then
-echo ostree
+echo containers_image_ostree
else
echo containers_image_ostree_stub
fi
vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go (4 changes, generated, vendored)

@ -414,8 +414,8 @@ func saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os
}
}

-func (s *blobCacheDestination) HasThreadSafePutBlob() bool {
+func (d *blobCacheDestination) HasThreadSafePutBlob() bool {
-return s.destination.HasThreadSafePutBlob()
+return d.destination.HasThreadSafePutBlob()
}

func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
vendor/github.com/containers/buildah/pkg/cli/common.go (8 changes, generated, vendored)

@ -71,7 +71,7 @@ type BudResults struct {
Squash bool
Tag []string
Target string
-TlsVerify bool
+TLSVerify bool
}

// FromAndBugResults represents the results for common flags
@ -90,7 +90,7 @@ type FromAndBudResults struct {
DNSSearch []string
DNSServers []string
DNSOptions []string
-HttpProxy bool
+HTTPProxy bool
Isolation string
Memory string
MemorySwap string
@ -166,7 +166,7 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.BoolVar(&flags.Squash, "squash", false, "Squash newly built layers into a single new layer.")
fs.StringArrayVarP(&flags.Tag, "tag", "t", []string{}, "tagged `name` to apply to the built image")
fs.StringVar(&flags.Target, "target", "", "set the target build stage to build")
-fs.BoolVar(&flags.TlsVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")
+fs.BoolVar(&flags.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")
return fs
}

@ -188,7 +188,7 @@ func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults,
fs.StringSliceVar(&flags.DNSSearch, "dns-search", []string{}, "Set custom DNS search domains")
fs.StringSliceVar(&flags.DNSServers, "dns", []string{}, "Set custom DNS servers")
fs.StringSliceVar(&flags.DNSOptions, "dns-option", []string{}, "Set custom DNS options")
-fs.BoolVar(&flags.HttpProxy, "http-proxy", true, "pass thru HTTP Proxy environment variables")
+fs.BoolVar(&flags.HTTPProxy, "http-proxy", true, "pass thru HTTP Proxy environment variables")
fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.")
fs.StringVarP(&flags.Memory, "memory", "m", "", "memory limit (format: <number>[<unit>], where unit = b, k, m or g)")
fs.StringVar(&flags.MemorySwap, "memory-swap", "", "swap limit equal to memory plus swap: '-1' to enable unlimited swap")
73
vendor/github.com/containers/buildah/pkg/overlay/overlay.go
generated
vendored
73
vendor/github.com/containers/buildah/pkg/overlay/overlay.go
generated
vendored
@ -4,21 +4,24 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
|
"os/exec"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/containers/buildah/pkg/unshare"
|
||||||
"github.com/containers/storage"
|
"github.com/containers/storage"
|
||||||
"github.com/containers/storage/pkg/idtools"
|
"github.com/containers/storage/pkg/idtools"
|
||||||
"github.com/opencontainers/runtime-spec/specs-go"
|
"github.com/opencontainers/runtime-spec/specs-go"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MountTemp creates a subdir of the contentDir based on the source directory
|
// MountTemp creates a subdir of the contentDir based on the source directory
|
||||||
// from the source system. It then mounds up the source directory on to the
|
// from the source system. It then mounts up the source directory on to the
|
||||||
// generated mount point and returns the mount point to the caller.
|
// generated mount point and returns the mount point to the caller.
|
||||||
func MountTemp(store storage.Store, containerId, source, dest string, rootUID, rootGID int) (mount specs.Mount, contentDir string, Err error) {
|
func MountTemp(store storage.Store, containerID, source, dest string, rootUID, rootGID int) (mount specs.Mount, contentDir string, Err error) {
|
||||||
|
|
||||||
containerDir, err := store.ContainerDirectory(containerId)
|
containerDir, err := store.ContainerDirectory(containerID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return mount, "", err
|
return mount, "", err
|
||||||
}
|
}
|
||||||
@ -46,10 +49,55 @@ func MountTemp(store storage.Store, containerId, source, dest string, rootUID, r
|
|||||||
return mount, "", errors.Wrapf(err, "failed to create the overlay %s directory", workDir)
|
return mount, "", errors.Wrapf(err, "failed to create the overlay %s directory", workDir)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
overlayOptions := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", source, upperDir, workDir)
|
||||||
|
|
||||||
|
if unshare.IsRootless() {
|
||||||
|
mountProgram := ""
|
||||||
|
|
||||||
|
mountMap := map[string]bool{
|
||||||
|
".mount_program": true,
|
||||||
|
"overlay.mount_program": true,
|
||||||
|
"overlay2.mount_program": true,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, i := range store.GraphOptions() {
|
||||||
|
s := strings.SplitN(i, "=", 2)
|
||||||
|
if len(s) != 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
k := s[0]
|
||||||
|
v := s[1]
|
||||||
|
if mountMap[k] {
|
||||||
|
mountProgram = v
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if mountProgram != "" {
|
||||||
|
mergeDir := filepath.Join(contentDir, "merge")
|
||||||
|
|
||||||
|
if err := idtools.MkdirAllAs(mergeDir, 0700, rootUID, rootGID); err != nil {
|
||||||
|
return mount, "", errors.Wrapf(err, "failed to create the overlay %s directory", mergeDir)
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd := exec.Command(mountProgram, "-o", overlayOptions, mergeDir)
|
||||||
|
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
return mount, "", errors.Wrapf(err, "exec %s", mountProgram)
|
||||||
|
}
|
||||||
|
|
||||||
|
mount.Source = mergeDir
|
||||||
|
mount.Destination = dest
|
||||||
|
mount.Type = "bind"
|
||||||
|
mount.Options = []string{"bind", "slave"}
|
||||||
|
return mount, contentDir, nil
|
||||||
|
}
|
||||||
|
/* If a mount_program is not specified, fallback to try mount native overlay. */
|
||||||
|
}
|
||||||
|
|
||||||
mount.Source = "overlay"
|
mount.Source = "overlay"
|
||||||
mount.Destination = dest
|
mount.Destination = dest
|
||||||
mount.Type = "overlay"
|
mount.Type = "overlay"
|
||||||
mount.Options = strings.Split(fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", source, upperDir, workDir), ",")
|
mount.Options = strings.Split(overlayOptions, ",")
|
||||||
|
|
||||||
return mount, contentDir, nil
|
return mount, contentDir, nil
|
||||||
}
|
}
|
||||||
@ -57,6 +105,14 @@ func MountTemp(store storage.Store, containerId, source, dest string, rootUID, r
|
|||||||
// RemoveTemp removes temporary mountpoint and all content from its parent
|
// RemoveTemp removes temporary mountpoint and all content from its parent
|
||||||
// directory
|
// directory
|
||||||
func RemoveTemp(contentDir string) error {
|
func RemoveTemp(contentDir string) error {
|
||||||
|
if unshare.IsRootless() {
|
||||||
|
mergeDir := filepath.Join(contentDir, "merge")
|
||||||
|
if err := unix.Unmount(mergeDir, 0); err != nil {
|
||||||
|
if !os.IsNotExist(err) {
|
||||||
|
return errors.Wrapf(err, "unmount overlay %s", mergeDir)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
return os.RemoveAll(contentDir)
|
return os.RemoveAll(contentDir)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -64,6 +120,15 @@ func RemoveTemp(contentDir string) error {
|
|||||||
// directory
|
// directory
|
||||||
func CleanupContent(containerDir string) (Err error) {
|
func CleanupContent(containerDir string) (Err error) {
|
||||||
contentDir := filepath.Join(containerDir, "overlay")
|
contentDir := filepath.Join(containerDir, "overlay")
|
||||||
|
|
||||||
|
if unshare.IsRootless() {
|
||||||
|
mergeDir := filepath.Join(contentDir, "merge")
|
||||||
|
if err := unix.Unmount(mergeDir, 0); err != nil {
|
||||||
|
if !os.IsNotExist(err) {
|
||||||
|
return errors.Wrapf(err, "unmount overlay %s", mergeDir)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
if err := os.RemoveAll(contentDir); err != nil && !os.IsNotExist(err) {
|
if err := os.RemoveAll(contentDir); err != nil && !os.IsNotExist(err) {
|
||||||
return errors.Wrapf(err, "failed to cleanup overlay %s directory", contentDir)
|
return errors.Wrapf(err, "failed to cleanup overlay %s directory", contentDir)
|
||||||
}
|
}
|
||||||
|
18
vendor/github.com/containers/buildah/pkg/parse/parse.go
generated
vendored
18
vendor/github.com/containers/buildah/pkg/parse/parse.go
generated
vendored
@ -14,7 +14,6 @@ import (
|
|||||||
"unicode"
|
"unicode"
|
||||||
|
|
||||||
"github.com/containers/buildah"
|
"github.com/containers/buildah"
|
||||||
"github.com/containers/buildah/pkg/unshare"
|
|
||||||
"github.com/containers/image/types"
|
"github.com/containers/image/types"
|
||||||
"github.com/containers/storage/pkg/idtools"
|
"github.com/containers/storage/pkg/idtools"
|
||||||
"github.com/docker/go-units"
|
"github.com/docker/go-units"
|
||||||
@ -104,7 +103,7 @@ func CommonBuildOptions(c *cobra.Command) (*buildah.CommonBuildOptions, error) {
|
|||||||
return nil, errors.Wrapf(err, "invalid --shm-size")
|
return nil, errors.Wrapf(err, "invalid --shm-size")
|
||||||
}
|
}
|
||||||
volumes, _ := c.Flags().GetStringSlice("volume")
|
volumes, _ := c.Flags().GetStringSlice("volume")
|
||||||
if err := ParseVolumes(volumes); err != nil {
|
if err := Volumes(volumes); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
cpuPeriod, _ := c.Flags().GetUint64("cpu-period")
|
cpuPeriod, _ := c.Flags().GetUint64("cpu-period")
|
||||||
@ -179,8 +178,8 @@ func parseSecurityOpts(securityOpts []string, commonOpts *buildah.CommonBuildOpt
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ParseVolume parses the input of --volume
|
// Volume parses the input of --volume
|
||||||
func ParseVolume(volume string) (specs.Mount, error) {
|
func Volume(volume string) (specs.Mount, error) {
|
||||||
mount := specs.Mount{}
|
mount := specs.Mount{}
|
||||||
arr := strings.SplitN(volume, ":", 3)
|
arr := strings.SplitN(volume, ":", 3)
|
||||||
if len(arr) < 2 {
|
if len(arr) < 2 {
|
||||||
@ -207,13 +206,13 @@ func ParseVolume(volume string) (specs.Mount, error) {
|
|||||||
return mount, nil
|
return mount, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ParseVolumes validates the host and container paths passed in to the --volume flag
|
// Volumes validates the host and container paths passed in to the --volume flag
|
||||||
func ParseVolumes(volumes []string) error {
|
func Volumes(volumes []string) error {
|
||||||
if len(volumes) == 0 {
|
if len(volumes) == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
for _, volume := range volumes {
|
for _, volume := range volumes {
|
||||||
if _, err := ParseVolume(volume); err != nil {
|
if _, err := Volume(volume); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -224,7 +223,7 @@ func getVolumeMounts(volumes []string) (map[string]specs.Mount, error) {
|
|||||||
finalVolumeMounts := make(map[string]specs.Mount)
|
finalVolumeMounts := make(map[string]specs.Mount)
|
||||||
|
|
||||||
for _, volume := range volumes {
|
for _, volume := range volumes {
|
||||||
volumeMount, err := ParseVolume(volume)
|
volumeMount, err := Volume(volume)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -473,9 +472,6 @@ func ValidateVolumeOpts(options []string) ([]string, error) {
|
|||||||
}
|
}
|
||||||
foundRWRO++
|
foundRWRO++
|
||||||
case "z", "Z", "O":
|
case "z", "Z", "O":
|
||||||
if opt == "O" && unshare.IsRootless() {
|
|
||||||
return nil, errors.Errorf("invalid options %q, overlay mounts not supported in rootless mode", strings.Join(options, ", "))
|
|
||||||
}
|
|
||||||
if foundLabelChange > 1 {
|
if foundLabelChange > 1 {
|
||||||
return nil, errors.Errorf("invalid options %q, can only specify 1 'z', 'Z', or 'O' option", strings.Join(options, ", "))
|
return nil, errors.Errorf("invalid options %q, can only specify 1 'z', 'Z', or 'O' option", strings.Join(options, ", "))
|
||||||
}
|
}
|
||||||
|
vendor/github.com/containers/buildah/troubleshooting.md (34 changes, generated, vendored)

@ -112,9 +112,7 @@ lstat /home/myusername/~: no such file or directory
---
### 5) Rootless buildah bud fails EPERM on NFS:

-NFS enforces file creation on different UIDs on the server side and does not understand User Namespace.
+NFS enforces file creation on different UIDs on the server side and does not understand user namespace, which rootless Podman requires. When a container root process like YUM attempts to create a file owned by a different UID, NFS Server denies the creation. NFS is also a problem for the file locks when the storage is on it. Other distributed file systems (for example: Lustre, Spectrum Scale, the General Parallel File System (GPFS)) are also not supported when running in rootless mode as these file systems do not understand user namespace.
-When a container root process like YUM attempts to create a file owned by a different UID, NFS Server denies the creation.
-NFS is also a problem for the file locks when the storage is on it.

#### Symptom
```console
@ -128,3 +126,33 @@ Choose one of the following:
* Setup containers/storage in a different directory, not on an NFS share.
* Otherwise just run buildah as root, via `sudo buildah`
---
+### 6) Rootless buildah bud fails when using OverlayFS:
+
+The Overlay file system (OverlayFS) requires the ability to call the `mknod` command when creating whiteout files
+when extracting an image. However, a rootless user does not have the privileges to use `mknod` in this capacity.
+
+#### Symptom
+```console
+buildah bud --storage-driver overlay .
+STEP 1: FROM docker.io/ubuntu:xenial
+Getting image source signatures
+Copying blob edf72af6d627 done
+Copying blob 3e4f86211d23 done
+Copying blob 8d3eac894db4 done
+Copying blob f7277927d38a done
+Copying config 5e13f8dd4c done
+Writing manifest to image destination
+Storing signatures
+Error: error creating build container: Error committing the finished image: error adding layer with blob "sha256:8d3eac894db4dc4154377ad28643dfe6625ff0e54bcfa63e0d04921f1a8ef7f8": Error processing tar file(exit status 1): operation not permitted
+$ buildah bud .
+ERRO[0014] Error while applying layer: ApplyLayer exit status 1 stdout: stderr: open /root/.bash_logout: permission denied
+error creating build container: Error committing the finished image: error adding layer with blob "sha256:a02a4930cb5d36f3290eb84f4bfa30668ef2e9fe3a1fb73ec015fc58b9958b17": ApplyLayer exit status 1 stdout: stderr: open /root/.bash_logout: permission denied
+```
+
+#### Solution
+Choose one of the following:
+* Complete the build operation as a privileged user.
+* Install and configure fuse-overlayfs.
+* Install the fuse-overlayfs package for your Linux Distribution.
+* Add `mount_program = "/usr/bin/fuse-overlayfs"` under `[storage.options]` in your `~/.config/containers/storage.conf` file.
+---
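The last bullet of the solution above amounts to a small storage.conf edit. A minimal sketch, assuming fuse-overlayfs is installed at /usr/bin/fuse-overlayfs (the path and any additional keys you need vary by distribution):

```toml
# ~/.config/containers/storage.conf
[storage]
driver = "overlay"

[storage.options]
mount_program = "/usr/bin/fuse-overlayfs"
```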
vendor/github.com/containers/buildah/util.go (9 changes, generated, vendored)

@ -8,7 +8,6 @@ import (

"github.com/containers/buildah/util"
"github.com/containers/image/docker/reference"
-"github.com/containers/image/pkg/sysregistries"
"github.com/containers/image/pkg/sysregistriesv2"
"github.com/containers/image/types"
"github.com/containers/storage"
@ -279,17 +278,17 @@ func (b *Builder) untar(chownOpts *idtools.IDPair, hasher io.Writer) func(tarArc
func isRegistryBlocked(registry string, sc *types.SystemContext) (bool, error) {
reginfo, err := sysregistriesv2.FindRegistry(sc, registry)
if err != nil {
-return false, errors.Wrapf(err, "unable to parse the registries configuration (%s)", sysregistries.RegistriesConfPath(sc))
+return false, errors.Wrapf(err, "unable to parse the registries configuration (%s)", sysregistriesv2.ConfigPath(sc))
}
if reginfo != nil {
if reginfo.Blocked {
-logrus.Debugf("registry %q is marked as blocked in registries configuration %q", registry, sysregistries.RegistriesConfPath(sc))
+logrus.Debugf("registry %q is marked as blocked in registries configuration %q", registry, sysregistriesv2.ConfigPath(sc))
} else {
-logrus.Debugf("registry %q is not marked as blocked in registries configuration %q", registry, sysregistries.RegistriesConfPath(sc))
+logrus.Debugf("registry %q is not marked as blocked in registries configuration %q", registry, sysregistriesv2.ConfigPath(sc))
}
return reginfo.Blocked, nil
}
-logrus.Debugf("registry %q is not listed in registries configuration %q, assuming it's not blocked", registry, sysregistries.RegistriesConfPath(sc))
+logrus.Debugf("registry %q is not listed in registries configuration %q, assuming it's not blocked", registry, sysregistriesv2.ConfigPath(sc))
return false, nil
}

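As in new.go earlier, the change above migrates callers from the removed sysregistries helper to the sysregistriesv2 API when resolving the registries.conf location. A minimal sketch of the new call, assuming containers/image v3 is vendored as this commit does:

```go
package main

import (
	"fmt"

	"github.com/containers/image/pkg/sysregistriesv2"
)

func main() {
	// A nil *types.SystemContext falls back to the built-in default,
	// normally /etc/containers/registries.conf.
	fmt.Println(sysregistriesv2.ConfigPath(nil))
}
```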
|
9
vendor/github.com/containers/image/docker/docker_client.go
generated
vendored
9
vendor/github.com/containers/image/docker/docker_client.go
generated
vendored
@ -254,6 +254,9 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
|
|||||||
return nil, errors.Wrapf(err, "error loading registries")
|
return nil, errors.Wrapf(err, "error loading registries")
|
||||||
}
|
}
|
||||||
if reg != nil {
|
if reg != nil {
|
||||||
|
if reg.Blocked {
|
||||||
|
return nil, fmt.Errorf("registry %s is blocked in %s", reg.Prefix, sysregistriesv2.ConfigPath(sys))
|
||||||
|
}
|
||||||
skipVerify = reg.Insecure
|
skipVerify = reg.Insecure
|
||||||
}
|
}
|
||||||
tlsClientConfig.InsecureSkipVerify = skipVerify
|
tlsClientConfig.InsecureSkipVerify = skipVerify
|
||||||
@ -523,11 +526,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
|
|||||||
authReq.SetBasicAuth(c.username, c.password)
|
authReq.SetBasicAuth(c.username, c.password)
|
||||||
}
|
}
|
||||||
logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
|
logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
|
||||||
tr := tlsclientconfig.NewTransport()
|
res, err := c.client.Do(authReq)
|
||||||
// TODO(runcom): insecure for now to contact the external token service
|
|
||||||
tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
|
|
||||||
client := &http.Client{Transport: tr}
|
|
||||||
res, err := client.Do(authReq)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
16
vendor/github.com/containers/image/ostree/ostree_src.go
generated
vendored
16
vendor/github.com/containers/image/ostree/ostree_src.go
generated
vendored
@ -59,9 +59,15 @@ func (s *ostreeImageSource) Close() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ostreeImageSource) getLayerSize(blob string) (int64, error) {
|
func (s *ostreeImageSource) getBlobUncompressedSize(blob string, isCompressed bool) (int64, error) {
|
||||||
|
var metadataKey string
|
||||||
|
if isCompressed {
|
||||||
|
metadataKey = "docker.uncompressed_size"
|
||||||
|
} else {
|
||||||
|
metadataKey = "docker.size"
|
||||||
|
}
|
||||||
b := fmt.Sprintf("ociimage/%s", blob)
|
b := fmt.Sprintf("ociimage/%s", blob)
|
||||||
found, data, err := readMetadata(s.repo, b, "docker.size")
|
found, data, err := readMetadata(s.repo, b, metadataKey)
|
||||||
if err != nil || !found {
|
if err != nil || !found {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
@ -275,8 +281,8 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
|
|||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
compressedBlob, found := s.compressed[info.Digest]
|
compressedBlob, isCompressed := s.compressed[info.Digest]
|
||||||
if found {
|
if isCompressed {
|
||||||
blob = compressedBlob.Hex()
|
blob = compressedBlob.Hex()
|
||||||
}
|
}
|
||||||
branch := fmt.Sprintf("ociimage/%s", blob)
|
branch := fmt.Sprintf("ociimage/%s", blob)
|
||||||
@ -289,7 +295,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
|
|||||||
s.repo = repo
|
s.repo = repo
|
||||||
}
|
}
|
||||||
|
|
||||||
layerSize, err := s.getLayerSize(blob)
|
layerSize, err := s.getBlobUncompressedSize(blob, isCompressed)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, 0, err
|
return nil, 0, err
|
||||||
}
|
}
|
||||||
|
34
vendor/github.com/containers/image/pkg/docker/config/config.go
generated
vendored
34
vendor/github.com/containers/image/pkg/docker/config/config.go
generated
vendored
@ -32,9 +32,13 @@ var (
|
|||||||
dockerHomePath = filepath.FromSlash(".docker/config.json")
|
dockerHomePath = filepath.FromSlash(".docker/config.json")
|
||||||
dockerLegacyHomePath = ".dockercfg"
|
dockerLegacyHomePath = ".dockercfg"
|
||||||
|
|
||||||
|
enableKeyring = false
|
||||||
|
|
||||||
// ErrNotLoggedIn is returned for users not logged into a registry
|
// ErrNotLoggedIn is returned for users not logged into a registry
|
||||||
// that they are trying to logout of
|
// that they are trying to logout of
|
||||||
ErrNotLoggedIn = errors.New("not logged in")
|
ErrNotLoggedIn = errors.New("not logged in")
|
||||||
|
// ErrNotSupported is returned for unsupported methods
|
||||||
|
ErrNotSupported = errors.New("not supported")
|
||||||
)
|
)
|
||||||
|
|
||||||
// SetAuthentication stores the username and password in the auth.json file
|
// SetAuthentication stores the username and password in the auth.json file
|
||||||
@ -44,6 +48,18 @@ func SetAuthentication(sys *types.SystemContext, registry, username, password st
|
|||||||
return false, setAuthToCredHelper(ch, registry, username, password)
|
return false, setAuthToCredHelper(ch, registry, username, password)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Set the credentials to kernel keyring if enableKeyring is true.
|
||||||
|
// The keyring might not work in all environments (e.g., missing capability) and isn't supported on all platforms.
|
||||||
|
// Hence, we want to fall-back to using the authfile in case the keyring failed.
|
||||||
|
// However, if the enableKeyring is false, we want adhere to the user specification and not use the keyring.
|
||||||
|
if enableKeyring {
|
||||||
|
err := setAuthToKernelKeyring(registry, username, password)
|
||||||
|
if err == nil {
|
||||||
|
logrus.Debugf("credentials for (%s, %s) were stored in the kernel keyring\n", registry, username)
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
logrus.Debugf("failed to authenticate with the kernel keyring, falling back to authfiles. %v", err)
|
||||||
|
}
|
||||||
creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
|
creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
|
||||||
newCreds := dockerAuthConfig{Auth: creds}
|
newCreds := dockerAuthConfig{Auth: creds}
|
||||||
auths.AuthConfigs[registry] = newCreds
|
auths.AuthConfigs[registry] = newCreds
|
||||||
@ -60,6 +76,14 @@ func GetAuthentication(sys *types.SystemContext, registry string) (string, strin
|
|||||||
return sys.DockerAuthConfig.Username, sys.DockerAuthConfig.Password, nil
|
return sys.DockerAuthConfig.Username, sys.DockerAuthConfig.Password, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if enableKeyring {
|
||||||
|
username, password, err := getAuthFromKernelKeyring(registry)
|
||||||
|
if err == nil {
|
||||||
|
logrus.Debug("returning credentials from kernel keyring")
|
||||||
|
return username, password, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
dockerLegacyPath := filepath.Join(homedir.Get(), dockerLegacyHomePath)
|
dockerLegacyPath := filepath.Join(homedir.Get(), dockerLegacyHomePath)
|
||||||
var paths []string
|
var paths []string
|
||||||
pathToAuth, err := getPathToAuth(sys)
|
pathToAuth, err := getPathToAuth(sys)
|
||||||
@ -97,6 +121,16 @@ func RemoveAuthentication(sys *types.SystemContext, registry string) error {
|
|||||||
return false, deleteAuthFromCredHelper(ch, registry)
|
return false, deleteAuthFromCredHelper(ch, registry)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Next if keyring is enabled try kernel keyring
|
||||||
|
if enableKeyring {
|
||||||
|
err := deleteAuthFromKernelKeyring(registry)
|
||||||
|
if err == nil {
|
||||||
|
logrus.Debugf("credentials for %s were deleted from the kernel keyring", registry)
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
logrus.Debugf("failed to delete credentials from the kernel keyring, falling back to authfiles")
|
||||||
|
}
|
||||||
|
|
||||||
if _, ok := auths.AuthConfigs[registry]; ok {
|
if _, ok := auths.AuthConfigs[registry]; ok {
|
||||||
delete(auths.AuthConfigs, registry)
|
delete(auths.AuthConfigs, registry)
|
||||||
} else if _, ok := auths.AuthConfigs[normalizeRegistry(registry)]; ok {
|
} else if _, ok := auths.AuthConfigs[normalizeRegistry(registry)]; ok {
|
||||||
|
79
vendor/github.com/containers/image/pkg/docker/config/config_linux.go
generated
vendored
Normal file
79
vendor/github.com/containers/image/pkg/docker/config/config_linux.go
generated
vendored
Normal file
@ -0,0 +1,79 @@
|
|||||||
|
package config
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/containers/image/pkg/keyctl"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
func getAuthFromKernelKeyring(registry string) (string, string, error) {
|
||||||
|
userkeyring, err := keyctl.UserKeyring()
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
key, err := userkeyring.Search(genDescription(registry))
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
authData, err := key.Get()
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
parts := strings.SplitN(string(authData), "\x00", 2)
|
||||||
|
if len(parts) != 2 {
|
||||||
|
return "", "", nil
|
||||||
|
}
|
||||||
|
return parts[0], parts[1], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func deleteAuthFromKernelKeyring(registry string) error {
|
||||||
|
userkeyring, err := keyctl.UserKeyring()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
key, err := userkeyring.Search(genDescription(registry))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return key.Unlink()
|
||||||
|
}
|
||||||
|
|
||||||
|
func setAuthToKernelKeyring(registry, username, password string) error {
|
||||||
|
keyring, err := keyctl.SessionKeyring()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
id, err := keyring.Add(genDescription(registry), []byte(fmt.Sprintf("%s\x00%s", username, password)))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// sets all permission(view,read,write,search,link,set attribute) for current user
|
||||||
|
// it enables the user to search the key after it linked to user keyring and unlinked from session keyring
|
||||||
|
err = keyctl.SetPerm(id, keyctl.PermUserAll)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// link the key to userKeyring
|
||||||
|
userKeyring, err := keyctl.UserKeyring()
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "error getting user keyring")
|
||||||
|
}
|
||||||
|
err = keyctl.Link(userKeyring, id)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "error linking the key to user keyring")
|
||||||
|
}
|
||||||
|
// unlink the key from session keyring
|
||||||
|
err = keyctl.Unlink(keyring, id)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "error unlinking the key from session keyring")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func genDescription(registry string) string {
|
||||||
|
return fmt.Sprintf("container-registry-login:%s", registry)
|
||||||
|
}
|
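The new config_linux.go above keeps registry credentials in the kernel keyring through the vendored keyctl wrappers that follow. A self-contained sketch of the same round trip using golang.org/x/sys/unix directly (Linux only; the key description and payload here are made up for illustration):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Store "username\x00password" as a "user" key in the session keyring.
	desc := "container-registry-login:registry.example.test"
	id, err := unix.AddKey("user", desc, []byte("alice\x00secret"), unix.KEY_SPEC_SESSION_KEYRING)
	if err != nil {
		panic(err)
	}

	// Find the key again by its description.
	found, err := unix.KeyctlSearch(unix.KEY_SPEC_SESSION_KEYRING, "user", desc, 0)
	if err != nil {
		panic(err)
	}

	// Read the payload back out.
	buf := make([]byte, 512)
	n, err := unix.KeyctlBuffer(unix.KEYCTL_READ, found, buf, 0)
	if err != nil {
		panic(err)
	}
	fmt.Printf("key %d holds %q\n", id, buf[:n])
}
```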
16
vendor/github.com/containers/image/pkg/docker/config/config_unsupported.go
generated
vendored
Normal file
16
vendor/github.com/containers/image/pkg/docker/config/config_unsupported.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
// +build !linux
|
||||||
|
// +build !386 !amd64
|
||||||
|
|
||||||
|
package config
|
||||||
|
|
||||||
|
func getAuthFromKernelKeyring(registry string) (string, string, error) {
|
||||||
|
return "", "", ErrNotSupported
|
||||||
|
}
|
||||||
|
|
||||||
|
func deleteAuthFromKernelKeyring(registry string) error {
|
||||||
|
return ErrNotSupported
|
||||||
|
}
|
||||||
|
|
||||||
|
func setAuthToKernelKeyring(registry, username, password string) error {
|
||||||
|
return ErrNotSupported
|
||||||
|
}
|
64
vendor/github.com/containers/image/pkg/keyctl/key.go
generated
vendored
Normal file
64
vendor/github.com/containers/image/pkg/keyctl/key.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
|||||||
|
// Copyright 2015 Jesse Sipprell. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package keyctl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Key represents a single key linked to one or more kernel keyrings.
|
||||||
|
type Key struct {
|
||||||
|
Name string
|
||||||
|
|
||||||
|
id, ring keyID
|
||||||
|
size int
|
||||||
|
}
|
||||||
|
|
||||||
|
// ID returns the 32-bit kernel identifier for a specific key
|
||||||
|
func (k *Key) ID() int32 {
|
||||||
|
return int32(k.id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the key's value as a byte slice
|
||||||
|
func (k *Key) Get() ([]byte, error) {
|
||||||
|
var (
|
||||||
|
b []byte
|
||||||
|
err error
|
||||||
|
sizeRead int
|
||||||
|
)
|
||||||
|
|
||||||
|
if k.size == 0 {
|
||||||
|
k.size = 512
|
||||||
|
}
|
||||||
|
|
||||||
|
size := k.size
|
||||||
|
|
||||||
|
b = make([]byte, int(size))
|
||||||
|
sizeRead = size + 1
|
||||||
|
for sizeRead > size {
|
||||||
|
r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, int(k.id), b, size)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if sizeRead = int(r1); sizeRead > size {
|
||||||
|
b = make([]byte, sizeRead)
|
||||||
|
size = sizeRead
|
||||||
|
sizeRead = size + 1
|
||||||
|
} else {
|
||||||
|
k.size = sizeRead
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return b[:k.size], err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unlink a key from the keyring it was loaded from (or added to). If the key
|
||||||
|
// is not linked to any other keyrings, it is destroyed.
|
||||||
|
func (k *Key) Unlink() error {
|
||||||
|
_, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(k.id), int(k.ring), 0, 0)
|
||||||
|
return err
|
||||||
|
}
|
79
vendor/github.com/containers/image/pkg/keyctl/keyring.go
generated
vendored
Normal file
79
vendor/github.com/containers/image/pkg/keyctl/keyring.go
generated
vendored
Normal file
@ -0,0 +1,79 @@
|
|||||||
|
// Copyright 2015 Jesse Sipprell. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
// Package keyctl is a Go interface to linux kernel keyrings (keyctl interface)
|
||||||
|
//
|
||||||
|
// Deprecated: Most callers should use either golang.org/x/sys/unix directly,
|
||||||
|
// or the original (and more extensive) github.com/jsipprell/keyctl .
|
||||||
|
package keyctl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Keyring is the basic interface to a linux keyctl keyring.
|
||||||
|
type Keyring interface {
|
||||||
|
ID
|
||||||
|
Add(string, []byte) (*Key, error)
|
||||||
|
Search(string) (*Key, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type keyring struct {
|
||||||
|
id keyID
|
||||||
|
}
|
||||||
|
|
||||||
|
// ID is unique 32-bit serial number identifiers for all Keys and Keyrings have.
|
||||||
|
type ID interface {
|
||||||
|
ID() int32
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add a new key to a keyring. The key can be searched for later by name.
|
||||||
|
func (kr *keyring) Add(name string, key []byte) (*Key, error) {
|
||||||
|
r, err := unix.AddKey("user", name, key, int(kr.id))
|
||||||
|
if err == nil {
|
||||||
|
key := &Key{Name: name, id: keyID(r), ring: kr.id}
|
||||||
|
return key, nil
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Search for a key by name, this also searches child keyrings linked to this
|
||||||
|
// one. The key, if found, is linked to the top keyring that Search() was called
|
||||||
|
// from.
|
||||||
|
func (kr *keyring) Search(name string) (*Key, error) {
|
||||||
|
id, err := unix.KeyctlSearch(int(kr.id), "user", name, 0)
|
||||||
|
if err == nil {
|
||||||
|
return &Key{Name: name, id: keyID(id), ring: kr.id}, nil
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ID returns the 32-bit kernel identifier of a keyring
|
||||||
|
func (kr *keyring) ID() int32 {
|
||||||
|
return int32(kr.id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SessionKeyring returns the current login session keyring
|
||||||
|
func SessionKeyring() (Keyring, error) {
|
||||||
|
return newKeyring(unix.KEY_SPEC_SESSION_KEYRING)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserKeyring returns the keyring specific to the current user.
|
||||||
|
func UserKeyring() (Keyring, error) {
|
||||||
|
return newKeyring(unix.KEY_SPEC_USER_KEYRING)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unlink an object from a keyring
|
||||||
|
func Unlink(parent Keyring, child ID) error {
|
||||||
|
_, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(child.ID()), int(parent.ID()), 0, 0)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Link a key into a keyring
|
||||||
|
func Link(parent Keyring, child ID) error {
|
||||||
|
_, err := unix.KeyctlInt(unix.KEYCTL_LINK, int(child.ID()), int(parent.ID()), 0, 0)
|
||||||
|
return err
|
||||||
|
}
|
33
vendor/github.com/containers/image/pkg/keyctl/perm.go
generated
vendored
Normal file
33
vendor/github.com/containers/image/pkg/keyctl/perm.go
generated
vendored
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
// Copyright 2015 Jesse Sipprell. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package keyctl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// KeyPerm represents in-kernel access control permission to keys and keyrings
|
||||||
|
// as a 32-bit integer broken up into four permission sets, one per byte.
|
||||||
|
// In MSB order, the perms are: Processor, User, Group, Other.
|
||||||
|
type KeyPerm uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
// PermOtherAll sets all permission for Other
|
||||||
|
PermOtherAll KeyPerm = 0x3f << (8 * iota)
|
||||||
|
// PermGroupAll sets all permission for Group
|
||||||
|
PermGroupAll
|
||||||
|
// PermUserAll sets all permission for User
|
||||||
|
PermUserAll
|
||||||
|
// PermProcessAll sets all permission for Processor
|
||||||
|
PermProcessAll
|
||||||
|
)
|
||||||
|
|
||||||
|
// SetPerm sets the permissions on a key or keyring.
|
||||||
|
func SetPerm(k ID, p KeyPerm) error {
|
||||||
|
err := unix.KeyctlSetperm(int(k.ID()), uint32(p))
|
||||||
|
return err
|
||||||
|
}
|
25
vendor/github.com/containers/image/pkg/keyctl/sys_linux.go
generated
vendored
Normal file
25
vendor/github.com/containers/image/pkg/keyctl/sys_linux.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
// Copyright 2015 Jesse Sipprell. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package keyctl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
type keyID int32
|
||||||
|
|
||||||
|
func newKeyring(id keyID) (*keyring, error) {
|
||||||
|
r1, err := unix.KeyctlGetKeyringID(int(id), true)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if id < 0 {
|
||||||
|
r1 = int(id)
|
||||||
|
}
|
||||||
|
return &keyring{id: keyID(r1)}, nil
|
||||||
|
}
|
103
vendor/github.com/containers/image/pkg/sysregistries/system_registries.go
generated
vendored
103
vendor/github.com/containers/image/pkg/sysregistries/system_registries.go
generated
vendored
@ -1,103 +0,0 @@
|
|||||||
package sysregistries
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/BurntSushi/toml"
|
|
||||||
"github.com/containers/image/types"
|
|
||||||
"io/ioutil"
|
|
||||||
"path/filepath"
|
|
||||||
)
|
|
||||||
|
|
||||||
// systemRegistriesConfPath is the path to the system-wide registry configuration file
|
|
||||||
// and is used to add/subtract potential registries for obtaining images.
|
|
||||||
// You can override this at build time with
|
|
||||||
// -ldflags '-X github.com/containers/image/sysregistries.systemRegistriesConfPath=$your_path'
|
|
||||||
var systemRegistriesConfPath = builtinRegistriesConfPath
|
|
||||||
|
|
||||||
// builtinRegistriesConfPath is the path to registry configuration file
|
|
||||||
// DO NOT change this, instead see systemRegistriesConfPath above.
|
|
||||||
const builtinRegistriesConfPath = "/etc/containers/registries.conf"
|
|
||||||
|
|
||||||
type registries struct {
|
|
||||||
Registries []string `toml:"registries"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type tomlConfig struct {
|
|
||||||
Registries struct {
|
|
||||||
Search registries `toml:"search"`
|
|
||||||
Insecure registries `toml:"insecure"`
|
|
||||||
Block registries `toml:"block"`
|
|
||||||
} `toml:"registries"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// normalizeRegistries removes trailing slashes from registries, which is a
|
|
||||||
// common pitfall when configuring registries (e.g., "docker.io/library/).
|
|
||||||
func normalizeRegistries(regs *registries) {
|
|
||||||
for i := range regs.Registries {
|
|
||||||
regs.Registries[i] = strings.TrimRight(regs.Registries[i], "/")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reads the global registry file from the filesystem. Returns
|
|
||||||
// a byte array
|
|
||||||
func readRegistryConf(sys *types.SystemContext) ([]byte, error) {
|
|
||||||
return ioutil.ReadFile(RegistriesConfPath(sys))
|
|
||||||
}
|
|
||||||
|
|
||||||
// For mocking in unittests
|
|
||||||
var readConf = readRegistryConf
|
|
||||||
|
|
||||||
// Loads the registry configuration file from the filesystem and
|
|
||||||
// then unmarshals it. Returns the unmarshalled object.
|
|
||||||
func loadRegistryConf(sys *types.SystemContext) (*tomlConfig, error) {
|
|
||||||
config := &tomlConfig{}
|
|
||||||
|
|
||||||
configBytes, err := readConf(sys)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = toml.Unmarshal(configBytes, &config)
|
|
||||||
normalizeRegistries(&config.Registries.Search)
|
|
||||||
normalizeRegistries(&config.Registries.Insecure)
|
|
||||||
normalizeRegistries(&config.Registries.Block)
|
|
||||||
return config, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRegistries returns an array of strings that contain the names
|
|
||||||
// of the registries as defined in the system-wide
|
|
||||||
// registries file. it returns an empty array if none are
|
|
||||||
// defined
|
|
||||||
func GetRegistries(sys *types.SystemContext) ([]string, error) {
|
|
||||||
config, err := loadRegistryConf(sys)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return config.Registries.Search.Registries, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetInsecureRegistries returns an array of strings that contain the names
|
|
||||||
// of the insecure registries as defined in the system-wide
|
|
||||||
// registries file. it returns an empty array if none are
|
|
||||||
// defined
|
|
||||||
func GetInsecureRegistries(sys *types.SystemContext) ([]string, error) {
|
|
||||||
config, err := loadRegistryConf(sys)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return config.Registries.Insecure.Registries, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RegistriesConfPath is the path to the system-wide registry configuration file
|
|
||||||
func RegistriesConfPath(ctx *types.SystemContext) string {
|
|
||||||
path := systemRegistriesConfPath
|
|
||||||
if ctx != nil {
|
|
||||||
if ctx.SystemRegistriesConfPath != "" {
|
|
||||||
path = ctx.SystemRegistriesConfPath
|
|
||||||
} else if ctx.RootForImplicitAbsolutePaths != "" {
|
|
||||||
path = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return path
|
|
||||||
}
|
|
28 vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go generated vendored

@@ -303,9 +303,8 @@ func (config *V2RegistriesConf) postProcess() error {
 	return nil
 }
 
-// getConfigPath returns the system-registries config path if specified.
-// Otherwise, systemRegistriesConfPath is returned.
-func getConfigPath(ctx *types.SystemContext) string {
+// ConfigPath returns the path to the system-wide registry configuration file.
+func ConfigPath(ctx *types.SystemContext) string {
 	confPath := systemRegistriesConfPath
 	if ctx != nil {
 		if ctx.SystemRegistriesConfPath != "" {
@@ -336,14 +335,27 @@ func InvalidateCache() {
 
 // getConfig returns the config object corresponding to ctx, loading it if it is not yet cached.
 func getConfig(ctx *types.SystemContext) (*V2RegistriesConf, error) {
-	configPath := getConfigPath(ctx)
+	configPath := ConfigPath(ctx)
 
+	configMutex.Lock()
+	// if the config has already been loaded, return the cached registries
+	if config, inCache := configCache[configPath]; inCache {
+		configMutex.Unlock()
+		return config, nil
+	}
+	configMutex.Unlock()
+
+	return TryUpdatingCache(ctx)
+}
+
+// TryUpdatingCache loads the configuration from the provided `SystemContext`
+// without using the internal cache. On success, the loaded configuration will
+// be added into the internal registry cache.
+func TryUpdatingCache(ctx *types.SystemContext) (*V2RegistriesConf, error) {
+	configPath := ConfigPath(ctx)
+
 	configMutex.Lock()
 	defer configMutex.Unlock()
-	// if the config has already been loaded, return the cached registries
-	if config, inCache := configCache[configPath]; inCache {
-		return config, nil
-	}
 
 	// load the config
 	config, err := loadRegistryConf(configPath)

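The hunks above rename getConfigPath to the exported ConfigPath and split cache handling into getConfig plus the new TryUpdatingCache, which reloads registries.conf and refreshes the internal cache. A rough sketch of how a caller might use the two new exported entry points; the snippet itself is illustrative and not taken from this commit:

package main

import (
	"fmt"

	"github.com/containers/image/pkg/sysregistriesv2"
	"github.com/containers/image/types"
)

func main() {
	ctx := &types.SystemContext{}
	// ConfigPath resolves the registries.conf location for this context.
	fmt.Println("config:", sysregistriesv2.ConfigPath(ctx))
	// TryUpdatingCache re-reads the file and refreshes the registry cache.
	if _, err := sysregistriesv2.TryUpdatingCache(ctx); err != nil {
		fmt.Println("reload failed:", err)
	}
}
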
2 vendor/github.com/containers/image/signature/policy_types.go generated vendored

@@ -6,7 +6,7 @@
 
 package signature
 
-// NOTE: Keep this in sync with docs/policy.json.md!
+// NOTE: Keep this in sync with docs/containers-policy.json.5.md!
 
 // Policy defines requirements for considering a signature, or an image, valid.
 type Policy struct {

2 vendor/github.com/containers/image/transports/alltransports/alltransports.go generated vendored

@@ -4,7 +4,7 @@ import (
 	"strings"
 
 	// register all known transports
-	// NOTE: Make sure docs/policy.json.md is updated when adding or updating
+	// NOTE: Make sure docs/containers-policy.json.5.md is updated when adding or updating
 	// a transport.
 	_ "github.com/containers/image/directory"
 	_ "github.com/containers/image/docker"

4 vendor/github.com/containers/image/version/version.go generated vendored

@@ -4,11 +4,11 @@ import "fmt"
 
 const (
 	// VersionMajor is for an API incompatible changes
-	VersionMajor = 2
+	VersionMajor = 3
 	// VersionMinor is for functionality in a backwards-compatible manner
 	VersionMinor = 0
 	// VersionPatch is for backwards-compatible bug fixes
-	VersionPatch = 1
+	VersionPatch = 2
 
 	// VersionDev indicates development branch. Releases will be empty string.
 	VersionDev = ""

18 vendor/github.com/containers/storage/Makefile generated vendored

@@ -27,10 +27,16 @@ PACKAGE := github.com/containers/storage
 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
 GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
 EPOCH_TEST_COMMIT := 0418ebf59f9e1f564831c0ba9378b7f8e40a1c73
-NATIVETAGS := exclude_graphdriver_devicemapper exclude_graphdriver_btrfs exclude_graphdriver_overlay
+NATIVETAGS :=
 AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh) $(shell ./hack/ostree_tag.sh)
 BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS)
-GO := go
+GO ?= go
 
+GO_BUILD=$(GO) build
+# Go module support: set `-mod=vendor` to use the vendored sources
+ifeq ($(shell $(GO) help mod >/dev/null 2>&1 && echo true), true)
+	GO_BUILD=GO111MODULE=on $(GO) build -mod=vendor
+endif
+
 RUNINVM := vagrant/runinvm.sh
 FFJSON := tests/tools/build/ffjson
@@ -43,7 +49,7 @@ clean: ## remove all built files
 sources := $(wildcard *.go cmd/containers-storage/*.go drivers/*.go drivers/*/*.go pkg/*/*.go pkg/*/*/*.go) layers_ffjson.go images_ffjson.go containers_ffjson.go pkg/archive/archive_ffjson.go
 
 containers-storage: $(sources) ## build using gc on the host
-	$(GO) build -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
+	$(GO_BUILD) -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
 
 layers_ffjson.go: layers.go
 	$(RM) $@
@@ -64,15 +70,15 @@ pkg/archive/archive_ffjson.go: pkg/archive/archive.go
 binary local-binary: containers-storage
 
 local-gccgo: ## build using gccgo on the host
-	GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage
+	GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO_BUILD) -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage
 
 local-cross: ## cross build the binaries for arm, darwin, and\nfreebsd
 	@for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le darwin/amd64 windows/amd64 ; do \
 		os=`echo $${target} | cut -f1 -d/` ; \
 		arch=`echo $${target} | cut -f2 -d/` ; \
 		suffix=$${os}.$${arch} ; \
-		echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage ; \
-		env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage || exit 1 ; \
+		echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO_BUILD) -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage ; \
+		env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO_BUILD) -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage || exit 1 ; \
 	done
 
 cross: ## cross build the binaries for arm, darwin, and\nfreebsd using VMs

vendor/github.com/containers/storage/VERSION
generated
vendored
2
vendor/github.com/containers/storage/VERSION
generated
vendored
@ -1 +1 @@
|
|||||||
1.12.16
|
1.13.1
|
||||||
|
2 vendor/github.com/containers/storage/drivers/btrfs/btrfs.go generated vendored

@@ -1,4 +1,4 @@
-// +build linux
+// +build linux,cgo
 
 package btrfs
 

2 vendor/github.com/containers/storage/drivers/btrfs/version.go generated vendored

@@ -1,4 +1,4 @@
-// +build linux,!btrfs_noversion
+// +build linux,!btrfs_noversion,cgo
 
 package btrfs
 

2 vendor/github.com/containers/storage/drivers/btrfs/version_none.go generated vendored

@@ -1,4 +1,4 @@
-// +build linux,btrfs_noversion
+// +build !linux btrfs_noversion !cgo
 
 package btrfs
 

2 vendor/github.com/containers/storage/drivers/devmapper/device_setup.go generated vendored

@@ -1,3 +1,5 @@
+// +build linux,cgo
+
 package devmapper
 
 import (

2 vendor/github.com/containers/storage/drivers/devmapper/deviceset.go generated vendored

@@ -1,4 +1,4 @@
-// +build linux
+// +build linux,cgo
 
 package devmapper
 

2 vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go generated vendored

@@ -1,3 +1,5 @@
+// +build linux,cgo
+
 package devmapper
 
 // Definition of struct dm_task and sub structures (from lvm2)

2 vendor/github.com/containers/storage/drivers/devmapper/driver.go generated vendored

@@ -1,4 +1,4 @@
-// +build linux
+// +build linux,cgo
 
 package devmapper
 

2 vendor/github.com/containers/storage/drivers/devmapper/mount.go generated vendored

@@ -1,4 +1,4 @@
-// +build linux
+// +build linux,cgo
 
 package devmapper
 

1 vendor/github.com/containers/storage/drivers/fsdiff.go generated vendored

@@ -82,6 +82,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare
 		}), nil
 	}
 
+	options.Options = append(options.Options, "ro")
 	parentFs, err := driver.Get(parent, options)
 	if err != nil {
 		return nil, err

53 vendor/github.com/containers/storage/drivers/overlay/overlay.go generated vendored

@@ -322,7 +322,7 @@ func parseOptions(options []string) (*overlayOptions, error) {
 				return nil, fmt.Errorf("overlay: ostree_repo specified but support for ostree is missing")
 			}
 			o.ostreeRepo = val
-		case "overlay2.ignore_chown_errors", "overlay.ignore_chown_errors":
+		case ".ignore_chown_errors", "overlay2.ignore_chown_errors", "overlay.ignore_chown_errors":
 			logrus.Debugf("overlay: ignore_chown_errors=%s", val)
 			o.ignoreChownErrors, err = strconv.ParseBool(val)
 			if err != nil {
@@ -839,8 +839,17 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 	if _, err := os.Stat(dir); err != nil {
 		return "", err
 	}
+	readWrite := true
+	// fuse-overlayfs doesn't support working without an upperdir.
+	if d.options.mountProgram == "" {
+		for _, o := range options.Options {
+			if o == "ro" {
+				readWrite = false
+				break
+			}
+		}
+	}
 
-	diffDir := path.Join(dir, "diff")
 	lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile))
 	if err != nil && !os.IsNotExist(err) {
 		return "", err
@@ -911,16 +920,32 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 
 	// If the lowers list is still empty, use an empty lower so that we can still force an
 	// SELinux context for the mount.
 
+	// if we are doing a readOnly mount, and there is only one lower
+	// We should just return the lower directory, no reason to mount.
+	if !readWrite {
+		if len(absLowers) == 0 {
+			return path.Join(dir, "empty"), nil
+		}
+		if len(absLowers) == 1 {
+			return absLowers[0], nil
+		}
+	}
 	if len(absLowers) == 0 {
 		absLowers = append(absLowers, path.Join(dir, "empty"))
 		relLowers = append(relLowers, path.Join(id, "empty"))
 	}
 
 	// user namespace requires this to move a directory from lower to upper.
 	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
 	if err != nil {
 		return "", err
 	}
+	diffDir := path.Join(dir, "diff")
+	if readWrite {
+		if err := idtools.MkdirAllAs(diffDir, 0755, rootUID, rootGID); err != nil && !os.IsExist(err) {
+			return "", err
+		}
+	}
 
 	mergedDir := path.Join(dir, "merged")
 	// Create the driver merged dir
@@ -940,8 +965,12 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 		}
 	}()
 
-	workDir := path.Join(dir, "work")
-	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workDir)
+	var opts string
+	if readWrite {
+		opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, path.Join(dir, "work"))
+	} else {
+		opts = fmt.Sprintf("lowerdir=%s", strings.Join(absLowers, ":"))
+	}
 	if len(options.Options) > 0 {
 		opts = fmt.Sprintf("%s,%s", strings.Join(options.Options, ","), opts)
 	} else if d.options.mountOptions != "" {
@@ -979,7 +1008,12 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 		}
 	} else if len(mountData) > pageSize {
 		//FIXME: We need to figure out to get this to work with additional stores
-		opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), path.Join(id, "diff"), path.Join(id, "work"))
+		if readWrite {
+			diffDir := path.Join(id, "diff")
+			opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), diffDir, path.Join(id, "work"))
+		} else {
+			opts = fmt.Sprintf("lowerdir=%s", strings.Join(absLowers, ":"))
+		}
 		mountData = label.FormatMountLabel(opts, options.MountLabel)
 		if len(mountData) > pageSize {
 			return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData))
@@ -995,11 +1029,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 		return "", fmt.Errorf("error creating overlay mount to %s: %v", mountTarget, err)
 	}
 
-	// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
-	if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
-		return "", err
-	}
-
 	return mergedDir, nil
 }
 
@@ -1018,7 +1047,7 @@ func (d *Driver) Put(id string) error {
 	if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) {
 		return err
 	}
-	if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
+	if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil && !os.IsNotExist(err) {
 		logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)
 	}
 

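The readWrite branch added above only puts upperdir and workdir into the overlay mount string for writable mounts; read-only mounts get a plain lowerdir list. A self-contained sketch of that option-string construction, where the helper name and sample paths are made up for illustration:

package main

import (
	"fmt"
	"strings"
)

// buildOverlayOpts mirrors the branch added above: a read-only mount only
// needs lowerdir, while a writable mount also needs upperdir and workdir.
func buildOverlayOpts(readWrite bool, absLowers []string, diffDir, workDir string) string {
	if readWrite {
		return fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workDir)
	}
	return fmt.Sprintf("lowerdir=%s", strings.Join(absLowers, ":"))
}

func main() {
	lowers := []string{"/storage/overlay/l/AAA", "/storage/overlay/l/BBB"}
	fmt.Println(buildOverlayOpts(false, lowers, "", ""))
	fmt.Println(buildOverlayOpts(true, lowers, "/layer/diff", "/layer/work"))
}
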
2 vendor/github.com/containers/storage/drivers/quota/projectquota.go generated vendored

@@ -1,4 +1,4 @@
-// +build linux,!exclude_disk_quota
+// +build linux,!exclude_disk_quota,cgo
 
 //
 // projectquota.go - implements XFS project quota controls

2 vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go generated vendored

@@ -1,4 +1,4 @@
-// +build linux,exclude_disk_quota
+// +build !linux exclude_disk_quota !cgo
 
 package quota
 

2 vendor/github.com/containers/storage/drivers/register/register_devicemapper.go generated vendored

@@ -1,4 +1,4 @@
-// +build !exclude_graphdriver_devicemapper,linux
+// +build !exclude_graphdriver_devicemapper,linux,cgo
 
 package register
 

2 vendor/github.com/containers/storage/drivers/register/register_overlay.go generated vendored

@@ -1,4 +1,4 @@
-// +build !exclude_graphdriver_overlay,linux
+// +build !exclude_graphdriver_overlay,linux,cgo
 
 package register
 

12 vendor/github.com/containers/storage/drivers/vfs/driver.go generated vendored

@@ -58,7 +58,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 			d.ostreeRepo = val
 		case "vfs.mountopt":
 			return nil, fmt.Errorf("vfs driver does not support mount options")
-		case "vfs.ignore_chown_errors":
+		case ".ignore_chown_errors", "vfs.ignore_chown_errors":
 			logrus.Debugf("vfs: ignore_chown_errors=%s", val)
 			var err error
 			d.ignoreChownErrors, err = strconv.ParseBool(val)
@@ -226,7 +226,15 @@ func (d *Driver) Remove(id string) error {
 // Get returns the directory for the given id.
 func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {
 	dir := d.dir(id)
-	if len(options.Options) > 0 {
+	switch len(options.Options) {
+	case 0:
+	case 1:
+		if options.Options[0] == "ro" {
+			// ignore "ro" option
+			break
+		}
+		fallthrough
+	default:
 		return "", fmt.Errorf("vfs driver does not support mount options")
 	}
 	if st, err := os.Stat(dir); err != nil {

10 vendor/github.com/containers/storage/ffjson_deps.go generated vendored Normal file

@@ -0,0 +1,10 @@
+package storage
+
+// NOTE: this is a hack to trick go modules into vendoring the below
+// dependencies. Those are required during ffjson generation
+// but do NOT end up in the final file.
+
+import (
+	_ "github.com/pquerna/ffjson/inception" // nolint:typecheck
+	_ "github.com/pquerna/ffjson/shared" // nolint:typecheck
+)

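The new ffjson_deps.go relies on blank imports purely so that `go mod vendor` keeps packages that are only needed while regenerating the ffjson output. A sketch of the same idiom in its more common tools.go form; the build tag and package name here are illustrative, not part of the vendored file above:

// +build tools

// Blank-importing a package records it in the module graph even though no
// code in the final build calls it, so `go mod vendor` does not prune it.
// The "tools" build tag keeps this file out of normal builds.
package tools

import (
	_ "github.com/pquerna/ffjson/inception"
	_ "github.com/pquerna/ffjson/shared"
)
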
2 vendor/github.com/containers/storage/go.mod generated vendored

@@ -1,7 +1,5 @@
 module github.com/containers/storage
 
-go 1.12
-
 require (
 	github.com/BurntSushi/toml v0.3.1
 	github.com/DataDog/zstd v1.4.0 // indirect

2 vendor/github.com/containers/storage/layers.go generated vendored

@@ -1,7 +1,6 @@
 package storage
 
 import (
-	"archive/tar"
 	"bytes"
 	"encoding/json"
 	"fmt"
@@ -27,6 +26,7 @@ import (
 	digest "github.com/opencontainers/go-digest"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
+	"github.com/vbatts/tar-split/archive/tar"
 	"github.com/vbatts/tar-split/tar/asm"
 	"github.com/vbatts/tar-split/tar/storage"
 )

11 vendor/github.com/containers/storage/pkg/idtools/idtools.go generated vendored

@@ -10,6 +10,7 @@ import (
 	"strings"
 	"syscall"
 
+	"github.com/containers/storage/pkg/system"
 	"github.com/pkg/errors"
 )
 
@@ -296,9 +297,19 @@ func checkChownErr(err error, name string, uid, gid int) error {
 }
 
 func SafeChown(name string, uid, gid int) error {
+	if stat, statErr := system.Stat(name); statErr == nil {
+		if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) {
+			return nil
+		}
+	}
 	return checkChownErr(os.Chown(name, uid, gid), name, uid, gid)
 }
 
 func SafeLchown(name string, uid, gid int) error {
+	if stat, statErr := system.Lstat(name); statErr == nil {
+		if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) {
+			return nil
+		}
+	}
 	return checkChownErr(os.Lchown(name, uid, gid), name, uid, gid)
 }

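SafeChown and SafeLchown now stat the target first (via pkg/system) and return early when ownership already matches, so redundant chown calls are skipped. A rough standard-library equivalent of that guard, for illustration only and Linux-specific:

package main

import (
	"fmt"
	"os"
	"syscall"
)

// chownIfNeeded changes ownership only when the file is not already owned
// by uid:gid, mirroring the early return added to SafeChown above.
func chownIfNeeded(name string, uid, gid int) error {
	var st syscall.Stat_t
	if err := syscall.Stat(name, &st); err == nil {
		if st.Uid == uint32(uid) && st.Gid == uint32(gid) {
			return nil // already owned correctly, nothing to do
		}
	}
	return os.Chown(name, uid, gid)
}

func main() {
	if err := chownIfNeeded("/tmp", os.Getuid(), os.Getgid()); err != nil {
		fmt.Println("chown failed:", err)
	}
}
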
2 vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go generated vendored

@@ -1,4 +1,4 @@
-// +build linux
+// +build linux,cgo
 
 package loopback
 

2 vendor/github.com/containers/storage/pkg/loopback/ioctl.go generated vendored

@@ -1,4 +1,4 @@
-// +build linux
+// +build linux,cgo
 
 package loopback
 

2 vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go generated vendored

@@ -1,4 +1,4 @@
-// +build linux
+// +build linux,cgo
 
 package loopback
 

2 vendor/github.com/containers/storage/pkg/loopback/loopback.go generated vendored

@@ -1,4 +1,4 @@
-// +build linux
+// +build linux,cgo
 
 package loopback
 

1 vendor/github.com/containers/storage/pkg/loopback/loopback_unsupported.go generated vendored Normal file

@@ -0,0 +1 @@
+package loopback

2 vendor/github.com/containers/storage/pkg/ostree/no_ostree.go generated vendored

@@ -1,4 +1,4 @@
-// +build !ostree
+// +build !ostree !cgo
 
 package ostree
 

2 vendor/github.com/containers/storage/pkg/ostree/ostree.go generated vendored

@@ -1,4 +1,4 @@
-// +build ostree
+// +build ostree,cgo
 
 package ostree
 

48 vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go generated vendored

@@ -1,47 +1,69 @@
 package tarlog
 
 import (
-	"archive/tar"
 	"io"
-	"os"
 	"sync"
 
-	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"github.com/vbatts/tar-split/archive/tar"
 )
 
 type tarLogger struct {
-	writer *os.File
-	wg     sync.WaitGroup
+	writer     *io.PipeWriter
+	closeMutex *sync.Mutex
+	stateMutex *sync.Mutex
+	closed     bool
 }
 
 // NewLogger returns a writer that, when a tar archive is written to it, calls
 // `logger` for each file header it encounters in the archive.
 func NewLogger(logger func(*tar.Header)) (io.WriteCloser, error) {
-	reader, writer, err := os.Pipe()
-	if err != nil {
-		return nil, errors.Wrapf(err, "error creating pipe for tar logger")
+	reader, writer := io.Pipe()
+	t := &tarLogger{
+		writer:     writer,
+		closeMutex: new(sync.Mutex),
+		stateMutex: new(sync.Mutex),
+		closed:     false,
 	}
-	t := &tarLogger{writer: writer}
 	tr := tar.NewReader(reader)
-	t.wg.Add(1)
+	tr.RawAccounting = true
+	t.closeMutex.Lock()
 	go func() {
 		hdr, err := tr.Next()
 		for err == nil {
 			logger(hdr)
 			hdr, err = tr.Next()
 
 		}
-		reader.Close()
-		t.wg.Done()
+		// Make sure to avoid writes after the reader has been closed.
+		t.stateMutex.Lock()
+		t.closed = true
+		if err := reader.Close(); err != nil {
+			logrus.Errorf("error closing tarlogger reader: %v", err)
+		}
+		t.stateMutex.Unlock()
+		// Unblock the Close().
+		t.closeMutex.Unlock()
 	}()
 	return t, nil
 }
 
 func (t *tarLogger) Write(b []byte) (int, error) {
+	t.stateMutex.Lock()
+	if t.closed {
+		// We cannot use os.Pipe() as this alters the tar's digest. Using
+		// io.Pipe() requires this workaround as it does not allow for writes
+		// after close.
+		t.stateMutex.Unlock()
+		return len(b), nil
+	}
+	t.stateMutex.Unlock()
 	return t.writer.Write(b)
 }
 
 func (t *tarLogger) Close() error {
 	err := t.writer.Close()
-	t.wg.Wait()
+	// Wait for the reader to finish.
+	t.closeMutex.Lock()
 	return err
 }

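The rewritten tarlogger replaces os.Pipe with io.Pipe so that logging no longer alters the tar stream's digest, and it tracks a closed flag so late writes are silently dropped. A small, illustrative use of the NewLogger API as it looks after this change; the archive contents and variable names are made up for the example:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"

	"github.com/containers/storage/pkg/tarlog"
	tsar "github.com/vbatts/tar-split/archive/tar"
)

func main() {
	// Build a tiny in-memory archive with the standard library.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0644, Size: 5})
	tw.Write([]byte("hello"))
	tw.Close()

	// NewLogger calls the callback for every header seen in the stream.
	var names []string
	logger, err := tarlog.NewLogger(func(h *tsar.Header) {
		names = append(names, h.Name)
	})
	if err != nil {
		panic(err)
	}
	io.Copy(logger, &buf)
	logger.Close()
	fmt.Println("entries:", names)
}
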
323 vendor/github.com/pquerna/ffjson/inception/decoder.go generated vendored Normal file
@ -0,0 +1,323 @@
|
|||||||
|
/**
|
||||||
|
* Copyright 2014 Paul Querna
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ffjsoninception
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/pquerna/ffjson/shared"
|
||||||
|
)
|
||||||
|
|
||||||
|
var validValues []string = []string{
|
||||||
|
"FFTok_left_brace",
|
||||||
|
"FFTok_left_bracket",
|
||||||
|
"FFTok_integer",
|
||||||
|
"FFTok_double",
|
||||||
|
"FFTok_string",
|
||||||
|
"FFTok_bool",
|
||||||
|
"FFTok_null",
|
||||||
|
}
|
||||||
|
|
||||||
|
func CreateUnmarshalJSON(ic *Inception, si *StructInfo) error {
|
||||||
|
out := ""
|
||||||
|
ic.OutputImports[`fflib "github.com/pquerna/ffjson/fflib/v1"`] = true
|
||||||
|
if len(si.Fields) > 0 {
|
||||||
|
ic.OutputImports[`"bytes"`] = true
|
||||||
|
}
|
||||||
|
ic.OutputImports[`"fmt"`] = true
|
||||||
|
|
||||||
|
out += tplStr(decodeTpl["header"], header{
|
||||||
|
IC: ic,
|
||||||
|
SI: si,
|
||||||
|
})
|
||||||
|
|
||||||
|
out += tplStr(decodeTpl["ujFunc"], ujFunc{
|
||||||
|
SI: si,
|
||||||
|
IC: ic,
|
||||||
|
ValidValues: validValues,
|
||||||
|
ResetFields: ic.ResetFields,
|
||||||
|
})
|
||||||
|
|
||||||
|
ic.OutputFuncs = append(ic.OutputFuncs, out)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleField(ic *Inception, name string, typ reflect.Type, ptr bool, quoted bool) string {
|
||||||
|
return handleFieldAddr(ic, name, false, typ, ptr, quoted)
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleFieldAddr(ic *Inception, name string, takeAddr bool, typ reflect.Type, ptr bool, quoted bool) string {
|
||||||
|
out := fmt.Sprintf("/* handler: %s type=%v kind=%v quoted=%t*/\n", name, typ, typ.Kind(), quoted)
|
||||||
|
|
||||||
|
umlx := typ.Implements(unmarshalFasterType) || typeInInception(ic, typ, shared.MustDecoder)
|
||||||
|
umlx = umlx || reflect.PtrTo(typ).Implements(unmarshalFasterType)
|
||||||
|
|
||||||
|
umlstd := typ.Implements(unmarshalerType) || reflect.PtrTo(typ).Implements(unmarshalerType)
|
||||||
|
|
||||||
|
out += tplStr(decodeTpl["handleUnmarshaler"], handleUnmarshaler{
|
||||||
|
IC: ic,
|
||||||
|
Name: name,
|
||||||
|
Typ: typ,
|
||||||
|
Ptr: reflect.Ptr,
|
||||||
|
TakeAddr: takeAddr || ptr,
|
||||||
|
UnmarshalJSONFFLexer: umlx,
|
||||||
|
Unmarshaler: umlstd,
|
||||||
|
})
|
||||||
|
|
||||||
|
if umlx || umlstd {
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(pquerna): generic handling of token type mismatching struct type
|
||||||
|
switch typ.Kind() {
|
||||||
|
case reflect.Int,
|
||||||
|
reflect.Int8,
|
||||||
|
reflect.Int16,
|
||||||
|
reflect.Int32,
|
||||||
|
reflect.Int64:
|
||||||
|
|
||||||
|
allowed := buildTokens(quoted, "FFTok_string", "FFTok_integer", "FFTok_null")
|
||||||
|
out += getAllowTokens(typ.Name(), allowed...)
|
||||||
|
|
||||||
|
out += getNumberHandler(ic, name, takeAddr || ptr, typ, "ParseInt")
|
||||||
|
|
||||||
|
case reflect.Uint,
|
||||||
|
reflect.Uint8,
|
||||||
|
reflect.Uint16,
|
||||||
|
reflect.Uint32,
|
||||||
|
reflect.Uint64:
|
||||||
|
|
||||||
|
allowed := buildTokens(quoted, "FFTok_string", "FFTok_integer", "FFTok_null")
|
||||||
|
out += getAllowTokens(typ.Name(), allowed...)
|
||||||
|
|
||||||
|
out += getNumberHandler(ic, name, takeAddr || ptr, typ, "ParseUint")
|
||||||
|
|
||||||
|
case reflect.Float32,
|
||||||
|
reflect.Float64:
|
||||||
|
|
||||||
|
allowed := buildTokens(quoted, "FFTok_string", "FFTok_double", "FFTok_integer", "FFTok_null")
|
||||||
|
out += getAllowTokens(typ.Name(), allowed...)
|
||||||
|
|
||||||
|
out += getNumberHandler(ic, name, takeAddr || ptr, typ, "ParseFloat")
|
||||||
|
|
||||||
|
case reflect.Bool:
|
||||||
|
ic.OutputImports[`"bytes"`] = true
|
||||||
|
ic.OutputImports[`"errors"`] = true
|
||||||
|
|
||||||
|
allowed := buildTokens(quoted, "FFTok_string", "FFTok_bool", "FFTok_null")
|
||||||
|
out += getAllowTokens(typ.Name(), allowed...)
|
||||||
|
|
||||||
|
out += tplStr(decodeTpl["handleBool"], handleBool{
|
||||||
|
Name: name,
|
||||||
|
Typ: typ,
|
||||||
|
TakeAddr: takeAddr || ptr,
|
||||||
|
})
|
||||||
|
|
||||||
|
case reflect.Ptr:
|
||||||
|
out += tplStr(decodeTpl["handlePtr"], handlePtr{
|
||||||
|
IC: ic,
|
||||||
|
Name: name,
|
||||||
|
Typ: typ,
|
||||||
|
Quoted: quoted,
|
||||||
|
})
|
||||||
|
|
||||||
|
case reflect.Array,
|
||||||
|
reflect.Slice:
|
||||||
|
out += getArrayHandler(ic, name, typ, ptr)
|
||||||
|
|
||||||
|
case reflect.String:
|
||||||
|
// Is it a json.Number?
|
||||||
|
if typ.PkgPath() == "encoding/json" && typ.Name() == "Number" {
|
||||||
|
// Fall back to json package to rely on the valid number check.
|
||||||
|
// See: https://github.com/golang/go/blob/f05c3aa24d815cd3869153750c9875e35fc48a6e/src/encoding/json/decode.go#L897
|
||||||
|
ic.OutputImports[`"encoding/json"`] = true
|
||||||
|
out += tplStr(decodeTpl["handleFallback"], handleFallback{
|
||||||
|
Name: name,
|
||||||
|
Typ: typ,
|
||||||
|
Kind: typ.Kind(),
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
out += tplStr(decodeTpl["handleString"], handleString{
|
||||||
|
IC: ic,
|
||||||
|
Name: name,
|
||||||
|
Typ: typ,
|
||||||
|
TakeAddr: takeAddr || ptr,
|
||||||
|
Quoted: quoted,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
case reflect.Interface:
|
||||||
|
ic.OutputImports[`"encoding/json"`] = true
|
||||||
|
out += tplStr(decodeTpl["handleFallback"], handleFallback{
|
||||||
|
Name: name,
|
||||||
|
Typ: typ,
|
||||||
|
Kind: typ.Kind(),
|
||||||
|
})
|
||||||
|
case reflect.Map:
|
||||||
|
out += tplStr(decodeTpl["handleObject"], handleObject{
|
||||||
|
IC: ic,
|
||||||
|
Name: name,
|
||||||
|
Typ: typ,
|
||||||
|
Ptr: reflect.Ptr,
|
||||||
|
TakeAddr: takeAddr || ptr,
|
||||||
|
})
|
||||||
|
default:
|
||||||
|
ic.OutputImports[`"encoding/json"`] = true
|
||||||
|
out += tplStr(decodeTpl["handleFallback"], handleFallback{
|
||||||
|
Name: name,
|
||||||
|
Typ: typ,
|
||||||
|
Kind: typ.Kind(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func getArrayHandler(ic *Inception, name string, typ reflect.Type, ptr bool) string {
|
||||||
|
if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 {
|
||||||
|
ic.OutputImports[`"encoding/base64"`] = true
|
||||||
|
useReflectToSet := false
|
||||||
|
if typ.Elem().Name() != "byte" {
|
||||||
|
ic.OutputImports[`"reflect"`] = true
|
||||||
|
useReflectToSet = true
|
||||||
|
}
|
||||||
|
|
||||||
|
return tplStr(decodeTpl["handleByteSlice"], handleArray{
|
||||||
|
IC: ic,
|
||||||
|
Name: name,
|
||||||
|
Typ: typ,
|
||||||
|
Ptr: reflect.Ptr,
|
||||||
|
UseReflectToSet: useReflectToSet,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
if typ.Elem().Kind() == reflect.Struct && typ.Elem().Name() != "" {
|
||||||
|
goto sliceOrArray
|
||||||
|
}
|
||||||
|
|
||||||
|
if (typ.Elem().Kind() == reflect.Struct || typ.Elem().Kind() == reflect.Map) ||
|
||||||
|
typ.Elem().Kind() == reflect.Array || typ.Elem().Kind() == reflect.Slice &&
|
||||||
|
typ.Elem().Name() == "" {
|
||||||
|
ic.OutputImports[`"encoding/json"`] = true
|
||||||
|
|
||||||
|
return tplStr(decodeTpl["handleFallback"], handleFallback{
|
||||||
|
Name: name,
|
||||||
|
Typ: typ,
|
||||||
|
Kind: typ.Kind(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
sliceOrArray:
|
||||||
|
|
||||||
|
if typ.Kind() == reflect.Array {
|
||||||
|
return tplStr(decodeTpl["handleArray"], handleArray{
|
||||||
|
IC: ic,
|
||||||
|
Name: name,
|
||||||
|
Typ: typ,
|
||||||
|
IsPtr: ptr,
|
||||||
|
Ptr: reflect.Ptr,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return tplStr(decodeTpl["handleSlice"], handleArray{
|
||||||
|
IC: ic,
|
||||||
|
Name: name,
|
||||||
|
Typ: typ,
|
||||||
|
IsPtr: ptr,
|
||||||
|
Ptr: reflect.Ptr,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func getAllowTokens(name string, tokens ...string) string {
|
||||||
|
return tplStr(decodeTpl["allowTokens"], allowTokens{
|
||||||
|
Name: name,
|
||||||
|
Tokens: tokens,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func getNumberHandler(ic *Inception, name string, takeAddr bool, typ reflect.Type, parsefunc string) string {
|
||||||
|
return tplStr(decodeTpl["handlerNumeric"], handlerNumeric{
|
||||||
|
IC: ic,
|
||||||
|
Name: name,
|
||||||
|
ParseFunc: parsefunc,
|
||||||
|
TakeAddr: takeAddr,
|
||||||
|
Typ: typ,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func getNumberSize(typ reflect.Type) string {
|
||||||
|
return fmt.Sprintf("%d", typ.Bits())
|
||||||
|
}
|
||||||
|
|
||||||
|
func getType(ic *Inception, name string, typ reflect.Type) string {
|
||||||
|
s := typ.Name()
|
||||||
|
|
||||||
|
if typ.PkgPath() != "" && typ.PkgPath() != ic.PackagePath {
|
||||||
|
path := removeVendor(typ.PkgPath())
|
||||||
|
ic.OutputImports[`"`+path+`"`] = true
|
||||||
|
s = typ.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
if s == "" {
|
||||||
|
return typ.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// removeVendor removes everything before and including a '/vendor/'
|
||||||
|
// substring in the package path.
|
||||||
|
// This is needed becuase that full path can't be used in the
|
||||||
|
// import statement.
|
||||||
|
func removeVendor(path string) string {
|
||||||
|
i := strings.Index(path, "/vendor/")
|
||||||
|
if i == -1 {
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
return path[i+8:]
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildTokens(containsOptional bool, optional string, required ...string) []string {
|
||||||
|
if containsOptional {
|
||||||
|
return append(required, optional)
|
||||||
|
}
|
||||||
|
|
||||||
|
return required
|
||||||
|
}
|
||||||
|
|
||||||
|
func unquoteField(quoted bool) string {
|
||||||
|
// The outer quote of a string is already stripped out by
|
||||||
|
// the lexer. We need to check if the inner string is also
|
||||||
|
// quoted. If so, we will decode it as json string. If decoding
|
||||||
|
// fails, we will use the original string
|
||||||
|
if quoted {
|
||||||
|
return `
|
||||||
|
unquoted, ok := fflib.UnquoteBytes(outBuf)
|
||||||
|
if ok {
|
||||||
|
outBuf = unquoted
|
||||||
|
}
|
||||||
|
`
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func getTmpVarFor(name string) string {
|
||||||
|
return "tmp" + strings.Replace(strings.Title(name), ".", "", -1)
|
||||||
|
}
|
773 vendor/github.com/pquerna/ffjson/inception/decoder_tpl.go generated vendored Normal file
@ -0,0 +1,773 @@
|
|||||||
|
/**
|
||||||
|
* Copyright 2014 Paul Querna
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ffjsoninception
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"text/template"
|
||||||
|
)
|
||||||
|
|
||||||
|
var decodeTpl map[string]*template.Template
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
decodeTpl = make(map[string]*template.Template)
|
||||||
|
|
||||||
|
funcs := map[string]string{
|
||||||
|
"handlerNumeric": handlerNumericTxt,
|
||||||
|
"allowTokens": allowTokensTxt,
|
||||||
|
"handleFallback": handleFallbackTxt,
|
||||||
|
"handleString": handleStringTxt,
|
||||||
|
"handleObject": handleObjectTxt,
|
||||||
|
"handleArray": handleArrayTxt,
|
||||||
|
"handleSlice": handleSliceTxt,
|
||||||
|
"handleByteSlice": handleByteSliceTxt,
|
||||||
|
"handleBool": handleBoolTxt,
|
||||||
|
"handlePtr": handlePtrTxt,
|
||||||
|
"header": headerTxt,
|
||||||
|
"ujFunc": ujFuncTxt,
|
||||||
|
"handleUnmarshaler": handleUnmarshalerTxt,
|
||||||
|
}
|
||||||
|
|
||||||
|
tplFuncs := template.FuncMap{
|
||||||
|
"getAllowTokens": getAllowTokens,
|
||||||
|
"getNumberSize": getNumberSize,
|
||||||
|
"getType": getType,
|
||||||
|
"handleField": handleField,
|
||||||
|
"handleFieldAddr": handleFieldAddr,
|
||||||
|
"unquoteField": unquoteField,
|
||||||
|
"getTmpVarFor": getTmpVarFor,
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, v := range funcs {
|
||||||
|
decodeTpl[k] = template.Must(template.New(k).Funcs(tplFuncs).Parse(v))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type handlerNumeric struct {
|
||||||
|
IC *Inception
|
||||||
|
Name string
|
||||||
|
ParseFunc string
|
||||||
|
Typ reflect.Type
|
||||||
|
TakeAddr bool
|
||||||
|
}
|
||||||
|
|
||||||
|
var handlerNumericTxt = `
|
||||||
|
{
|
||||||
|
{{$ic := .IC}}
|
||||||
|
|
||||||
|
if tok == fflib.FFTok_null {
|
||||||
|
{{if eq .TakeAddr true}}
|
||||||
|
{{.Name}} = nil
|
||||||
|
{{end}}
|
||||||
|
} else {
|
||||||
|
{{if eq .ParseFunc "ParseFloat" }}
|
||||||
|
tval, err := fflib.{{ .ParseFunc}}(fs.Output.Bytes(), {{getNumberSize .Typ}})
|
||||||
|
{{else}}
|
||||||
|
tval, err := fflib.{{ .ParseFunc}}(fs.Output.Bytes(), 10, {{getNumberSize .Typ}})
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return fs.WrapErr(err)
|
||||||
|
}
|
||||||
|
{{if eq .TakeAddr true}}
|
||||||
|
ttypval := {{getType $ic .Name .Typ}}(tval)
|
||||||
|
{{.Name}} = &ttypval
|
||||||
|
{{else}}
|
||||||
|
{{.Name}} = {{getType $ic .Name .Typ}}(tval)
|
||||||
|
{{end}}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
|
type allowTokens struct {
|
||||||
|
Name string
|
||||||
|
Tokens []string
|
||||||
|
}
|
||||||
|
|
||||||
|
var allowTokensTxt = `
|
||||||
|
{
|
||||||
|
if {{range $index, $element := .Tokens}}{{if ne $index 0 }}&&{{end}} tok != fflib.{{$element}}{{end}} {
|
||||||
|
return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for {{.Name}}", tok))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
|
type handleFallback struct {
|
||||||
|
Name string
|
||||||
|
Typ reflect.Type
|
||||||
|
Kind reflect.Kind
|
||||||
|
}
|
||||||
|
|
||||||
|
var handleFallbackTxt = `
|
||||||
|
{
|
||||||
|
/* Falling back. type={{printf "%v" .Typ}} kind={{printf "%v" .Kind}} */
|
||||||
|
tbuf, err := fs.CaptureField(tok)
|
||||||
|
if err != nil {
|
||||||
|
return fs.WrapErr(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = json.Unmarshal(tbuf, &{{.Name}})
|
||||||
|
if err != nil {
|
||||||
|
return fs.WrapErr(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
|
type handleString struct {
|
||||||
|
IC *Inception
|
||||||
|
Name string
|
||||||
|
Typ reflect.Type
|
||||||
|
TakeAddr bool
|
||||||
|
Quoted bool
|
||||||
|
}
|
||||||
|
|
||||||
|
var handleStringTxt = `
|
||||||
|
{
|
||||||
|
{{$ic := .IC}}
|
||||||
|
|
||||||
|
{{getAllowTokens .Typ.Name "FFTok_string" "FFTok_null"}}
|
||||||
|
if tok == fflib.FFTok_null {
|
||||||
|
{{if eq .TakeAddr true}}
|
||||||
|
{{.Name}} = nil
|
||||||
|
{{end}}
|
||||||
|
} else {
|
||||||
|
{{if eq .TakeAddr true}}
|
||||||
|
var tval {{getType $ic .Name .Typ}}
|
||||||
|
outBuf := fs.Output.Bytes()
|
||||||
|
{{unquoteField .Quoted}}
|
||||||
|
tval = {{getType $ic .Name .Typ}}(string(outBuf))
|
||||||
|
{{.Name}} = &tval
|
||||||
|
{{else}}
|
||||||
|
outBuf := fs.Output.Bytes()
|
||||||
|
{{unquoteField .Quoted}}
|
||||||
|
{{.Name}} = {{getType $ic .Name .Typ}}(string(outBuf))
|
||||||
|
{{end}}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
|
type handleObject struct {
|
||||||
|
IC *Inception
|
||||||
|
Name string
|
||||||
|
Typ reflect.Type
|
||||||
|
Ptr reflect.Kind
|
||||||
|
TakeAddr bool
|
||||||
|
}
|
||||||
|
|
||||||
|
var handleObjectTxt = `
|
||||||
|
{
|
||||||
|
{{$ic := .IC}}
|
||||||
|
{{getAllowTokens .Typ.Name "FFTok_left_bracket" "FFTok_null"}}
|
||||||
|
if tok == fflib.FFTok_null {
|
||||||
|
{{.Name}} = nil
|
||||||
|
} else {
|
||||||
|
|
||||||
|
{{if eq .TakeAddr true}}
|
||||||
|
{{if eq .Typ.Elem.Kind .Ptr }}
|
||||||
|
{{if eq .Typ.Key.Kind .Ptr }}
|
||||||
|
var tval = make(map[*{{getType $ic .Name .Typ.Key.Elem}}]*{{getType $ic .Name .Typ.Elem.Elem}}, 0)
|
||||||
|
{{else}}
|
||||||
|
var tval = make(map[{{getType $ic .Name .Typ.Key}}]*{{getType $ic .Name .Typ.Elem.Elem}}, 0)
|
||||||
|
{{end}}
|
||||||
|
{{else}}
|
||||||
|
{{if eq .Typ.Key.Kind .Ptr }}
|
||||||
|
var tval = make(map[*{{getType $ic .Name .Typ.Key.Elem}}]{{getType $ic .Name .Typ.Elem}}, 0)
|
||||||
|
{{else}}
|
||||||
|
var tval = make(map[{{getType $ic .Name .Typ.Key}}]{{getType $ic .Name .Typ.Elem}}, 0)
|
||||||
|
{{end}}
|
||||||
|
{{end}}
|
||||||
|
{{else}}
|
||||||
|
{{if eq .Typ.Elem.Kind .Ptr }}
|
||||||
|
{{if eq .Typ.Key.Kind .Ptr }}
|
||||||
|
{{.Name}} = make(map[*{{getType $ic .Name .Typ.Key.Elem}}]*{{getType $ic .Name .Typ.Elem.Elem}}, 0)
|
||||||
|
{{else}}
|
||||||
|
{{.Name}} = make(map[{{getType $ic .Name .Typ.Key}}]*{{getType $ic .Name .Typ.Elem.Elem}}, 0)
|
||||||
|
{{end}}
|
||||||
|
{{else}}
|
||||||
|
{{if eq .Typ.Key.Kind .Ptr }}
|
||||||
|
{{.Name}} = make(map[*{{getType $ic .Name .Typ.Key.Elem}}]{{getType $ic .Name .Typ.Elem}}, 0)
|
||||||
|
{{else}}
|
||||||
|
{{.Name}} = make(map[{{getType $ic .Name .Typ.Key}}]{{getType $ic .Name .Typ.Elem}}, 0)
|
||||||
|
{{end}}
|
||||||
|
{{end}}
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
wantVal := true
|
||||||
|
|
||||||
|
for {
|
||||||
|
{{$keyPtr := false}}
|
||||||
|
{{if eq .Typ.Key.Kind .Ptr }}
|
||||||
|
{{$keyPtr := true}}
|
||||||
|
var k *{{getType $ic .Name .Typ.Key.Elem}}
|
||||||
|
{{else}}
|
||||||
|
var k {{getType $ic .Name .Typ.Key}}
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
{{$valPtr := false}}
|
||||||
|
{{$tmpVar := getTmpVarFor .Name}}
|
||||||
|
{{if eq .Typ.Elem.Kind .Ptr }}
|
||||||
|
{{$valPtr := true}}
|
||||||
|
var {{$tmpVar}} *{{getType $ic .Name .Typ.Elem.Elem}}
|
||||||
|
{{else}}
|
||||||
|
var {{$tmpVar}} {{getType $ic .Name .Typ.Elem}}
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
tok = fs.Scan()
|
||||||
|
if tok == fflib.FFTok_error {
|
||||||
|
goto tokerror
|
||||||
|
}
|
||||||
|
if tok == fflib.FFTok_right_bracket {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if tok == fflib.FFTok_comma {
|
||||||
|
if wantVal == true {
|
||||||
|
// TODO(pquerna): this isn't an ideal error message, this handles
|
||||||
|
// things like [,,,] as an array value.
|
||||||
|
return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
wantVal = true
|
||||||
|
}
|
||||||
|
|
||||||
|
{{handleField .IC "k" .Typ.Key $keyPtr false}}
|
||||||
|
|
||||||
|
// Expect ':' after key
|
||||||
|
tok = fs.Scan()
|
||||||
|
if tok != fflib.FFTok_colon {
|
||||||
|
return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok))
|
||||||
|
}
|
||||||
|
|
||||||
|
tok = fs.Scan()
|
||||||
|
{{handleField .IC $tmpVar .Typ.Elem $valPtr false}}
|
||||||
|
|
||||||
|
{{if eq .TakeAddr true}}
|
||||||
|
tval[k] = {{$tmpVar}}
|
||||||
|
{{else}}
|
||||||
|
{{.Name}}[k] = {{$tmpVar}}
|
||||||
|
{{end}}
|
||||||
|
wantVal = false
|
||||||
|
}
|
||||||
|
|
||||||
|
{{if eq .TakeAddr true}}
|
||||||
|
{{.Name}} = &tval
|
||||||
|
{{end}}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
|
type handleArray struct {
|
||||||
|
IC *Inception
|
||||||
|
Name string
|
||||||
|
Typ reflect.Type
|
||||||
|
Ptr reflect.Kind
|
||||||
|
UseReflectToSet bool
|
||||||
|
IsPtr bool
|
||||||
|
}
|
||||||
|
|
||||||
|
var handleArrayTxt = `
|
||||||
|
{
|
||||||
|
{{$ic := .IC}}
|
||||||
|
{{getAllowTokens .Typ.Name "FFTok_left_brace" "FFTok_null"}}
|
||||||
|
{{if eq .Typ.Elem.Kind .Ptr}}
|
||||||
|
{{.Name}} = [{{.Typ.Len}}]*{{getType $ic .Name .Typ.Elem.Elem}}{}
|
||||||
|
{{else}}
|
||||||
|
{{.Name}} = [{{.Typ.Len}}]{{getType $ic .Name .Typ.Elem}}{}
|
||||||
|
{{end}}
|
||||||
|
if tok != fflib.FFTok_null {
|
||||||
|
wantVal := true
|
||||||
|
|
||||||
|
idx := 0
|
||||||
|
for {
|
||||||
|
{{$ptr := false}}
|
||||||
|
{{$tmpVar := getTmpVarFor .Name}}
|
||||||
|
{{if eq .Typ.Elem.Kind .Ptr }}
|
||||||
|
{{$ptr := true}}
|
||||||
|
var {{$tmpVar}} *{{getType $ic .Name .Typ.Elem.Elem}}
|
||||||
|
{{else}}
|
||||||
|
var {{$tmpVar}} {{getType $ic .Name .Typ.Elem}}
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
tok = fs.Scan()
|
||||||
|
if tok == fflib.FFTok_error {
|
||||||
|
goto tokerror
|
||||||
|
}
|
||||||
|
if tok == fflib.FFTok_right_brace {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if tok == fflib.FFTok_comma {
|
||||||
|
if wantVal == true {
|
||||||
|
// TODO(pquerna): this isn't an ideal error message, this handles
|
||||||
|
// things like [,,,] as an array value.
|
||||||
|
return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
wantVal = true
|
||||||
|
}
|
||||||
|
|
||||||
|
{{handleField .IC $tmpVar .Typ.Elem $ptr false}}
|
||||||
|
|
||||||
|
// Standard json.Unmarshal ignores elements out of array bounds,
|
||||||
|
// that what we do as well.
|
||||||
|
if idx < {{.Typ.Len}} {
|
||||||
|
{{.Name}}[idx] = {{$tmpVar}}
|
||||||
|
idx++
|
||||||
|
}
|
||||||
|
|
||||||
|
wantVal = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
|
var handleSliceTxt = `
|
||||||
|
{
|
||||||
|
{{$ic := .IC}}
|
||||||
|
{{getAllowTokens .Typ.Name "FFTok_left_brace" "FFTok_null"}}
|
||||||
|
if tok == fflib.FFTok_null {
|
||||||
|
{{.Name}} = nil
|
||||||
|
} else {
|
||||||
|
{{if eq .Typ.Elem.Kind .Ptr }}
|
||||||
|
{{if eq .IsPtr true}}
|
||||||
|
{{.Name}} = &[]*{{getType $ic .Name .Typ.Elem.Elem}}{}
|
||||||
|
{{else}}
|
||||||
|
{{.Name}} = []*{{getType $ic .Name .Typ.Elem.Elem}}{}
|
||||||
|
{{end}}
|
||||||
|
{{else}}
|
||||||
|
{{if eq .IsPtr true}}
|
||||||
|
{{.Name}} = &[]{{getType $ic .Name .Typ.Elem}}{}
|
||||||
|
{{else}}
|
||||||
|
{{.Name}} = []{{getType $ic .Name .Typ.Elem}}{}
|
||||||
|
{{end}}
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
wantVal := true
|
||||||
|
|
||||||
|
for {
|
||||||
|
{{$ptr := false}}
|
||||||
|
{{$tmpVar := getTmpVarFor .Name}}
|
||||||
|
{{if eq .Typ.Elem.Kind .Ptr }}
|
||||||
|
{{$ptr := true}}
|
||||||
|
var {{$tmpVar}} *{{getType $ic .Name .Typ.Elem.Elem}}
|
||||||
|
{{else}}
|
||||||
|
var {{$tmpVar}} {{getType $ic .Name .Typ.Elem}}
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
tok = fs.Scan()
|
||||||
|
if tok == fflib.FFTok_error {
|
||||||
|
goto tokerror
|
||||||
|
}
|
||||||
|
if tok == fflib.FFTok_right_brace {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if tok == fflib.FFTok_comma {
|
||||||
|
if wantVal == true {
|
||||||
|
// TODO(pquerna): this isn't an ideal error message, this handles
|
||||||
|
// things like [,,,] as an array value.
|
||||||
|
return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
wantVal = true
|
||||||
|
}
|
||||||
|
|
||||||
|
{{handleField .IC $tmpVar .Typ.Elem $ptr false}}
|
||||||
|
{{if eq .IsPtr true}}
|
||||||
|
*{{.Name}} = append(*{{.Name}}, {{$tmpVar}})
|
||||||
|
{{else}}
|
||||||
|
{{.Name}} = append({{.Name}}, {{$tmpVar}})
|
||||||
|
{{end}}
|
||||||
|
wantVal = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
|
var handleByteSliceTxt = `
|
||||||
|
{
|
||||||
|
{{getAllowTokens .Typ.Name "FFTok_string" "FFTok_null"}}
|
||||||
|
if tok == fflib.FFTok_null {
|
||||||
|
{{.Name}} = nil
|
||||||
|
} else {
|
||||||
|
b := make([]byte, base64.StdEncoding.DecodedLen(fs.Output.Len()))
|
||||||
|
n, err := base64.StdEncoding.Decode(b, fs.Output.Bytes())
|
||||||
|
if err != nil {
|
||||||
|
return fs.WrapErr(err)
|
||||||
|
}
|
||||||
|
{{if eq .UseReflectToSet true}}
|
||||||
|
v := reflect.ValueOf(&{{.Name}}).Elem()
|
||||||
|
v.SetBytes(b[0:n])
|
||||||
|
{{else}}
|
||||||
|
{{.Name}} = append([]byte(), b[0:n]...)
|
||||||
|
{{end}}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
|
type handleBool struct {
|
||||||
|
Name string
|
||||||
|
Typ reflect.Type
|
||||||
|
TakeAddr bool
|
||||||
|
}
|
||||||
|
|
||||||
|
var handleBoolTxt = `
|
||||||
|
{
|
||||||
|
if tok == fflib.FFTok_null {
|
||||||
|
{{if eq .TakeAddr true}}
|
||||||
|
{{.Name}} = nil
|
||||||
|
{{end}}
|
||||||
|
} else {
|
||||||
|
tmpb := fs.Output.Bytes()
|
||||||
|
|
||||||
|
{{if eq .TakeAddr true}}
|
||||||
|
var tval bool
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 {
|
||||||
|
{{if eq .TakeAddr true}}
|
||||||
|
tval = true
|
||||||
|
{{else}}
|
||||||
|
{{.Name}} = true
|
||||||
|
{{end}}
|
||||||
|
} else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 {
|
||||||
|
{{if eq .TakeAddr true}}
|
||||||
|
tval = false
|
||||||
|
{{else}}
|
||||||
|
{{.Name}} = false
|
||||||
|
{{end}}
|
||||||
|
} else {
|
||||||
|
err = errors.New("unexpected bytes for true/false value")
|
||||||
|
return fs.WrapErr(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
{{if eq .TakeAddr true}}
|
||||||
|
{{.Name}} = &tval
|
||||||
|
{{end}}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
|
type handlePtr struct {
	IC     *Inception
	Name   string
	Typ    reflect.Type
	Quoted bool
}

var handlePtrTxt = `
{
	{{$ic := .IC}}

	if tok == fflib.FFTok_null {
		{{.Name}} = nil
	} else {
		if {{.Name}} == nil {
			{{.Name}} = new({{getType $ic .Typ.Elem.Name .Typ.Elem}})
		}

		{{handleFieldAddr .IC .Name true .Typ.Elem false .Quoted}}
	}
}
`

type header struct {
	IC *Inception
	SI *StructInfo
}

var headerTxt = `
const (
	ffjt{{.SI.Name}}base = iota
	ffjt{{.SI.Name}}nosuchkey
	{{with $si := .SI}}
	{{range $index, $field := $si.Fields}}
	{{if ne $field.JsonName "-"}}
	ffjt{{$si.Name}}{{$field.Name}}
	{{end}}
	{{end}}
	{{end}}
)

{{with $si := .SI}}
{{range $index, $field := $si.Fields}}
{{if ne $field.JsonName "-"}}
var ffjKey{{$si.Name}}{{$field.Name}} = []byte({{$field.JsonName}})
{{end}}
{{end}}
{{end}}

`

type ujFunc struct {
	IC          *Inception
	SI          *StructInfo
	ValidValues []string
	ResetFields bool
}

var ujFuncTxt = `
{{$si := .SI}}
{{$ic := .IC}}

// UnmarshalJSON umarshall json - template of ffjson
func (j *{{.SI.Name}}) UnmarshalJSON(input []byte) error {
	fs := fflib.NewFFLexer(input)
	return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
}

// UnmarshalJSONFFLexer fast json unmarshall - template ffjson
func (j *{{.SI.Name}}) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
	var err error
	currentKey := ffjt{{.SI.Name}}base
	_ = currentKey
	tok := fflib.FFTok_init
	wantedTok := fflib.FFTok_init

	{{if eq .ResetFields true}}
	{{range $index, $field := $si.Fields}}
	var ffjSet{{$si.Name}}{{$field.Name}} = false
	{{end}}
	{{end}}

mainparse:
	for {
		tok = fs.Scan()
		// println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
		if tok == fflib.FFTok_error {
			goto tokerror
		}

		switch state {

		case fflib.FFParse_map_start:
			if tok != fflib.FFTok_left_bracket {
				wantedTok = fflib.FFTok_left_bracket
				goto wrongtokenerror
			}
			state = fflib.FFParse_want_key
			continue

		case fflib.FFParse_after_value:
			if tok == fflib.FFTok_comma {
				state = fflib.FFParse_want_key
			} else if tok == fflib.FFTok_right_bracket {
				goto done
			} else {
				wantedTok = fflib.FFTok_comma
				goto wrongtokenerror
			}

		case fflib.FFParse_want_key:
			// json {} ended. goto exit. woo.
			if tok == fflib.FFTok_right_bracket {
				goto done
			}
			if tok != fflib.FFTok_string {
				wantedTok = fflib.FFTok_string
				goto wrongtokenerror
			}

			kn := fs.Output.Bytes()
			if len(kn) <= 0 {
				// "" case. hrm.
				currentKey = ffjt{{.SI.Name}}nosuchkey
				state = fflib.FFParse_want_colon
				goto mainparse
			} else {
				switch kn[0] {
				{{range $byte, $fields := $si.FieldsByFirstByte}}
				case '{{$byte}}':
					{{range $index, $field := $fields}}
					{{if ne $index 0 }}} else if {{else}}if {{end}} bytes.Equal(ffjKey{{$si.Name}}{{$field.Name}}, kn) {
						currentKey = ffjt{{$si.Name}}{{$field.Name}}
						state = fflib.FFParse_want_colon
						goto mainparse
					{{end}} }
				{{end}}
				}
				{{range $index, $field := $si.ReverseFields}}
				if {{$field.FoldFuncName}}(ffjKey{{$si.Name}}{{$field.Name}}, kn) {
					currentKey = ffjt{{$si.Name}}{{$field.Name}}
					state = fflib.FFParse_want_colon
					goto mainparse
				}
				{{end}}
				currentKey = ffjt{{.SI.Name}}nosuchkey
				state = fflib.FFParse_want_colon
				goto mainparse
			}

		case fflib.FFParse_want_colon:
			if tok != fflib.FFTok_colon {
				wantedTok = fflib.FFTok_colon
				goto wrongtokenerror
			}
			state = fflib.FFParse_want_value
			continue
		case fflib.FFParse_want_value:

			if {{range $index, $v := .ValidValues}}{{if ne $index 0 }}||{{end}}tok == fflib.{{$v}}{{end}} {
				switch currentKey {
				{{range $index, $field := $si.Fields}}
				case ffjt{{$si.Name}}{{$field.Name}}:
					goto handle_{{$field.Name}}
				{{end}}
				case ffjt{{$si.Name}}nosuchkey:
					err = fs.SkipField(tok)
					if err != nil {
						return fs.WrapErr(err)
					}
					state = fflib.FFParse_after_value
					goto mainparse
				}
			} else {
				goto wantedvalue
			}
		}
	}
{{range $index, $field := $si.Fields}}
handle_{{$field.Name}}:
	{{with $fieldName := $field.Name | printf "j.%s"}}
	{{handleField $ic $fieldName $field.Typ $field.Pointer $field.ForceString}}
	{{if eq $.ResetFields true}}
	ffjSet{{$si.Name}}{{$field.Name}} = true
	{{end}}
	state = fflib.FFParse_after_value
	goto mainparse
	{{end}}
{{end}}

wantedvalue:
	return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
wrongtokenerror:
	return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
tokerror:
	if fs.BigError != nil {
		return fs.WrapErr(fs.BigError)
	}
	err = fs.Error.ToError()
	if err != nil {
		return fs.WrapErr(err)
	}
	panic("ffjson-generated: unreachable, please report bug.")
done:
{{if eq .ResetFields true}}
	{{range $index, $field := $si.Fields}}
	if !ffjSet{{$si.Name}}{{$field.Name}} {
	{{with $fieldName := $field.Name | printf "j.%s"}}
		{{if eq $field.Pointer true}}
		{{$fieldName}} = nil
		{{else if eq $field.Typ.Kind ` + strconv.FormatUint(uint64(reflect.Interface), 10) + `}}
		{{$fieldName}} = nil
		{{else if eq $field.Typ.Kind ` + strconv.FormatUint(uint64(reflect.Slice), 10) + `}}
		{{$fieldName}} = nil
		{{else if eq $field.Typ.Kind ` + strconv.FormatUint(uint64(reflect.Array), 10) + `}}
		{{$fieldName}} = [{{$field.Typ.Len}}]{{getType $ic $fieldName $field.Typ.Elem}}{}
		{{else if eq $field.Typ.Kind ` + strconv.FormatUint(uint64(reflect.Map), 10) + `}}
		{{$fieldName}} = nil
		{{else if eq $field.Typ.Kind ` + strconv.FormatUint(uint64(reflect.Bool), 10) + `}}
		{{$fieldName}} = false
		{{else if eq $field.Typ.Kind ` + strconv.FormatUint(uint64(reflect.String), 10) + `}}
		{{$fieldName}} = ""
		{{else if eq $field.Typ.Kind ` + strconv.FormatUint(uint64(reflect.Struct), 10) + `}}
		{{$fieldName}} = {{getType $ic $fieldName $field.Typ}}{}
		{{else}}
		{{$fieldName}} = {{getType $ic $fieldName $field.Typ}}(0)
		{{end}}
	{{end}}
	}
	{{end}}
{{end}}
	return nil
}
`

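To make the dispatch in ujFuncTxt above easier to follow, here is a small, self-contained sketch of the same strategy the generated code uses: switch on the first byte of the scanned key, then confirm the match with bytes.Equal before falling back to "no such key". It is not generated by these templates; the key names and the lookupKey helper are made up for illustration.

package main

import (
	"bytes"
	"fmt"
)

// Hypothetical key table, mirroring the ffjKey<Struct><Field> variables that
// headerTxt emits for a struct with JSON fields "id" and "image".
var (
	keyID    = []byte("id")
	keyImage = []byte("image")
)

// lookupKey mimics the generated switch: first-byte dispatch, then exact match.
func lookupKey(kn []byte) string {
	if len(kn) == 0 {
		return "nosuchkey"
	}
	switch kn[0] {
	case 'i':
		if bytes.Equal(keyID, kn) {
			return "id"
		} else if bytes.Equal(keyImage, kn) {
			return "image"
		}
	}
	return "nosuchkey"
}

func main() {
	fmt.Println(lookupKey([]byte("image"))) // image
	fmt.Println(lookupKey([]byte("name")))  // nosuchkey
}
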
type handleUnmarshaler struct {
	IC                   *Inception
	Name                 string
	Typ                  reflect.Type
	Ptr                  reflect.Kind
	TakeAddr             bool
	UnmarshalJSONFFLexer bool
	Unmarshaler          bool
}

var handleUnmarshalerTxt = `
	{{$ic := .IC}}

	{{if eq .UnmarshalJSONFFLexer true}}
	{
		if tok == fflib.FFTok_null {
			{{if eq .Typ.Kind .Ptr }}
			{{.Name}} = nil
			{{end}}
			{{if eq .TakeAddr true }}
			{{.Name}} = nil
			{{end}}
		} else {
			{{if eq .Typ.Kind .Ptr }}
			if {{.Name}} == nil {
				{{.Name}} = new({{getType $ic .Typ.Elem.Name .Typ.Elem}})
			}
			{{end}}
			{{if eq .TakeAddr true }}
			if {{.Name}} == nil {
				{{.Name}} = new({{getType $ic .Typ.Name .Typ}})
			}
			{{end}}
			err = {{.Name}}.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key)
			if err != nil {
				return err
			}
		}
		state = fflib.FFParse_after_value
	}
	{{else}}
	{{if eq .Unmarshaler true}}
	{
		if tok == fflib.FFTok_null {
			{{if eq .TakeAddr true }}
			{{.Name}} = nil
			{{end}}
		} else {

			tbuf, err := fs.CaptureField(tok)
			if err != nil {
				return fs.WrapErr(err)
			}

			{{if eq .TakeAddr true }}
			if {{.Name}} == nil {
				{{.Name}} = new({{getType $ic .Typ.Name .Typ}})
			}
			{{end}}
			err = {{.Name}}.UnmarshalJSON(tbuf)
			if err != nil {
				return fs.WrapErr(err)
			}
		}
		state = fflib.FFParse_after_value
	}
	{{end}}
	{{end}}
`

544 vendor/github.com/pquerna/ffjson/inception/encoder.go generated vendored Normal file
@@ -0,0 +1,544 @@
|
|||||||
|
/**
|
||||||
|
* Copyright 2014 Paul Querna
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ffjsoninception
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"github.com/pquerna/ffjson/shared"
|
||||||
|
)
|
||||||
|
|
||||||
|
func typeInInception(ic *Inception, typ reflect.Type, f shared.Feature) bool {
|
||||||
|
for _, v := range ic.objs {
|
||||||
|
if v.Typ == typ {
|
||||||
|
return v.Options.HasFeature(f)
|
||||||
|
}
|
||||||
|
if typ.Kind() == reflect.Ptr {
|
||||||
|
if v.Typ == typ.Elem() {
|
||||||
|
return v.Options.HasFeature(f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func getOmitEmpty(ic *Inception, sf *StructField) string {
|
||||||
|
ptname := "j." + sf.Name
|
||||||
|
if sf.Pointer {
|
||||||
|
ptname = "*" + ptname
|
||||||
|
return "if true {\n"
|
||||||
|
}
|
||||||
|
switch sf.Typ.Kind() {
|
||||||
|
|
||||||
|
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||||
|
return "if len(" + ptname + ") != 0 {" + "\n"
|
||||||
|
|
||||||
|
case reflect.Int,
|
||||||
|
reflect.Int8,
|
||||||
|
reflect.Int16,
|
||||||
|
reflect.Int32,
|
||||||
|
reflect.Int64,
|
||||||
|
reflect.Uint,
|
||||||
|
reflect.Uint8,
|
||||||
|
reflect.Uint16,
|
||||||
|
reflect.Uint32,
|
||||||
|
reflect.Uint64,
|
||||||
|
reflect.Uintptr,
|
||||||
|
reflect.Float32,
|
||||||
|
reflect.Float64:
|
||||||
|
return "if " + ptname + " != 0 {" + "\n"
|
||||||
|
|
||||||
|
case reflect.Bool:
|
||||||
|
return "if " + ptname + " != false {" + "\n"
|
||||||
|
|
||||||
|
case reflect.Interface, reflect.Ptr:
|
||||||
|
return "if " + ptname + " != nil {" + "\n"
|
||||||
|
|
||||||
|
default:
|
||||||
|
// TODO(pquerna): fix types
|
||||||
|
return "if true {" + "\n"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getMapValue(ic *Inception, name string, typ reflect.Type, ptr bool, forceString bool) string {
|
||||||
|
var out = ""
|
||||||
|
|
||||||
|
if typ.Key().Kind() != reflect.String {
|
||||||
|
out += fmt.Sprintf("/* Falling back. type=%v kind=%v */\n", typ, typ.Kind())
|
||||||
|
out += ic.q.Flush()
|
||||||
|
out += "err = buf.Encode(" + name + ")" + "\n"
|
||||||
|
out += "if err != nil {" + "\n"
|
||||||
|
out += " return err" + "\n"
|
||||||
|
out += "}" + "\n"
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
var elemKind reflect.Kind
|
||||||
|
elemKind = typ.Elem().Kind()
|
||||||
|
|
||||||
|
switch elemKind {
|
||||||
|
case reflect.String,
|
||||||
|
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||||
|
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
|
||||||
|
reflect.Float32,
|
||||||
|
reflect.Float64,
|
||||||
|
reflect.Bool:
|
||||||
|
|
||||||
|
ic.OutputImports[`fflib "github.com/pquerna/ffjson/fflib/v1"`] = true
|
||||||
|
|
||||||
|
out += "if " + name + " == nil {" + "\n"
|
||||||
|
ic.q.Write("null")
|
||||||
|
out += ic.q.GetQueued()
|
||||||
|
ic.q.DeleteLast()
|
||||||
|
out += "} else {" + "\n"
|
||||||
|
out += ic.q.WriteFlush("{ ")
|
||||||
|
out += " for key, value := range " + name + " {" + "\n"
|
||||||
|
out += " fflib.WriteJsonString(buf, key)" + "\n"
|
||||||
|
out += " buf.WriteString(`:`)" + "\n"
|
||||||
|
out += getGetInnerValue(ic, "value", typ.Elem(), false, forceString)
|
||||||
|
out += " buf.WriteByte(',')" + "\n"
|
||||||
|
out += " }" + "\n"
|
||||||
|
out += "buf.Rewind(1)" + "\n"
|
||||||
|
out += ic.q.WriteFlush("}")
|
||||||
|
out += "}" + "\n"
|
||||||
|
|
||||||
|
default:
|
||||||
|
out += ic.q.Flush()
|
||||||
|
out += fmt.Sprintf("/* Falling back. type=%v kind=%v */\n", typ, typ.Kind())
|
||||||
|
out += "err = buf.Encode(" + name + ")" + "\n"
|
||||||
|
out += "if err != nil {" + "\n"
|
||||||
|
out += " return err" + "\n"
|
||||||
|
out += "}" + "\n"
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func getGetInnerValue(ic *Inception, name string, typ reflect.Type, ptr bool, forceString bool) string {
|
||||||
|
var out = ""
|
||||||
|
|
||||||
|
// Flush if not bool or maps
|
||||||
|
if typ.Kind() != reflect.Bool && typ.Kind() != reflect.Map && typ.Kind() != reflect.Struct {
|
||||||
|
out += ic.q.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
if typ.Implements(marshalerFasterType) ||
|
||||||
|
reflect.PtrTo(typ).Implements(marshalerFasterType) ||
|
||||||
|
typeInInception(ic, typ, shared.MustEncoder) ||
|
||||||
|
typ.Implements(marshalerType) ||
|
||||||
|
reflect.PtrTo(typ).Implements(marshalerType) {
|
||||||
|
|
||||||
|
out += ic.q.Flush()
|
||||||
|
out += tplStr(encodeTpl["handleMarshaler"], handleMarshaler{
|
||||||
|
IC: ic,
|
||||||
|
Name: name,
|
||||||
|
Typ: typ,
|
||||||
|
Ptr: reflect.Ptr,
|
||||||
|
MarshalJSONBuf: typ.Implements(marshalerFasterType) || reflect.PtrTo(typ).Implements(marshalerFasterType) || typeInInception(ic, typ, shared.MustEncoder),
|
||||||
|
Marshaler: typ.Implements(marshalerType) || reflect.PtrTo(typ).Implements(marshalerType),
|
||||||
|
})
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
ptname := name
|
||||||
|
if ptr {
|
||||||
|
ptname = "*" + name
|
||||||
|
}
|
||||||
|
|
||||||
|
switch typ.Kind() {
|
||||||
|
case reflect.Int,
|
||||||
|
reflect.Int8,
|
||||||
|
reflect.Int16,
|
||||||
|
reflect.Int32,
|
||||||
|
reflect.Int64:
|
||||||
|
ic.OutputImports[`fflib "github.com/pquerna/ffjson/fflib/v1"`] = true
|
||||||
|
out += "fflib.FormatBits2(buf, uint64(" + ptname + "), 10, " + ptname + " < 0)" + "\n"
|
||||||
|
case reflect.Uint,
|
||||||
|
reflect.Uint8,
|
||||||
|
reflect.Uint16,
|
||||||
|
reflect.Uint32,
|
||||||
|
reflect.Uint64,
|
||||||
|
reflect.Uintptr:
|
||||||
|
ic.OutputImports[`fflib "github.com/pquerna/ffjson/fflib/v1"`] = true
|
||||||
|
out += "fflib.FormatBits2(buf, uint64(" + ptname + "), 10, false)" + "\n"
|
||||||
|
case reflect.Float32:
|
||||||
|
ic.OutputImports[`fflib "github.com/pquerna/ffjson/fflib/v1"`] = true
|
||||||
|
out += "fflib.AppendFloat(buf, float64(" + ptname + "), 'g', -1, 32)" + "\n"
|
||||||
|
case reflect.Float64:
|
||||||
|
ic.OutputImports[`fflib "github.com/pquerna/ffjson/fflib/v1"`] = true
|
||||||
|
out += "fflib.AppendFloat(buf, float64(" + ptname + "), 'g', -1, 64)" + "\n"
|
||||||
|
case reflect.Array,
|
||||||
|
reflect.Slice:
|
||||||
|
|
||||||
|
// Arrays cannot be nil
|
||||||
|
if typ.Kind() != reflect.Array {
|
||||||
|
out += "if " + name + "!= nil {" + "\n"
|
||||||
|
}
|
||||||
|
// Array and slice values encode as JSON arrays, except that
|
||||||
|
// []byte encodes as a base64-encoded string, and a nil slice
|
||||||
|
// encodes as the null JSON object.
|
||||||
|
if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 {
|
||||||
|
ic.OutputImports[`"encoding/base64"`] = true
|
||||||
|
|
||||||
|
out += "buf.WriteString(`\"`)" + "\n"
|
||||||
|
out += `{` + "\n"
|
||||||
|
out += `enc := base64.NewEncoder(base64.StdEncoding, buf)` + "\n"
|
||||||
|
if typ.Elem().Name() != "byte" {
|
||||||
|
ic.OutputImports[`"reflect"`] = true
|
||||||
|
out += `enc.Write(reflect.Indirect(reflect.ValueOf(` + ptname + `)).Bytes())` + "\n"
|
||||||
|
|
||||||
|
} else {
|
||||||
|
out += `enc.Write(` + ptname + `)` + "\n"
|
||||||
|
}
|
||||||
|
out += `enc.Close()` + "\n"
|
||||||
|
out += `}` + "\n"
|
||||||
|
out += "buf.WriteString(`\"`)" + "\n"
|
||||||
|
} else {
|
||||||
|
out += "buf.WriteString(`[`)" + "\n"
|
||||||
|
out += "for i, v := range " + ptname + "{" + "\n"
|
||||||
|
out += "if i != 0 {" + "\n"
|
||||||
|
out += "buf.WriteString(`,`)" + "\n"
|
||||||
|
out += "}" + "\n"
|
||||||
|
out += getGetInnerValue(ic, "v", typ.Elem(), false, false)
|
||||||
|
out += "}" + "\n"
|
||||||
|
out += "buf.WriteString(`]`)" + "\n"
|
||||||
|
}
|
||||||
|
if typ.Kind() != reflect.Array {
|
||||||
|
out += "} else {" + "\n"
|
||||||
|
out += "buf.WriteString(`null`)" + "\n"
|
||||||
|
out += "}" + "\n"
|
||||||
|
}
|
||||||
|
case reflect.String:
|
||||||
|
// Is it a json.Number?
|
||||||
|
if typ.PkgPath() == "encoding/json" && typ.Name() == "Number" {
|
||||||
|
// Fall back to json package to rely on the valid number check.
|
||||||
|
// See: https://github.com/golang/go/blob/92cd6e3af9f423ab4d8ac78f24e7fd81c31a8ce6/src/encoding/json/encode.go#L550
|
||||||
|
out += fmt.Sprintf("/* json.Number */\n")
|
||||||
|
out += "err = buf.Encode(" + name + ")" + "\n"
|
||||||
|
out += "if err != nil {" + "\n"
|
||||||
|
out += " return err" + "\n"
|
||||||
|
out += "}" + "\n"
|
||||||
|
} else {
|
||||||
|
ic.OutputImports[`fflib "github.com/pquerna/ffjson/fflib/v1"`] = true
|
||||||
|
if forceString {
|
||||||
|
// Forcestring on strings does double-escaping of the entire value.
|
||||||
|
// We create a temporary buffer, encode to that an re-encode it.
|
||||||
|
out += "{" + "\n"
|
||||||
|
out += "tmpbuf := fflib.Buffer{}" + "\n"
|
||||||
|
out += "tmpbuf.Grow(len(" + ptname + ") + 16)" + "\n"
|
||||||
|
out += "fflib.WriteJsonString(&tmpbuf, string(" + ptname + "))" + "\n"
|
||||||
|
out += "fflib.WriteJsonString(buf, string( tmpbuf.Bytes() " + `))` + "\n"
|
||||||
|
out += "}" + "\n"
|
||||||
|
} else {
|
||||||
|
out += "fflib.WriteJsonString(buf, string(" + ptname + "))" + "\n"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Ptr:
|
||||||
|
out += "if " + name + "!= nil {" + "\n"
|
||||||
|
switch typ.Elem().Kind() {
|
||||||
|
case reflect.Struct:
|
||||||
|
out += getGetInnerValue(ic, name, typ.Elem(), false, false)
|
||||||
|
default:
|
||||||
|
out += getGetInnerValue(ic, "*"+name, typ.Elem(), false, false)
|
||||||
|
}
|
||||||
|
out += "} else {" + "\n"
|
||||||
|
out += "buf.WriteString(`null`)" + "\n"
|
||||||
|
out += "}" + "\n"
|
||||||
|
case reflect.Bool:
|
||||||
|
out += "if " + ptname + " {" + "\n"
|
||||||
|
ic.q.Write("true")
|
||||||
|
out += ic.q.GetQueued()
|
||||||
|
out += "} else {" + "\n"
|
||||||
|
// Delete 'true'
|
||||||
|
ic.q.DeleteLast()
|
||||||
|
out += ic.q.WriteFlush("false")
|
||||||
|
out += "}" + "\n"
|
||||||
|
case reflect.Interface:
|
||||||
|
out += fmt.Sprintf("/* Interface types must use runtime reflection. type=%v kind=%v */\n", typ, typ.Kind())
|
||||||
|
out += "err = buf.Encode(" + name + ")" + "\n"
|
||||||
|
out += "if err != nil {" + "\n"
|
||||||
|
out += " return err" + "\n"
|
||||||
|
out += "}" + "\n"
|
||||||
|
case reflect.Map:
|
||||||
|
out += getMapValue(ic, ptname, typ, ptr, forceString)
|
||||||
|
case reflect.Struct:
|
||||||
|
if typ.Name() == "" {
|
||||||
|
ic.q.Write("{")
|
||||||
|
ic.q.Write(" ")
|
||||||
|
out += fmt.Sprintf("/* Inline struct. type=%v kind=%v */\n", typ, typ.Kind())
|
||||||
|
newV := reflect.Indirect(reflect.New(typ)).Interface()
|
||||||
|
fields := extractFields(newV)
|
||||||
|
|
||||||
|
// Output all fields
|
||||||
|
for _, field := range fields {
|
||||||
|
// Adjust field name
|
||||||
|
field.Name = name + "." + field.Name
|
||||||
|
out += getField(ic, field, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
if lastConditional(fields) {
|
||||||
|
out += ic.q.Flush()
|
||||||
|
out += `buf.Rewind(1)` + "\n"
|
||||||
|
} else {
|
||||||
|
ic.q.DeleteLast()
|
||||||
|
}
|
||||||
|
out += ic.q.WriteFlush("}")
|
||||||
|
} else {
|
||||||
|
out += fmt.Sprintf("/* Struct fall back. type=%v kind=%v */\n", typ, typ.Kind())
|
||||||
|
out += ic.q.Flush()
|
||||||
|
if ptr {
|
||||||
|
out += "err = buf.Encode(" + name + ")" + "\n"
|
||||||
|
} else {
|
||||||
|
// We send pointer to avoid copying entire struct
|
||||||
|
out += "err = buf.Encode(&" + name + ")" + "\n"
|
||||||
|
}
|
||||||
|
out += "if err != nil {" + "\n"
|
||||||
|
out += " return err" + "\n"
|
||||||
|
out += "}" + "\n"
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
out += fmt.Sprintf("/* Falling back. type=%v kind=%v */\n", typ, typ.Kind())
|
||||||
|
out += "err = buf.Encode(" + name + ")" + "\n"
|
||||||
|
out += "if err != nil {" + "\n"
|
||||||
|
out += " return err" + "\n"
|
||||||
|
out += "}" + "\n"
|
||||||
|
}
|
||||||
|
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func getValue(ic *Inception, sf *StructField, prefix string) string {
|
||||||
|
closequote := false
|
||||||
|
if sf.ForceString {
|
||||||
|
switch sf.Typ.Kind() {
|
||||||
|
case reflect.Int,
|
||||||
|
reflect.Int8,
|
||||||
|
reflect.Int16,
|
||||||
|
reflect.Int32,
|
||||||
|
reflect.Int64,
|
||||||
|
reflect.Uint,
|
||||||
|
reflect.Uint8,
|
||||||
|
reflect.Uint16,
|
||||||
|
reflect.Uint32,
|
||||||
|
reflect.Uint64,
|
||||||
|
reflect.Uintptr,
|
||||||
|
reflect.Float32,
|
||||||
|
reflect.Float64,
|
||||||
|
reflect.Bool:
|
||||||
|
ic.q.Write(`"`)
|
||||||
|
closequote = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
out := getGetInnerValue(ic, prefix+sf.Name, sf.Typ, sf.Pointer, sf.ForceString)
|
||||||
|
if closequote {
|
||||||
|
if sf.Pointer {
|
||||||
|
out += ic.q.WriteFlush(`"`)
|
||||||
|
} else {
|
||||||
|
ic.q.Write(`"`)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func p2(v uint32) uint32 {
|
||||||
|
v--
|
||||||
|
v |= v >> 1
|
||||||
|
v |= v >> 2
|
||||||
|
v |= v >> 4
|
||||||
|
v |= v >> 8
|
||||||
|
v |= v >> 16
|
||||||
|
v++
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
func getTypeSize(t reflect.Type) uint32 {
|
||||||
|
switch t.Kind() {
|
||||||
|
case reflect.String:
|
||||||
|
// TODO: consider runtime analysis.
|
||||||
|
return 32
|
||||||
|
case reflect.Array, reflect.Map, reflect.Slice:
|
||||||
|
// TODO: consider runtime analysis.
|
||||||
|
return 4 * getTypeSize(t.Elem())
|
||||||
|
case reflect.Int,
|
||||||
|
reflect.Int8,
|
||||||
|
reflect.Int16,
|
||||||
|
reflect.Int32,
|
||||||
|
reflect.Uint,
|
||||||
|
reflect.Uint8,
|
||||||
|
reflect.Uint16,
|
||||||
|
reflect.Uint32:
|
||||||
|
return 8
|
||||||
|
case reflect.Int64,
|
||||||
|
reflect.Uint64,
|
||||||
|
reflect.Uintptr:
|
||||||
|
return 16
|
||||||
|
case reflect.Float32,
|
||||||
|
reflect.Float64:
|
||||||
|
return 16
|
||||||
|
case reflect.Bool:
|
||||||
|
return 4
|
||||||
|
case reflect.Ptr:
|
||||||
|
return getTypeSize(t.Elem())
|
||||||
|
default:
|
||||||
|
return 16
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getTotalSize(si *StructInfo) uint32 {
|
||||||
|
rv := uint32(si.Typ.Size())
|
||||||
|
for _, f := range si.Fields {
|
||||||
|
rv += getTypeSize(f.Typ)
|
||||||
|
}
|
||||||
|
return rv
|
||||||
|
}
|
||||||
|
|
||||||
|
func getBufGrowSize(si *StructInfo) uint32 {
|
||||||
|
|
||||||
|
// TOOD(pquerna): automatically calc a better grow size based on history
|
||||||
|
// of a struct.
|
||||||
|
return p2(getTotalSize(si))
|
||||||
|
}
|
||||||
|
|
||||||
|
func isIntish(t reflect.Type) bool {
|
||||||
|
if t.Kind() >= reflect.Int && t.Kind() <= reflect.Uintptr {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if t.Kind() == reflect.Array || t.Kind() == reflect.Slice || t.Kind() == reflect.Ptr {
|
||||||
|
if t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 {
|
||||||
|
// base64 special case.
|
||||||
|
return false
|
||||||
|
} else {
|
||||||
|
return isIntish(t.Elem())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func getField(ic *Inception, f *StructField, prefix string) string {
|
||||||
|
out := ""
|
||||||
|
if f.OmitEmpty {
|
||||||
|
out += ic.q.Flush()
|
||||||
|
if f.Pointer {
|
||||||
|
out += "if " + prefix + f.Name + " != nil {" + "\n"
|
||||||
|
}
|
||||||
|
out += getOmitEmpty(ic, f)
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.Pointer && !f.OmitEmpty {
|
||||||
|
// Pointer values encode as the value pointed to. A nil pointer encodes as the null JSON object.
|
||||||
|
out += "if " + prefix + f.Name + " != nil {" + "\n"
|
||||||
|
}
|
||||||
|
|
||||||
|
// JsonName is already escaped and quoted.
|
||||||
|
// getInnervalue should flush
|
||||||
|
ic.q.Write(f.JsonName + ":")
|
||||||
|
// We save a copy in case we need it
|
||||||
|
t := ic.q
|
||||||
|
|
||||||
|
out += getValue(ic, f, prefix)
|
||||||
|
ic.q.Write(",")
|
||||||
|
|
||||||
|
if f.Pointer && !f.OmitEmpty {
|
||||||
|
out += "} else {" + "\n"
|
||||||
|
out += t.WriteFlush("null")
|
||||||
|
out += "}" + "\n"
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.OmitEmpty {
|
||||||
|
out += ic.q.Flush()
|
||||||
|
if f.Pointer {
|
||||||
|
out += "}" + "\n"
|
||||||
|
}
|
||||||
|
out += "}" + "\n"
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// We check if the last field is conditional.
|
||||||
|
func lastConditional(fields []*StructField) bool {
|
||||||
|
if len(fields) > 0 {
|
||||||
|
f := fields[len(fields)-1]
|
||||||
|
return f.OmitEmpty
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func CreateMarshalJSON(ic *Inception, si *StructInfo) error {
|
||||||
|
conditionalWrites := lastConditional(si.Fields)
|
||||||
|
out := ""
|
||||||
|
|
||||||
|
out += "// MarshalJSON marshal bytes to json - template\n"
|
||||||
|
out += `func (j *` + si.Name + `) MarshalJSON() ([]byte, error) {` + "\n"
|
||||||
|
out += `var buf fflib.Buffer` + "\n"
|
||||||
|
|
||||||
|
out += `if j == nil {` + "\n"
|
||||||
|
out += ` buf.WriteString("null")` + "\n"
|
||||||
|
out += " return buf.Bytes(), nil" + "\n"
|
||||||
|
out += `}` + "\n"
|
||||||
|
|
||||||
|
out += `err := j.MarshalJSONBuf(&buf)` + "\n"
|
||||||
|
out += `if err != nil {` + "\n"
|
||||||
|
out += " return nil, err" + "\n"
|
||||||
|
out += `}` + "\n"
|
||||||
|
out += `return buf.Bytes(), nil` + "\n"
|
||||||
|
out += `}` + "\n"
|
||||||
|
|
||||||
|
out += "// MarshalJSONBuf marshal buff to json - template\n"
|
||||||
|
out += `func (j *` + si.Name + `) MarshalJSONBuf(buf fflib.EncodingBuffer) (error) {` + "\n"
|
||||||
|
out += ` if j == nil {` + "\n"
|
||||||
|
out += ` buf.WriteString("null")` + "\n"
|
||||||
|
out += " return nil" + "\n"
|
||||||
|
out += ` }` + "\n"
|
||||||
|
|
||||||
|
out += `var err error` + "\n"
|
||||||
|
out += `var obj []byte` + "\n"
|
||||||
|
out += `_ = obj` + "\n"
|
||||||
|
out += `_ = err` + "\n"
|
||||||
|
|
||||||
|
ic.q.Write("{")
|
||||||
|
|
||||||
|
// The extra space is inserted here.
|
||||||
|
// If nothing is written to the field this will be deleted
|
||||||
|
// instead of the last comma.
|
||||||
|
if conditionalWrites || len(si.Fields) == 0 {
|
||||||
|
ic.q.Write(" ")
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, f := range si.Fields {
|
||||||
|
out += getField(ic, f, "j.")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handling the last comma is tricky.
|
||||||
|
// If the last field has omitempty, conditionalWrites is set.
|
||||||
|
// If something has been written, we delete the last comma,
|
||||||
|
// by backing up the buffer, otherwise it will delete a space.
|
||||||
|
if conditionalWrites {
|
||||||
|
out += ic.q.Flush()
|
||||||
|
out += `buf.Rewind(1)` + "\n"
|
||||||
|
} else {
|
||||||
|
ic.q.DeleteLast()
|
||||||
|
}
|
||||||
|
|
||||||
|
out += ic.q.WriteFlush("}")
|
||||||
|
out += `return nil` + "\n"
|
||||||
|
out += `}` + "\n"
|
||||||
|
ic.OutputFuncs = append(ic.OutputFuncs, out)
|
||||||
|
return nil
|
||||||
|
}
|
73 vendor/github.com/pquerna/ffjson/inception/encoder_tpl.go generated vendored Normal file
@@ -0,0 +1,73 @@
/**
 * Copyright 2014 Paul Querna
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ffjsoninception

import (
	"reflect"
	"text/template"
)

var encodeTpl map[string]*template.Template

func init() {
	encodeTpl = make(map[string]*template.Template)

	funcs := map[string]string{
		"handleMarshaler": handleMarshalerTxt,
	}
	tplFuncs := template.FuncMap{}

	for k, v := range funcs {
		encodeTpl[k] = template.Must(template.New(k).Funcs(tplFuncs).Parse(v))
	}
}

type handleMarshaler struct {
	IC             *Inception
	Name           string
	Typ            reflect.Type
	Ptr            reflect.Kind
	MarshalJSONBuf bool
	Marshaler      bool
}

var handleMarshalerTxt = `
	{
		{{if eq .Typ.Kind .Ptr}}
		if {{.Name}} == nil {
			buf.WriteString("null")
		} else {
		{{end}}

		{{if eq .MarshalJSONBuf true}}
		err = {{.Name}}.MarshalJSONBuf(buf)
		if err != nil {
			return err
		}
		{{else if eq .Marshaler true}}
		obj, err = {{.Name}}.MarshalJSON()
		if err != nil {
			return err
		}
		buf.Write(obj)
		{{end}}
		{{if eq .Typ.Kind .Ptr}}
		}
		{{end}}
	}
`

160 vendor/github.com/pquerna/ffjson/inception/inception.go generated vendored Normal file
@@ -0,0 +1,160 @@
|
|||||||
|
/**
|
||||||
|
* Copyright 2014 Paul Querna
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ffjsoninception
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"github.com/pquerna/ffjson/shared"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Inception struct {
|
||||||
|
objs []*StructInfo
|
||||||
|
InputPath string
|
||||||
|
OutputPath string
|
||||||
|
PackageName string
|
||||||
|
PackagePath string
|
||||||
|
OutputImports map[string]bool
|
||||||
|
OutputFuncs []string
|
||||||
|
q ConditionalWrite
|
||||||
|
ResetFields bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewInception(inputPath string, packageName string, outputPath string, resetFields bool) *Inception {
|
||||||
|
return &Inception{
|
||||||
|
objs: make([]*StructInfo, 0),
|
||||||
|
InputPath: inputPath,
|
||||||
|
OutputPath: outputPath,
|
||||||
|
PackageName: packageName,
|
||||||
|
OutputFuncs: make([]string, 0),
|
||||||
|
OutputImports: make(map[string]bool),
|
||||||
|
ResetFields: resetFields,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *Inception) AddMany(objs []shared.InceptionType) {
|
||||||
|
for _, obj := range objs {
|
||||||
|
i.Add(obj)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *Inception) Add(obj shared.InceptionType) {
|
||||||
|
i.objs = append(i.objs, NewStructInfo(obj))
|
||||||
|
i.PackagePath = i.objs[0].Typ.PkgPath()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *Inception) wantUnmarshal(si *StructInfo) bool {
|
||||||
|
if si.Options.SkipDecoder {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
typ := si.Typ
|
||||||
|
umlx := typ.Implements(unmarshalFasterType) || reflect.PtrTo(typ).Implements(unmarshalFasterType)
|
||||||
|
umlstd := typ.Implements(unmarshalerType) || reflect.PtrTo(typ).Implements(unmarshalerType)
|
||||||
|
if umlstd && !umlx {
|
||||||
|
// structure has UnmarshalJSON, but not our faster version -- skip it.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *Inception) wantMarshal(si *StructInfo) bool {
|
||||||
|
if si.Options.SkipEncoder {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
typ := si.Typ
|
||||||
|
mlx := typ.Implements(marshalerFasterType) || reflect.PtrTo(typ).Implements(marshalerFasterType)
|
||||||
|
mlstd := typ.Implements(marshalerType) || reflect.PtrTo(typ).Implements(marshalerType)
|
||||||
|
if mlstd && !mlx {
|
||||||
|
// structure has MarshalJSON, but not our faster version -- skip it.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
type sortedStructs []*StructInfo
|
||||||
|
|
||||||
|
func (p sortedStructs) Len() int { return len(p) }
|
||||||
|
func (p sortedStructs) Less(i, j int) bool { return p[i].Name < p[j].Name }
|
||||||
|
func (p sortedStructs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||||
|
func (p sortedStructs) Sort() { sort.Sort(p) }
|
||||||
|
|
||||||
|
func (i *Inception) generateCode() error {
|
||||||
|
// We sort the structs by name, so output if predictable.
|
||||||
|
sorted := sortedStructs(i.objs)
|
||||||
|
sorted.Sort()
|
||||||
|
|
||||||
|
for _, si := range sorted {
|
||||||
|
if i.wantMarshal(si) {
|
||||||
|
err := CreateMarshalJSON(i, si)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if i.wantUnmarshal(si) {
|
||||||
|
err := CreateUnmarshalJSON(i, si)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *Inception) handleError(err error) {
|
||||||
|
fmt.Fprintf(os.Stderr, "Error: %s:\n\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *Inception) Execute() {
|
||||||
|
if len(os.Args) != 1 {
|
||||||
|
i.handleError(errors.New(fmt.Sprintf("Internal ffjson error: inception executable takes no args: %v", os.Args)))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
err := i.generateCode()
|
||||||
|
if err != nil {
|
||||||
|
i.handleError(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := RenderTemplate(i)
|
||||||
|
if err != nil {
|
||||||
|
i.handleError(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
stat, err := os.Stat(i.InputPath)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
i.handleError(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
err = ioutil.WriteFile(i.OutputPath, data, stat.Mode())
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
i.handleError(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
290 vendor/github.com/pquerna/ffjson/inception/reflect.go generated vendored Normal file
@@ -0,0 +1,290 @@
|
|||||||
|
/**
|
||||||
|
* Copyright 2014 Paul Querna
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ffjsoninception
|
||||||
|
|
||||||
|
import (
|
||||||
|
fflib "github.com/pquerna/ffjson/fflib/v1"
|
||||||
|
"github.com/pquerna/ffjson/shared"
|
||||||
|
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"reflect"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
type StructField struct {
|
||||||
|
Name string
|
||||||
|
JsonName string
|
||||||
|
FoldFuncName string
|
||||||
|
Typ reflect.Type
|
||||||
|
OmitEmpty bool
|
||||||
|
ForceString bool
|
||||||
|
HasMarshalJSON bool
|
||||||
|
HasUnmarshalJSON bool
|
||||||
|
Pointer bool
|
||||||
|
Tagged bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type FieldByJsonName []*StructField
|
||||||
|
|
||||||
|
func (a FieldByJsonName) Len() int { return len(a) }
|
||||||
|
func (a FieldByJsonName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||||
|
func (a FieldByJsonName) Less(i, j int) bool { return a[i].JsonName < a[j].JsonName }
|
||||||
|
|
||||||
|
type StructInfo struct {
|
||||||
|
Name string
|
||||||
|
Obj interface{}
|
||||||
|
Typ reflect.Type
|
||||||
|
Fields []*StructField
|
||||||
|
Options shared.StructOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewStructInfo(obj shared.InceptionType) *StructInfo {
|
||||||
|
t := reflect.TypeOf(obj.Obj)
|
||||||
|
return &StructInfo{
|
||||||
|
Obj: obj.Obj,
|
||||||
|
Name: t.Name(),
|
||||||
|
Typ: t,
|
||||||
|
Fields: extractFields(obj.Obj),
|
||||||
|
Options: obj.Options,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (si *StructInfo) FieldsByFirstByte() map[string][]*StructField {
|
||||||
|
rv := make(map[string][]*StructField)
|
||||||
|
for _, f := range si.Fields {
|
||||||
|
b := string(f.JsonName[1])
|
||||||
|
rv[b] = append(rv[b], f)
|
||||||
|
}
|
||||||
|
return rv
|
||||||
|
}
|
||||||
|
|
||||||
|
func (si *StructInfo) ReverseFields() []*StructField {
|
||||||
|
var i int
|
||||||
|
rv := make([]*StructField, 0)
|
||||||
|
for i = len(si.Fields) - 1; i >= 0; i-- {
|
||||||
|
rv = append(rv, si.Fields[i])
|
||||||
|
}
|
||||||
|
return rv
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
|
||||||
|
)
|
||||||
|
|
||||||
|
func foldFunc(key []byte) string {
|
||||||
|
nonLetter := false
|
||||||
|
special := false // special letter
|
||||||
|
for _, b := range key {
|
||||||
|
if b >= utf8.RuneSelf {
|
||||||
|
return "bytes.EqualFold"
|
||||||
|
}
|
||||||
|
upper := b & caseMask
|
||||||
|
if upper < 'A' || upper > 'Z' {
|
||||||
|
nonLetter = true
|
||||||
|
} else if upper == 'K' || upper == 'S' {
|
||||||
|
// See above for why these letters are special.
|
||||||
|
special = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if special {
|
||||||
|
return "fflib.EqualFoldRight"
|
||||||
|
}
|
||||||
|
if nonLetter {
|
||||||
|
return "fflib.AsciiEqualFold"
|
||||||
|
}
|
||||||
|
return "fflib.SimpleLetterEqualFold"
|
||||||
|
}
|
||||||
|
|
||||||
|
type MarshalerFaster interface {
|
||||||
|
MarshalJSONBuf(buf fflib.EncodingBuffer) error
|
||||||
|
}
|
||||||
|
|
||||||
|
type UnmarshalFaster interface {
|
||||||
|
UnmarshalJSONFFLexer(l *fflib.FFLexer, state fflib.FFParseState) error
|
||||||
|
}
|
||||||
|
|
||||||
|
var marshalerType = reflect.TypeOf(new(json.Marshaler)).Elem()
|
||||||
|
var marshalerFasterType = reflect.TypeOf(new(MarshalerFaster)).Elem()
|
||||||
|
var unmarshalerType = reflect.TypeOf(new(json.Unmarshaler)).Elem()
|
||||||
|
var unmarshalFasterType = reflect.TypeOf(new(UnmarshalFaster)).Elem()
|
||||||
|
|
||||||
|
// extractFields returns a list of fields that JSON should recognize for the given type.
|
||||||
|
// The algorithm is breadth-first search over the set of structs to include - the top struct
|
||||||
|
// and then any reachable anonymous structs.
|
||||||
|
func extractFields(obj interface{}) []*StructField {
|
||||||
|
t := reflect.TypeOf(obj)
|
||||||
|
// Anonymous fields to explore at the current level and the next.
|
||||||
|
current := []StructField{}
|
||||||
|
next := []StructField{{Typ: t}}
|
||||||
|
|
||||||
|
// Count of queued names for current level and the next.
|
||||||
|
count := map[reflect.Type]int{}
|
||||||
|
nextCount := map[reflect.Type]int{}
|
||||||
|
|
||||||
|
// Types already visited at an earlier level.
|
||||||
|
visited := map[reflect.Type]bool{}
|
||||||
|
|
||||||
|
// Fields found.
|
||||||
|
var fields []*StructField
|
||||||
|
|
||||||
|
for len(next) > 0 {
|
||||||
|
current, next = next, current[:0]
|
||||||
|
count, nextCount = nextCount, map[reflect.Type]int{}
|
||||||
|
|
||||||
|
for _, f := range current {
|
||||||
|
if visited[f.Typ] {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
visited[f.Typ] = true
|
||||||
|
|
||||||
|
// Scan f.typ for fields to include.
|
||||||
|
for i := 0; i < f.Typ.NumField(); i++ {
|
||||||
|
sf := f.Typ.Field(i)
|
||||||
|
if sf.PkgPath != "" { // unexported
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
tag := sf.Tag.Get("json")
|
||||||
|
if tag == "-" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
name, opts := parseTag(tag)
|
||||||
|
if !isValidTag(name) {
|
||||||
|
name = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
ft := sf.Type
|
||||||
|
ptr := false
|
||||||
|
if ft.Kind() == reflect.Ptr {
|
||||||
|
ptr = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
|
||||||
|
// Follow pointer.
|
||||||
|
ft = ft.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Record found field and index sequence.
|
||||||
|
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
|
||||||
|
tagged := name != ""
|
||||||
|
if name == "" {
|
||||||
|
name = sf.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
fflib.WriteJsonString(&buf, name)
|
||||||
|
|
||||||
|
field := &StructField{
|
||||||
|
Name: sf.Name,
|
||||||
|
JsonName: string(buf.Bytes()),
|
||||||
|
FoldFuncName: foldFunc([]byte(name)),
|
||||||
|
Typ: ft,
|
||||||
|
HasMarshalJSON: ft.Implements(marshalerType),
|
||||||
|
HasUnmarshalJSON: ft.Implements(unmarshalerType),
|
||||||
|
OmitEmpty: opts.Contains("omitempty"),
|
||||||
|
ForceString: opts.Contains("string"),
|
||||||
|
Pointer: ptr,
|
||||||
|
Tagged: tagged,
|
||||||
|
}
|
||||||
|
|
||||||
|
fields = append(fields, field)
|
||||||
|
|
||||||
|
if count[f.Typ] > 1 {
|
||||||
|
// If there were multiple instances, add a second,
|
||||||
|
// so that the annihilation code will see a duplicate.
|
||||||
|
// It only cares about the distinction between 1 or 2,
|
||||||
|
// so don't bother generating any more copies.
|
||||||
|
fields = append(fields, fields[len(fields)-1])
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Record new anonymous struct to explore in next round.
|
||||||
|
nextCount[ft]++
|
||||||
|
if nextCount[ft] == 1 {
|
||||||
|
next = append(next, StructField{
|
||||||
|
Name: ft.Name(),
|
||||||
|
Typ: ft,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete all fields that are hidden by the Go rules for embedded fields,
|
||||||
|
// except that fields with JSON tags are promoted.
|
||||||
|
|
||||||
|
// The fields are sorted in primary order of name, secondary order
|
||||||
|
// of field index length. Loop over names; for each name, delete
|
||||||
|
// hidden fields by choosing the one dominant field that survives.
|
||||||
|
out := fields[:0]
|
||||||
|
for advance, i := 0, 0; i < len(fields); i += advance {
|
||||||
|
// One iteration per name.
|
||||||
|
// Find the sequence of fields with the name of this first field.
|
||||||
|
fi := fields[i]
|
||||||
|
name := fi.JsonName
|
||||||
|
for advance = 1; i+advance < len(fields); advance++ {
|
||||||
|
fj := fields[i+advance]
|
||||||
|
if fj.JsonName != name {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if advance == 1 { // Only one field with this name
|
||||||
|
out = append(out, fi)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
dominant, ok := dominantField(fields[i : i+advance])
|
||||||
|
if ok {
|
||||||
|
out = append(out, dominant)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fields = out
|
||||||
|
|
||||||
|
return fields
|
||||||
|
}
|
||||||
|
|
||||||
|
// dominantField looks through the fields, all of which are known to
|
||||||
|
// have the same name, to find the single field that dominates the
|
||||||
|
// others using Go's embedding rules, modified by the presence of
|
||||||
|
// JSON tags. If there are multiple top-level fields, the boolean
|
||||||
|
// will be false: This condition is an error in Go and we skip all
|
||||||
|
// the fields.
|
||||||
|
func dominantField(fields []*StructField) (*StructField, bool) {
|
||||||
|
tagged := -1 // Index of first tagged field.
|
||||||
|
for i, f := range fields {
|
||||||
|
if f.Tagged {
|
||||||
|
if tagged >= 0 {
|
||||||
|
// Multiple tagged fields at the same level: conflict.
|
||||||
|
// Return no field.
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
tagged = i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if tagged >= 0 {
|
||||||
|
return fields[tagged], true
|
||||||
|
}
|
||||||
|
// All remaining fields have the same length. If there's more than one,
|
||||||
|
// we have a conflict (two fields named "X" at the same level) and we
|
||||||
|
// return no field.
|
||||||
|
if len(fields) > 1 {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
return fields[0], true
|
||||||
|
}
|
79 vendor/github.com/pquerna/ffjson/inception/tags.go generated vendored Normal file
@@ -0,0 +1,79 @@
|
|||||||
|
/**
|
||||||
|
* Copyright 2014 Paul Querna
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ffjsoninception
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
"unicode"
|
||||||
|
)
|
||||||
|
|
||||||
|
// from: http://golang.org/src/pkg/encoding/json/tags.go
|
||||||
|
|
||||||
|
// tagOptions is the string following a comma in a struct field's "json"
|
||||||
|
// tag, or the empty string. It does not include the leading comma.
|
||||||
|
type tagOptions string
|
||||||
|
|
||||||
|
// parseTag splits a struct field's json tag into its name and
|
||||||
|
// comma-separated options.
|
||||||
|
func parseTag(tag string) (string, tagOptions) {
|
||||||
|
if idx := strings.Index(tag, ","); idx != -1 {
|
||||||
|
return tag[:idx], tagOptions(tag[idx+1:])
|
||||||
|
}
|
||||||
|
return tag, tagOptions("")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Contains reports whether a comma-separated list of options
|
||||||
|
// contains a particular substr flag. substr must be surrounded by a
|
||||||
|
// string boundary or commas.
|
||||||
|
func (o tagOptions) Contains(optionName string) bool {
|
||||||
|
if len(o) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
s := string(o)
|
||||||
|
for s != "" {
|
||||||
|
var next string
|
||||||
|
i := strings.Index(s, ",")
|
||||||
|
if i >= 0 {
|
||||||
|
s, next = s[:i], s[i+1:]
|
||||||
|
}
|
||||||
|
if s == optionName {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
s = next
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func isValidTag(s string) bool {
|
||||||
|
if s == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, c := range s {
|
||||||
|
switch {
|
||||||
|
case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
|
||||||
|
// Backslash and quote chars are reserved, but
|
||||||
|
// otherwise any punctuation chars are allowed
|
||||||
|
// in a tag name.
|
||||||
|
default:
|
||||||
|
if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
60 vendor/github.com/pquerna/ffjson/inception/template.go generated vendored Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
/**
|
||||||
|
* Copyright 2014 Paul Querna
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ffjsoninception
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"go/format"
|
||||||
|
"text/template"
|
||||||
|
)
|
||||||
|
|
||||||
|
const ffjsonTemplate = `
|
||||||
|
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
|
||||||
|
// source: {{.InputPath}}
|
||||||
|
|
||||||
|
package {{.PackageName}}
|
||||||
|
|
||||||
|
import (
|
||||||
|
{{range $k, $v := .OutputImports}}{{$k}}
|
||||||
|
{{end}}
|
||||||
|
)
|
||||||
|
|
||||||
|
{{range .OutputFuncs}}
|
||||||
|
{{.}}
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
`
|
||||||
|
|
||||||
|
func RenderTemplate(ic *Inception) ([]byte, error) {
|
||||||
|
t := template.Must(template.New("ffjson.go").Parse(ffjsonTemplate))
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
err := t.Execute(buf, ic)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return format.Source(buf.Bytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
func tplStr(t *template.Template, data interface{}) string {
|
||||||
|
buf := bytes.Buffer{}
|
||||||
|
err := t.Execute(&buf, data)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return buf.String()
|
||||||
|
}
|
65 vendor/github.com/pquerna/ffjson/inception/writerstack.go generated vendored Normal file
@@ -0,0 +1,65 @@
package ffjsoninception

import "strings"

// ConditionalWrite is a stack containing a number of pending writes
type ConditionalWrite struct {
	Queued []string
}

// Write will add a string to be written
func (w *ConditionalWrite) Write(s string) {
	w.Queued = append(w.Queued, s)
}

// DeleteLast will delete the last added write
func (w *ConditionalWrite) DeleteLast() {
	if len(w.Queued) == 0 {
		return
	}
	w.Queued = w.Queued[:len(w.Queued)-1]
}

// Last will return the last added write
func (w *ConditionalWrite) Last() string {
	if len(w.Queued) == 0 {
		return ""
	}
	return w.Queued[len(w.Queued)-1]
}

// Flush will return all queued writes, and return
// "" (empty string) if nothing has been queued,
// "buf.WriteByte('" + byte + "')" + "\n" if one byte has been queued,
// "buf.WriteString(`" + string + "`)" + "\n" if more than one byte has been queued.
func (w *ConditionalWrite) Flush() string {
	combined := strings.Join(w.Queued, "")
	if len(combined) == 0 {
		return ""
	}

	w.Queued = nil
	if len(combined) == 1 {
		return "buf.WriteByte('" + combined + "')" + "\n"
	}
	return "buf.WriteString(`" + combined + "`)" + "\n"
}

func (w *ConditionalWrite) FlushTo(out string) string {
	out += w.Flush()
	return out
}

// WriteFlush will add a string and return the Flush result for the queue
func (w *ConditionalWrite) WriteFlush(s string) string {
	w.Write(s)
	return w.Flush()
}

// GetQueued will return the current queued content without flushing.
func (w *ConditionalWrite) GetQueued() string {
	t := w.Queued
	s := w.Flush()
	w.Queued = t
	return s
}
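As a quick illustration of the batching behaviour documented on Flush above, this is roughly what a testable example for ConditionalWrite could look like. It is an editor's sketch assuming it would live in a _test.go file of this package; it is not part of the vendored code.

package ffjsoninception

import "fmt"

// ExampleConditionalWrite shows that a single queued byte is flushed as a
// WriteByte call, while longer queued content is flushed as one WriteString.
func ExampleConditionalWrite() {
	var w ConditionalWrite
	w.Write("{")
	fmt.Print(w.Flush())
	w.Write("{")
	w.Write(" ")
	fmt.Print(w.Flush())
	// Output:
	// buf.WriteByte('{')
	// buf.WriteString(`{ `)
}
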

51 vendor/github.com/pquerna/ffjson/shared/options.go generated vendored Normal file
@@ -0,0 +1,51 @@
/**
 *  Copyright 2014 Paul Querna, Klaus Post
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 *
 */

package shared

type StructOptions struct {
	SkipDecoder bool
	SkipEncoder bool
}

type InceptionType struct {
	Obj     interface{}
	Options StructOptions
}
type Feature int

const (
	Nothing     Feature = 0
	MustDecoder         = 1 << 1
	MustEncoder         = 1 << 2
	MustEncDec          = MustDecoder | MustEncoder
)

func (i InceptionType) HasFeature(f Feature) bool {
	return i.Options.HasFeature(f)
}

func (s StructOptions) HasFeature(f Feature) bool {
	hasNeeded := true
	if f&MustDecoder != 0 && s.SkipDecoder {
		hasNeeded = false
	}
	if f&MustEncoder != 0 && s.SkipEncoder {
		hasNeeded = false
	}
	return hasNeeded
}
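Feature is a small bitmask: MustDecoder and MustEncoder occupy separate bits, MustEncDec combines them, and StructOptions.HasFeature reports whether none of the requested generators were skipped. A short sketch of that behaviour against the vendored package (the option values are assumptions chosen for illustration):

package main

import (
	"fmt"

	"github.com/pquerna/ffjson/shared"
)

func main() {
	// A struct whose ffjson options skipped decoder generation.
	opts := shared.StructOptions{SkipDecoder: true}

	fmt.Println(opts.HasFeature(shared.MustEncoder)) // true: encoder is still wanted
	fmt.Println(opts.HasFeature(shared.MustDecoder)) // false: decoder was skipped
	fmt.Println(opts.HasFeature(shared.MustEncDec))  // false: both were required
}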
12
vendor/modules.txt
vendored
@@ -45,7 +45,7 @@ github.com/containernetworking/cni/pkg/version
 github.com/containernetworking/cni/pkg/types/020
 # github.com/containernetworking/plugins v0.8.1
 github.com/containernetworking/plugins/pkg/ns
-# github.com/containers/buildah v1.9.2
+# github.com/containers/buildah v1.10.1
 github.com/containers/buildah
 github.com/containers/buildah/imagebuildah
 github.com/containers/buildah/pkg/chrootuser
@@ -60,7 +60,7 @@ github.com/containers/buildah/docker
 github.com/containers/buildah/pkg/blobcache
 github.com/containers/buildah/pkg/overlay
 github.com/containers/buildah/pkg/unshare
-# github.com/containers/image v2.0.1+incompatible
+# github.com/containers/image v3.0.2+incompatible
 github.com/containers/image/directory
 github.com/containers/image/docker
 github.com/containers/image/docker/archive
@@ -79,12 +79,12 @@ github.com/containers/image/tarball
 github.com/containers/image/pkg/sysregistriesv2
 github.com/containers/image/image
 github.com/containers/image/oci/layout
-github.com/containers/image/pkg/sysregistries
 github.com/containers/image/directory/explicitfilepath
 github.com/containers/image/docker/policyconfiguration
 github.com/containers/image/pkg/blobinfocache/none
 github.com/containers/image/pkg/tlsclientconfig
 github.com/containers/image/pkg/strslice
+github.com/containers/image/pkg/keyctl
 github.com/containers/image/version
 github.com/containers/image/docker/daemon
 github.com/containers/image/openshift
@@ -103,7 +103,7 @@ github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
 github.com/containers/psgo/internal/host
-# github.com/containers/storage v1.12.16
+# github.com/containers/storage v1.13.1
 github.com/containers/storage
 github.com/containers/storage/pkg/archive
 github.com/containers/storage/pkg/chrootarchive
@@ -389,6 +389,8 @@ github.com/pkg/profile
 github.com/pmezard/go-difflib/difflib
 # github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7
 github.com/pquerna/ffjson/fflib/v1
+github.com/pquerna/ffjson/inception
+github.com/pquerna/ffjson/shared
 github.com/pquerna/ffjson/fflib/v1/internal
 # github.com/prometheus/client_golang v1.0.0
 github.com/prometheus/client_golang/prometheus
@@ -451,9 +453,9 @@ github.com/varlink/go/varlink
 github.com/varlink/go/cmd/varlink-go-interface-generator
 github.com/varlink/go/varlink/idl
 # github.com/vbatts/tar-split v0.11.1
+github.com/vbatts/tar-split/archive/tar
 github.com/vbatts/tar-split/tar/asm
 github.com/vbatts/tar-split/tar/storage
-github.com/vbatts/tar-split/archive/tar
 # github.com/vbauerster/mpb v3.4.0+incompatible
 github.com/vbauerster/mpb
 github.com/vbauerster/mpb/decor