Merge pull request #7944 from cevich/new_get_ci_vm

Cirrus: Fix obtaining a CI VM
OpenShift Merge Robot
2020-10-09 06:14:17 -04:00
committed by GitHub
6 changed files with 170 additions and 205 deletions

View File

@@ -47,7 +47,7 @@ env:
     TEST_ENVIRON: host          # 'host' or 'container'
     PODBIN_NAME: podman         # 'podman' or 'remote'
     PRIV_NAME: root             # 'root' or 'rootless'
-    DISTRO_NV: $FEDORA_NAME     # any {PRIOR_,}{FEDORA,UBUNTU}_NAME value
+    DISTRO_NV:                  # any {PRIOR_,}{FEDORA,UBUNTU}_NAME value
     VM_IMAGE_NAME:              # One of the "Google-cloud VM Images" (above)
     CTR_FQIN:                   # One of the "Container FQIN's" (above)

View File

@@ -6,18 +6,20 @@
 # BEGIN Global export of all variables
 set -a
-# Due to differences across platforms and runtime execution environments,
-# handling of the (otherwise) default shell setup is non-uniform. Rather
-# than attempt to workaround differences, simply force-load/set required
-# items every time this library is utilized.
-source /etc/profile
-source /etc/environment
-USER="$(whoami)"
-HOME="$(getent passwd $USER | cut -d : -f 6)"
-# Some platforms set and make this read-only
-[[ -n "$UID" ]] || \
-    UID=$(getent passwd $USER | cut -d : -f 3)
-GID=$(getent passwd $USER | cut -d : -f 4)
+if [[ "$CI" == "true" ]]; then
+    # Due to differences across platforms and runtime execution environments,
+    # handling of the (otherwise) default shell setup is non-uniform. Rather
+    # than attempt to workaround differences, simply force-load/set required
+    # items every time this library is utilized.
+    source /etc/profile
+    source /etc/environment
+    USER="$(whoami)"
+    HOME="$(getent passwd $USER | cut -d : -f 6)"
+    # Some platforms set and make this read-only
+    [[ -n "$UID" ]] || \
+        UID=$(getent passwd $USER | cut -d : -f 3)
+    GID=$(getent passwd $USER | cut -d : -f 4)
+fi

 # During VM Image build, the 'containers/automation' installation
 # was performed. The final step of that installation sets the

@@ -43,6 +45,9 @@ OS_RELEASE_ID="$(source /etc/os-release; echo $ID)"
 OS_RELEASE_VER="$(source /etc/os-release; echo $VERSION_ID | cut -d '.' -f 1)"
 # Combined to ease soe usage
 OS_REL_VER="${OS_RELEASE_ID}-${OS_RELEASE_VER}"
+# This is normally set from .cirrus.yml but default is necessary when
+# running under hack/get_ci_vm.sh since it cannot infer the value.
+DISTRO_NV="${DISTRO_NV:-$OS_REL_VER}"
 # Essential default paths, many are overridden when executing under Cirrus-CI
 GOPATH="${GOPATH:-/var/tmp/go}"

View File

@@ -139,6 +139,9 @@ function _run_vendor() {
 }

 function _run_build() {
+    # Ensure always start from clean-slate with all vendor modules downloaded
+    make clean
+    make vendor
     make podman-release
     make podman-remote-linux-release
 }

View File

@ -67,9 +67,8 @@ case "$CG_FS_TYPE" in
*) die_unknown CG_FS_TYPE *) die_unknown CG_FS_TYPE
esac esac
# Required to be defined by caller: Which distribution are we testing on # Which distribution are we testing on.
# shellcheck disable=SC2154 case "$OS_RELEASE_ID" in
case "$DISTRO_NV" in
ubuntu*) ;; ubuntu*) ;;
fedora*) fedora*)
if ((CONTAINER==0)); then # Not yet running inside a container if ((CONTAINER==0)); then # Not yet running inside a container
@ -83,7 +82,7 @@ case "$DISTRO_NV" in
setsebool container_manage_cgroup true setsebool container_manage_cgroup true
fi fi
;; ;;
*) die_unknown DISTRO_NV *) die_unknown OS_RELEASE_ID
esac esac
# Required to be defined by caller: The environment where primary testing happens # Required to be defined by caller: The environment where primary testing happens
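Editor's note: switching the case statement from DISTRO_NV to OS_RELEASE_ID removes the requirement that the caller export anything, since the cirrus library sets OS_RELEASE_ID unconditionally from /etc/os-release (see the lib.sh hunk above). A minimal stand-alone sketch of the same dispatch; the echo bodies are placeholders, not the real setup steps:

    # Hypothetical illustration; in the real script OS_RELEASE_ID comes from lib.sh
    OS_RELEASE_ID="$(source /etc/os-release; echo $ID)"   # e.g. "fedora" or "ubuntu"
    case "$OS_RELEASE_ID" in
        ubuntu*) echo "ubuntu-specific setup" ;;
        fedora*) echo "fedora-specific setup (SELinux booleans, etc.)" ;;
        *) echo "unsupported distribution: $OS_RELEASE_ID" >&2; exit 1 ;;
    esac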

View File

@@ -11,6 +11,6 @@ shellcheck --color=always --format=tty \
     --enable add-default-case,avoid-nullary-conditions,check-unassigned-uppercase \
     --exclude SC2046,SC2034,SC2090,SC2064 \
     --wiki-link-count=0 --severity=warning \
-    $SCRIPT_BASE/*.sh
+    $SCRIPT_BASE/*.sh hack/get_ci_vm.sh

 echo "Shellcheck: PASS"

View File

@@ -1,49 +1,82 @@
 #!/usr/bin/env bash
+#
+# For help and usage information, simply execute the script w/o any arguments.
+#
+# This script is intended to be run by podman developers who need to debug
+# problems specifically related to Cirrus-CI automated testing. However,
+# because it's only loosely coupled to the `.cirrus.yml` configuration, it must
+# orchestrate VMs in GCP directly. This means users need to have
+# pre-authorization (access) to manipulate google-cloud resoures. Additionally,
+# there are no guarantees it will remain in-sync with other automation-related
+# scripts. Therefore it may not always function for everybody in every
+# future scenario without updates/modifications/tweaks.

 set -e

-RED="\e[1;36;41m"
-YEL="\e[1;33;44m"
+RED="\e[1;31m"
+YEL="\e[1;32m"
 NOR="\e[0m"
 USAGE_WARNING="
-${YEL}WARNING: This will not work without local sudo access to run podman,${NOR}
-${YEL}and prior authorization to use the libpod GCP project. Also,${NOR}
-${YEL}possession of the proper ssh private key is required.${NOR}
+${YEL}WARNING: This will not work without podman,${NOR}
+${YEL}and prior authorization to use the libpod GCP project.${NOR}
 "
-# TODO: Many/most of these values should come from .cirrus.yml
+# These values come from .cirrus.yml gce_instance clause
 ZONE="${ZONE:-us-central1-a}"
 CPUS="2"
 MEMORY="4Gb"
 DISK="200"
 PROJECT="libpod-218412"
 GOSRC="/var/tmp/go/src/github.com/containers/podman"
-GCLOUD_IMAGE=${GCLOUD_IMAGE:-quay.io/cevich/gcloud_centos:latest}
-GCLOUD_SUDO=${GCLOUD_SUDO-sudo}
+GIT_REPO="https://github.com/containers/podman.git"
+
+# Container image with necessary runtime elements
+GCLOUD_IMAGE="${GCLOUD_IMAGE:-docker.io/google/cloud-sdk:alpine}"
+GCLOUD_CFGDIR=".config/gcloud"
+SCRIPT_FILENAME=$(basename ${BASH_SOURCE[0]})
+HOOK_FILENAME="hook_${SCRIPT_FILENAME}"

 # Shared tmp directory between container and us
-TMPDIR=$(mktemp -d --tmpdir $(basename $0)_tmpdir_XXXXXX)
+TMPDIR=$(mktemp -d --tmpdir ${SCRIPT_FILENAME}_tmpdir_XXXXXX)

-LIBPODROOT=$(realpath "$(dirname $0)/../")
+show_usage() {
+    echo -e "\n${RED}ERROR: $1${NOR}"
+    echo -e "${YEL}Usage: $SCRIPT_FILENAME <image_name>${NOR}"
+    echo ""
+    if [[ -r ".cirrus.yml" ]]
+    then
+        echo -e "${YEL}Some possible image_name values (from .cirrus.yml):${NOR}"
+        image_hints
+        echo ""
+        echo -e "${YEL}Optional:${NOR} If a $HOME/$GCLOUD_CFGDIR/$HOOK_FILENAME executable exists during"
+        echo "VM creation, it will be executed remotely after cloning"
+        echo "$GIT_REPO. The"
+        echo "current local working branch name and commit ID, will be provided as"
+        echo "it's arguments."
+    fi
+    exit 1
+}
+
+LIBPODROOT=$(realpath "$(dirname ${BASH_SOURCE[0]})/../")
 # else: Assume $PWD is the root of the libpod repository
-[[ "$LIBPODROOT" != "/" ]] || LIBPODROOT=$PWD
-
-# Command shortcuts save some typing (assumes $LIBPODROOT is subdir of $HOME)
-PGCLOUD="$GCLOUD_SUDO podman run -it --rm -e AS_ID=$UID -e AS_USER=$USER --security-opt label=disable -v $TMPDIR:$HOME -v $HOME/.config/gcloud:$HOME/.config/gcloud -v $HOME/.config/gcloud/ssh:$HOME/.ssh -v $LIBPODROOT:$LIBPODROOT $GCLOUD_IMAGE --configuration=libpod --project=$PROJECT"
+[[ "$LIBPODROOT" != "/" ]] || \
+    show_usage "Must execute script from within clone of containers/podman repo."
+
+[[ "$UID" -ne 0 ]] || \
+    show_usage "Must execute script as a regular (non-root) user."
+
+[[ "${LIBPODROOT#$HOME}" != "$LIBPODROOT" ]] || \
+    show_usage "Clone of containers/podman must be a subdirectory of \$HOME ($HOME)"
+
+# Disable SELinux labeling to allow read-only mounting of repository files
+PGCLOUD="podman run -it --rm --security-opt label=disable -v $TMPDIR:$TMPDIR -v $HOME/.config/gcloud:/root/.config/gcloud -v $HOME/.config/gcloud/ssh:/root/.ssh -v $LIBPODROOT:$LIBPODROOT:ro $GCLOUD_IMAGE gcloud --configuration=libpod --project=$PROJECT"
 SCP_CMD="$PGCLOUD compute scp"

 showrun() {
-    if [[ "$1" == "--background" ]]
-    then
-        shift
-        # Properly escape any nested spaces, so command can be copy-pasted
-        echo '+ '$(printf " %q" "$@")' &' > /dev/stderr
-        "$@" &
-        echo -e "${RED}<backgrounded>${NOR}"
-    else
-        echo '+ '$(printf " %q" "$@") > /dev/stderr
-        "$@"
-    fi
+    echo '+ '$(printf " %q" "$@") > /dev/stderr
+    echo ""
+    "$@"
 }

 cleanup() {
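Editor's note: the reworked PGCLOUD wrapper drops the custom quay.io image and the sudo requirement: gcloud now runs from the stock cloud-sdk container, with the host's ~/.config/gcloud mounted at /root/.config/gcloud so credentials persist between runs, and with the repository mounted read-only. A minimal stand-alone sketch of the same pattern; the instances-list command is an arbitrary example, not taken from the script:

    # Hypothetical one-off equivalent of a $PGCLOUD invocation
    podman run -it --rm --security-opt label=disable \
        -v "$HOME/.config/gcloud:/root/.config/gcloud" \
        docker.io/google/cloud-sdk:alpine \
        gcloud --configuration=libpod --project=libpod-218412 compute instances list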
@@ -52,6 +85,7 @@ cleanup() {
     wait

     # set GCLOUD_DEBUG to leave tmpdir behind for postmortem
+    # shellcheck disable=SC2154
     test -z "$GCLOUD_DEBUG" && rm -rf $TMPDIR

     # Not always called from an exit handler, but should always exit when called
@@ -61,32 +95,18 @@ trap cleanup EXIT

 delvm() {
     echo -e "\n"
-    echo -e "\n${YEL}Offering to Delete $VMNAME ${RED}(Might take a minute or two)${NOR}"
-    echo -e "\n${YEL}Note: It's safe to answer N, then re-run script again later.${NOR}"
+    echo -e "\n${YEL}Offering to Delete $VMNAME${NOR}"
+    echo -e "${RED}(Deletion might take a minute or two)${NOR}"
+    echo -e "${YEL}Note: It's safe to answer N, then re-run script again later.${NOR}"
     showrun $CLEANUP_CMD  # prompts for Yes/No
     cleanup
 }

-show_usage() {
-    echo -e "\n${RED}ERROR: $1${NOR}"
-    echo -e "${YEL}Usage: $(basename $0) [-m <SPECIALMODE>] [-u <ROOTLESS_USER> ] <image_name>${NOR}"
-    echo "Use -m <SPECIALMODE> with a supported value documented in contrib/cirrus/README.md."
-    echo "With '-m rootless' must also specify -u <ROOTLESS_USER> with name of user to create & use"
-    echo ""
-    if [[ -r ".cirrus.yml" ]]
-    then
-        echo -e "${YEL}Some possible image_name values (from .cirrus.yml):${NOR}"
-        image_hints
-        echo ""
-    fi
-    exit 1
-}
-
 get_env_vars() {
     # Deal with both YAML and embedded shell-like substitutions in values
     # if substitution fails, fall back to printing naked env. var as-is.
     python3 -c '
-import yaml,re
+import sys,yaml,re
 env=yaml.load(open(".cirrus.yml"), Loader=yaml.SafeLoader)["env"]
 dollar_env_var=re.compile(r"\$(\w+)")
 dollarcurly_env_var=re.compile(r"\$\{(\w+)\}")
@@ -98,11 +118,10 @@ class ReIterKey(dict):
 rep=r"{\1}"  # Convert env vars markup to -> str.format_map(re_iter_key) markup
 out=ReIterKey()
 for k,v in env.items():
-    v=str(v)
-    if "ENCRYPTED" not in v:
-        out[k]=dollar_env_var.sub(rep, dollarcurly_env_var.sub(rep, v))
+    if "ENCRYPTED" not in str(v) and bool(v):
+        out[k]=dollar_env_var.sub(rep, dollarcurly_env_var.sub(rep, str(v)))
 for k,v in out.items():
-    print("{0}=\"{1}\"".format(k, v.format_map(out)))
+    sys.stdout.write("{0}=\"{1}\"\n".format(k, str(v).format_map(out)))
 '
 }
@@ -110,8 +129,14 @@ image_hints() {
     get_env_vars | fgrep '_CACHE_IMAGE_NAME' | awk -F "=" '{print $2}'
 }

+unset VM_IMAGE_NAME
+unset VMNAME
+unset CREATE_CMD
+unset SSH_CMD
+unset CLEANUP_CMD
+declare -xa ENVS
+
 parse_args(){
+    local arg
     echo -e "$USAGE_WARNING"

     if [[ "$USER" =~ "root" ]]
@@ -119,88 +144,43 @@ parse_args(){
         show_usage "This script must be run as a regular user."
     fi

-    ENVS="$(get_env_vars)"
-    [[ "$#" -ge "1" ]] || \
-        show_usage "Must specify at least one command-line parameter."
-
-    IMAGE_NAME=""
-    ROOTLESS_USER=""
-    SPECIALMODE="none"
-    for arg
-    do
-        if [[ "$SPECIALMODE" == "GRABNEXT" ]] && [[ "${arg:0:1}" != "-" ]]
-        then
-            SPECIALMODE="$arg"
-            echo -e "${YEL}Using \$SPECIALMODE=$SPECIALMODE.${NOR}"
-            continue
-        elif [[ "$ROOTLESS_USER" == "GRABNEXT" ]] && [[ "${arg:0:1}" != "-" ]]
-        then
-            ROOTLESS_USER="$arg"
-            echo -e "${YEL}Using \$ROOTLESS_USER=$ROOTLESS_USER.${NOR}"
-            continue
-        fi
-        case "$arg" in
-            -m)
-                SPECIALMODE="GRABNEXT"
-                ;;
-            -u)
-                ROOTLESS_USER="GRABNEXT"
-                ;;
-            *)
-                [[ "${arg:0:1}" != "-" ]] || \
-                    show_usage "Unknown command-line option '$arg'."
-                [[ -z "$IMAGE_NAME" ]] || \
-                    show_usage "Must specify exactly one image name, got '$IMAGE_NAME' and '$arg'."
-                IMAGE_NAME="$arg"
-                ;;
-        esac
-    done
-    if [[ "$SPECIALMODE" == "GRABNEXT" ]]
-    then
-        show_usage "Must specify argument to -m option."
-    fi
-    if [[ "$ROOTLESS_USER" == "GRABNEXT" ]]
-    then
-        show_usage "Must specify argument to -u option."
-    fi
-    if [[ -z "$IMAGE_NAME" ]]
-    then
-        show_usage "No image-name specified."
-    fi
-    if [[ "$SPECIALMODE" == "rootless" ]] && [[ -z "$ROOTLESS_USER" ]]
-    then
-        show_usage "With '-m rootless' must also pass -u <username> of rootless user."
-    fi
-    if echo "$IMAGE_NAME" | grep -q "image-builder-image"
-    then
-        echo -e "Creating an image-builder VM, I hope you know what you're doing.\n"
-        IBI_ARGS="--scopes=compute-rw,storage-rw,userinfo-email"
-        SSHUSER="centos"
-    else
-        unset IBI_ARGS
-        SSHUSER="root"
-    fi
-    ENVS="$ENVS SPECIALMODE=\"$SPECIALMODE\""
-    [[ -z "$ROOTLESS_USER" ]] || \
-        ENVS="$ENVS ROOTLESS_USER=$ROOTLESS_USER"
-    SETUP_CMD="env $ENVS ADD_SECOND_PARTITIO=True $GOSRC/contrib/cirrus/setup_environment.sh"
-    VMNAME="${VMNAME:-${USER}-${IMAGE_NAME}}"
-    CREATE_CMD="$PGCLOUD compute instances create --zone=$ZONE --image=${IMAGE_NAME} --custom-cpu=$CPUS --custom-memory=$MEMORY --boot-disk-size=$DISK --labels=in-use-by=$USER $IBI_ARGS $VMNAME"
-    SSH_CMD="$PGCLOUD compute ssh $SSHUSER@$VMNAME"
+    [[ "$#" -eq 1 ]] || \
+        show_usage "Must specify a VM Image name to use, and the test flavor."
+
+    VM_IMAGE_NAME="$1"
+
+    # Word-splitting is desireable in this case
+    # shellcheck disable=SC2207
+    ENVS=(
+        $(get_env_vars)
+        "VM_IMAGE_NAME=$VM_IMAGE_NAME"
+    )
+
+    VMNAME="${VMNAME:-${USER}-${VM_IMAGE_NAME}}"
+
+    CREATE_CMD="$PGCLOUD compute instances create --zone=$ZONE --image=${VM_IMAGE_NAME} --custom-cpu=$CPUS --custom-memory=$MEMORY --boot-disk-size=$DISK --labels=in-use-by=$USER $VMNAME"
+
+    SSH_CMD="$PGCLOUD compute ssh root@$VMNAME"
+
     CLEANUP_CMD="$PGCLOUD compute instances delete --zone $ZONE --delete-disks=all $VMNAME"
 }

+# Returns true if user has run an 'init' and has a valid token for
+# the specific project-id and named-configuration argumens in $PGCLOUD.
+function has_valid_credentials() {
+    if $PGCLOUD info |& grep -Eq 'Account:.*None'; then
+        return 1
+    fi
+
+    # It's possible for 'gcloud info' to list expired credentials,
+    # e.g. 'ERROR: ... invalid grant: Bad Request'
+    if $PGCLOUD auth print-access-token |& grep -q 'ERROR'; then
+        return 1
+    fi
+
+    return 0
+}
+
 ##### main

 [[ "${LIBPODROOT%%${LIBPODROOT##$HOME}}" == "$HOME" ]] || \
@@ -209,23 +189,17 @@ parse_args(){
 cd "$LIBPODROOT"

 parse_args "$@"

-# Ensure mount-points and data directories exist on host as $USER. Also prevents
-# permission-denied errors during cleanup() b/c `sudo podman` created mount-points
-# owned by root.
-mkdir -p $TMPDIR/${LIBPODROOT##$HOME}
 mkdir -p $TMPDIR/.ssh
 mkdir -p {$HOME,$TMPDIR}/.config/gcloud/ssh
 chmod 700 {$HOME,$TMPDIR}/.config/gcloud/ssh $TMPDIR/.ssh

-cd $LIBPODROOT
-
-# Attempt to determine if named 'libpod' gcloud configuration exists
-showrun $PGCLOUD info > $TMPDIR/gcloud-info
-if egrep -q "Account:.*None" $TMPDIR/gcloud-info
+echo -e "\n${YEL}Pulling gcloud image...${NOR}"
+podman pull $GCLOUD_IMAGE
+
+if ! has_valid_credentials
 then
     echo -e "\n${YEL}WARNING: Can't find gcloud configuration for libpod, running init.${NOR}"
-    echo -e "    ${RED}Please choose "#1: Re-initialize" and "login" if asked.${NOR}"
+    echo -e "    ${RED}Please choose \"#1: Re-initialize\" and \"login\" if asked.${NOR}"
     showrun $PGCLOUD init --project=$PROJECT --console-only --skip-diagnostics

     # Verify it worked (account name == someone@example.com)
@@ -236,68 +210,52 @@
         exit 5
     fi

-    # If this is the only config, make it the default to avoid persistent warnings from gcloud
+    # If this is the only config, make it the default to avoid
+    # persistent warnings from gcloud about there being no default.
     [[ -r "$HOME/.config/gcloud/configurations/config_default" ]] || \
         ln "$HOME/.config/gcloud/configurations/config_libpod" \
             "$HOME/.config/gcloud/configurations/config_default"
 fi

-# Couldn't make rsync work with gcloud's ssh wrapper because ssh-keys generated on the fly
-TARBALL=$VMNAME.tar.bz2
-echo -e "\n${YEL}Packing up local repository into a tarball.${NOR}"
-showrun --background tar cjf $TMPDIR/$TARBALL --warning=no-file-changed --exclude-vcs-ignores -C $LIBPODROOT .
-
-trap delvm INT # Allow deleting VM if CTRL-C during create
+trap delvm EXIT # Allow deleting VM if CTRL-C during create

+# This fails if VM already exists: permit this usage to re-init
 echo -e "\n${YEL}Trying to creating a VM named $VMNAME${NOR}\n${YEL}in GCE region/zone $ZONE${NOR}"
-echo -e "For faster access, export ZONE='something-closer-<any letter>'"
-echo 'List of regions and zones: https://cloud.google.com/compute/docs/regions-zones/'
-echo -e "${RED}(might take a minute/two. Errors ignored).${NOR}"
-showrun $CREATE_CMD || true # allow re-running commands below when "delete: N"
-
-# Any subsequent failure should prompt for VM deletion
-trap - INT
-trap delvm EXIT
-
-echo -e "\n${YEL}Waiting up to 30s for ssh port to open${NOR}"
-trap 'COUNT=9999' INT
-ATTEMPTS=10
-for (( COUNT=1 ; COUNT <= $ATTEMPTS ; COUNT++ ))
-do
-    if $SSH_CMD --command "true"; then break; else sleep 3s; fi
-done
-if (( COUNT > $ATTEMPTS ))
-then
-    echo -e "\n${RED}Failed${NOR}"
-    exit 7
-fi
-echo -e "${YEL}Got it${NOR}"
-
-echo -e "\n${YEL}Removing and re-creating $GOSRC on $VMNAME.${NOR}"
-showrun $SSH_CMD --command "rm -rf $GOSRC"
-showrun $SSH_CMD --command "mkdir -p $GOSRC"
-
-echo -e "\n${YEL}Transferring tarball to $VMNAME.${NOR}"
-wait
-showrun $SCP_CMD $HOME/$TARBALL $SSHUSER@$VMNAME:/tmp/$TARBALL
-
-echo -e "\n${YEL}Unpacking tarball into $GOSRC on $VMNAME.${NOR}"
-showrun $SSH_CMD --command "tar xjf /tmp/$TARBALL -C $GOSRC"
-
-echo -e "\n${YEL}Removing tarball on $VMNAME.${NOR}"
-showrun $SSH_CMD --command "rm -f /tmp/$TARBALL"
-
-echo -e "\n${YEL}Executing environment setup${NOR}"
-showrun $SSH_CMD --command "$SETUP_CMD"
-
-VMIP=$($PGCLOUD compute instances describe $VMNAME --format='get(networkInterfaces[0].accessConfigs[0].natIP)')
-
-echo -e "\n${YEL}Connecting to $VMNAME${NOR}\nPublic IP Address: $VMIP\n${RED}(option to delete VM upon logout).${NOR}\n"
-if [[ -n "$ROOTLESS_USER" ]]
-then
-    echo "Re-chowning source files after transfer"
-    showrun $SSH_CMD --command "chown -R $ROOTLESS_USER $GOSRC"
-    echo "Connecting as user $ROOTLESS_USER"
-    SSH_CMD="$PGCLOUD compute ssh $ROOTLESS_USER@$VMNAME"
-fi
-showrun $SSH_CMD -- -t "cd $GOSRC && exec env $ENVS bash -il"
+echo -e "For faster terminal access, export ZONE='<something-closer>'"
+echo -e 'Zone-list at: https://cloud.google.com/compute/docs/regions-zones/\n'
+
+if showrun $CREATE_CMD; then  # Freshly created VM needs initial setup
+
+    echo -e "\n${YEL}Waiting up to 30s for ssh port to open${NOR}"
+    ATTEMPTS=10
+    trap "exit 1" INT
+    while ((ATTEMPTS)) && ! $SSH_CMD --command "true"; do
+        let "ATTEMPTS--"
+        echo -e "${RED}Nope, not yet.${NOR}"
+        sleep 3s
+    done
+    trap - INT
+    if ! ((ATTEMPTS)); then
+        echo -e "\n${RED}Failed${NOR}"
+        exit 7
+    fi
+    echo -e "${YEL}Got it. Cloning upstream repository as a starting point.${NOR}"
+
+    showrun $SSH_CMD -- "mkdir -p $GOSRC"
+    showrun $SSH_CMD -- "git clone --progress $GIT_REPO $GOSRC"
+
+    if [[ -x "$HOME/$GCLOUD_CFGDIR/$HOOK_FILENAME" ]]; then
+        echo -e "\n${YEL}Copying hook to VM and executing (ignoring errors).${NOR}"
+        $PGCLOUD compute scp "/root/$GCLOUD_CFGDIR/$HOOK_FILENAME" root@$VMNAME:.
+        if ! showrun $SSH_CMD -- "cd $GOSRC && bash /root/$HOOK_FILENAME $(git branch --show-current) $(git rev-parse HEAD)"; then
+            echo "-e ${RED}Hook exited: $?${NOR}"
+        fi
+    fi
+fi
+
+echo -e "\n${YEL}Generating connection script for $VMNAME.${NOR}"
+echo -e "Note: Script can be re-used in another terminal if needed."
+echo -e "${RED}(option to delete VM presented upon exiting).${NOR}"
+# TODO: This is fairly fragile, specifically the quoting for the remote command.
+echo '#!/bin/bash' > $TMPDIR/ssh
+echo "$SSH_CMD -- -t 'cd $GOSRC && exec env \"${ENVS[*]}\" bash -il'" >> $TMPDIR/ssh
+chmod +x $TMPDIR/ssh
+
+showrun $TMPDIR/ssh
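Editor's note: since this rewrite clones the upstream repository onto the VM instead of tarring up the local checkout, the optional hook is the intended place to fetch and check out local work. Below is a minimal sketch of what a $HOME/.config/gcloud/hook_get_ci_vm.sh might look like; the fork URL is a placeholder, and the two positional arguments (branch name and commit ID) are exactly what the script passes in:

    #!/usr/bin/env bash
    # Hypothetical hook: runs on the VM, inside $GOSRC, after the upstream clone.
    # $1 = local working branch name, $2 = local commit ID (as passed by get_ci_vm.sh)
    set -e
    branch="$1"
    commit="$2"
    # Placeholder fork URL; substitute your own
    git remote add fork https://github.com/yourname/podman.git
    git fetch fork "$branch"
    git checkout -b "$branch" "$commit" || git checkout "$commit"
    git log -1 --oneline

Note the hook runs as root on the freshly-created VM, and its exit status is reported but not fatal, matching the "ignoring errors" behavior in the diff above.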