Gate: Use ShellCheck to lint scripts

This linting is currently limited to scripts in the new gate.

Change-Id: Id4906af0c8dc1e4b77eb0909d432ba8b6e32f5c4
Mark Burnett 2017-10-27 10:05:47 -05:00
parent e82fd04ff1
commit 986016cb9c
26 changed files with 283 additions and 254 deletions

View File

@@ -1,18 +1,18 @@
set -e
set -o nounset
LIB_DIR=$(realpath $(dirname $BASH_SOURCE))
LIB_DIR=$(realpath "$(dirname "${BASH_SOURCE}")")
source $LIB_DIR/config.sh
source $LIB_DIR/const.sh
source $LIB_DIR/etcd.sh
source $LIB_DIR/kube.sh
source $LIB_DIR/log.sh
source $LIB_DIR/promenade.sh
source $LIB_DIR/registry.sh
source $LIB_DIR/ssh.sh
source $LIB_DIR/validate.sh
source $LIB_DIR/virsh.sh
source "$LIB_DIR"/config.sh
source "$LIB_DIR"/const.sh
source "$LIB_DIR"/etcd.sh
source "$LIB_DIR"/kube.sh
source "$LIB_DIR"/log.sh
source "$LIB_DIR"/promenade.sh
source "$LIB_DIR"/registry.sh
source "$LIB_DIR"/ssh.sh
source "$LIB_DIR"/validate.sh
source "$LIB_DIR"/virsh.sh
if [[ -v GATE_DEBUG && ${GATE_DEBUG} = "1" ]]; then
set -x
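Aside: the LIB_DIR rewrite above is the canonical ShellCheck fix (SC2046/SC2086) that recurs throughout this commit: unquoted expansions undergo word splitting and globbing. A minimal sketch, using a hypothetical path containing a space, shows why the quotes matter:

#!/usr/bin/env bash
dir='/tmp/gate demo'              # hypothetical directory with a space
mkdir -p "${dir}"
count_args() { echo "$# argument(s)"; }
count_args ${dir}                 # unquoted: word-splits -> 2 argument(s)
count_args "${dir}"               # quoted: stays one word -> 1 argument(s)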

View File

@@ -7,22 +7,23 @@ export VIRSH_POOL=${VIRSH_POOL:-promenade}
export VIRSH_POOL_PATH=${VIRSH_POOL_PATH:-/var/lib/libvirt/promenade}
config_configuration() {
jq -cr '.configuration[]' < ${GATE_MANIFEST}
# XXX Do I need ' | @sh' now?
jq -cr '.configuration[]' < "${GATE_MANIFEST}"
}
config_vm_memory() {
jq -cr '.vm.memory' < ${GATE_MANIFEST}
jq -cr '.vm.memory' < "${GATE_MANIFEST}"
}
config_vm_names() {
jq -cr '.vm.names[]' < ${GATE_MANIFEST}
jq -cr '.vm.names[]' < "${GATE_MANIFEST}"
}
config_vm_ip() {
NAME=${1}
echo 192.168.77.1${NAME:1}
echo "192.168.77.1${NAME:1}"
}
config_vm_vcpus() {
jq -cr '.vm.vcpus' < ${GATE_MANIFEST}
jq -cr '.vm.vcpus' < "${GATE_MANIFEST}"
}
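These helpers assume GATE_MANIFEST names a JSON manifest shaped roughly as below (inferred from the jq filters; the values are invented for illustration):

cat > /tmp/manifest.json <<'EOF'
{
  "configuration": ["example/config-dir"],
  "vm": {"memory": 2048, "vcpus": 2, "names": ["n0", "n1", "n2", "n3"]}
}
EOF
GATE_MANIFEST=/tmp/manifest.json
jq -cr '.vm.names[]' < "${GATE_MANIFEST}"   # prints n0..n3, one per line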

View File

@@ -1,8 +1,8 @@
GENESIS_NAME=n0
SSH_CONFIG_DIR=${WORKSPACE}/tools/g2/config-ssh
TEMPLATE_DIR=${WORKSPACE}/tools/g2/templates
XML_DIR=${WORKSPACE}/tools/g2/xml
ALL_VM_NAMES=(
export GENESIS_NAME=n0
export SSH_CONFIG_DIR=${WORKSPACE}/tools/g2/config-ssh
export TEMPLATE_DIR=${WORKSPACE}/tools/g2/templates
export XML_DIR=${WORKSPACE}/tools/g2/xml
export ALL_VM_NAMES=(
n0
n1
n2
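The added exports chiefly satisfy ShellCheck's unused-variable warning (SC2034) for values consumed by the other sourced libraries. One caveat worth noting: bash does not place arrays into the environment, so the export on ALL_VM_NAMES only has effect within shells that source this file. A sketch (DEMO_ARR is hypothetical):

export DEMO_ARR=(a b c)                        # accepted, but arrays are not exported
bash -c 'echo "child: ${DEMO_ARR[*]-unset}"'   # child: unset
echo "same shell: ${DEMO_ARR[*]}"              # same shell: a b c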

View File

@@ -4,14 +4,13 @@ etcdctl_cmd() {
shift 2
kubectl_cmd ${VM} -n kube-system exec -t ${CLUSTER}-etcd-${VM} -- etcdctl ${@}
kubectl_cmd "${VM}" -n kube-system exec -t "${CLUSTER}-etcd-${VM}" -- etcdctl "${@}"
}
etcdctl_member_list() {
CLUSTER=${1}
VM=${2}
shift 2
EXTRA_ARGS=${@}
etcdctl_cmd ${CLUSTER} ${VM} member list -w json | jq -r '.members[].name' | sort
etcdctl_cmd "${CLUSTER}" "${VM}" member list -w json | jq -r '.members[].name' | sort
}
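Quoting "${@}" when forwarding arguments (SC2068) preserves each argument as a single word; unquoted, anything containing whitespace is re-split. A sketch with invented arguments:

show() { printf '<%s>\n' "$@"; }
forward_bad()  { show ${@}; }        # SC2068: arguments re-split on whitespace
forward_good() { show "${@}"; }
forward_bad  "member list" -w json   # <member> <list> <-w> <json>
forward_good "member list" -w json   # <member list> <-w> <json>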

View File

@@ -1,7 +1,7 @@
kubectl_apply() {
VIA=${1}
FILE=${2}
ssh_cmd ${VIA} "cat ${FILE} | kubectl apply -f -"
ssh_cmd "${VIA}" "cat ${FILE} | kubectl apply -f -"
}
kubectl_cmd() {
@@ -9,7 +9,7 @@ kubectl_cmd() {
shift
ssh_cmd ${VIA} kubectl ${@}
ssh_cmd "${VIA}" kubectl "${@}"
}
kubectl_wait_for_pod() {
@@ -17,25 +17,25 @@ kubectl_wait_for_pod() {
NAMESPACE=${2}
POD_NAME=${3}
SEC=${4:-600}
log Waiting ${SEC} seconds for termination of pod ${POD_NAME}
log Waiting "${SEC}" seconds for termination of pod "${POD_NAME}"
POD_PHASE_JSONPATH='{.status.phase}'
end=$(($(date +%s) + $SEC))
end=$(($(date +%s) + SEC))
while true; do
POD_PHASE=$(kubectl_cmd ${VIA} --request-timeout 10s --namespace ${NAMESPACE} get -o jsonpath="${POD_PHASE_JSONPATH}" pod ${POD_NAME})
POD_PHASE=$(kubectl_cmd "${VIA}" --request-timeout 10s --namespace "${NAMESPACE}" get -o jsonpath="${POD_PHASE_JSONPATH}" pod "${POD_NAME}")
if [[ ${POD_PHASE} = "Succeeded" ]]; then
log Pod ${POD_NAME} succeeded.
log Pod "${POD_NAME}" succeeded.
break
elif [[ $POD_PHASE = "Failed" ]]; then
log Pod ${POD_NAME} failed.
kubectl_cmd ${VIA} --request-timeout 10s --namespace ${NAMESPACE} get -o yaml pod ${POD_NAME} 1>&2
log Pod "${POD_NAME}" failed.
kubectl_cmd "${VIA}" --request-timeout 10s --namespace "${NAMESPACE}" get -o yaml pod "${POD_NAME}" 1>&2
exit 1
else
now=$(date +%s)
if [ $now -gt $end ]; then
if [[ $now -gt $end ]]; then
log Pod did not terminate before timeout.
kubectl_cmd ${VIA} --request-timeout 10s --namespace ${NAMESPACE} get -o yaml pod ${POD_NAME} 1>&2
kubectl_cmd "${VIA}" --request-timeout 10s --namespace "${NAMESPACE}" get -o yaml pod "${POD_NAME}" 1>&2
exit 1
fi
sleep 1
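The end=$(($(date +%s) + SEC)) change applies SC2004: inside $(( )), variables need no leading $. The surrounding loop is a plain epoch-based timeout; the same shape in miniature, with a hypothetical readiness probe and a 5-second budget:

some_check() { [[ -e /tmp/ready ]]; }   # hypothetical probe
SEC=5
end=$(($(date +%s) + SEC))              # SC2004: bare SEC inside $(( ))
while true; do
    if some_check; then break; fi
    if [[ $(date +%s) -gt ${end} ]]; then
        echo "timed out after ${SEC}s" >&2
        exit 1
    fi
    sleep 1
done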

View File

@@ -18,8 +18,8 @@ fi
log() {
d=$(date --utc)
echo -e ${C_MUTE}${d}${C_CLEAR} $* 1>&2
echo -e ${d} $* >> ${LOG_FILE}
echo -e "${C_MUTE}${d}${C_CLEAR} ${*}" 1>&2
echo -e "${d} ${*}" >> "${LOG_FILE}"
}
log_stage_diagnostic_header() {
@@ -35,7 +35,7 @@ log_huge_success() {
}
log_note() {
echo -e ${C_HILIGHT}NOTE:${C_CLEAR} ${@}
echo -e "${C_HILIGHT}NOTE:${C_CLEAR} ${*}"
}
log_stage_error() {
@@ -60,7 +60,7 @@ log_stage_success() {
log_temp_dir() {
TEMP_DIR=${1}
echo -e Working in ${C_TEMP}${TEMP_DIR}${C_CLEAR}
echo -e "Working in ${C_TEMP}${TEMP_DIR}${C_CLEAR}"
}
if [[ -v GATE_DEBUG && ${GATE_DEBUG} = "1" ]]; then
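The switch from ${@} to "${*}" inside these echo calls resolves SC2145: concatenating $@ with other strings in one word is ill-defined, whereas "$*" joins all arguments with single spaces, which is exactly what a log line wants:

log_demo() { echo "PREFIX: ${*}"; }   # "$*" joins args with spaces
log_demo Pod my-pod succeeded.        # PREFIX: Pod my-pod succeeded.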

View File

@@ -2,6 +2,6 @@ promenade_teardown_node() {
TARGET=${1}
VIA=${2}
ssh_cmd ${TARGET} /usr/local/bin/promenade-teardown
kubectl_cmd ${VIA} delete node ${TARGET}
ssh_cmd "${TARGET}" /usr/local/bin/promenade-teardown
kubectl_cmd "${VIA}" delete node "${TARGET}"
}

View File

@@ -1,13 +1,13 @@
registry_down() {
REGISTRY_ID=$(docker ps -qa -f name=registry)
if [ "x${REGISTRY_ID}" != "x" ]; then
if [[ ! -z ${REGISTRY_ID} ]]; then
log Removing docker registry
docker rm -fv ${REGISTRY_ID} &>> ${LOG_FILE}
docker rm -fv "${REGISTRY_ID}" &>> "${LOG_FILE}"
fi
}
registry_list_images() {
FILES=$(find $(config_configuration) -type f -name '*.yaml')
FILES=$(find "$(config_configuration)" -type f -name '*.yaml')
HOSTNAME_REGEX='[a-zA-Z0-9][a-zA-Z0-9_-]{0,62}'
DOMAIN_NAME_REGEX="${HOSTNAME_REGEX}(\.${HOSTNAME_REGEX})*"
@@ -19,7 +19,7 @@ registry_list_images() {
TAG_REGEX='[a-zA-Z0-9][a-zA-Z0-9.-]{0,127}'
cat ${FILES} \
cat "${FILES[@]}" \
| tr ' \t' '\n' | tr -s '\n' \
| grep -E "^(${NETLOC_REGEX}/)?${REPO_REGEX}:${TAG_REGEX}$" \
| sort -u \
@@ -29,19 +29,21 @@ registry_list_images() {
registry_populate() {
log Validating local registry is populated
for image in $(registry_list_images); do
if ! docker pull localhost:5000/${image} &> /dev/null; then
log Loading image ${image} into local registry
docker pull ${image} &>> ${LOG_FILE}
docker tag ${image} localhost:5000/${image} &>> ${LOG_FILE}
docker push localhost:5000/${image} &>> ${LOG_FILE}
if ! docker pull "localhost:5000/${image}" &> /dev/null; then
log Loading image "${image}" into local registry
{
docker pull "${image}"
docker tag "${image}" "localhost:5000/${image}"
docker push "localhost:5000/${image}"
} &>> "${LOG_FILE}"
fi
done
}
registry_replace_references() {
FILES=${@}
for image in $(registry_list_images ${FILES}); do
sed -i "s;${image};registry:5000/${image};g" ${FILES}
FILES="${*}"
for image in $(registry_list_images "${FILES}"); do
sed -i "s;${image};registry:5000/${image};g" "${FILES}"
done
}
@@ -49,19 +51,19 @@ registry_up() {
log Validating local registry is up
REGISTRY_ID=$(docker ps -qa -f name=registry)
RUNNING_REGISTRY_ID=$(docker ps -q -f name=registry)
if [ "x${RUNNING_REGISTRY_ID}" = "x" -a "x${REGISTRY_ID}" != "x" ]; then
if [[ -z ${RUNNING_REGISTRY_ID} && ! -z ${REGISTRY_ID} ]]; then
log Removing stopped docker registry
docker rm -fv ${REGISTRY_ID} &>> ${LOG_FILE}
docker rm -fv "${REGISTRY_ID}" &>> "${LOG_FILE}"
fi
if [ "x${REGISTRY_ID}" = "x" ]; then
if [[ -z ${RUNNING_REGISTRY_ID} ]]; then
log Starting docker registry
docker run -d \
-p 5000:5000 \
-e REGISTRY_HTTP_ADDR=0.0.0.0:5000 \
--restart=always \
--name registry \
-v $REGISTRY_DATA_DIR:/var/lib/registry \
registry:2 &>> ${LOG_FILE}
-v "${REGISTRY_DATA_DIR}:/var/lib/registry" \
registry:2 &>> "${LOG_FILE}"
fi
}
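Two patterns above recur in the rest of the commit: [ "x${VAR}" != "x" ] tests become [[ -z / ! -z ]] checks, and runs of commands that share a log redirect are grouped so the &>> appears once. A sketch of the grouping, with a placeholder log path:

LOG_FILE=/tmp/gate.log      # hypothetical
{
    echo "pulling image"
    echo "tagging image"
    echo "pushing image"
} &>> "${LOG_FILE}"         # one redirect covers the whole group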

View File

@@ -1,47 +1,46 @@
rsync_cmd() {
rsync -e "ssh -F ${SSH_CONFIG_DIR}/config" ${@}
rsync -e "ssh -F ${SSH_CONFIG_DIR}/config" "${@}"
}
ssh_cmd() {
if [[ "x${PROMENADE_DEBUG}" = "x1" ]]; then
EXTRA_ARGS=-v
if [[ ${GATE_DEBUG} = "1" ]]; then
ssh -F "${SSH_CONFIG_DIR}/config" -v "${@}"
else
EXTRA_ARGS=
ssh -F "${SSH_CONFIG_DIR}/config" "${@}"
fi
ssh -F ${SSH_CONFIG_DIR}/config ${EXTRA_ARGS} ${@}
}
ssh_config_declare() {
log Validating SSH config exists
if [ ! -s ${SSH_CONFIG_DIR}/config ]; then
if [ ! -s "${SSH_CONFIG_DIR}/config" ]; then
log Creating SSH config
env -i \
SSH_CONFIG_DIR=${SSH_CONFIG_DIR} \
envsubst < ${TEMPLATE_DIR}/ssh-config.sub > ${SSH_CONFIG_DIR}/config
"SSH_CONFIG_DIR=${SSH_CONFIG_DIR}" \
envsubst < "${TEMPLATE_DIR}/ssh-config.sub" > "${SSH_CONFIG_DIR}/config"
fi
}
ssh_keypair_declare() {
log Validating SSH keypair exists
if [ ! -s ${SSH_CONFIG_DIR}/id_rsa ]; then
if [ ! -s "${SSH_CONFIG_DIR}/id_rsa" ]; then
log Generating SSH keypair
ssh-keygen -N '' -f ${SSH_CONFIG_DIR}/id_rsa &>> ${LOG_FILE}
ssh-keygen -N '' -f "${SSH_CONFIG_DIR}/id_rsa" &>> "${LOG_FILE}"
fi
}
ssh_load_pubkey() {
cat ${SSH_CONFIG_DIR}/id_rsa.pub
cat "${SSH_CONFIG_DIR}/id_rsa.pub"
}
ssh_setup_declare() {
mkdir -p ${SSH_CONFIG_DIR}
mkdir -p "${SSH_CONFIG_DIR}"
ssh_keypair_declare
ssh_config_declare
}
ssh_wait() {
NAME=${1}
while ! ssh_cmd ${NAME} /bin/true; do
while ! ssh_cmd "${NAME}" /bin/true; do
sleep 0.5
done
}
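The ssh_cmd rewrite removes the EXTRA_ARGS string, whose unquoted expansion tripped SC2086, by branching instead. When a variable set of flags is genuinely needed, an array is the ShellCheck-clean alternative; a sketch (note the nounset caveat, since all.sh sets -o nounset):

extra=()
[[ ${GATE_DEBUG:-0} = "1" ]] && extra+=(-v)
# Empty "${extra[@]}" expands to zero words (safe under nounset in bash >= 4.4):
ssh -F "${SSH_CONFIG_DIR}/config" "${extra[@]}" "$@"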

View File

@@ -1,23 +1,23 @@
validate_cluster() {
NAME=${1}
log Validating cluster via VM ${NAME}
rsync_cmd ${TEMP_DIR}/scripts/validate-cluster.sh ${NAME}:/root/promenade/
ssh_cmd ${NAME} /root/promenade/validate-cluster.sh
log Validating cluster via VM "${NAME}"
rsync_cmd "${TEMP_DIR}/scripts/validate-cluster.sh" "${NAME}:/root/promenade/"
ssh_cmd "${NAME}" /root/promenade/validate-cluster.sh
}
validate_etcd_membership() {
CLUSTER=${1}
VM=${2}
shift 2
EXPECTED_MEMBERS=${@}
EXPECTED_MEMBERS="${*}"
log Validating ${CLUSTER} etcd membership via ${VM}
FOUND_MEMBERS=$(etcdctl_member_list ${CLUSTER} ${VM} | tr '\n' ' ' | sed 's/ $//')
log Validating "${CLUSTER}" etcd membership via "${VM}"
FOUND_MEMBERS=$(etcdctl_member_list "${CLUSTER}" "${VM}" | tr '\n' ' ' | sed 's/ $//')
if [[ "x${EXPECTED_MEMBERS}" != "x${FOUND_MEMBERS}" ]]; then
log Etcd membership check failed for cluster ${CLUSTER}
log Found \"${FOUND_MEMBERS}\", expected \"${EXPECTED_MEMBERS}\"
log Etcd membership check failed for cluster "${CLUSTER}"
log "Found \"${FOUND_MEMBERS}\", expected \"${EXPECTED_MEMBERS}\""
exit 1
fi
}
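EXPECTED_MEMBERS="${*}" deliberately joins the arguments into one space-separated string so it can be compared against the space-joined etcdctl output. The comparison in miniature, with invented member names:

check_members() {
    local expected="${*}"         # args joined with single spaces
    local found="genesis n1 n2"   # stand-in for etcdctl_member_list output
    [[ ${expected} = "${found}" ]] || { echo "mismatch" >&2; return 1; }
}
check_members genesis n1 n2       # ok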

View File

@@ -1,72 +1,77 @@
img_base_declare() {
log Validating base image exists
if ! virsh vol-key --pool ${VIRSH_POOL} --vol promenade-base.img > /dev/null; then
log Installing base image from ${BASE_IMAGE_URL}
if ! virsh vol-key --pool "${VIRSH_POOL}" --vol promenade-base.img > /dev/null; then
log Installing base image from "${BASE_IMAGE_URL}"
cd ${TEMP_DIR}
curl -q -L -o base.img ${BASE_IMAGE_URL}
cd "${TEMP_DIR}"
curl -q -L -o base.img "${BASE_IMAGE_URL}"
virsh vol-create-as \
--pool ${VIRSH_POOL} \
--name promenade-base.img \
--format qcow2 \
--capacity ${BASE_IMAGE_SIZE} \
--prealloc-metadata &>> ${LOG_FILE}
virsh vol-upload \
--vol promenade-base.img \
--file base.img \
--pool ${VIRSH_POOL} &>> ${LOG_FILE}
{
virsh vol-create-as \
--pool "${VIRSH_POOL}" \
--name promenade-base.img \
--format qcow2 \
--capacity "${BASE_IMAGE_SIZE}" \
--prealloc-metadata
virsh vol-upload \
--vol promenade-base.img \
--file base.img \
--pool "${VIRSH_POOL}"
} &>> "${LOG_FILE}"
fi
}
iso_gen() {
NAME=${1}
if virsh vol-key --pool ${VIRSH_POOL} --vol cloud-init-${NAME}.iso &> /dev/null; then
log Removing existing cloud-init ISO for ${NAME}
if virsh vol-key --pool "${VIRSH_POOL}" --vol "cloud-init-${NAME}.iso" &> /dev/null; then
log Removing existing cloud-init ISO for "${NAME}"
virsh vol-delete \
--pool ${VIRSH_POOL} \
--vol cloud-init-${NAME}.iso &>> ${LOG_FILE}
--pool "${VIRSH_POOL}" \
--vol "cloud-init-${NAME}.iso" &>> "${LOG_FILE}"
fi
log Creating cloud-init ISO for ${NAME}
log "Creating cloud-init ISO for ${NAME}"
ISO_DIR=${TEMP_DIR}/iso/${NAME}
mkdir -p ${ISO_DIR}
cd ${ISO_DIR}
mkdir -p "${ISO_DIR}"
cd "${ISO_DIR}"
export BR_IP_NODE=$(config_vm_ip ${NAME})
BR_IP_NODE=$(config_vm_ip "${NAME}")
SSH_PUBLIC_KEY=$(ssh_load_pubkey)
export BR_IP_NODE
export NAME
export SSH_PUBLIC_KEY=$(ssh_load_pubkey)
envsubst < ${TEMPLATE_DIR}/user-data.sub > user-data
envsubst < ${TEMPLATE_DIR}/meta-data.sub > meta-data
envsubst < ${TEMPLATE_DIR}/network-config.sub > network-config
export SSH_PUBLIC_KEY
envsubst < "${TEMPLATE_DIR}/user-data.sub" > user-data
envsubst < "${TEMPLATE_DIR}/meta-data.sub" > meta-data
envsubst < "${TEMPLATE_DIR}/network-config.sub" > network-config
genisoimage \
-V cidata \
-input-charset utf-8 \
-joliet \
-rock \
-o cidata.iso \
meta-data \
network-config \
user-data &>> ${LOG_FILE}
{
genisoimage \
-V cidata \
-input-charset utf-8 \
-joliet \
-rock \
-o cidata.iso \
meta-data \
network-config \
user-data
virsh vol-create-as \
--pool ${VIRSH_POOL} \
--name cloud-init-${NAME}.iso \
--capacity $(stat -c %s ${ISO_DIR}/cidata.iso) \
--format raw &>> ${LOG_FILE}
virsh vol-create-as \
--pool "${VIRSH_POOL}" \
--name "cloud-init-${NAME}.iso" \
--capacity "$(stat -c %s "${ISO_DIR}/cidata.iso")" \
--format raw
virsh vol-upload \
--pool ${VIRSH_POOL} \
--vol cloud-init-${NAME}.iso \
--file ${ISO_DIR}/cidata.iso &>> ${LOG_FILE}
virsh vol-upload \
--pool "${VIRSH_POOL}" \
--vol "cloud-init-${NAME}.iso" \
--file "${ISO_DIR}/cidata.iso"
} &>> "${LOG_FILE}"
}
iso_path() {
NAME=${1}
echo ${TEMP_DIR}/iso/${NAME}/cidata.iso
echo "${TEMP_DIR}/iso/${NAME}/cidata.iso"
}
net_clean() {
@@ -77,104 +82,104 @@ net_clean() {
net_declare() {
if ! virsh net-list --name | grep ^promenade$ > /dev/null; then
log Creating promenade network
virsh net-create ${XML_DIR}/network.xml &>> ${LOG_FILE}
virsh net-create "${XML_DIR}/network.xml" &>> "${LOG_FILE}"
fi
}
pool_declare() {
log Validating virsh pool setup
if ! virsh pool-uuid ${VIRSH_POOL} &> /dev/null; then
log Creating pool ${VIRSH_POOL}
virsh pool-create-as --name ${VIRSH_POOL} --type dir --target ${VIRSH_POOL_PATH} &>> ${LOG_FILE}
if ! virsh pool-uuid "${VIRSH_POOL}" &> /dev/null; then
log Creating pool "${VIRSH_POOL}"
virsh pool-create-as --name "${VIRSH_POOL}" --type dir --target "${VIRSH_POOL_PATH}" &>> "${LOG_FILE}"
fi
}
vm_clean() {
NAME=${1}
if virsh list --name | grep ${NAME} &> /dev/null; then
virsh destroy ${NAME} &>> ${LOG_FILE}
if virsh list --name | grep "${NAME}" &> /dev/null; then
virsh destroy "${NAME}" &>> "${LOG_FILE}"
fi
if virsh list --name --all | grep ${NAME} &> /dev/null; then
log Removing VM ${NAME}
virsh undefine --remove-all-storage --domain ${NAME} &>> ${LOG_FILE}
if virsh list --name --all | grep "${NAME}" &> /dev/null; then
log Removing VM "${NAME}"
virsh undefine --remove-all-storage --domain "${NAME}" &>> "${LOG_FILE}"
fi
}
vm_clean_all() {
log Removing all VMs in parallel
for NAME in ${ALL_VM_NAMES[@]}; do
vm_clean ${NAME} &
for NAME in "${ALL_VM_NAMES[@]}"; do
vm_clean "${NAME}" &
done
wait
}
vm_create() {
NAME=${1}
iso_gen ${NAME}
vol_create_root ${NAME}
iso_gen "${NAME}"
vol_create_root "${NAME}"
log Creating VM ${NAME}
log Creating VM "${NAME}"
virt-install \
--name ${NAME} \
--name "${NAME}" \
--virt-type kvm \
--cpu host \
--graphics vnc,listen=0.0.0.0 \
--noautoconsole \
--network network=promenade \
--vcpus $(config_vm_vcpus) \
--memory $(config_vm_memory) \
--vcpus "$(config_vm_vcpus)" \
--memory "$(config_vm_memory)" \
--import \
--disk vol=${VIRSH_POOL}/promenade-${NAME}.img,format=qcow2,bus=virtio \
--disk pool=${VIRSH_POOL},size=20,format=qcow2,bus=virtio \
--disk pool=${VIRSH_POOL},size=20,format=qcow2,bus=virtio \
--disk vol=${VIRSH_POOL}/cloud-init-${NAME}.iso,device=cdrom &>> ${LOG_FILE}
--disk "vol=${VIRSH_POOL}/promenade-${NAME}.img,format=qcow2,bus=virtio" \
--disk "pool=${VIRSH_POOL},size=20,format=qcow2,bus=virtio" \
--disk "pool=${VIRSH_POOL},size=20,format=qcow2,bus=virtio" \
--disk "vol=${VIRSH_POOL}/cloud-init-${NAME}.iso,device=cdrom" &>> "${LOG_FILE}"
ssh_wait ${NAME}
ssh_cmd ${NAME} sync
ssh_wait "${NAME}"
ssh_cmd "${NAME}" sync
}
vm_create_all() {
log Starting all VMs in parallel
for NAME in $(config_vm_names); do
vm_create ${NAME} &
vm_create "${NAME}" &
done
wait
for NAME in $(config_vm_names); do
vm_validate ${NAME}
vm_validate "${NAME}"
done
}
vm_start() {
NAME=${1}
log Starting VM ${NAME}
virsh start ${NAME} &>> ${LOG_FILE}
ssh_wait ${NAME}
log Starting VM "${NAME}"
virsh start "${NAME}" &>> "${LOG_FILE}"
ssh_wait "${NAME}"
}
vm_stop() {
NAME=${1}
log Stopping VM ${NAME}
virsh destroy ${NAME} &>> ${LOG_FILE}
log Stopping VM "${NAME}"
virsh destroy "${NAME}" &>> "${LOG_FILE}"
}
vm_restart_all() {
for NAME in $(config_vm_names); do
vm_stop ${NAME} &
vm_stop "${NAME}" &
done
wait
for NAME in $(config_vm_names); do
vm_start ${NAME} &
vm_start "${NAME}" &
done
wait
}
vm_validate() {
NAME=${1}
if ! virsh list --name | grep ${NAME} &> /dev/null; then
log VM ${NAME} did not start correctly.
if ! virsh list --name | grep "${NAME}" &> /dev/null; then
log VM "${NAME}" did not start correctly.
exit 1
fi
}
@@ -183,17 +188,17 @@ vm_validate() {
vol_create_root() {
NAME=${1}
if virsh vol-list --pool ${VIRSH_POOL} | grep promenade-${NAME}.img &> /dev/null; then
log Deleting previous volume promenade-${NAME}.img
virsh vol-delete --pool ${VIRSH_POOL} promenade-${NAME}.img &>> ${LOG_FILE}
if virsh vol-list --pool "${VIRSH_POOL}" | grep "promenade-${NAME}.img" &> /dev/null; then
log Deleting previous volume "promenade-${NAME}.img"
virsh vol-delete --pool "${VIRSH_POOL}" "promenade-${NAME}.img" &>> "${LOG_FILE}"
fi
log Creating root volume for ${NAME}
log Creating root volume for "${NAME}"
virsh vol-create-as \
--pool ${VIRSH_POOL} \
--name promenade-${NAME}.img \
--pool "${VIRSH_POOL}" \
--name "promenade-${NAME}.img" \
--capacity 64G \
--format qcow2 \
--backing-vol promenade-base.img \
--backing-vol-format qcow2 &>> ${LOG_FILE}
--backing-vol-format qcow2 &>> "${LOG_FILE}"
}
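One detail above is easy to misread: in --capacity "$(stat -c %s "${ISO_DIR}/cidata.iso")", the inner quotes do not close the outer pair; quotes inside $( ) are parsed in the substitution's own context, so both the path and the result stay unsplit. A sketch (GNU stat assumed):

f='/tmp/a file'               # hypothetical path with a space
touch "${f}"
size="$(stat -c %s "${f}")"   # inner quotes belong to the $( ) context
echo "size=${size} bytes"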

View File

@@ -2,11 +2,11 @@
set -e
source ${GATE_UTILS}
source "${GATE_UTILS}"
log Building docker image ${IMAGE_PROMENADE}
docker build -q -t ${IMAGE_PROMENADE} ${WORKSPACE}
log Building docker image "${IMAGE_PROMENADE}"
docker build -q -t "${IMAGE_PROMENADE}" "${WORKSPACE}"
log Loading Promenade image ${IMAGE_PROMENADE} into local registry
docker tag ${IMAGE_PROMENADE} localhost:5000/${IMAGE_PROMENADE} &>> ${LOG_FILE}
docker push localhost:5000/${IMAGE_PROMENADE} &>> ${LOG_FILE}
log Loading Promenade image "${IMAGE_PROMENADE}" into local registry
docker tag "${IMAGE_PROMENADE}" "localhost:5000/${IMAGE_PROMENADE}" &>> "${LOG_FILE}"
docker push "localhost:5000/${IMAGE_PROMENADE}" &>> "${LOG_FILE}"

View File

@@ -2,17 +2,17 @@
set -e
source ${GATE_UTILS}
source "${GATE_UTILS}"
cd ${TEMP_DIR}
cd "${TEMP_DIR}"
mkdir scripts
log Building scripts
docker run --rm -t \
-w /target \
-v ${TEMP_DIR}:/target \
-e PROMENADE_DEBUG=${PROMENADE_DEBUG} \
${IMAGE_PROMENADE} \
-v "${TEMP_DIR}:/target" \
-e "PROMENADE_DEBUG=${PROMENADE_DEBUG}" \
"${IMAGE_PROMENADE}" \
promenade \
build-all \
--validators \

View File

@@ -2,16 +2,16 @@
set -e
source ${GATE_UTILS}
source "${GATE_UTILS}"
rm -rf ${WORKSPACE}/conformance
mkdir -p ${WORKSPACE}/conformance
rm -rf "${WORKSPACE}/conformance"
mkdir -p "${WORKSPACE}/conformance"
rsync_cmd ${WORKSPACE}/tools/g2/sonobuoy.yaml ${GENESIS_NAME}:/root/
ssh_cmd ${GENESIS_NAME} mkdir -p /mnt/sonobuoy
kubectl_apply ${GENESIS_NAME} /root/sonobuoy.yaml
rsync_cmd "${WORKSPACE}/tools/g2/sonobuoy.yaml" "${GENESIS_NAME}:/root/"
ssh_cmd "${GENESIS_NAME}" mkdir -p /mnt/sonobuoy
kubectl_apply "${GENESIS_NAME}" /root/sonobuoy.yaml
if kubectl_wait_for_pod ${GENESIS_NAME} heptio-sonobuoy sonobuoy 7200; then
if kubectl_wait_for_pod "${GENESIS_NAME}" heptio-sonobuoy sonobuoy 7200; then
log Pod succeeded
SUCCESS=1
else
@@ -19,10 +19,10 @@ else
SUCCESS=0
fi
FILENAME=$(ssh_cmd ${GENESIS_NAME} ls /mnt/sonobuoy || echo "")
FILENAME=$(ssh_cmd "${GENESIS_NAME}" ls /mnt/sonobuoy || echo "")
if [[ ! -z ${FILENAME} ]]; then
if rsync_cmd ${GENESIS_NAME}:/mnt/sonobuoy/${FILENAME} ${WORKSPACE}/conformance/sonobuoy.tgz; then
tar xf ${WORKSPACE}/conformance/sonobuoy.tgz -C ${WORKSPACE}/conformance
if rsync_cmd "${GENESIS_NAME}:/mnt/sonobuoy/${FILENAME}" "${WORKSPACE}/conformance/sonobuoy.tgz"; then
tar xf "${WORKSPACE}/conformance/sonobuoy.tgz" -C "${WORKSPACE}/conformance"
fi
fi

View File

@@ -2,7 +2,7 @@
set -e
source ${GATE_UTILS}
source "${GATE_UTILS}"
vm_clean_all
vm_create_all

View File

@@ -2,7 +2,7 @@
set -e
source ${GATE_UTILS}
source "${GATE_UTILS}"
# Docker registry (cache) setup
registry_up

View File

@@ -2,25 +2,27 @@
set -e
source ${GATE_UTILS}
source "${GATE_UTILS}"
OUTPUT_DIR=${TEMP_DIR}/config
mkdir -p ${OUTPUT_DIR}
OUTPUT_DIR="${TEMP_DIR}/config"
mkdir -p "${OUTPUT_DIR}"
for source_dir in $(config_configuration); do
log Copying configuration from ${source_dir}
cp ${WORKSPACE}/${source_dir}/*.yaml ${OUTPUT_DIR}
log Copying configuration from "${source_dir}"
cp "${WORKSPACE}/${source_dir}"/*.yaml "${OUTPUT_DIR}"
done
registry_replace_references ${OUTPUT_DIR}/*.yaml
registry_replace_references "${OUTPUT_DIR}"/*.yaml
FILES=($(ls "${OUTPUT_DIR}"))
log Generating certificates
docker run --rm -t \
-w /target \
-v ${OUTPUT_DIR}:/target \
-e PROMENADE_DEBUG=${PROMENADE_DEBUG} \
${IMAGE_PROMENADE} \
-v "${OUTPUT_DIR}:/target" \
-e "PROMENADE_DEBUG=${PROMENADE_DEBUG}" \
"${IMAGE_PROMENADE}" \
promenade \
generate-certs \
-o /target \
$(ls ${OUTPUT_DIR})
"${FILES[@]}"

View File

@@ -2,9 +2,9 @@
set -e
source ${GATE_UTILS}
source "${GATE_UTILS}"
rsync_cmd ${TEMP_DIR}/scripts/*genesis* ${GENESIS_NAME}:/root/promenade/
rsync_cmd "${TEMP_DIR}/scripts"/*genesis* "${GENESIS_NAME}:/root/promenade/"
ssh_cmd ${GENESIS_NAME} /root/promenade/genesis.sh
ssh_cmd ${GENESIS_NAME} /root/promenade/validate-genesis.sh
ssh_cmd "${GENESIS_NAME}" /root/promenade/genesis.sh
ssh_cmd "${GENESIS_NAME}" /root/promenade/validate-genesis.sh

View File

@@ -2,7 +2,7 @@
set -e
source $GATE_UTILS
source "${GATE_UTILS}"
vm_restart_all
validate_cluster ${GENESIS_NAME}
validate_cluster "${GENESIS_NAME}"

View File

@@ -7,18 +7,18 @@ if [ $# -le 0 ]; then
exit 1
fi
source ${GATE_UTILS}
source "${GATE_UTILS}"
JOIN_TARGETS=${@}
JOIN_TARGETS="${*}"
for NAME in ${JOIN_TARGETS}; do
rsync_cmd ${TEMP_DIR}/scripts/*${NAME}* ${NAME}:/root/promenade/
rsync_cmd "${TEMP_DIR}"/scripts/*"${NAME}"* "${NAME}:/root/promenade/"
ssh_cmd ${NAME} /root/promenade/join-${NAME}.sh
ssh_cmd ${NAME} /root/promenade/validate-${NAME}.sh
ssh_cmd "${NAME}" "/root/promenade/join-${NAME}.sh"
ssh_cmd "${NAME}" "/root/promenade/validate-${NAME}.sh"
done
validate_cluster n0
validate_etcd_membership kubernetes n0 genesis ${@}
validate_etcd_membership calico n0 n0 ${@}
validate_etcd_membership kubernetes n0 genesis "${*}"
validate_etcd_membership calico n0 n0 "${*}"

View File

@@ -2,7 +2,7 @@
set -e
source ${GATE_UTILS}
source "${GATE_UTILS}"
log Adding labels to node n0
kubectl_cmd n1 label node n0 \

View File

@@ -2,21 +2,21 @@
set -e
source ${GATE_UTILS}
source "${GATE_UTILS}"
EXPECTED_MEMBERS=${@}
EXPECTED_MEMBERS="${*}"
promenade_teardown_node ${GENESIS_NAME} n1
promenade_teardown_node "${GENESIS_NAME}" n1
vm_clean ${GENESIS_NAME}
vm_create ${GENESIS_NAME}
vm_clean "${GENESIS_NAME}"
vm_create "${GENESIS_NAME}"
rsync_cmd ${TEMP_DIR}/scripts/*${GENESIS_NAME}* ${GENESIS_NAME}:/root/promenade/
rsync_cmd "${TEMP_DIR}/scripts/"*"${GENESIS_NAME}"* "${GENESIS_NAME}:/root/promenade/"
ssh_cmd ${GENESIS_NAME} /root/promenade/join-${GENESIS_NAME}.sh
ssh_cmd ${GENESIS_NAME} /root/promenade/validate-${GENESIS_NAME}.sh
ssh_cmd "${GENESIS_NAME}" "/root/promenade/join-${GENESIS_NAME}.sh"
ssh_cmd "${GENESIS_NAME}" "/root/promenade/validate-${GENESIS_NAME}.sh"
validate_cluster n1
validate_etcd_membership kubernetes n1 ${EXPECTED_MEMBERS}
validate_etcd_membership calico n1 ${EXPECTED_MEMBERS}
validate_etcd_membership kubernetes n1 "${EXPECTED_MEMBERS}"
validate_etcd_membership calico n1 "${EXPECTED_MEMBERS}"

View File

@@ -2,37 +2,43 @@
set -e
SCRIPT_DIR=$(realpath $(dirname $0))
export WORKSPACE=$(realpath ${SCRIPT_DIR}/..)
export GATE_UTILS=${WORKSPACE}/tools/g2/lib/all.sh
export TEMP_DIR=$(mktemp -d)
chmod -R 755 ${TEMP_DIR}
SCRIPT_DIR=$(realpath "$(dirname "${0}")")
WORKSPACE=$(realpath "${SCRIPT_DIR}/..")
GATE_UTILS=${WORKSPACE}/tools/g2/lib/all.sh
TEMP_DIR=$(mktemp -d)
chmod -R 755 "${TEMP_DIR}"
export GATE_COLOR=${GATE_COLOR:-1}
GATE_COLOR=${GATE_COLOR:-1}
MANIFEST_ARG=${1:-resiliency}
export GATE_MANIFEST=${WORKSPACE}/tools/g2/manifests/${MANIFEST_ARG}.json
GATE_MANIFEST=${WORKSPACE}/tools/g2/manifests/${MANIFEST_ARG}.json
source ${GATE_UTILS}
export GATE_COLOR
export GATE_MANIFEST
export GATE_UTILS
export TEMP_DIR
export WORKSPACE
source "${GATE_UTILS}"
STAGES_DIR=${WORKSPACE}/tools/g2/stages
log_temp_dir ${TEMP_DIR}
log_temp_dir "${TEMP_DIR}"
echo
STAGES=$(mktemp)
jq -cr '.stages | .[]' ${GATE_MANIFEST} > ${STAGES}
jq -cr '.stages | .[]' "${GATE_MANIFEST}" > "${STAGES}"
# NOTE(mark-burnett): It is necessary to use a non-stdin file descriptor for
# the read below, since we will be calling SSH, which will consume the
# remaining data on STDIN.
exec 3< $STAGES
exec 3< "$STAGES"
while read -u 3 stage; do
NAME=$(echo ${stage} | jq -r .name)
STAGE_CMD=${STAGES_DIR}/$(echo ${stage} | jq -r .script)
NAME=$(echo "${stage}" | jq -r .name)
STAGE_CMD=${STAGES_DIR}/$(echo "${stage}" | jq -r .script)
if echo ${stage} | jq -e .arguments > /dev/null; then
ARGUMENTS=($(echo ${stage} | jq -r '.arguments[]'))
if echo "${stage}" | jq -e .arguments > /dev/null; then
ARGUMENTS=($(echo "${stage}" | jq -r '.arguments[]'))
else
ARGUMENTS=
fi
@@ -42,10 +48,10 @@ while read -u 3 stage; do
log_stage_success
else
log_color_reset
log_stage_error "${NAME}" ${LOG_FILE}
if echo ${stage} | jq -e .on_error > /dev/null; then
log_stage_error "${NAME}" "${LOG_FILE}"
if echo "${stage}" | jq -e .on_error > /dev/null; then
log_stage_diagnostic_header
ON_ERROR=${WORKSPACE}/$(echo ${stage} | jq -r .on_error)
ON_ERROR=${WORKSPACE}/$(echo "${stage}" | jq -r .on_error)
set +e
$ON_ERROR
fi
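Splitting export VAR=$(command) into an assignment followed by a bare export is SC2155's fix: combined, the export builtin's exit status masks any failure of the command substitution, which set -e would otherwise catch. In miniature:

set -e
# export BAD=$(false)   # combined form: false's failure is masked, script continues
GOOD=$(true)            # separate assignment propagates the exit status
export GOOD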

View File

@@ -9,3 +9,11 @@ for manifest in $(find "${WORKSPACE}/tools/g2/manifests" -type f | sort); do
echo Checking "${manifest}"
python -m jsonschema "${WORKSPACE}/tools/g2/manifest-schema.json" -i "${manifest}"
done
if [[ -x $(which shellcheck) ]]; then
echo Checking shell scripts...
shellcheck -s bash -e SC2029 "${WORKSPACE}"/tools/*gate*.sh "${WORKSPACE}"/tools/g2/stages/* "${WORKSPACE}"/tools/g2/lib/*
else
echo No shellcheck executable found. Please install it.
exit 1
fi
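The -e SC2029 exclusion silences ShellCheck's warning that variables in ssh command strings expand on the client side, which these gate scripts rely on intentionally. To reproduce the check locally (assuming shellcheck is installed and you run from the repo root):

WORKSPACE=$(pwd)
shellcheck -s bash -e SC2029 \
    "${WORKSPACE}"/tools/*gate*.sh \
    "${WORKSPACE}"/tools/g2/stages/* \
    "${WORKSPACE}"/tools/g2/lib/*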

View File

@@ -2,13 +2,17 @@
set -e
SCRIPT_DIR=$(realpath $(dirname $0))
export WORKSPACE=$(realpath ${SCRIPT_DIR}/..)
export GATE_UTILS=${WORKSPACE}/tools/g2/lib/all.sh
SCRIPT_DIR=$(realpath "$(dirname "${0}")")
WORKSPACE=$(realpath "${SCRIPT_DIR}/..")
GATE_UTILS=${WORKSPACE}/tools/g2/lib/all.sh
export GATE_COLOR=${GATE_COLOR:-1}
GATE_COLOR=${GATE_COLOR:-1}
source ${GATE_UTILS}
export GATE_COLOR
export GATE_UTILS
export WORKSPACE
source "${GATE_UTILS}"
REQUIRE_RELOG=0
@@ -28,7 +32,7 @@ sudo apt-get install -q -y --no-install-recommends \
log_stage_header "Joining User Groups"
for grp in docker libvirtd; do
if ! groups | grep $grp > /dev/null; then
sudo adduser `id -un` $grp
sudo adduser "$(id -un)" $grp
REQUIRE_RELOG=1
fi
done
@@ -46,13 +50,13 @@ if ! sudo virt-host-validate qemu &> /dev/null; then
sudo virt-host-validate qemu || true
fi
if [ ! -d ${VIRSH_POOL_PATH} ]; then
sudo mkdir -p ${VIRSH_POOL_PATH}
if [[ ! -d ${VIRSH_POOL_PATH} ]]; then
sudo mkdir -p "${VIRSH_POOL_PATH}"
fi
if [ $REQUIRE_RELOG -eq 1 ]; then
if [[ ${REQUIRE_RELOG} -eq 1 ]]; then
echo
log_note You must ${C_HEADER}log out${C_CLEAR} and back in before the gate is ready to run.
log_note "You must ${C_HEADER}log out${C_CLEAR} and back in before the gate is ready to run."
fi
log_huge_success
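The adduser line also swaps backticks for $( ) (SC2006), which nests cleanly and quotes predictably:

user=$(id -un)                                  # preferred over `id -un` (SC2006)
echo "would run: sudo adduser ${user} docker"   # illustrative only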

View File

@@ -2,11 +2,14 @@
set -e
SCRIPT_DIR=$(realpath $(dirname $0))
export WORKSPACE=$(realpath ${SCRIPT_DIR}/..)
export GATE_UTILS=${WORKSPACE}/tools/g2/lib/all.sh
SCRIPT_DIR=$(realpath "$(dirname "${0}")")
WORKSPACE=$(realpath "${SCRIPT_DIR}/..")
GATE_UTILS=${WORKSPACE}/tools/g2/lib/all.sh
source ${GATE_UTILS}
export GATE_UTILS
export WORKSPACE
source "${GATE_UTILS}"
vm_clean_all
registry_down