(gating) Add shellcheck linter for multinode

- Add a shellcheck linter for the scripts in the multinode
  framework

- Update all scripting to comply with shellcheck

- Move linting job to Ubuntu Bionic as the multinode gate now
  requires Bionic versions of libvirt

Change-Id: Ibee645331421e1e6cecd4e3daa8e9c321dce5523
Nishant Kumar 2019-10-11 17:01:43 +00:00
parent 0d8e68c17f
commit 2b67ffaefb
38 changed files with 246 additions and 229 deletions

View File

@@ -27,10 +27,16 @@
       jobs:
         - airship-in-a-bottle-upload-git-mirror

+- nodeset:
+    name: airship-integration-single-node
+    nodes:
+      - name: primary
+        label: ubuntu-bionic
+
 - job:
     name: airship-in-a-bottle-linter
     run: tools/gate/playbooks/zuul-linter.yaml
-    nodeset: openstack-helm-single-node
+    nodeset: airship-integration-single-node

 - job:
     name: airship-in-a-bottle-upload-git-mirror

View File

@@ -15,6 +15,16 @@
 - hosts: primary
   tasks:
     - name: Execute a Whitespace Linter check
-      command: find . -not -path "*/\.*" -not -path "*/doc/build/*" -not -name "*.tgz" -type f -exec egrep -l " +$" {} \;
+      command: find . -not -path "*/\.*" -not -path "*/doc/build/*" -not -name "*.tgz" -type f -exec egrep -ln " +$" {} \;
       register: result
       failed_when: result.stdout != ""
+    - name: Install shellcheck
+      apt:
+        name: "shellcheck"
+      become: true
+    - name: Execute Shellcheck Against Framework Scripts
+      command: find ./tools -type f -name '*.sh' -exec shellcheck -e SC1090 {} \;
+      args:
+        chdir: "{{ zuul.project.src_dir }}"
+      register: result
+      failed_when: result.stdout != ""
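Note that `find -exec shellcheck ... \;` does not propagate shellcheck's exit status into the task result, which is why the task keys off non-empty stdout instead. A rough local equivalent (a hypothetical helper, not part of this change) that fails on exit status directly:

#!/bin/bash
# Lint the framework scripts the way the gate does, excluding SC1090
# ("can't follow non-constant source") since these scripts source
# files through variables such as "${GATE_UTILS}".
set -e
find ./tools -type f -name '*.sh' -print0 | xargs -0 shellcheck -e SC1090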

View File

@@ -20,14 +20,14 @@ export CLUSTER_TYPE="${CLUSTER_TYPE:="node,clusterrole,clusterrolebinding,storag
 export PARALLELISM_FACTOR="${PARALLELISM_FACTOR:=2}"

 function list_objects () {
-    printf ${CLUSTER_TYPE} | xargs -d ',' -I {} -P1 -n1 bash -c 'echo "$@"' _ {}
+    printf "%s" ${CLUSTER_TYPE} | xargs -d ',' -I {} -P1 -n1 bash -c 'echo "$@"' _ {}
 }
 export -f list_objects

 function name_objects () {
     export OBJECT=$1
-    kubectl get ${OBJECT} -o name | xargs -L1 -I {} -P1 -n1 bash -c 'echo "${OBJECT} ${1#*/}"' _ {}
+    kubectl get "${OBJECT}" -o name | xargs -L1 -I {} -P1 -n1 bash -c "echo ${OBJECT} ${1#*/}" _ {}
 }
 export -f name_objects
@@ -39,9 +39,9 @@ function get_objects () {
     echo "${OBJECT}/${NAME}"
     export BASE_DIR="${BASE_DIR:="/tmp"}"
     DIR="${BASE_DIR}/objects/cluster/${OBJECT}"
-    mkdir -p ${DIR}
-    kubectl get ${OBJECT} ${NAME} -o yaml > "${DIR}/${NAME}.yaml"
-    kubectl describe ${OBJECT} ${NAME} > "${DIR}/${NAME}.txt"
+    mkdir -p "${DIR}"
+    kubectl get "${OBJECT}" "${NAME}" -o yaml > "${DIR}/${NAME}.yaml"
+    kubectl describe "${OBJECT}" "${NAME}" > "${DIR}/${NAME}.txt"
 }
 export -f get_objects
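The `printf "%s" ${CLUSTER_TYPE}` fix addresses SC2059: a variable used as the printf format string gets reinterpreted if the data ever contains '%'. A small standalone illustration:

#!/bin/bash
CLUSTER_TYPE='node,clusterrole,100%d'
printf '%s\n' "${CLUSTER_TYPE}"    # prints the value verbatim
printf "${CLUSTER_TYPE}"           # '%d' consumes a missing argument as 0
                                   # and prints "node,clusterrole,1000"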

View File

@@ -1,5 +1,4 @@
 #!/bin/bash
-#!/usr/bin/env bash
 # Copyright 2018 AT&T Intellectual Property. All other rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,10 +15,10 @@
 set -e

-SCRIPT_DIR=$(realpath $(dirname $0))
-WORKSPACE=$(realpath ${SCRIPT_DIR}/../../..)
-GATE_UTILS=${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh
+SCRIPT_DIR="$(realpath "$(dirname "$0")")"
+WORKSPACE="$(realpath "${SCRIPT_DIR}/../../..")"
+GATE_UTILS="${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh"

-source ${GATE_UTILS}
+source "${GATE_UTILS}"

-drydock_cmd $@
+drydock_cmd "$@"
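This quoting pattern repeats across all the bin/ wrappers below. Two points, shown in a standalone sketch: quotes nest safely inside $(...) because each command substitution opens a fresh quoting context, and "$@" preserves argument boundaries that a bare $@ would re-split:

#!/bin/bash
# Each $() starts a new quoting context, so the inner quotes do not
# terminate the outer ones.
SCRIPT_DIR="$(realpath "$(dirname "$0")")"
echo "script dir: ${SCRIPT_DIR}"

show() { printf '<%s> ' "$@"; echo; }
demo() {
    show $@      # re-splits on whitespace: <a> <b> <c>
    show "$@"    # preserves boundaries:    <a b> <c>
}
demo "a b" c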

View File

@@ -25,7 +25,7 @@ function get_namespaces () {
 function list_namespaced_objects () {
     export NAMESPACE=$1
-    printf ${OBJECT_TYPE} | xargs -d ',' -I {} -P1 -n1 bash -c 'echo "${NAMESPACE} $@"' _ {}
+    printf "%s" "${OBJECT_TYPE}" | xargs -d ',' -I {} -P1 -n1 bash -c "echo ${NAMESPACE} ${1#*/}" _ {}
 }
 export -f list_namespaced_objects
@@ -34,7 +34,7 @@ function name_objects () {
     input=($1)
     export NAMESPACE=${input[0]}
     export OBJECT=${input[1]}
-    kubectl get -n ${NAMESPACE} ${OBJECT} -o name | xargs -L1 -I {} -P1 -n1 bash -c 'echo "${NAMESPACE} ${OBJECT} $@"' _ {}
+    kubectl get -n "${NAMESPACE}" "${OBJECT}" -o name | xargs -L1 -I {} -P1 -n1 bash -c "echo ${NAMESPACE} ${OBJECT} ${1#*/}" _ {}
 }
 export -f name_objects
@@ -47,19 +47,19 @@ function get_objects () {
     echo "${NAMESPACE}/${OBJECT}/${NAME}"
     export BASE_DIR="${BASE_DIR:="/tmp"}"
     DIR="${BASE_DIR}/namespaces/${NAMESPACE}/${OBJECT}"
-    mkdir -p ${DIR}
-    kubectl get -n ${NAMESPACE} ${OBJECT} ${NAME} -o yaml > "${DIR}/${NAME}.yaml"
-    kubectl describe -n ${NAMESPACE} ${OBJECT} ${NAME} > "${DIR}/${NAME}.txt"
+    mkdir -p "${DIR}"
+    kubectl get -n "${NAMESPACE}" "${OBJECT}" "${NAME}" -o yaml > "${DIR}/${NAME}.yaml"
+    kubectl describe -n "${NAMESPACE}" "${OBJECT}" "${NAME}" > "${DIR}/${NAME}.txt"
     LOG_DIR="${BASE_DIR}/pod-logs"
     mkdir -p ${LOG_DIR}
     if [ ${OBJECT_TYPE} = "pods" ]; then
         POD_DIR="${LOG_DIR}/${NAME}"
-        mkdir -p ${POD_DIR}
+        mkdir -p "${POD_DIR}"
         CONTAINERS=$(kubectl get pod "${NAME}" -n "${NAMESPACE}" -o json | jq -r '.spec.containers[].name')
         for CONTAINER in ${CONTAINERS}; do
-            kubectl logs -n ${NAMESPACE} ${NAME} -c ${CONTAINER} > "${POD_DIR}/${CONTAINER}.txt"
+            kubectl logs -n "${NAMESPACE}" "${NAME}" -c "${CONTAINER}" > "${POD_DIR}/${CONTAINER}.txt"
         done
     fi
 }

View File

@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
 # Copyright 2018 AT&T Intellectual Property. All other rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,10 +15,10 @@
 set -e

-SCRIPT_DIR=$(realpath $(dirname $0))
-WORKSPACE=$(realpath ${SCRIPT_DIR}/../../..)
-GATE_UTILS=${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh
+SCRIPT_DIR="$(realpath "$(dirname "$0")")"
+WORKSPACE="$(realpath "${SCRIPT_DIR}/../../..")"
+GATE_UTILS="${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh"

-source ${GATE_UTILS}
+source "${GATE_UTILS}"

-exec rsync -e "ssh -F ${SSH_CONFIG_DIR}/config" $@
+exec rsync -e "ssh -F ${SSH_CONFIG_DIR}/config" "$@"

View File

@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
 # Copyright 2018 AT&T Intellectual Property. All other rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,10 +15,10 @@
 set -e

-SCRIPT_DIR=$(realpath $(dirname $0))
-WORKSPACE=$(realpath ${SCRIPT_DIR}/../../..)
-GATE_UTILS=${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh
+SCRIPT_DIR="$(realpath "$(dirname "$0")")"
+WORKSPACE="$(realpath "${SCRIPT_DIR}/../../..")"
+GATE_UTILS="${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh"

-source ${GATE_UTILS}
+source "${GATE_UTILS}"

-exec scp -F ${SSH_CONFIG_DIR}/config $@
+exec scp -F "${SSH_CONFIG_DIR}/config" "$@"

View File

@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
 # Copyright 2018 AT&T Intellectual Property. All other rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,10 +15,10 @@
 set -x

-SCRIPT_DIR=$(realpath $(dirname $0))
-WORKSPACE=$(realpath ${SCRIPT_DIR}/../../..)
-GATE_UTILS=${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh
+SCRIPT_DIR="$(realpath "$(dirname "$0")")"
+WORKSPACE="$(realpath "${SCRIPT_DIR}/../../..")"
+GATE_UTILS="${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh"

-source ${GATE_UTILS}
+source "${GATE_UTILS}"

-shipyard_cmd $@
+shipyard_cmd "$@"

View File

@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
 # Copyright 2018 AT&T Intellectual Property. All other rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,10 +15,10 @@
 set -e

-SCRIPT_DIR=$(realpath $(dirname $0))
-WORKSPACE=$(realpath ${SCRIPT_DIR}/../../..)
-GATE_UTILS=${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh
+SCRIPT_DIR="$(realpath "$(dirname "$0")")"
+WORKSPACE="$(realpath "${SCRIPT_DIR}/../../..")"
+GATE_UTILS="${WORKSPACE}/multi_nodes_gate/airship_gate/lib/all.sh"

-source ${GATE_UTILS}
+source "${GATE_UTILS}"

-exec ssh -F ${SSH_CONFIG_DIR}/config $@
+exec ssh -F "${SSH_CONFIG_DIR}/config" "$@"

View File

@@ -20,64 +20,64 @@ shipard_cmd_stdout() {
     install_ingress_ca
     ssh_cmd "${BUILD_NAME}" \
         docker run -t --network=host \
-            --dns ${dns_server} \
+            --dns "${dns_server}" \
             -v "${BUILD_WORK_DIR}:/work" \
-            -e OS_AUTH_URL=${AIRSHIP_KEYSTONE_URL} \
+            -e OS_AUTH_URL="${AIRSHIP_KEYSTONE_URL}" \
             -e OS_USERNAME=shipyard \
             -e OS_USER_DOMAIN_NAME=default \
             -e OS_PASSWORD="${SHIPYARD_PASSWORD}" \
             -e OS_PROJECT_DOMAIN_NAME=default \
             -e OS_PROJECT_NAME=service \
             -e REQUESTS_CA_BUNDLE=/work/ingress_ca.pem \
-            --entrypoint /usr/local/bin/shipyard "${IMAGE_SHIPYARD_CLI}" $* 2>&1
+            --entrypoint /usr/local/bin/shipyard "${IMAGE_SHIPYARD_CLI}" "$@" 2>&1
 }

 shipyard_cmd() {
     if [[ ! -z "${LOG_FILE}" ]]
     then
         set -o pipefail
-        shipard_cmd_stdout $* | tee -a "${LOG_FILE}"
+        shipard_cmd_stdout "$@" | tee -a "${LOG_FILE}"
         set +o pipefail
     else
-        shipard_cmd_stdout $*
+        shipard_cmd_stdout "$@"
     fi
 }

 drydock_cmd_stdout() {
     dns_netspec="$(config_netspec_for_role "dns")"
-    dns_server=$(config_vm_net_ip "${BUILD_NAME}" "$dns_netspec")
+    dns_server="$(config_vm_net_ip "${BUILD_NAME}" "$dns_netspec")"
     install_ingress_ca
     ssh_cmd "${BUILD_NAME}" \
         docker run -t --network=host \
-            --dns ${dns_server} \
+            --dns "${dns_server}" \
             -v "${BUILD_WORK_DIR}:/work" \
             -e DD_URL=http://drydock-api.ucp.svc.cluster.local:9000 \
-            -e OS_AUTH_URL=${AIRSHIP_KEYSTONE_URL} \
+            -e OS_AUTH_URL="${AIRSHIP_KEYSTONE_URL}" \
             -e OS_USERNAME=shipyard \
             -e OS_USER_DOMAIN_NAME=default \
             -e OS_PASSWORD="${SHIPYARD_PASSWORD}" \
             -e OS_PROJECT_DOMAIN_NAME=default \
             -e OS_PROJECT_NAME=service \
             -e REQUESTS_CA_BUNDLE=/work/ingress_ca.pem \
-            --entrypoint /usr/local/bin/drydock "${IMAGE_DRYDOCK_CLI}" $* 2>&1
+            --entrypoint /usr/local/bin/drydock "${IMAGE_DRYDOCK_CLI}" "$@" 2>&1
 }

 drydock_cmd() {
     if [[ ! -z "${LOG_FILE}" ]]
     then
         set -o pipefail
-        drydock_cmd_stdout $* | tee -a "${LOG_FILE}"
+        drydock_cmd_stdout "$@" | tee -a "${LOG_FILE}"
         set +o pipefail
     else
-        drydock_cmd_stdout $*
+        drydock_cmd_stdout "$@"
     fi
 }

 # Create a shipyard action
 # and poll until completion
 shipyard_action_wait() {
-    action=$1
-    timeout=${2:-3600}
-    poll_time=${3:-60}
+    action="$1"
+    timeout="${2:-3600}"
+    poll_time="${3:-60}"

     if [[ $action == "update_site" ]]
     then
@@ -149,7 +149,7 @@ collect_ssh_key() {
         return 0
     fi

-    cat << EOF > ${GATE_DEPOT}/airship_ubuntu_ssh_key.yaml
+    cat << EOF > "${GATE_DEPOT}/airship_ubuntu_ssh_key.yaml"
 ---
 schema: deckhand/Certificate/v1
 metadata:
@@ -161,6 +161,6 @@ metadata:
   storagePolicy: cleartext
 data: |-
 EOF
-    cat ${SSH_CONFIG_DIR}/id_rsa.pub | sed -e 's/^/ /' >> ${GATE_DEPOT}/airship_ubuntu_ssh_key.yaml
+    sed -e 's/^/ /' >> "${GATE_DEPOT}/airship_ubuntu_ssh_key.yaml" < "${SSH_CONFIG_DIR}/id_rsa.pub"
 }
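Most changes in this hunk replace $* with "$@" when forwarding arguments. The difference only shows once an argument contains whitespace, as shipyard/drydock subcommands routinely do; a minimal standalone demonstration:

#!/bin/bash
count() { echo "$# args"; }
forward_star()   { count $*; }     # flattens, then re-splits on spaces
forward_quoted() { count "$@"; }   # keeps original word boundaries

forward_star   "get actions" --verbose    # -> 3 args
forward_quoted "get actions" --verbose    # -> 2 args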

View File

@@ -1,7 +1,10 @@
+#!/bin/bash
 set -e

-LIB_DIR=$(realpath "$(dirname "${BASH_SOURCE}")")
-REPO_ROOT=$(realpath "$(dirname "${BASH_SOURCE}")/../../../..")
+LIB_DIR=$(realpath "$(dirname "${BASH_SOURCE[0]}")")
+export LIB_DIR
+REPO_ROOT=$(realpath "$(dirname "${BASH_SOURCE[0]}")/../../../..")
+export REPO_ROOT

 source "$LIB_DIR"/config.sh
 source "$LIB_DIR"/const.sh

View File

@@ -1,13 +1,16 @@
+#!/bin/bash
 QUAGGA_DAEMONS="${TEMP_DIR}/daemons"
 QUAGGA_DEBIAN_CONF="${TEMP_DIR}/debian.conf"
 QUAGGA_BGPD_CONF="${TEMP_DIR}/bgpd.conf"

 bgp_router_config() {
-    quagga_as_number=$(config_bgp_as "quagga_as")
-    calico_as_number=$(config_bgp_as "calico_as")
+    quagga_as_number="$(config_bgp_as "quagga_as")"
+    calico_as_number="$(config_bgp_as "calico_as")"
     bgp_net="$(config_netspec_for_role "bgp")"
-    quagga_ip=$(config_vm_net_ip "build" "$bgp_net")
+    quagga_ip="$(config_vm_net_ip "build" "$bgp_net")"

+    # shellcheck disable=SC2016
     QUAGGA_AS=${quagga_as_number} CALICO_AS=${calico_as_number} QUAGGA_IP=${quagga_ip} envsubst '${QUAGGA_AS} ${CALICO_AS} ${QUAGGA_IP}' < "${TEMPLATE_DIR}/bgpd_conf.sub" > "${QUAGGA_BGPD_CONF}"
     cp "${TEMPLATE_DIR}/daemons.sub" "${QUAGGA_DAEMONS}"
@@ -20,9 +23,9 @@ bgp_router_start() {
     nodename=$1
     remote_work_dir="/var/tmp/quagga"

-    remote_daemons_file="${remote_work_dir}/$(basename $QUAGGA_DAEMONS)"
-    remote_debian_conf_file="${remote_work_dir}/$(basename $QUAGGA_DEBIAN_CONF)"
-    remote_bgpd_conf_file="${remote_work_dir}/$(basename $QUAGGA_BGPD_CONF)"
+    remote_daemons_file="${remote_work_dir}/$(basename "$QUAGGA_DAEMONS")"
+    remote_debian_conf_file="${remote_work_dir}/$(basename "$QUAGGA_DEBIAN_CONF")"
+    remote_bgpd_conf_file="${remote_work_dir}/$(basename "$QUAGGA_BGPD_CONF")"

     ssh_cmd "${nodename}" mkdir -p "${remote_work_dir}"
@@ -30,5 +33,5 @@ bgp_router_start() {
     rsync_cmd "$QUAGGA_DEBIAN_CONF" "${nodename}:${remote_debian_conf_file}"
     rsync_cmd "$QUAGGA_BGPD_CONF" "${nodename}:${remote_bgpd_conf_file}"

-    ssh_cmd "${nodename}" docker run -ti -d --net=host --privileged -v /var/tmp/quagga:/etc/quagga --restart always --name Quagga $IMAGE_QUAGGA
+    ssh_cmd "${nodename}" docker run -ti -d --net=host --privileged -v /var/tmp/quagga:/etc/quagga --restart always --name Quagga "$IMAGE_QUAGGA"
 }
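The added `# shellcheck disable=SC2016` marks the single quotes as intentional: envsubst takes a literal list of ${VAR} names to restrict which variables it substitutes, so the string must not be expanded by the shell. A sketch of the behavior, with illustrative variable names:

#!/bin/bash
export QUAGGA_AS="64512" OTHER="leave-alone"
# Only ${QUAGGA_AS} is substituted; ${OTHER} passes through untouched.
echo 'as: ${QUAGGA_AS} other: ${OTHER}' | envsubst '${QUAGGA_AS}'
# -> as: 64512 other: ${OTHER}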

View File

@@ -1,4 +1,5 @@
-#!/usr/bin/env bash
+#!/bin/bash
+#
 # Copyright 2019 AT&T Intellectual Property. All other rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -28,7 +29,7 @@ manifests_lookup(){
   local allow_fail="$6"

   FAIL=false
-  RESULT=`python3 -c "
+  RESULT=$(python3 -c "
 import yaml,sys
 y = yaml.load_all(open('$file'))
 for x in y:
@@ -50,7 +51,7 @@ for x in y:
       print(x$key_path)
       break
 else:
-  sys.exit(1)" 2>&1` || FAIL=true
+  sys.exit(1)" 2>&1) || FAIL=true

   if [[ $FAIL = true ]] && [[ $allow_fail != true ]]; then
     echo "Lookup failed for schema '$schema', metadata.name '$mdata_name', key path '$key_path'"
@@ -63,7 +64,8 @@ install_file(){
   local path="$1"
   local content="$2"
   local permissions="$3"
-  local dirname=$(dirname "$path")
+  local dirname
+  dirname=$(dirname "$path")

   if [[ ! -d $dirname ]]; then
     mkdir -p "$dirname"
@@ -72,9 +74,9 @@ install_file(){
   if [[ ! -f $path ]] || [ "$(cat "$path")" != "$content" ]; then
     echo "$content" > "$path"
     chmod "$permissions" "$path"
-    FILE_UPDATED=true
+    export FILE_UPDATED=true
   else
-    FILE_UPDATED=false
+    export FILE_UPDATED=false
   fi
 }
@@ -90,7 +92,7 @@ fi
 if ([[ -z $1 ]] && [[ -z $RENDERED ]]) || [[ $1 =~ .*[hH][eE][lL][pP].* ]]; then
   echo "Missing required script argument"
-  echo "Usage: ./$(basename $BASH_SOURCE) /path/to/rendered/site/manifest.yaml"
+  echo "Usage: ./$(basename "${BASH_SOURCE[0]}") /path/to/rendered/site/manifest.yaml"
   exit 1
 fi
@@ -106,8 +108,8 @@ fi
 echo "Using rendered manifests file '$rendered_file'"

 # env vars which can be set if you want to disable
-: ${DISABLE_SECCOMP_PROFILE:=}
-: ${DISABLE_APPARMOR_PROFILES:=}
+: "${DISABLE_SECCOMP_PROFILE:=}"
+: "${DISABLE_APPARMOR_PROFILES:=}"

 ###############################################################################
@@ -146,8 +148,8 @@ if [[ ! $DISABLE_APPARMOR_PROFILES ]]; then
   if [[ -n "$RESULT" ]] && [[ $RESULT -gt 0 ]]; then
     # Fetch apparmor profile data
-    LAST=$(( $RESULT - 1 ))
-    for i in `seq 0 $LAST`; do
+    LAST=$(( RESULT - 1 ))
+    for i in $(seq 0 $LAST); do
       manifests_lookup "$rendered_file" "drydock/BootAction/v1" \
         "apparmor-profiles" "['data']['assets'][$i]['path']"

View File

@@ -457,7 +457,7 @@ join_array() {
 besteffort() {
   set +e
-  $@
+  "$@"
   set -e
 }
@@ -467,7 +467,7 @@ get_namekey() {
     key=$(cat "$NAMEKEY_FILE")
   else
     key=$(openssl rand -hex 4)
-    echo -n "$key" > $NAMEKEY_FILE
+    echo -n "$key" > "$NAMEKEY_FILE"
   fi

   echo -n "$key"

View File

@@ -1,3 +1,4 @@
+#!/bin/bash
 export GENESIS_NAME=n0
 export BUILD_NAME=build
 export SSH_CONFIG_DIR=${WORKSPACE}/multi_nodes_gate/airship_gate/config-ssh

View File

@@ -1,3 +1,4 @@
+#!/bin/bash
 docker_ps() {
     VIA="${1}"
     ssh_cmd "${VIA}" docker ps -a

View File

@@ -1,34 +1,39 @@
+#!/bin/bash
 DNS_ZONE_FILE="${TEMP_DIR}/ingress.dns"
 COREFILE="${TEMP_DIR}/ingress.corefile"

 ingress_dns_config() {
-    ingress_domain=$(config_ingress_domain)
+    ingress_domain="$(config_ingress_domain)"

-    INGRESS_DOMAIN=${ingress_domain} envsubst '${INGRESS_DOMAIN}' < "${TEMPLATE_DIR}/ingress_header.sub" > "${DNS_ZONE_FILE}"
+    #shellcheck disable=SC2016
+    INGRESS_DOMAIN="${ingress_domain}" envsubst '${INGRESS_DOMAIN}' < "${TEMPLATE_DIR}/ingress_header.sub" > "${DNS_ZONE_FILE}"

-    read -a ingress_ip_list <<< $(config_ingress_ips)
+    read -r -a ingress_ip_list <<< "$(config_ingress_ips)"

     for ip in "${ingress_ip_list[@]}"
     do
-        read -a ip_entries <<< $(config_ingress_entries $ip)
+        # TODO(sthussey) shift config_ingress_entries to printf w/ quotes
+        # shellcheck disable=SC2046
+        read -r -a ip_entries <<< $(config_ingress_entries "$ip")
         for entry in "${ip_entries[@]}"
         do
-            HOSTNAME=${entry} HOSTIP=${ip} envsubst < "${TEMPLATE_DIR}/ingress_entry.sub" >> "${DNS_ZONE_FILE}"
+            HOSTNAME="${entry}" HOSTIP="${ip}" envsubst < "${TEMPLATE_DIR}/ingress_entry.sub" >> "${DNS_ZONE_FILE}"
         done
     done

-    DNS_DOMAIN=${ingress_domain} ZONE_FILE=$(basename $DNS_ZONE_FILE) DNS_SERVERS="$UPSTREAM_DNS" envsubst < "${TEMPLATE_DIR}/ingress_corefile.sub" > "${COREFILE}"
+    DNS_DOMAIN="${ingress_domain}" ZONE_FILE="$(basename "$DNS_ZONE_FILE")" DNS_SERVERS="$UPSTREAM_DNS" envsubst < "${TEMPLATE_DIR}/ingress_corefile.sub" > "${COREFILE}"
 }

 ingress_dns_start() {
     # nodename where DNS should run
-    nodename=$1
+    nodename="$1"
     remote_work_dir="/var/tmp/coredns"

-    remote_zone_file="${remote_work_dir}/$(basename $DNS_ZONE_FILE)"
-    remote_corefile="${remote_work_dir}/$(basename $COREFILE)"
+    remote_zone_file="${remote_work_dir}/$(basename "$DNS_ZONE_FILE")"
+    remote_corefile="${remote_work_dir}/$(basename "$COREFILE")"

     ssh_cmd "${nodename}" mkdir -p "${remote_work_dir}"

     rsync_cmd "$DNS_ZONE_FILE" "${nodename}:${remote_zone_file}"
     rsync_cmd "$COREFILE" "${nodename}:${remote_corefile}"

-    ssh_cmd "${nodename}" docker run -d -v /var/tmp/coredns:/data -w /data --network host --restart always -P $IMAGE_COREDNS -conf $(basename $remote_corefile)
+    ssh_cmd "${nodename}" docker run -d -v /var/tmp/coredns:/data -w /data --network host --restart always -P "$IMAGE_COREDNS" -conf "$(basename "$remote_corefile")"
 }
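`read -a` becomes `read -r -a` throughout (SC2162): without -r, read treats backslashes in the input as escape characters and silently drops them. A standalone sketch:

#!/bin/bash
line='10.0.0.1 host\name'
read    -a without <<< "$line"
read -r -a with    <<< "$line"
echo "${without[1]}"   # -> hostname   (backslash consumed)
echo "${with[1]}"      # -> host\name  (data preserved)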

View File

@@ -1,3 +1,5 @@
+#!/bin/bash
+
 kubectl_apply() {
     VIA=${1}
     FILE=${2}

View File

@@ -1,3 +1,4 @@
+#!/bin/bash
 if [[ -v GATE_COLOR && ${GATE_COLOR} = "1" ]]; then
     C_CLEAR="\e[0m"
     C_ERROR="\e[38;5;160m"

View File

@@ -1,3 +1,5 @@
+#!/bin/bash
+
 nginx_down() {
     REGISTRY_ID=$(docker ps -qa -f name=promenade-nginx)
     if [ "x${REGISTRY_ID}" != "x" ]; then
@@ -22,7 +24,7 @@ nginx_cache_and_replace_tar_urls() {
     TAR_NUM=0
     mkdir -p "${NGINX_DIR}"
     for file in "$@"; do
-        grep -Po "^ +tar_url: \K.+$" "${file}" | while read tar_url ; do
+        grep -Po "^ +tar_url: \K.+$" "${file}" | while read -r tar_url ; do
             # NOTE(mark-burnet): Does not yet ignore repeated files.
             DEST_PATH="${NGINX_DIR}/cached-tar-${TAR_NUM}.tgz"
             log "Caching ${tar_url} in file: ${DEST_PATH}"

View File

@@ -1,3 +1,5 @@
+#!/bin/bash
+
 promenade_health_check() {
     VIA=${1}
     log "Checking Promenade API health"

View File

@@ -1,3 +1,5 @@
+#!/bin/bash
+
 registry_down() {
     REGISTRY_ID=$(docker ps -qa -f name=registry)
     if [[ ! -z ${REGISTRY_ID} ]]; then
@@ -7,7 +9,7 @@ registry_down() {
 }

 registry_list_images() {
-    FILES=($(find ${DEFINITION_DEPOT} -type f -name '*.yaml'))
+    FILES=($(find "${DEFINITION_DEPOT}" -type f -name '*.yaml'))
     HOSTNAME_REGEX='[a-zA-Z0-9][a-zA-Z0-9_-]{0,62}'
     DOMAIN_NAME_REGEX="${HOSTNAME_REGEX}(\.${HOSTNAME_REGEX})*"

View File

@@ -1,8 +1,11 @@
+#!/bin/bash
+
 rsync_cmd() {
     rsync -e "ssh -F ${SSH_CONFIG_DIR}/config" "${@}"
 }

 ssh_cmd_raw() {
+    # shellcheck disable=SC2068
     ssh -F "${SSH_CONFIG_DIR}/config" $@
 }
@@ -11,8 +14,10 @@ ssh_cmd() {
     shift
     args=$(shell-quote -- "${@}")
     if [[ -v GATE_DEBUG && ${GATE_DEBUG} = "1" ]]; then
+        # shellcheck disable=SC2029
         ssh -F "${SSH_CONFIG_DIR}/config" -v "${HOST}" "${args}"
     else
+        # shellcheck disable=SC2029
         ssh -F "${SSH_CONFIG_DIR}/config" "${HOST}" "${args}"
     fi
 }
@@ -28,9 +33,9 @@ ssh_config_declare() {
     env -i \
         "SSH_CONFIG_DIR=${SSH_CONFIG_DIR}" \
         "SSH_NODE_HOSTNAME=${n}" \
-        "SSH_NODE_IP=$(config_vm_net_ip ${n} "$ssh_net")" \
+        "SSH_NODE_IP=$(config_vm_net_ip "${n}" "$ssh_net")" \
         envsubst < "${TEMPLATE_DIR}/ssh-config-node.sub" >> "${SSH_CONFIG_DIR}/config"
-    if [[ "$(config_vm_bootstrap ${n})" == "true" ]]
+    if [[ "$(config_vm_bootstrap "${n}")" == "true" ]]
     then
         echo " User root" >> "${SSH_CONFIG_DIR}/config"
     else
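Rather than restructuring working code, the change silences SC2029 with targeted directives: the warning says "${args}" expands on the client before ssh runs it remotely, which is exactly what this wrapper intends, since the arguments were already escaped with shell-quote. A condensed sketch of the pattern (shell-quote is the Perl tool the framework already uses):

#!/bin/bash
remote_run() {
    host="$1"; shift
    # shell-quote escapes each argument once, so it survives the
    # remote shell's re-parsing.
    args=$(shell-quote -- "$@")
    # shellcheck disable=SC2029
    ssh "$host" "$args"    # client-side expansion is deliberate
}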

View File

@@ -184,8 +184,10 @@ iso_gen() {
     export NAME
     export SSH_PUBLIC_KEY
-    export NTP_POOLS="$(join_array ',' "$NTP_POOLS")"
-    export NTP_SERVERS="$(join_array ',' "$NTP_SERVERS")"
+    NTP_POOLS="$(join_array ',' "$NTP_POOLS")"
+    export NTP_POOLS
+    NTP_SERVERS="$(join_array ',' "$NTP_SERVERS")"
+    export NTP_SERVERS

     envsubst < "${TEMPLATE_DIR}/user-data.sub" > user-data

     fs_header="false"
@@ -199,11 +201,7 @@ iso_gen() {
                 echo "fs_header:" >> user-data
                 fs_header="true"
             fi
-            export FS_TYPE=$(config_format_type "$disk_format")
-            export DISK_DEVICE="$disk"
-            envsubst < "${TEMPLATE_DIR}/disk-data.sub" >> user-data
-            unset FS_TYPE
-            unset DISK_DEVICE
+            FS_TYPE="$(config_format_type "$disk_format")" DISK_DEVICE="$disk" envsubst < "${TEMPLATE_DIR}/disk-data.sub" >> user-data
         fi
     done
@@ -221,11 +219,7 @@ iso_gen() {
                 mount_header="true"
             fi
-            export MOUNTPOINT=$(config_format_mount "$disk_format")
-            export DISK_DEVICE="$disk"
-            envsubst < "${TEMPLATE_DIR}/mount-data.sub" >> user-data
-            unset MOUNTPOINT
-            unset DISK_DEVICE
+            MOUNTPOINT="$(config_format_mount "$disk_format")" DISK_DEVICE="$disk" envsubst < "${TEMPLATE_DIR}/mount-data.sub" >> user-data
         fi
     done
@@ -288,6 +282,7 @@ nets_clean() {
     for iface in $(ip -oneline l show type vlan | grep "$netname" | awk -F ' ' '{print $2}' | tr -d ':' | awk -F '@' '{print $1}')
     do
+        # shellcheck disable=SC2024
         sudo ip l del dev "$iface" &>> "$LOG_FILE"
     done
     virsh net-destroy "$netname" &>> "${LOG_FILE}"
@@ -303,11 +298,11 @@ net_create() {
     if [[ $(config_net_is_layer3 "$net") == "true" ]]; then
         net_template="${TEMPLATE_DIR}/l3network-definition.sub"
-        NETNAME="${virsh_netname}" NETIP="$(config_net_selfip "$netname")" NETMASK="$(cidr_to_netmask $(config_net_cidr "$netname"))" NETMAC="$(config_net_mac "$netname")" envsubst < "$net_template" > ${TEMP_DIR}/net-${netname}.xml
+        NETNAME="${virsh_netname}" NETIP="$(config_net_selfip "$netname")" NETMASK="$(cidr_to_netmask "$(config_net_cidr "$netname")")" NETMAC="$(config_net_mac "$netname")" envsubst < "$net_template" > "${TEMP_DIR}/net-${netname}.xml"
     else
         net_template="${TEMPLATE_DIR}/l2network-definition.sub"
-        NETNAME="${virsh_netname}" envsubst < "$net_template" > ${TEMP_DIR}/net-${netname}.xml
+        NETNAME="${virsh_netname}" envsubst < "$net_template" > "${TEMP_DIR}/net-${netname}.xml"
     fi

     log Creating network "${namekey}"_"${netname}"
@@ -499,12 +494,13 @@ vm_create() {
         wait

         log Creating VM "${NAME}" and bootstrapping the boot drive
+        # shellcheck disable=SC2086
         virt-install \
             --name "${NAME}" \
             --os-variant ubuntu16.04 \
             --virt-type kvm \
-            --cpu ${VIRSH_CPU_OPTS} \
-            --serial file,path=${TEMP_DIR}/console/${NAME}.log \
+            --cpu "${VIRSH_CPU_OPTS}" \
+            --serial "file,path=${TEMP_DIR}/console/${NAME}.log" \
             --graphics none \
             --noautoconsole \
             $NETWORK_OPTS \
@@ -520,13 +516,14 @@ vm_create() {
     else
         log Creating VM "${NAME}"
+        # shellcheck disable=SC2086
         virt-install \
             --name "${NAME}" \
             --os-variant ubuntu16.04 \
             --virt-type kvm \
-            --cpu ${VIRSH_CPU_OPTS} \
+            --cpu "${VIRSH_CPU_OPTS}" \
             --graphics none \
-            --serial file,path=${TEMP_DIR}/console/${NAME}.log \
+            --serial file,path="${TEMP_DIR}/console/${NAME}.log" \
             --noautoconsole \
             $NETWORK_OPTS \
             --vcpus "$(config_vm_vcpus "${NAME}")" \
@@ -607,7 +604,7 @@ get_libvirt_group() {
 make_virtmgr_account() {
     for libvirt_group in $(get_libvirt_group)
     do
-        if [[ -z "$(grep -oE '^virtmgr:' /etc/passwd)" ]]
+        if ! grep -qE '^virtmgr:' /etc/passwd
         then
             sudo useradd -m -s /bin/sh -g "${libvirt_group}" virtmgr
         else
@@ -628,17 +625,19 @@ gen_libvirt_key() {
         sudo cp "${GATE_SSH_KEY}.pub" ~virtmgr/.ssh/airship_gate.pub
     else
         log "Generating new SSH keypair for virtmgr"
+        #shellcheck disable=SC2024
         sudo ssh-keygen -N '' -b 2048 -t rsa -f ~virtmgr/.ssh/airship_gate &>> "${LOG_FILE}"
     fi
 }

 # Install private key into site definition
 install_libvirt_key() {
-    export PUB_KEY=$(sudo cat ~virtmgr/.ssh/airship_gate.pub)
+    PUB_KEY=$(sudo cat ~virtmgr/.ssh/airship_gate.pub)
+    export PUB_KEY

-    mkdir -p ${TEMP_DIR}/tmp
-    envsubst < "${TEMPLATE_DIR}/authorized_keys.sub" > ${TEMP_DIR}/tmp/virtmgr.authorized_keys
-    sudo cp ${TEMP_DIR}/tmp/virtmgr.authorized_keys ~virtmgr/.ssh/authorized_keys
+    mkdir -p "${TEMP_DIR}/tmp"
+    envsubst < "${TEMPLATE_DIR}/authorized_keys.sub" > "${TEMP_DIR}/tmp/virtmgr.authorized_keys"
+    sudo cp "${TEMP_DIR}/tmp/virtmgr.authorized_keys" ~virtmgr/.ssh/authorized_keys
     sudo chown -R virtmgr ~virtmgr/.ssh
     sudo chmod 700 ~virtmgr/.ssh
     sudo chmod 600 ~virtmgr/.ssh/authorized_keys
@@ -649,7 +648,7 @@ install_libvirt_key() {
     fi

     mkdir -p "${GATE_DEPOT}"
-    cat << EOF > ${GATE_DEPOT}/airship_drydock_kvm_ssh_key.yaml
+    cat << EOF > "${GATE_DEPOT}/airship_drydock_kvm_ssh_key.yaml"
 ---
 schema: deckhand/CertificateKey/v1
 metadata:
@@ -661,5 +660,5 @@ metadata:
   storagePolicy: cleartext
 data: |-
 EOF
-    sudo cat ~virtmgr/.ssh/airship_gate | sed -e 's/^/ /' >> ${GATE_DEPOT}/airship_drydock_kvm_ssh_key.yaml
+    sudo cat ~virtmgr/.ssh/airship_gate | sed -e 's/^/ /' >> "${GATE_DEPOT}/airship_drydock_kvm_ssh_key.yaml"
 }
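The `# shellcheck disable=SC2086` above each virt-install call keeps $NETWORK_OPTS intentionally unquoted: it holds several whitespace-separated --network options that must split into separate words. A standalone demonstration of the trade-off:

#!/bin/bash
NETWORK_OPTS="--network net1 --network net2"
count() { echo "$# words"; }
count "$NETWORK_OPTS"    # -> 1 word  (quoted: options fused together)
# shellcheck disable=SC2086
count $NETWORK_OPTS      # -> 4 words (unquoted: intended splitting)

Building the options as an array and expanding "${NETWORK_OPTS[@]}" would satisfy the linter without a directive; that is the route the promenade and pegleg scripts below take.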

View File

@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
 # Copyright 2018 AT&T Intellectual Property. All other rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,7 +17,7 @@ set -e

 source "${GATE_UTILS}"

-BGP_ROUTER=$1
+BGP_ROUTER="$1"

 bgp_router_config
-bgp_router_start ${BGP_ROUTER}
+bgp_router_start "${BGP_ROUTER}"

View File

@@ -1,19 +1,19 @@
-#!/usr/bin/env bash
+#!/bin/bash
 set -e

 source "${GATE_UTILS}"

-mkdir -p ${SCRIPT_DEPOT}
-chmod 777 ${SCRIPT_DEPOT}
+mkdir -p "${SCRIPT_DEPOT}"
+chmod 777 "${SCRIPT_DEPOT}"

-DOCKER_RUN_OPTS=("-e PROMENADE_DEBUG=${PROMENADE_DEBUG}")
+DOCKER_RUN_OPTS=("-e" "PROMENADE_DEBUG=${PROMENADE_DEBUG}")

 for v in HTTPS_PROXY HTTP_PROXY NO_PROXY https_proxy http_proxy no_proxy
 do
     if [[ -v "${v}" ]]
     then
-        DOCKER_RUN_OPTS+=(" -e ${v}=${!v}")
+        DOCKER_RUN_OPTS+=("-e" "${v}=${!v}")
     fi
 done
@@ -25,18 +25,18 @@ then
     KEYS_PATH=""
 fi

-PROMENADE_TMP_LOCAL="$(basename $PROMENADE_TMP_LOCAL)"
+PROMENADE_TMP_LOCAL="$(basename "$PROMENADE_TMP_LOCAL")"
 PROMENADE_TMP="${TEMP_DIR}/${PROMENADE_TMP_LOCAL}"
-mkdir -p $PROMENADE_TMP
-chmod 777 $PROMENADE_TMP
+mkdir -p "$PROMENADE_TMP"
+chmod 777 "$PROMENADE_TMP"

 log Prepare hyperkube
 docker run --rm -t \
     --network host \
     -v "${PROMENADE_TMP}:/tmp/${PROMENADE_TMP_LOCAL}" \
-    ${DOCKER_RUN_OPTS[*]} \
+    "${DOCKER_RUN_OPTS[@]}" \
     "${IMAGE_HYPERKUBE}" \
-    cp /hyperkube /tmp/${PROMENADE_TMP_LOCAL}
+    cp /hyperkube "/tmp/${PROMENADE_TMP_LOCAL}"

 log Building scripts
 docker run --rm -t \
@@ -48,10 +48,10 @@ docker run --rm -t \
     -v "${SCRIPT_DEPOT}:/scripts" \
     -v "${PROMENADE_TMP}:/tmp/${PROMENADE_TMP_LOCAL}" \
     -e "PROMENADE_ENCRYPTION_KEY=${PROMENADE_ENCRYPTION_KEY}" \
-    ${DOCKER_RUN_OPTS[*]} \
+    "${DOCKER_RUN_OPTS[@]}" \
     "${IMAGE_PROMENADE_CLI}" \
     promenade \
         build-all \
         --validators \
         -o /scripts \
-        /config/*.yaml ${CERTS_PATH} ${KEYS_PATH}
+        /config/*.yaml "${CERTS_PATH}" "${KEYS_PATH}"
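DOCKER_RUN_OPTS changes from one string per flag/value pair to genuine array elements, and from ${DOCKER_RUN_OPTS[*]} to "${DOCKER_RUN_OPTS[@]}" at expansion: [@] in quotes yields one word per element, while [*] joins everything into a single word. A reduced sketch:

#!/bin/bash
DOCKER_RUN_OPTS=("-e" "PROMENADE_DEBUG=false")
for v in HTTPS_PROXY NO_PROXY; do
    if [[ -v "${v}" ]]; then
        DOCKER_RUN_OPTS+=("-e" "${v}=${!v}")
    fi
done
printf '<%s> ' "${DOCKER_RUN_OPTS[@]}"; echo   # one word per element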

View File

@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
 # Copyright 2018 AT&T Intellectual Property. All other rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -22,7 +22,7 @@ source "${GATE_UTILS}"
 # registry_up

 # Create temp_dir structure
-mkdir -p ${TEMP_DIR}/console
+mkdir -p "${TEMP_DIR}/console"

 # SSH setup
 ssh_setup_declare

View File

@@ -1,32 +1,11 @@
-#!/usr/bin/env bash
+#!/bin/bash
 set -e

 source "${GATE_UTILS}"

-IS_UPDATE=0
-DO_EXCLUDE=0
-EXCLUDE_PATTERNS=()
-
-while getopts "ux:" opt; do
-    case "${opt}" in
-        u)
-            IS_UPDATE=1
-            ;;
-        x)
-            DO_EXCLUDE=1
-            EXCLUDE_PATTERNS+=("${OPTARG}")
-            ;;
-        *)
-            echo "Unknown option"
-            exit 1
-            ;;
-    esac
-done
-shift $((OPTIND-1))
-
-DESIGN_FILES=($(find "${DEFINITION_DEPOT}" -name '*.yaml' | xargs -n 1 basename | xargs -n 1 printf "/tmp/design/%s\n"))
-GATE_FILES=($(find "${GATE_DEPOT}" -name '*.yaml' | xargs -n 1 basename | xargs -n 1 printf "/tmp/gate/%s\n"))
+DESIGN_FILES=($(find "${DEFINITION_DEPOT}" -name '*.yaml' -print0 | xargs -0 -n 1 basename | xargs -n 1 printf "/tmp/design/%s\n"))
+GATE_FILES=($(find "${GATE_DEPOT}" -name '*.yaml' -print0 | xargs -0 -n 1 basename | xargs -n 1 printf "/tmp/gate/%s\n"))

 mkdir -p "${CERT_DEPOT}"
 chmod 777 "${CERT_DEPOT}"

View File

@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
 # Copyright 2018 AT&T Intellectual Property. All other rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,7 +17,7 @@ set -e

 source "${GATE_UTILS}"

-DNS_SERVER=$1
+DNS_SERVER="$1"

 ingress_dns_config
-ingress_dns_start ${DNS_SERVER}
+ingress_dns_start "${DNS_SERVER}"

View File

@@ -21,29 +21,23 @@ mkdir -p "${DEFINITION_DEPOT}"
 chmod 777 "${DEFINITION_DEPOT}"

 render_pegleg_cli() {
-    cli_string="pegleg -v site"
+    cli_string=("pegleg" "-v" "site")

     if [[ "${GERRIT_SSH_USER}" ]]
     then
-        cli_string+=" -u ${GERRIT_SSH_USER}"
+        cli_string+=("-u" "${GERRIT_SSH_USER}")
     fi

     if [[ "${GERRIT_SSH_KEY}" ]]
     then
-        cli_string+=" -k /workspace/${GERRIT_SSH_KEY}"
+        cli_string+=("-k" "/workspace/${GERRIT_SSH_KEY}")
     fi

-    primary_repo=$(config_pegleg_primary_repo)
+    primary_repo="$(config_pegleg_primary_repo)"

     if [[ -d "${REPO_ROOT}/${primary_repo}" ]]
     then
-        # NOTE: to get latest pegleg colllect to work
-        # airship-in-bottle repo has versions (v1.0demo, v1.0dev) within global
-        # and that is preventing pegleg to collect documents.
-        # It complains with duplicate data
-        $(find ${REPO_ROOT}/${primary_repo} -name "v1.0dev" -type d \
-            -exec rm -r {} +)
-        cli_string="${cli_string} -r /workspace/${primary_repo}"
+        cli_string+=("-r" "/workspace/${primary_repo}")
     else
         log "${primary_repo} not a valid primary repository"
         return 1
@@ -55,18 +49,20 @@ render_pegleg_cli() {
     then
         for r in ${aux_repos[*]}
         do
-            cli_string="${cli_string} -e ${r}=/workspace/${r}"
+            cli_string+=("-e" "${r}=/workspace/${r}")
         done
     fi

-    cli_string="${cli_string} collect -s /collect"
-    cli_string="${cli_string} $(config_pegleg_sitename)"
+    cli_string+=("collect" "-s" "/collect")
+    cli_string+=("$(config_pegleg_sitename)")

-    echo ${cli_string}
+    printf " %s " "${cli_string[@]}"
 }

 collect_design_docs() {
+    # shellcheck disable=SC2091
+    # shellcheck disable=SC2046
     docker run \
         --rm -t \
         --network host \
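The pegleg CLI is now accumulated in a bash array instead of by string concatenation, then flattened with printf for the caller. A condensed sketch of the builder (the site name is illustrative):

#!/bin/bash
build_cli() {
    local cli=("pegleg" "-v" "site")
    if [[ "${GERRIT_SSH_USER:-}" ]]; then
        cli+=("-u" "${GERRIT_SSH_USER}")
    fi
    cli+=("collect" "-s" "/collect" "demo-site")
    printf ' %s ' "${cli[@]}"
}
build_cli; echo

Because the array is flattened back into a string that the caller splices into its docker run invocation, the call site still word-splits the result; the `# shellcheck disable=SC2046`/`SC2091` directives acknowledge that. Invoking "${cli[@]}" directly would avoid both warnings, but here the command line has to cross into the container.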

View File

@@ -21,29 +21,23 @@ mkdir -p "${RENDERED_DEPOT}"
 chmod 777 "${RENDERED_DEPOT}"

 render_pegleg_cli() {
-    cli_string="pegleg -v site"
+    cli_string=("pegleg" "-v" "site")

     if [[ "${GERRIT_SSH_USER}" ]]
     then
-        cli_string+=" -u ${GERRIT_SSH_USER}"
+        cli_string+=("-u" "${GERRIT_SSH_USER}")
     fi

     if [[ "${GERRIT_SSH_KEY}" ]]
     then
-        cli_string+=" -k /workspace/${GERRIT_SSH_KEY}"
+        cli_string+=("-k" "/workspace/${GERRIT_SSH_KEY}")
     fi

-    primary_repo=$(config_pegleg_primary_repo)
+    primary_repo="$(config_pegleg_primary_repo)"

     if [[ -d "${REPO_ROOT}/${primary_repo}" ]]
     then
-        # NOTE: to get latest pegleg colllect to work
-        # airship-in-bottle repo has versions (v1.0demo, v1.0dev) within global
-        # and that is preventing pegleg to collect documents.
-        # It complains with duplicate data
-        $(find ${REPO_ROOT}/${primary_repo} -name "v1.0dev" -type d \
-            -exec rm -r {} +)
-        cli_string="${cli_string} -r /workspace/${primary_repo}"
+        cli_string+=("-r" "/workspace/${primary_repo}")
     else
         log "${primary_repo} not a valid primary repository"
         return 1
@@ -55,18 +49,20 @@ render_pegleg_cli() {
     then
         for r in ${aux_repos[*]}
         do
-            cli_string="${cli_string} -e ${r}=/workspace/${r}"
+            cli_string+=("-e" "${r}=/workspace/${r}")
         done
     fi

-    cli_string="${cli_string} render -o /collect/rendered.yaml"
-    cli_string="${cli_string} $(config_pegleg_sitename)"
+    cli_string+=("render" "-o" "/collect/rendered.yaml")
+    cli_string+=("$(config_pegleg_sitename)")

-    echo ${cli_string}
+    printf " %s " "${cli_string[@]}"
 }

 collect_rendered_doc() {
+    # shellcheck disable=SC2091
+    # shellcheck disable=SC2046
     docker run \
         --rm -t \
         --network host \

View File

@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
 # Copyright 2018 AT&T Intellectual Property. All other rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -29,9 +29,9 @@ while getopts "og" opt; do
         o)
             OMIT_CERTS=1
             ;;
         g)
             OMIT_GATE=1
             ;;
         *)
             echo "Unknown option"
             exit 1
@@ -57,7 +57,7 @@ ssh_cmd "${BUILD_NAME}" mkdir -p "${BUILD_WORK_DIR}/site"
 rsync_cmd "${DEFINITION_DEPOT}"/*.yaml "${BUILD_NAME}:${BUILD_WORK_DIR}/site/"

 sleep 120
-check_configdocs_result "$(shipyard_cmd create configdocs design --directory=${BUILD_WORK_DIR}/site --replace)"
+check_configdocs_result "$(shipyard_cmd create configdocs design "--directory=${BUILD_WORK_DIR}/site" --replace)"

 # Skip certs/gate if already part of site manifests
 if [[ -n "${USE_EXISTING_SECRETS}" ]]
@@ -70,14 +70,14 @@ if [[ "${OMIT_CERTS}" == "0" ]]
 then
     ssh_cmd "${BUILD_NAME}" mkdir -p "${BUILD_WORK_DIR}/certs"
     rsync_cmd "${CERT_DEPOT}"/*.yaml "${BUILD_NAME}:${BUILD_WORK_DIR}/certs/"
-    check_configdocs_result "$(shipyard_cmd create configdocs certs --directory=${BUILD_WORK_DIR}/certs --append)"
+    check_configdocs_result "$(shipyard_cmd create configdocs certs "--directory=${BUILD_WORK_DIR}/certs" --append)"
 fi

 if [[ "${OMIT_GATE}" == "0" ]]
 then
     ssh_cmd "${BUILD_NAME}" mkdir -p "${BUILD_WORK_DIR}/gate"
     rsync_cmd "${GATE_DEPOT}"/*.yaml "${BUILD_NAME}:${BUILD_WORK_DIR}/gate/"
-    check_configdocs_result "$(shipyard_cmd create configdocs gate --directory=${BUILD_WORK_DIR}/gate --append)"
+    check_configdocs_result "$(shipyard_cmd create configdocs gate "--directory=${BUILD_WORK_DIR}/gate" --append)"
 fi

 check_configdocs_result "$(shipyard_cmd commit configdocs)"

View File

@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
 # Copyright 2018 AT&T Intellectual Property. All other rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,7 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.

-if [[ -n $GATE_DEBUG && $GATE_DEBUG = "1" ]]; then
+if [[ -n "$GATE_DEBUG" && "$GATE_DEBUG" = "1" ]]; then
     set -x
 fi
@@ -22,9 +22,9 @@ set -e
 function upload_script() {
     source "$GATE_UTILS"
-    BASENAME=$(basename $BASH_SOURCE)
+    BASENAME="$(basename "${BASH_SOURCE[0]}")"
     # Copies script to genesis VM
-    rsync_cmd "$BASH_SOURCE" "$GENESIS_NAME:/root/airship/"
+    rsync_cmd "${BASH_SOURCE[0]}" "$GENESIS_NAME:/root/airship/"
     set -o pipefail
     ssh_cmd_raw "$GENESIS_NAME" "KUBECONFIG=${KUBECONFIG} GATE_DEBUG=${GATE_DEBUG} NUM_NODES=$1 /root/airship/${BASENAME}" 2>&1 | tee -a "$LOG_FILE"
     set +o pipefail
@@ -38,7 +38,7 @@ function kubectl_retry() {
         cnt=$((cnt+1))
         if [[ "$ret" -ne "0" ]]; then
             if [[ "$cnt" -lt "$MAX_TRIES" ]]; then
-                sleep $PAUSE
+                sleep "$PAUSE"
             else
                 return 1
             fi
@@ -51,12 +51,12 @@ function kubectl_retry() {
 function check_kube_nodes() {
     try=0
     while true; do
-        nodes_list=$(kubectl_retry get nodes --no-headers) || true
-        ret=$?
-        try=$((try+1))
+        nodes_list="$(kubectl_retry get nodes --no-headers)" || true
+        ret="$?"
+        try="$((try+1))"
         if [ "$ret" -ne "0" ]; then
             if [[ "$try" -lt "$MAX_TRIES" ]]; then
-                sleep $PAUSE
+                sleep "$PAUSE"
             else
                 echo -e "Can't get nodes"
                 return 1
@@ -81,9 +81,9 @@ function check_kube_components() {
         res=$(kubectl_retry get cs -o jsonpath="{.items[*].conditions[?(@.type == \"Healthy\")].status}") || true
         try=$((try+1))

-        if $(echo $res | grep -q False); then
+        if echo "$res" | grep -q False; then
             if [[ "$try" -lt "$MAX_TRIES" ]]; then
-                sleep $PAUSE
+                sleep "$PAUSE"
             else
                 echo "Error: kubernetes components are not working properly"
                 kubectl_retry get cs
@@ -96,7 +96,7 @@ function check_kube_components() {
 }

 if [[ -n "$GATE_UTILS" ]]; then
-    upload_script $NUM_NODES
+    upload_script "$NUM_NODES"
 else
     set +e
     KUBECONFIG="${KUBECONFIG:-/etc/kubernetes/admin/kubeconfig.yaml}"
@@ -108,8 +108,7 @@ set +e
         echo "Error: ${KUBECTL} not found"
         exit 1
     fi
-    exit_code=0
-    check_kube_nodes $NUM_NODES
+    check_kube_nodes "$NUM_NODES"
     nodes_status=$?
     check_kube_components
     components_status=$?

View File

@@ -58,7 +58,7 @@ jq -cr '.stages | .[]' "${GATE_MANIFEST}" > "${STAGES}"
 # the read below, since we will be calling SSH, which will consume the
 # remaining data on STDIN.
 exec 3< "$STAGES"
-while read -u 3 stage; do
+while read -r -u 3 stage; do
     NAME=$(echo "${stage}" | jq -r .name)
     STAGE_SCRIPT="$(echo "${stage}" | jq -r .script)"
     STAGE_CMD=""

View File

@@ -22,5 +22,5 @@ docker run -d \
     -e REGISTRY_HTTP_ADDR=0.0.0.0:5000 \
     --restart=always \
     --name registry \
-    -v $REGISTRY_DATA_DIR:/var/lib/registry \
+    -v "$REGISTRY_DATA_DIR:/var/lib/registry" \
     registry:2

View File

@@ -15,15 +15,15 @@
 set -ex

-IMAGES_FILE=$(dirname $0)/IMAGES
+IMAGES_FILE="$(dirname "$0")/IMAGES"

 IFS=,
-grep -v '^#.*' $IMAGES_FILE | while read src tag dst; do
-  echo src=$src tag=$tag dst=$dst
-  sudo docker pull $src:$tag
-  full_dst=localhost:5000/$dst:$tag
-  sudo docker tag $src:$tag $full_dst
-  sudo docker push $full_dst
+grep -v '^#.*' "$IMAGES_FILE" | while read -r src tag dst; do
+  echo "src=$src tag=$tag dst=$dst"
+  sudo docker pull "$src:$tag"
+  full_dst="localhost:5000/$dst:$tag"
+  sudo docker tag "$src:$tag" "$full_dst"
+  sudo docker push "$full_dst"
 done
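With IFS=, in effect, `read -r src tag dst` splits each record on commas, and -r plus the added quoting keep image references intact. A self-contained sketch of the same parse (sample record; the real script reads its IMAGES file):

#!/bin/bash
IFS=,
printf '%s\n' 'docker.io/library/nginx,1.17,mirror/nginx' |
while read -r src tag dst; do
    echo "pull $src:$tag -> localhost:5000/$dst:$tag"
done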

View File

@@ -40,3 +40,5 @@ sudo docker run -t --rm --net=host
     -v /:/host:rshared
 EndOfCommand
 )
+
+export base_docker_command

View File

@@ -21,8 +21,10 @@ OPENSTACK_CLI_IMAGE="${OPENSTACK_CLI_IMAGE:-docker.io/openstackhelm/heat:ocata}"
 # Get the path of the directory where the script is located
 # Source Base Docker Command
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-cd ${DIR} && source openstack_cli_docker_base_command.sh
+DIR="$(dirname "${BASH_SOURCE[0]}")"
+# shellcheck disable=SC1091
+cd "${DIR}" && source openstack_cli_docker_base_command.sh

 # Execute OpenStack CLI
-${base_docker_command} ${OPENSTACK_CLI_IMAGE} ${COMMAND} $@
+# shellcheck disable=SC2154
+${base_docker_command} "${OPENSTACK_CLI_IMAGE}" "${COMMAND}" "$@"