Add virsh-based local testing environment

This is intended to address difficulties in setting up the existing
Vagrant-based development environment, and provide a locally-runnable
gate script.

./tools/gate.sh runs tests as specified by a JSON manifest.  Valid
manifests live in `tools/g2/manifests`.  Currently, the following are
supported:

* full - Run an extensive suite.
* genesis - Run only through Genesis.
* quick - Run a small cluster test.
* prepare - Run only the off-site preparation before Genesis -- useful
  for quick sanity testing.
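
For example, to run the small cluster test:

    ./tools/gate.sh quick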

Change-Id: I4900d34437f9fe735f580ab91b38a6bb5424481e
Mark Burnett 2017-10-19 09:02:54 -05:00
parent 95643147c5
commit 1cfbdd627d
45 changed files with 1163 additions and 14 deletions


@@ -1,6 +1,8 @@
.tox
.vagrant
Vagrantfile
__pycache__
docs
example
promenade.egg-info
tools


@@ -1,9 +1,96 @@
Getting Started
===============

Running Tests
-------------

Initial Setup of Virsh Environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

To set up a local functional testing environment on your Ubuntu 16.04 machine,
run:

.. code-block:: bash

    ./tools/setup_gate.sh

Running Functional Tests
^^^^^^^^^^^^^^^^^^^^^^^^

To run complete functional tests locally:

.. code-block:: bash

    ./tools/gate.sh

For more verbose output, try:

.. code-block:: bash

    PROMENADE_DEBUG=1 ./tools/gate.sh

The gate leaves its test VMs running for convenience. To shut everything down:

.. code-block:: bash

    ./tools/stop_gate.sh

To run a particular set of functional tests, you can specify the set on the
command line:

.. code-block:: bash

    ./tools/gate.sh <SUITE>

Valid functional test suites are defined by JSON files that live in
``tools/g2/manifests``.
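
Each manifest is a JSON document whose ``stages`` array lists the stage
scripts to run. To preview the stages a suite will execute (``jq`` is
installed by ``setup_gate.sh``), for example:

.. code-block:: bash

    jq -r '.stages[].name' tools/g2/manifests/quick.json
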
Utilities
^^^^^^^^^

There are a couple of helper utilities available for interacting with gate VMs.
These can be found in ``tools/g2/bin``. The most important is certainly
``ssh.sh``, which allows you to connect easily to test VMs:

.. code-block:: bash

    ./tools/g2/bin/ssh.sh n0
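
The same directory provides ``rsync.sh``, a thin wrapper around ``rsync`` that
uses the gate's generated SSH configuration. A hypothetical example (the file
name here is illustrative), copying a local file to the genesis VM:

.. code-block:: bash

    ./tools/g2/bin/rsync.sh ./notes.txt n0:/root/
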
Development
-----------

Using a Local Registry
^^^^^^^^^^^^^^^^^^^^^^

Repeatedly downloading multiple copies of images during development can be
quite slow. To avoid this issue, you can run a docker registry on the
development host:

.. code-block:: bash

    ./tools/registry/start.sh
    ./tools/registry/update_cache.sh

Then, the images used by the example can be updated using:

.. code-block:: bash

    ./tools/registry/update_example.sh

That change can be undone via:

.. code-block:: bash

    ./tools/registry/revert_example.sh

The registry can be stopped with:

.. code-block:: bash

    ./tools/registry/stop.sh

Deployment using Vagrant
^^^^^^^^^^^^^^^^^^^^^^^^


@@ -426,7 +426,7 @@ data:
no_hooks: true
values:
calico:
ip_autodetection_method: interface=ens6
ip_autodetection_method: interface=ens3
pod_ip_cidr: 10.97.0.0/16
ctl:
install_on_host: true
@@ -867,6 +867,35 @@ metadata:
dest:
path: '$.values.nodes[2].tls.peer.key'
-
src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-n3
path: $
dest:
path: '$.values.nodes[3].tls.client.cert'
-
src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-n3
path: $
dest:
path: '$.values.nodes[3].tls.client.key'
-
src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-n3-peer
path: $
dest:
path: '$.values.nodes[3].tls.peer.cert'
-
src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-n3-peer
path: $
dest:
path: '$.values.nodes[3].tls.peer.key'
data:
chart_name: etcd
release: kubernetes-etcd
@@ -914,6 +943,14 @@ data:
peer:
cert: placeholder
key: placeholder
- name: n3
tls:
client:
cert: placeholder
key: placeholder
peer:
cert: placeholder
key: placeholder
service:
name: kubernetes-etcd
ip: 10.96.0.2


@@ -11,14 +11,7 @@ data:
ip: 192.168.77.10
join_ip: 192.168.77.11
labels:
static:
- node-role.kubernetes.io/master=
dynamic:
- calico-etcd=enabled
- kubernetes-apiserver=enabled
- kubernetes-controller-manager=enabled
- kubernetes-etcd=enabled
- kubernetes-scheduler=enabled
- ucp-control-plane=enabled
---
schema: promenade/KubernetesNode/v1
@@ -77,6 +70,13 @@ data:
ip: 192.168.77.13
join_ip: 192.168.77.11
labels:
static:
- node-role.kubernetes.io/master=
dynamic:
- calico-etcd=enabled
- kubernetes-apiserver=enabled
- kubernetes-controller-manager=enabled
- kubernetes-etcd=enabled
- kubernetes-scheduler=enabled
- ucp-control-plane=enabled
...


@@ -38,7 +38,6 @@ class PKI:
files={
'csr.json': self.csr(name=ca_name, groups=['Kubernetes']),
})
LOG.debug('ca_cert=%r', result['cert'])
self.certificate_authorities[ca_name] = result
return (self._wrap_ca(ca_name, result['cert']), self._wrap_ca_key(
@@ -96,7 +95,8 @@ class PKI:
f.write(data)
return json.loads(
subprocess.check_output(['cfssl'] + command, cwd=tmp))
subprocess.check_output(['cfssl'] + command, cwd=tmp,
stderr=subprocess.PIPE))
def _openssl(self, command, *, files=None):
if not files:
@@ -107,7 +107,8 @@ class PKI:
with open(os.path.join(tmp, filename), 'w') as f:
f.write(data)
subprocess.check_call(['openssl'] + command, cwd=tmp)
subprocess.check_call(['openssl'] + command, cwd=tmp,
stderr=subprocess.PIPE)
result = {}
for filename in os.listdir(tmp):


@@ -13,6 +13,7 @@ ExecStart=/opt/kubernetes/bin/kubelet \
--eviction-max-pod-grace-period -1 \
--node-status-update-frequency 5s \
--kubeconfig=/etc/kubernetes/kubeconfig \
--hostname-override={{ config.get_first('Genesis:hostname', 'KubernetesNode:hostname') }} \
--network-plugin=cni \
--node-ip={{ config.get_first('Genesis:ip', 'KubernetesNode:ip') }} \
{%- if config['Genesis:labels.static'] is defined %}


@@ -7,14 +7,17 @@ HTTP_PROXY={{ config['KubernetesNetwork:proxy.url'] }}
NO_PROXY={{ config.get(kind='KubernetesNetwork') | fill_no_proxy }}
{%- endif %}
EXTRA_ARGS=
if [ "x$ARMADA_CHART_PATH_OVERRIDE" != "x" ]; then
EXTRA_ARGS=" -v $ARMADA_CHART_PATH_OVERRIDE:/etc/genesis/armada/assets/charts"
fi
exec docker run --rm -i \
--net host \
-v /etc/genesis/armada/auth:/armada/.kube \
-v /etc/genesis/armada/assets:/etc/genesis/armada/assets \
{%- if config.debug %}
-v /vagrant/charts:/etc/genesis/armada/assets/charts \
{%- endif %}
$EXTRA_ARGS \
{%- if config['KubernetesNetwork:proxy.url'] is defined %}
-e http_proxy=$HTTP_PROXY \
-e HTTP_PROXY=$HTTP_PROXY \


@@ -1,5 +1,7 @@
{% include "header.sh" with context %}
wait_for_kubernetes_api
for node in $(kubectl get nodes -o name | cut -d / -f 2); do
validate_kubectl_logs $node
done

tools/g2/.gitignore vendored Normal file

@@ -0,0 +1 @@
config-ssh

tools/g2/bin/etcdctl.sh Executable file

@@ -0,0 +1,11 @@
#!/usr/bin/env bash
set -e
SCRIPT_DIR=$(realpath $(dirname $0))
WORKSPACE=$(realpath ${SCRIPT_DIR}/../../..)
GATE_UTILS=${WORKSPACE}/tools/g2/lib/all.sh
source ${GATE_UTILS}
etcdctl_cmd ${@}

tools/g2/bin/rsync.sh Executable file

@@ -0,0 +1,11 @@
#!/usr/bin/env bash
set -e
SCRIPT_DIR=$(realpath $(dirname $0))
WORKSPACE=$(realpath ${SCRIPT_DIR}/../../..)
GATE_UTILS=${WORKSPACE}/tools/g2/lib/all.sh
source ${GATE_UTILS}
exec rsync -e "ssh -F ${SSH_CONFIG_DIR}/config" $@

tools/g2/bin/ssh.sh Executable file

@@ -0,0 +1,11 @@
#!/usr/bin/env bash
set -e
SCRIPT_DIR=$(realpath $(dirname $0))
WORKSPACE=$(realpath ${SCRIPT_DIR}/../../..)
GATE_UTILS=${WORKSPACE}/tools/g2/lib/all.sh
source ${GATE_UTILS}
exec ssh -F ${SSH_CONFIG_DIR}/config $@

tools/g2/lib/all.sh Normal file

@@ -0,0 +1,16 @@
LIB_DIR=$(realpath $(dirname $BASH_SOURCE))
source $LIB_DIR/config.sh
source $LIB_DIR/const.sh
source $LIB_DIR/etcd.sh
source $LIB_DIR/kube.sh
source $LIB_DIR/log.sh
source $LIB_DIR/promenade.sh
source $LIB_DIR/registry.sh
source $LIB_DIR/ssh.sh
source $LIB_DIR/validate.sh
source $LIB_DIR/virsh.sh
if [ "x${PROMENADE_DEBUG}" = "x1" ]; then
set -x
fi

tools/g2/lib/config.sh Normal file

@@ -0,0 +1,7 @@
export BASE_IMAGE_SIZE=${BASE_IMAGE_SIZE:-68719476736}
export BASE_IMAGE_URL=${BASE_IMAGE_URL:-https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img}
export IMAGE_PROMENADE=${IMAGE_PROMENADE:-quay.io/attcomdev/promenade:latest}
export PROMENADE_DEBUG=${PROMENADE_DEBUG:-0}
export REGISTRY_DATA_DIR=${REGISTRY_DATA_DIR:-/mnt/registry}
export VIRSH_POOL=${VIRSH_POOL:-promenade}
export VIRSH_POOL_PATH=${VIRSH_POOL_PATH:-/var/lib/libvirt/promenade}

tools/g2/lib/const.sh Normal file

@@ -0,0 +1,15 @@
GENESIS_NAME=n0
SSH_CONFIG_DIR=${WORKSPACE}/tools/g2/config-ssh
TEMPLATE_DIR=${WORKSPACE}/tools/g2/templates
XML_DIR=${WORKSPACE}/tools/g2/xml
VM_NAMES=(
n0
n1
n2
n3
)
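# vm_ip maps a VM name to its static address, e.g. n0 -> 192.168.77.10.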
vm_ip() {
NAME=${1}
echo 192.168.77.1${NAME:1}
}

tools/g2/lib/etcd.sh Normal file

@@ -0,0 +1,17 @@
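# etcdctl_cmd CLUSTER VM [args...]: run etcdctl inside the ${CLUSTER}-etcd-${VM}
# pod via kubectl exec on VM.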
etcdctl_cmd() {
CLUSTER=${1}
VM=${2}
shift 2
kubectl_cmd ${VM} -n kube-system exec -t ${CLUSTER}-etcd-${VM} -- etcdctl ${@}
}
etcdctl_member_list() {
CLUSTER=${1}
VM=${2}
shift 2
EXTRA_ARGS=${@}
etcdctl_cmd ${CLUSTER} ${VM} member list -w json | jq -r '.members[].name' | sort
}

tools/g2/lib/kube.sh Normal file

@@ -0,0 +1,7 @@
kubectl_cmd() {
VIA=${1}
shift
ssh_cmd ${VIA} kubectl ${@}
}

tools/g2/lib/log.sh Normal file

@@ -0,0 +1,67 @@
if [[ "x${GATE_COLOR}" = "x1" ]]; then
C_CLEAR="\e[0m"
C_ERROR="\e[38;5;160m"
C_HEADER="\e[38;5;164m"
C_TEMP="\e[38;5;226m"
C_HILIGHT="\e[38;5;27m"
C_MUTE="\e[38;5;238m"
C_SUCCESS="\e[38;5;46m"
else
C_CLEAR=""
C_ERROR=""
C_HEADER=""
C_TEMP=""
C_HILIGHT=""
C_MUTE=""
C_SUCCESS=""
fi
log() {
echo -e ${C_MUTE}$(date --utc)${C_CLEAR} $* 1>&2
}
log_stage_diagnostic_header() {
echo -e " ${C_ERROR}= Diagnostic Report =${C_CLEAR}"
}
log_color_reset() {
echo -e "${C_CLEAR}"
}
log_huge_success() {
echo -e "${C_SUCCESS}=== HUGE SUCCESS ===${C_CLEAR}"
}
log_note() {
echo -e ${C_HILIGHT}NOTE:${C_CLEAR} ${@}
}
log_stage_error() {
NAME=${1}
TEMP_DIR=${2}
echo -e " ${C_ERROR}== Error in stage ${C_HILIGHT}${NAME}${C_ERROR} ( ${C_TEMP}${TEMP_DIR}${C_ERROR} ) ==${C_CLEAR}"
}
log_stage_footer() {
NAME=${1}
echo -e "${C_HEADER}=== Finished stage ${C_HILIGHT}${NAME}${C_HEADER} ===${C_CLEAR}"
}
log_stage_header() {
NAME=${1}
echo -e "${C_HEADER}=== Executing stage ${C_HILIGHT}${NAME}${C_HEADER} ===${C_CLEAR}"
}
log_stage_success() {
echo -e " ${C_SUCCESS}== Stage Success ==${C_CLEAR}"
}
log_temp_dir() {
TEMP_DIR=${1}
echo -e Working in ${C_TEMP}${TEMP_DIR}${C_CLEAR}
}
if [[ "x${PROMENADE_DEBUG}" = "x1" ]]; then
export LOG_FILE=/dev/stderr
else
export LOG_FILE=/dev/null
fi


@@ -0,0 +1,7 @@
promenade_teardown_node() {
TARGET=${1}
VIA=${2}
ssh_cmd ${TARGET} /usr/local/bin/promenade-teardown
kubectl_cmd ${VIA} delete node ${TARGET}
}

tools/g2/lib/registry.sh Normal file

@@ -0,0 +1,67 @@
registry_down() {
REGISTRY_ID=$(docker ps -qa -f name=registry)
if [ "x${REGISTRY_ID}" != "x" ]; then
log Removing docker registry
docker rm -fv ${REGISTRY_ID} &> ${LOG_FILE}
fi
}
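# List image references (e.g. quay.io/org/image:tag) found in the given YAML
# files (default: example/*.yaml), excluding refs already pointing at the
# local registry:5000 cache.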
registry_list_images() {
FILES=${@:-${WORKSPACE}/example/*.yaml}
HOSTNAME_REGEX='[a-zA-Z0-9][a-zA-Z0-9_-]{0,62}'
DOMAIN_NAME_REGEX="${HOSTNAME_REGEX}(\.${HOSTNAME_REGEX})*"
PORT_REGEX='[0-9]+'
NETLOC_REGEX="${DOMAIN_NAME_REGEX}(:${PORT_REGEX})?"
REPO_COMPONENT_REGEX='[a-zA-Z0-9][a-zA-Z0-9_-]{0,62}'
REPO_REGEX="${REPO_COMPONENT_REGEX}(/${REPO_COMPONENT_REGEX})*"
TAG_REGEX='[a-zA-Z0-9][a-zA-Z0-9.-]{0,127}'
cat ${FILES} \
| tr ' \t' '\n' | tr -s '\n' \
| grep -E "^(${NETLOC_REGEX}/)?${REPO_REGEX}:${TAG_REGEX}$" \
| sort -u \
| grep -v 'registry:5000'
}
registry_populate() {
log Validating local registry is populated
for image in $(registry_list_images); do
if ! docker pull localhost:5000/${image} &> /dev/null; then
log Loading image ${image} into local registry
docker pull ${image} >& ${LOG_FILE}
docker tag ${image} localhost:5000/${image} >& ${LOG_FILE}
docker push localhost:5000/${image} >& ${LOG_FILE}
fi
done
}
registry_replace_references() {
FILES=${@}
for image in $(registry_list_images ${FILES}); do
sed -i "s;${image};registry:5000/${image};g" ${FILES}
done
}
registry_up() {
log Validating local registry is up
REGISTRY_ID=$(docker ps -qa -f name=registry)
RUNNING_REGISTRY_ID=$(docker ps -q -f name=registry)
if [ "x${RUNNING_REGISTRY_ID}" = "x" -a "x${REGISTRY_ID}" != "x" ]; then
log Removing stopped docker registry
docker rm -fv ${REGISTRY_ID} &> ${LOG_FILE}
fi
if [ "x${REGISTRY_ID}" = "x" ]; then
log Starting docker registry
docker run -d \
-p 5000:5000 \
-e REGISTRY_HTTP_ADDR=0.0.0.0:5000 \
--restart=always \
--name registry \
-v $REGISTRY_DATA_DIR:/var/lib/registry \
registry:2 &> ${LOG_FILE}
fi
}

tools/g2/lib/ssh.sh Normal file

@@ -0,0 +1,47 @@
rsync_cmd() {
rsync -e "ssh -F ${SSH_CONFIG_DIR}/config" ${@}
}
ssh_cmd() {
if [[ "x${PROMENADE_DEBUG}" = "x1" ]]; then
EXTRA_ARGS=-v
else
EXTRA_ARGS=
fi
ssh -F ${SSH_CONFIG_DIR}/config ${EXTRA_ARGS} ${@}
}
ssh_config_declare() {
log Validating SSH config exists
if [ ! -s ${SSH_CONFIG_DIR}/config ]; then
log Creating SSH config
env -i \
SSH_CONFIG_DIR=${SSH_CONFIG_DIR} \
envsubst < ${TEMPLATE_DIR}/ssh-config.sub > ${SSH_CONFIG_DIR}/config
fi
}
ssh_keypair_declare() {
log Validating SSH keypair exists
if [ ! -s ${SSH_CONFIG_DIR}/id_rsa ]; then
log Generating SSH keypair
ssh-keygen -N '' -f ${SSH_CONFIG_DIR}/id_rsa > ${LOG_FILE}
fi
}
ssh_load_pubkey() {
cat ${SSH_CONFIG_DIR}/id_rsa.pub
}
ssh_setup_declare() {
mkdir -p ${SSH_CONFIG_DIR}
ssh_keypair_declare
ssh_config_declare
}
ssh_wait() {
NAME=${1}
while ! ssh_cmd ${NAME} /bin/true; do
sleep 0.5
done
}

tools/g2/lib/validate.sh Normal file

@@ -0,0 +1,23 @@
validate_cluster() {
NAME=${1}
log Validating cluster via VM ${NAME}
rsync_cmd ${TEMP_DIR}/scripts/validate-cluster.sh ${NAME}:/root/promenade/
ssh_cmd ${NAME} /root/promenade/validate-cluster.sh
}
validate_etcd_membership() {
CLUSTER=${1}
VM=${2}
shift 2
EXPECTED_MEMBERS=${@}
log Validating ${CLUSTER} etcd membership via ${VM}
FOUND_MEMBERS=$(etcdctl_member_list ${CLUSTER} ${VM} | tr '\n' ' ' | sed 's/ $//')
if [[ "x${EXPECTED_MEMBERS}" != "x${FOUND_MEMBERS}" ]]; then
log Etcd membership check failed for cluster ${CLUSTER}
log Found \"${FOUND_MEMBERS}\", expected \"${EXPECTED_MEMBERS}\"
exit 1
fi
}

tools/g2/lib/virsh.sh Normal file

@@ -0,0 +1,199 @@
img_base_declare() {
log Validating base image exists
if ! virsh vol-key --pool ${VIRSH_POOL} --vol promenade-base.img > /dev/null; then
log Installing base image from ${BASE_IMAGE_URL}
cd ${TEMP_DIR}
curl -q -L -o base.img ${BASE_IMAGE_URL}
virsh vol-create-as \
--pool ${VIRSH_POOL} \
--name promenade-base.img \
--format qcow2 \
--capacity ${BASE_IMAGE_SIZE} \
--prealloc-metadata &> ${LOG_FILE}
virsh vol-upload \
--vol promenade-base.img \
--file base.img \
--pool ${VIRSH_POOL} &> ${LOG_FILE}
fi
}
iso_gen() {
NAME=${1}
if virsh vol-key --pool ${VIRSH_POOL} --vol cloud-init-${NAME}.iso &> /dev/null; then
log Removing existing cloud-init ISO for ${NAME}
virsh vol-delete \
--pool ${VIRSH_POOL} \
--vol cloud-init-${NAME}.iso &> ${LOG_FILE}
fi
log Creating cloud-init ISO for ${NAME}
ISO_DIR=${TEMP_DIR}/iso/${NAME}
mkdir -p ${ISO_DIR}
cd ${ISO_DIR}
export BR_IP_NODE=$(vm_ip ${NAME})
export NAME
export SSH_PUBLIC_KEY=$(ssh_load_pubkey)
envsubst < ${TEMPLATE_DIR}/user-data.sub > user-data
envsubst < ${TEMPLATE_DIR}/meta-data.sub > meta-data
envsubst < ${TEMPLATE_DIR}/network-config.sub > network-config
genisoimage \
-V cidata \
-input-charset utf-8 \
-joliet \
-rock \
-o cidata.iso \
meta-data \
network-config \
user-data &> ${LOG_FILE}
virsh vol-create-as \
--pool ${VIRSH_POOL} \
--name cloud-init-${NAME}.iso \
--capacity $(stat -c %s ${ISO_DIR}/cidata.iso) \
--format raw &> ${LOG_FILE}
virsh vol-upload \
--pool ${VIRSH_POOL} \
--vol cloud-init-${NAME}.iso \
--file ${ISO_DIR}/cidata.iso &> ${LOG_FILE}
}
iso_path() {
NAME=${1}
echo ${TEMP_DIR}/iso/${NAME}/cidata.iso
}
net_clean() {
log net_clean is not yet implemented.
exit 1
}
net_declare() {
if ! virsh net-list --name | grep ^promenade$ > /dev/null; then
log Creating promenade network
virsh net-create ${XML_DIR}/network.xml &> ${LOG_FILE}
fi
}
pool_declare() {
log Validating virsh pool setup
if ! virsh pool-uuid ${VIRSH_POOL} &> /dev/null; then
log Creating pool ${VIRSH_POOL}
virsh pool-create-as --name ${VIRSH_POOL} --type dir --target ${VIRSH_POOL_PATH} &> ${LOG_FILE}
fi
}
vm_clean() {
NAME=${1}
if virsh list --name | grep ${NAME} &> /dev/null; then
virsh destroy ${NAME} &> ${LOG_FILE}
fi
if virsh list --name --all | grep ${NAME} &> /dev/null; then
log Removing VM ${NAME}
virsh undefine --remove-all-storage --domain ${NAME} &> ${LOG_FILE}
fi
}
vm_clean_all() {
log Removing all VMs in parallel
for NAME in ${VM_NAMES[@]}; do
vm_clean ${NAME} &
done
wait
}
vm_create() {
NAME=${1}
iso_gen ${NAME}
vol_create_root ${NAME}
log Creating VM ${NAME}
virt-install \
--name ${NAME} \
--hvm \
--cpu host \
--graphics vnc,listen=0.0.0.0 \
--noautoconsole \
--network network=promenade \
--vcpus 2 \
--memory 2048 \
--import \
--disk vol=${VIRSH_POOL}/promenade-${NAME}.img,format=qcow2,bus=virtio \
--disk pool=${VIRSH_POOL},size=20,format=qcow2,bus=virtio \
--disk pool=${VIRSH_POOL},size=20,format=qcow2,bus=virtio \
--disk vol=${VIRSH_POOL}/cloud-init-${NAME}.iso,device=cdrom &> ${LOG_FILE}
ssh_wait ${NAME}
ssh_cmd ${NAME} sync
}
vm_create_all() {
log Starting all VMs in parallel
for NAME in ${VM_NAMES[@]}; do
vm_create ${NAME} &
done
wait
for NAME in ${VM_NAMES[@]}; do
vm_validate ${NAME}
done
}
vm_start() {
NAME=${1}
log Starting VM ${NAME}
virsh start ${NAME} &> ${LOG_FILE}
ssh_wait ${NAME}
}
vm_stop() {
NAME=${1}
log Stopping VM ${NAME}
virsh destroy ${NAME} &> ${LOG_FILE}
}
vm_restart_all() {
for NAME in ${VM_NAMES[@]}; do
vm_stop ${NAME} &
done
wait
for NAME in ${VM_NAMES[@]}; do
vm_start ${NAME} &
done
wait
}
vm_validate() {
NAME=${1}
if ! virsh list --name | grep ${NAME} &> /dev/null; then
log VM ${NAME} did not start correctly. Use PROMENADE_DEBUG=1 for more details.
exit 1
fi
}
vol_create_root() {
NAME=${1}
if virsh vol-list --pool ${VIRSH_POOL} | grep promenade-${NAME}.img &> /dev/null; then
log Deleting previous volume promenade-${NAME}.img
virsh vol-delete --pool ${VIRSH_POOL} promenade-${NAME}.img &> ${LOG_FILE}
fi
log Creating root volume for ${NAME}
virsh vol-create-as \
--pool ${VIRSH_POOL} \
--name promenade-${NAME}.img \
--capacity 64G \
--format qcow2 \
--backing-vol promenade-base.img \
--backing-vol-format qcow2 &> ${LOG_FILE}
}


@@ -0,0 +1,49 @@
{
"stages": [
{
"name": "Gate Setup",
"script": "gate-setup.sh"
},
{
"name": "Build Image",
"script": "build-image.sh"
},
{
"name": "Generate Certificates",
"script": "generate-certificates.sh"
},
{
"name": "Build Scripts",
"script": "build-scripts.sh"
},
{
"name": "Create VMs",
"script": "create-vms.sh"
},
{
"name": "Genesis",
"script": "genesis.sh"
},
{
"name": "Join Masters",
"script": "join-masters.sh",
"arguments": [
"n1",
"n2",
"n3"
]
},
{
"name": "Reprovision Genesis",
"script": "reprovision-genesis.sh"
},
{
"name": "Hard Reboot Cluster",
"script": "hard-reboot-cluster.sh"
},
{
"name": "Move Master",
"script": "move-master.sh"
}
]
}


@@ -0,0 +1,29 @@
{
"stages": [
{
"name": "Gate Setup",
"script": "gate-setup.sh"
},
{
"name": "Build Image",
"script": "build-image.sh"
},
{
"name": "Generate Certificates",
"script": "generate-certificates.sh"
},
{
"name": "Build Scripts",
"script": "build-scripts.sh"
},
{
"name": "Create VMs",
"script": "create-vms.sh"
},
{
"name": "Genesis",
"script": "genesis.sh"
}
]
}


@@ -0,0 +1,16 @@
{
"stages": [
{
"name": "Build Image",
"script": "build-image.sh"
},
{
"name": "Generate Certificates",
"script": "generate-certificates.sh"
},
{
"name": "Build Scripts",
"script": "build-scripts.sh"
}
]
}


@@ -0,0 +1,44 @@
{
"stages": [
{
"name": "Gate Setup",
"script": "gate-setup.sh"
},
{
"name": "Build Image",
"script": "build-image.sh"
},
{
"name": "Generate Certificates",
"script": "generate-certificates.sh"
},
{
"name": "Build Scripts",
"script": "build-scripts.sh"
},
{
"name": "Create VMs",
"script": "create-vms.sh"
},
{
"name": "Genesis",
"script": "genesis.sh"
},
{
"name": "Join Masters",
"script": "join-masters.sh",
"arguments": [
"n1",
"n2"
]
},
{
"name": "Reprovision Genesis",
"script": "reprovision-genesis.sh"
},
{
"name": "Hard Reboot Cluster",
"script": "hard-reboot-cluster.sh"
}
]
}

tools/g2/stages/build-image.sh Executable file

@@ -0,0 +1,8 @@
#!/usr/bin/env bash
set -e
source ${GATE_UTILS}
log Building docker image ${IMAGE_PROMENADE}
sudo docker build -q -t ${IMAGE_PROMENADE} ${WORKSPACE}


@@ -0,0 +1,20 @@
#!/usr/bin/env bash
set -e
source ${GATE_UTILS}
cd ${TEMP_DIR}
mkdir scripts
log Building scripts
sudo docker run --rm -t \
-w /target \
-v ${TEMP_DIR}:/target \
-e PROMENADE_DEBUG=${PROMENADE_DEBUG} \
${IMAGE_PROMENADE} \
promenade \
build-all \
--validators \
-o scripts \
config/*.yaml

tools/g2/stages/create-vms.sh Executable file

@@ -0,0 +1,8 @@
#!/usr/bin/env bash
set -e
source ${GATE_UTILS}
vm_clean_all
vm_create_all

tools/g2/stages/gate-setup.sh Executable file

@@ -0,0 +1,17 @@
#!/usr/bin/env bash
set -e
source ${GATE_UTILS}
# Docker registry (cache) setup
registry_up
registry_populate
# SSH setup
ssh_setup_declare
# Virsh setup
pool_declare
img_base_declare
net_declare


@@ -0,0 +1,24 @@
#!/usr/bin/env bash
set -e
source ${GATE_UTILS}
OUTPUT_DIR=${TEMP_DIR}/config
mkdir -p ${OUTPUT_DIR}
log Copying example configuration
cp ${WORKSPACE}/example/*.yaml ${OUTPUT_DIR}
registry_replace_references ${OUTPUT_DIR}/*.yaml
log Generating certificates
sudo docker run --rm -t \
-w /target \
-v ${OUTPUT_DIR}:/target \
-e PROMENADE_DEBUG=${PROMENADE_DEBUG} \
${IMAGE_PROMENADE} \
promenade \
generate-certs \
-o /target \
$(ls ${OUTPUT_DIR})

tools/g2/stages/genesis.sh Executable file

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
set -e
source ${GATE_UTILS}
rsync_cmd ${TEMP_DIR}/scripts/*genesis* ${GENESIS_NAME}:/root/promenade/
ssh_cmd ${GENESIS_NAME} /root/promenade/genesis.sh
ssh_cmd ${GENESIS_NAME} /root/promenade/validate-genesis.sh


@@ -0,0 +1,8 @@
#!/usr/bin/env bash
set -e
source $GATE_UTILS
vm_restart_all
validate_cluster ${GENESIS_NAME}

tools/g2/stages/join-masters.sh Executable file

@@ -0,0 +1,24 @@
#!/usr/bin/env bash
set -e
if [ $# -le 0 ]; then
echo "Must specify at least one vm to join"
exit 1
fi
source ${GATE_UTILS}
JOIN_TARGETS=${@}
for NAME in ${JOIN_TARGETS}; do
rsync_cmd ${TEMP_DIR}/scripts/*${NAME}* ${NAME}:/root/promenade/
ssh_cmd ${NAME} /root/promenade/join-${NAME}.sh
ssh_cmd ${NAME} /root/promenade/validate-${NAME}.sh
done
validate_cluster n0
validate_etcd_membership kubernetes n0 genesis n1 n2 n3
validate_etcd_membership calico n0 n0 n1 n2 n3

tools/g2/stages/move-master.sh Executable file

@@ -0,0 +1,35 @@
#!/usr/bin/env bash
set -e
source ${GATE_UTILS}
log Adding labels to node n0
kubectl_cmd n1 label node n0 \
calico-etcd=enabled \
kubernetes-apiserver=enabled \
kubernetes-controller-manager=enabled \
kubernetes-etcd=enabled \
kubernetes-scheduler=enabled
# XXX Need to wait
sleep 60
validate_etcd_membership kubernetes n1 n0 n1 n2 n3
validate_etcd_membership calico n1 n0 n1 n2 n3
log Removing labels from node n2
kubectl_cmd n1 label node n2 \
calico-etcd- \
kubernetes-apiserver- \
kubernetes-controller-manager- \
kubernetes-etcd- \
kubernetes-scheduler-
# XXX Need to wait
sleep 60
validate_cluster n1
validate_etcd_membership kubernetes n1 n0 n1 n3
validate_etcd_membership calico n1 n0 n1 n3


@@ -0,0 +1,20 @@
#!/usr/bin/env bash
set -e
source ${GATE_UTILS}
promenade_teardown_node ${GENESIS_NAME} n1
vm_clean ${GENESIS_NAME}
vm_create ${GENESIS_NAME}
rsync_cmd ${TEMP_DIR}/scripts/*${GENESIS_NAME}* ${GENESIS_NAME}:/root/promenade/
ssh_cmd ${GENESIS_NAME} /root/promenade/join-${GENESIS_NAME}.sh
ssh_cmd ${GENESIS_NAME} /root/promenade/validate-${GENESIS_NAME}.sh
validate_cluster n1
validate_etcd_membership kubernetes n1 n1 n2 n3
validate_etcd_membership calico n1 n1 n2 n3


@@ -0,0 +1,3 @@
#cloud-config
instance-id: promenade-${NAME}
local-hostname: ${NAME}


@@ -0,0 +1,13 @@
#cloud-config
version: 1
config:
- type: physical
name: ens3
subnets:
- type: static
address: ${BR_IP_NODE}/24
gateway: 192.168.77.1
- type: nameserver
address:
- 8.8.8.8
- 8.8.4.4


@@ -0,0 +1,17 @@
IdentityFile ${SSH_CONFIG_DIR}/id_rsa
LogLevel QUIET
StrictHostKeyChecking no
User root
UserKnownHostsFile /dev/null
Host n0
HostName 192.168.77.10
Host n1
HostName 192.168.77.11
Host n2
HostName 192.168.77.12
Host n3
HostName 192.168.77.13


@@ -0,0 +1,14 @@
#cloud-config
disable_root: false
hostname: ${NAME}
manage_etc_hosts: false
ssh_authorized_keys:
- ${SSH_PUBLIC_KEY}
chpasswd:
list: |
root:password
expire: false

tools/g2/xml/network.xml Normal file

@@ -0,0 +1,11 @@
<network>
<name>promenade</name>
<forward mode='nat'/>
<bridge name='prom-br' stp='on' delay='0'/>
<mac address='52:54:00:e7:94:3f'/>
<ip address='192.168.77.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.77.10' end='192.168.77.254'/>
</dhcp>
</ip>
</network>

tools/gate.sh Executable file

@@ -0,0 +1,59 @@
#!/usr/bin/env bash
set -e
SCRIPT_DIR=$(realpath $(dirname $0))
export WORKSPACE=$(realpath ${SCRIPT_DIR}/..)
export GATE_UTILS=${WORKSPACE}/tools/g2/lib/all.sh
export TEMP_DIR=$(mktemp -d)
chmod -R 755 ${TEMP_DIR}
export GATE_COLOR=${GATE_COLOR:-1}
source ${GATE_UTILS}
MANIFEST_ARG=${1:-full}
MANIFEST=${WORKSPACE}/tools/g2/manifests/${MANIFEST_ARG}.json
STAGES_DIR=${WORKSPACE}/tools/g2/stages
log_temp_dir ${TEMP_DIR}
echo
STAGES=$(mktemp)
jq -cr '.stages | .[]' ${MANIFEST} > ${STAGES}
# NOTE(mark-burnett): It is necessary to use a non-stdin file descriptor for
# the read below, since we will be calling SSH, which will consume the
# remaining data on STDIN.
exec 3< $STAGES
while read -u 3 stage; do
NAME=$(echo ${stage} | jq -r .name)
STAGE_CMD=${STAGES_DIR}/$(echo ${stage} | jq -r .script)
if echo ${stage} | jq -e .arguments > /dev/null; then
ARGUMENTS=($(echo ${stage} | jq -r '.arguments[]'))
else
ARGUMENTS=()
fi
log_stage_header "${NAME}"
if $STAGE_CMD ${ARGUMENTS[*]}; then
log_stage_success
else
log_color_reset
log_stage_error "${NAME}" ${TEMP_DIR}
if echo ${stage} | jq -e .on_error > /dev/null; then
log_stage_diagnostic_header
ON_ERROR=${WORKSPACE}/$(echo ${stage} | jq -r .on_error)
set +e
$ON_ERROR
fi
exit 1
fi
log_stage_footer "${NAME}"
echo
done
echo
log_huge_success

tools/setup_gate.sh Executable file

@@ -0,0 +1,69 @@
#!/usr/bin/env bash
set -e
SCRIPT_DIR=$(realpath $(dirname $0))
export WORKSPACE=$(realpath ${SCRIPT_DIR}/..)
export GATE_UTILS=${WORKSPACE}/tools/g2/lib/all.sh
export GATE_COLOR=${GATE_COLOR:-1}
source ${GATE_UTILS}
REQUIRE_REBOOT=0
REQUIRE_RELOG=0
log_stage_header "Installing Packages"
export DEBIAN_FRONTEND=noninteractive
sudo apt-get update -qq
sudo apt-get install -q -y --no-install-recommends \
curl \
docker.io \
genisoimage \
jq \
libvirt-bin \
virtinst
log_stage_header "Joining User Groups"
for grp in docker libvirtd; do
if ! groups | grep $grp > /dev/null; then
sudo adduser `id -un` $grp
REQUIRE_RELOG=1
fi
done
log_stage_header "Setting Kernel Parameters"
if [ "xY" != "x$(cat /sys/module/kvm_intel/parameters/nested)" ]; then
log_note Enabling nested virtualization.
sudo modprobe -r kvm_intel
sudo modprobe kvm_intel nested=1
echo "options kvm-intel nested=1" | sudo tee /etc/modprobe.d/kvm-intel.conf
fi
if ! sudo virt-host-validate qemu &> /dev/null; then
if ! grep intel_iommu /etc/default/grub &> /dev/null; then
log_note Enabling Intel IOMMU
REQUIRE_REBOOT=1
sudo touch /etc/default/grub
echo 'GRUB_CMDLINE_LINUX_DEFAULT="${GRUB_CMDLINE_LINUX_DEFAULT} intel_iommu=on"' | sudo tee -a /etc/default/grub
else
echo -e ${C_ERROR}Failed to configure virtualization:${C_CLEAR}
sudo virt-host-validate qemu
exit 1
fi
fi
if [ ! -d ${VIRSH_POOL_PATH} ]; then
sudo mkdir -p ${VIRSH_POOL_PATH}
fi
if [ $REQUIRE_REBOOT -eq 1 ]; then
echo
log_note You must ${C_HEADER}reboot${C_CLEAR} before the gate is ready to run.
elif [ $REQUIRE_RELOG -eq 1 ]; then
echo
log_note You must ${C_HEADER}log out${C_CLEAR} and back in before the gate is ready to run.
fi
log_huge_success

tools/stop_gate.sh Executable file

@@ -0,0 +1,12 @@
#!/usr/bin/env bash
set -e
SCRIPT_DIR=$(realpath $(dirname $0))
export WORKSPACE=$(realpath ${SCRIPT_DIR}/..)
export GATE_UTILS=${WORKSPACE}/tools/g2/lib/all.sh
source ${GATE_UTILS}
vm_clean_all
registry_down