[WIP] Make AIAB container-runtime-agnostic
Make AIAB container-runtime-agnostic in two ways: replace Docker-native operations with Kubernetes-native ones (e.g., run the Shipyard and Drydock CLI commands as Kubernetes Jobs instead of `docker run`), and introduce a CONTAINER_RUNTIME environment variable to switch between Docker and containerd where a direct runtime call is still required. Change-Id: I1e7613d149c2064d20cd601f42857d4ad1e59226
This commit is contained in:
parent
d7d345fd24
commit
c91f34eb98
|
@ -69,7 +69,12 @@ ip route show > "${BASE_DIR}/ip-route"
|
|||
cp -p /etc/resolv.conf "${BASE_DIR}/"
|
||||
|
||||
env | sort --ignore-case > "${BASE_DIR}/environment"
|
||||
docker images > "${BASE_DIR}/docker-images"
|
||||
if [[ "$CONTAINER_RUNTIME" == "CONTAINERD" || "$CONTAINER_RUNTIME" == "containerd" ]]
|
||||
then
|
||||
ctr images ls > "${BASE_DIR}/containerd-images"
|
||||
else
|
||||
docker images > "${BASE_DIR}/docker-images"
|
||||
fi
|
||||
|
||||
if which calicoctl; then
|
||||
mkdir -p "${CALICO_DIR}"
|
||||
|
|
|
@ -13,33 +13,68 @@ install_ingress_ca() {
|
|||
rsync_cmd "$local_file" "${BUILD_NAME}":"$remote_file"
|
||||
}
|
||||
|
||||
shipard_cmd_stdout() {
|
||||
shipyard_cmd_stdout() {
|
||||
# needed to reach airship endpoints
|
||||
dns_netspec="$(config_netspec_for_role "dns")"
|
||||
dns_server=$(config_vm_net_ip "${BUILD_NAME}" "$dns_netspec")
|
||||
install_ingress_ca
|
||||
ssh_cmd "${BUILD_NAME}" \
|
||||
docker run -t --network=host \
|
||||
--dns "${dns_server}" \
|
||||
-v "${BUILD_WORK_DIR}:/work" \
|
||||
-e OS_AUTH_URL="${AIRSHIP_KEYSTONE_URL}" \
|
||||
-e OS_USERNAME=shipyard \
|
||||
-e OS_USER_DOMAIN_NAME=default \
|
||||
-e OS_PASSWORD="${SHIPYARD_PASSWORD}" \
|
||||
-e OS_PROJECT_DOMAIN_NAME=default \
|
||||
-e OS_PROJECT_NAME=service \
|
||||
-e REQUESTS_CA_BUNDLE=/work/ingress_ca.pem \
|
||||
--entrypoint /usr/local/bin/shipyard "${IMAGE_SHIPYARD_CLI}" "$@" 2>&1
|
||||
local_jobspec="${TEMP_DIR}/shipyard-job.yaml"
|
||||
remote_jobspec="${BUILD_WORK_DIR}/shipyard-job.yaml"
|
||||
cat << EOF > "$local_jobspec"
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: shipyard-command
|
||||
namespace: ucp
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: shipyard-command
|
||||
image: ${IMAGE_SHIPYARD_CLI}
|
||||
command: ["/usr/local/bin/shipyard"]
|
||||
args: ["$@", "2>&1"]
|
||||
env:
|
||||
- name: "OS_AUTH_URL"
|
||||
value: "${AIRSHIP_KEYSTONE_URL}"
|
||||
- name: "OS_USERNAME"
|
||||
value: "shipyard"
|
||||
- name: "OS_USER_DOMAIN"
|
||||
value: "default"
|
||||
- name: "OS_PASSWORD"
|
||||
value: "${SHIPYARD_PASSWORD}"
|
||||
- name: "OS_PROJECT_DOMAIN_NAME"
|
||||
value: "default"
|
||||
- name: "OS_PROJECT_NAME"
|
||||
value: "service"
|
||||
- name: "REQUESTS_CA_BUNDLE"
|
||||
value: "/work/ingress_ca.pem"
|
||||
volumeMounts:
|
||||
- name: ${BUILD_WORK_DIR}
|
||||
mountPath: /work
|
||||
stdin: true
|
||||
tty: true
|
||||
restartPolicy: Never
|
||||
dnsConfig:
|
||||
nameservers: "$dns_server"
|
||||
hostNetwork: true
|
||||
activeDeadlineSeconds: ${timeout}
|
||||
backoffLimit: 1
|
||||
EOF
|
||||
rsync_cmd "$local_jobspec" "${BUILD_NAME}":"$remote_jobspec"
|
||||
ssh_cmd "${BUILD_NAME}" kubectl create -f "$remote_jobspec"
|
||||
ssh_cmd "${BUILD_NAME}" kubectl logs -f shipyard-command
|
||||
ssh_cmd "${BUILD_NAME}" kubectl delete jobs/shipyard-command
|
||||
}
|
||||
|
||||
shipyard_cmd() {
|
||||
if [[ ! -z "${LOG_FILE}" ]]
|
||||
then
|
||||
set -o pipefail
|
||||
shipard_cmd_stdout "$@" | tee -a "${LOG_FILE}"
|
||||
shipyard_cmd_stdout "$@" | tee -a "${LOG_FILE}"
|
||||
set +o pipefail
|
||||
else
|
||||
shipard_cmd_stdout "$@"
|
||||
shipyard_cmd_stdout "$@"
|
||||
fi
|
||||
}
|
||||
|
||||
|
@ -47,20 +82,57 @@ drydock_cmd_stdout() {
|
|||
dns_netspec="$(config_netspec_for_role "dns")"
|
||||
dns_server="$(config_vm_net_ip "${BUILD_NAME}" "$dns_netspec")"
|
||||
install_ingress_ca
|
||||
ssh_cmd "${BUILD_NAME}" \
|
||||
docker run -t --network=host \
|
||||
--dns "${dns_server}" \
|
||||
-v "${BUILD_WORK_DIR}:/work" \
|
||||
-e DD_URL=http://drydock-api.ucp.svc.cluster.local:9000 \
|
||||
-e OS_AUTH_URL="${AIRSHIP_KEYSTONE_URL}" \
|
||||
-e OS_USERNAME=shipyard \
|
||||
-e OS_USER_DOMAIN_NAME=default \
|
||||
-e OS_PASSWORD="${SHIPYARD_PASSWORD}" \
|
||||
-e OS_PROJECT_DOMAIN_NAME=default \
|
||||
-e OS_PROJECT_NAME=service \
|
||||
-e REQUESTS_CA_BUNDLE=/work/ingress_ca.pem \
|
||||
--entrypoint /usr/local/bin/drydock "${IMAGE_DRYDOCK_CLI}" "$@" 2>&1
|
||||
local_jobspec="${TEMP_DIR}/drydock-job.yaml"
|
||||
remote_jobspec="${BUILD_WORK_DIR}/drydock-job.yaml"
|
||||
cat << EOF > "$local_jobspec"
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: drydock-command
|
||||
namespace: ucp
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: drydock-command
|
||||
image: ${IMAGE_DRYDOCK_CLI}
|
||||
command: ["/usr/local/bin/drydock"]
|
||||
args: ["$@", "2>&1"]
|
||||
env:
|
||||
- name: "DD_URL"
|
||||
value: "http://drydock-api.ucp.svc.cluster.local:9000"
|
||||
- name: "OS_AUTH_URL"
|
||||
value: "${AIRSHIP_KEYSTONE_URL}"
|
||||
- name: "OS_USERNAME"
|
||||
value: "shipyard"
|
||||
- name: "OS_USER_DOMAIN"
|
||||
value: "default"
|
||||
- name: "OS_PASSWORD"
|
||||
value: "${SHIPYARD_PASSWORD}"
|
||||
- name: "OS_PROJECT_DOMAIN_NAME"
|
||||
value: "default"
|
||||
- name: "OS_PROJECT_NAME"
|
||||
value: "service"
|
||||
- name: "REQUESTS_CA_BUNDLE"
|
||||
value: "/work/ingress_ca.pem"
|
||||
volumeMounts:
|
||||
- name: ${BUILD_WORK_DIR}
|
||||
mountPath: /work
|
||||
stdin: true
|
||||
tty: true
|
||||
restartPolicy: Never
|
||||
dnsConfig:
|
||||
nameservers: "$dns_server"
|
||||
hostNetwork: true
|
||||
activeDeadlineSeconds: ${timeout}
|
||||
backoffLimit: 1
|
||||
EOF
|
||||
rsync_cmd "$local_jobspec" "${BUILD_NAME}":"$remote_jobspec"
|
||||
ssh_cmd "${BUILD_NAME}" kubectl create -f "$remote_jobspec"
|
||||
ssh_cmd "${BUILD_NAME}" kubectl logs -f drydock-command
|
||||
ssh_cmd "${BUILD_NAME}" kubectl delete jobs/drydock-command
|
||||
}
|
||||
|
||||
drydock_cmd() {
|
||||
if [[ ! -z "${LOG_FILE}" ]]
|
||||
then
|
||||
|
|
|
@ -9,6 +9,8 @@ export REPO_ROOT
|
|||
source "$LIB_DIR"/config.sh
|
||||
source "$LIB_DIR"/const.sh
|
||||
source "$LIB_DIR"/docker.sh
|
||||
source "$LIB_DIR"/containerd.sh
|
||||
source "$LIB_DIR"/container-runtime.sh
|
||||
source "$LIB_DIR"/kube.sh
|
||||
source "$LIB_DIR"/log.sh
|
||||
source "$LIB_DIR"/nginx.sh
|
||||
|
|
|
@ -33,5 +33,10 @@ bgp_router_start() {
|
|||
rsync_cmd "$QUAGGA_DEBIAN_CONF" "${nodename}:${remote_debian_conf_file}"
|
||||
rsync_cmd "$QUAGGA_BGPD_CONF" "${nodename}:${remote_bgpd_conf_file}"
|
||||
|
||||
ssh_cmd "${nodename}" docker run -ti -d --net=host --privileged -v /var/tmp/quagga:/etc/quagga --restart always --name Quagga "$IMAGE_QUAGGA"
|
||||
if [[ "$CONTAINER_RUNTIME" == "CONTAINERD" || "$CONTAINER_RUNTIME" == "containerd" ]]
|
||||
then
|
||||
ssh_cmd "${nodename}" ctr containers create -t --privileged --net-host --mount type=bind,src=/var/tmp/quagga,dst=/etc/quagga "$IMAGE_QUAGGA" Quagga
|
||||
else
|
||||
ssh_cmd "${nodename}" docker run -ti -d --net=host --privileged -v /var/tmp/quagga:/etc/quagga --restart always --name Quagga "$IMAGE_QUAGGA"
|
||||
fi
|
||||
}
|
||||
|
|
|
@ -27,6 +27,7 @@ export UPSTREAM_DNS=${UPSTREAM_DNS:-"8.8.8.8 8.8.4.4"}
|
|||
export NTP_POOLS=${NTP_POOLS:-"0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org"}
|
||||
export NTP_SERVERS=${NTP_SERVERS:-""}
|
||||
export PROMENADE_ENCRYPTION_KEY=${PROMENADE_ENCRYPTION_KEY:-MjI1N2ZiMjMzYjI0ZmVkZDU4}
|
||||
export CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-"DOCKER"}
|
||||
|
||||
# key-pair used for drydock/maas auth towards libvirt and access to
|
||||
# the virtual nodes; auto-generated if no value provided
|
||||
|
|
|
@ -0,0 +1,40 @@
|
|||
#!/bin/bash
|
||||
# List containers on node $1 via whichever runtime CONTAINER_RUNTIME selects
# ("CONTAINERD"/"containerd" -> ctr, anything else -> docker).
# Fix: the original if-block fell through to docker_ps even in the
# containerd case because it lacked an else branch.
container_runtime_ps() {
  if [[ "$CONTAINER_RUNTIME" == "CONTAINERD" || "$CONTAINER_RUNTIME" == "containerd" ]]
  then
    containerd_ps "${1}"
  else
    docker_ps "${1}"
  fi
}
|
||||
|
||||
# Dump runtime diagnostic info for node $1 using the configured runtime.
# Fix: the original if-block fell through to docker_info even in the
# containerd case because it lacked an else branch.
container_runtime_info() {
  if [[ "$CONTAINER_RUNTIME" == "CONTAINERD" || "$CONTAINER_RUNTIME" == "containerd" ]]
  then
    containerd_info "${1}"
  else
    docker_info "${1}"
  fi
}
|
||||
|
||||
# List exited container IDs on node $1 using the configured runtime.
# (containerd_exited_containers is currently a no-op, so containerd mode
# yields no IDs.)
# Fix: the original if-block fell through to docker_exited_containers even
# in the containerd case because it lacked an else branch.
container_runtime_exited_containers() {
  if [[ "$CONTAINER_RUNTIME" == "CONTAINERD" || "$CONTAINER_RUNTIME" == "containerd" ]]
  then
    containerd_exited_containers "${1}"
  else
    docker_exited_containers "${1}"
  fi
}
|
||||
|
||||
# Inspect container $2 on node $1 using the configured runtime.
# Fix: this dispatcher was (mis)named containerd_inspect, which both
# shadowed the real containerd_inspect helper in containerd.sh and called
# itself recursively (infinite recursion) in the containerd case; it also
# fell through to docker_inspect for lack of an else branch. Renamed to
# match the container_runtime_* dispatcher convention used above.
container_runtime_inspect() {
  if [[ "$CONTAINER_RUNTIME" == "CONTAINERD" || "$CONTAINER_RUNTIME" == "containerd" ]]
  then
    containerd_inspect "${1}" "${2}"
  else
    docker_inspect "${1}" "${2}"
  fi
}
|
||||
|
||||
# Fetch logs for a container using the configured runtime.
# $1 - node to run on; $2 - container ID (callers in the error-gathering
# script pass both, e.g. docker_logs "${VIA}" "${container_id}").
# Fixes: this dispatcher was (mis)named containerd_logs, shadowing the real
# containerd_logs helper and recursing forever in the containerd case; it
# fell through to docker_logs for lack of an else branch; and it dropped
# the second (container ID) argument. Renamed to match the
# container_runtime_* dispatcher convention and forwards all arguments.
container_runtime_logs() {
  if [[ "$CONTAINER_RUNTIME" == "CONTAINERD" || "$CONTAINER_RUNTIME" == "containerd" ]]
  then
    containerd_logs "$@"
  else
    docker_logs "$@"
  fi
}
|
|
@ -0,0 +1,35 @@
|
|||
#!/bin/bash
|
||||
# List containerd containers on a remote node.
# $1 - node name to ssh into.
# Fix: VIA was assigned without `local`, clobbering any global VIA used by
# the calling script (the error-gathering script reads a global VIA).
containerd_ps() {
  local via="${1}"
  ssh_cmd "${via}" ctr containers ls
}
|
||||
|
||||
# Dump containerd diagnostics (version, plugins, containers, tasks, images)
# for a remote node, with stderr folded into stdout so callers can tee it.
# $1 - node name to ssh into.
# Fix: in the original, only `ctr version` was passed to ssh_cmd; the rest
# of the `&& ctr ...` chain executed on the LOCAL host, not the node. Each
# ctr invocation now goes through ssh_cmd, with the section headers echoed
# locally between them.
containerd_info() {
  local via="${1}"
  {
    ssh_cmd "${via}" ctr version
    echo "*** PLUGINS ***"
    ssh_cmd "${via}" ctr plugins ls
    echo "*** CONTAINERS ***"
    ssh_cmd "${via}" ctr containers ls
    echo "*** TASKS ***"
    ssh_cmd "${via}" ctr tasks ls
    echo "*** IMAGES ***"
    ssh_cmd "${via}" ctr images ls
  } 2>&1
}
|
||||
|
||||
# Listing exited containers is not implemented for containerd yet; emit
# nothing and succeed so callers iterating over the output simply see an
# empty list.
containerd_exited_containers() {
  :
}
|
||||
|
||||
# Show containerd metadata for a single container on a remote node.
# $1 - node name to ssh into; $2 - container ID.
# Fix: VIA and CONTAINER_ID were assigned without `local`, clobbering
# globals of the same name in the calling script.
containerd_inspect() {
  local via="${1}"
  local container_id="${2}"
  ssh_cmd "${via}" ctr containers info "${container_id}"
}
|
||||
|
||||
# Fetching container logs is not implemented for containerd yet; emit
# nothing and succeed so log-gathering callers get an empty section rather
# than an error.
containerd_logs() {
  :
}
|
|
@ -35,5 +35,11 @@ ingress_dns_start() {
|
|||
ssh_cmd "${nodename}" mkdir -p "${remote_work_dir}"
|
||||
rsync_cmd "$DNS_ZONE_FILE" "${nodename}:${remote_zone_file}"
|
||||
rsync_cmd "$COREFILE" "${nodename}:${remote_corefile}"
|
||||
ssh_cmd "${nodename}" docker run -d -v /var/tmp/coredns:/data -w /data --network host --restart always -P "$IMAGE_COREDNS" -conf "$(basename "$remote_corefile")"
|
||||
|
||||
if [[ "$CONTAINER_RUNTIME" == "CONTAINERD" || "$CONTAINER_RUNTIME" == "containerd" ]]
|
||||
then
|
||||
ssh_cmd "${nodename}" ctr containers create --net-host --mount type=bind,src=/var/tmp/coredns,dst=/data --cwd /data "$IMAGE_COREDNS" coredns
|
||||
else
|
||||
ssh_cmd "${nodename}" docker run -d -v /var/tmp/coredns:/data -w /data --network host --restart always -P "$IMAGE_COREDNS" -conf "$(basename "$remote_corefile")"
|
||||
fi
|
||||
}
|
||||
|
|
|
@ -26,15 +26,15 @@ mkdir -p "${ERROR_DIR}"
|
|||
|
||||
log "Gathering info from failed genesis server (n0) in ${ERROR_DIR}"
|
||||
|
||||
log "Gathering docker info for exitted containers"
|
||||
mkdir -p "${ERROR_DIR}/docker"
|
||||
docker_ps "${VIA}" | tee "${ERROR_DIR}/docker/ps"
|
||||
docker_info "${VIA}" | tee "${ERROR_DIR}/docker/info"
|
||||
log "Gathering container runtime info"
|
||||
mkdir -p "${ERROR_DIR}/container-runtime"
|
||||
container_runtime_ps "${VIA}" | tee "${ERROR_DIR}/container-runtime/ps"
|
||||
container_runtime_info "${VIA}" | tee "${ERROR_DIR}/container-runtime/info"
|
||||
|
||||
for container_id in $(docker_exited_containers "${VIA}"); do
|
||||
docker_inspect "${VIA}" "${container_id}" | tee "${ERROR_DIR}/docker/${container_id}"
|
||||
echo "=== Begin logs ===" | tee -a "${ERROR_DIR}/docker/${container_id}"
|
||||
docker_logs "${VIA}" "${container_id}" | tee -a "${ERROR_DIR}/docker/${container_id}"
|
||||
for container_id in $(container_runtime_exited_containers "${VIA}"); do
|
||||
docker_inspect "${VIA}" "${container_id}" | tee "${ERROR_DIR}/container-runtime/${container_id}"
|
||||
echo "=== Begin logs ===" | tee -a "${ERROR_DIR}/container-runtime/${container_id}"
|
||||
docker_logs "${VIA}" "${container_id}" | tee -a "${ERROR_DIR}/container-runtime/${container_id}"
|
||||
done
|
||||
|
||||
log "Gathering kubectl output"
|
||||
|
|
Loading…
Reference in New Issue