diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..3bedcc10 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,2 @@ +Makefile +promenade-*.tar diff --git a/.gitignore b/.gitignore index 85648dc2..6f122c39 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,8 @@ -*.retry -hosts-deploy.yaml -roles/deploy-kubelet/templates/kubeconfig +/*.log +/*.tar +/.vagrant +/cni.tgz +/env.sh +/helm +/kubelet +/linux-amd64 diff --git a/Dockerfile.genesis b/Dockerfile.genesis new file mode 100644 index 00000000..6b1f0871 --- /dev/null +++ b/Dockerfile.genesis @@ -0,0 +1,37 @@ +# Copyright 2017 The Promenade Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM ubuntu:xenial + +ENV NODE_HOSTNAME= + +RUN apt-get update -qq \ + && apt-get install --no-install-recommends -y \ + docker.io \ + gettext-base \ + && rm -rf /var/lib/apt/lists/* \ + && mkdir /promenade \ + && mkdir /promenade/assets \ + && mkdir /promenade/scripts + +WORKDIR /promenade + +ENTRYPOINT /promenade/scripts/entrypoint.sh + +COPY genesis-images.tar cni.tgz helm kubelet /promenade/ + +COPY kubelet.service.template /promenade/ +COPY env.sh scripts/common/* /promenade/scripts/ +COPY scripts/entrypoint-genesis.sh /promenade/scripts/entrypoint.sh +COPY assets/ /promenade/assets/ diff --git a/Dockerfile.join b/Dockerfile.join new file mode 100644 index 00000000..0f8850e4 --- /dev/null +++ b/Dockerfile.join @@ -0,0 +1,37 @@ +# Copyright 2017 The Promenade Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM ubuntu:xenial + +ENV NODE_HOSTNAME= + +RUN apt-get update -qq \ + && apt-get install --no-install-recommends -y \ + docker.io \ + gettext-base \ + && rm -rf /var/lib/apt/lists/* \ + && mkdir /promenade \ + && mkdir /promenade/assets \ + && mkdir /promenade/scripts + +WORKDIR /promenade + +ENTRYPOINT /promenade/scripts/entrypoint.sh + +COPY join-images.tar cni.tgz kubelet /promenade/ + +COPY kubelet.service.template /promenade/ +COPY env.sh scripts/common/* /promenade/scripts/ +COPY scripts/entrypoint-join.sh /promenade/scripts/entrypoint.sh +COPY assets/kubeconfig assets/auth/kubeconfig /promenade/assets/ diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..ec507e70 --- /dev/null +++ b/Makefile @@ -0,0 +1,141 @@ +# Copyright 2017 The Promenade Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +#---------------# +# Configuration # +#---------------# +BOOTKUBE_VERSION := v0.4.1 +CNI_VERSION := v0.5.2 +HELM_VERSION := v2.3.1 +KUBERNETES_VERSION := v1.6.2 + +NAMESPACE := quay.io/attcomdev +GENESIS_REPO := promenade-genesis +JOIN_REPO := promenade-join +TAG := dev + +GENESIS_IMAGES := \ + gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.1 \ + gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.1 \ + gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.1 \ + gcr.io/google_containers/pause-amd64:3.0 \ + quay.io/calico/cni:v1.7.0 \ + quay.io/calico/kube-policy-controller:v0.5.4 \ + quay.io/calico/node:v1.1.3 \ + quay.io/coreos/bootkube:$(BOOTKUBE_VERSION) \ + quay.io/coreos/etcd-operator:v0.2.5 \ + quay.io/coreos/etcd:v3.1.4 \ + quay.io/coreos/etcd:v3.1.6 \ + quay.io/coreos/flannel:v0.7.1 \ + quay.io/coreos/hyperkube:$(KUBERNETES_VERSION)_coreos.0 \ + quay.io/coreos/kenc:48b6feceeee56c657ea9263f47b6ea091e8d3035 \ + quay.io/coreos/pod-checkpointer:20cf8b9a6018731a0770192f30dfa7a1941521e3 \ + +JOIN_IMAGES := \ + gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.1 \ + gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.1 \ + gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.1 \ + gcr.io/google_containers/pause-amd64:3.0 \ + quay.io/calico/cni:v1.7.0 \ + quay.io/calico/kube-policy-controller:v0.5.4 \ + quay.io/calico/node:v1.1.3 \ + quay.io/coreos/etcd-operator:v0.2.5 \ + quay.io/coreos/etcd:v3.1.4 \ + quay.io/coreos/etcd:v3.1.6 \ + quay.io/coreos/flannel:v0.7.1 \ + quay.io/coreos/hyperkube:$(KUBERNETES_VERSION)_coreos.0 \ + quay.io/coreos/kenc:48b6feceeee56c657ea9263f47b6ea091e8d3035 \ + quay.io/coreos/pod-checkpointer:20cf8b9a6018731a0770192f30dfa7a1941521e3 \ + + +#-------# +# Rules # +#-------# +all: build + +build: build-genesis build-join + +push: push-genesis push-join + +save: save-genesis save-join + +genesis: build-genesis + +build-genesis: Dockerfile.genesis cni.tgz env.sh helm genesis-images.tar kubelet kubelet.service.template + 
sudo docker build -f Dockerfile.genesis -t $(NAMESPACE)/$(GENESIS_REPO):$(TAG) . + +push-genesis: build-genesis + sudo docker push $(NAMESPACE)/$(GENESIS_REPO):$(TAG) + +save-genesis: build-genesis + sudo docker save $(NAMESPACE)/$(GENESIS_REPO):$(TAG) > promenade-genesis.tar + + +join: build-join + +build-join: Dockerfile.join join-images.tar kubelet.service.template + sudo docker build -f Dockerfile.join -t $(NAMESPACE)/$(JOIN_REPO):$(TAG) . + +push-join: build-join + sudo docker push $(NAMESPACE)/$(JOIN_REPO):$(TAG) + +save-join: build-join + sudo docker save $(NAMESPACE)/$(JOIN_REPO):$(TAG) > promenade-join.tar + +cni.tgz: + curl -Lo cni.tgz https://github.com/containernetworking/cni/releases/download/$(CNI_VERSION)/cni-amd64-$(CNI_VERSION).tgz + +env.sh: Makefile + rm -f env.sh + echo export BOOTKUBE_VERSION=$(BOOTKUBE_VERSION) >> env.sh + echo export CNI_VERSION=$(CNI_VERSION) >> env.sh + echo export HELM_VERSION=$(HELM_VERSION) >> env.sh + echo export KUBERNETES_VERSION=$(KUBERNETES_VERSION) >> env.sh + +helm: + curl -Lo helm.tgz https://storage.googleapis.com/kubernetes-helm/helm-$(HELM_VERSION)-linux-amd64.tar.gz + tar xf helm.tgz + mv linux-amd64/helm ./helm + rm -rf ./linux-amd64/ + rm -f helm.tgz + chmod +x helm + +genesis-images.tar: + for IMAGE in $(GENESIS_IMAGES); do \ + sudo docker pull $$IMAGE; \ + done + sudo docker save -o genesis-images.tar $(GENESIS_IMAGES) + +join-images.tar: + for IMAGE in $(JOIN_IMAGES); do \ + sudo docker pull $$IMAGE; \ + done + sudo docker save -o join-images.tar $(JOIN_IMAGES) + +kubelet: + curl -LO http://storage.googleapis.com/kubernetes-release/release/$(KUBERNETES_VERSION)/bin/linux/amd64/kubelet + chmod +x kubelet + +clean: + rm -rf \ + *.tar \ + cni.tgz \ + env.sh \ + helm \ + helm.tgz \ + kubelet \ + linux-amd64 \ + + +.PHONY : all build build-genesis build-join clean genesis join push push-genesis push-join save save-genesis save-join diff --git a/README.md b/README.md index 5d8d8fb3..61086169 100644 --- a/README.md +++ b/README.md @@ -1,237 
+1,81 @@ -# Promenade: Manually Self-hosted Kubernetes via Bootkube -A small howto on how to bring up a self-hosted kubernetes cluster +# Overview -We'll use [bootkube](https://github.com/kubernetes-incubator/bootkube) to initiate the master-components. First we'll render the assets necessary for bringing up the control plane (apiserver, controller-manger, scheduler, etc). Then we'll start the kubelets which job is it to start the assets but can't do much, because there's no API-server yet. Running `bootkube` once will kick things off then. At a high-level the bootstrapping process looks like this: +Promenade is a tool for deploying self-hosted, highly resilient Kubernetes clusters using +[bootkube](https://github.com/kubernetes-incubator/bootkube). Currently, Promenade works by leveraging Docker containers with the Bootkube binaries in order to set up Kubernetes on the host operating system. Default Kubernetes assets and manifests are included in this repo, but it is recommended to render or supply your own assets for real-world deployments. -![Self-Hosted](./img/self-hosted-moving-parts.png?raw=true "Self-hosted-moving-parts") +## Quickstart using Vagrant -Image taken from the [self-hosted proposal](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/self-hosted-kubernetes.md). +Make sure you have [Vagrant](https://vagrantup.com) and +[VirtualBox](https://www.virtualbox.org/wiki/Downloads) installed. Then +install the `vagrant-hostmanager` plugin. - -This is how the final cluster looks like from a `kubectl` perspective: - -![Screenshot](./img/self-hosted.png?raw=true "Screenshot") - -Let's start! 
-## Temporary apiserver: `bootkube` -### Download -``` -wget https://github.com/kubernetes-incubator/bootkube/releases/download/v0.3.9/bootkube.tar.gz -tar xvzf bootkube.tar.gz -sudo cp bin/linux/bootkube /usr/bin/ +```bash +vagrant plugin install vagrant-hostmanager ``` -### Render the Assets -Exchange `10.7.183.59` with the node you are working on. If you have DNS available group all master node IP addresses behind a CNAME Record and provide this insted. -``` -bootkube render --asset-dir=assets --experimental-self-hosted-etcd --etcd-servers=http://10.3.0.15:2379 --api-servers=https://10.7.183.59:443 -``` -This will generate several things: -- manifests for running apiserver, controller-manager, scheduler, flannel, etcd, dns and kube-proxy -- a `kubeconfig` file for connecting to and authenticating with the apiserver -- TLS assets +Build the genesis and join images and save them to disk for quick loading into +the Vagrant VMs. -## Start the Master Kubelet -### Download `hyperkube` -``` -wget http://storage.googleapis.com/kubernetes-release/release/v1.5.3/bin/linux/amd64/hyperkube -O ./hyperkube -sudo mv hyperkube /usr/bin/hyperkube -sudo chmod 755 /usr/bin/hyperkube +```bash +make save ``` -### Install CNI -``` -sudo mkdir -p /opt/cni/bin -wget https://github.com/containernetworking/cni/releases/download/v0.4.0/cni-amd64-v0.4.0.tbz2 -sudo tar xjf cni-amd64-v0.4.0.tbz2 -C /opt/cni/bin/ +Start the VMs and save a snapshot for quicker iteration: + +```bash +vagrant up +vagrant snapshot save clean ``` -### Copy Configuration Files -``` -sudo cp assets/auth/kubeconfig /etc/kubernetes/ -sudo cp -a assets/manifests /etc/kubernetes/ -``` -### Start the Kubelet -``` -sudo hyperkube kubelet --kubeconfig=/etc/kubernetes/kubeconfig \ - --require-kubeconfig \ - --cni-conf-dir=/etc/kubernetes/cni/net.d \ - --network-plugin=cni \ - --lock-file=/var/run/lock/kubelet.lock \ - --exit-on-lock-contention \ - --pod-manifest-path=/etc/kubernetes/manifests \ - --allow-privileged \ - 
--node-labels=master=true \ - --minimum-container-ttl-duration=6m0s \ - --cluster_dns=10.3.0.10 \ - --cluster_domain=cluster.local \ - --hostname-override=10.7.183.59 -``` -The TLS credentials generated by `bootkube render` in assets/tls/ are copied to a secret: assets/manifests/kube-apiserver-secret.yaml. +Spin up a cluster: -### Start the Temporary API Server -bootkube will serve as the temporary apiserver so the kubelet from above can start the real apiserver in a pod -``` -sudo bootkube start --asset-dir=./assets --experimental-self-hosted-etcd --etcd-server=http://127.0.0.1:12379 -``` -bootkube should exit itself after successfully bootstrapping the master components. It's only needed for the very first bootstrapping - -### Check the Output -``` -watch hyperkube kubectl get pods -o wide --all-namespaces +```bash +./test-install.sh ``` -## Join Nodes to the Cluster -Copy the information where to find the apiserver and how to authenticate: -``` -scp 10.7.183.59:assets/auth/kubeconfig . 
-sudo mkdir -p /etc/kubernetes -sudo mv kubeconfig /etc/kubernetes/ ``` -install cni binaries and download hyperkube ``` -sudo mkdir -p /opt/cni/bin -wget https://github.com/containernetworking/cni/releases/download/v0.4.0/cni-amd64-v0.4.0.tbz2 -sudo tar xjf cni-amd64-v0.4.0.tbz2 -C /opt/cni/bin/ -wget http://storage.googleapis.com/kubernetes-release/release/v1.5.3/bin/linux/amd64/hyperkube -O ./hyperkube -sudo mv hyperkube /usr/bin/hyperkube -sudo chmod 755 /usr/bin/hyperkube ``` -### Master Nodes -Start the kubelet: ``` -sudo hyperkube kubelet --kubeconfig=/etc/kubernetes/kubeconfig \ - --require-kubeconfig \ - --cni-conf-dir=/etc/kubernetes/cni/net.d \ - --network-plugin=cni \ - --lock-file=/var/run/lock/kubelet.lock \ - --exit-on-lock-contention \ - --pod-manifest-path=/etc/kubernetes/manifests \ - --allow-privileged \ - --node-labels=master=true \ - --minimum-container-ttl-duration=6m0s \ - --cluster_dns=10.3.0.10 \ - --cluster_domain=cluster.local \ - --hostname-override=10.7.183.60 +Watch nodes spin up: + +```bash +watch kubectl --insecure-skip-tls-verify --kubeconfig <(sed 's/kubernetes:443/192.168.77.10:443/' < assets/kubeconfig) get nodes ``` -### Worker Nodes +To test changes, you can safely reset single or multiple nodes: -Note the only difference is the removal of `--node-labels=master=true`: -``` -sudo hyperkube kubelet --kubeconfig=/etc/kubernetes/kubeconfig \ - --require-kubeconfig \ - --cni-conf-dir=/etc/kubernetes/cni/net.d \ - --network-plugin=cni \ - --lock-file=/var/run/lock/kubelet.lock \ - --exit-on-lock-contention \ - --pod-manifest-path=/etc/kubernetes/manifests \ - --allow-privileged \ - --minimum-container-ttl-duration=6m0s \ - --cluster_dns=10.3.0.10 \ - --cluster_domain=cluster.local\ - --hostname-override=10.7.183.60 +```bash +vagrant snapshot restore n2 clean --no-provision +vagrant snapshot restore clean --no-provision ``` -## Scale Etcd -kubectl apply doesn't work for TPR at the moment. 
See https://github.com/kubernetes/kubernetes/issues/29542. As a workaround, we use cURL to resize the cluster. +## Detailed Deployment -``` -hyperkube kubectl --namespace=kube-system get cluster.etcd kube-etcd -o json > etcd.json && \ -vim etcd.json && \ -curl -H 'Content-Type: application/json' -X PUT --data @etcd.json http://127.0.0.1:8080/apis/etcd.coreos.com/v1beta1/namespaces/kube-system/clusters/kube-etcd -``` -If that doesn't work, re-run until it does. See https://github.com/kubernetes-incubator/bootkube/issues/346#issuecomment-283526930 +The below steps can be used to deploy a cluster on bare metal or virtual nodes: -## Challenges +1. Overwrite the placeholder assets in the `assets` directory. -### Node setup -Some Broadcom NICs panic'ed with the default Ubuntu kernel -- upgrade kernel to >`4.8` because of brcm nic failure -- move to `--storage-driver=overlay2` instead of `aufs` as docker driver -- disable swap on the node (will be a fatal error in kube-1.6) +2. Make sure the `Makefile` lists the images and versions you expect to be + required. + +3. Build the images with `make build` + +4. Setup each host with the following: + - DNS resolution pointing `kubernetes` to the appropriate IPs (Kubernetes master nodes) for the + Kubernetes API + - A running docker daemon, configured to use the DNS resolution specified + above (see `vagrant-assets/docker-daemon.json`) + +5. Transfer the appropriate images to each host. You may find it useful to + run `make save`, transfer the image and then use `docker load -i ...` to + restore it rather than to rely on a registry. + +6. On the genesis (seed) server, start the cluster supplying in the node's FQDNs: + `docker run --rm -v /:/target -v /var/run/docker.sock:/var/run/docker.sock -e NODE_HOSTNAME=genesis-node.fqdn quay.io/attcomdev/promenade-genesis:dev` + +7. 
On each additional node to be joined to the cluster: + `docker run --rm -v /:/target -v /var/run/docker.sock:/var/run/docker.sock -e NODE_HOSTNAME=join-node.fqdn quay.io/attcomdev/promenade-join:dev` -## ToDo Items: -### apiserver resiliance -the master apiservers need to have a single address only. Possible solutions: -- use LB from the DC -- use DNS from the DC with programmable API (e.g. powerdns) -- use something like kube-keepalive-vip? -- bootstrap DNS itself (skydns, coredns) +## References: -### Etcd Challenges -- backup strategies (https://github.com/coreos/etcd-operator/blob/master/doc/user/spec_examples.md#three-members-cluster-that-restores-from-previous-pv-backup) -- etcd-operator failures (e.g. https://github.com/coreos/etcd-operator/issues/851) -- partial failure (loosing quorum) -- permament failure (state gone completely) -- etcd needs ntp available (or another mechanism so that every node is in sync) +1. [Demo of Genesis Node Deployment](https://asciinema.org/a/c2fdtzh2z2fiymiyu75b32u0h) - -## Notes -### clean up docker -``` -sudo su - -docker rm -f $(docker ps -a -q) -exit -``` - -### Compile Bootkube -``` -sudo docker run --rm -it -v $(pwd)/golang/src:/go/src/ -w /go/src golang:1.7 bash -go get -u github.com/kubernetes-incubator/bootkube -cd $GOPATH/src/github.com/kubernetes-incubator/bootkube -make -``` - -### RBAC -``` -./bootkube-rbac render --asset-dir assets-rbac --experimental-self-hosted-etcd --etcd-servers=http://10.3.0.15:2379 --api-servers=https://10.7.183.59:443 -sudo rm -rf /etc/kubernetes/* -sudo cp -a assets-rbac/manifests /etc/kubernetes/ -sudo cp assets-rbac/auth/kubeconfig /etc/kubernetes/ -sudo ./bootkube-rbac start --asset-dir=./assets-rbac --experimental-self-hosted-etcd --etcd-server=http://127.0.0.1:12379 -``` - -### Containerized Kubelet -The benefit here is using a docker container instead of a kubelet binary. Also the hyperkube docker image packages and installs the cni binaries. 
The downside would be that in either case something needs to start the container upon a reboot of the node. Usually the something is systemd and systemd is better managing binaries than docker containers. Either way, this is how you would run a containerized kubelet: -``` -sudo docker run \ - --rm \ - -it \ - --privileged \ - -v /dev:/dev \ - -v /run:/run \ - -v /sys:/sys \ - -v /etc/kubernetes:/etc/kubernetes \ - -v /usr/share/ca-certificates:/etc/ssl/certs \ - -v /var/lib/docker:/var/lib/docker \ - -v /var/lib/kubelet:/var/lib/kubelet \ - -v /:/rootfs \ - quay.io/coreos/hyperkube:v1.5.3_coreos.0 \ - ./hyperkube \ - kubelet \ - --network-plugin=cni \ - --cni-conf-dir=/etc/kubernetes/cni/net.d \ - --cni-bin-dir=/opt/cni/bin \ - --pod-manifest-path=/etc/kubernetes/manifests \ - --allow-privileged \ - --hostname-override=10.7.183.60 \ - --cluster-dns=10.3.0.10 \ - --cluster-domain=cluster.local \ - --kubeconfig=/etc/kubernetes/kubeconfig \ - --require-kubeconfig \ - --lock-file=/var/run/lock/kubelet.lock \ - --containerized -``` -Not quite working yet though. The node comes up, registeres successfully with the master and starts daemonsets. Everything comes up except flannel: -``` -main.go:127] Failed to create SubnetManager: unable to initialize inclusterconfig: open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory -``` - -## Resources and References -- https://github.com/kubernetes/community/blob/master/contributors/design-proposals/self-hosted-kubernetes.md -- https://github.com/kubernetes-incubator/bootkube -- https://github.com/coreos/etcd-operator/ -- http://blog.kubernetes.io/2017/01/stronger-foundation-for-creating-and-managing-kubernetes-clusters.html -- https://github.com/kubernetes/kubeadm/issues/127 +2. 
[Demo of Joining Node to Cluster](https://asciinema.org/a/62dssvuiwbdanfuwwe6vzcihk) diff --git a/Vagrantfile b/Vagrantfile new file mode 100644 index 00000000..1b243476 --- /dev/null +++ b/Vagrantfile @@ -0,0 +1,59 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +Vagrant.configure("2") do |config| + config.vm.box = "ubuntu/xenial64" + config.vm.box_check_update = false + + config.vm.provision :file, source: "vagrant-assets/docker-daemon.json", destination: "/tmp/docker-daemon.json" + config.vm.provision :file, source: "vagrant-assets/dnsmasq-kubernetes", destination: "/tmp/dnsmasq-kubernetes" + + config.vm.provision :shell, privileged: true, inline:< host communication. + # If left blank, then the interface is chosen using the node's + # default route. + canal_iface: "" + + # Whether or not to masquerade traffic to destinations not within + # the pod network. + masquerade: "true" + + # The CNI network configuration to install on each node. The special + # values in this config will be automatically populated. + cni_network_config: |- + { + "name": "canal", + "type": "flannel", + "delegate": { + "type": "calico", + "etcd_endpoints": "__ETCD_ENDPOINTS__", + "log_level": "info", + "policy": { + "type": "k8s", + "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", + "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" + }, + "kubernetes": { + "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__" + } + } + } diff --git a/assets/manifests/kube-flannel.yaml b/assets/manifests/kube-flannel.yaml new file mode 100644 index 00000000..3f4fd8a9 --- /dev/null +++ b/assets/manifests/kube-flannel.yaml @@ -0,0 +1,368 @@ +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: canal-etcd + namespace: kube-system + labels: + k8s-app: canal-etcd +spec: + template: + metadata: + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + labels: + k8s-app: canal-etcd + spec: + # Only run this pod on the master. 
+ nodeSelector: + node-role.kubernetes.io/master: "" + hostNetwork: true + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + containers: + - name: canal-etcd + image: quay.io/coreos/etcd:v3.1.4 + env: + - name: ETCD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + command: ["/bin/sh","-c"] + args: ["/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"] + volumeMounts: + - name: var-etcd + mountPath: /var/etcd + volumes: + - name: var-etcd + hostPath: + path: /var/etcd + +--- +# This manifest installs the Service which gets traffic to the Calico +# etcd. +apiVersion: v1 +kind: Service +metadata: + labels: + k8s-app: canal-etcd + name: canal-etcd + namespace: kube-system +spec: + # Select the canal-etcd pod running on the master. + selector: + k8s-app: canal-etcd + # This ClusterIP needs to be known in advance, since we cannot rely + # on DNS to get access to etcd. + clusterIP: 10.3.0.136 + ports: + - port: 6666 +--- +# This manifest installs the per-node agents, as well +# as the CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: extensions/v1beta1 +metadata: + name: canal-node + namespace: kube-system + labels: + k8s-app: canal-node +spec: + selector: + matchLabels: + k8s-app: canal-node + template: + metadata: + labels: + k8s-app: canal-node + spec: + hostNetwork: true + serviceAccountName: calico-cni-plugin + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + containers: + # Runs the flannel daemon to enable vxlan networking between + # container hosts. + - name: flannel + image: quay.io/coreos/flannel:v0.7.1 + env: + # The location of the etcd cluster. 
+ - name: FLANNELD_ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_endpoints + # The interface flannel should run on. + - name: FLANNELD_IFACE + valueFrom: + configMapKeyRef: + name: canal-config + key: canal_iface + # Perform masquerade on traffic leaving the pod cidr. + - name: FLANNELD_IP_MASQ + valueFrom: + configMapKeyRef: + name: canal-config + key: masquerade + # Write the subnet.env file to the mounted directory. + - name: FLANNELD_SUBNET_FILE + value: "/run/flannel/subnet.env" + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/resolv.conf + name: resolv + - mountPath: /run/flannel + name: run-flannel + # Runs calico/node container on each Kubernetes node. This + # container programs network policy and local routes on each + # host. + - name: calico-node + image: quay.io/calico/node:v1.1.3 + env: + # The location of the etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_endpoints + # Disable Calico BGP. Calico is simply enforcing policy. + - name: CALICO_NETWORKING + value: "false" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # All pods to speak to services that resolve to the same host. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + securityContext: + privileged: true + resources: + requests: + cpu: 250m + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + # This container installs the Calico CNI binaries + # and CNI network config file on each node. + - name: install-calico-cni + image: quay.io/calico/cni:v1.7.0 + imagePullPolicy: Always + command: ["/install-cni.sh"] + env: + # The name of the CNI network config file to install. + - name: CNI_CONF_NAME + value: "10-canal.conf" + # The location of the etcd cluster. 
+ - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_endpoints + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: canal-config + key: cni_network_config + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + volumes: + # Used by calico/node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Used by flannel daemon. + - name: run-flannel + hostPath: + path: /run/flannel + - name: resolv + hostPath: + path: /etc/resolv.conf + +--- + +# This manifest deploys a Job which performs one time +# configuration of Canal. +apiVersion: batch/v1 +kind: Job +metadata: + name: configure-canal + namespace: kube-system + labels: + k8s-app: canal +spec: + template: + metadata: + name: configure-canal + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + hostNetwork: true + restartPolicy: OnFailure + containers: + # Writes basic flannel configuration to etcd. + - name: configure-flannel + image: quay.io/coreos/etcd:v3.1.4 + command: + - "etcdctl" + - "--no-sync" + - "set" + - "/coreos.com/network/config" + - '{ "Network": "10.2.0.0/16", "Backend": {"Type": "vxlan"} }' + env: + # The location of the etcd cluster. + - name: ETCDCTL_PEERS + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_endpoints + +--- + +# This manifest deploys the Calico policy controller on Kubernetes. 
+# See https://github.com/projectcalico/k8s-policy +apiVersion: extensions/v1beta1 +kind: ReplicaSet +metadata: + name: calico-policy-controller + namespace: kube-system + labels: + k8s-app: calico-policy +spec: + # The policy controller can only have a single active instance. + replicas: 1 + template: + metadata: + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + name: calico-policy-controller + namespace: kube-system + labels: + k8s-app: calico-policy + spec: + # The policy controller must run in the host network namespace so that + # it isn't governed by policy that would prevent it from working. + hostNetwork: true + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + serviceAccountName: calico-policy-controller + containers: + - name: calico-policy-controller + image: quay.io/calico/kube-policy-controller:v0.5.4 + env: + # The location of the Calico etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: canal-config + key: etcd_endpoints + # The location of the Kubernetes API. Use the default Kubernetes + # service for API access. + - name: K8S_API + value: "https://kubernetes.default:443" + # Since we're running in the host namespace and might not have KubeDNS + # access, configure the container's /etc/hosts to resolve + # kubernetes.default to the correct service clusterIP. 
+ - name: CONFIGURE_ETC_HOSTS + value: "true" + +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: calico-cni-plugin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-cni-plugin +subjects: +- kind: ServiceAccount + name: calico-cni-plugin + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico-cni-plugin + namespace: kube-system +rules: + - apiGroups: [""] + resources: + - pods + - nodes + verbs: + - get + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-cni-plugin + namespace: kube-system + +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: calico-policy-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-policy-controller +subjects: +- kind: ServiceAccount + name: calico-policy-controller + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico-policy-controller + namespace: kube-system +rules: + - apiGroups: + - "" + - extensions + resources: + - pods + - namespaces + - networkpolicies + verbs: + - watch + - list + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-policy-controller + namespace: kube-system diff --git a/assets/manifests/kube-proxy.yaml b/assets/manifests/kube-proxy.yaml new file mode 100644 index 00000000..a52281d6 --- /dev/null +++ b/assets/manifests/kube-proxy.yaml @@ -0,0 +1,56 @@ +--- +apiVersion: "extensions/v1beta1" +kind: DaemonSet +metadata: + name: kube-proxy + namespace: kube-system + labels: + tier: node + component: kube-proxy +spec: + template: + metadata: + labels: + tier: node + component: kube-proxy + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + containers: + - name: kube-proxy + image: quay.io/coreos/hyperkube:v1.6.2_coreos.0 + command: + - /hyperkube + - proxy + - 
--cluster-cidr=10.2.0.0/16 + - --hostname-override=$(NODE_NAME) + - --kubeconfig=/etc/kubernetes/kubeconfig + - --proxy-mode=iptables + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/ssl/certs + name: ssl-certs-host + readOnly: true + - name: etc-kubernetes + mountPath: /etc/kubernetes + readOnly: true + hostNetwork: true + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + volumes: + - hostPath: + path: /usr/share/ca-certificates + name: ssl-certs-host + - name: etc-kubernetes + hostPath: + path: /etc/kubernetes diff --git a/assets/manifests/kube-scheduler-disruption.yaml b/assets/manifests/kube-scheduler-disruption.yaml new file mode 100644 index 00000000..c6ab7f2d --- /dev/null +++ b/assets/manifests/kube-scheduler-disruption.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: kube-scheduler + namespace: kube-system +spec: + minAvailable: 1 + selector: + matchLabels: + tier: control-plane + component: kube-scheduler diff --git a/assets/manifests/kube-scheduler.yaml b/assets/manifests/kube-scheduler.yaml new file mode 100644 index 00000000..ab81828f --- /dev/null +++ b/assets/manifests/kube-scheduler.yaml @@ -0,0 +1,56 @@ +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: kube-scheduler + namespace: kube-system + labels: + tier: control-plane + component: kube-scheduler +spec: + replicas: 2 + template: + metadata: + labels: + tier: control-plane + component: kube-scheduler + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: tier + operator: In + values: + - control-plane + - key: component + operator: In + values: + - 
kube-scheduler + topologyKey: kubernetes.io/hostname + containers: + - name: kube-scheduler + image: quay.io/coreos/hyperkube:v1.6.2_coreos.0 + command: + - ./hyperkube + - scheduler + - --leader-elect=true + livenessProbe: + httpGet: + path: /healthz + port: 10251 # Note: Using default port. Update if --port option is set differently. + initialDelaySeconds: 15 + timeoutSeconds: 15 + nodeSelector: + node-role.kubernetes.io/master: "" + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule diff --git a/assets/manifests/kube-system-rbac-role-binding.yaml b/assets/manifests/kube-system-rbac-role-binding.yaml new file mode 100644 index 00000000..80438fee --- /dev/null +++ b/assets/manifests/kube-system-rbac-role-binding.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1alpha1 +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1alpha1 +metadata: + name: system:default-sa +subjects: + - kind: ServiceAccount + name: default + namespace: kube-system +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io diff --git a/assets/manifests/pod-checkpointer.yaml b/assets/manifests/pod-checkpointer.yaml new file mode 100644 index 00000000..813dc5b1 --- /dev/null +++ b/assets/manifests/pod-checkpointer.yaml @@ -0,0 +1,59 @@ +--- +apiVersion: "extensions/v1beta1" +kind: DaemonSet +metadata: + name: pod-checkpointer + namespace: kube-system + labels: + tier: control-plane + component: pod-checkpointer +spec: + template: + metadata: + labels: + tier: control-plane + component: pod-checkpointer + annotations: + checkpointer.alpha.coreos.com/checkpoint: "true" + spec: + containers: + - name: checkpoint + image: quay.io/coreos/pod-checkpointer:20cf8b9a6018731a0770192f30dfa7a1941521e3 + command: + - /checkpoint + - --v=4 + - --lock-file=/var/run/lock/pod-checkpointer.lock + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: 
spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + imagePullPolicy: Always + volumeMounts: + - mountPath: /etc/kubernetes + name: etc-kubernetes + - mountPath: /var/run + name: var-run + hostNetwork: true + nodeSelector: + node-role.kubernetes.io/master: "" + restartPolicy: Always + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + volumes: + - name: etc-kubernetes + hostPath: + path: /etc/kubernetes + - name: var-run + hostPath: + path: /var/run diff --git a/assets/tls/apiserver.crt b/assets/tls/apiserver.crt new file mode 100644 index 00000000..1679b269 --- /dev/null +++ b/assets/tls/apiserver.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhDCCAmygAwIBAgIIYRTnEUWPB2EwDQYJKoZIhvcNAQELBQAwJTERMA8GA1UE +ChMIYm9vdGt1YmUxEDAOBgNVBAMTB2t1YmUtY2EwHhcNMTcwNTE5MTg0MTIwWhcN +MTgwNTE5MTg0MTIxWjAvMRQwEgYDVQQKEwtrdWJlLW1hc3RlcjEXMBUGA1UEAxMO +a3ViZS1hcGlzZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDB +sXDQGt4CSkm+H0oT3HgzADzK3IQtc5QVKTb2DTyw2/m+h4MRd6n+lra8pto09Is/ +YiVx8OCCFFsO726ZZqLQlQePDF36QKJbpIyGq2b3GVByDQqtn47xhXUeLu0z7IMK +8906xmZXeg8HHTIS9P66z3xA9kLn0nwSSFJHGTXMoFr8cnLySnrtDHe9pGo/+jcR +0+jiH3at3w2F1tCaTZ8znEMRP80BTysb7IlZdmNBfaSoT45Nje2eBpZDdxvI8qhi +J2ZWZ7vQsu6AlCneKpTj4tgsV6sEAgs2V8pabRaSM5t0Hq1lGo/npcOamIUQAq1u +O2SpSTIojdSHmWdD9h5dAgMBAAGjga0wgaowDgYDVR0PAQH/BAQDAgWgMB0GA1Ud +JQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjB5BgNVHREEcjBwggprdWJlcm5ldGVz +ggprdWJlcm5ldGVzghJrdWJlcm5ldGVzLmRlZmF1bHSCFmt1YmVybmV0ZXMuZGVm +YXVsdC5zdmOCJGt1YmVybmV0ZXMuZGVmYXVsdC5zdmMuY2x1c3Rlci5sb2NhbIcE +CgMAATANBgkqhkiG9w0BAQsFAAOCAQEAj8G9Lc75QNbhADQuOXsSGEi6bJ0UdFoV +vv5VLNMOdZ0+jXtHtLYrB3RygIcolSdqlaqpM9nj61xgnhG3OIYIw8BCqQlaBgO+ +5cAvzmql29AoDbLeu3JctmySScqyCj4mqtlOGHgIotUq226Re1aqSJ8zLH7UDVEa +jyQo8vn5GQm/XwyGUt4nSpYXMi6MztebcePdyOe4387NFJS9/OUQIdWlhv1cegK+ 
+fU8KRv2MiBfZZqJ1DQD17eV9494DImGN1nCpVlmPNBGTCe75SOYCBOwYhHKoNMLn +YmtnpzBtfAkU4EzjiMm6V22XI/lZsQdxeQfMMScmh+M60DHr7ToRdg== +-----END CERTIFICATE----- diff --git a/assets/tls/apiserver.key b/assets/tls/apiserver.key new file mode 100644 index 00000000..2547d282 --- /dev/null +++ b/assets/tls/apiserver.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAwbFw0BreAkpJvh9KE9x4MwA8ytyELXOUFSk29g08sNv5voeD +EXep/pa2vKbaNPSLP2IlcfDgghRbDu9umWai0JUHjwxd+kCiW6SMhqtm9xlQcg0K +rZ+O8YV1Hi7tM+yDCvPdOsZmV3oPBx0yEvT+us98QPZC59J8EkhSRxk1zKBa/HJy +8kp67Qx3vaRqP/o3EdPo4h92rd8NhdbQmk2fM5xDET/NAU8rG+yJWXZjQX2kqE+O +TY3tngaWQ3cbyPKoYidmVme70LLugJQp3iqU4+LYLFerBAILNlfKWm0WkjObdB6t +ZRqP56XDmpiFEAKtbjtkqUkyKI3Uh5lnQ/YeXQIDAQABAoIBAERN1ZGdl+LI3b5s +/EuKuNyLXeTP5NC+bF8V/KrCOj/IIwccdI0JXOpJrcFTOano/t3oN3o5zoIwuXfH +2YHBHvNdSqAYZV+lwVt96IxpD1NeGu9NSBG4LclgHc/6Dm38Hq4TF1XttxNsGLaS +hiEHQnkQSCoEbc2gfV5ZIKKv8jfpShYiaAPzrt3saE/2+OliJ5p6zfXKNlEsg1US +78g+JiOVXZdEQFyPP5Yo8gje8wQ2NetnilQQ9rtBbPv9FfsTrj03srlU2D7IIBdQ +7D3Z5AN7e7RiwRGmStZ4GllcCuhvjhvfhav132G01o8/DwvVLTnfSKFA7+E8UYG9 +6ZAzX4UCgYEA/pXt8ehj3s1f8cNaSEJlD8AsOHgzcuxRvdrE+zA8l4eEFpP5UJej +OcDu966q1yt4Qp7Yx2sW3UA76m7RugdqA5MP25fgzGV5n22iwYbnBQvqDQEOjMH1 +1k0CkaRXhDCzGuwb07og/rhOJdCI3OSCQpLD6BsX8MVPJ/2Gfe4XECcCgYEAwsTo +/iNZFNKkPwlfjpirry6gB7GZYRYdkneMM92fTzuDdqSIrM9oLBeUyixAfPP9c0yV +2cwhc8TLdHxIwatzNNJkwp2+eANfg8jQ0vK9J8V0649C5iM3aZ5MUVG2IS4RAZtH +MG2w5fvdd7SqJ8ROWUy7+E0s472yfJNL3auNa9sCgYEA5AXPwEsAII/cboMlevEU +6Z2bPdzTYAywThcDNWSH8MStFzfkJz4aMWFP6EHmvKAvr6Psz/hn2zVsNNabPD7l +wlvW6T1IWGpPG++rxiCZDJkWQh1/Na2IDjCdq2sCA+FGmkd9yQ69/MeBHzd/TjHR +ReWEWIDj2YAwHMZjzqkQuSMCgYA10Kp/7cxjUIBJWpGomM53LO7SsWOry6yIF7gJ +bKbkAZGlanjJJtWluS5HXkrDO7c/8F1HPHvRvQJqQRzpRjIi2i81Btjl2CjABPCO +GLvjDU/s9jyJ0hkxeaekoGsuZ8gTJZBZ9TT3lsvuk2CgdEEhs24MgWZx1qxGd3xy +1z/QGQKBgQCE7afZwIEUQ6epGlUPwmm+mqGiUNbXAz/PnK/IhuOeV9aEU78lPH8p +6rMpuh5SOqGcRaZhpRSIY3vqU9Yk49OO4BOawF2j8k4pVkTaJGgD71in8aDbcVBc +VlIMP2q93mnyO7OC8znQKHMs5WRWEokRbSsjWEeQF1MtyBWaIiWmlg== 
+-----END RSA PRIVATE KEY----- diff --git a/assets/tls/ca.crt b/assets/tls/ca.crt new file mode 100644 index 00000000..cca186c3 --- /dev/null +++ b/assets/tls/ca.crt @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC6DCCAdCgAwIBAgIBADANBgkqhkiG9w0BAQsFADAlMREwDwYDVQQKEwhib290 +a3ViZTEQMA4GA1UEAxMHa3ViZS1jYTAeFw0xNzA1MTkxODQxMjBaFw0yNzA1MTcx +ODQxMjBaMCUxETAPBgNVBAoTCGJvb3RrdWJlMRAwDgYDVQQDEwdrdWJlLWNhMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAulAVfzTe/mMl31NAx7P524sz +nQKmxG+BXfDPt4O778tBF76RsEX+wKrRtooBr7axhvR0ok5kDZPARGpNKARmdCSm +336ErFtqTwMoreY7WVCU2CBFOtt2umfJDuGVoNUHEkD8MeV2lYJCoxwJrhe5wiqq +m4hptSCepUjilmkReWQ+/N4+RVDpr86GY2QBUlv9OtA5hxTisbA01SwSPAWrpOqV +8JIj2RLZn85FTzMFTQk0Wu0Zugiryqdaxl33VL3+URI3QC2r2dpvd1SeyWDEXvjm +kn9238we+2wBeRaceCvC7jyDvYSOhS+j92wFdnQYx+HinA8nn8Qfdm38u6A9hwID +AQABoyMwITAOBgNVHQ8BAf8EBAMCAqQwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG +9w0BAQsFAAOCAQEADHvgtDCE8tv0lKIzEqfubUA5LKQ4NiT5SUAucYazMpKw1QIW +QinCoLEfyPMwgkbgXjzwne8PxeEjjvwCRqwbyViBWnv937p94poZ/9G3CW3bSYwQ +4ZeZnwW6wW0IGsEheMwknBeQboocM6cXu8hto1AYHOnjtg2t1RufWpsDn5aokuW/ +RI8Hg5vnWWKAAAwcwkmg8aiN/1nYQG/coD41kXe/iJ1DTPZa2CPxgm71f2hRnEYT +c7uT7uueBapo1O+ttPkghsIvPZKc6vKxK0wrvzHGRoULl77Z83z92aoPLzcmnJ3d +MFEq4d7JQ5u5i+SaqqqOdp1RGAiuiNpcvyP9ew== +-----END CERTIFICATE----- diff --git a/assets/tls/ca.key b/assets/tls/ca.key new file mode 100644 index 00000000..f3c15499 --- /dev/null +++ b/assets/tls/ca.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAulAVfzTe/mMl31NAx7P524sznQKmxG+BXfDPt4O778tBF76R +sEX+wKrRtooBr7axhvR0ok5kDZPARGpNKARmdCSm336ErFtqTwMoreY7WVCU2CBF +Ott2umfJDuGVoNUHEkD8MeV2lYJCoxwJrhe5wiqqm4hptSCepUjilmkReWQ+/N4+ +RVDpr86GY2QBUlv9OtA5hxTisbA01SwSPAWrpOqV8JIj2RLZn85FTzMFTQk0Wu0Z +ugiryqdaxl33VL3+URI3QC2r2dpvd1SeyWDEXvjmkn9238we+2wBeRaceCvC7jyD +vYSOhS+j92wFdnQYx+HinA8nn8Qfdm38u6A9hwIDAQABAoIBADpNLSztQoqgRA2q +Y68aZqmI2dHcLotxyS24WYe3tWvIUso3XCeo/5sS2SUh8n0l0k/E12qi1TRac+P0 +z8gh+F2HyqBNWv8EbDPlbSldzlyYlrs6/e75FiImsAf0F3qIrvnLVB/ZCk6mwGuC 
+LpVH310fNNwOx+ViG8LlF+KxZkJxzoKQ2RwiCwzMzpvNBTJyEE1jfqNlc92XnP65 +FhjcFfzSJhFK3VH1gdpfO8bUiLiiUhzKzXH7Af73UqZ22wHeYx87ZJBv7e9ymbWT +GMf9js92e3OdXa3al75JlXgexSDmV2OdZNj6zpqAyupo5b+jXNxcxDaQCitOAcyU +H6HqMiECgYEAwWeEvOL/JC1hFBniM3jtG7ZcXjT1nuc0I9z+b0O6i3JXp1AXuxqU +COOn0udgJ4SJZZk2LOja7Mq6DsPvbPK9OA/XvSju6U/cqALpLdT+bvcG1J5km80w +F9d5a8CmABYsIzIm5VOYCZN/ELxo9uzDhNpiU1m7EVZengg8E1/xSpMCgYEA9pz/ +SGZTFHdLZn7jgg9EzdnjZ2SlSnGc1tHayiRbHknwt8JFMwHeL/TPI6/4ns4A8l59 +IEl1Zf8pWDhwa2qGITXQBmauLYzuPGSIBdABLnJQtE4r6o+vYafZxZVvTAv5B4Sz +TCWFkLYtvHvs71+u7IKS+dJg3EYy3Gx5KVhddb0CgYAr8QMdj018wLqvwHm+TBlD +FJnD5bBwnAMiqtE8Il091YrIvs/FePJtWpwEtQEJuXkmFjtS1Mz4w86mECpTzIrl +M+RGXAh8BeMSYSbtfNkaCRIKOLqPE317zT8PFkQg/OimTny72dRPSK2z9bq7b2u0 +wZFZcqen9sGkkiZkGIZP9QKBgQDcgX6FVvD8QLqLl/OHLG3h/ewmW8irqrCJKDUQ +P7e1enmhZTSIqifoC2ZXS5XrMNmJ3VDWWLh/DcsDFdv3P9VUxpAN2SvukK/IEj/J +qrYTuKVOwwLjhbxUfkfrMnXEsoPl5BKJiJdH0I1OliRB2PVIhmwysphm/OGnU9p2 +TIuspQKBgQCq5QJcny6CWHnFh/Q1faYqIjvaS4MqLfnDndvZ98abpcjik3AKgWof +iaROSk40L+q4uDuaM5tU1ufS/FS94hwlk2O1bQ/xgJBkNZnvZJRFU3oZjhggyl6G +iFtBTAGGtJqHTPMtn/Y6dUOJ/ZFIZWzuNhJGYX/S3ifpZeldKXmXew== +-----END RSA PRIVATE KEY----- diff --git a/assets/tls/kubelet.crt b/assets/tls/kubelet.crt new file mode 100644 index 00000000..859df1d1 --- /dev/null +++ b/assets/tls/kubelet.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDAzCCAeugAwIBAgIILMPkLd2E/uAwDQYJKoZIhvcNAQELBQAwJTERMA8GA1UE +ChMIYm9vdGt1YmUxEDAOBgNVBAMTB2t1YmUtY2EwHhcNMTcwNTE5MTg0MTIwWhcN +MTgwNTE5MTg0MTIxWjArMRcwFQYDVQQKEw5zeXN0ZW06bWFzdGVyczEQMA4GA1UE +AxMHa3ViZWxldDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALtz9mHo +tPkidPbQeu9RS6tAOQhAhPOzV7y5kxo9ZkyGR5mOJ5MElfoofHWGXDqJs3IHO6Zr +ZTKTYgX6c3jisMhIT62JnN9ZaATWcrd+qQ15ixTNhqdy3UcX6xlB8YF8KpVZ40rO +wrP/UsG9EaBit37iOmmINIkZtbNIhvOYhkJvr+NOtX/8TsnRZpT9PyCeyZJbsZIZ +d1Apfu2ENeS1C1OgOQIEOREBehc3GVH11D9BRtFob22MjZUjxyGj0SButUmpvnY9 +ogfE5pT0yhI+kZlP6iMPkk0oGlkcc+U4X8VrSyYXfJNEbmI5aDZe3A4lk4fXiF/Y 
+NosbHYnzdf/j0acCAwEAAaMxMC8wDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQG +CCsGAQUFBwMBBggrBgEFBQcDAjANBgkqhkiG9w0BAQsFAAOCAQEAIgaxO6aAyGRq +MINPID5bG/ZSRoIBSEX0bAviLKWP9RonjfayM8Xb3r2WZ4TmJoYYDNMRFoyCeStw +1fjl7b2vpmFBOxlpmRvNhRF1dlI9Rt4GRRVkxeS7c4dkc0LFTHEPp0X/RmSt4uf+ +X9sYsWOGSBf52+qZ/7UNI6SYwoltenzbwnLHY9NSLXiVFommCXPaBma1GlkQN2F3 +cEInhf78BXKXeIpWdZboHuWOUu3aoRT0p6fegb2Uxh2a73s6sToHjE7oy3H2ZvKR +kcFJ2TnKMrqzEK/9wyc/gu/kYVx8/zCoPlDQASem7aTZgOIDZ8wc4g9rBitnxdIs +jxZwjOKt9g== +-----END CERTIFICATE----- diff --git a/assets/tls/kubelet.key b/assets/tls/kubelet.key new file mode 100644 index 00000000..27816a66 --- /dev/null +++ b/assets/tls/kubelet.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpgIBAAKCAQEAu3P2Yei0+SJ09tB671FLq0A5CECE87NXvLmTGj1mTIZHmY4n +kwSV+ih8dYZcOomzcgc7pmtlMpNiBfpzeOKwyEhPrYmc31loBNZyt36pDXmLFM2G +p3LdRxfrGUHxgXwqlVnjSs7Cs/9Swb0RoGK3fuI6aYg0iRm1s0iG85iGQm+v4061 +f/xOydFmlP0/IJ7Jkluxkhl3UCl+7YQ15LULU6A5AgQ5EQF6FzcZUfXUP0FG0Whv +bYyNlSPHIaPRIG61Sam+dj2iB8TmlPTKEj6RmU/qIw+STSgaWRxz5ThfxWtLJhd8 +k0RuYjloNl7cDiWTh9eIX9g2ixsdifN1/+PRpwIDAQABAoIBAQCRpzJbs4DjUHXH +zgin6eg9AaMPGWr1HXZgC2YU7n6NmY0K8N0pLFgIz+qdOzBwv8xyHtKnpi001jZF +ZOzSknpAtYdL1XDST1s23xa2I7Hh6X47RNOLSwJLGnev4YBxV3STJgwpdWzuhcbd +CTcoA2yHJ+uxUodXvGVmEEXkA7DW7zLZpvLJ//nD5z5CM0IUPdaSgXhYQp2NZWtI +RjLdjkuYVyBYC2rU4LpmiH1eIVL7bDHoUQhOaHN0wSFG80o46gvrqbhrMPw7BwIu +bCW30q4Y4JPRYn5ru0zCForne65I2kRtnJUDjn99dOntWVZibRojY0hFFEyGYOjZ +WItzGAbxAoGBANFj2ZHitQxtqYs7MNIY9jz/7pzuPaX8dm+2/3WW5Aot01+s4yVH +pd7HE8l5NjnejWG7nG2GPsIhbCCVXEtSMGt1BRioKpc2dLq+ZQb75LGDMaJzMWEm +/HimJuhXvxOzzKC9Z29vo4d6JC58vPwyu27dFAv3rzAcdiWb/aib7S6ZAoGBAOUu +BePZgqlpwl3wqDlAljiLsH8AeZUH2rDA4n4d+1kKPMqJYMmftGaTkDJMeJfisfKb +EXcQsGJAeOLHRpY1VvkHqn5v+7qg9JHSnlw+/nTF5Vk6ISAFMs2Qfwdq6fZ898GZ +mi9VXr0hez7Z/v/liCxBcl0hgAhnjIFGvQ5rSmo/AoGBAIvlVFWdzCyTj/UQBNw6 +BTpYHAoJOnMNq+uTrjXYLF+IonKHxfMAXZfsFhJDw7ECOh+UAz1BtehqAB387H7+ +WI9SzabdpCcHIRIrZsA1x2O6LY1FvTYVoBTTnacaCPWW6R5zrQnM4sr/FfFhMbqm 
+AohdeKlOQGO6gE08XUsrclnxAoGBALOv+f5DtCaQPUzaO4toEGAVZjStcqZemiCr +mum3KDMPy8ohHDn5dcBXQl+thX/QxiSpYHAyLZlbY2yrQbTT7XUjhZHMy1nwiNEs +ie1ZlriH0OK8qOwqJ0L1YCO4t+gC415vyGwES1uOvMrysPSCStooFjre4Tu1tHxH +skNz68yRAoGBAJyMFoQu0rzOxCwQx+8m1encm9pcUvu2eSwwy+9460W474Ww4qZA +F4DWwjDg5dBG1Im21KIJkhoX579dh3QIRr3PRwlQUkQlxTrUSEtpfNTU3pvWV9BF +tuLS1TnOdweoQ8cGZZd9PWMLLrBd0JeR4FyH23rOUmMFwJ2A6OopeX6B +-----END RSA PRIVATE KEY----- diff --git a/assets/tls/service-account.key b/assets/tls/service-account.key new file mode 100644 index 00000000..26c20230 --- /dev/null +++ b/assets/tls/service-account.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA1OJQmE9JCI20h3BI/xJpQoNIfYviHIhlx6Al60Kv4Zb+taD+ +Jd6pCbHqjgYyiYH1wq0nMC9MiRbphdMsKfJXo57H2X1QWNc+3RYzNEL2ra2rkCGw +q1jKGk6RofagbrinjAC9hGcm/V713fCdSpULH6Ruro9Kjvtca0nLjBcGC03pkuUi +1e7EPj2SALQxA1iV2+sqqpg2axlpyAN7gecafjVN10kkMw9GKumQqUpejCtf3tTv +zzfmGqiNnHDB8lDnXpHecKIZkfXdH5Pd4jRY5DyFfrsL5xy0OHF4rA/EDSFkdEZ2 +rTYiCB/O17pw6LuEu79V3N2hJVEwe4Uti3olQwIDAQABAoIBAHSWjXUc1u6sTNZw +FEo9lxAqPiUj2u2tdbBicOHrX8+4lj56sTWkQAdjPQYTNtJALowzsGafQNdDiRkV +kfZXFtAxQVpHWx2MpI0If3p7wgVUO8Vv7gWpVuYZaYC+RRbeYkQ2k5RTufLBcv3d +rQcPoUvvDf7j0v2DhBXuEF/krBa70OnI6Fv5b6Tay4cN6vmNJSPUlDPvicCizmvV +WtAq5pkPfXW1uweMYDOSD10zaetclMae/0C1hahk9kGoLv49XnKCX/Luzwx0ShJL +F0Zk+0s9nmMAAfRL8JM7E9iwXa8I4zXpaNON5RfzdUQeU6puhNQrMExrfzFYWYVl +rPaRnqECgYEA4C7i9B08wR+JEbKeEvTTzUAS8W+S5lSkzPN75Tt4aHeTojzvRXa0 +nUvbr+0PGctpa3OwDzh/SayKqkJvWzxWmzKELTsWkpUZLyx37oxkoQ+dUKSFDYF7 +ejGYfqthUC65NA0rqmz6qiCK/RFXL1ihMY0f/74+IzChoiftpFQ0pt8CgYEA8xjn +jHcBpGmUOyKRWkmTM1x3l5NhT2bZYy5CGPXZ8tiu6zdi2gw2xUmgVIPzUnTDqmOH +NPuRvHv2sovqZsApDankwzsWthFLVFjPdpXjVa+Gvp6YN0FTeeIEjGujmCJ9Zj9b +oIk4o6gRzQNx5L/RaE2/oQrTGwlCWeA44pH6gh0CgYEA0KZSzOk5VnVHWZVo0jPT +vUBZYSR7EKzPBYHIWj3Tf0drvKACAiDNUWj8+uwkFdngMAXoYwIuVh+kn3pdsgii +gqetpXtNMvhaDDHTHc7FCbJCtH+q5jsQ9VWbnKldVQdnkC6B6YisdBL9yTOOdZ6D +yF6U3a3un0nv5cBLyZoltvkCgYEA5Aexc6ZSKQpMXGghlmK7rIsJN2qs9hFQy2Mh 
+503+oni1I7jxhf29BrT4qy6W+PrEa7kuo/lzDC3wDC2Is9d+6u05xBRSSnjQg49H +FEKnW8HpkDcuK26gwgzMHXf+nf+ER3wZE+6D7agDAp8/n8Z6xO9hWMvRmGPIFIxq +b8VlCdUCgYBgwfUsSsCMP8KVOJAuwf4/SWOkIUUQHQUj1CyEz2UWG5QiP2wqFiA7 +IH8K8JsO9MSWq3ndR9kR+HGBCkJyyoD1GzBZeRhPb+69fYWao3lKUzEDqmxB7zjh +NPltbLlGGNbPhczXyJeSv1N94MUwY1wt0aAX6G+HiBI8a3cjC/cQPg== +-----END RSA PRIVATE KEY----- diff --git a/assets/tls/service-account.pub b/assets/tls/service-account.pub new file mode 100644 index 00000000..a43e38fc --- /dev/null +++ b/assets/tls/service-account.pub @@ -0,0 +1,9 @@ +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1OJQmE9JCI20h3BI/xJp +QoNIfYviHIhlx6Al60Kv4Zb+taD+Jd6pCbHqjgYyiYH1wq0nMC9MiRbphdMsKfJX +o57H2X1QWNc+3RYzNEL2ra2rkCGwq1jKGk6RofagbrinjAC9hGcm/V713fCdSpUL +H6Ruro9Kjvtca0nLjBcGC03pkuUi1e7EPj2SALQxA1iV2+sqqpg2axlpyAN7geca +fjVN10kkMw9GKumQqUpejCtf3tTvzzfmGqiNnHDB8lDnXpHecKIZkfXdH5Pd4jRY +5DyFfrsL5xy0OHF4rA/EDSFkdEZ2rTYiCB/O17pw6LuEu79V3N2hJVEwe4Uti3ol +QwIDAQAB +-----END PUBLIC KEY----- diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index 563ada3c..00000000 --- a/docs/README.md +++ /dev/null @@ -1,2 +0,0 @@ -## Instructions: -ansible-playbook -e bootstrap_enabled=true -i hosts-deploy.yaml site.yaml diff --git a/hosts.yaml b/hosts.yaml deleted file mode 100644 index 45032e48..00000000 --- a/hosts.yaml +++ /dev/null @@ -1,38 +0,0 @@ -#Sample Hosts File with variables - -#For Single node deployments, make sure that the bootstrap node is listed as a master and worker node as well. 
-[bootstrap] -192.168.0.1 - -[master] -#Make sure bootstrap node is first master node -192.168.0.1 -192.168.0.2 - -[workers] -192.168.0.3 - - -[bootstrap:vars] -node_master=true -bootstrap_enabled=false -boot_kube_version="v0.3.12" - - -[master:vars] -node_master=true -cni_version="v0.5.1" -hyperkube_version="v1.5.6" -kubelet_version="v1.5.6" -calicoctl_version="v1.1.0" -calico_peer1="192.168.0.4" -calico_peer2="192.168.0.5" -deploy_pods_master=true - -[all:vars] -ansible_user="ubuntu" -ansible_ssh_pass="password" -#API Server FQDN is required for SkyDNS to resolve -api_server_fqdn="cluster-ha.default.svc.cluster.local" -kube_labels="openstack-control-plane" -kube_controller_manager_version="v1.5.6" diff --git a/img/self-hosted-moving-parts.png b/img/self-hosted-moving-parts.png deleted file mode 100644 index 423add2e..00000000 Binary files a/img/self-hosted-moving-parts.png and /dev/null differ diff --git a/img/self-hosted.png b/img/self-hosted.png deleted file mode 100644 index 5446d6bf..00000000 Binary files a/img/self-hosted.png and /dev/null differ diff --git a/roles/deploy-kubelet/templates/kubelet.service b/kubelet.service.template similarity index 66% rename from roles/deploy-kubelet/templates/kubelet.service rename to kubelet.service.template index 248012c1..e0fa7f79 100644 --- a/roles/deploy-kubelet/templates/kubelet.service +++ b/kubelet.service.template @@ -1,10 +1,10 @@ [Unit] Description=Kubernetes Kubelet -Documentation=https://github.com/kubernetes/kubernetes +Documentation=https://kubernetes.io/docs/admin/kubelet/ [Service] ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests -ExecStart=/usr/bin/kubelet \ +ExecStart=/usr/local/bin/kubelet \ --kubeconfig=/etc/kubernetes/kubeconfig \ --require-kubeconfig \ --cni-conf-dir=/etc/cni/net.d \ @@ -14,11 +14,10 @@ ExecStart=/usr/bin/kubelet \ --exit-on-lock-contention \ --pod-manifest-path=/etc/kubernetes/manifests \ --allow-privileged \ - --minimum-container-ttl-duration=6m0s \ - --cluster_dns=10.3.0.10 \ 
+ --cluster_dns=192.168.1.70,8.8.8.8,10.3.0.10 \ --cluster_domain=cluster.local \ - --node-labels=master={{ node_master|default('false') }} \ - --hostname-override={{ inventory_hostname }} \ + --node-labels=node-role.kubernetes.io/canal-node=true,node-role.kubernetes.io/master= \ + --hostname-override=${NODE_HOSTNAME} \ --v=2 Restart=on-failure RestartSec=5 diff --git a/roles/deploy-addons/tasks/addon-ceph.yaml b/roles/deploy-addons/tasks/addon-ceph.yaml deleted file mode 100644 index 4e6a22d1..00000000 --- a/roles/deploy-addons/tasks/addon-ceph.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: Install Ceph - apt: - name: ceph-common - state: present - register: ceph_installed - when: addons_enabled and "{{addons.ceph is defined}}" - -- name: Create Ceph and OpenStack-Helm directories - file: - path: "{{ item }}" - state: directory - with_items: - - "/var/lib/openstack-helm/ceph/osd" - - "/var/lib/openstack-helm/ceph/ceph" - - "/var/lib/openstack-helm/ceph/mon" - - "/var/lib/nova/instances" - when: addons_enabled and "{{addons.ceph is defined}}" - -- name: Install Sigil for Ceph Secrets - shell: curl -L https://github.com/gliderlabs/sigil/releases/download/v0.4.0/sigil_0.4.0_Linux_x86_64.tgz | tar -zxC /usr/local/bin - when: addons_enabled and "{{addons.ceph is defined}}" and ceph_installed | changed - -- name: Capture kubernetes version - shell: kubelet --version | cut -d " " -f2 - register: kube_version diff --git a/roles/deploy-addons/tasks/addon-dashboard.yaml b/roles/deploy-addons/tasks/addon-dashboard.yaml deleted file mode 100644 index 19688b58..00000000 --- a/roles/deploy-addons/tasks/addon-dashboard.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: Check for Kubernetes dashboard - shell: hyperkube kubectl get pods -o wide --all-namespaces | grep kubernetes-dashboard - register: dashboard_check - ignore_errors: true - when: addons_enabled and "{{addons.dashboard is defined}}" - -- name: Deploy Kubernetes Dashboard - shell: hyperkube kubectl create -f 
https://rawgit.com/kubernetes/dashboard/master/src/deploy/kubernetes-dashboard.yaml - when: addons_enabled and "{{addons.dashboard is defined}}" and dashboard_check | failed diff --git a/roles/deploy-addons/tasks/addon-helm.yaml b/roles/deploy-addons/tasks/addon-helm.yaml deleted file mode 100644 index ef0963c5..00000000 --- a/roles/deploy-addons/tasks/addon-helm.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- name: Check if Helm is installed - stat: - path: /usr/local/bin/helm - register: helm_installed - when: addons_enabled and "{{addons.helm is defined}}" - -- name: Install helm - shell: curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > /root/get_helm.sh - when: addons_enabled and "{{addons.ceph is defined}}" and helm_installed.stat.exists == False - -- name: Set file properties - file: - path: /root/get_helm.sh - mode: 0700 - when: addons_enabled and "{{addons.ceph is defined}}" and helm_installed.stat.exists == False - -- name: Install helm - shell: sh /root/get_helm.sh - when: addons_enabled and "{{addons.ceph is defined}}" and helm_installed.stat.exists == False diff --git a/roles/deploy-addons/tasks/addon-maas.yaml b/roles/deploy-addons/tasks/addon-maas.yaml deleted file mode 100644 index 7070dc43..00000000 --- a/roles/deploy-addons/tasks/addon-maas.yaml +++ /dev/null @@ -1,59 +0,0 @@ ---- -- name: Check if MAAS is Running - shell: hyperkube kubectl describe pod maas-region --namespace=maas - ignore_errors: true - register: maas_deployed - when: addons_enabled and "{{addons.maas is defined}}" - -- name: Check if Postgres is Running - shell: hyperkube kubectl describe pod postgresql-0 --namespace=maas - ignore_errors: true - register: postgres_deployed - when: addons_enabled and "{{addons.maas is defined}}" - -#Check every 15 seconds to make sure the tiller pod has fully come up. 
-- action: shell hyperkube kubectl get pods --all-namespaces | grep tiller - register: tiller_output - until: tiller_output.stdout.find("Running") != -1 - retries: 20 - delay: 15 - when: addons_enabled and "{{addons.maas is defined}}" - -- name: Run Make on all Helm charts - shell: make - environment: - HELM_HOME: /opt/openstack-helm/repos/openstack-helm/.helm - args: - chdir: /opt/openstack-helm/repos/openstack-helm/ - when: addons_enabled and "{{addons.maas is defined}}" and maas_deployed | failed - -- name: Deploy Postgres - shell: helm install postgresql --namespace=maas - environment: - HELM_HOME: /opt/openstack-helm/repos/openstack-helm/.helm - args: - chdir: /opt/openstack-helm/repos/openstack-helm/ - when: addons_enabled and "{{addons.maas is defined}}" and postgres_deployed | failed - -- action: shell hyperkube kubectl get pods --namespace maas - register: postgres_output - until: postgres_output.stdout.find("Running") != -1 - retries: 20 - delay: 15 - when: addons_enabled and "{{addons.maas is defined}}" - -- name: Deploy MaaS - shell: helm install maas --namespace=maas - environment: - HELM_HOME: /opt/openstack-helm/repos/openstack-helm/.helm - args: - chdir: /opt/openstack-helm/repos/openstack-helm/ - when: addons_enabled and "{{addons.maas is defined}}" and maas_deployed | failed - -#Check every 15 seconds until MaaS comes up -- action: shell hyperkube kubectl get pods --namespace maas - register: maas_output - until: maas_output.stdout.find("Running") != -1 - retries: 20 - delay: 15 - when: addons_enabled and "{{addons.maas is defined}}" diff --git a/roles/deploy-addons/tasks/addon-osh.yaml b/roles/deploy-addons/tasks/addon-osh.yaml deleted file mode 100644 index bec2f24f..00000000 --- a/roles/deploy-addons/tasks/addon-osh.yaml +++ /dev/null @@ -1,39 +0,0 @@ ---- -- name: Create directories for OpenStack Helm - file: - path: /opt/openstack-helm/repos/openstack-helm - state: directory - when: addons_enabled and "{{addons.osh is defined}}" - -- name: 
Checkout OpenStack-Helm - git: - repo: https://github.com/att-comdev/openstack-helm.git - dest: /opt/openstack-helm/repos/openstack-helm - update: true - when: addons_enabled and "{{addons.osh is defined}}" - -- name: Check for Helm/Tiller - shell: hyperkube kubectl get pods --namespace kube-system | grep tiller - ignore_errors: true - register: helm_running - when: addons_enabled and "{{addons.osh is defined}}" - -- name: Initialize Helm/Tiller - shell: helm init --home /opt/openstack-helm/repos/openstack-helm/.helm - environment: - HELM_HOME: /opt/openstack-helm/repos/openstack-helm/.helm - when: addons_enabled and "{{addons.osh is defined}}" and helm_running | failed - -- name: Helm Serve - shell: nohup helm serve --repo-path /opt/openstack-helm/repos/openstack-helm/.helm/repository/local & - environment: - HELM_HOME: /opt/openstack-helm/repos/openstack-helm/.helm - args: - chdir: /opt/openstack-helm/repos/openstack-helm/.helm - when: addons_enabled and "{{addons.osh is defined}}" and helm_running | failed - -- name: Add helm repositories - shell: helm repo add local http://localhost:8879/charts --home /opt/openstack-helm/repos/openstack-helm/.helm - args: - chdir: /opt/openstack-helm/repos/openstack-helm/.helm - when: addons_enabled and "{{addons.osh is defined}}" and helm_running | failed diff --git a/roles/deploy-addons/tasks/main.yaml b/roles/deploy-addons/tasks/main.yaml deleted file mode 100644 index 488c7f09..00000000 --- a/roles/deploy-addons/tasks/main.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- include: addon-dashboard.yaml -- include: addon-helm.yaml -- include: addon-osh.yaml -- include: addon-ceph.yaml -- include: addon-maas.yaml diff --git a/roles/deploy-addons/templates/kube-controller-manager.json.j2 b/roles/deploy-addons/templates/kube-controller-manager.json.j2 deleted file mode 100644 index 9e8b4c56..00000000 --- a/roles/deploy-addons/templates/kube-controller-manager.json.j2 +++ /dev/null @@ -1,75 +0,0 @@ -{ - "kind": "Pod", - "apiVersion": 
"v1", - "metadata": { - "name": "kube-controller-manager", - "namespace": "kube-system", - "creationTimestamp": null, - "labels": { - "component": "kube-controller-manager", - "tier": "control-plane" - } - }, - "spec": { - "volumes": [ - { - "name": "k8s", - "hostPath": { - "path": "/etc/kubernetes" - } - }, - { - "name": "certs", - "hostPath": { - "path": "/etc/ssl/certs" - } - } - ], - "containers": [ - { - "name": "kube-controller-manager", - "image": "quay.io/attcomdev/kube-controller-manager:{{ kube_controller_manager_version }}", - "command": [ - "kube-controller-manager", - "--address=127.0.0.1", - "--leader-elect", - "--master=127.0.0.1:8080", - "--cluster-name=kubernetes", - "--root-ca-file=/etc/kubernetes/pki/ca.pem", - "--service-account-private-key-file=/etc/kubernetes/pki/apiserver-key.pem", - "--cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem", - "--cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem", - "--insecure-experimental-approve-all-kubelet-csrs-for-group=system:kubelet-bootstrap" - ], - "resources": { - "requests": { - "cpu": "200m" - } - }, - "volumeMounts": [ - { - "name": "k8s", - "readOnly": true, - "mountPath": "/etc/kubernetes/" - }, - { - "name": "certs", - "mountPath": "/etc/ssl/certs" - } - ], - "livenessProbe": { - "httpGet": { - "path": "/healthz", - "port": 10252, - "host": "127.0.0.1" - }, - "initialDelaySeconds": 15, - "timeoutSeconds": 15, - "failureThreshold": 8 - } - } - ], - "hostNetwork": true - }, - "status": {} -} diff --git a/roles/deploy-bootstrap/tasks/deploy-bootkube.yaml b/roles/deploy-bootstrap/tasks/deploy-bootkube.yaml deleted file mode 100644 index f13307e0..00000000 --- a/roles/deploy-bootstrap/tasks/deploy-bootkube.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- name: Setup bootkube.service - when: - bootstrap_enabled - template: - src: bootkube.service - dest: /etc/systemd/system/bootkube.service - -- name: Run bootkube - when: - bootstrap_enabled - systemd: - name: bootkube - state: started - 
daemon_reload: yes diff --git a/roles/deploy-bootstrap/tasks/main.yaml b/roles/deploy-bootstrap/tasks/main.yaml deleted file mode 100644 index 3b2db9d4..00000000 --- a/roles/deploy-bootstrap/tasks/main.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- include: prep-host.yaml -- include: prep-bootkube.yaml -- include: prep-network.yaml -- include: prep-kubernetes.yaml -- include: deploy-bootkube.yaml diff --git a/roles/deploy-bootstrap/tasks/prep-bootkube.yaml b/roles/deploy-bootstrap/tasks/prep-bootkube.yaml deleted file mode 100644 index ee5b1ab7..00000000 --- a/roles/deploy-bootstrap/tasks/prep-bootkube.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- name: Ensures bootkube dir exists - when: - bootstrap_enabled - file: - path: /tmp/bootkube - state: directory - -- name: Extract bootkube binaries - when: - bootstrap_enabled - unarchive: - src: "https://github.com/kubernetes-incubator/bootkube/releases/download/{{ boot_kube_version }}/bootkube.tar.gz" - dest: /tmp/bootkube - remote_src: True - -- name: Render bootkube manifests - when: - bootstrap_enabled - command: "/tmp/bootkube/bin/linux/bootkube render --asset-dir=/tmp/bootkube/assets --experimental-self-hosted-etcd --etcd-servers=http://10.3.0.15:2379 --api-servers=https://{{ api_server_fqdn }}:443" - args: - creates: /etc/kubernetes/kubeconfig diff --git a/roles/deploy-bootstrap/tasks/prep-host.yaml b/roles/deploy-bootstrap/tasks/prep-host.yaml deleted file mode 100644 index 01a816c8..00000000 --- a/roles/deploy-bootstrap/tasks/prep-host.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Install base packages - when: - bootstrap_enabled - apt: - name: "{{ item }}" - state: present - with_items: - - "docker.io" - - "vim" - - "ethtool" - - "traceroute" - - "git" - - "build-essential" - - "lldpd" - -- name: Insert Temporary Hosts File Entry for FQDN Resolution - when: - bootstrap_enabled - lineinfile: - dest: /etc/hosts - line: "{{ hostvars[groups['master'][0]]['ansible_default_ipv4']['address'] }} {{ api_server_fqdn }}" - 
state: present diff --git a/roles/deploy-bootstrap/tasks/prep-kubernetes.yaml b/roles/deploy-bootstrap/tasks/prep-kubernetes.yaml deleted file mode 100644 index d28d5fc9..00000000 --- a/roles/deploy-bootstrap/tasks/prep-kubernetes.yaml +++ /dev/null @@ -1,29 +0,0 @@ ---- -- name: Ensures /etc/kubernetes dir exists - when: - bootstrap_enabled - file: - path: /etc/kubernetes - state: directory - -- name: copy kubeconfig credentials - when: - bootstrap_enabled - command: cp /tmp/bootkube/assets/auth/kubeconfig /etc/kubernetes/kubeconfig - args: - creates: /etc/kubernetes/kubeconfig - -- name: copy kubernetes manifests - when: - bootstrap_enabled - command: cp -a /tmp/bootkube/assets/manifests /etc/kubernetes/ - args: - creates: /etc/kubernetes/manifests - -- name: fetch kubeconfig - when: - bootstrap_enabled - fetch: - src: /etc/kubernetes/kubeconfig - dest: roles/deploy-kubelet/templates/kubeconfig - flat: yes diff --git a/roles/deploy-bootstrap/tasks/prep-network.yaml b/roles/deploy-bootstrap/tasks/prep-network.yaml deleted file mode 100644 index 77c977d0..00000000 --- a/roles/deploy-bootstrap/tasks/prep-network.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- name: Inject Custom manifests - kube-calico.yaml - when: - bootstrap_enabled - template: - src: kube-calico.yaml.j2 - dest: "/tmp/bootkube/assets/manifests/kube-flannel.yaml" - -- name: Inject Custom manifests - kube-calico-cfg.yaml - when: - bootstrap_enabled - template: - src: kube-calico-cfg.yaml.j2 - dest: "/tmp/bootkube/assets/manifests/kube-flannel-cfg.yaml" diff --git a/roles/deploy-bootstrap/templates/bootkube.service b/roles/deploy-bootstrap/templates/bootkube.service deleted file mode 100644 index 2f5dc7b6..00000000 --- a/roles/deploy-bootstrap/templates/bootkube.service +++ /dev/null @@ -1,10 +0,0 @@ -[Unit] -Description=Kubernetes Control Plane Bootstrapping -Documentation=https://github.com/kubernetes-incubator/bootkube - -[Service] -ExecStart=/tmp/bootkube/bin/linux/bootkube start 
--asset-dir=/tmp/bootkube/assets/ --experimental-self-hosted-etcd --etcd-server=http://127.0.0.1:12379 -Restart=on-failure - -[Install] -WantedBy=multi-user.target diff --git a/roles/deploy-bootstrap/templates/calico.yaml b/roles/deploy-bootstrap/templates/calico.yaml deleted file mode 100644 index 61371f0a..00000000 --- a/roles/deploy-bootstrap/templates/calico.yaml +++ /dev/null @@ -1,267 +0,0 @@ -# This ConfigMap is used to configure a self-hosted Calico installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: calico-config - namespace: kube-system -data: - # The location of your etcd cluster. This uses the Service clusterIP - # defined below. - etcd_endpoints: "http://10.96.232.136:6666" - - # Configure the Calico backend to use. - calico_backend: "bird" - - # The CNI network configuration to install on each node. - cni_network_config: |- - { - "name": "k8s-pod-network", - "type": "calico", - "etcd_endpoints": "__ETCD_ENDPOINTS__", - "log_level": "info", - "ipam": { - "type": "calico-ipam" - }, - "policy": { - "type": "k8s", - "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", - "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" - }, - "kubernetes": { - "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__" - } - } - ---- - -# This manifest installs the Calico etcd on the kubeadm master. This uses a DaemonSet -# to force it to run on the master even when the master isn't schedulable, and uses -# nodeSelector to ensure it only runs on the master. -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: calico-etcd - namespace: kube-system - labels: - k8s-app: calico-etcd -spec: - template: - metadata: - labels: - k8s-app: calico-etcd - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: | - [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, - {"key":"CriticalAddonsOnly", "operator":"Exists"}] - spec: - # Only run this pod on the master. 
- nodeSelector: - kubeadm.alpha.kubernetes.io/role: master - hostNetwork: true - containers: - - name: calico-etcd - image: gcr.io/google_containers/etcd:2.2.1 - env: - - name: CALICO_ETCD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - command: ["/bin/sh","-c"] - args: ["/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$CALICO_ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"] - volumeMounts: - - name: var-etcd - mountPath: /var/etcd - volumes: - - name: var-etcd - hostPath: - path: /var/etcd - ---- - -# This manfiest installs the Service which gets traffic to the Calico -# etcd. -apiVersion: v1 -kind: Service -metadata: - labels: - k8s-app: calico-etcd - name: calico-etcd - namespace: kube-system -spec: - # Select the calico-etcd pod running on the master. - selector: - k8s-app: calico-etcd - # This ClusterIP needs to be known in advance, since we cannot rely - # on DNS to get access to etcd. - clusterIP: 10.96.232.136 - ports: - - port: 6666 - ---- - -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node -spec: - selector: - matchLabels: - k8s-app: calico-node - template: - metadata: - labels: - k8s-app: calico-node - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: | - [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, - {"key":"CriticalAddonsOnly", "operator":"Exists"}] - spec: - hostNetwork: true - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. 
- - name: calico-node - image: quay.io/calico/node:v1.1.0 - env: - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # Enable BGP. Disable to enforce policy only. - - name: CALICO_NETWORKING_BACKEND - valueFrom: - configMapKeyRef: - name: calico-config - key: calico_backend - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - # Configure the IP Pool from which Pod IPs will be chosen. - - name: CALICO_IPV4POOL_CIDR - value: "192.168.0.0/16" - - name: CALICO_IPV4POOL_IPIP - value: "always" - # Disable IPv6 on Kubernetes. - - name: FELIX_IPV6SUPPORT - value: "false" - # Set Felix logging to "info" - - name: FELIX_LOGSEVERITYSCREEN - value: "info" - # Auto-detect the BGP IP address. - - name: IP - value: "" - securityContext: - privileged: true - resources: - requests: - cpu: 250m - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - # This container installs the Calico CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: quay.io/calico/cni:v1.6.1 - command: ["/install-cni.sh"] - env: - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - volumes: - # Used by calico/node. 
- - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - ---- - -# This manifest deploys the Calico policy controller on Kubernetes. -# See https://github.com/projectcalico/k8s-policy -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: calico-policy-controller - namespace: kube-system - labels: - k8s-app: calico-policy -spec: - # The policy controller can only have a single active instance. - replicas: 1 - strategy: - type: Recreate - template: - metadata: - name: calico-policy-controller - namespace: kube-system - labels: - k8s-app: calico-policy-controller - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: | - [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, - {"key":"CriticalAddonsOnly", "operator":"Exists"}] - spec: - # The policy controller must run in the host network namespace so that - # it isn't governed by policy that would prevent it from working. - hostNetwork: true - containers: - - name: calico-policy-controller - image: quay.io/calico/kube-policy-controller:v0.5.4 - env: - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # The location of the Kubernetes API. Use the default Kubernetes - # service for API access. - - name: K8S_API - value: "https://kubernetes.default:443" - # Since we're running in the host namespace and might not have KubeDNS - # access, configure the container's /etc/hosts to resolve - # kubernetes.default to the correct service clusterIP. 
- - name: CONFIGURE_ETC_HOSTS - value: "true" diff --git a/roles/deploy-bootstrap/templates/kube-calico-cfg.yaml.j2 b/roles/deploy-bootstrap/templates/kube-calico-cfg.yaml.j2 deleted file mode 100644 index 45850e3e..00000000 --- a/roles/deploy-bootstrap/templates/kube-calico-cfg.yaml.j2 +++ /dev/null @@ -1,144 +0,0 @@ -# This ConfigMap is used to configure a self-hosted Calico installation without ETCD -kind: ConfigMap -apiVersion: v1 -metadata: - name: calico-config - namespace: kube-system -data: - # The CNI network configuration to install on each node. - cni_network_config: |- - { - "name": "k8s-pod-network", - "type": "calico", - "log_level": "debug", - "datastore_type": "kubernetes", - "hostname": "__KUBERNETES_NODE_NAME__", - "ipam": { - "type": "host-local", - "subnet": "usePodCidr" - }, - "policy": { - "type": "k8s", - "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" - }, - "kubernetes": { - "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", - "kubeconfig": "__KUBECONFIG_FILEPATH__" - } - } - ---- - -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node -spec: - selector: - matchLabels: - k8s-app: calico-node - template: - metadata: - labels: - k8s-app: calico-node - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: | - [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, - {"key":"CriticalAddonsOnly", "operator":"Exists"}] - spec: - hostNetwork: true - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: quay.io/calico/node:v1.1.0 - env: - # Use Kubernetes API as the backing datastore. 
- - name: DATASTORE_TYPE - value: "kubernetes" - # Enable felix debug logging. - - name: FELIX_LOGSEVERITYSCREEN - value: "debug" - # Don't enable BGP. - - name: CALICO_NETWORKING_BACKEND - value: "none" - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - # Disable IPV6 on Kubernetes. - - name: FELIX_IPV6SUPPORT - value: "false" - # Wait for the datastore. - - name: WAIT_FOR_DATASTORE - value: "true" - # The Calico IPv4 pool to use. This should match `--cluster-cidr` - - name: CALICO_IPV4POOL_CIDR - value: "10.244.0.0/16" - # Set based on the k8s node name. - - name: NODENAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # No IP address needed. - - name: IP - value: "" - securityContext: - privileged: true - resources: - requests: - cpu: 250m - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - # This container installs the Calico CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: quay.io/calico/cni:v1.6.1 - command: ["/install-cni.sh"] - env: - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - # Set the hostname based on the k8s node name. - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - # Used to install CNI. 
- - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d diff --git a/roles/deploy-bootstrap/templates/kube-calico.yaml.j2 b/roles/deploy-bootstrap/templates/kube-calico.yaml.j2 deleted file mode 100644 index a9c0abcc..00000000 --- a/roles/deploy-bootstrap/templates/kube-calico.yaml.j2 +++ /dev/null @@ -1 +0,0 @@ -#Nothing To Be Seen Here. Prevents Bootkube from coming up diff --git a/roles/deploy-bootstrap/templates/kube-controller-manager.json b/roles/deploy-bootstrap/templates/kube-controller-manager.json deleted file mode 100644 index 9e8b4c56..00000000 --- a/roles/deploy-bootstrap/templates/kube-controller-manager.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "kind": "Pod", - "apiVersion": "v1", - "metadata": { - "name": "kube-controller-manager", - "namespace": "kube-system", - "creationTimestamp": null, - "labels": { - "component": "kube-controller-manager", - "tier": "control-plane" - } - }, - "spec": { - "volumes": [ - { - "name": "k8s", - "hostPath": { - "path": "/etc/kubernetes" - } - }, - { - "name": "certs", - "hostPath": { - "path": "/etc/ssl/certs" - } - } - ], - "containers": [ - { - "name": "kube-controller-manager", - "image": "quay.io/attcomdev/kube-controller-manager:{{ kube_controller_manager_version }}", - "command": [ - "kube-controller-manager", - "--address=127.0.0.1", - "--leader-elect", - "--master=127.0.0.1:8080", - "--cluster-name=kubernetes", - "--root-ca-file=/etc/kubernetes/pki/ca.pem", - "--service-account-private-key-file=/etc/kubernetes/pki/apiserver-key.pem", - "--cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem", - "--cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem", - "--insecure-experimental-approve-all-kubelet-csrs-for-group=system:kubelet-bootstrap" - ], - "resources": { - "requests": { - "cpu": "200m" - } - }, - "volumeMounts": [ - { - "name": "k8s", - "readOnly": true, - "mountPath": "/etc/kubernetes/" - }, - { - "name": "certs", - "mountPath": "/etc/ssl/certs" 
- } - ], - "livenessProbe": { - "httpGet": { - "path": "/healthz", - "port": 10252, - "host": "127.0.0.1" - }, - "initialDelaySeconds": 15, - "timeoutSeconds": 15, - "failureThreshold": 8 - } - } - ], - "hostNetwork": true - }, - "status": {} -} diff --git a/roles/deploy-bootstrap/templates/kube-controller-manager.yaml b/roles/deploy-bootstrap/templates/kube-controller-manager.yaml deleted file mode 100644 index 49f6673d..00000000 --- a/roles/deploy-bootstrap/templates/kube-controller-manager.yaml +++ /dev/null @@ -1,45 +0,0 @@ ---- -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: kube-controller-manager - namespace: kube-system - labels: - k8s-app: kube-controller-manager -spec: - replicas: 2 - template: - metadata: - labels: - k8s-app: kube-controller-manager - spec: - nodeSelector: - master: "true" - containers: - - name: kube-controller-manager - image: quay.io/attcomdev/kube-controller-manager:{{ kube_controller_manager_version }} - command: - - ./hyperkube - - controller-manager - - --allocate-node-cidrs=true - - --configure-cloud-routes=false - - --cluster-cidr=10.2.0.0/16 - - --root-ca-file=/etc/kubernetes/secrets/ca.crt - - --service-account-private-key-file=/etc/kubernetes/secrets/service-account.key - - --leader-elect=true - - --cloud-provider= - volumeMounts: - - name: secrets - mountPath: /etc/kubernetes/secrets - readOnly: true - - name: ssl-host - mountPath: /etc/ssl/certs - readOnly: true - volumes: - - name: secrets - secret: - secretName: kube-controller-manager - - name: ssl-host - hostPath: - path: /usr/share/ca-certificates - dnsPolicy: Default # Don't use cluster DNS. 
diff --git a/roles/deploy-kubelet/handlers/main.yaml b/roles/deploy-kubelet/handlers/main.yaml deleted file mode 100644 index ebdb9576..00000000 --- a/roles/deploy-kubelet/handlers/main.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -- name: restart kubelet - service: name=kubelet state=restarted diff --git a/roles/deploy-kubelet/tasks/deploy-cluster-ha.yaml b/roles/deploy-kubelet/tasks/deploy-cluster-ha.yaml deleted file mode 100644 index 5d7241db..00000000 --- a/roles/deploy-kubelet/tasks/deploy-cluster-ha.yaml +++ /dev/null @@ -1,95 +0,0 @@ ---- -- name: Grab the ETCD IP - shell: hyperkube kubectl get services --all-namespaces | grep "etcd-service" | awk '{ print $3 }' - register: etcd_service_ip - -# - name: Deploy Calico manifest template -# template: -# src: calico.yaml -# dest: /opt/openstack-helm/manifests/calico.yaml -# register: calico_changed -# -# - name: Install calicoctl tool -# get_url: -# url: "https://github.com/projectcalico/calicoctl/releases/download/{{ calicoctl_version }}/calicoctl" -# dest: /usr/bin/calicoctl -# validate_certs: false -# mode: 0755 -# -# - name: Check for Calico deployment -# shell: hyperkube kubectl get services --all-namespaces | grep calico -# ignore_errors: True -# register: calico_deployed -# -# - name: Deploy BGP Peer Manifest (1) -# template: -# src: calico-peer.yaml -# dest: /opt/openstack-helm/manifests/calico-peer.yaml -# -# - name: Deploy BGP Peer Manifest (2) -# template: -# src: calico-peer2.yaml -# dest: /opt/openstack-helm/manifests/calico-peer2.yaml -# -# - name: Create Calico Pods -# shell: hyperkube kubectl create -f /opt/openstack-helm/manifests/calico.yaml -# when: calico_deployed | failed and "{{ inventory_hostname }} in groups['bootstrap']" -# -# - action: shell hyperkube kubectl get pods --all-namespaces | grep calico -# register: calico_output -# until: calico_output.stdout.find("Running") != -1 -# retries: 20 -# delay: 15 -# -# - name: Create BGP Peering(1) -# shell: calicoctl create -f 
/opt/openstack-helm/manifests/calico-peer.yaml --skip-exists -# environment: -# ETCD_ENDPOINTS: "http://{{ etcd_service_ip.stdout }}:2379" -# when: calico_deployed | failed and "{{ inventory_hostname }} in groups['bootstrap']" -# -# - name: Create BGP Peering(2) -# shell: calicoctl create -f /opt/openstack-helm/manifests/calico-peer2.yaml --skip-exists -# environment: -# ETCD_ENDPOINTS: "http://{{ etcd_service_ip.stdout }}:2379" -# when: calico_deployed | failed and "{{ inventory_hostname }} in groups['bootstrap']" - -- name: Check ClusterHA in KubeDNS - shell: hyperkube kubectl get services --all-namespaces | grep cluster-ha - ignore_errors: true - register: cluster_ha_present - -- name: Install ClusterHA ConfigMaps - template: - src: cluster-ha.j2 - dest: /opt/openstack-helm/manifests/cluster-ha.yaml - register: cluster_ha_configmaps - -- name: Delete ClusterHA if present - shell: hyperkube kubectl delete -f /opt/openstack-helm/manifests/cluster-ha.yaml - when: cluster_ha_present | succeeded and cluster_ha_configmaps | changed - ignore_errors: true - -- name: Deploy ClusterHA ConfigMaps - shell: hyperkube kubectl create -f /opt/openstack-helm/manifests/cluster-ha.yaml - when: cluster_ha_configmaps | changed - -- name: Determine KubeDNS Server - shell: hyperkube kubectl get svc kube-dns --namespace=kube-system | awk '{print $2}' | sed -n '$p' - register: kube_dns_server - -- name: Add KubeDNS to /etc/resolv.conf - lineinfile: - dest: /etc/resolv.conf - insertafter: "^# DO" - line: "nameserver {{ kube_dns_server.stdout }}" - state: present - backup: true - -- name: Remove /etc/hosts entry if present - lineinfile: - dest: /etc/hosts - line: "{{ hostvars[groups['master'][0]]['ansible_default_ipv4']['address'] }} {{ api_server_fqdn }}" - state: absent - -- name: Test Kubernetes cluster - shell: hyperkube kubectl get nodes diff --git a/roles/deploy-kubelet/tasks/deploy-kubernetes.yaml b/roles/deploy-kubelet/tasks/deploy-kubernetes.yaml deleted file mode 100644 index 
b3781e22..00000000 --- a/roles/deploy-kubelet/tasks/deploy-kubernetes.yaml +++ /dev/null @@ -1,64 +0,0 @@ ---- -#TODO: Version kubelet, with checksum -- name: Install kubelet - get_url: - url: "http://storage.googleapis.com/kubernetes-release/release/{{ kubelet_version }}/bin/linux/amd64/kubelet" - dest: /usr/bin/kubelet -# checksum: md5:33af080e876b1f3d481b0ff1ceec3ab8 - mode: 0755 - -- name: Ensures /etc/kubernetes dir exists - file: - path: /etc/kubernetes - state: directory - -#Gets Kubeconfig from the bootstrap node. See roles/bootstrap/tasks/main.yml -- name: Install kubeconfig - template: - src: kubeconfig - dest: /etc/kubernetes/kubeconfig - -- name: Setup kubelet.service - template: - src: kubelet.service - dest: /etc/systemd/system/kubelet.service - notify: restart kubelet - -- name: Enable Kubelet to be started on boot - systemd: - name: kubelet - state: started - enabled: yes - daemon_reload: yes - -- name: Create Directories for Kubernetes manifests - file: - path: /opt/openstack-helm/manifests - state: directory - -#Wait for Kubeapi Server to come up -- action: shell hyperkube kubectl get pods --all-namespaces | grep kube-apiserver - register: kubeapi_output - until: kubeapi_output.stdout.find("Running") != -1 - retries: 40 - delay: 15 - -#Wait for cluster to stabilize across all nodes -- action: shell hyperkube kubectl get pods --all-namespaces - register: cluster_stable - until: '"ContainerCreating" not in cluster_stable.stdout' - retries: 40 - delay: 15 - -#Re-Deploy Calico with ETCD -- name: Inject Custom manifests - kube-calico.yaml - template: - src: kube-calico.yaml.j2 - dest: "/tmp/bootkube/assets/manifests/kube-flannel.yaml" - notify: restart kubelet - -- name: Inject Custom manifests - kube-calico-cfg.yaml - template: - src: kube-calico-cfg.yaml.j2 - dest: "/tmp/bootkube/assets/manifests/kube-flannel-cfg.yaml" - notify: restart kubelet diff --git a/roles/deploy-kubelet/tasks/main.yaml b/roles/deploy-kubelet/tasks/main.yaml deleted file mode 
100644 index 5707a6be..00000000 --- a/roles/deploy-kubelet/tasks/main.yaml +++ /dev/null @@ -1,6 +0,0 @@ -#Deploys Kubelet ---- -- include: prep-host.yaml -- include: prep-hyperkube.yaml -- include: prep-cni.yaml -- include: deploy-kubernetes.yaml diff --git a/roles/deploy-kubelet/tasks/prep-cni.yaml b/roles/deploy-kubelet/tasks/prep-cni.yaml deleted file mode 100644 index db2ad063..00000000 --- a/roles/deploy-kubelet/tasks/prep-cni.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: Ensures CNI dir exists - file: - path: /opt/cni/bin - state: directory - -- name: Install CNI binaries - unarchive: - src: "https://github.com/containernetworking/cni/releases/download/{{ cni_version }}/cni-amd64-{{ cni_version }}.tgz" - dest: /opt/cni/bin - remote_src: True diff --git a/roles/deploy-kubelet/tasks/prep-host.yaml b/roles/deploy-kubelet/tasks/prep-host.yaml deleted file mode 100644 index ee7e4bb8..00000000 --- a/roles/deploy-kubelet/tasks/prep-host.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- name: Install base packages - apt: - name: "{{ item }}" - state: present - with_items: - - "docker.io" - - "vim" - - "ethtool" - - "traceroute" - - "git" - - "build-essential" - - "lldpd" - -- name: Insert Temporary Hosts File Entry for FQDN Resolution - lineinfile: - dest: /etc/hosts - line: "{{ hostvars[groups['master'][0]]['ansible_default_ipv4']['address'] }} {{ api_server_fqdn }}" - state: present diff --git a/roles/deploy-kubelet/tasks/prep-hyperkube.yaml b/roles/deploy-kubelet/tasks/prep-hyperkube.yaml deleted file mode 100644 index 25b11dea..00000000 --- a/roles/deploy-kubelet/tasks/prep-hyperkube.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: Downloads Hyperkube - get_url: - url: "http://storage.googleapis.com/kubernetes-release/release/{{ hyperkube_version }}/bin/linux/amd64/hyperkube" - dest: /usr/bin/hyperkube - -- name: Set hyperkube permissions - file: - path: /usr/bin/hyperkube - mode: 0755 diff --git a/roles/deploy-kubelet/templates/calico-peer.yaml 
b/roles/deploy-kubelet/templates/calico-peer.yaml deleted file mode 100644 index d95cdf74..00000000 --- a/roles/deploy-kubelet/templates/calico-peer.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -kind: bgpPeer -metadata: - peerIP: {{ calico_peer1 }} - scope: node - node: {{ ansible_hostname }} -spec: - asNumber: 64686 diff --git a/roles/deploy-kubelet/templates/calico-peer2.yaml b/roles/deploy-kubelet/templates/calico-peer2.yaml deleted file mode 100644 index 1ff5b4d1..00000000 --- a/roles/deploy-kubelet/templates/calico-peer2.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -kind: bgpPeer -metadata: - peerIP: {{ calico_peer2 }} - scope: node - node: {{ ansible_hostname }} -spec: - asNumber: 64686 diff --git a/roles/deploy-kubelet/templates/calico.yaml b/roles/deploy-kubelet/templates/calico.yaml deleted file mode 100644 index 57a61bc3..00000000 --- a/roles/deploy-kubelet/templates/calico.yaml +++ /dev/null @@ -1,323 +0,0 @@ -# This ConfigMap is used to configure a self-hosted Calico installation. -# This ConfigMap is used to configure a self-hosted Calico installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: calico-config - namespace: kube-system -data: - # The location of your etcd cluster. This uses the Service clusterIP - # defined below. - #etcd_endpoints: "http://10.96.232.136:6666" - #etcd_endpoints: "http://10.200.232.136:6666" - etcd_endpoints: "http://{{ etcd_service_ip.stdout }}:2379" - - # True enables BGP networking, false tells Calico to enforce - # policy only, using native networking. - enable_bgp: "true" - - # The CNI network configuration to install on each node. 
- cni_network_config: |- - { - "name": "k8s-pod-network", - "type": "calico", - "etcd_endpoints": "__ETCD_ENDPOINTS__", - "log_level": "info", - "ipam": { - "type": "calico-ipam" - }, - "policy": { - "type": "k8s", - "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", - "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" - }, - "kubernetes": { - "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__" - } - } - - # The default IP Pool to be created for the cluster. - # Pod IP addresses will be assigned from this pool. - ippool.yaml: | - apiVersion: v1 - kind: ipPool - metadata: - cidr: 10.200.0.0/16 - spec: - ipip: - enabled: true - nat-outgoing: true - ---- - -# This manifest installs the Calico etcd on the kubeadm master. This uses a DaemonSet -# to force it to run on the master even when the master isn't schedulable, and uses -# nodeSelector to ensure it only runs on the master. -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: calico-etcd - namespace: kube-system - labels: - k8s-app: calico-etcd -spec: - template: - metadata: - labels: - k8s-app: calico-etcd - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: | - [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, - {"key":"CriticalAddonsOnly", "operator":"Exists"}] - spec: - # Only run this pod on the master. 
- nodeSelector: - kubeadm.alpha.kubernetes.io/role: master - hostNetwork: true - containers: - - name: calico-etcd - image: gcr.io/google_containers/etcd:2.2.1 - env: - - name: CALICO_ETCD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - command: ["/bin/sh","-c"] - args: ["/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$CALICO_ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"] - volumeMounts: - - name: var-etcd - mountPath: /var/etcd - volumes: - - name: var-etcd - hostPath: - path: /var/etcd - ---- - -# This manfiest installs the Service which gets traffic to the Calico -# etcd. -apiVersion: v1 -kind: Service -metadata: - labels: - k8s-app: calico-etcd - name: calico-etcd - namespace: kube-system -spec: - # Select the calico-etcd pod running on the master. - selector: - k8s-app: calico-etcd - # This ClusterIP needs to be known in advance, since we cannot rely - # on DNS to get access to etcd. - #clusterIP: 10.96.232.136 - clusterIP: 10.3.0.190 - ports: - - port: 6666 - ---- - -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node -spec: - selector: - matchLabels: - k8s-app: calico-node - template: - metadata: - labels: - k8s-app: calico-node - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: | - [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, - {"key":"CriticalAddonsOnly", "operator":"Exists"}] - spec: - hostNetwork: true - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. 
- - name: calico-node - image: quay.io/calico/node:v1.0.2 - env: - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # Enable BGP. Disable to enforce policy only. - - name: CALICO_NETWORKING - valueFrom: - configMapKeyRef: - name: calico-config - key: enable_bgp - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - # Don't configure a default pool. This is done by the Job - # below. - - name: NO_DEFAULT_POOLS - value: "true" - # Auto-detect the BGP IP address. - - name: IP - value: "" - securityContext: - privileged: true - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - # This container installs the Calico CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: calico/cni:v1.5.6 - command: ["/install-cni.sh"] - env: - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - ---- - -# This manifest deploys the Calico policy controller on Kubernetes. 
-# See https://github.com/projectcalico/k8s-policy -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: calico-policy-controller - namespace: kube-system - labels: - k8s-app: calico-policy -spec: - # The policy controller can only have a single active instance. - replicas: 1 - strategy: - type: Recreate - template: - metadata: - name: calico-policy-controller - namespace: kube-system - labels: - k8s-app: calico-policy-controller - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: | - [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, - {"key":"CriticalAddonsOnly", "operator":"Exists"}] - spec: - # The policy controller must run in the host network namespace so that - # it isn't governed by policy that would prevent it from working. - hostNetwork: true - containers: - - name: calico-policy-controller - image: calico/kube-policy-controller:v0.5.2 - env: - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # The location of the Kubernetes API. Use the default Kubernetes - # service for API access. - - name: K8S_API - value: "https://kubernetes.default:443" - # Since we're running in the host namespace and might not have KubeDNS - # access, configure the container's /etc/hosts to resolve - # kubernetes.default to the correct service clusterIP. 
- - name: CONFIGURE_ETC_HOSTS - value: "true" - ---- - -## This manifest deploys a Job which performs one time -# configuration of Calico -apiVersion: batch/v1 -kind: Job -metadata: - name: configure-calico - namespace: kube-system - labels: - k8s-app: calico -spec: - template: - metadata: - name: configure-calico - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: | - [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, - {"key":"CriticalAddonsOnly", "operator":"Exists"}] - spec: - hostNetwork: true - restartPolicy: OnFailure - containers: - # Writes basic configuration to datastore. - - name: configure-calico - image: calico/ctl:v1.0.2 - args: - - apply - - -f - - /etc/config/calico/ippool.yaml - volumeMounts: - - name: config-volume - mountPath: /etc/config - env: - # The location of the etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - volumes: - - name: config-volume - configMap: - name: calico-config - items: - - key: ippool.yaml - path: calico/ippool.yaml diff --git a/roles/deploy-kubelet/templates/cluster-ha.j2 b/roles/deploy-kubelet/templates/cluster-ha.j2 deleted file mode 100644 index 8ce9b834..00000000 --- a/roles/deploy-kubelet/templates/cluster-ha.j2 +++ /dev/null @@ -1,23 +0,0 @@ ---- -kind: Service -apiVersion: v1 -metadata: - name: cluster-ha -spec: - clusterIP: None - ports: - - protocol: TCP - port: 443 - targetPort: 443 ---- -kind: Endpoints -apiVersion: v1 -metadata: - name: cluster-ha -subsets: -- addresses: -{% for node in groups['master'] %} - - ip: {{ hostvars[node]['ansible_default_ipv4']['address'] }} -{% endfor %} - ports: - - port: 443 diff --git a/roles/deploy-kubelet/templates/kube-calico-cfg.yaml.j2 b/roles/deploy-kubelet/templates/kube-calico-cfg.yaml.j2 deleted file mode 100644 index 96f94933..00000000 --- a/roles/deploy-kubelet/templates/kube-calico-cfg.yaml.j2 +++ /dev/null @@ -1,53 +0,0 @@ -# 
This ConfigMap is used to configure a self-hosted Calico installation. -# Becomes kube-flannel-cfg.yaml once deployed on target host -kind: ConfigMap -apiVersion: v1 -metadata: - name: calico-config - namespace: kube-system -data: - # Configure this with the location of your etcd cluster. - etcd_endpoints: "http://10.23.19.16:2379" - #etcd_endpoints: "http://127.0.0.1:2379" - - # Configure the Calico backend to use. - calico_backend: "bird" - - # The CNI network configuration to install on each node. - cni_network_config: |- - { - "name": "k8s-pod-network", - "type": "calico", - "etcd_endpoints": "__ETCD_ENDPOINTS__", - "etcd_key_file": "__ETCD_KEY_FILE__", - "etcd_cert_file": "__ETCD_CERT_FILE__", - "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__", - "log_level": "info", - "ipam": { - "type": "calico-ipam" - }, - "policy": { - "type": "k8s", - "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", - "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" - }, - "kubernetes": { - "kubeconfig": "__KUBECONFIG_FILEPATH__" - } - } - - # The default IP Pool to be created for the cluster. - # Pod IP addresses will be assigned from this pool. - ippool.yaml: | - apiVersion: v1 - kind: ipPool - metadata: - cidr: 10.2.0.0/16 - spec: - nat-outgoing: true - - # If you're using TLS enabled etcd uncomment the following. - # You must also populate the Secret below with these files. - etcd_ca: "" # "/calico-secrets/etcd-ca" - etcd_cert: "" # "/calico-secrets/etcd-cert" - etcd_key: "" # "/calico-secrets/etcd-key" diff --git a/roles/deploy-kubelet/templates/kube-calico.yaml.j2 b/roles/deploy-kubelet/templates/kube-calico.yaml.j2 deleted file mode 100644 index b2bb8f54..00000000 --- a/roles/deploy-kubelet/templates/kube-calico.yaml.j2 +++ /dev/null @@ -1,286 +0,0 @@ -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. 
-# This file becomes kube-flannel.yaml once deployed to overwrite the default bootkube deployment -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node -spec: - selector: - matchLabels: - k8s-app: calico-node - template: - metadata: - labels: - k8s-app: calico-node - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: | - [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, - {"key":"CriticalAddonsOnly", "operator":"Exists"}] - spec: - hostNetwork: true - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: quay.io/calico/node:v1.1.1 - env: - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # Choose the backend to use. - - name: CALICO_NETWORKING_BACKEND - valueFrom: - configMapKeyRef: - name: calico-config - key: calico_backend - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - # Don't configure a default pool. This is done by the Job - # below. - - name: NO_DEFAULT_POOLS - value: "true" - - name: FELIX_LOGSEVERITYSCREEN - value: "info" - # Location of the CA certificate for etcd. - - name: ETCD_CA_CERT_FILE - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_ca - # Location of the client key for etcd. - - name: ETCD_KEY_FILE - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_key - # Location of the client certificate for etcd. - - name: ETCD_CERT_FILE - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_cert - # Auto-detect the BGP IP address. 
- - name: IP - value: "" - securityContext: - privileged: true - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - # - mountPath: /calico-secrets - # name: etcd-certs - # This container installs the Calico CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: quay.io/calico/cni:v1.6.2 - command: ["/install-cni.sh"] - env: - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - # - mountPath: /calico-secrets - # name: etcd-certs - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Mount in the etcd TLS secrets. - # - name: etcd-certs - # secret: - # secretName: calico-etcd-secrets - - ---- - -# This manifest deploys the Calico policy controller on Kubernetes. -# See https://github.com/projectcalico/k8s-policy -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: calico-policy-controller - namespace: kube-system - labels: - k8s-app: calico-policy - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: | - [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, - {"key":"CriticalAddonsOnly", "operator":"Exists"}] -spec: - # The policy controller can only have a single active instance. 
- replicas: 1 - strategy: - type: Recreate - template: - metadata: - name: calico-policy-controller - namespace: kube-system - labels: - k8s-app: calico-policy - spec: - # The policy controller must run in the host network namespace so that - # it isn't governed by policy that would prevent it from working. - hostNetwork: true - containers: - - name: calico-policy-controller - image: quay.io/calico/kube-policy-controller:v0.5.4 - env: - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # Location of the CA certificate for etcd. - - name: ETCD_CA_CERT_FILE - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_ca - # Location of the client key for etcd. - - name: ETCD_KEY_FILE - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_key - # Location of the client certificate for etcd. - - name: ETCD_CERT_FILE - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_cert - # The location of the Kubernetes API. Use the default Kubernetes - # service for API access. - - name: K8S_API - value: "https://kubernetes.default:443" - # Since we're running in the host namespace and might not have KubeDNS - # access, configure the container's /etc/hosts to resolve - # kubernetes.default to the correct service clusterIP. - - name: CONFIGURE_ETC_HOSTS - value: "true" - # volumeMounts: - # # Mount in the etcd TLS secrets. - # - mountPath: /calico-secrets - # name: etcd-certs - # volumes: - # Mount in the etcd TLS secrets. 
- # - name: etcd-certs - # secret: - # secretName: calico-etcd-secrets - ---- - -## This manifest deploys a Job which performs one time -# configuration of Calico -apiVersion: batch/v1 -kind: Job -metadata: - name: configure-calico - namespace: kube-system - labels: - k8s-app: calico -spec: - template: - metadata: - name: configure-calico - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: | - [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, - {"key":"CriticalAddonsOnly", "operator":"Exists"}] - spec: - hostNetwork: true - restartPolicy: OnFailure - containers: - # Writes basic configuration to datastore. - - name: configure-calico - image: calico/ctl:v1.1.1 - args: - - apply - - -f - - /etc/config/calico/ippool.yaml - volumeMounts: - - name: config-volume - mountPath: /etc/config - # Mount in the etcd TLS secrets. - # - mountPath: /calico-secrets - # name: etcd-certs - env: - # The location of the etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # Location of the CA certificate for etcd. - - name: ETCD_CA_CERT_FILE - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_ca - # Location of the client key for etcd. - - name: ETCD_KEY_FILE - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_key - # Location of the client certificate for etcd. - - name: ETCD_CERT_FILE - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_cert - volumes: - - name: config-volume - configMap: - name: calico-config - items: - - key: ippool.yaml - path: calico/ippool.yaml - # Mount in the etcd TLS secrets. 
- # - name: etcd-certs - # secret: - # secretName: calico-etcd-secrets diff --git a/roles/group_vars/all.yaml b/roles/group_vars/all.yaml deleted file mode 100644 index 62a835f1..00000000 --- a/roles/group_vars/all.yaml +++ /dev/null @@ -1,26 +0,0 @@ -#Default Override-able variables for bootstrap role -boot_kube_version: "v0.3.13" -bootstrap_enabled: "true" - -#For DNS Resilliency, override this with FQDN in your environment which resolves to all "master" servers -api_server_fqdn: "kubeapi.test.local" - -#Default Override-able variables for the Kubelet role -cni_version: "v0.5.2" -hyperkube_version: "v1.5.6" -kubelet_version: "v1.5.6" -calicoctl_version: "v1.1.0" - -#Calico Peering - Physical Switch Fabric IPs -calico_peer1: 10.23.21.2 -calico_peer2: 10.23.21.3 - -## Kubernetes Add-Ons: -# Optional Items: kube_dashboard, kube_helm (more to come). -addons_enabled: false -addons: - - dashboard - - helm - - osh - - ceph - - maas diff --git a/scripts/common/func.sh b/scripts/common/func.sh new file mode 100644 index 00000000..26742d65 --- /dev/null +++ b/scripts/common/func.sh @@ -0,0 +1,64 @@ +# Copyright 2017 The Promenade Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +function validate_environment { + local ERRORS= + + if [ "x${NODE_HOSTNAME}" = "x" ]; then + echo Error: NODE_HOSTNAME not defined, but required. + ERRORS=1 + fi + + if ! 
docker info; then + cat < /target/etc/systemd/system/kubelet.service + chown root:root /target/etc/systemd/system/kubelet.service + chmod 644 /target/etc/systemd/system/kubelet.service + + chroot --userspec root:root /target /bin/bash < ./scripts/start-kubelet.sh +} diff --git a/scripts/common/start-kubelet.sh b/scripts/common/start-kubelet.sh new file mode 100755 index 00000000..b94787d4 --- /dev/null +++ b/scripts/common/start-kubelet.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# +# Copyright 2017 The Promenade Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -ex + +systemctl daemon-reload +systemctl enable kubelet.service +systemctl start kubelet.service diff --git a/scripts/entrypoint-genesis.sh b/scripts/entrypoint-genesis.sh new file mode 100755 index 00000000..c45ab7d9 --- /dev/null +++ b/scripts/entrypoint-genesis.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# +# Copyright 2017 The Promenade Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -ex + +source ./scripts/env.sh +source ./scripts/func.sh + +validate_environment +# XXX validate_genesis_assets + +docker load -i ./genesis-images.tar + +install_assets +install_cni +install_kubelet + +docker run --rm \ + -v /etc/kubernetes:/etc/kubernetes \ + quay.io/coreos/bootkube:${BOOTKUBE_VERSION} \ + /bootkube start \ + --asset-dir=/etc/kubernetes diff --git a/scripts/entrypoint-join.sh b/scripts/entrypoint-join.sh new file mode 100755 index 00000000..b2c1ceae --- /dev/null +++ b/scripts/entrypoint-join.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# +# Copyright 2017 The Promenade Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -ex + +source ./scripts/env.sh +source ./scripts/func.sh + +validate_environment +# XXX validate_join_assets + +install_assets +install_cni +install_kubelet diff --git a/site.yaml b/site.yaml deleted file mode 100644 index a17996e6..00000000 --- a/site.yaml +++ /dev/null @@ -1,27 +0,0 @@ -- hosts: bootstrap - remote_user: ubuntu - become: yes - become_method: sudo - roles: - - deploy-bootstrap - -- hosts: master - remote_user: ubuntu - become: yes - become_method: sudo - roles: - - deploy-kubelet - -- hosts: workers - remote_user: ubuntu - become: yes - become_method: sudo - roles: - - deploy-kubelet - -#- hosts: master -# remote_user: ubuntu -# become: yes -# become_method: sudo -# roles: -# - deploy-addons diff --git a/test-install.sh b/test-install.sh new file mode 100755 index 00000000..0376ec62 --- /dev/null +++ b/test-install.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +set -ex + +# Setup master +vagrant ssh n0 <