Merge pull request #5 from mark-burnett/rebuild

Replace ansible-based system with container-based system
Alan Meadows 2017-05-24 18:56:44 -04:00 committed by GitHub
commit 74d6c48653
84 changed files with 1928 additions and 2108 deletions

2
.dockerignore Normal file

@ -0,0 +1,2 @@
Makefile
promenade-*.tar

11
.gitignore vendored

@ -1,3 +1,8 @@
*.retry
hosts-deploy.yaml
roles/deploy-kubelet/templates/kubeconfig
/*.log
/*.tar
/.vagrant
/cni.tgz
/env.sh
/helm
/kubelet
/linux-amd64

37
Dockerfile.genesis Normal file

@ -0,0 +1,37 @@
# Copyright 2017 The Promenade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:xenial
ENV NODE_HOSTNAME=
RUN apt-get update -qq \
&& apt-get install --no-install-recommends -y \
docker.io \
gettext-base \
&& rm -rf /var/lib/apt/lists/* \
&& mkdir /promenade \
&& mkdir /promenade/assets \
&& mkdir /promenade/scripts
WORKDIR /promenade
ENTRYPOINT /promenade/scripts/entrypoint.sh
COPY genesis-images.tar cni.tgz helm kubelet /promenade/
COPY kubelet.service.template /promenade/
COPY env.sh scripts/common/* /promenade/scripts/
COPY scripts/entrypoint-genesis.sh /promenade/scripts/entrypoint.sh
COPY assets/ /promenade/assets/
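For context, the image defined here is built and consumed roughly as follows; both commands appear in the Makefile and README elsewhere in this change, and `genesis-node.fqdn` is a placeholder hostname:

```bash
# Build the genesis image (normally done via `make build-genesis`).
sudo docker build -f Dockerfile.genesis -t quay.io/attcomdev/promenade-genesis:dev .

# Run it on the genesis node: the host filesystem is mounted at /target, the host
# Docker socket is shared, and NODE_HOSTNAME is set to the node's FQDN (placeholder here).
docker run --rm \
  -v /:/target \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -e NODE_HOSTNAME=genesis-node.fqdn \
  quay.io/attcomdev/promenade-genesis:dev
```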

37
Dockerfile.join Normal file

@ -0,0 +1,37 @@
# Copyright 2017 The Promenade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:xenial
ENV NODE_HOSTNAME=
RUN apt-get update -qq \
&& apt-get install --no-install-recommends -y \
docker.io \
gettext-base \
&& rm -rf /var/lib/apt/lists/* \
&& mkdir /promenade \
&& mkdir /promenade/assets \
&& mkdir /promenade/scripts
WORKDIR /promenade
ENTRYPOINT /promenade/scripts/entrypoint.sh
COPY join-images.tar cni.tgz kubelet /promenade/
COPY kubelet.service.template /promenade/
COPY env.sh scripts/common/* /promenade/scripts/
COPY scripts/entrypoint-join.sh /promenade/scripts/entrypoint.sh
COPY assets/kubeconfig assets/auth/kubeconfig /promenade/assets/

141
Makefile Normal file

@ -0,0 +1,141 @@
# Copyright 2017 The Promenade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------#
# Configuration #
#---------------#
BOOTKUBE_VERSION := v0.4.1
CNI_VERSION := v0.5.2
HELM_VERSION := v2.3.1
KUBERNETES_VERSION := v1.6.2
NAMESPACE := quay.io/attcomdev
GENESIS_REPO := promenade-genesis
JOIN_REPO := promenade-join
TAG := dev
GENESIS_IMAGES := \
gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.1 \
gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.1 \
gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.1 \
gcr.io/google_containers/pause-amd64:3.0 \
quay.io/calico/cni:v1.7.0 \
quay.io/calico/kube-policy-controller:v0.5.4 \
quay.io/calico/node:v1.1.3 \
quay.io/coreos/bootkube:$(BOOTKUBE_VERSION) \
quay.io/coreos/etcd-operator:v0.2.5 \
quay.io/coreos/etcd:v3.1.4 \
quay.io/coreos/etcd:v3.1.6 \
quay.io/coreos/flannel:v0.7.1 \
quay.io/coreos/hyperkube:$(KUBERNETES_VERSION)_coreos.0 \
quay.io/coreos/kenc:48b6feceeee56c657ea9263f47b6ea091e8d3035 \
quay.io/coreos/pod-checkpointer:20cf8b9a6018731a0770192f30dfa7a1941521e3 \
JOIN_IMAGES := \
gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.1 \
gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.1 \
gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.1 \
gcr.io/google_containers/pause-amd64:3.0 \
quay.io/calico/cni:v1.7.0 \
quay.io/calico/kube-policy-controller:v0.5.4 \
quay.io/calico/node:v1.1.3 \
quay.io/coreos/etcd-operator:v0.2.5 \
quay.io/coreos/etcd:v3.1.4 \
quay.io/coreos/etcd:v3.1.6 \
quay.io/coreos/flannel:v0.7.1 \
quay.io/coreos/hyperkube:$(KUBERNETES_VERSION)_coreos.0 \
quay.io/coreos/kenc:48b6feceeee56c657ea9263f47b6ea091e8d3035 \
quay.io/coreos/pod-checkpointer:20cf8b9a6018731a0770192f30dfa7a1941521e3 \
#-------#
# Rules #
#-------#
all: build
build: build-genesis build-join
push: push-genesis push-join
save: save-genesis save-join
genesis: build-genesis
build-genesis: Dockerfile.genesis cni.tgz env.sh helm genesis-images.tar kubelet kubelet.service.template
sudo docker build -f Dockerfile.genesis -t $(NAMESPACE)/$(GENESIS_REPO):$(TAG) .
push-genesis: build-genesis
sudo docker push $(NAMESPACE)/$(GENESIS_REPO):$(TAG)
save-genesis: build-genesis
sudo docker save $(NAMESPACE)/$(GENESIS_REPO):$(TAG) > promenade-genesis.tar
join: build-join
build-join: Dockerfile.join join-images.tar kubelet.service.template
sudo docker build -f Dockerfile.join -t $(NAMESPACE)/$(JOIN_REPO):$(TAG) .
push-join: build-join
sudo docker push $(NAMESPACE)/$(JOIN_REPO):$(TAG)
save-join: build-join
sudo docker save $(NAMESPACE)/$(JOIN_REPO):$(TAG) > promenade-join.tar
cni.tgz:
curl -Lo cni.tgz https://github.com/containernetworking/cni/releases/download/$(CNI_VERSION)/cni-amd64-$(CNI_VERSION).tgz
env.sh: Makefile
rm -f env.sh
echo export BOOTKUBE_VERSION=$(BOOTKUBE_VERSION) >> env.sh
echo export CNI_VERSION=$(CNI_VERSION) >> env.sh
echo export HELM_VERSION=$(HELM_VERSION) >> env.sh
echo export KUBERNETES_VERSION=$(KUBERNETES_VERSION) >> env.sh
helm:
curl -Lo helm.tgz https://storage.googleapis.com/kubernetes-helm/helm-$(HELM_VERSION)-linux-amd64.tar.gz
tar xf helm.tgz
mv linux-amd64/helm ./helm
rm -rf ./linux-amd64/
rm -f helm.tgz
chmod +x helm
genesis-images.tar:
for IMAGE in $(GENESIS_IMAGES); do \
sudo docker pull $$IMAGE; \
done
sudo docker save -o genesis-images.tar $(GENESIS_IMAGES)
join-images.tar:
for IMAGE in $(JOIN_IMAGES); do \
sudo docker pull $$IMAGE; \
done
sudo docker save -o join-images.tar $(JOIN_IMAGES)
kubelet:
curl -LO http://storage.googleapis.com/kubernetes-release/release/$(KUBERNETES_VERSION)/bin/linux/amd64/kubelet
chmod +x kubelet
clean:
rm -rf \
*.tar \
cni.tgz \
env.sh \
helm \
helm.tgz \
kubelet \
linux-amd64 \
.PHONY : build build-genesis build-join clean genesis join push push-genesis push-join
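As a usage sketch (not part of the file above): the images can be built, saved to tarballs, and pushed in one invocation. `NAMESPACE` and `TAG` default to `quay.io/attcomdev` and `dev`; the overrides below are purely illustrative.

```bash
# Build both images, write promenade-genesis.tar and promenade-join.tar, and push them.
# The registry namespace and tag are illustrative overrides of the defaults above.
make build save push NAMESPACE=quay.io/example TAG=v0.1.0
```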

266
README.md

@ -1,237 +1,81 @@
# Promenade: Manually Self-hosted Kubernetes via Bootkube
A short guide to bringing up a self-hosted Kubernetes cluster
# Overview
We'll use [bootkube](https://github.com/kubernetes-incubator/bootkube) to initiate the master components. First we'll render the assets necessary for bringing up the control plane (apiserver, controller-manager, scheduler, etc.). Then we'll start the kubelets, whose job it is to start those assets, although they can't do much yet because there is no API server. Running `bootkube` once then kicks everything off. At a high level, the bootstrapping process looks like this:
Promenade is a tool for deploying self-hosted, highly resilient Kubernetes clusters using
[bootkube](https://github.com/kubernetes-incubator/bootkube). Currently, Promenade works by leveraging Docker containers with the Bootkube binaries to set up Kubernetes on the host operating system. Default Kubernetes assets and manifests are included in this repo, but it is recommended to render or supply your own assets for real-world deployments.
![Self-Hosted](./img/self-hosted-moving-parts.png?raw=true "Self-hosted-moving-parts")
## Quickstart using Vagrant
Image taken from the [self-hosted proposal](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/self-hosted-kubernetes.md).
Make sure you have [Vagrant](https://vagrantup.com) and
[VirtualBox](https://www.virtualbox.org/wiki/Downloads) installed. Then
install the `vagrant-hostmanager` plugin.
This is what the final cluster looks like from a `kubectl` perspective:
![Screenshot](./img/self-hosted.png?raw=true "Screenshot")
Let's start!
## Temporary apiserver: `bootkube`
### Download
```
wget https://github.com/kubernetes-incubator/bootkube/releases/download/v0.3.9/bootkube.tar.gz
tar xvzf bootkube.tar.gz
sudo cp bin/linux/bootkube /usr/bin/
```bash
vagrant plugin install vagrant-hostmanager
```
### Render the Assets
Replace `10.7.183.59` with the address of the node you are working on. If you have DNS available, group all master node IP addresses behind a CNAME record and provide that instead.
```
bootkube render --asset-dir=assets --experimental-self-hosted-etcd --etcd-servers=http://10.3.0.15:2379 --api-servers=https://10.7.183.59:443
```
This will generate several things:
- manifests for running apiserver, controller-manager, scheduler, flannel, etcd, dns and kube-proxy
- a `kubeconfig` file for connecting to and authenticating with the apiserver
- TLS assets
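A rough sketch of the resulting layout (abbreviated; exact contents depend on the bootkube version):

```
assets/
├── auth/        # kubeconfig for connecting to the apiserver
├── manifests/   # control-plane manifests (apiserver, controller-manager, scheduler, ...)
└── tls/         # generated TLS assets
```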
Build the genesis and join images and save them to disk for quick loading into
the Vagrant VMs.
## Start the Master Kubelet
### Download `hyperkube`
```
wget http://storage.googleapis.com/kubernetes-release/release/v1.5.3/bin/linux/amd64/hyperkube -O ./hyperkube
sudo mv hyperkube /usr/bin/hyperkube
sudo chmod 755 /usr/bin/hyperkube
```bash
make save
```
### Install CNI
```
sudo mkdir -p /opt/cni/bin
wget https://github.com/containernetworking/cni/releases/download/v0.4.0/cni-amd64-v0.4.0.tbz2
sudo tar xjf cni-amd64-v0.4.0.tbz2 -C /opt/cni/bin/
Start the VMs and save a snapshot for quicker iteration:
```bash
vagrant up
vagrant snapshot save clean
```
### Copy Configuration Files
```
sudo cp assets/auth/kubeconfig /etc/kubernetes/
sudo cp -a assets/manifests /etc/kubernetes/
```
### Start the Kubelet
```
sudo hyperkube kubelet --kubeconfig=/etc/kubernetes/kubeconfig \
--require-kubeconfig \
--cni-conf-dir=/etc/kubernetes/cni/net.d \
--network-plugin=cni \
--lock-file=/var/run/lock/kubelet.lock \
--exit-on-lock-contention \
--pod-manifest-path=/etc/kubernetes/manifests \
--allow-privileged \
--node-labels=master=true \
--minimum-container-ttl-duration=6m0s \
--cluster_dns=10.3.0.10 \
--cluster_domain=cluster.local \
--hostname-override=10.7.183.59
```
The TLS credentials generated by `bootkube render` in assets/tls/ are copied to a secret: assets/manifests/kube-apiserver-secret.yaml.
Spin up a cluster:
### Start the Temporary API Server
bootkube will serve as the temporary apiserver, so the kubelet started above can bring up the real apiserver in a pod.
```
sudo bootkube start --asset-dir=./assets --experimental-self-hosted-etcd --etcd-server=http://127.0.0.1:12379
```
bootkube should exit on its own after successfully bootstrapping the master components. It is only needed for the very first bootstrap.
### Check the Output
```
watch hyperkube kubectl get pods -o wide --all-namespaces
```bash
./test-install.sh
```
## Join Nodes to the Cluster
Copy the information about where to find the apiserver and how to authenticate with it:
```
scp 10.7.183.59:assets/auth/kubeconfig .
sudo mkdir -p /etc/kubernetes
sudo mv kubeconfig /etc/kubernetes/
```
Install the CNI binaries and download `hyperkube`:
```
sudo mkdir -p /opt/cni/bin
wget https://github.com/containernetworking/cni/releases/download/v0.4.0/cni-amd64-v0.4.0.tbz2
sudo tar xjf cni-amd64-v0.4.0.tbz2 -C /opt/cni/bin/
wget http://storage.googleapis.com/kubernetes-release/release/v1.5.3/bin/linux/amd64/hyperkube -O ./hyperkube
sudo mv hyperkube /usr/bin/hyperkube
sudo chmod 755 /usr/bin/hyperkube
```
### Master Nodes
Start the kubelet:
```
sudo hyperkube kubelet --kubeconfig=/etc/kubernetes/kubeconfig \
--require-kubeconfig \
--cni-conf-dir=/etc/kubernetes/cni/net.d \
--network-plugin=cni \
--lock-file=/var/run/lock/kubelet.lock \
--exit-on-lock-contention \
--pod-manifest-path=/etc/kubernetes/manifests \
--allow-privileged \
--node-labels=master=true \
--minimum-container-ttl-duration=6m0s \
--cluster_dns=10.3.0.10 \
--cluster_domain=cluster.local \
--hostname-override=10.7.183.60
Watch nodes spin up:
```bash
watch kubectl --insecure-skip-tls-verify --kubeconfig <(sed 's/kubernetes:443/192.168.77.10:443/' < assets/kubeconfig) get nodes
```
### Worker Nodes
To test changes, you can safely reset one or more nodes:
Note the only difference is the removal of `--node-labels=master=true`:
```
sudo hyperkube kubelet --kubeconfig=/etc/kubernetes/kubeconfig \
--require-kubeconfig \
--cni-conf-dir=/etc/kubernetes/cni/net.d \
--network-plugin=cni \
--lock-file=/var/run/lock/kubelet.lock \
--exit-on-lock-contention \
--pod-manifest-path=/etc/kubernetes/manifests \
--allow-privileged \
--minimum-container-ttl-duration=6m0s \
--cluster_dns=10.3.0.10 \
--cluster_domain=cluster.local\
--hostname-override=10.7.183.60
```bash
vagrant snapshot restore n2 clean --no-provision
vagrant snapshot restore clean --no-provision
```
## Scale Etcd
`kubectl apply` doesn't work for ThirdPartyResources at the moment. See https://github.com/kubernetes/kubernetes/issues/29542. As a workaround, we use cURL to resize the cluster.
## Detailed Deployment
```
hyperkube kubectl --namespace=kube-system get cluster.etcd kube-etcd -o json > etcd.json && \
vim etcd.json && \
curl -H 'Content-Type: application/json' -X PUT --data @etcd.json http://127.0.0.1:8080/apis/etcd.coreos.com/v1beta1/namespaces/kube-system/clusters/kube-etcd
```
If that doesn't work, re-run until it does. See https://github.com/kubernetes-incubator/bootkube/issues/346#issuecomment-283526930
The steps below can be used to deploy a cluster on bare-metal or virtual nodes:
## Challenges
1. Overwrite the placeholder assets in the `assets` directory.
### Node setup
Some Broadcom NICs panicked with the default Ubuntu kernel:
- Upgrade the kernel to >`4.8` because of the Broadcom NIC failures.
- Move to `--storage-driver=overlay2` instead of `aufs` as the Docker storage driver.
- Disable swap on the node (running with swap enabled will be a fatal error in kube-1.6).
2. Make sure the `Makefile` lists the images and versions you expect to be
required.
3. Build the images with `make build`.
4. Set up each host with the following:
- DNS resolution pointing `kubernetes` to the appropriate IPs (the Kubernetes master nodes) for the
Kubernetes API (see the example dnsmasq entry after this list)
- A running Docker daemon, configured to use the DNS resolution specified
above (see `vagrant-assets/docker-daemon.json`)
5. Transfer the appropriate images to each host. You may find it useful to
run `make save`, transfer the image tarballs, and then use `docker load -i ...` to
restore them rather than relying on a registry.
6. On the genesis (seed) server, start the cluster, supplying the node's FQDN:
`docker run --rm -v /:/target -v /var/run/docker.sock:/var/run/docker.sock -e NODE_HOSTNAME=genesis-node.fqdn quay.io/attcomdev/promenade-genesis:dev`
7. On each additional node to be joined to the cluster:
`docker run --rm -v /:/target -v /var/run/docker.sock:/var/run/docker.sock -e NODE_HOSTNAME=join-node.fqdn quay.io/attcomdev/promenade-join:dev`
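For step 4, a minimal dnsmasq entry is one way to provide the `kubernetes` name resolution. This is only an illustration: the file path and IP are placeholders (the IP matches the first Vagrant node in this repo), and the Vagrant setup ships its own `vagrant-assets/dnsmasq-kubernetes`.

```
# /etc/dnsmasq.d/kubernetes (illustrative)
# Resolve the name "kubernetes" to a Kubernetes master node.
address=/kubernetes/192.168.77.10
```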
## ToDo Items:
### apiserver resilience
The master apiservers need to be reachable via a single address. Possible solutions:
- Use a load balancer provided by the datacenter.
- Use DNS from the datacenter with a programmable API (e.g., PowerDNS).
- Use something like kube-keepalive-vip?
- Bootstrap DNS itself (SkyDNS, CoreDNS).
## References:
### Etcd Challenges
- backup strategies (https://github.com/coreos/etcd-operator/blob/master/doc/user/spec_examples.md#three-members-cluster-that-restores-from-previous-pv-backup)
- etcd-operator failures (e.g. https://github.com/coreos/etcd-operator/issues/851)
- partial failure (losing quorum)
- permanent failure (state gone completely)
- etcd needs NTP available (or another mechanism to keep every node's clock in sync)
1. [Demo of Genesis Node Deployment](https://asciinema.org/a/c2fdtzh2z2fiymiyu75b32u0h)
## Notes
### clean up docker
```
sudo su -
docker rm -f $(docker ps -a -q)
exit
```
### Compile Bootkube
```
sudo docker run --rm -it -v $(pwd)/golang/src:/go/src/ -w /go/src golang:1.7 bash
go get -u github.com/kubernetes-incubator/bootkube
cd $GOPATH/src/github.com/kubernetes-incubator/bootkube
make
```
### RBAC
```
./bootkube-rbac render --asset-dir assets-rbac --experimental-self-hosted-etcd --etcd-servers=http://10.3.0.15:2379 --api-servers=https://10.7.183.59:443
sudo rm -rf /etc/kubernetes/*
sudo cp -a assets-rbac/manifests /etc/kubernetes/
sudo cp assets-rbac/auth/kubeconfig /etc/kubernetes/
sudo ./bootkube-rbac start --asset-dir=./assets-rbac --experimental-self-hosted-etcd --etcd-server=http://127.0.0.1:12379
```
### Containerized Kubelet
The benefit here is using a Docker container instead of a kubelet binary. Also, the hyperkube Docker image packages and installs the CNI binaries. The downside is that, either way, something needs to start the container when the node reboots; usually that something is systemd, and systemd is better at managing binaries than Docker containers. Either way, this is how you would run a containerized kubelet:
```
sudo docker run \
--rm \
-it \
--privileged \
-v /dev:/dev \
-v /run:/run \
-v /sys:/sys \
-v /etc/kubernetes:/etc/kubernetes \
-v /usr/share/ca-certificates:/etc/ssl/certs \
-v /var/lib/docker:/var/lib/docker \
-v /var/lib/kubelet:/var/lib/kubelet \
-v /:/rootfs \
quay.io/coreos/hyperkube:v1.5.3_coreos.0 \
./hyperkube \
kubelet \
--network-plugin=cni \
--cni-conf-dir=/etc/kubernetes/cni/net.d \
--cni-bin-dir=/opt/cni/bin \
--pod-manifest-path=/etc/kubernetes/manifests \
--allow-privileged \
--hostname-override=10.7.183.60 \
--cluster-dns=10.3.0.10 \
--cluster-domain=cluster.local \
--kubeconfig=/etc/kubernetes/kubeconfig \
--require-kubeconfig \
--lock-file=/var/run/lock/kubelet.lock \
--containerized
```
This is not quite working yet, though. The node comes up, registers successfully with the master, and starts daemonsets. Everything comes up except flannel:
```
main.go:127] Failed to create SubnetManager: unable to initialize inclusterconfig: open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory
```
## Resources and References
- https://github.com/kubernetes/community/blob/master/contributors/design-proposals/self-hosted-kubernetes.md
- https://github.com/kubernetes-incubator/bootkube
- https://github.com/coreos/etcd-operator/
- http://blog.kubernetes.io/2017/01/stronger-foundation-for-creating-and-managing-kubernetes-clusters.html
- https://github.com/kubernetes/kubeadm/issues/127
2. [Demo of Joining Node to Cluster](https://asciinema.org/a/62dssvuiwbdanfuwwe6vzcihk)

59
Vagrantfile vendored Normal file

@ -0,0 +1,59 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
Vagrant.configure("2") do |config|
config.vm.box = "ubuntu/xenial64"
config.vm.box_check_update = false
config.vm.provision :file, source: "vagrant-assets/docker-daemon.json", destination: "/tmp/docker-daemon.json"
config.vm.provision :file, source: "vagrant-assets/dnsmasq-kubernetes", destination: "/tmp/dnsmasq-kubernetes"
config.vm.provision :shell, privileged: true, inline:<<EOS
set -ex
echo === Installing packages ===
apt-get update -qq
apt-get install -y -qq --no-install-recommends \
docker.io \
dnsmasq \
gettext-base \
echo === Setting up DNSMasq ===
mv /tmp/dnsmasq-kubernetes /etc/dnsmasq.d/
chown root:root /etc/dnsmasq.d/dnsmasq-kubernetes
chmod 444 /etc/dnsmasq.d/dnsmasq-kubernetes
systemctl restart dnsmasq
echo === Reconfiguring Docker ===
mv /tmp/docker-daemon.json /etc/docker/daemon.json
chown root:root /etc/docker/daemon.json
chmod 444 /etc/docker/daemon.json
systemctl restart docker
echo === Done ===
EOS
config.hostmanager.enabled = true
config.hostmanager.manage_guest = true
config.vm.provider "virtualbox" do |vb|
vb.cpus = 2
vb.memory = "2048"
end
config.vm.define "n0" do |c|
c.vm.hostname = "n0"
c.vm.network "private_network", ip: "192.168.77.10"
end
config.vm.define "n1" do |c|
c.vm.hostname = "n1"
c.vm.network "private_network", ip: "192.168.77.11"
end
config.vm.define "n2" do |c|
c.vm.hostname = "n2"
c.vm.network "private_network", ip: "192.168.77.12"
end
end
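Tying this together with the quickstart in the README, the Vagrant workflow boils down to the following commands, all of which appear earlier in this change:

```bash
vagrant plugin install vagrant-hostmanager   # one-time plugin setup
make save                                    # build the genesis/join images and save tarballs
vagrant up                                   # bring up n0, n1, and n2
vagrant snapshot save clean                  # snapshot for quick resets
./test-install.sh                            # deploy the cluster onto the VMs
```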

17
assets/auth/kubeconfig Normal file

@ -0,0 +1,17 @@
---
apiVersion: v1
kind: Config
clusters:
- name: local
cluster:
server: https://kubernetes:443
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM2RENDQWRDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFsTVJFd0R3WURWUVFLRXdoaWIyOTAKYTNWaVpURVFNQTRHQTFVRUF4TUhhM1ZpWlMxallUQWVGdzB4TnpBMU1Ua3hPRFF4TWpCYUZ3MHlOekExTVRjeApPRFF4TWpCYU1DVXhFVEFQQmdOVkJBb1RDR0p2YjNScmRXSmxNUkF3RGdZRFZRUURFd2RyZFdKbExXTmhNSUlCCklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF1bEFWZnpUZS9tTWwzMU5BeDdQNTI0c3oKblFLbXhHK0JYZkRQdDRPNzc4dEJGNzZSc0VYK3dLclJ0b29CcjdheGh2UjBvazVrRFpQQVJHcE5LQVJtZENTbQozMzZFckZ0cVR3TW9yZVk3V1ZDVTJDQkZPdHQydW1mSkR1R1ZvTlVIRWtEOE1lVjJsWUpDb3h3SnJoZTV3aXFxCm00aHB0U0NlcFVqaWxta1JlV1ErL040K1JWRHByODZHWTJRQlVsdjlPdEE1aHhUaXNiQTAxU3dTUEFXcnBPcVYKOEpJajJSTFpuODVGVHpNRlRRazBXdTBadWdpcnlxZGF4bDMzVkwzK1VSSTNRQzJyMmRwdmQxU2V5V0RFWHZqbQprbjkyMzh3ZSsyd0JlUmFjZUN2QzdqeUR2WVNPaFMrajkyd0ZkblFZeCtIaW5BOG5uOFFmZG0zOHU2QTlod0lECkFRQUJveU13SVRBT0JnTlZIUThCQWY4RUJBTUNBcVF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUcKOXcwQkFRc0ZBQU9DQVFFQURIdmd0RENFOHR2MGxLSXpFcWZ1YlVBNUxLUTROaVQ1U1VBdWNZYXpNcEt3MVFJVwpRaW5Db0xFZnlQTXdna2JnWGp6d25lOFB4ZUVqanZ3Q1Jxd2J5VmlCV252OTM3cDk0cG9aLzlHM0NXM2JTWXdRCjRaZVpud1c2d1cwSUdzRWhlTXdrbkJlUWJvb2NNNmNYdThodG8xQVlIT25qdGcydDFSdWZXcHNEbjVhb2t1Vy8KUkk4SGc1dm5XV0tBQUF3Y3drbWc4YWlOLzFuWVFHL2NvRDQxa1hlL2lKMURUUFphMkNQeGdtNzFmMmhSbkVZVApjN3VUN3V1ZUJhcG8xTyt0dFBrZ2hzSXZQWktjNnZLeEswd3J2ekhHUm9VTGw3N1o4M3o5MmFvUEx6Y21uSjNkCk1GRXE0ZDdKUTV1NWkrU2FxcXFPZHAxUkdBaXVpTnBjdnlQOWV3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
users:
- name: kubelet
user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURBekNDQWV1Z0F3SUJBZ0lJTE1Qa0xkMkUvdUF3RFFZSktvWklodmNOQVFFTEJRQXdKVEVSTUE4R0ExVUUKQ2hNSVltOXZkR3QxWW1VeEVEQU9CZ05WQkFNVEIydDFZbVV0WTJFd0hoY05NVGN3TlRFNU1UZzBNVEl3V2hjTgpNVGd3TlRFNU1UZzBNVEl4V2pBck1SY3dGUVlEVlFRS0V3NXplWE4wWlcwNmJXRnpkR1Z5Y3pFUU1BNEdBMVVFCkF4TUhhM1ZpWld4bGREQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUx0ejltSG8KdFBraWRQYlFldTlSUzZ0QU9RaEFoUE96Vjd5NWt4bzlaa3lHUjVtT0o1TUVsZm9vZkhXR1hEcUpzM0lITzZacgpaVEtUWWdYNmMzamlzTWhJVDYySm5OOVphQVRXY3JkK3FRMTVpeFROaHFkeTNVY1g2eGxCOFlGOEtwVlo0MHJPCndyUC9Vc0c5RWFCaXQzN2lPbW1JTklrWnRiTklodk9ZaGtKdnIrTk90WC84VHNuUlpwVDlQeUNleVpKYnNaSVoKZDFBcGZ1MkVOZVMxQzFPZ09RSUVPUkVCZWhjM0dWSDExRDlCUnRGb2IyMk1qWlVqeHlHajBTQnV0VW1wdm5ZOQpvZ2ZFNXBUMHloSStrWmxQNmlNUGtrMG9HbGtjYytVNFg4VnJTeVlYZkpORWJtSTVhRFplM0E0bGs0ZlhpRi9ZCk5vc2JIWW56ZGYvajBhY0NBd0VBQWFNeE1DOHdEZ1lEVlIwUEFRSC9CQVFEQWdXZ01CMEdBMVVkSlFRV01CUUcKQ0NzR0FRVUZCd01CQmdnckJnRUZCUWNEQWpBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQUlnYXhPNmFBeUdScQpNSU5QSUQ1YkcvWlNSb0lCU0VYMGJBdmlMS1dQOVJvbmpmYXlNOFhiM3IyV1o0VG1Kb1lZRE5NUkZveUNlU3R3CjFmamw3YjJ2cG1GQk94bHBtUnZOaFJGMWRsSTlSdDRHUlJWa3hlUzdjNGRrYzBMRlRIRVBwMFgvUm1TdDR1ZisKWDlzWXNXT0dTQmY1MitxWi83VU5JNlNZd29sdGVuemJ3bkxIWTlOU0xYaVZGb21tQ1hQYUJtYTFHbGtRTjJGMwpjRUluaGY3OEJYS1hlSXBXZFpib0h1V09VdTNhb1JUMHA2ZmVnYjJVeGgyYTczczZzVG9IakU3b3kzSDJadktSCmtjRkoyVG5LTXJxekVLLzl3eWMvZ3Uva1lWeDgvekNvUGxEUUFTZW03YVRaZ09JRFo4d2M0ZzlyQml0bnhkSXMKanhad2pPS3Q5Zz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcGdJQkFBS0NBUUVBdTNQMlllaTArU0owOXRCNjcxRkxxMEE1Q0VDRTg3Tlh2TG1UR2oxbVRJWkhtWTRuCmt3U1YraWg4ZFlaY09vbXpjZ2M3cG10bE1wTmlCZnB6ZU9Ld3lFaFByWW1jMzFsb0JOWnl0MzZwRFhtTEZNMkcKcDNMZFJ4ZnJHVUh4Z1h3cWxWbmpTczdDcy85U3diMFJvR0szZnVJNmFZZzBpUm0xczBpRzg1aUdRbSt2NDA2MQpmL3hPeWRGbWxQMC9JSjdKa2x1eGtobDNVQ2wrN1lRMTVMVUxVNkE1QWdRNUVRRjZGemNaVWZYVVAwRkcwV2h2CmJZeU5sU1BISWFQUklHNjFTYW0rZGoyaUI4VG1sUFRLRWo2Um1VL3FJdytTVFNnYVdSeHo1VGhmeFd0TEpoZDgKazBSdVlqbG9ObDdjRGlXVGg5ZUlYOWcyaXhzZGlmTjEvK1BScHdJREFRQUJBb0lCQVFDUnB6SmJzNERqVUhYSAp6Z2luNmVnOUFhTVBHV3IxSFhaZ0MyWVU3bjZObVkwSzhOMHBMRmdJeitxZE96Qnd2OHh5SHRLbnBpMDAxalpGClpPelNrbnBBdFlkTDFYRFNUMXMyM3hhMkk3SGg2WDQ3Uk5PTFN3SkxHbmV2NFlCeFYzU1RKZ3dwZFd6dWhjYmQKQ1Rjb0EyeUhKK3V4VW9kWHZHVm1FRVhrQTdEVzd6TFpwdkxKLy9uRDV6NUNNMElVUGRhU2dYaFlRcDJOWld0SQpSakxkamt1WVZ5QllDMnJVNExwbWlIMWVJVkw3YkRIb1VRaE9hSE4wd1NGRzgwbzQ2Z3ZycWJock1QdzdCd0l1CmJDVzMwcTRZNEpQUlluNXJ1MHpDRm9ybmU2NUkya1J0bkpVRGpuOTlkT250V1ZaaWJSb2pZMGhGRkV5R1lPaloKV0l0ekdBYnhBb0dCQU5GajJaSGl0UXh0cVlzN01OSVk5anovN3B6dVBhWDhkbSsyLzNXVzVBb3QwMStzNHlWSApwZDdIRThsNU5qbmVqV0c3bkcyR1BzSWhiQ0NWWEV0U01HdDFCUmlvS3BjMmRMcStaUWI3NUxHRE1hSnpNV0VtCi9IaW1KdWhYdnhPenpLQzlaMjl2bzRkNkpDNTh2UHd5dTI3ZEZBdjNyekFjZGlXYi9haWI3UzZaQW9HQkFPVXUKQmVQWmdxbHB3bDN3cURsQWxqaUxzSDhBZVpVSDJyREE0bjRkKzFrS1BNcUpZTW1mdEdhVGtESk1lSmZpc2ZLYgpFWGNRc0dKQWVPTEhScFkxVnZrSHFuNXYrN3FnOUpIU25sdysvblRGNVZrNklTQUZNczJRZndkcTZmWjg5OEdaCm1pOVZYcjBoZXo3Wi92L2xpQ3hCY2wwaGdBaG5qSUZHdlE1clNtby9Bb0dCQUl2bFZGV2R6Q3lUai9VUUJOdzYKQlRwWUhBb0pPbk1OcSt1VHJqWFlMRitJb25LSHhmTUFYWmZzRmhKRHc3RUNPaCtVQXoxQnRlaHFBQjM4N0g3KwpXSTlTemFiZHBDY0hJUklyWnNBMXgyTzZMWTFGdlRZVm9CVFRuYWNhQ1BXVzZSNXpyUW5NNHNyL0ZmRmhNYnFtCkFvaGRlS2xPUUdPNmdFMDhYVXNyY2xueEFvR0JBTE92K2Y1RHRDYVFQVXphTzR0b0VHQVZaalN0Y3FaZW1pQ3IKbXVtM0tETVB5OG9oSERuNWRjQlhRbCt0aFgvUXhpU3BZSEF5TFpsYlkyeXJRYlRUN1hVamhaSE15MW53aU5FcwppZTFabHJpSDBPSzhxT3dxSjBMMVlDTzR0K2dDNDE1dnlHd0VTMXVPdk1yeXNQU0NTdG9vRmpyZTRUdTF0SHhICnNrTno2OHlSQW9HQkFKeU1Gb1F1MHJ6T3hDd1F4KzhtMWVuY205cGNVdnUyZVN3d3krOTQ2MFc0NzRXdzRxWkEKRjREV3dqRGc1ZEJHMUltMjFLSUpraG9YNTc5ZGgzUUlScjNQUndsUVVrUWx4VHJVU0V0cGZOVFUzcHZXVjlCRgp0dUxTMVRuT2R3ZW9ROGNHWlpkOVBXTUxMckJkMEplUjRGeUgyM3JPVW1NRndKMkE2T29wZVg2QgotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
contexts:
- context:
cluster: local
user: kubelet


@ -0,0 +1,60 @@
---
apiVersion: v1
kind: Pod
metadata:
name: bootstrap-kube-apiserver
namespace: kube-system
spec:
containers:
- name: kube-apiserver
image: quay.io/coreos/hyperkube:v1.6.2_coreos.0
command:
- /usr/bin/flock
- --exclusive
- --timeout=30
- /var/lock/api-server.lock
- /hyperkube
- apiserver
- --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota
- --advertise-address=$(POD_IP)
- --allow-privileged=true
- --authorization-mode=RBAC
- --bind-address=0.0.0.0
- --client-ca-file=/etc/kubernetes/secrets/ca.crt
- --etcd-servers=http://10.3.0.15:2379,http://127.0.0.1:12379
- --insecure-port=0
- --kubelet-client-certificate=/etc/kubernetes/secrets/apiserver.crt
- --kubelet-client-key=/etc/kubernetes/secrets/apiserver.key
- --secure-port=443
- --service-account-key-file=/etc/kubernetes/secrets/service-account.pub
- --service-cluster-ip-range=10.3.0.0/24
- --storage-backend=etcd3
- --tls-ca-file=/etc/kubernetes/secrets/ca.crt
- --tls-cert-file=/etc/kubernetes/secrets/apiserver.crt
- --tls-private-key-file=/etc/kubernetes/secrets/apiserver.key
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
- mountPath: /etc/ssl/certs
name: ssl-certs-host
readOnly: true
- mountPath: /etc/kubernetes/secrets
name: secrets
readOnly: true
- mountPath: /var/lock
name: var-lock
readOnly: false
hostNetwork: true
volumes:
- name: secrets
hostPath:
path: /etc/kubernetes/bootstrap-secrets
- name: ssl-certs-host
hostPath:
path: /usr/share/ca-certificates
- name: var-lock
hostPath:
path: /var/lock


@ -0,0 +1,35 @@
---
apiVersion: v1
kind: Pod
metadata:
name: bootstrap-kube-controller-manager
namespace: kube-system
spec:
containers:
- name: kube-controller-manager
image: quay.io/coreos/hyperkube:v1.6.2_coreos.0
command:
- ./hyperkube
- controller-manager
- --allocate-node-cidrs=true
- --cluster-cidr=10.2.0.0/16
- --configure-cloud-routes=false
- --kubeconfig=/etc/kubernetes/kubeconfig
- --leader-elect=true
- --root-ca-file=/etc/kubernetes/bootstrap-secrets/ca.crt
- --service-account-private-key-file=/etc/kubernetes/bootstrap-secrets/service-account.key
volumeMounts:
- name: kubernetes
mountPath: /etc/kubernetes
readOnly: true
- name: ssl-host
mountPath: /etc/ssl/certs
readOnly: true
hostNetwork: true
volumes:
- name: kubernetes
hostPath:
path: /etc/kubernetes
- name: ssl-host
hostPath:
path: /usr/share/ca-certificates


@ -0,0 +1,30 @@
---
apiVersion: v1
kind: Pod
metadata:
name: bootstrap-etcd
namespace: kube-system
labels:
k8s-app: boot-etcd
spec:
containers:
- name: etcd
image: quay.io/coreos/etcd:v3.1.6
command:
- /usr/local/bin/etcd
- --name=boot-etcd
- --listen-client-urls=http://0.0.0.0:12379
- --listen-peer-urls=http://0.0.0.0:12380
- --advertise-client-urls=http://$(MY_POD_IP):12379
- --initial-advertise-peer-urls=http://$(MY_POD_IP):12380
- --initial-cluster=boot-etcd=http://$(MY_POD_IP):12380
- --initial-cluster-token=bootkube
- --initial-cluster-state=new
- --data-dir=/var/etcd/data
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
hostNetwork: true
restartPolicy: Never


@ -0,0 +1,24 @@
---
apiVersion: v1
kind: Pod
metadata:
name: bootstrap-kube-scheduler
namespace: kube-system
spec:
containers:
- name: kube-scheduler
image: quay.io/coreos/hyperkube:v1.6.2_coreos.0
command:
- ./hyperkube
- scheduler
- --kubeconfig=/etc/kubernetes/kubeconfig
- --leader-elect=true
volumeMounts:
- name: kubernetes
mountPath: /etc/kubernetes
readOnly: true
hostNetwork: true
volumes:
- name: kubernetes
hostPath:
path: /etc/kubernetes

1
assets/kubeconfig Symbolic link

@ -0,0 +1 @@
auth/kubeconfig


@ -0,0 +1,31 @@
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: etcd-operator
namespace: kube-system
labels:
k8s-app: etcd-operator
spec:
replicas: 1
template:
metadata:
labels:
k8s-app: etcd-operator
spec:
containers:
- name: etcd-operator
image: quay.io/coreos/etcd-operator:v0.2.5
env:
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule


@ -0,0 +1,15 @@
---
apiVersion: v1
kind: Service
metadata:
name: etcd-service
namespace: kube-system
spec:
selector:
app: etcd
etcd_cluster: kube-etcd
clusterIP: 10.3.0.15
ports:
- name: client
port: 2379
protocol: TCP


@ -0,0 +1,12 @@
---
apiVersion: v1
data:
apiserver.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURoRENDQW15Z0F3SUJBZ0lJWVJUbkVVV1BCMkV3RFFZSktvWklodmNOQVFFTEJRQXdKVEVSTUE4R0ExVUUKQ2hNSVltOXZkR3QxWW1VeEVEQU9CZ05WQkFNVEIydDFZbVV0WTJFd0hoY05NVGN3TlRFNU1UZzBNVEl3V2hjTgpNVGd3TlRFNU1UZzBNVEl4V2pBdk1SUXdFZ1lEVlFRS0V3dHJkV0psTFcxaGMzUmxjakVYTUJVR0ExVUVBeE1PCmEzVmlaUzFoY0dselpYSjJaWEl3Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLQW9JQkFRREIKc1hEUUd0NENTa20rSDBvVDNIZ3pBRHpLM0lRdGM1UVZLVGIyRFR5dzIvbStoNE1SZDZuK2xyYThwdG8wOUlzLwpZaVZ4OE9DQ0ZGc083MjZaWnFMUWxRZVBERjM2UUtKYnBJeUdxMmIzR1ZCeURRcXRuNDd4aFhVZUx1MHo3SU1LCjg5MDZ4bVpYZWc4SEhUSVM5UDY2ejN4QTlrTG4wbndTU0ZKSEdUWE1vRnI4Y25MeVNucnRESGU5cEdvLytqY1IKMCtqaUgzYXQzdzJGMXRDYVRaOHpuRU1SUDgwQlR5c2I3SWxaZG1OQmZhU29UNDVOamUyZUJwWkRkeHZJOHFoaQpKMlpXWjd2UXN1NkFsQ25lS3BUajR0Z3NWNnNFQWdzMlY4cGFiUmFTTTV0MEhxMWxHby9ucGNPYW1JVVFBcTF1Ck8yU3BTVElvamRTSG1XZEQ5aDVkQWdNQkFBR2pnYTB3Z2Fvd0RnWURWUjBQQVFIL0JBUURBZ1dnTUIwR0ExVWQKSlFRV01CUUdDQ3NHQVFVRkJ3TUJCZ2dyQmdFRkJRY0RBakI1QmdOVkhSRUVjakJ3Z2dwcmRXSmxjbTVsZEdWegpnZ3ByZFdKbGNtNWxkR1Z6Z2hKcmRXSmxjbTVsZEdWekxtUmxabUYxYkhTQ0ZtdDFZbVZ5Ym1WMFpYTXVaR1ZtCllYVnNkQzV6ZG1PQ0pHdDFZbVZ5Ym1WMFpYTXVaR1ZtWVhWc2RDNXpkbU11WTJ4MWMzUmxjaTVzYjJOaGJJY0UKQ2dNQUFUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFqOEc5TGM3NVFOYmhBRFF1T1hzU0dFaTZiSjBVZEZvVgp2djVWTE5NT2RaMCtqWHRIdExZckIzUnlnSWNvbFNkcWxhcXBNOW5qNjF4Z25oRzNPSVlJdzhCQ3FRbGFCZ08rCjVjQXZ6bXFsMjlBb0RiTGV1M0pjdG15U1NjcXlDajRtcXRsT0dIZ0lvdFVxMjI2UmUxYXFTSjh6TEg3VURWRWEKanlRbzh2bjVHUW0vWHd5R1V0NG5TcFlYTWk2TXp0ZWJjZVBkeU9lNDM4N05GSlM5L09VUUlkV2xodjFjZWdLKwpmVThLUnYyTWlCZlpacUoxRFFEMTdlVjk0OTRESW1HTjFuQ3BWbG1QTkJHVENlNzVTT1lDQk93WWhIS29OTUxuClltdG5wekJ0ZkFrVTRFemppTW02VjIyWEkvbFpzUWR4ZVFmTU1TY21oK002MERIcjdUb1JkZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
apiserver.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBd2JGdzBCcmVBa3BKdmg5S0U5eDRNd0E4eXR5RUxYT1VGU2syOWcwOHNOdjV2b2VECkVYZXAvcGEydktiYU5QU0xQMklsY2ZEZ2doUmJEdTl1bVdhaTBKVUhqd3hkK2tDaVc2U01ocXRtOXhsUWNnMEsKclorTzhZVjFIaTd0TSt5REN2UGRPc1ptVjNvUEJ4MHlFdlQrdXM5OFFQWkM1OUo4RWtoU1J4azF6S0JhL0hKeQo4a3A2N1F4M3ZhUnFQL28zRWRQbzRoOTJyZDhOaGRiUW1rMmZNNXhERVQvTkFVOHJHK3lKV1haalFYMmtxRStPClRZM3RuZ2FXUTNjYnlQS29ZaWRtVm1lNzBMTHVnSlFwM2lxVTQrTFlMRmVyQkFJTE5sZktXbTBXa2pPYmRCNnQKWlJxUDU2WERtcGlGRUFLdGJqdGtxVWt5S0kzVWg1bG5RL1llWFFJREFRQUJBb0lCQUVSTjFaR2RsK0xJM2I1cwovRXVLdU55TFhlVFA1TkMrYkY4Vi9LckNPai9JSXdjY2RJMEpYT3BKcmNGVE9hbm8vdDNvTjNvNXpvSXd1WGZICjJZSEJIdk5kU3FBWVpWK2x3VnQ5Nkl4cEQxTmVHdTlOU0JHNExjbGdIYy82RG0zOEhxNFRGMVh0dHhOc0dMYVMKaGlFSFFua1FTQ29FYmMyZ2ZWNVpJS0t2OGpmcFNoWWlhQVB6cnQzc2FFLzIrT2xpSjVwNnpmWEtObEVzZzFVUwo3OGcrSmlPVlhaZEVRRnlQUDVZbzhnamU4d1EyTmV0bmlsUVE5cnRCYlB2OUZmc1RyajAzc3JsVTJEN0lJQmRRCjdEM1o1QU43ZTdSaXdSR21TdFo0R2xsY0N1aHZqaHZmaGF2MTMyRzAxbzgvRHd2VkxUbmZTS0ZBNytFOFVZRzkKNlpBelg0VUNnWUVBL3BYdDhlaGozczFmOGNOYVNFSmxEOEFzT0hnemN1eFJ2ZHJFK3pBOGw0ZUVGcFA1VUplagpPY0R1OTY2cTF5dDRRcDdZeDJzVzNVQTc2bTdSdWdkcUE1TVAyNWZnekdWNW4yMml3WWJuQlF2cURRRU9qTUgxCjFrMENrYVJYaERDekd1d2IwN29nL3JoT0pkQ0kzT1NDUXBMRDZCc1g4TVZQSi8yR2ZlNFhFQ2NDZ1lFQXdzVG8KL2lOWkZOS2tQd2xmanBpcnJ5NmdCN0daWVJZZGtuZU1NOTJmVHp1RGRxU0lyTTlvTEJlVXlpeEFmUFA5YzB5VgoyY3doYzhUTGRIeEl3YXR6Tk5Ka3dwMitlQU5mZzhqUTB2SzlKOFYwNjQ5QzVpTTNhWjVNVVZHMklTNFJBWnRICk1HMnc1ZnZkZDdTcUo4Uk9XVXk3K0UwczQ3MnlmSk5MM2F1TmE5c0NnWUVBNUFYUHdFc0FJSS9jYm9NbGV2RVUKNloyYlBkelRZQXl3VGhjRE5XU0g4TVN0Rnpma0p6NGFNV0ZQNkVIbXZLQXZyNlBzei9objJ6VnNOTmFiUEQ3bAp3bHZXNlQxSVdHcFBHKytyeGlDWkRKa1dRaDEvTmEySURqQ2RxMnNDQStGR21rZDl5UTY5L01lQkh6ZC9UakhSClJlV0VXSURqMllBd0hNWmp6cWtRdVNNQ2dZQTEwS3AvN2N4alVJQkpXcEdvbU01M0xPN1NzV09yeTZ5SUY3Z0oKYktia0FaR2xhbmpKSnRXbHVTNUhYa3JETzdjLzhGMUhQSHZSdlFKcVFSenBSaklpMmk4MUJ0amwyQ2pBQlBDTwpHTHZqRFUvczlqeUowaGt4ZWFla29Hc3VaOGdUSlpCWjlUVDNsc3Z1azJDZ2RFRWhzMjRNZ1daeDFxeEdkM3h5CjF6L1FHUUtCZ1FDRTdhZlp3SUVVUTZlcEdsVVB3bW0rbXFHaVVOYlhBei9QbksvSWh1T2VWOWFFVTc4bFBIOHAKNnJNcHVoNVNPcUdjUmFaaHBSU0lZM3ZxVTlZazQ5T080Qk9hd0YyajhrNHBWa1RhSkdnRDcxaW44YURiY1ZCYwpWbElNUDJxOTNtbnlPN09DOHpuUUtITXM1V1JXRW9rUmJTc2pXRWVRRjFNdHlCV2FJaVdtbGc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM2RENDQWRDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFsTVJFd0R3WURWUVFLRXdoaWIyOTAKYTNWaVpURVFNQTRHQTFVRUF4TUhhM1ZpWlMxallUQWVGdzB4TnpBMU1Ua3hPRFF4TWpCYUZ3MHlOekExTVRjeApPRFF4TWpCYU1DVXhFVEFQQmdOVkJBb1RDR0p2YjNScmRXSmxNUkF3RGdZRFZRUURFd2RyZFdKbExXTmhNSUlCCklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF1bEFWZnpUZS9tTWwzMU5BeDdQNTI0c3oKblFLbXhHK0JYZkRQdDRPNzc4dEJGNzZSc0VYK3dLclJ0b29CcjdheGh2UjBvazVrRFpQQVJHcE5LQVJtZENTbQozMzZFckZ0cVR3TW9yZVk3V1ZDVTJDQkZPdHQydW1mSkR1R1ZvTlVIRWtEOE1lVjJsWUpDb3h3SnJoZTV3aXFxCm00aHB0U0NlcFVqaWxta1JlV1ErL040K1JWRHByODZHWTJRQlVsdjlPdEE1aHhUaXNiQTAxU3dTUEFXcnBPcVYKOEpJajJSTFpuODVGVHpNRlRRazBXdTBadWdpcnlxZGF4bDMzVkwzK1VSSTNRQzJyMmRwdmQxU2V5V0RFWHZqbQprbjkyMzh3ZSsyd0JlUmFjZUN2QzdqeUR2WVNPaFMrajkyd0ZkblFZeCtIaW5BOG5uOFFmZG0zOHU2QTlod0lECkFRQUJveU13SVRBT0JnTlZIUThCQWY4RUJBTUNBcVF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUcKOXcwQkFRc0ZBQU9DQVFFQURIdmd0RENFOHR2MGxLSXpFcWZ1YlVBNUxLUTROaVQ1U1VBdWNZYXpNcEt3MVFJVwpRaW5Db0xFZnlQTXdna2JnWGp6d25lOFB4ZUVqanZ3Q1Jxd2J5VmlCV252OTM3cDk0cG9aLzlHM0NXM2JTWXdRCjRaZVpud1c2d1cwSUdzRWhlTXdrbkJlUWJvb2NNNmNYdThodG8xQVlIT25qdGcydDFSdWZXcHNEbjVhb2t1Vy8KUkk4SGc1dm5XV0tBQUF3Y3drbWc4YWlOLzFuWVFHL2NvRDQxa1hlL2lKMURUUFphMkNQeGdtNzFmMmhSbkVZVApjN3VUN3V1ZUJhcG8xTyt0dFBrZ2hzSXZQWktjNnZLeEswd3J2ekhHUm9VTGw3N1o4M3o5MmFvUEx6Y21uSjNkCk1GRXE0ZDdKUTV1NWkrU2FxcXFPZHAxUkdBaXVpTnBjdnlQOWV3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
service-account.pub: LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0KTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUExT0pRbUU5SkNJMjBoM0JJL3hKcApRb05JZll2aUhJaGx4NkFsNjBLdjRaYit0YUQrSmQ2cENiSHFqZ1l5aVlIMXdxMG5NQzlNaVJicGhkTXNLZkpYCm81N0gyWDFRV05jKzNSWXpORUwycmEycmtDR3dxMWpLR2s2Um9mYWdicmluakFDOWhHY20vVjcxM2ZDZFNwVUwKSDZSdXJvOUtqdnRjYTBuTGpCY0dDMDNwa3VVaTFlN0VQajJTQUxReEExaVYyK3NxcXBnMmF4bHB5QU43Z2VjYQpmalZOMTBra013OUdLdW1RcVVwZWpDdGYzdFR2enpmbUdxaU5uSERCOGxEblhwSGVjS0laa2ZYZEg1UGQ0alJZCjVEeUZmcnNMNXh5ME9IRjRyQS9FRFNGa2RFWjJyVFlpQ0IvTzE3cHc2THVFdTc5VjNOMmhKVkV3ZTRVdGkzb2wKUXdJREFRQUIKLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0tCg==
kind: Secret
metadata:
name: kube-apiserver
namespace: kube-system
type: Opaque


@ -0,0 +1,82 @@
---
apiVersion: "extensions/v1beta1"
kind: DaemonSet
metadata:
name: kube-apiserver
namespace: kube-system
labels:
tier: control-plane
component: kube-apiserver
spec:
template:
metadata:
labels:
tier: control-plane
component: kube-apiserver
annotations:
checkpointer.alpha.coreos.com/checkpoint: "true"
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- name: kube-apiserver
image: quay.io/coreos/hyperkube:v1.6.2_coreos.0
command:
- /usr/bin/flock
- --exclusive
- --timeout=30
- /var/lock/api-server.lock
- /hyperkube
- apiserver
- --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota
- --advertise-address=$(POD_IP)
- --allow-privileged=true
- --anonymous-auth=false
- --authorization-mode=RBAC
- --bind-address=0.0.0.0
- --client-ca-file=/etc/kubernetes/secrets/ca.crt
- --cloud-provider=
- --etcd-servers=http://10.3.0.15:2379
- --insecure-port=0
- --kubelet-client-certificate=/etc/kubernetes/secrets/apiserver.crt
- --kubelet-client-key=/etc/kubernetes/secrets/apiserver.key
- --secure-port=443
- --service-account-key-file=/etc/kubernetes/secrets/service-account.pub
- --service-cluster-ip-range=10.3.0.0/24
- --storage-backend=etcd3
- --tls-ca-file=/etc/kubernetes/secrets/ca.crt
- --tls-cert-file=/etc/kubernetes/secrets/apiserver.crt
- --tls-private-key-file=/etc/kubernetes/secrets/apiserver.key
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
- mountPath: /etc/ssl/certs
name: ssl-certs-host
readOnly: true
- mountPath: /etc/kubernetes/secrets
name: secrets
readOnly: true
- mountPath: /var/lock
name: var-lock
readOnly: false
hostNetwork: true
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
volumes:
- name: ssl-certs-host
hostPath:
path: /usr/share/ca-certificates
- name: secrets
secret:
secretName: kube-apiserver
- name: var-lock
hostPath:
path: /var/lock


@ -0,0 +1,12 @@
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: kube-controller-manager
namespace: kube-system
spec:
minAvailable: 1
selector:
matchLabels:
tier: control-plane
component: kube-controller-manager


@ -0,0 +1,10 @@
---
apiVersion: v1
data:
ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM2RENDQWRDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFsTVJFd0R3WURWUVFLRXdoaWIyOTAKYTNWaVpURVFNQTRHQTFVRUF4TUhhM1ZpWlMxallUQWVGdzB4TnpBMU1Ua3hPRFF4TWpCYUZ3MHlOekExTVRjeApPRFF4TWpCYU1DVXhFVEFQQmdOVkJBb1RDR0p2YjNScmRXSmxNUkF3RGdZRFZRUURFd2RyZFdKbExXTmhNSUlCCklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF1bEFWZnpUZS9tTWwzMU5BeDdQNTI0c3oKblFLbXhHK0JYZkRQdDRPNzc4dEJGNzZSc0VYK3dLclJ0b29CcjdheGh2UjBvazVrRFpQQVJHcE5LQVJtZENTbQozMzZFckZ0cVR3TW9yZVk3V1ZDVTJDQkZPdHQydW1mSkR1R1ZvTlVIRWtEOE1lVjJsWUpDb3h3SnJoZTV3aXFxCm00aHB0U0NlcFVqaWxta1JlV1ErL040K1JWRHByODZHWTJRQlVsdjlPdEE1aHhUaXNiQTAxU3dTUEFXcnBPcVYKOEpJajJSTFpuODVGVHpNRlRRazBXdTBadWdpcnlxZGF4bDMzVkwzK1VSSTNRQzJyMmRwdmQxU2V5V0RFWHZqbQprbjkyMzh3ZSsyd0JlUmFjZUN2QzdqeUR2WVNPaFMrajkyd0ZkblFZeCtIaW5BOG5uOFFmZG0zOHU2QTlod0lECkFRQUJveU13SVRBT0JnTlZIUThCQWY4RUJBTUNBcVF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUcKOXcwQkFRc0ZBQU9DQVFFQURIdmd0RENFOHR2MGxLSXpFcWZ1YlVBNUxLUTROaVQ1U1VBdWNZYXpNcEt3MVFJVwpRaW5Db0xFZnlQTXdna2JnWGp6d25lOFB4ZUVqanZ3Q1Jxd2J5VmlCV252OTM3cDk0cG9aLzlHM0NXM2JTWXdRCjRaZVpud1c2d1cwSUdzRWhlTXdrbkJlUWJvb2NNNmNYdThodG8xQVlIT25qdGcydDFSdWZXcHNEbjVhb2t1Vy8KUkk4SGc1dm5XV0tBQUF3Y3drbWc4YWlOLzFuWVFHL2NvRDQxa1hlL2lKMURUUFphMkNQeGdtNzFmMmhSbkVZVApjN3VUN3V1ZUJhcG8xTyt0dFBrZ2hzSXZQWktjNnZLeEswd3J2ekhHUm9VTGw3N1o4M3o5MmFvUEx6Y21uSjNkCk1GRXE0ZDdKUTV1NWkrU2FxcXFPZHAxUkdBaXVpTnBjdnlQOWV3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
service-account.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBMU9KUW1FOUpDSTIwaDNCSS94SnBRb05JZll2aUhJaGx4NkFsNjBLdjRaYit0YUQrCkpkNnBDYkhxamdZeWlZSDF3cTBuTUM5TWlSYnBoZE1zS2ZKWG81N0gyWDFRV05jKzNSWXpORUwycmEycmtDR3cKcTFqS0drNlJvZmFnYnJpbmpBQzloR2NtL1Y3MTNmQ2RTcFVMSDZSdXJvOUtqdnRjYTBuTGpCY0dDMDNwa3VVaQoxZTdFUGoyU0FMUXhBMWlWMitzcXFwZzJheGxweUFON2dlY2FmalZOMTBra013OUdLdW1RcVVwZWpDdGYzdFR2Cnp6Zm1HcWlObkhEQjhsRG5YcEhlY0tJWmtmWGRINVBkNGpSWTVEeUZmcnNMNXh5ME9IRjRyQS9FRFNGa2RFWjIKclRZaUNCL08xN3B3Nkx1RXU3OVYzTjJoSlZFd2U0VXRpM29sUXdJREFRQUJBb0lCQUhTV2pYVWMxdTZzVE5adwpGRW85bHhBcVBpVWoydTJ0ZGJCaWNPSHJYOCs0bGo1NnNUV2tRQWRqUFFZVE50SkFMb3d6c0dhZlFOZERpUmtWCmtmWlhGdEF4UVZwSFd4Mk1wSTBJZjNwN3dnVlVPOFZ2N2dXcFZ1WVphWUMrUlJiZVlrUTJrNVJUdWZMQmN2M2QKclFjUG9VdnZEZjdqMHYyRGhCWHVFRi9rckJhNzBPbkk2RnY1YjZUYXk0Y042dm1OSlNQVWxEUHZpY0Npem12VgpXdEFxNXBrUGZYVzF1d2VNWURPU0QxMHphZXRjbE1hZS8wQzFoYWhrOWtHb0x2NDlYbktDWC9MdXp3eDBTaEpMCkYwWmsrMHM5bm1NQUFmUkw4Sk03RTlpd1hhOEk0elhwYU5PTjVSZnpkVVFlVTZwdWhOUXJNRXhyZnpGWVdZVmwKclBhUm5xRUNnWUVBNEM3aTlCMDh3UitKRWJLZUV2VFR6VUFTOFcrUzVsU2t6UE43NVR0NGFIZVRvanp2UlhhMApuVXZiciswUEdjdHBhM093RHpoL1NheUtxa0p2V3p4V216S0VMVHNXa3BVWkx5eDM3b3hrb1ErZFVLU0ZEWUY3CmVqR1lmcXRoVUM2NU5BMHJxbXo2cWlDSy9SRlhMMWloTVkwZi83NCtJekNob2lmdHBGUTBwdDhDZ1lFQTh4am4KakhjQnBHbVVPeUtSV2ttVE0xeDNsNU5oVDJiWll5NUNHUFhaOHRpdTZ6ZGkyZ3cyeFVtZ1ZJUHpVblREcW1PSApOUHVSdkh2MnNvdnFac0FwRGFua3d6c1d0aEZMVkZqUGRwWGpWYStHdnA2WU4wRlRlZUlFakd1am1DSjlaajliCm9JazRvNmdSelFOeDVML1JhRTIvb1FyVEd3bENXZUE0NHBINmdoMENnWUVBMEtaU3pPazVWblZIV1pWbzBqUFQKdlVCWllTUjdFS3pQQllISVdqM1RmMGRydktBQ0FpRE5VV2o4K3V3a0ZkbmdNQVhvWXdJdVZoK2tuM3Bkc2dpaQpncWV0cFh0Tk12aGFEREhUSGM3RkNiSkN0SCtxNWpzUTlWV2JuS2xkVlFkbmtDNkI2WWlzZEJMOXlUT09kWjZECnlGNlUzYTN1bjBudjVjQkx5Wm9sdHZrQ2dZRUE1QWV4YzZaU0tRcE1YR2dobG1LN3JJc0pOMnFzOWhGUXkyTWgKNTAzK29uaTFJN2p4aGYyOUJyVDRxeTZXK1ByRWE3a3VvL2x6REMzd0RDMklzOWQrNnUwNXhCUlNTbmpRZzQ5SApGRUtuVzhIcGtEY3VLMjZnd2d6TUhYZituZitFUjN3WkUrNkQ3YWdEQXA4L244WjZ4TzloV012Um1HUElGSXhxCmI4VmxDZFVDZ1lCZ3dmVXNTc0NNUDhLVk9KQXV3ZjQvU1dPa0lVVVFIUVVqMUN5RXoyVVdHNVFpUDJ3cUZpQTcKSUg4SzhKc085TVNXcTNuZFI5a1IrSEdCQ2tKeXlvRDFHekJaZVJoUGIrNjlmWVdhbzNsS1V6RURxbXhCN3pqaApOUGx0YkxsR0dOYlBoY3pYeUplU3YxTjk0TVV3WTF3dDBhQVg2RytIaUJJOGEzY2pDL2NRUGc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
kind: Secret
metadata:
name: kube-controller-manager
namespace: kube-system
type: Opaque


@ -0,0 +1,77 @@
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-controller-manager
namespace: kube-system
labels:
tier: control-plane
component: kube-controller-manager
spec:
replicas: 2
template:
metadata:
labels:
tier: control-plane
component: kube-controller-manager
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: tier
operator: In
values:
- control-plane
- key: component
operator: In
values:
- kube-controller-manager
topologyKey: kubernetes.io/hostname
containers:
- name: kube-controller-manager
image: quay.io/coreos/hyperkube:v1.6.2_coreos.0
command:
- ./hyperkube
- controller-manager
- --allocate-node-cidrs=true
- --cloud-provider=
- --cluster-cidr=10.2.0.0/16
- --configure-cloud-routes=false
- --leader-elect=true
- --root-ca-file=/etc/kubernetes/secrets/ca.crt
- --service-account-private-key-file=/etc/kubernetes/secrets/service-account.key
livenessProbe:
httpGet:
path: /healthz
port: 10252 # Note: Using default port. Update if --port option is set differently.
initialDelaySeconds: 15
timeoutSeconds: 15
volumeMounts:
- name: secrets
mountPath: /etc/kubernetes/secrets
readOnly: true
- name: ssl-host
mountPath: /etc/ssl/certs
readOnly: true
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
volumes:
- name: secrets
secret:
secretName: kube-controller-manager
- name: ssl-host
hostPath:
path: /usr/share/ca-certificates
dnsPolicy: Default # Don't use cluster DNS.


@ -0,0 +1,156 @@
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
spec:
# replicas: not specified here:
# 1. So that the Addon Manager does not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 0
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- name: kubedns
image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.1
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
livenessProbe:
httpGet:
path: /healthcheck/kubedns
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
# we poll on pod startup for the Kubernetes master service and
# only setup the /readiness HTTP server once that's available.
initialDelaySeconds: 3
timeoutSeconds: 5
args:
- --domain=cluster.local.
- --dns-port=10053
- --config-dir=/kube-dns-config
- --v=2
env:
- name: PROMETHEUS_PORT
value: "10055"
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
volumeMounts:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.1
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- -v=2
- -logtostderr
- -configDir=/etc/k8s/dns/dnsmasq-nanny
- -restartDnsmasq=true
- --
- -k
- --cache-size=1000
- --log-facility=-
- --server=/cluster.local/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
- --server=/ip6.arpa/127.0.0.1#10053
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
# see: https://github.com/kubernetes/kubernetes/issues/29055 for details
resources:
requests:
cpu: 150m
memory: 20Mi
volumeMounts:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.1
livenessProbe:
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- --v=2
- --logtostderr
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A
ports:
- containerPort: 10054
name: metrics
protocol: TCP
resources:
requests:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
volumes:
- name: kube-dns-config
configMap:
name: kube-dns
optional: true


@ -0,0 +1,21 @@
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: 10.3.0.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP


@ -0,0 +1,49 @@
---
apiVersion: "extensions/v1beta1"
kind: DaemonSet
metadata:
name: kube-etcd-network-checkpointer
namespace: kube-system
labels:
tier: control-plane
component: kube-etcd-network-checkpointer
spec:
template:
metadata:
labels:
tier: control-plane
component: kube-etcd-network-checkpointer
annotations:
checkpointer.alpha.coreos.com/checkpoint: "true"
spec:
containers:
- image: quay.io/coreos/kenc:48b6feceeee56c657ea9263f47b6ea091e8d3035
name: kube-etcd-network-checkpointer
securityContext:
privileged: true
volumeMounts:
- mountPath: /etc/kubernetes/selfhosted-etcd
name: checkpoint-dir
readOnly: false
- mountPath: /var/lock
name: var-lock
readOnly: false
command:
- /usr/bin/flock
- /var/lock/kenc.lock
- -c
- "kenc -r -m iptables && kenc -m iptables"
hostNetwork: true
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
volumes:
- name: checkpoint-dir
hostPath:
path: /etc/kubernetes/checkpoint-iptables
- name: var-lock
hostPath:
path: /var/lock


@ -0,0 +1,39 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: canal-config
namespace: kube-system
data:
# Configure this with the location of your etcd cluster.
etcd_endpoints: "http://10.3.0.136:6666"
# The interface used by Canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
# default route.
canal_iface: ""
# Whether or not to masquerade traffic to destinations not within
# the pod network.
masquerade: "true"
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "canal",
"type": "flannel",
"delegate": {
"type": "calico",
"etcd_endpoints": "__ETCD_ENDPOINTS__",
"log_level": "info",
"policy": {
"type": "k8s",
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
}
}
}


@ -0,0 +1,368 @@
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: canal-etcd
namespace: kube-system
labels:
k8s-app: canal-etcd
spec:
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
k8s-app: canal-etcd
spec:
# Only run this pod on the master.
nodeSelector:
node-role.kubernetes.io/master: ""
hostNetwork: true
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
containers:
- name: canal-etcd
image: quay.io/coreos/etcd:v3.1.4
env:
- name: ETCD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
command: ["/bin/sh","-c"]
args: ["/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"]
volumeMounts:
- name: var-etcd
mountPath: /var/etcd
volumes:
- name: var-etcd
hostPath:
path: /var/etcd
---
# This manifest installs the Service which gets traffic to the Calico
# etcd.
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: canal-etcd
name: canal-etcd
namespace: kube-system
spec:
# Select the canal-etcd pod running on the master.
selector:
k8s-app: canal-etcd
# This ClusterIP needs to be known in advance, since we cannot rely
# on DNS to get access to etcd.
clusterIP: 10.3.0.136
ports:
- port: 6666
---
# This manifest installs the per-node agents, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: canal-node
namespace: kube-system
labels:
k8s-app: canal-node
spec:
selector:
matchLabels:
k8s-app: canal-node
template:
metadata:
labels:
k8s-app: canal-node
spec:
hostNetwork: true
serviceAccountName: calico-cni-plugin
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
containers:
# Runs the flannel daemon to enable vxlan networking between
# container hosts.
- name: flannel
image: quay.io/coreos/flannel:v0.7.1
env:
# The location of the etcd cluster.
- name: FLANNELD_ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_endpoints
# The interface flannel should run on.
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
name: canal-config
key: canal_iface
# Perform masquerade on traffic leaving the pod cidr.
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
name: canal-config
key: masquerade
# Write the subnet.env file to the mounted directory.
- name: FLANNELD_SUBNET_FILE
value: "/run/flannel/subnet.env"
securityContext:
privileged: true
volumeMounts:
- mountPath: /etc/resolv.conf
name: resolv
- mountPath: /run/flannel
name: run-flannel
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and local routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v1.1.3
env:
# The location of the etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_endpoints
# Disable Calico BGP. Calico is simply enforcing policy.
- name: CALICO_NETWORKING
value: "false"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# All pods to speak to services that resolve to the same host.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-calico-cni
image: quay.io/calico/cni:v1.7.0
imagePullPolicy: Always
command: ["/install-cni.sh"]
env:
# The name of the CNI network config file to install.
- name: CNI_CONF_NAME
value: "10-canal.conf"
# The location of the etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_endpoints
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: canal-config
key: cni_network_config
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used by flannel daemon.
- name: run-flannel
hostPath:
path: /run/flannel
- name: resolv
hostPath:
path: /etc/resolv.conf
---
# This manifest deploys a Job which performs one time
# configuration of Canal.
apiVersion: batch/v1
kind: Job
metadata:
name: configure-canal
namespace: kube-system
labels:
k8s-app: canal
spec:
template:
metadata:
name: configure-canal
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
hostNetwork: true
restartPolicy: OnFailure
containers:
# Writes basic flannel configuration to etcd.
- name: configure-flannel
image: quay.io/coreos/etcd:v3.1.4
command:
- "etcdctl"
- "--no-sync"
- "set"
- "/coreos.com/network/config"
- '{ "Network": "10.2.0.0/16", "Backend": {"Type": "vxlan"} }'
env:
# The location of the etcd cluster.
- name: ETCDCTL_PEERS
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_endpoints
---
# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: ReplicaSet
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
spec:
# The policy controller can only have a single active instance.
replicas: 1
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
spec:
# The policy controller must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: calico-policy-controller
containers:
- name: calico-policy-controller
image: quay.io/calico/kube-policy-controller:v0.5.4
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: canal-config
key: etcd_endpoints
# The location of the Kubernetes API. Use the default Kubernetes
# service for API access.
- name: K8S_API
value: "https://kubernetes.default:443"
# Since we're running in the host namespace and might not have KubeDNS
# access, configure the container's /etc/hosts to resolve
# kubernetes.default to the correct service clusterIP.
- name: CONFIGURE_ETC_HOSTS
value: "true"
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico-cni-plugin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-cni-plugin
subjects:
- kind: ServiceAccount
name: calico-cni-plugin
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-cni-plugin
namespace: kube-system
rules:
- apiGroups: [""]
resources:
- pods
- nodes
verbs:
- get
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-cni-plugin
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico-policy-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-policy-controller
subjects:
- kind: ServiceAccount
name: calico-policy-controller
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-policy-controller
namespace: kube-system
rules:
- apiGroups:
- ""
- extensions
resources:
- pods
- namespaces
- networkpolicies
verbs:
- watch
- list
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-policy-controller
namespace: kube-system
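A quick way to confirm the objects above landed is to list them by the names and
labels used in this manifest. This is a sketch that assumes kubectl is already
pointed at the new cluster; the k8s-app=canal pod label is an assumption based on
the label used on the configure-canal Job.

kubectl --namespace kube-system get pods -l k8s-app=canal -o wide
kubectl --namespace kube-system get replicaset calico-policy-controller
kubectl --namespace kube-system get serviceaccounts calico-cni-plugin calico-policy-controller
kubectl --namespace kube-system get clusterrolebindings calico-cni-plugin calico-policy-controller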

View File

@ -0,0 +1,56 @@
---
apiVersion: "extensions/v1beta1"
kind: DaemonSet
metadata:
name: kube-proxy
namespace: kube-system
labels:
tier: node
component: kube-proxy
spec:
template:
metadata:
labels:
tier: node
component: kube-proxy
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- name: kube-proxy
image: quay.io/coreos/hyperkube:v1.6.2_coreos.0
command:
- /hyperkube
- proxy
- --cluster-cidr=10.2.0.0/16
- --hostname-override=$(NODE_NAME)
- --kubeconfig=/etc/kubernetes/kubeconfig
- --proxy-mode=iptables
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
securityContext:
privileged: true
volumeMounts:
- mountPath: /etc/ssl/certs
name: ssl-certs-host
readOnly: true
- name: etc-kubernetes
mountPath: /etc/kubernetes
readOnly: true
hostNetwork: true
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
volumes:
- hostPath:
path: /usr/share/ca-certificates
name: ssl-certs-host
- name: etc-kubernetes
hostPath:
path: /etc/kubernetes
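Because the proxy runs with --proxy-mode=iptables, its work is visible directly in
each node's NAT table. A minimal sanity check, assuming shell access to a node; the
KUBE-SERVICES chain name is the standard one kube-proxy programs:

kubectl --namespace kube-system get pods -l component=kube-proxy -o wide
sudo iptables -t nat -L KUBE-SERVICES -n | head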

View File

@ -0,0 +1,12 @@
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: kube-scheduler
namespace: kube-system
spec:
minAvailable: 1
selector:
matchLabels:
tier: control-plane
component: kube-scheduler
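With the two-replica scheduler Deployment that follows, minAvailable: 1 means the
eviction API will drain at most one scheduler pod at a time. A short check, using
the resource name defined above:

kubectl --namespace kube-system get poddisruptionbudget kube-scheduler
kubectl --namespace kube-system describe poddisruptionbudget kube-scheduler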

View File

@ -0,0 +1,56 @@
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-scheduler
namespace: kube-system
labels:
tier: control-plane
component: kube-scheduler
spec:
replicas: 2
template:
metadata:
labels:
tier: control-plane
component: kube-scheduler
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: tier
operator: In
values:
- control-plane
- key: component
operator: In
values:
- kube-scheduler
topologyKey: kubernetes.io/hostname
containers:
- name: kube-scheduler
image: quay.io/coreos/hyperkube:v1.6.2_coreos.0
command:
- ./hyperkube
- scheduler
- --leader-elect=true
livenessProbe:
httpGet:
path: /healthz
port: 10251 # Note: Using default port. Update if --port option is set differently.
initialDelaySeconds: 15
timeoutSeconds: 15
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
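Only one of the two replicas holds the scheduler lease at a time; in this Kubernetes
release leader election is typically recorded as an annotation on the kube-scheduler
Endpoints object. A hedged way to see which pod is currently active:

kubectl --namespace kube-system get endpoints kube-scheduler -o yaml | grep holderIdentity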

View File

@ -0,0 +1,14 @@
---
apiVersion: rbac.authorization.k8s.io/v1alpha1
kind: ClusterRoleBinding
metadata:
name: system:default-sa
subjects:
- kind: ServiceAccount
name: default
namespace: kube-system
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
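This binding grants cluster-admin to the default service account in the kube-system
namespace. A hedged way to confirm the effect once the apiserver is reachable, using
kubectl's impersonation support (requires a kubeconfig that is itself allowed to
impersonate):

kubectl auth can-i '*' '*' --as=system:serviceaccount:kube-system:default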

View File

@ -0,0 +1,59 @@
---
apiVersion: "extensions/v1beta1"
kind: DaemonSet
metadata:
name: pod-checkpointer
namespace: kube-system
labels:
tier: control-plane
component: pod-checkpointer
spec:
template:
metadata:
labels:
tier: control-plane
component: pod-checkpointer
annotations:
checkpointer.alpha.coreos.com/checkpoint: "true"
spec:
containers:
- name: checkpoint
image: quay.io/coreos/pod-checkpointer:20cf8b9a6018731a0770192f30dfa7a1941521e3
command:
- /checkpoint
- --v=4
- --lock-file=/var/run/lock/pod-checkpointer.lock
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
imagePullPolicy: Always
volumeMounts:
- mountPath: /etc/kubernetes
name: etc-kubernetes
- mountPath: /var/run
name: var-run
hostNetwork: true
nodeSelector:
node-role.kubernetes.io/master: ""
restartPolicy: Always
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
volumes:
- name: etc-kubernetes
hostPath:
path: /etc/kubernetes
- name: var-run
hostPath:
path: /var/run
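The checkpointer's job is to snapshot self-hosted control-plane pods into static
manifests the kubelet can run even if the apiserver goes away. A rough way to watch
it on a master node; the inactive-manifests path is typical for this checkpointer
image but is an assumption here and may vary:

kubectl --namespace kube-system get pods -l component=pod-checkpointer -o wide
ls /etc/kubernetes/manifests /etc/kubernetes/inactive-manifests 2>/dev/null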

21
assets/tls/apiserver.crt Normal file
View File

@ -0,0 +1,21 @@
-----BEGIN CERTIFICATE-----
MIIDhDCCAmygAwIBAgIIYRTnEUWPB2EwDQYJKoZIhvcNAQELBQAwJTERMA8GA1UE
ChMIYm9vdGt1YmUxEDAOBgNVBAMTB2t1YmUtY2EwHhcNMTcwNTE5MTg0MTIwWhcN
MTgwNTE5MTg0MTIxWjAvMRQwEgYDVQQKEwtrdWJlLW1hc3RlcjEXMBUGA1UEAxMO
a3ViZS1hcGlzZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDB
sXDQGt4CSkm+H0oT3HgzADzK3IQtc5QVKTb2DTyw2/m+h4MRd6n+lra8pto09Is/
YiVx8OCCFFsO726ZZqLQlQePDF36QKJbpIyGq2b3GVByDQqtn47xhXUeLu0z7IMK
8906xmZXeg8HHTIS9P66z3xA9kLn0nwSSFJHGTXMoFr8cnLySnrtDHe9pGo/+jcR
0+jiH3at3w2F1tCaTZ8znEMRP80BTysb7IlZdmNBfaSoT45Nje2eBpZDdxvI8qhi
J2ZWZ7vQsu6AlCneKpTj4tgsV6sEAgs2V8pabRaSM5t0Hq1lGo/npcOamIUQAq1u
O2SpSTIojdSHmWdD9h5dAgMBAAGjga0wgaowDgYDVR0PAQH/BAQDAgWgMB0GA1Ud
JQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjB5BgNVHREEcjBwggprdWJlcm5ldGVz
ggprdWJlcm5ldGVzghJrdWJlcm5ldGVzLmRlZmF1bHSCFmt1YmVybmV0ZXMuZGVm
YXVsdC5zdmOCJGt1YmVybmV0ZXMuZGVmYXVsdC5zdmMuY2x1c3Rlci5sb2NhbIcE
CgMAATANBgkqhkiG9w0BAQsFAAOCAQEAj8G9Lc75QNbhADQuOXsSGEi6bJ0UdFoV
vv5VLNMOdZ0+jXtHtLYrB3RygIcolSdqlaqpM9nj61xgnhG3OIYIw8BCqQlaBgO+
5cAvzmql29AoDbLeu3JctmySScqyCj4mqtlOGHgIotUq226Re1aqSJ8zLH7UDVEa
jyQo8vn5GQm/XwyGUt4nSpYXMi6MztebcePdyOe4387NFJS9/OUQIdWlhv1cegK+
fU8KRv2MiBfZZqJ1DQD17eV9494DImGN1nCpVlmPNBGTCe75SOYCBOwYhHKoNMLn
YmtnpzBtfAkU4EzjiMm6V22XI/lZsQdxeQfMMScmh+M60DHr7ToRdg==
-----END CERTIFICATE-----

27
assets/tls/apiserver.key Normal file
View File

@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAwbFw0BreAkpJvh9KE9x4MwA8ytyELXOUFSk29g08sNv5voeD
EXep/pa2vKbaNPSLP2IlcfDgghRbDu9umWai0JUHjwxd+kCiW6SMhqtm9xlQcg0K
rZ+O8YV1Hi7tM+yDCvPdOsZmV3oPBx0yEvT+us98QPZC59J8EkhSRxk1zKBa/HJy
8kp67Qx3vaRqP/o3EdPo4h92rd8NhdbQmk2fM5xDET/NAU8rG+yJWXZjQX2kqE+O
TY3tngaWQ3cbyPKoYidmVme70LLugJQp3iqU4+LYLFerBAILNlfKWm0WkjObdB6t
ZRqP56XDmpiFEAKtbjtkqUkyKI3Uh5lnQ/YeXQIDAQABAoIBAERN1ZGdl+LI3b5s
/EuKuNyLXeTP5NC+bF8V/KrCOj/IIwccdI0JXOpJrcFTOano/t3oN3o5zoIwuXfH
2YHBHvNdSqAYZV+lwVt96IxpD1NeGu9NSBG4LclgHc/6Dm38Hq4TF1XttxNsGLaS
hiEHQnkQSCoEbc2gfV5ZIKKv8jfpShYiaAPzrt3saE/2+OliJ5p6zfXKNlEsg1US
78g+JiOVXZdEQFyPP5Yo8gje8wQ2NetnilQQ9rtBbPv9FfsTrj03srlU2D7IIBdQ
7D3Z5AN7e7RiwRGmStZ4GllcCuhvjhvfhav132G01o8/DwvVLTnfSKFA7+E8UYG9
6ZAzX4UCgYEA/pXt8ehj3s1f8cNaSEJlD8AsOHgzcuxRvdrE+zA8l4eEFpP5UJej
OcDu966q1yt4Qp7Yx2sW3UA76m7RugdqA5MP25fgzGV5n22iwYbnBQvqDQEOjMH1
1k0CkaRXhDCzGuwb07og/rhOJdCI3OSCQpLD6BsX8MVPJ/2Gfe4XECcCgYEAwsTo
/iNZFNKkPwlfjpirry6gB7GZYRYdkneMM92fTzuDdqSIrM9oLBeUyixAfPP9c0yV
2cwhc8TLdHxIwatzNNJkwp2+eANfg8jQ0vK9J8V0649C5iM3aZ5MUVG2IS4RAZtH
MG2w5fvdd7SqJ8ROWUy7+E0s472yfJNL3auNa9sCgYEA5AXPwEsAII/cboMlevEU
6Z2bPdzTYAywThcDNWSH8MStFzfkJz4aMWFP6EHmvKAvr6Psz/hn2zVsNNabPD7l
wlvW6T1IWGpPG++rxiCZDJkWQh1/Na2IDjCdq2sCA+FGmkd9yQ69/MeBHzd/TjHR
ReWEWIDj2YAwHMZjzqkQuSMCgYA10Kp/7cxjUIBJWpGomM53LO7SsWOry6yIF7gJ
bKbkAZGlanjJJtWluS5HXkrDO7c/8F1HPHvRvQJqQRzpRjIi2i81Btjl2CjABPCO
GLvjDU/s9jyJ0hkxeaekoGsuZ8gTJZBZ9TT3lsvuk2CgdEEhs24MgWZx1qxGd3xy
1z/QGQKBgQCE7afZwIEUQ6epGlUPwmm+mqGiUNbXAz/PnK/IhuOeV9aEU78lPH8p
6rMpuh5SOqGcRaZhpRSIY3vqU9Yk49OO4BOawF2j8k4pVkTaJGgD71in8aDbcVBc
VlIMP2q93mnyO7OC8znQKHMs5WRWEokRbSsjWEeQF1MtyBWaIiWmlg==
-----END RSA PRIVATE KEY-----

18
assets/tls/ca.crt Normal file
View File

@ -0,0 +1,18 @@
-----BEGIN CERTIFICATE-----
MIIC6DCCAdCgAwIBAgIBADANBgkqhkiG9w0BAQsFADAlMREwDwYDVQQKEwhib290
a3ViZTEQMA4GA1UEAxMHa3ViZS1jYTAeFw0xNzA1MTkxODQxMjBaFw0yNzA1MTcx
ODQxMjBaMCUxETAPBgNVBAoTCGJvb3RrdWJlMRAwDgYDVQQDEwdrdWJlLWNhMIIB
IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAulAVfzTe/mMl31NAx7P524sz
nQKmxG+BXfDPt4O778tBF76RsEX+wKrRtooBr7axhvR0ok5kDZPARGpNKARmdCSm
336ErFtqTwMoreY7WVCU2CBFOtt2umfJDuGVoNUHEkD8MeV2lYJCoxwJrhe5wiqq
m4hptSCepUjilmkReWQ+/N4+RVDpr86GY2QBUlv9OtA5hxTisbA01SwSPAWrpOqV
8JIj2RLZn85FTzMFTQk0Wu0Zugiryqdaxl33VL3+URI3QC2r2dpvd1SeyWDEXvjm
kn9238we+2wBeRaceCvC7jyDvYSOhS+j92wFdnQYx+HinA8nn8Qfdm38u6A9hwID
AQABoyMwITAOBgNVHQ8BAf8EBAMCAqQwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG
9w0BAQsFAAOCAQEADHvgtDCE8tv0lKIzEqfubUA5LKQ4NiT5SUAucYazMpKw1QIW
QinCoLEfyPMwgkbgXjzwne8PxeEjjvwCRqwbyViBWnv937p94poZ/9G3CW3bSYwQ
4ZeZnwW6wW0IGsEheMwknBeQboocM6cXu8hto1AYHOnjtg2t1RufWpsDn5aokuW/
RI8Hg5vnWWKAAAwcwkmg8aiN/1nYQG/coD41kXe/iJ1DTPZa2CPxgm71f2hRnEYT
c7uT7uueBapo1O+ttPkghsIvPZKc6vKxK0wrvzHGRoULl77Z83z92aoPLzcmnJ3d
MFEq4d7JQ5u5i+SaqqqOdp1RGAiuiNpcvyP9ew==
-----END CERTIFICATE-----

27
assets/tls/ca.key Normal file
View File

@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAulAVfzTe/mMl31NAx7P524sznQKmxG+BXfDPt4O778tBF76R
sEX+wKrRtooBr7axhvR0ok5kDZPARGpNKARmdCSm336ErFtqTwMoreY7WVCU2CBF
Ott2umfJDuGVoNUHEkD8MeV2lYJCoxwJrhe5wiqqm4hptSCepUjilmkReWQ+/N4+
RVDpr86GY2QBUlv9OtA5hxTisbA01SwSPAWrpOqV8JIj2RLZn85FTzMFTQk0Wu0Z
ugiryqdaxl33VL3+URI3QC2r2dpvd1SeyWDEXvjmkn9238we+2wBeRaceCvC7jyD
vYSOhS+j92wFdnQYx+HinA8nn8Qfdm38u6A9hwIDAQABAoIBADpNLSztQoqgRA2q
Y68aZqmI2dHcLotxyS24WYe3tWvIUso3XCeo/5sS2SUh8n0l0k/E12qi1TRac+P0
z8gh+F2HyqBNWv8EbDPlbSldzlyYlrs6/e75FiImsAf0F3qIrvnLVB/ZCk6mwGuC
LpVH310fNNwOx+ViG8LlF+KxZkJxzoKQ2RwiCwzMzpvNBTJyEE1jfqNlc92XnP65
FhjcFfzSJhFK3VH1gdpfO8bUiLiiUhzKzXH7Af73UqZ22wHeYx87ZJBv7e9ymbWT
GMf9js92e3OdXa3al75JlXgexSDmV2OdZNj6zpqAyupo5b+jXNxcxDaQCitOAcyU
H6HqMiECgYEAwWeEvOL/JC1hFBniM3jtG7ZcXjT1nuc0I9z+b0O6i3JXp1AXuxqU
COOn0udgJ4SJZZk2LOja7Mq6DsPvbPK9OA/XvSju6U/cqALpLdT+bvcG1J5km80w
F9d5a8CmABYsIzIm5VOYCZN/ELxo9uzDhNpiU1m7EVZengg8E1/xSpMCgYEA9pz/
SGZTFHdLZn7jgg9EzdnjZ2SlSnGc1tHayiRbHknwt8JFMwHeL/TPI6/4ns4A8l59
IEl1Zf8pWDhwa2qGITXQBmauLYzuPGSIBdABLnJQtE4r6o+vYafZxZVvTAv5B4Sz
TCWFkLYtvHvs71+u7IKS+dJg3EYy3Gx5KVhddb0CgYAr8QMdj018wLqvwHm+TBlD
FJnD5bBwnAMiqtE8Il091YrIvs/FePJtWpwEtQEJuXkmFjtS1Mz4w86mECpTzIrl
M+RGXAh8BeMSYSbtfNkaCRIKOLqPE317zT8PFkQg/OimTny72dRPSK2z9bq7b2u0
wZFZcqen9sGkkiZkGIZP9QKBgQDcgX6FVvD8QLqLl/OHLG3h/ewmW8irqrCJKDUQ
P7e1enmhZTSIqifoC2ZXS5XrMNmJ3VDWWLh/DcsDFdv3P9VUxpAN2SvukK/IEj/J
qrYTuKVOwwLjhbxUfkfrMnXEsoPl5BKJiJdH0I1OliRB2PVIhmwysphm/OGnU9p2
TIuspQKBgQCq5QJcny6CWHnFh/Q1faYqIjvaS4MqLfnDndvZ98abpcjik3AKgWof
iaROSk40L+q4uDuaM5tU1ufS/FS94hwlk2O1bQ/xgJBkNZnvZJRFU3oZjhggyl6G
iFtBTAGGtJqHTPMtn/Y6dUOJ/ZFIZWzuNhJGYX/S3ifpZeldKXmXew==
-----END RSA PRIVATE KEY-----

19
assets/tls/kubelet.crt Normal file
View File

@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDAzCCAeugAwIBAgIILMPkLd2E/uAwDQYJKoZIhvcNAQELBQAwJTERMA8GA1UE
ChMIYm9vdGt1YmUxEDAOBgNVBAMTB2t1YmUtY2EwHhcNMTcwNTE5MTg0MTIwWhcN
MTgwNTE5MTg0MTIxWjArMRcwFQYDVQQKEw5zeXN0ZW06bWFzdGVyczEQMA4GA1UE
AxMHa3ViZWxldDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALtz9mHo
tPkidPbQeu9RS6tAOQhAhPOzV7y5kxo9ZkyGR5mOJ5MElfoofHWGXDqJs3IHO6Zr
ZTKTYgX6c3jisMhIT62JnN9ZaATWcrd+qQ15ixTNhqdy3UcX6xlB8YF8KpVZ40rO
wrP/UsG9EaBit37iOmmINIkZtbNIhvOYhkJvr+NOtX/8TsnRZpT9PyCeyZJbsZIZ
d1Apfu2ENeS1C1OgOQIEOREBehc3GVH11D9BRtFob22MjZUjxyGj0SButUmpvnY9
ogfE5pT0yhI+kZlP6iMPkk0oGlkcc+U4X8VrSyYXfJNEbmI5aDZe3A4lk4fXiF/Y
NosbHYnzdf/j0acCAwEAAaMxMC8wDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQG
CCsGAQUFBwMBBggrBgEFBQcDAjANBgkqhkiG9w0BAQsFAAOCAQEAIgaxO6aAyGRq
MINPID5bG/ZSRoIBSEX0bAviLKWP9RonjfayM8Xb3r2WZ4TmJoYYDNMRFoyCeStw
1fjl7b2vpmFBOxlpmRvNhRF1dlI9Rt4GRRVkxeS7c4dkc0LFTHEPp0X/RmSt4uf+
X9sYsWOGSBf52+qZ/7UNI6SYwoltenzbwnLHY9NSLXiVFommCXPaBma1GlkQN2F3
cEInhf78BXKXeIpWdZboHuWOUu3aoRT0p6fegb2Uxh2a73s6sToHjE7oy3H2ZvKR
kcFJ2TnKMrqzEK/9wyc/gu/kYVx8/zCoPlDQASem7aTZgOIDZ8wc4g9rBitnxdIs
jxZwjOKt9g==
-----END CERTIFICATE-----

27
assets/tls/kubelet.key Normal file
View File

@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpgIBAAKCAQEAu3P2Yei0+SJ09tB671FLq0A5CECE87NXvLmTGj1mTIZHmY4n
kwSV+ih8dYZcOomzcgc7pmtlMpNiBfpzeOKwyEhPrYmc31loBNZyt36pDXmLFM2G
p3LdRxfrGUHxgXwqlVnjSs7Cs/9Swb0RoGK3fuI6aYg0iRm1s0iG85iGQm+v4061
f/xOydFmlP0/IJ7Jkluxkhl3UCl+7YQ15LULU6A5AgQ5EQF6FzcZUfXUP0FG0Whv
bYyNlSPHIaPRIG61Sam+dj2iB8TmlPTKEj6RmU/qIw+STSgaWRxz5ThfxWtLJhd8
k0RuYjloNl7cDiWTh9eIX9g2ixsdifN1/+PRpwIDAQABAoIBAQCRpzJbs4DjUHXH
zgin6eg9AaMPGWr1HXZgC2YU7n6NmY0K8N0pLFgIz+qdOzBwv8xyHtKnpi001jZF
ZOzSknpAtYdL1XDST1s23xa2I7Hh6X47RNOLSwJLGnev4YBxV3STJgwpdWzuhcbd
CTcoA2yHJ+uxUodXvGVmEEXkA7DW7zLZpvLJ//nD5z5CM0IUPdaSgXhYQp2NZWtI
RjLdjkuYVyBYC2rU4LpmiH1eIVL7bDHoUQhOaHN0wSFG80o46gvrqbhrMPw7BwIu
bCW30q4Y4JPRYn5ru0zCForne65I2kRtnJUDjn99dOntWVZibRojY0hFFEyGYOjZ
WItzGAbxAoGBANFj2ZHitQxtqYs7MNIY9jz/7pzuPaX8dm+2/3WW5Aot01+s4yVH
pd7HE8l5NjnejWG7nG2GPsIhbCCVXEtSMGt1BRioKpc2dLq+ZQb75LGDMaJzMWEm
/HimJuhXvxOzzKC9Z29vo4d6JC58vPwyu27dFAv3rzAcdiWb/aib7S6ZAoGBAOUu
BePZgqlpwl3wqDlAljiLsH8AeZUH2rDA4n4d+1kKPMqJYMmftGaTkDJMeJfisfKb
EXcQsGJAeOLHRpY1VvkHqn5v+7qg9JHSnlw+/nTF5Vk6ISAFMs2Qfwdq6fZ898GZ
mi9VXr0hez7Z/v/liCxBcl0hgAhnjIFGvQ5rSmo/AoGBAIvlVFWdzCyTj/UQBNw6
BTpYHAoJOnMNq+uTrjXYLF+IonKHxfMAXZfsFhJDw7ECOh+UAz1BtehqAB387H7+
WI9SzabdpCcHIRIrZsA1x2O6LY1FvTYVoBTTnacaCPWW6R5zrQnM4sr/FfFhMbqm
AohdeKlOQGO6gE08XUsrclnxAoGBALOv+f5DtCaQPUzaO4toEGAVZjStcqZemiCr
mum3KDMPy8ohHDn5dcBXQl+thX/QxiSpYHAyLZlbY2yrQbTT7XUjhZHMy1nwiNEs
ie1ZlriH0OK8qOwqJ0L1YCO4t+gC415vyGwES1uOvMrysPSCStooFjre4Tu1tHxH
skNz68yRAoGBAJyMFoQu0rzOxCwQx+8m1encm9pcUvu2eSwwy+9460W474Ww4qZA
F4DWwjDg5dBG1Im21KIJkhoX579dh3QIRr3PRwlQUkQlxTrUSEtpfNTU3pvWV9BF
tuLS1TnOdweoQ8cGZZd9PWMLLrBd0JeR4FyH23rOUmMFwJ2A6OopeX6B
-----END RSA PRIVATE KEY-----

View File

@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA1OJQmE9JCI20h3BI/xJpQoNIfYviHIhlx6Al60Kv4Zb+taD+
Jd6pCbHqjgYyiYH1wq0nMC9MiRbphdMsKfJXo57H2X1QWNc+3RYzNEL2ra2rkCGw
q1jKGk6RofagbrinjAC9hGcm/V713fCdSpULH6Ruro9Kjvtca0nLjBcGC03pkuUi
1e7EPj2SALQxA1iV2+sqqpg2axlpyAN7gecafjVN10kkMw9GKumQqUpejCtf3tTv
zzfmGqiNnHDB8lDnXpHecKIZkfXdH5Pd4jRY5DyFfrsL5xy0OHF4rA/EDSFkdEZ2
rTYiCB/O17pw6LuEu79V3N2hJVEwe4Uti3olQwIDAQABAoIBAHSWjXUc1u6sTNZw
FEo9lxAqPiUj2u2tdbBicOHrX8+4lj56sTWkQAdjPQYTNtJALowzsGafQNdDiRkV
kfZXFtAxQVpHWx2MpI0If3p7wgVUO8Vv7gWpVuYZaYC+RRbeYkQ2k5RTufLBcv3d
rQcPoUvvDf7j0v2DhBXuEF/krBa70OnI6Fv5b6Tay4cN6vmNJSPUlDPvicCizmvV
WtAq5pkPfXW1uweMYDOSD10zaetclMae/0C1hahk9kGoLv49XnKCX/Luzwx0ShJL
F0Zk+0s9nmMAAfRL8JM7E9iwXa8I4zXpaNON5RfzdUQeU6puhNQrMExrfzFYWYVl
rPaRnqECgYEA4C7i9B08wR+JEbKeEvTTzUAS8W+S5lSkzPN75Tt4aHeTojzvRXa0
nUvbr+0PGctpa3OwDzh/SayKqkJvWzxWmzKELTsWkpUZLyx37oxkoQ+dUKSFDYF7
ejGYfqthUC65NA0rqmz6qiCK/RFXL1ihMY0f/74+IzChoiftpFQ0pt8CgYEA8xjn
jHcBpGmUOyKRWkmTM1x3l5NhT2bZYy5CGPXZ8tiu6zdi2gw2xUmgVIPzUnTDqmOH
NPuRvHv2sovqZsApDankwzsWthFLVFjPdpXjVa+Gvp6YN0FTeeIEjGujmCJ9Zj9b
oIk4o6gRzQNx5L/RaE2/oQrTGwlCWeA44pH6gh0CgYEA0KZSzOk5VnVHWZVo0jPT
vUBZYSR7EKzPBYHIWj3Tf0drvKACAiDNUWj8+uwkFdngMAXoYwIuVh+kn3pdsgii
gqetpXtNMvhaDDHTHc7FCbJCtH+q5jsQ9VWbnKldVQdnkC6B6YisdBL9yTOOdZ6D
yF6U3a3un0nv5cBLyZoltvkCgYEA5Aexc6ZSKQpMXGghlmK7rIsJN2qs9hFQy2Mh
503+oni1I7jxhf29BrT4qy6W+PrEa7kuo/lzDC3wDC2Is9d+6u05xBRSSnjQg49H
FEKnW8HpkDcuK26gwgzMHXf+nf+ER3wZE+6D7agDAp8/n8Z6xO9hWMvRmGPIFIxq
b8VlCdUCgYBgwfUsSsCMP8KVOJAuwf4/SWOkIUUQHQUj1CyEz2UWG5QiP2wqFiA7
IH8K8JsO9MSWq3ndR9kR+HGBCkJyyoD1GzBZeRhPb+69fYWao3lKUzEDqmxB7zjh
NPltbLlGGNbPhczXyJeSv1N94MUwY1wt0aAX6G+HiBI8a3cjC/cQPg==
-----END RSA PRIVATE KEY-----

View File

@ -0,0 +1,9 @@
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1OJQmE9JCI20h3BI/xJp
QoNIfYviHIhlx6Al60Kv4Zb+taD+Jd6pCbHqjgYyiYH1wq0nMC9MiRbphdMsKfJX
o57H2X1QWNc+3RYzNEL2ra2rkCGwq1jKGk6RofagbrinjAC9hGcm/V713fCdSpUL
H6Ruro9Kjvtca0nLjBcGC03pkuUi1e7EPj2SALQxA1iV2+sqqpg2axlpyAN7geca
fjVN10kkMw9GKumQqUpejCtf3tTvzzfmGqiNnHDB8lDnXpHecKIZkfXdH5Pd4jRY
5DyFfrsL5xy0OHF4rA/EDSFkdEZ2rTYiCB/O17pw6LuEu79V3N2hJVEwe4Uti3ol
QwIDAQAB
-----END PUBLIC KEY-----
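The PEM material above can be sanity-checked with openssl before it is baked into
the genesis and join images. A minimal sketch, assuming the assets/tls/ paths shown
in the file headers; the unlabeled key pair at the end is left out because its
filename is not visible here:

openssl x509 -in assets/tls/apiserver.crt -noout -subject -dates
openssl x509 -in assets/tls/apiserver.crt -noout -text | grep -A1 'Subject Alternative Name'
openssl verify -CAfile assets/tls/ca.crt assets/tls/apiserver.crt assets/tls/kubelet.crt
openssl rsa -in assets/tls/ca.key -check -noout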

View File

@ -1,2 +0,0 @@
## Instructions:
ansible-playbook -e bootstrap_enabled=true -i hosts-deploy.yaml site.yaml

View File

@ -1,38 +0,0 @@
#Sample Hosts File with variables
#For Single node deployments, make sure that the bootstrap node is listed as a master and worker node as well.
[bootstrap]
192.168.0.1
[master]
#Make sure bootstrap node is first master node
192.168.0.1
192.168.0.2
[workers]
192.168.0.3
[bootstrap:vars]
node_master=true
bootstrap_enabled=false
boot_kube_version="v0.3.12"
[master:vars]
node_master=true
cni_version="v0.5.1"
hyperkube_version="v1.5.6"
kubelet_version="v1.5.6"
calicoctl_version="v1.1.0"
calico_peer1="192.168.0.4"
calico_peer2="192.168.0.5"
deploy_pods_master=true
[all:vars]
ansible_user="ubuntu"
ansible_ssh_pass="password"
#API Server FQDN is required for SkyDNS to resolve
api_server_fqdn="cluster-ha.default.svc.cluster.local"
kube_labels="openstack-control-plane"
kube_controller_manager_version="v1.5.6"

Binary file not shown.


Binary file not shown.


View File

@ -1,10 +1,10 @@
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
Documentation=https://kubernetes.io/docs/admin/kubelet/
[Service]
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStart=/usr/bin/kubelet \
ExecStart=/usr/local/bin/kubelet \
--kubeconfig=/etc/kubernetes/kubeconfig \
--require-kubeconfig \
--cni-conf-dir=/etc/cni/net.d \
@ -14,11 +14,10 @@ ExecStart=/usr/bin/kubelet \
--exit-on-lock-contention \
--pod-manifest-path=/etc/kubernetes/manifests \
--allow-privileged \
--minimum-container-ttl-duration=6m0s \
--cluster_dns=10.3.0.10 \
--cluster_dns=192.168.1.70,8.8.8.8,10.3.0.10 \
--cluster_domain=cluster.local \
--node-labels=master={{ node_master|default('false') }} \
--hostname-override={{ inventory_hostname }} \
--node-labels=node-role.kubernetes.io/canal-node=true,node-role.kubernetes.io/master= \
--hostname-override=${NODE_HOSTNAME} \
--v=2
Restart=on-failure
RestartSec=5
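This unit is shipped to the node images as kubelet.service.template, and those
images carry envsubst (gettext-base), so the ${NODE_HOSTNAME} placeholder is
presumably rendered before the unit is installed. A sketch of that step under those
assumptions; the hostname value is illustrative:

export NODE_HOSTNAME=node1
envsubst '${NODE_HOSTNAME}' < kubelet.service.template > /etc/systemd/system/kubelet.service
systemctl daemon-reload
systemctl enable --now kubelet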

View File

@ -1,26 +0,0 @@
---
- name: Install Ceph
apt:
name: ceph-common
state: present
register: ceph_installed
when: addons_enabled and "{{addons.ceph is defined}}"
- name: Create Ceph and OpenStack-Helm directories
file:
path: "{{ item }}"
state: directory
with_items:
- "/var/lib/openstack-helm/ceph/osd"
- "/var/lib/openstack-helm/ceph/ceph"
- "/var/lib/openstack-helm/ceph/mon"
- "/var/lib/nova/instances"
when: addons_enabled and "{{addons.ceph is defined}}"
- name: Install Sigil for Ceph Secrets
shell: curl -L https://github.com/gliderlabs/sigil/releases/download/v0.4.0/sigil_0.4.0_Linux_x86_64.tgz | tar -zxC /usr/local/bin
when: addons_enabled and "{{addons.ceph is defined}}" and ceph_installed | changed
- name: Capture kubernetes version
shell: kubelet --version | cut -d " " -f2
register: kube_version

View File

@ -1,10 +0,0 @@
---
- name: Check for Kubernetes dashboard
shell: hyperkube kubectl get pods -o wide --all-namespaces | grep kubernetes-dashboard
register: dashboard_check
ignore_errors: true
when: addons_enabled and "{{addons.dashboard is defined}}"
- name: Deploy Kubernetes Dashboard
shell: hyperkube kubectl create -f https://rawgit.com/kubernetes/dashboard/master/src/deploy/kubernetes-dashboard.yaml
when: addons_enabled and "{{addons.dashboard is defined}}" and dashboard_check | failed

View File

@ -1,20 +0,0 @@
---
- name: Check if Helm is installed
stat:
path: /usr/local/bin/helm
register: helm_installed
when: addons_enabled and "{{addons.helm is defined}}"
- name: Install helm
shell: curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > /root/get_helm.sh
when: addons_enabled and "{{addons.ceph is defined}}" and helm_installed.stat.exists == False
- name: Set file properties
file:
path: /root/get_helm.sh
mode: 0700
when: addons_enabled and "{{addons.ceph is defined}}" and helm_installed.stat.exists == False
- name: Install helm
shell: sh /root/get_helm.sh
when: addons_enabled and "{{addons.ceph is defined}}" and helm_installed.stat.exists == False

View File

@ -1,59 +0,0 @@
---
- name: Check if MAAS is Running
shell: hyperkube kubectl describe pod maas-region --namespace=maas
ignore_errors: true
register: maas_deployed
when: addons_enabled and "{{addons.maas is defined}}"
- name: Check if Postgres is Running
shell: hyperkube kubectl describe pod postgresql-0 --namespace=maas
ignore_errors: true
register: postgres_deployed
when: addons_enabled and "{{addons.maas is defined}}"
#Check every 15 seconds to make sure the tiller pod has fully come up.
- action: shell hyperkube kubectl get pods --all-namespaces | grep tiller
register: tiller_output
until: tiller_output.stdout.find("Running") != -1
retries: 20
delay: 15
when: addons_enabled and "{{addons.maas is defined}}"
- name: Run Make on all Helm charts
shell: make
environment:
HELM_HOME: /opt/openstack-helm/repos/openstack-helm/.helm
args:
chdir: /opt/openstack-helm/repos/openstack-helm/
when: addons_enabled and "{{addons.maas is defined}}" and maas_deployed | failed
- name: Deploy Postgres
shell: helm install postgresql --namespace=maas
environment:
HELM_HOME: /opt/openstack-helm/repos/openstack-helm/.helm
args:
chdir: /opt/openstack-helm/repos/openstack-helm/
when: addons_enabled and "{{addons.maas is defined}}" and postgres_deployed | failed
- action: shell hyperkube kubectl get pods --namespace maas
register: postgres_output
until: postgres_output.stdout.find("Running") != -1
retries: 20
delay: 15
when: addons_enabled and "{{addons.maas is defined}}"
- name: Deploy MaaS
shell: helm install maas --namespace=maas
environment:
HELM_HOME: /opt/openstack-helm/repos/openstack-helm/.helm
args:
chdir: /opt/openstack-helm/repos/openstack-helm/
when: addons_enabled and "{{addons.maas is defined}}" and maas_deployed | failed
#Check every 15 seconds until MaaS comes up
- action: shell hyperkube kubectl get pods --namespace maas
register: maas_output
until: maas_output.stdout.find("Running") != -1
retries: 20
delay: 15
when: addons_enabled and "{{addons.maas is defined}}"

View File

@ -1,39 +0,0 @@
---
- name: Create directories for OpenStack Helm
file:
path: /opt/openstack-helm/repos/openstack-helm
state: directory
when: addons_enabled and "{{addons.osh is defined}}"
- name: Checkout OpenStack-Helm
git:
repo: https://github.com/att-comdev/openstack-helm.git
dest: /opt/openstack-helm/repos/openstack-helm
update: true
when: addons_enabled and "{{addons.osh is defined}}"
- name: Check for Helm/Tiller
shell: hyperkube kubectl get pods --namespace kube-system | grep tiller
ignore_errors: true
register: helm_running
when: addons_enabled and "{{addons.osh is defined}}"
- name: Initialize Helm/Tiller
shell: helm init --home /opt/openstack-helm/repos/openstack-helm/.helm
environment:
HELM_HOME: /opt/openstack-helm/repos/openstack-helm/.helm
when: addons_enabled and "{{addons.osh is defined}}" and helm_running | failed
- name: Helm Serve
shell: nohup helm serve --repo-path /opt/openstack-helm/repos/openstack-helm/.helm/repository/local &
environment:
HELM_HOME: /opt/openstack-helm/repos/openstack-helm/.helm
args:
chdir: /opt/openstack-helm/repos/openstack-helm/.helm
when: addons_enabled and "{{addons.osh is defined}}" and helm_running | failed
- name: Add helm repositories
shell: helm repo add local http://localhost:8879/charts --home /opt/openstack-helm/repos/openstack-helm/.helm
args:
chdir: /opt/openstack-helm/repos/openstack-helm/.helm
when: addons_enabled and "{{addons.osh is defined}}" and helm_running | failed

View File

@ -1,6 +0,0 @@
---
- include: addon-dashboard.yaml
- include: addon-helm.yaml
- include: addon-osh.yaml
- include: addon-ceph.yaml
- include: addon-maas.yaml

View File

@ -1,75 +0,0 @@
{
"kind": "Pod",
"apiVersion": "v1",
"metadata": {
"name": "kube-controller-manager",
"namespace": "kube-system",
"creationTimestamp": null,
"labels": {
"component": "kube-controller-manager",
"tier": "control-plane"
}
},
"spec": {
"volumes": [
{
"name": "k8s",
"hostPath": {
"path": "/etc/kubernetes"
}
},
{
"name": "certs",
"hostPath": {
"path": "/etc/ssl/certs"
}
}
],
"containers": [
{
"name": "kube-controller-manager",
"image": "quay.io/attcomdev/kube-controller-manager:{{ kube_controller_manager_version }}",
"command": [
"kube-controller-manager",
"--address=127.0.0.1",
"--leader-elect",
"--master=127.0.0.1:8080",
"--cluster-name=kubernetes",
"--root-ca-file=/etc/kubernetes/pki/ca.pem",
"--service-account-private-key-file=/etc/kubernetes/pki/apiserver-key.pem",
"--cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem",
"--cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem",
"--insecure-experimental-approve-all-kubelet-csrs-for-group=system:kubelet-bootstrap"
],
"resources": {
"requests": {
"cpu": "200m"
}
},
"volumeMounts": [
{
"name": "k8s",
"readOnly": true,
"mountPath": "/etc/kubernetes/"
},
{
"name": "certs",
"mountPath": "/etc/ssl/certs"
}
],
"livenessProbe": {
"httpGet": {
"path": "/healthz",
"port": 10252,
"host": "127.0.0.1"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15,
"failureThreshold": 8
}
}
],
"hostNetwork": true
},
"status": {}
}

View File

@ -1,15 +0,0 @@
---
- name: Setup bootkube.service
when:
bootstrap_enabled
template:
src: bootkube.service
dest: /etc/systemd/system/bootkube.service
- name: Run bootkube
when:
bootstrap_enabled
systemd:
name: bootkube
state: started
daemon_reload: yes

View File

@ -1,6 +0,0 @@
---
- include: prep-host.yaml
- include: prep-bootkube.yaml
- include: prep-network.yaml
- include: prep-kubernetes.yaml
- include: deploy-bootkube.yaml

View File

@ -1,22 +0,0 @@
---
- name: Ensures bootkube dir exists
when:
bootstrap_enabled
file:
path: /tmp/bootkube
state: directory
- name: Extract bootkube binaries
when:
bootstrap_enabled
unarchive:
src: "https://github.com/kubernetes-incubator/bootkube/releases/download/{{ boot_kube_version }}/bootkube.tar.gz"
dest: /tmp/bootkube
remote_src: True
- name: Render bootkube manifests
when:
bootstrap_enabled
command: "/tmp/bootkube/bin/linux/bootkube render --asset-dir=/tmp/bootkube/assets --experimental-self-hosted-etcd --etcd-servers=http://10.3.0.15:2379 --api-servers=https://{{ api_server_fqdn }}:443"
args:
creates: /etc/kubernetes/kubeconfig

View File

@ -1,23 +0,0 @@
---
- name: Install base packages
when:
bootstrap_enabled
apt:
name: "{{ item }}"
state: present
with_items:
- "docker.io"
- "vim"
- "ethtool"
- "traceroute"
- "git"
- "build-essential"
- "lldpd"
- name: Insert Temporary Hosts File Entry for FQDN Resolution
when:
bootstrap_enabled
lineinfile:
dest: /etc/hosts
line: "{{ hostvars[groups['master'][0]]['ansible_default_ipv4']['address'] }} {{ api_server_fqdn }}"
state: present

View File

@ -1,29 +0,0 @@
---
- name: Ensures /etc/kubernetes dir exists
when:
bootstrap_enabled
file:
path: /etc/kubernetes
state: directory
- name: copy kubeconfig credentials
when:
bootstrap_enabled
command: cp /tmp/bootkube/assets/auth/kubeconfig /etc/kubernetes/kubeconfig
args:
creates: /etc/kubernetes/kubeconfig
- name: copy kubernetes manifests
when:
bootstrap_enabled
command: cp -a /tmp/bootkube/assets/manifests /etc/kubernetes/
args:
creates: /etc/kubernetes/manifests
- name: fetch kubeconfig
when:
bootstrap_enabled
fetch:
src: /etc/kubernetes/kubeconfig
dest: roles/deploy-kubelet/templates/kubeconfig
flat: yes

View File

@ -1,14 +0,0 @@
---
- name: Inject Custom manifests - kube-calico.yaml
when:
bootstrap_enabled
template:
src: kube-calico.yaml.j2
dest: "/tmp/bootkube/assets/manifests/kube-flannel.yaml"
- name: Inject Custom manifests - kube-calico-cfg.yaml
when:
bootstrap_enabled
template:
src: kube-calico-cfg.yaml.j2
dest: "/tmp/bootkube/assets/manifests/kube-flannel-cfg.yaml"

View File

@ -1,10 +0,0 @@
[Unit]
Description=Kubernetes Control Plane Bootstrapping
Documentation=https://github.com/kubernetes-incubator/bootkube
[Service]
ExecStart=/tmp/bootkube/bin/linux/bootkube start --asset-dir=/tmp/bootkube/assets/ --experimental-self-hosted-etcd --etcd-server=http://127.0.0.1:12379
Restart=on-failure
[Install]
WantedBy=multi-user.target

View File

@ -1,267 +0,0 @@
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# The location of your etcd cluster. This uses the Service clusterIP
# defined below.
etcd_endpoints: "http://10.96.232.136:6666"
# Configure the Calico backend to use.
calico_backend: "bird"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"type": "calico",
"etcd_endpoints": "__ETCD_ENDPOINTS__",
"log_level": "info",
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s",
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
}
}
---
# This manifest installs the Calico etcd on the kubeadm master. This uses a DaemonSet
# to force it to run on the master even when the master isn't schedulable, and uses
# nodeSelector to ensure it only runs on the master.
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: calico-etcd
namespace: kube-system
labels:
k8s-app: calico-etcd
spec:
template:
metadata:
labels:
k8s-app: calico-etcd
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key":"CriticalAddonsOnly", "operator":"Exists"}]
spec:
# Only run this pod on the master.
nodeSelector:
kubeadm.alpha.kubernetes.io/role: master
hostNetwork: true
containers:
- name: calico-etcd
image: gcr.io/google_containers/etcd:2.2.1
env:
- name: CALICO_ETCD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
command: ["/bin/sh","-c"]
args: ["/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$CALICO_ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"]
volumeMounts:
- name: var-etcd
mountPath: /var/etcd
volumes:
- name: var-etcd
hostPath:
path: /var/etcd
---
# This manifest installs the Service which gets traffic to the Calico
# etcd.
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: calico-etcd
name: calico-etcd
namespace: kube-system
spec:
# Select the calico-etcd pod running on the master.
selector:
k8s-app: calico-etcd
# This ClusterIP needs to be known in advance, since we cannot rely
# on DNS to get access to etcd.
clusterIP: 10.96.232.136
ports:
- port: 6666
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
template:
metadata:
labels:
k8s-app: calico-node
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key":"CriticalAddonsOnly", "operator":"Exists"}]
spec:
hostNetwork: true
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v1.1.0
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Enable BGP. Disable to enforce policy only.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Configure the IP Pool from which Pod IPs will be chosen.
- name: CALICO_IPV4POOL_CIDR
value: "192.168.0.0/16"
- name: CALICO_IPV4POOL_IPIP
value: "always"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
# Auto-detect the BGP IP address.
- name: IP
value: ""
securityContext:
privileged: true
resources:
requests:
cpu: 250m
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.6.1
command: ["/install-cni.sh"]
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
---
# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
spec:
# The policy controller can only have a single active instance.
replicas: 1
strategy:
type: Recreate
template:
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy-controller
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key":"CriticalAddonsOnly", "operator":"Exists"}]
spec:
# The policy controller must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
containers:
- name: calico-policy-controller
image: quay.io/calico/kube-policy-controller:v0.5.4
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# The location of the Kubernetes API. Use the default Kubernetes
# service for API access.
- name: K8S_API
value: "https://kubernetes.default:443"
# Since we're running in the host namespace and might not have KubeDNS
# access, configure the container's /etc/hosts to resolve
# kubernetes.default to the correct service clusterIP.
- name: CONFIGURE_ETC_HOSTS
value: "true"

View File

@ -1,144 +0,0 @@
# This ConfigMap is used to configure a self-hosted Calico installation without ETCD
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"type": "calico",
"log_level": "debug",
"datastore_type": "kubernetes",
"hostname": "__KUBERNETES_NODE_NAME__",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
}
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
template:
metadata:
labels:
k8s-app: calico-node
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key":"CriticalAddonsOnly", "operator":"Exists"}]
spec:
hostNetwork: true
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v1.1.0
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Enable felix debug logging.
- name: FELIX_LOGSEVERITYSCREEN
value: "debug"
# Don't enable BGP.
- name: CALICO_NETWORKING_BACKEND
value: "none"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Disable IPV6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# The Calico IPv4 pool to use. This should match `--cluster-cidr`
- name: CALICO_IPV4POOL_CIDR
value: "10.244.0.0/16"
# Set based on the k8s node name.
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# No IP address needed.
- name: IP
value: ""
securityContext:
privileged: true
resources:
requests:
cpu: 250m
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.6.1
command: ["/install-cni.sh"]
env:
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
# Set the hostname based on the k8s node name.
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d

View File

@ -1 +0,0 @@
#Nothing To Be Seen Here. Prevents Bootkube from coming up

View File

@ -1,75 +0,0 @@
{
"kind": "Pod",
"apiVersion": "v1",
"metadata": {
"name": "kube-controller-manager",
"namespace": "kube-system",
"creationTimestamp": null,
"labels": {
"component": "kube-controller-manager",
"tier": "control-plane"
}
},
"spec": {
"volumes": [
{
"name": "k8s",
"hostPath": {
"path": "/etc/kubernetes"
}
},
{
"name": "certs",
"hostPath": {
"path": "/etc/ssl/certs"
}
}
],
"containers": [
{
"name": "kube-controller-manager",
"image": "quay.io/attcomdev/kube-controller-manager:{{ kube_controller_manager_version }}",
"command": [
"kube-controller-manager",
"--address=127.0.0.1",
"--leader-elect",
"--master=127.0.0.1:8080",
"--cluster-name=kubernetes",
"--root-ca-file=/etc/kubernetes/pki/ca.pem",
"--service-account-private-key-file=/etc/kubernetes/pki/apiserver-key.pem",
"--cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem",
"--cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem",
"--insecure-experimental-approve-all-kubelet-csrs-for-group=system:kubelet-bootstrap"
],
"resources": {
"requests": {
"cpu": "200m"
}
},
"volumeMounts": [
{
"name": "k8s",
"readOnly": true,
"mountPath": "/etc/kubernetes/"
},
{
"name": "certs",
"mountPath": "/etc/ssl/certs"
}
],
"livenessProbe": {
"httpGet": {
"path": "/healthz",
"port": 10252,
"host": "127.0.0.1"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15,
"failureThreshold": 8
}
}
],
"hostNetwork": true
},
"status": {}
}

View File

@ -1,45 +0,0 @@
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-controller-manager
namespace: kube-system
labels:
k8s-app: kube-controller-manager
spec:
replicas: 2
template:
metadata:
labels:
k8s-app: kube-controller-manager
spec:
nodeSelector:
master: "true"
containers:
- name: kube-controller-manager
image: quay.io/attcomdev/kube-controller-manager:{{ kube_controller_manager_version }}
command:
- ./hyperkube
- controller-manager
- --allocate-node-cidrs=true
- --configure-cloud-routes=false
- --cluster-cidr=10.2.0.0/16
- --root-ca-file=/etc/kubernetes/secrets/ca.crt
- --service-account-private-key-file=/etc/kubernetes/secrets/service-account.key
- --leader-elect=true
- --cloud-provider=
volumeMounts:
- name: secrets
mountPath: /etc/kubernetes/secrets
readOnly: true
- name: ssl-host
mountPath: /etc/ssl/certs
readOnly: true
volumes:
- name: secrets
secret:
secretName: kube-controller-manager
- name: ssl-host
hostPath:
path: /usr/share/ca-certificates
dnsPolicy: Default # Don't use cluster DNS.

View File

@ -1,3 +0,0 @@
---
- name: restart kubelet
service: name=kubelet state=restarted

View File

@ -1,95 +0,0 @@
---
- name: Grab the ETCD IP
shell: hyperkube kubectl get services --all-namespaces | grep "etcd-service" | awk '{ print $3 }'
register: etcd_service_ip
# - name: Deploy Calico manifest template
# template:
# src: calico.yaml
# dest: /opt/openstack-helm/manifests/calico.yaml
# register: calico_changed
#
# - name: Install calicoctl tool
# get_url:
# url: "https://github.com/projectcalico/calicoctl/releases/download/{{ calicoctl_version }}/calicoctl"
# dest: /usr/bin/calicoctl
# validate_certs: false
# mode: 0755
#
# - name: Check for Calico deployment
# shell: hyperkube kubectl get services --all-namespaces | grep calico
# ignore_errors: True
# register: calico_deployed
#
# - name: Deploy BGP Peer Manifest (1)
# template:
# src: calico-peer.yaml
# dest: /opt/openstack-helm/manifests/calico-peer.yaml
#
# - name: Deploy BGP Peer Manifest (2)
# template:
# src: calico-peer2.yaml
# dest: /opt/openstack-helm/manifests/calico-peer2.yaml
#
# - name: Create Calico Pods
# shell: hyperkube kubectl create -f /opt/openstack-helm/manifests/calico.yaml
# when: calico_deployed | failed and "{{ inventory_hostname }} in groups['bootstrap']"
#
# - action: shell hyperkube kubectl get pods --all-namespaces | grep calico
# register: calico_output
# until: calico_output.stdout.find("Running") != -1
# retries: 20
# delay: 15
#
# - name: Create BGP Peering(1)
# shell: calicoctl create -f /opt/openstack-helm/manifests/calico-peer.yaml --skip-exists
# environment:
# ETCD_ENDPOINTS: "http://{{ etcd_service_ip.stdout }}:2379"
# when: calico_deployed | failed and "{{ inventory_hostname }} in groups['bootstrap']"
#
# - name: Create BGP Peering(2)
# shell: calicoctl create -f /opt/openstack-helm/manifests/calico-peer2.yaml --skip-exists
# environment:
# ETCD_ENDPOINTS: "http://{{ etcd_service_ip.stdout }}:2379"
# when: calico_deployed | failed and "{{ inventory_hostname }} in groups['bootstrap']"
- name: Check ClusterHA in KubeDNS
shell: hyperkube kubectl get services --all-namespaces | grep cluster-ha
ignore_errors: true
register: cluster_ha_present
- name: Install ClusterHA ConfigMaps
template:
src: cluster-ha.j2
dest: /opt/openstack-helm/manifests/cluster-ha.yaml
register: cluster_ha_configmaps
- name: Delete ClusterHA if present
shell: hyperkube kubectl delete -f /opt/openstack-helm/manifests/cluster-ha.yaml
when: cluster_ha_present | succeeded and cluster_ha_configmaps | changed
ignore_errors: true
- name: Deploy ClusterHA ConfigMaps
shell: hyperkube kubectl create -f /opt/openstack-helm/manifests/cluster-ha.yaml
when: cluster_ha_configmaps | changed
- name: Determine KubeDNS Server
shell: hyperkube kubectl get svc kube-dns --namespace=kube-system | awk '{print $2}' | sed -n '$p'
register: kube_dns_server
- name: Add KubeDNS to /etc/resolv.conf
lineinfile:
dest: /etc/resolv.conf
insertafter: "^# DO"
line: "nameserver {{ kube_dns_server.stdout }}"
state: present
backup: true
- name: Remove /etc/hosts entry if present
lineinfile:
dest: /etc/hosts
line: "{{ hostvars[groups['master'][0]]['ansible_default_ipv4']['address'] }} {{ api_server_fqdn }}"
state: absent
- name: Test Kubernetes cluster
shell: hyperkube kubectl get nodes

View File

@ -1,64 +0,0 @@
---
#TODO: Version kubelet, with checksum
- name: Install kubelet
get_url:
url: "http://storage.googleapis.com/kubernetes-release/release/{{ kubelet_version }}/bin/linux/amd64/kubelet"
dest: /usr/bin/kubelet
# checksum: md5:33af080e876b1f3d481b0ff1ceec3ab8
mode: 0755
- name: Ensures /etc/kubernetes dir exists
file:
path: /etc/kubernetes
state: directory
#Gets Kubeconfig from the bootstrap node. See roles/bootstrap/tasks/main.yml
- name: Install kubeconfig
template:
src: kubeconfig
dest: /etc/kubernetes/kubeconfig
- name: Setup kubelet.service
template:
src: kubelet.service
dest: /etc/systemd/system/kubelet.service
notify: restart kubelet
- name: Enable Kubelet to be started on boot
systemd:
name: kubelet
state: started
enabled: yes
daemon_reload: yes
- name: Create Directories for Kubernetes manifests
file:
path: /opt/openstack-helm/manifests
state: directory
#Wait for Kubeapi Server to come up
- action: shell hyperkube kubectl get pods --all-namespaces | grep kube-apiserver
register: kubeapi_output
until: kubeapi_output.stdout.find("Running") != -1
retries: 40
delay: 15
#Wait for cluster to stabilize across all nodes
- action: shell hyperkube kubectl get pods --all-namespaces
register: cluster_stable
until: '"ContainerCreating" not in cluster_stable.stdout'
retries: 40
delay: 15
#Re-Deploy Calico with ETCD
- name: Inject Custom manifests - kube-calico.yaml
template:
src: kube-calico.yaml.j2
dest: "/tmp/bootkube/assets/manifests/kube-flannel.yaml"
notify: restart kubelet
- name: Inject Custom manifests - kube-calico-cfg.yaml
template:
src: kube-calico-cfg.yaml.j2
dest: "/tmp/bootkube/assets/manifests/kube-flannel-cfg.yaml"
notify: restart kubelet

View File

@ -1,6 +0,0 @@
#Deploys Kubelet
---
- include: prep-host.yaml
- include: prep-hyperkube.yaml
- include: prep-cni.yaml
- include: deploy-kubernetes.yaml

View File

@ -1,11 +0,0 @@
---
- name: Ensures CNI dir exists
file:
path: /opt/cni/bin
state: directory
- name: Install CNI binaries
unarchive:
src: "https://github.com/containernetworking/cni/releases/download/{{ cni_version }}/cni-amd64-{{ cni_version }}.tgz"
dest: /opt/cni/bin
remote_src: True

View File

@ -1,19 +0,0 @@
---
- name: Install base packages
apt:
name: "{{ item }}"
state: present
with_items:
- "docker.io"
- "vim"
- "ethtool"
- "traceroute"
- "git"
- "build-essential"
- "lldpd"
- name: Insert Temporary Hosts File Entry for FQDN Resolution
lineinfile:
dest: /etc/hosts
line: "{{ hostvars[groups['master'][0]]['ansible_default_ipv4']['address'] }} {{ api_server_fqdn }}"
state: present

View File

@ -1,10 +0,0 @@
---
- name: Downloads Hyperkube
get_url:
url: "http://storage.googleapis.com/kubernetes-release/release/{{ hyperkube_version }}/bin/linux/amd64/hyperkube"
dest: /usr/bin/hyperkube
- name: Set hyperkube permissions
file:
path: /usr/bin/hyperkube
mode: 0755

View File

@ -1,8 +0,0 @@
apiVersion: v1
kind: bgpPeer
metadata:
peerIP: {{ calico_peer1 }}
scope: node
node: {{ ansible_hostname }}
spec:
asNumber: 64686

View File

@ -1,8 +0,0 @@
apiVersion: v1
kind: bgpPeer
metadata:
peerIP: {{ calico_peer2 }}
scope: node
node: {{ ansible_hostname }}
spec:
asNumber: 64686

View File

@ -1,323 +0,0 @@
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# The location of your etcd cluster. This uses the Service clusterIP
# defined below.
#etcd_endpoints: "http://10.96.232.136:6666"
#etcd_endpoints: "http://10.200.232.136:6666"
etcd_endpoints: "http://{{ etcd_service_ip.stdout }}:2379"
# True enables BGP networking, false tells Calico to enforce
# policy only, using native networking.
enable_bgp: "true"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"type": "calico",
"etcd_endpoints": "__ETCD_ENDPOINTS__",
"log_level": "info",
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s",
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
}
}
# The default IP Pool to be created for the cluster.
# Pod IP addresses will be assigned from this pool.
ippool.yaml: |
apiVersion: v1
kind: ipPool
metadata:
cidr: 10.200.0.0/16
spec:
ipip:
enabled: true
nat-outgoing: true
---
# This manifest installs the Calico etcd on the kubeadm master. This uses a DaemonSet
# to force it to run on the master even when the master isn't schedulable, and uses
# nodeSelector to ensure it only runs on the master.
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: calico-etcd
namespace: kube-system
labels:
k8s-app: calico-etcd
spec:
template:
metadata:
labels:
k8s-app: calico-etcd
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key":"CriticalAddonsOnly", "operator":"Exists"}]
spec:
# Only run this pod on the master.
nodeSelector:
kubeadm.alpha.kubernetes.io/role: master
hostNetwork: true
containers:
- name: calico-etcd
image: gcr.io/google_containers/etcd:2.2.1
env:
- name: CALICO_ETCD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
command: ["/bin/sh","-c"]
args: ["/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$CALICO_ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"]
volumeMounts:
- name: var-etcd
mountPath: /var/etcd
volumes:
- name: var-etcd
hostPath:
path: /var/etcd
---
# This manifest installs the Service which gets traffic to the Calico
# etcd.
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: calico-etcd
name: calico-etcd
namespace: kube-system
spec:
# Select the calico-etcd pod running on the master.
selector:
k8s-app: calico-etcd
# This ClusterIP needs to be known in advance, since we cannot rely
# on DNS to get access to etcd.
#clusterIP: 10.96.232.136
clusterIP: 10.3.0.190
ports:
- port: 6666
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
template:
metadata:
labels:
k8s-app: calico-node
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key":"CriticalAddonsOnly", "operator":"Exists"}]
spec:
hostNetwork: true
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v1.0.2
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Enable BGP. Disable to enforce policy only.
- name: CALICO_NETWORKING
valueFrom:
configMapKeyRef:
name: calico-config
key: enable_bgp
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Don't configure a default pool. This is done by the Job
# below.
- name: NO_DEFAULT_POOLS
value: "true"
# Auto-detect the BGP IP address.
- name: IP
value: ""
securityContext:
privileged: true
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: calico/cni:v1.5.6
command: ["/install-cni.sh"]
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
---
# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
spec:
# The policy controller can only have a single active instance.
replicas: 1
strategy:
type: Recreate
template:
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy-controller
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key":"CriticalAddonsOnly", "operator":"Exists"}]
spec:
# The policy controller must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
containers:
- name: calico-policy-controller
image: calico/kube-policy-controller:v0.5.2
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# The location of the Kubernetes API. Use the default Kubernetes
# service for API access.
- name: K8S_API
value: "https://kubernetes.default:443"
# Since we're running in the host namespace and might not have KubeDNS
# access, configure the container's /etc/hosts to resolve
# kubernetes.default to the correct service clusterIP.
- name: CONFIGURE_ETC_HOSTS
value: "true"
---
# This manifest deploys a Job which performs one-time
# configuration of Calico.
apiVersion: batch/v1
kind: Job
metadata:
name: configure-calico
namespace: kube-system
labels:
k8s-app: calico
spec:
template:
metadata:
name: configure-calico
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key":"CriticalAddonsOnly", "operator":"Exists"}]
spec:
hostNetwork: true
restartPolicy: OnFailure
containers:
# Writes basic configuration to datastore.
- name: configure-calico
image: calico/ctl:v1.0.2
args:
- apply
- -f
- /etc/config/calico/ippool.yaml
volumeMounts:
- name: config-volume
mountPath: /etc/config
env:
# The location of the etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
volumes:
- name: config-volume
configMap:
name: calico-config
items:
- key: ippool.yaml
path: calico/ippool.yaml

View File

@ -1,23 +0,0 @@
---
kind: Service
apiVersion: v1
metadata:
name: cluster-ha
spec:
clusterIP: None
ports:
- protocol: TCP
port: 443
targetPort: 443
---
kind: Endpoints
apiVersion: v1
metadata:
name: cluster-ha
subsets:
- addresses:
{% for node in groups['master'] %}
- ip: {{ hostvars[node]['ansible_default_ipv4']['address'] }}
{% endfor %}
ports:
- port: 443

View File

@ -1,53 +0,0 @@
# This ConfigMap is used to configure a self-hosted Calico installation.
# Becomes kube-flannel-cfg.yaml once deployed on target host
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# Configure this with the location of your etcd cluster.
etcd_endpoints: "http://10.23.19.16:2379"
#etcd_endpoints: "http://127.0.0.1:2379"
# Configure the Calico backend to use.
calico_backend: "bird"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"type": "calico",
"etcd_endpoints": "__ETCD_ENDPOINTS__",
"etcd_key_file": "__ETCD_KEY_FILE__",
"etcd_cert_file": "__ETCD_CERT_FILE__",
"etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
"log_level": "info",
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s",
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
}
# The default IP Pool to be created for the cluster.
# Pod IP addresses will be assigned from this pool.
ippool.yaml: |
apiVersion: v1
kind: ipPool
metadata:
cidr: 10.2.0.0/16
spec:
nat-outgoing: true
# If you're using TLS enabled etcd uncomment the following.
# You must also populate the Secret below with these files.
etcd_ca: "" # "/calico-secrets/etcd-ca"
etcd_cert: "" # "/calico-secrets/etcd-cert"
etcd_key: "" # "/calico-secrets/etcd-key"

View File

@ -1,286 +0,0 @@
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
# This file becomes kube-flannel.yaml once deployed to overwrite the default bootkube deployment
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
template:
metadata:
labels:
k8s-app: calico-node
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key":"CriticalAddonsOnly", "operator":"Exists"}]
spec:
hostNetwork: true
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v1.1.1
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Don't configure a default pool. This is done by the Job
# below.
- name: NO_DEFAULT_POOLS
value: "true"
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
# Auto-detect the BGP IP address.
- name: IP
value: ""
securityContext:
privileged: true
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
# - mountPath: /calico-secrets
# name: etcd-certs
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.6.2
command: ["/install-cni.sh"]
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
# - mountPath: /calico-secrets
# name: etcd-certs
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Mount in the etcd TLS secrets.
# - name: etcd-certs
# secret:
# secretName: calico-etcd-secrets
---
# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key":"CriticalAddonsOnly", "operator":"Exists"}]
spec:
# The policy controller can only have a single active instance.
replicas: 1
strategy:
type: Recreate
template:
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
spec:
# The policy controller must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
containers:
- name: calico-policy-controller
image: quay.io/calico/kube-policy-controller:v0.5.4
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
# The location of the Kubernetes API. Use the default Kubernetes
# service for API access.
- name: K8S_API
value: "https://kubernetes.default:443"
# Since we're running in the host namespace and might not have KubeDNS
# access, configure the container's /etc/hosts to resolve
# kubernetes.default to the correct service clusterIP.
- name: CONFIGURE_ETC_HOSTS
value: "true"
# volumeMounts:
# # Mount in the etcd TLS secrets.
# - mountPath: /calico-secrets
# name: etcd-certs
# volumes:
# Mount in the etcd TLS secrets.
# - name: etcd-certs
# secret:
# secretName: calico-etcd-secrets
---
## This manifest deploys a Job which performs one-time
# configuration of Calico
apiVersion: batch/v1
kind: Job
metadata:
name: configure-calico
namespace: kube-system
labels:
k8s-app: calico
spec:
template:
metadata:
name: configure-calico
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key":"CriticalAddonsOnly", "operator":"Exists"}]
spec:
hostNetwork: true
restartPolicy: OnFailure
containers:
# Writes basic configuration to datastore.
- name: configure-calico
image: calico/ctl:v1.1.1
args:
- apply
- -f
- /etc/config/calico/ippool.yaml
volumeMounts:
- name: config-volume
mountPath: /etc/config
# Mount in the etcd TLS secrets.
# - mountPath: /calico-secrets
# name: etcd-certs
env:
# The location of the etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
volumes:
- name: config-volume
configMap:
name: calico-config
items:
- key: ippool.yaml
path: calico/ippool.yaml
# Mount in the etcd TLS secrets.
# - name: etcd-certs
# secret:
# secretName: calico-etcd-secrets

View File

@ -1,26 +0,0 @@
# Default overridable variables for the bootstrap role
boot_kube_version: "v0.3.13"
bootstrap_enabled: "true"
# For DNS resiliency, override this with an FQDN in your environment which resolves to all "master" servers
api_server_fqdn: "kubeapi.test.local"
# Default overridable variables for the kubelet role
cni_version: "v0.5.2"
hyperkube_version: "v1.5.6"
kubelet_version: "v1.5.6"
calicoctl_version: "v1.1.0"
# Calico peering - physical switch fabric IPs
calico_peer1: 10.23.21.2
calico_peer2: 10.23.21.3
## Kubernetes Add-Ons:
# Optional Items: kube_dashboard, kube_helm (more to come).
addons_enabled: false
addons:
- dashboard
- helm
- osh
- ceph
- maas

64
scripts/common/func.sh Normal file
View File

@ -0,0 +1,64 @@
# Copyright 2017 The Promenade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
function validate_environment {
local ERRORS=
if [ "x${NODE_HOSTNAME}" = "x" ]; then
echo "Error: NODE_HOSTNAME not defined, but required."
ERRORS=1
fi
if ! docker info; then
cat <<EOS
Error: Unable to run 'docker info'. You must mount /var/run/docker.sock when
you run this container, since it is used to launch containers on the host:
docker run -v /var/run/docker.sock:/var/run/docker.sock ...
EOS
ERRORS=1
fi
if [ ! -d /target/etc/systemd/system ]; then
cat <<EOS
Error: It appears that the host's root filesystem is not mounted at /target.
Make sure it is mounted:
docker run -v /:/target ...
EOS
ERRORS=1
fi
if [ "x$ERRORS" != "x" ]; then
exit 1
fi
}
function install_assets {
mkdir /target/etc/kubernetes
cp -R ./assets/* /target/etc/kubernetes
}
function install_cni {
mkdir -p /opt/cni/bin
tar xf cni.tgz -C /opt/cni/bin/
}
function install_kubelet {
cp ./kubelet /target/usr/local/bin/kubelet
envsubst < ./kubelet.service.template > /target/etc/systemd/system/kubelet.service
chown root:root /target/etc/systemd/system/kubelet.service
chmod 644 /target/etc/systemd/system/kubelet.service
chroot --userspec root:root /target /bin/bash < ./scripts/start-kubelet.sh
}
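install_kubelet relies on envsubst to expand environment variables such as NODE_HOSTNAME (exported by the Dockerfiles) in kubelet.service.template. A hypothetical illustration of that mechanism, not the template shipped with this change:
# Hypothetical template fragment; the real kubelet.service.template may differ.
cat > /tmp/example.service.template <<'EOF'
[Service]
ExecStart=/usr/local/bin/kubelet --hostname-override=${NODE_HOSTNAME}
EOF
NODE_HOSTNAME=n0 envsubst < /tmp/example.service.template
# Prints the fragment with ${NODE_HOSTNAME} expanded to "n0".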

21
scripts/common/start-kubelet.sh Executable file
View File

@ -0,0 +1,21 @@
#!/bin/bash
#
# Copyright 2017 The Promenade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
systemctl daemon-reload
systemctl enable kubelet.service
systemctl start kubelet.service
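install_kubelet pipes this script into a chroot of the host filesystem mounted at /target, so the systemctl calls above act on the host. A hedged follow-up check, run on the host itself rather than inside the promenade container:
systemctl is-enabled kubelet.service
systemctl is-active kubelet.service
journalctl -u kubelet.service --no-pager -n 20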

35
scripts/entrypoint-genesis.sh Executable file
View File

@ -0,0 +1,35 @@
#!/bin/bash
#
# Copyright 2017 The Promenade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
source ./scripts/env.sh
source ./scripts/func.sh
validate_environment
# XXX validate_genesis_assets
docker load -i ./genesis-images.tar
install_assets
install_cni
install_kubelet
docker run --rm \
-v /etc/kubernetes:/etc/kubernetes \
quay.io/coreos/bootkube:${BOOTKUBE_VERSION} \
/bootkube start \
--asset-dir=/etc/kubernetes
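scripts/env.sh is copied into the image by Dockerfile.genesis but ignored by git, so it is presumably produced by the build; this entrypoint expects it to export at least BOOTKUBE_VERSION. A minimal sketch of such a file, using the bootkube version pinned by the ansible defaults this change removes; the generated file may differ:
# Illustrative env.sh contents only.
export BOOTKUBE_VERSION=v0.3.13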

27
scripts/entrypoint-join.sh Executable file
View File

@ -0,0 +1,27 @@
#!/bin/bash
#
# Copyright 2017 The Promenade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
source ./scripts/env.sh
source ./scripts/func.sh
validate_environment
# XXX validate_join_assets
install_assets
install_cni
install_kubelet

View File

@ -1,27 +0,0 @@
- hosts: bootstrap
remote_user: ubuntu
become: yes
become_method: sudo
roles:
- deploy-bootstrap
- hosts: master
remote_user: ubuntu
become: yes
become_method: sudo
roles:
- deploy-kubelet
- hosts: workers
remote_user: ubuntu
become: yes
become_method: sudo
roles:
- deploy-kubelet
#- hosts: master
# remote_user: ubuntu
# become: yes
# become_method: sudo
# roles:
# - deploy-addons

20
test-install.sh Executable file
View File

@ -0,0 +1,20 @@
#!/usr/bin/env bash
set -ex
# Setup master
vagrant ssh n0 <<EOS
set -ex
sudo docker load -i /vagrant/promenade-genesis.tar
sudo docker run -v /:/target -v /var/run/docker.sock:/var/run/docker.sock -e NODE_HOSTNAME=n0 quay.io/attcomdev/promenade-genesis:dev
EOS
# Join nodes
for node in n1 n2; do
vagrant ssh $node <<EOS
set -ex
sudo docker load -i /vagrant/promenade-join.tar
# Should be: sudo docker run -v /:/target -e NODE_HOSTNAME=$node quay.io/attcomdev/promenade-join:dev
sudo docker run -v /:/target -v /var/run/docker.sock:/var/run/docker.sock -e NODE_HOSTNAME=$node quay.io/attcomdev/promenade-join:dev
EOS
done
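After test-install.sh finishes, a hedged smoke check (assuming the n0, n1, and n2 Vagrant machines used above) is to confirm that the kubelet unit installed by install_kubelet is active on every node:
for node in n0 n1 n2; do
  vagrant ssh "$node" -c 'sudo systemctl is-active kubelet.service'
done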

View File

@ -0,0 +1,3 @@
host-record=kubernetes,192.168.77.10
host-record=kubernetes,192.168.77.11
host-record=kubernetes,192.168.77.12
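These host-records make the bare name kubernetes resolve to all three node addresses, giving clients a simple round-robin path to the API servers. A hedged check from any machine that uses this dnsmasq instance as its resolver:
# Both commands should list addresses in the 192.168.77.10-12 range.
dig +short kubernetes
getent ahosts kubernetes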

View File

@ -0,0 +1,3 @@
{
"dns": ["172.17.0.1"]
}
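This daemon.json points container DNS at 172.17.0.1, the default docker0 bridge address where the dnsmasq configuration above is expected to listen. A hedged way to confirm that containers resolve the kubernetes name once docker has been restarted with this file:
# Any small image that ships nslookup works; busybox is only an example.
sudo docker run --rm busybox nslookup kubernetes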