Replace flannel with calico (#26)

* Uses the existing Kubernetes etcd cluster to store calico data.
* Adds `b64enc` filter for templates (useful for Secrets).
Mark Burnett 2017-06-29 18:24:31 -05:00 committed by Alan Meadows
parent 78f42d657e
commit 9f968115b5
5 changed files with 446 additions and 141 deletions

View File

@@ -44,6 +44,7 @@ spec:
   kube_service_ip: 10.96.0.1
   pod_ip_cidr: 10.97.0.0/16
   service_ip_cidr: 10.96.0.0/16
+  etcd_service_ip: 10.96.232.136
   dns_servers:
     - 8.8.8.8
     - 8.8.4.4

View File

@@ -251,13 +251,21 @@ def _master_config(hostname, host_data, masters, network, keys):
         'kubernetes.default.svc.cluster.local',
         '127.0.0.1',
     ]

+    calico_domains = [
+        'calico-etcd',
+        'calico-etcd.kube-system',
+        'calico-etcd.kube-system.svc',
+        'calico-etcd.kube-system.svc.cluster.local',
+        network['etcd_service_ip'],
+    ]
+
     docs = []
     docs.extend(keys.generate_certificate(
         alias='etcd-client',
         name='etcd:client:%s' % hostname,
         ca_name='etcd-client',
-        hosts=kube_domains + [hostname, host_data['ip']],
+        hosts=kube_domains + calico_domains + [hostname, host_data['ip']],
         target=hostname,
     ))
     docs.extend(keys.generate_certificate(
@@ -267,6 +275,7 @@ def _master_config(hostname, host_data, masters, network, keys):
         hosts=[hostname, host_data['ip']],
         target=hostname,
     ))
+
     docs.extend(keys.generate_certificate(
         alias='etcd-peer',
         name='etcd:peer:%s' % hostname,
@@ -332,6 +341,13 @@ def _genesis_config(hostname, host_data, masters, network, keys):
         target=hostname,
     ))

+    docs.extend(keys.generate_certificate(
+        alias='etcd-calico-client',
+        name='etcd:client:calico',
+        ca_name='etcd-client',
+        target=hostname,
+    ))
+
     return docs
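The extra calico_domains SANs matter because calico components reach etcd through the calico-etcd Service name and its pinned ClusterIP, so TLS hostname verification must accept those names. As a hedged illustration (not part of this commit; assumes the third-party cryptography package and a hypothetical etcd-client.pem path), a check like this confirms a generated client certificate covers them:

import sys

from cryptography import x509
from cryptography.hazmat.backends import default_backend

REQUIRED_DNS_SANS = {
    'calico-etcd',
    'calico-etcd.kube-system',
    'calico-etcd.kube-system.svc',
    'calico-etcd.kube-system.svc.cluster.local',
}


def missing_calico_sans(pem_bytes):
    # Raises x509.ExtensionNotFound if the cert has no SAN extension at all.
    cert = x509.load_pem_x509_certificate(pem_bytes, default_backend())
    san = cert.extensions.get_extension_for_class(
        x509.SubjectAlternativeName).value
    return REQUIRED_DNS_SANS - set(san.get_values_for_type(x509.DNSName))


if __name__ == '__main__':
    with open('etcd-client.pem', 'rb') as f:  # hypothetical path
        missing = missing_calico_sans(f.read())
    sys.exit('missing SANs: %s' % missing if missing else 0)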

View File

@@ -1,4 +1,5 @@
 from . import logging
+import base64
 import jinja2
 import os
 import pkg_resources
@@ -38,6 +39,7 @@ class Renderer:
         LOG.debug('Templating "%s" into "%s"', path, target_path)

         env = jinja2.Environment(undefined=jinja2.StrictUndefined)
+        env.filters['b64enc'] = _base64_encode

         with open(path) as f:
             template = env.from_string(f.read())
@@ -52,3 +54,7 @@ class Renderer:
 def _ensure_path(path):
     base = os.path.dirname(path)
     os.makedirs(base, mode=0o775, exist_ok=True)
+
+
+def _base64_encode(s):
+    return base64.b64encode(s.encode()).decode()
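With the filter registered, any template the renderer processes can base64-encode strings inline, which is what the calico Secret below depends on. A minimal self-contained sketch of the behavior (mirroring the diff; the surrounding Renderer class is omitted):

import base64

import jinja2


def _base64_encode(s):
    return base64.b64encode(s.encode()).decode()


env = jinja2.Environment(undefined=jinja2.StrictUndefined)
env.filters['b64enc'] = _base64_encode

# StrictUndefined makes rendering fail loudly if key_pem is missing.
template = env.from_string("etcd-key: {{ key_pem | b64enc }}")
print(template.render(key_pem='-----BEGIN RSA PRIVATE KEY-----\n...'))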

View File

@@ -0,0 +1,422 @@
---
# This manifest installs the Service which gets traffic to the Calico
# etcd.
apiVersion: v1
kind: Service
metadata:
  labels:
    tier: control-plane
    component: kube-etcd
  name: calico-etcd
  namespace: kube-system
spec:
  # Select the calico-etcd pod running on the master.
  selector:
    tier: control-plane
    component: kube-etcd
  # This ClusterIP needs to be known in advance, since we cannot rely
  # on DNS to get access to etcd.
  clusterIP: {{ config['Network']['etcd_service_ip'] }}
  ports:
    - port: 2379
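Because the ClusterIP is pinned rather than dynamically allocated, it must be an unused address inside the service_ip_cidr configured earlier; with the example values, 10.96.232.136 falls inside 10.96.0.0/16. A quick sanity check of that invariant (illustrative only, not part of this commit):

import ipaddress

service_ip_cidr = '10.96.0.0/16'   # from the Network configuration above
etcd_service_ip = '10.96.232.136'  # the pinned calico-etcd ClusterIP

assert ipaddress.ip_address(etcd_service_ip) in \
    ipaddress.ip_network(service_ip_cidr), 'etcd_service_ip outside service CIDR'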
---
# Calico Version v2.2.1
# http://docs.projectcalico.org/v2.2/releases#v2.2.1
# This manifest includes the following component versions:
#   calico/node:v1.2.1
#   calico/cni:v1.8.3
#   calico/kube-policy-controller:v0.6.0

# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # The location of your etcd cluster. This uses the Service clusterIP
  # defined above.
  etcd_endpoints: https://{{ config['Network']['etcd_service_ip'] }}:2379

  # Configure the Calico backend to use.
  calico_backend: "bird"

  # The CNI network configuration to install on each node.
  cni_network_config: |-
    {
        "name": "k8s-pod-network",
        "cniVersion": "0.1.0",
        "type": "calico",
        "etcd_endpoints": "__ETCD_ENDPOINTS__",
        "etcd_key_file": "__ETCD_KEY_FILE__",
        "etcd_cert_file": "__ETCD_CERT_FILE__",
        "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
        "log_level": "info",
        "ipam": {
            "type": "calico-ipam"
        },
        "policy": {
            "type": "k8s",
            "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
            "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
        },
        "kubernetes": {
            "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
        }
    }

  # This deployment uses TLS-enabled etcd, so the following paths are set.
  # The Secret below is populated with the corresponding files.
  etcd_ca: "/calico-secrets/etcd-ca"
  etcd_cert: "/calico-secrets/etcd-cert"
  etcd_key: "/calico-secrets/etcd-key"

  # ippool.yaml: |-
  #   apiVersion: v1
  #   kind: ipPool
  #   metadata:
  #     cidr: {{ config['Network']['pod_ip_cidr'] }}
  #   spec:
  #     ipip:
  #       enabled: true
  #     nat-outgoing: true
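The __DOUBLE_UNDERSCORE__ tokens in cni_network_config are not Jinja; they are filled in at pod startup by the install-cni.sh script shipped in the calico/cni image, using values from the container's environment. Roughly, the substitution amounts to the following (illustrative Python only; the real work happens in the shell script):

cni_config = '"etcd_endpoints": "__ETCD_ENDPOINTS__"'

# install-cni.sh reads these from its environment; the value here is
# illustrative.
substitutions = {'ETCD_ENDPOINTS': 'https://10.96.232.136:2379'}

for name, value in substitutions.items():
    cni_config = cni_config.replace('__%s__' % name, value)

print(cni_config)  # "etcd_endpoints": "https://10.96.232.136:2379"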
---
# The following contains k8s Secrets for use with a TLS enabled etcd cluster.
# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: calico-etcd-secrets
  namespace: kube-system
data:
  # This self-hosted install expects three files with the following names,
  # populated here from the etcd-calico-client certificate. The values are
  # base64 encoded strings of the entire contents of each file.
  etcd-key: {{ config.get(kind='CertificateKey', alias='etcd-calico-client')['data'] | b64enc }}
  etcd-cert: {{ config.get(kind='Certificate', alias='etcd-calico-client')['data'] | b64enc }}
  etcd-ca: {{ config.get(kind='CertificateAuthority', name='etcd-client')['data'] | b64enc }}
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  template:
    metadata:
      labels:
        k8s-app: calico-node
      annotations:
        # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
        # reserves resources for critical add-on pods so that they can be rescheduled after
        # a failure. This annotation works in tandem with the toleration below.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      hostNetwork: true
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
        # This, along with the annotation above marks this pod as a critical add-on.
        - key: CriticalAddonsOnly
          operator: Exists
      serviceAccountName: calico-cni-plugin
      containers:
        # Runs calico/node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: quay.io/calico/node:v1.3.0
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            - name: WAIT_FOR_DATASTORE
              value: "true"
            - name: NODENAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Enable BGP. Disable to enforce policy only.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # Configure the IP Pool from which Pod IPs will be chosen.
            - name: CALICO_IPV4POOL_CIDR
              value: {{ config['Network']['pod_ip_cidr'] }}
            - name: CALICO_IPV4POOL_IPIP
              value: "always"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Set Felix logging to "info"
            - name: FELIX_LOGSEVERITYSCREEN
              value: "info"
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
            # Auto-detect the BGP IP address.
            - name: IP
              value: ""
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 250m
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /calico-secrets
              name: etcd-certs
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: quay.io/calico/cni:v1.9.1
          command: ["/install-cni.sh"]
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            - name: ETCD_CA_CERT_FILE
              value: /etc/kubernetes/calico/pki/etcd-client-ca.pem
            - name: ETCD_CERT_FILE
              value: /etc/kubernetes/calico/pki/etcd-client.pem
            - name: ETCD_KEY_FILE
              value: /etc/kubernetes/calico/pki/etcd-client-key.pem
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
            - mountPath: /calico-secrets
              name: etcd-certs
      volumes:
        # Used by calico/node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        # Mount in the etcd TLS secrets.
        - name: etcd-certs
          secret:
            secretName: calico-etcd-secrets
---
# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-policy-controller
  namespace: kube-system
  labels:
    k8s-app: calico-policy
spec:
  # The policy controller can only have a single active instance.
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-policy-controller
      namespace: kube-system
      labels:
        k8s-app: calico-policy-controller
      annotations:
        # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
        # reserves resources for critical add-on pods so that they can be rescheduled after
        # a failure. This annotation works in tandem with the toleration below.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      # The policy controller must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
        # This, along with the annotation above marks this pod as a critical add-on.
        - key: CriticalAddonsOnly
          operator: Exists
      serviceAccountName: calico-policy-controller
      containers:
        - name: calico-policy-controller
          image: quay.io/calico/kube-policy-controller:v0.6.0
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
            # The location of the Kubernetes API. Use the default Kubernetes
            # service for API access.
            - name: K8S_API
              value: "https://kubernetes.default:443"
            # Since we're running in the host namespace and might not have KubeDNS
            # access, configure the container's /etc/hosts to resolve
            # kubernetes.default to the correct service clusterIP.
            - name: CONFIGURE_ETC_HOSTS
              value: "true"
          volumeMounts:
            # Mount in the etcd TLS secrets.
            - mountPath: /calico-secrets
              name: etcd-certs
      volumes:
        # Mount in the etcd TLS secrets.
        - name: etcd-certs
          secret:
            secretName: calico-etcd-secrets
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: calico-cni-plugin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-cni-plugin
subjects:
  - kind: ServiceAccount
    name: calico-cni-plugin
    namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-cni-plugin
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources:
      - pods
      - nodes
    verbs:
      - get
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-cni-plugin
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: calico-policy-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-policy-controller
subjects:
  - kind: ServiceAccount
    name: calico-policy-controller
    namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-policy-controller
  namespace: kube-system
rules:
  - apiGroups:
      - ""
      - extensions
    resources:
      - pods
      - namespaces
      - networkpolicies
    verbs:
      - watch
      - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-policy-controller
  namespace: kube-system

View File

@ -1,140 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"type": "flannel",
"delegate": {
"isDefaultGateway": true
}
}
net-conf.json: |
{
"Network": "{{ config['Network']['pod_ip_cidr'] }}",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-system
labels:
tier: node
app: flannel
spec:
template:
metadata:
labels:
tier: node
app: flannel
spec:
hostNetwork: true
nodeSelector:
beta.kubernetes.io/arch: amd64
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
serviceAccountName: flannel
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.7.1-amd64
command:
- /opt/bin/flanneld
- --ip-masq
- --kube-subnet-mgr
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: install-cni
image: quay.io/coreos/flannel:v0.7.1-amd64
command: [ "/bin/sh", "-c", "set -e -x; cp -f /etc/kube-flannel/cni-conf.json /etc/cni/net.d/10-flannel.conf; while true; do sleep 3600; done" ]
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg