Merge pull request #39 from mark-burnett/armadify-calico

Replace usage of kubectl apply with armada apply
Scott Hussey 2017-07-28 14:37:58 -05:00 committed by GitHub
commit 772a8b3726
30 changed files with 769 additions and 613 deletions
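In practice, this swaps the genesis asset loader's `kubectl apply` retry loops (removed in the asset-loader hunk near the end of this diff) for a loop that runs `armada apply` against rendered Armada manifests through a local Tiller. Below is a condensed, illustrative sketch assembled from the armada-loader hunk later in this diff; the surrounding Pod spec and the parallel external-armada.yaml branch are omitted.

```yaml
# Sketch only: loader command fragment, condensed from the armada-loader hunk below.
command:
  - /bin/bash
  - -c
  - |-
    set -x
    mkdir -p /root/.kube
    cp /etc/kubernetes/armada-loader/kubeconfig.yaml /root/.kube/config
    cd /etc/kubernetes/armada-loader/assets
    if [ -s promenade-armada.yaml ]; then
      while true; do
        sleep 10
        # Retry until Tiller on the genesis host answers and the apply succeeds.
        if armada --debug apply --tiller-host 127.0.0.1 promenade-armada.yaml ; then
          break
        fi
      done
    fi
```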

View File

@ -48,5 +48,7 @@ RUN set -ex \
COPY requirements-frozen.txt /promenade
RUN pip install --no-cache-dir -r requirements-frozen.txt
COPY ./assets/ /assets/
COPY . /promenade
RUN pip install -e /promenade

View File

@ -0,0 +1 @@
/*.tgz

View File

@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

View File

@ -0,0 +1,4 @@
apiVersion: v1
description: A Helm chart for Kubernetes
name: calico
version: 0.1.0

View File

@ -0,0 +1,51 @@
---
# Calico Version v2.2.1
# http://docs.projectcalico.org/v2.2/releases#v2.2.1
# This manifest includes the following component versions:
# calico/node:v1.2.1
# calico/cni:v1.8.3
# calico/kube-policy-controller:v0.6.0
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# The location of your etcd cluster. This uses the Service clusterIP
# defined below.
etcd_endpoints: https://{{ .Values.etcd.service.ip }}:{{ .Values.etcd.service.port }}
# Configure the Calico backend to use.
calico_backend: "bird"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.1.0",
"type": "calico",
"etcd_endpoints": "__ETCD_ENDPOINTS__",
"etcd_key_file": "__ETCD_KEY_FILE__",
"etcd_cert_file": "__ETCD_CERT_FILE__",
"etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
"log_level": "info",
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s",
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
}
}
# If you're using TLS enabled etcd uncomment the following.
# You must also populate the Secret below with these files.
etcd_ca: "/calico-secrets/etcd-ca"
etcd_cert: "/calico-secrets/etcd-cert"
etcd_key: "/calico-secrets/etcd-key"

View File

@ -0,0 +1,165 @@
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
template:
metadata:
labels:
k8s-app: calico-node
annotations:
# Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
# reserves resources for critical add-on pods so that they can be rescheduled after
# a failure. This annotation works in tandem with the toleration below.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
# Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
# This, along with the annotation above marks this pod as a critical add-on.
- key: CriticalAddonsOnly
operator: Exists
serviceAccountName: calico-cni-plugin
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: {{ .Values.images.node }}
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
- name: WAIT_FOR_DATASTORE
value: "true"
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Enable BGP. Disable to enforce policy only.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Configure the IP Pool from which Pod IPs will be chosen.
- name: CALICO_IPV4POOL_CIDR
value: {{ .Values.calico.pod_ip_cidr }}
- name: CALICO_IPV4POOL_IPIP
value: "always"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
# Auto-detect the BGP IP address.
- name: IP
value: ""
{{- if .Values.calico.interface }}
- name: IP_AUTODETECTION_METHOD
value: interface={{ .Values.calico.interface }}
{{- end }}
securityContext:
privileged: true
resources:
requests:
cpu: 250m
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /calico-secrets
name: etcd-certs
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: {{ .Values.images.cni }}
command: ["/install-cni.sh"]
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
- name: ETCD_CA_CERT_FILE
value: /etc/kubernetes/calico/pki/etcd-client-ca.pem
- name: ETCD_CERT_FILE
value: /etc/kubernetes/calico/pki/etcd-client.pem
- name: ETCD_KEY_FILE
value: /etc/kubernetes/calico/pki/etcd-client-key.pem
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- mountPath: /calico-secrets
name: etcd-certs
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Mount in the etcd TLS secrets.
- name: etcd-certs
secret:
secretName: calico-etcd-secrets

View File

@ -0,0 +1,84 @@
---
# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
spec:
# The policy controller can only have a single active instance.
replicas: 1
strategy:
type: Recreate
template:
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy-controller
annotations:
# Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
# reserves resources for critical add-on pods so that they can be rescheduled after
# a failure. This annotation works in tandem with the toleration below.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
# The policy controller must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
# Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
# This, along with the annotation above marks this pod as a critical add-on.
- key: CriticalAddonsOnly
operator: Exists
serviceAccountName: calico-policy-controller
containers:
- name: calico-policy-controller
image: {{ .Values.images.policy_controller }}
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
# The location of the Kubernetes API. Use the default Kubernetes
# service for API access.
- name: K8S_API
value: "https://kubernetes.default:443"
# Since we're running in the host namespace and might not have KubeDNS
# access, configure the container's /etc/hosts to resolve
# kubernetes.default to the correct service clusterIP.
- name: CONFIGURE_ETC_HOSTS
value: "true"
volumeMounts:
# Mount in the etcd TLS secrets.
- mountPath: /calico-secrets
name: etcd-certs
volumes:
# Mount in the etcd TLS secrets.
- name: etcd-certs
secret:
secretName: calico-etcd-secrets

View File

@ -0,0 +1,72 @@
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico-cni-plugin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-cni-plugin
subjects:
- kind: ServiceAccount
name: calico-cni-plugin
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-cni-plugin
namespace: kube-system
rules:
- apiGroups: [""]
resources:
- pods
- nodes
verbs:
- get
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-cni-plugin
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico-policy-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-policy-controller
subjects:
- kind: ServiceAccount
name: calico-policy-controller
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-policy-controller
namespace: kube-system
rules:
- apiGroups:
- ""
- extensions
resources:
- pods
- namespaces
- networkpolicies
verbs:
- watch
- list
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-policy-controller
namespace: kube-system

View File

@ -0,0 +1,17 @@
---
# The following contains k8s Secrets for use with a TLS enabled etcd cluster.
# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: calico-etcd-secrets
namespace: kube-system
data:
# Populate the following files with etcd TLS configuration if desired, but leave blank if
# not using TLS for etcd.
# This self-hosted install expects three files with the following names. The values
# should be base64 encoded strings of the entire contents of each file.
etcd-key: {{ .Values.calico.etcd.credentials.key | b64enc }}
etcd-cert: {{ .Values.calico.etcd.credentials.cert | b64enc }}
etcd-ca: {{ .Values.calico.etcd.credentials.ca | b64enc }}

View File

@ -0,0 +1,21 @@
---
# This manifest installs the Service which gets traffic to the Calico
# etcd.
apiVersion: v1
kind: Service
metadata:
labels:
tier: control-plane
component: calico-etcd
name: calico-etcd
namespace: kube-system
spec:
# Select the calico-etcd pod running on the master.
selector:
tier: control-plane
component: calico-etcd
# This ClusterIP needs to be known in advance, since we cannot rely
# on DNS to get access to etcd.
clusterIP: {{ .Values.etcd.service.ip }}
ports:
- port: {{ .Values.etcd.service.port }}

View File

@ -0,0 +1,20 @@
calico:
pod_ip_cidr: 10.97.0.0/16
etcd:
credentials:
ca: |-
invalid ca
cert: |-
invalid cert
key: |-
invalid key
images:
node: quay.io/calico/node:v1.3.0
cni: quay.io/calico/cni:v1.9.1
policy_controller: quay.io/calico/kube-policy-controller:v0.6.0
etcd:
service:
ip: 10.96.232.136
port: 6666

View File

@ -0,0 +1,4 @@
apiVersion: v1
description: A Helm chart for Kubernetes
name: kube-dns
version: 0.1.0

View File

@ -0,0 +1,13 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-dns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
data:
upstreamNameservers: |-
{{ .Values.kube_dns.upstream_nameservers | toJson }}

View File

@ -1,37 +1,3 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-dns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
data:
upstreamNameservers: |-
{{ config['Network']['dns_servers'] | tojson }}
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "KubeDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: {{ config['Network']['cluster_dns'] }}
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
---
apiVersion: extensions/v1beta1
kind: Deployment
@ -41,7 +7,7 @@ metadata:
name: kube-dns
namespace: kube-system
spec:
replicas: 2
replicas: {{ .Values.kube_dns.replicas }}
selector:
matchLabels:
k8s-app: kube-dns
@ -95,7 +61,7 @@ spec:
env:
- name: PROMETHEUS_PORT
value: "10055"
image: {{ config['Versions']['images']['kubernetes']['dns']['kubedns'] }}
image: {{ .Values.images.kube_dns }}
imagePullPolicy: IfNotPresent
ports:
- containerPort: 10053
@ -140,7 +106,7 @@ spec:
- --server=/cluster.local/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
- --server=/ip6.arpa/127.0.0.1#10053
image: {{ config['Versions']['images']['kubernetes']['dns']['dnsmasq'] }}
image: {{ .Values.images.dnsmasq }}
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
@ -174,7 +140,7 @@ spec:
- --logtostderr
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A
image: {{ config['Versions']['images']['kubernetes']['dns']['sidecar'] }}
image: {{ .Values.images.sidecar }}
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
@ -215,43 +181,3 @@ spec:
name: kube-dns
optional: true
name: kube-dns-config
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:kube-dns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:kube-dns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-dns
subjects:
- kind: ServiceAccount
name: kube-dns
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-dns
namespace: kube-system

View File

@ -0,0 +1,39 @@
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:kube-dns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:kube-dns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-dns
subjects:
- kind: ServiceAccount
name: kube-dns
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-dns
namespace: kube-system

View File

@ -0,0 +1,21 @@
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "KubeDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: {{ .Values.service.cluster_ip }}
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP

View File

@ -0,0 +1,13 @@
images:
dnsmasq: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.2
kube_dns: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.2
sidecar: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.2
kube_dns:
replicas: 2
upstream_nameservers:
- 8.8.8.8
- 8.8.4.4
service:
cluster_ip: 10.96.0.10

View File

@ -43,6 +43,9 @@ needed for a particular node.
`spec` contains specific data for each kind of configuration document.
Additionally, documents for [Armada](https://github.com/att-comdev/armada) are
allowed and will be applied after CNI and DNS are deployed.
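For example, an Armada manifest, chart group, and chart can sit directly alongside the Promenade documents in the same configuration stream, as shown in the sketch below; it mirrors the example-application documents added to the example input configuration later in this commit, with `redis` standing in for a real workload.

```yaml
---
schema: armada/Manifest/v1
metadata:
  schema: metadata/Document/v1
  name: example-application
data:
  release_prefix: example
  chart_groups:
    - example-application
---
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: example-application
data:
  description: Just an example
  chart_group:
    - redis
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: redis
data:
  chart_name: redis
  release: example-redis
  namespace: default
  timeout: 600
  values:
    persistence:
      enabled: false
  source:
    type: git
    location: https://github.com/kubernetes/charts.git
    subpath: stable/redis
  dependencies: []
```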
## Generating Configuration from Minimal Input

To construct a complete set of cluster configuration, the minimal inputs are

View File

@ -8,7 +8,6 @@ spec:
nodes:
n0:
ip: 192.168.77.10
kubernetes_interface: enp0s8
roles:
- master
- genesis
@ -16,21 +15,18 @@ spec:
- beta.kubernetes.io/arch=amd64
n1:
ip: 192.168.77.11
kubernetes_interface: enp0s8
roles:
- master
additional_labels:
- beta.kubernetes.io/arch=amd64
n2:
ip: 192.168.77.12
kubernetes_interface: enp0s8
roles:
- master
additional_labels:
- beta.kubernetes.io/arch=amd64
n3:
ip: 192.168.77.13
kubernetes_interface: enp0s8
roles:
- worker
additional_labels:
@ -49,6 +45,7 @@ spec:
pod_ip_cidr: 10.97.0.0/16
service_ip_cidr: 10.96.0.0/16
calico_etcd_service_ip: 10.96.232.136
calico_interface: enp0s8
dns_servers:
- 8.8.8.8
- 8.8.4.4
@ -63,7 +60,7 @@ metadata:
target: all
spec:
images:
armada: quay.io/attcomdev/armada:latest
armada: quay.io/attcomdev/armada:master
calico:
cni: quay.io/calico/cni:v1.9.1
etcd: quay.io/coreos/etcd:v3.2.1
@ -81,10 +78,46 @@ spec:
proxy: gcr.io/google_containers/hyperkube-amd64:v1.6.4
scheduler: gcr.io/google_containers/hyperkube-amd64:v1.6.4
promenade: quay.io/attcomdev/promenade:latest
tiller: gcr.io/kubernetes-helm/tiller:v2.4.2
tiller: gcr.io/kubernetes-helm/tiller:v2.5.0
packages:
docker: docker.io=1.12.6-0ubuntu1~16.04.1
dnsmasq: dnsmasq=2.75-1ubuntu0.16.04.2
socat: socat=1.7.3.1-1
additional_packages:
- ceph-common=10.2.7-0ubuntu0.16.04.1
---
schema: armada/Manifest/v1
metadata:
schema: metadata/Document/v1
name: example-application
data:
release_prefix: example
chart_groups:
- example-application
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: example-application
data:
description: Just an example
chart_group:
- redis
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: redis
data:
chart_name: redis
release: example-redis
namespace: default
timeout: 600
values:
persistence:
enabled: false
source:
type: git
location: https://github.com/kubernetes/charts.git
subpath: stable/redis
dependencies: []

View File

@ -10,7 +10,22 @@ LOG = logging.getLogger(__name__)
def load(f):
    return Configuration(list(map(Document, yaml.load_all(f))))
    return Configuration(list(map(instantiate_document, yaml.safe_load_all(f))))


def instantiate_document(data):
    if data.get('schema', '').startswith('armada'):
        return Document({
            'apiVersion': 'promenade/v1',
            'kind': 'ArmadaDocument',
            'metadata': {
                'name': data['schema'] + '/' + data['metadata']['name'],
                'target': 'none',
            },
            'spec': data,
        })
    else:
        return Document(data)
class Document:
@ -22,6 +37,8 @@ class Document:
    }

    SUPPORTED_KINDS = {
        'ArmadaDocument',
        'Certificate',
        'CertificateAuthority',
        'CertificateAuthorityKey',
@ -116,6 +133,9 @@ class Configuration:
            if not kind or document.kind == kind:
                yield document

    def get_armada_documents(self):
        return [d.data['spec'] for d in self.iterate(kind='ArmadaDocument')]

    def _iterate_with_target(self, target):
        for document in self.documents:
            if document.target == target or document.target == 'all':

View File

@ -41,6 +41,7 @@ class Generator:
        cluster = self.input_config['Cluster']
        network = self.input_config['Network']
        versions = self.input_config['Versions']
        armada_documents = list(self.input_config.iterate(kind='ArmadaDocument'))

        cluster_name = cluster.metadata['name']

        LOG.info('Generating configuration for cluster "%s"', cluster_name)
@ -198,6 +199,7 @@ class Generator:
                role_specific_documents.extend(master_documents)

            if 'genesis' in data.get('roles', []):
                role_specific_documents.extend(armada_documents)
                role_specific_documents.extend(_genesis_config(hostname, data,
                                                               masters, network, keys))
                role_specific_documents.append(_genesis_etcd_config(cluster_name, hostname))

View File

@ -1,8 +1,10 @@
from . import logging
import base64
import io
import jinja2
import os
import pkg_resources
import yaml
__all__ = ['Renderer']
@ -45,6 +47,7 @@ class Renderer:
            loader=jinja2.PackageLoader('promenade', 'templates/include'),
            undefined=jinja2.StrictUndefined)
        env.filters['b64enc'] = _base64_encode
        env.filters['yaml_safe_dump_all'] = _yaml_safe_dump_all

        with open(path) as f:
            template = env.from_string(f.read())
@ -63,3 +66,9 @@ def _ensure_path(path):
def _base64_encode(s):
    return base64.b64encode(s.encode()).decode()


def _yaml_safe_dump_all(documents):
    f = io.StringIO()
    yaml.safe_dump_all(documents, f)
    return f.getvalue()

View File

@ -0,0 +1 @@
{{ config.get_armada_documents() | yaml_safe_dump_all }}

View File

@ -0,0 +1,71 @@
---
schema: armada/Manifest/v1
metadata:
schema: metadata/Document/v1
name: promenade-networking
data:
release_prefix: promenade
chart_groups:
- promenade-networking
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: promenade-networking
data:
description: core
sequenced: True
chart_group:
- calico
- kube-dns
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: calico
data:
chart_name: calico
release: calico
namespace: kube-system
timeout: 600
values:
calico:
etcd:
credentials:
ca: |-
{{ config.get(kind='CertificateAuthority', name='calico-etcd-client')['data'] | indent(12, True) }}
cert: |-
{{ config.get(kind='Certificate', alias='calico-etcd-node-client')['data'] | indent(12, True) }}
key: |-
{{ config.get(kind='CertificateKey', alias='calico-etcd-node-client')['data'] | indent(12, True) }}
source:
type: local
location: /etc/kubernetes/armada-loader/assets/charts
subpath: calico
dependencies: []
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kube-dns
data:
chart_name: kube-dns
release: kube-dns
namespace: kube-system
timeout: 600
values:
kube_dns:
replicas: 1
{%- if config['Network']['dns_servers'] is defined %}
upstream_nameservers:
{%- for nameserver in config['Network']['dns_servers'] %}
- {{ nameserver }}
{%- endfor %}
{%- else %}
upstream_nameservers: []
{%- endif %}
source:
type: local
location: /etc/kubernetes/armada-loader/assets/charts
subpath: kube-dns
dependencies: []

View File

@ -1,90 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: tiller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: tiller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: tiller
namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: helm
name: tiller
name: tiller-deploy
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
app: helm
name: tiller
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
app: helm
name: tiller
spec:
serviceAccountName: tiller
containers:
- env:
- name: TILLER_NAMESPACE
value: kube-system
image: {{ config['Versions']['images']['tiller'] }}
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /liveness
port: 44135
scheme: HTTP
initialDelaySeconds: 1
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: tiller
ports:
- containerPort: 44134
name: tiller
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readiness
port: 44135
scheme: HTTP
initialDelaySeconds: 1
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- key: CriticalAddonsOnly
operator: Exists

View File

@ -1,416 +0,0 @@
---
# This manifest installs the Service which gets traffic to the Calico
# etcd.
apiVersion: v1
kind: Service
metadata:
labels:
tier: control-plane
component: calico-etcd
name: calico-etcd
namespace: kube-system
spec:
# Select the calico-etcd pod running on the master.
selector:
tier: control-plane
component: calico-etcd
# This ClusterIP needs to be known in advance, since we cannot rely
# on DNS to get access to etcd.
clusterIP: {{ config['Network']['calico_etcd_service_ip'] }}
ports:
- port: 6666
---
# Calico Version v2.2.1
# http://docs.projectcalico.org/v2.2/releases#v2.2.1
# This manifest includes the following component versions:
# calico/node:v1.2.1
# calico/cni:v1.8.3
# calico/kube-policy-controller:v0.6.0
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# The location of your etcd cluster. This uses the Service clusterIP
# defined below.
etcd_endpoints: https://{{ config['Network']['calico_etcd_service_ip'] }}:6666
# Configure the Calico backend to use.
calico_backend: "bird"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.1.0",
"type": "calico",
"etcd_endpoints": "__ETCD_ENDPOINTS__",
"etcd_key_file": "__ETCD_KEY_FILE__",
"etcd_cert_file": "__ETCD_CERT_FILE__",
"etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
"log_level": "info",
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s",
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
}
}
# If you're using TLS enabled etcd uncomment the following.
# You must also populate the Secret below with these files.
etcd_ca: "/calico-secrets/etcd-ca"
etcd_cert: "/calico-secrets/etcd-cert"
etcd_key: "/calico-secrets/etcd-key"
---
# The following contains k8s Secrets for use with a TLS enabled etcd cluster.
# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: calico-etcd-secrets
namespace: kube-system
data:
# Populate the following files with etcd TLS configuration if desired, but leave blank if
# not using TLS for etcd.
# This self-hosted install expects three files with the following names. The values
# should be base64 encoded strings of the entire contents of each file.
etcd-key: {{ config.get(kind='CertificateKey', alias='calico-etcd-node-client')['data'] | b64enc }}
etcd-cert: {{ config.get(kind='Certificate', alias='calico-etcd-node-client')['data'] | b64enc }}
etcd-ca: {{ config.get(kind='CertificateAuthority', name='calico-etcd-client')['data'] | b64enc }}
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
template:
metadata:
labels:
k8s-app: calico-node
annotations:
# Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
# reserves resources for critical add-on pods so that they can be rescheduled after
# a failure. This annotation works in tandem with the toleration below.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
# Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
# This, along with the annotation above marks this pod as a critical add-on.
- key: CriticalAddonsOnly
operator: Exists
serviceAccountName: calico-cni-plugin
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: {{ config['Versions']['images']['calico']['node'] }}
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
- name: WAIT_FOR_DATASTORE
value: "true"
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Enable BGP. Disable to enforce policy only.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Configure the IP Pool from which Pod IPs will be chosen.
- name: CALICO_IPV4POOL_CIDR
value: {{ config['Network']['pod_ip_cidr'] }}
- name: CALICO_IPV4POOL_IPIP
value: "always"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
# Auto-detect the BGP IP address.
- name: IP
value: ""
{%- if config['Node']['kubernetes_interface'] is defined %}
- name: IP_AUTODETECTION_METHOD
value: interface={{ config['Node']['kubernetes_interface'] }}
{%- endif %}
securityContext:
privileged: true
resources:
requests:
cpu: 250m
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /calico-secrets
name: etcd-certs
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: {{ config['Versions']['images']['calico']['cni'] }}
command: ["/install-cni.sh"]
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
- name: ETCD_CA_CERT_FILE
value: /etc/kubernetes/calico/pki/etcd-client-ca.pem
- name: ETCD_CERT_FILE
value: /etc/kubernetes/calico/pki/etcd-client.pem
- name: ETCD_KEY_FILE
value: /etc/kubernetes/calico/pki/etcd-client-key.pem
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- mountPath: /calico-secrets
name: etcd-certs
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Mount in the etcd TLS secrets.
- name: etcd-certs
secret:
secretName: calico-etcd-secrets
---
# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
spec:
# The policy controller can only have a single active instance.
replicas: 1
strategy:
type: Recreate
template:
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy-controller
annotations:
# Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
# reserves resources for critical add-on pods so that they can be rescheduled after
# a failure. This annotation works in tandem with the toleration below.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
# The policy controller must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
# Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
# This, along with the annotation above marks this pod as a critical add-on.
- key: CriticalAddonsOnly
operator: Exists
serviceAccountName: calico-policy-controller
containers:
- name: calico-policy-controller
image: {{ config['Versions']['images']['calico']['policy-controller'] }}
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
# The location of the Kubernetes API. Use the default Kubernetes
# service for API access.
- name: K8S_API
value: "https://kubernetes.default:443"
# Since we're running in the host namespace and might not have KubeDNS
# access, configure the container's /etc/hosts to resolve
# kubernetes.default to the correct service clusterIP.
- name: CONFIGURE_ETC_HOSTS
value: "true"
volumeMounts:
# Mount in the etcd TLS secrets.
- mountPath: /calico-secrets
name: etcd-certs
volumes:
# Mount in the etcd TLS secrets.
- name: etcd-certs
secret:
secretName: calico-etcd-secrets
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico-cni-plugin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-cni-plugin
subjects:
- kind: ServiceAccount
name: calico-cni-plugin
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-cni-plugin
namespace: kube-system
rules:
- apiGroups: [""]
resources:
- pods
- nodes
verbs:
- get
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-cni-plugin
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico-policy-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-policy-controller
subjects:
- kind: ServiceAccount
name: calico-policy-controller
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-policy-controller
namespace: kube-system
rules:
- apiGroups:
- ""
- extensions
resources:
- pods
- namespaces
- networkpolicies
verbs:
- watch
- list
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-policy-controller
namespace: kube-system

View File

@ -12,21 +12,30 @@ spec:
containers:
- name: loader
image: {{ config['Versions']['images']['armada'] }}
imagePullPolicy: Always # We are following a moving branch for now.
imagePullPolicy: IfNotPresent
command:
- /bin/bash
- -c
- |-
set -x
mkdir -p /root/.kube
cp /etc/kubernetes/armada-loader/kubeconfig.yaml /root/.kube/config
cd /etc/kubernetes/armada-loader/assets
if [ -s promenade-armada.yaml ]; then
mkdir -p /root/.kube
cp /etc/kubernetes/armada-loader/kubeconfig.yaml /root/.kube/config
while true; do
sleep 10
if armada --debug apply promenade-armada.yaml ; then
if armada --debug apply --tiller-host 127.0.0.1 promenade-armada.yaml ; then
break
fi
done
fi
if [ -s external-armada.yaml ]; then
while true; do
sleep 10
if armada --debug apply --tiller-host 127.0.0.1 external-armada.yaml ; then
break
fi
done

View File

@ -22,22 +22,6 @@ spec:
export KUBECONFIG=/etc/kubernetes/asset-loader/kubeconfig.yaml
DELAY=20
while ! /kubectl apply -f /etc/kubernetes/asset-loader/cni; do
sleep $DELAY
done
while ! /kubectl get nodes | tail -n +2 | grep Ready; do
sleep $DELAY
done
while ! /kubectl apply -f /etc/kubernetes/asset-loader/dns; do
sleep $DELAY
done
while ! /kubectl -n kube-system get pods | grep dns | grep Running; do
sleep $DELAY
done
while ! /kubectl apply -f /etc/kubernetes/asset-loader/assets; do
sleep $DELAY
done

View File

@ -0,0 +1,55 @@
---
apiVersion: v1
kind: Pod
metadata:
name: tiller
namespace: kube-system
labels:
app: promenade
component: genesis-tiller
spec:
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
containers:
- env:
- name: TILLER_NAMESPACE
value: kube-system
image: {{ config['Versions']['images']['tiller'] }}
command:
- /tiller
- -logtostderr
- -v
- "99"
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /liveness
port: 44135
scheme: HTTP
initialDelaySeconds: 1
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: tiller
ports:
- containerPort: 44134
name: tiller
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readiness
port: 44135
scheme: HTTP
initialDelaySeconds: 1
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30

View File

@ -22,7 +22,8 @@ spec:
- --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds
- --anonymous-auth=false
- --client-ca-file=/etc/kubernetes/pki/cluster-ca.pem
- --insecure-port=0
- --insecure-port=8080
- --insecure-bind-address=127.0.0.1
- --bind-address=0.0.0.0
- --runtime-config=batch/v2alpha1=true
- --secure-port=443