Migrate to Promenade v2

Upgrade the deployment artifacts and script
to use the refactored Promenade v2 genesis process

- Include UCP service manifest in the promenade deployment
- Remove polling logic as Armada will block until a chart group
  is fully deployed
- Integrate Drydock with Deckhand
- Provide example topology documents in Deckhand format

Change-Id: Ie1a7c58f5fef128f404f3b9b239d19e9cd570383
This commit is contained in:
Scott Hussey 2017-11-08 09:25:18 -06:00 committed by Mark Burnett
parent 03121471f6
commit a0e7dba0bc
15 changed files with 1349 additions and 968 deletions

View File

@ -0,0 +1,17 @@
---
# Armada manifest naming the chart groups (in order) used to bootstrap
# the Kubernetes cluster during Promenade genesis.
schema: armada/Manifest/v1
metadata:
  schema: metadata/Document/v1
  name: cluster-bootstrap
  layeringDefinition:
    abstract: false
    layer: site
data:
  # Every Armada-managed release name is prefixed with this string.
  release_prefix: ucp
  # Applied in the order listed; per the commit message, Armada blocks
  # until each chart group is fully deployed before starting the next.
  chart_groups:
    - kubernetes-proxy
    - container-networking
    - dns
    - kubernetes
    - kubernetes-rbac
...

View File

@ -0,0 +1,15 @@
---
# Host Docker daemon configuration consumed by Promenade.
schema: promenade/Docker/v1
metadata:
  schema: metadata/Document/v1
  name: docker
  layeringDefinition:
    abstract: false
    layer: site
data:
  # daemon.json-style keys rendered into the Docker engine configuration.
  config:
    insecure-registries:
      - 'registry:5000'  # local registry reachable over plain HTTP
    live-restore: true   # keep containers running across dockerd restarts
    storage-driver: overlay2
...

View File

@ -0,0 +1,40 @@
---
# Promenade Genesis document: describes the single seed node from which
# the rest of the cluster is bootstrapped.
schema: promenade/Genesis/v1
metadata:
  schema: metadata/Document/v1
  name: genesis
  layeringDefinition:
    abstract: false
    layer: site
data:
  # ${...} tokens are substituted by the deployment script before use.
  hostname: ${GENESIS_NODE_NAME}
  ip: ${GENESIS_NODE_IP}
  labels:
    # NOTE(review): static vs dynamic label application timing is defined by
    # Promenade, not visible here — confirm against the Promenade docs.
    static:
      - calico-etcd=enabled
      - node-role.kubernetes.io/master=
    dynamic:
      - kubernetes-apiserver=enabled
      - kubernetes-controller-manager=enabled
      - kubernetes-etcd=enabled
      - kubernetes-scheduler=enabled
      - promenade-genesis=enabled
      - ucp-control-plane=enabled
      - ceph-osd=enabled
      - ceph-mon=enabled
      - ceph-rgw=enabled
      - ceph-mds=enabled
  images:
    armada: ${ARMADA_IMAGE}
    helm:
      tiller: ${TILLER_IMAGE}
    kubernetes:
      apiserver: ${KUBE_APISERVER_IMAGE}
      controller-manager: ${KUBE_CTLRMGR_IMAGE}
      etcd: ${KUBE_ETCD_IMAGE}
      scheduler: ${KUBE_SCHED_IMAGE}
  files:
    # Presence of this file triggers calico-etcd bootstrapping (see the
    # calico-etcd chart's bootstrapping.filename value).
    - path: /var/lib/anchor/calico-etcd-bootstrap
      content: "# placeholder for triggering calico etcd bootstrapping"
      # 0644 is a YAML 1.1 octal literal (integer 420). Assumed Promenade
      # expects an integer mode here — confirm before quoting it.
      mode: 0644
...

View File

@ -0,0 +1,62 @@
---
# Promenade host-system configuration: binaries, container images and apt
# packages laid down on each host.
schema: promenade/HostSystem/v1
metadata:
  schema: metadata/Document/v1
  name: host-system
  layeringDefinition:
    abstract: false
    layer: site
data:
  files:
    # kubelet binary extracted from the upstream Kubernetes release tarball.
    - path: /opt/kubernetes/bin/kubelet
      tar_url: ${KUBE_KUBELET_TAR}
      tar_path: kubernetes/node/bin/kubelet
      mode: 0555  # octal int literal, consistent with the Genesis document
  images:
    coredns: coredns/coredns:011
    helm:
      helm: lachlanevenson/k8s-helm:v2.5.1
    kubernetes:
      kubectl: ${KUBECTL_IMAGE}
  packages:
    repositories:
      - deb ${DOCKER_REPO_URL} ubuntu-xenial main
    # Docker release signing key. NOTE(review): ASCII armor requires a blank
    # line after the BEGIN header; it appears to have been lost in extraction
    # and is restored below — confirm against the upstream document.
    keys:
      - |-
        -----BEGIN PGP PUBLIC KEY BLOCK-----

        mQINBFWln24BEADrBl5p99uKh8+rpvqJ48u4eTtjeXAWbslJotmC/CakbNSqOb9o
        ddfzRvGVeJVERt/Q/mlvEqgnyTQy+e6oEYN2Y2kqXceUhXagThnqCoxcEJ3+KM4R
        mYdoe/BJ/J/6rHOjq7Omk24z2qB3RU1uAv57iY5VGw5p45uZB4C4pNNsBJXoCvPn
        TGAs/7IrekFZDDgVraPx/hdiwopQ8NltSfZCyu/jPpWFK28TR8yfVlzYFwibj5WK
        dHM7ZTqlA1tHIG+agyPf3Rae0jPMsHR6q+arXVwMccyOi+ULU0z8mHUJ3iEMIrpT
        X+80KaN/ZjibfsBOCjcfiJSB/acn4nxQQgNZigna32velafhQivsNREFeJpzENiG
        HOoyC6qVeOgKrRiKxzymj0FIMLru/iFF5pSWcBQB7PYlt8J0G80lAcPr6VCiN+4c
        NKv03SdvA69dCOj79PuO9IIvQsJXsSq96HB+TeEmmL+xSdpGtGdCJHHM1fDeCqkZ
        hT+RtBGQL2SEdWjxbF43oQopocT8cHvyX6Zaltn0svoGs+wX3Z/H6/8P5anog43U
        65c0A+64Jj00rNDr8j31izhtQMRo892kGeQAaaxg4Pz6HnS7hRC+cOMHUU4HA7iM
        zHrouAdYeTZeZEQOA7SxtCME9ZnGwe2grxPXh/U/80WJGkzLFNcTKdv+rwARAQAB
        tDdEb2NrZXIgUmVsZWFzZSBUb29sIChyZWxlYXNlZG9ja2VyKSA8ZG9ja2VyQGRv
        Y2tlci5jb20+iQI4BBMBAgAiBQJVpZ9uAhsvBgsJCAcDAgYVCAIJCgsEFgIDAQIe
        AQIXgAAKCRD3YiFXLFJgnbRfEAC9Uai7Rv20QIDlDogRzd+Vebg4ahyoUdj0CH+n
        Ak40RIoq6G26u1e+sdgjpCa8jF6vrx+smpgd1HeJdmpahUX0XN3X9f9qU9oj9A4I
        1WDalRWJh+tP5WNv2ySy6AwcP9QnjuBMRTnTK27pk1sEMg9oJHK5p+ts8hlSC4Sl
        uyMKH5NMVy9c+A9yqq9NF6M6d6/ehKfBFFLG9BX+XLBATvf1ZemGVHQusCQebTGv
        0C0V9yqtdPdRWVIEhHxyNHATaVYOafTj/EF0lDxLl6zDT6trRV5n9F1VCEh4Aal8
        L5MxVPcIZVO7NHT2EkQgn8CvWjV3oKl2GopZF8V4XdJRl90U/WDv/6cmfI08GkzD
        YBHhS8ULWRFwGKobsSTyIvnbk4NtKdnTGyTJCQ8+6i52s+C54PiNgfj2ieNn6oOR
        7d+bNCcG1CdOYY+ZXVOcsjl73UYvtJrO0Rl/NpYERkZ5d/tzw4jZ6FCXgggA/Zxc
        jk6Y1ZvIm8Mt8wLRFH9Nww+FVsCtaCXJLP8DlJLASMD9rl5QS9Ku3u7ZNrr5HWXP
        HXITX660jglyshch6CWeiUATqjIAzkEQom/kEnOrvJAtkypRJ59vYQOedZ1sFVEL
        MXg2UCkD/FwojfnVtjzYaTCeGwFQeqzHmM241iuOmBYPeyTY5veF49aBJA1gEJOQ
        TvBR8Q==
        =Fm3p
        -----END PGP PUBLIC KEY BLOCK-----
    additional:
      - curl
      - jq
      - ceph-common
    required:
      docker: ${DOCKER_PACKAGE}
      socat: socat=1.7.3.1-1
...

View File

@ -0,0 +1,39 @@
---
# Promenade cluster networking configuration: DNS, service/pod CIDRs and
# static hosts entries.
schema: promenade/KubernetesNetwork/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-network
  layeringDefinition:
    abstract: false
    layer: site
data:
  dns:
    cluster_domain: cluster.local
    service_ip: 10.96.0.10
    # Names that must resolve for bootstrap to be considered healthy.
    bootstrap_validation_checks:
      - calico-etcd.kube-system.svc.cluster.local
      - google.com
      - kubernetes-etcd.kube-system.svc.cluster.local
      - kubernetes.default.svc.cluster.local
    upstream_servers:
      - 8.8.8.8
      - 8.8.4.4
  kubernetes:
    pod_cidr: 10.97.0.0/16
    service_cidr: 10.96.0.0/16
    service_ip: 10.96.0.1
  etcd:
    service_ip: 10.96.0.2
  # Static /etc/hosts entries; "registry" backs the insecure registry
  # configured in the Docker document.
  hosts_entries:
    - ip: 192.168.77.1
      names:
        - registry
  # Optional outbound proxy configuration; uncomment and fill in when the
  # environment requires one.
  # proxy:
  #   url: http://proxy.example.com:8080
  #   additional_no_proxy:
  #     - 10.0.1.1
...

View File

@ -0,0 +1,792 @@
---
# Chart group: kube-proxy, deployed first so service routing works for
# everything that follows.
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-proxy
  layeringDefinition:
    abstract: false
    layer: site
data:
  description: Kubernetes proxy
  # sequenced: charts in this group are deployed one at a time, in order.
  sequenced: true
  chart_group:
    - kubernetes-proxy
---
# Chart group: Calico CNI; etcd must be up before calico itself, hence
# sequenced deployment.
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: container-networking
  layeringDefinition:
    abstract: false
    layer: site
data:
  description: Container networking via Calico
  sequenced: true
  chart_group:
    - calico-etcd
    - calico
---
# Chart group: in-cluster DNS (CoreDNS). Single chart, no sequencing needed.
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: dns
  layeringDefinition:
    abstract: false
    layer: site
data:
  description: Cluster DNS
  chart_group:
    - coredns
---
# Chart group: RBAC policy, applied after the control plane is up.
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-rbac
  layeringDefinition:
    abstract: false
    layer: site
data:
  description: Role Based Access Control configuration for Kubernetes
  sequenced: true
  chart_group:
    - kubernetes-rbac
---
# Chart group: the Kubernetes control-plane components.
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes
  layeringDefinition:
    abstract: false
    layer: site
data:
  description: Kubernetes components
  chart_group:
    - kubernetes-etcd
    - kubernetes-apiserver
    - kubernetes-controller-manager
    - kubernetes-scheduler
---
# helm-toolkit: shared template library other charts list as a dependency;
# not a member of any chart group itself.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: helm-toolkit
data:
  chart_name: helm-toolkit
  release: helm-toolkit
  namespace: helm-toolkit
  timeout: 600
  upgrade:
    no_hooks: true
  values: {}
  source:
    type: git
    location: ${HTK_CHART_REPO}
    subpath: ${HTK_CHART_PATH}
    reference: ${HTK_CHART_BRANCH}
  dependencies: []
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-proxy
layeringDefinition:
abstract: false
layer: site
substitutions:
- src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes
path: $
dest:
path: '$.values.tls.ca'
- src:
schema: deckhand/Certificate/v1
name: proxy
path: $
dest:
path: '$.values.tls.cert'
- src:
schema: deckhand/CertificateKey/v1
name: proxy
path: $
dest:
path: '$.values.tls.key'
data:
chart_name: proxy
release: kubernetes-proxy
namespace: kube-system
timeout: 600
upgrade:
no_hooks: true
values:
tls:
ca: placeholder
cert: placeholder
key: placeholder
images:
proxy: ${KUBE_PROXY_IMAGE}
network:
kubernetes_netloc: apiserver.kubernetes.promenade:6443
pod_cidr: 10.97.0.0/16
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: proxy
dependencies: []
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: calico-etcd
layeringDefinition:
abstract: false
layer: site
substitutions:
- src:
schema: deckhand/CertificateAuthority/v1
name: calico-etcd
path: $
dest:
path: '$.values.tls.client.ca'
- src:
schema: deckhand/CertificateAuthority/v1
name: calico-etcd-peer
path: $
dest:
path: '$.values.tls.peer.ca'
- src:
schema: deckhand/Certificate/v1
name: calico-etcd-anchor
path: $
dest:
path: '$.values.anchor.tls.cert'
- src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-anchor
path: $
dest:
path: '$.values.anchor.tls.key'
- src:
schema: deckhand/Certificate/v1
name: calico-etcd-${GENESIS_NODE_NAME}
path: $
dest:
path: '$.values.nodes[0].tls.client.cert'
- src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-${GENESIS_NODE_NAME}
path: $
dest:
path: '$.values.nodes[0].tls.client.key'
- src:
schema: deckhand/Certificate/v1
name: calico-etcd-${GENESIS_NODE_NAME}-peer
path: $
dest:
path: '$.values.nodes[0].tls.peer.cert'
- src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-${GENESIS_NODE_NAME}-peer
path: $
dest:
path: '$.values.nodes[0].tls.peer.key'
- src:
schema: deckhand/Certificate/v1
name: calico-etcd-${MASTER_NODE_NAME}
path: $
dest:
path: '$.values.nodes[1].tls.client.cert'
- src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-${MASTER_NODE_NAME}
path: $
dest:
path: '$.values.nodes[1].tls.client.key'
- src:
schema: deckhand/Certificate/v1
name: calico-etcd-${MASTER_NODE_NAME}-peer
path: $
dest:
path: '$.values.nodes[1].tls.peer.cert'
- src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-${MASTER_NODE_NAME}-peer
path: $
dest:
path: '$.values.nodes[1].tls.peer.key'
data:
chart_name: etcd
release: calico-etcd
namespace: kube-system
timeout: 600
upgrade:
no_hooks: true
values:
anchor:
etcdctl_endpoint: 10.96.232.136
node_selector:
key: calico-etcd
value: enabled
tls:
cert: placeholder
key: placeholder
etcd:
host_data_path: ${ETCD_CALICO_DATA_PATH}
host_etc_path: ${ETCD_CALICO_ETC_PATH}
bootstrapping:
enabled: true
host_directory: /var/lib/anchor
filename: calico-etcd-bootstrap
images:
etcd: ${CALICO_ETCD_IMAGE}
etcdctl: ${CALICO_ETCDCTL_IMAGE}
nodes:
- name: ${GENESIS_NODE_NAME}
tls:
client:
cert: placeholder
key: placeholder
peer:
cert: placeholder
key: placeholder
- name: ${MASTER_NODE_NAME}
tls:
client:
cert: placeholder
key: placeholder
peer:
cert: placeholder
key: placeholder
service:
name: calico-etcd
ip: 10.96.232.136
client:
port: 6666
target_port: 6666
peer:
port: 6667
target_port: 6667
tls:
client:
ca: placeholder
peer:
ca: placeholder
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: etcd
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: calico
layeringDefinition:
abstract: false
layer: site
substitutions:
- src:
schema: deckhand/CertificateAuthority/v1
name: calico-etcd
path: $
dest:
path: '$.values.etcd.tls.ca'
- src:
schema: deckhand/Certificate/v1
name: calico-node
path: $
dest:
path: '$.values.etcd.tls.cert'
- src:
schema: deckhand/CertificateKey/v1
name: calico-node
path: $
dest:
path: '$.values.etcd.tls.key'
data:
chart_name: calico
release: calico
namespace: kube-system
timeout: 600
upgrade:
no_hooks: true
values:
calico:
ip_autodetection_method: interface=${NODE_NET_IFACE}
pod_ip_cidr: 10.97.0.0/16
ctl:
install_on_host: true
etcd:
service:
ip: 10.96.232.136
port: 6666
tls:
ca: placeholder
cert: placeholder
key: placeholder
images:
cni: ${CALICO_CNI_IMAGE}
ctl: ${CALICO_CTL_IMAGE}
node: ${CALICO_NODE_IMAGE}
policy_controller: ${CALICO_POLICYCTLR_IMAGE}
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: calico
dependencies: []
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: coredns
layeringDefinition:
abstract: false
layer: site
substitutions:
- src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes
path: $
dest:
path: '$.values.tls.ca'
- src:
schema: deckhand/Certificate/v1
name: coredns
path: $
dest:
path: '$.values.tls.cert'
- src:
schema: deckhand/CertificateKey/v1
name: coredns
path: $
dest:
path: '$.values.tls.key'
data:
chart_name: coredns
release: coredns
namespace: kube-system
timeout: 600
upgrade:
no_hooks: true
values:
coredns:
kubernetes_zones:
- cluster.local
- 10.96.0.0/16
- 10.97.0.0/16
upstream_nameservers:
- 8.8.8.8
- 8.8.4.4
zones:
- name: promenade
services:
- bind_name: apiserver.kubernetes
service:
name: kubernetes-apiserver
namespace: kube-system
- bind_name: etcd.kubernetes
service:
name: kubernetes-etcd
namespace: kube-system
- bind_name: etcd.calico
service:
name: calico-etcd
namespace: kube-system
images:
anchor: ${KUBE_ANCHOR_IMAGE}
coredns: ${KUBE_COREDNS_IMAGE}
tls:
ca: placeholder
cert: placeholder
key: placeholder
network:
kubernetes_netloc: apiserver.kubernetes.promenade:6443
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: coredns
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-apiserver
layeringDefinition:
abstract: false
layer: site
substitutions:
- src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes
path: $
dest:
path: $.values.secrets.tls.ca
- src:
schema: deckhand/Certificate/v1
name: apiserver
path: $
dest:
path: $.values.secrets.tls.cert
- src:
schema: deckhand/CertificateKey/v1
name: apiserver
path: $
dest:
path: $.values.secrets.tls.key
- src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes-etcd
path: $
dest:
path: $.values.secrets.etcd.tls.ca
- src:
schema: deckhand/Certificate/v1
name: apiserver-etcd
path: $
dest:
path: $.values.secrets.etcd.tls.cert
- src:
schema: deckhand/CertificateKey/v1
name: apiserver-etcd
path: $
dest:
path: $.values.secrets.etcd.tls.key
- src:
schema: deckhand/PublicKey/v1
name: service-account
path: $
dest:
path: $.values.secrets.service_account.public_key
data:
chart_name: apiserver
release: kubernetes-apiserver
namespace: kube-system
timeout: 600
upgrade:
no_hooks: true
values:
apiserver:
etcd:
endpoints: https://etcd.kubernetes.promenade:2379
images:
anchor: ${KUBE_ANCHOR_IMAGE}
apiserver: ${KUBE_APISERVER_IMAGE}
secrets:
service_account:
public_key: placeholder
tls:
ca: placeholder
cert: placeholder
key: placeholder
etcd:
tls:
ca: placeholder
cert: placeholder
key: placeholder
network:
kubernetes_service_ip: 10.96.0.1
pod_cidr: 10.97.0.0/16
service_cidr: 10.96.0.0/16
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: apiserver
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-controller-manager
layeringDefinition:
abstract: false
layer: site
substitutions:
- src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes
path: $
dest:
path: $.values.secrets.tls.ca
- src:
schema: deckhand/Certificate/v1
name: controller-manager
path: $
dest:
path: $.values.secrets.tls.cert
- src:
schema: deckhand/CertificateKey/v1
name: controller-manager
path: $
dest:
path: $.values.secrets.tls.key
- src:
schema: deckhand/PrivateKey/v1
name: service-account
path: $
dest:
path: $.values.secrets.service_account.private_key
data:
chart_name: controller_manager
release: kubernetes-controller-manager
namespace: kube-system
timeout: 600
upgrade:
no_hooks: true
values:
images:
anchor: ${KUBE_ANCHOR_IMAGE}
controller_manager: ${KUBE_CTLRMGR_IMAGE}
secrets:
service_account:
private_key: placeholder
tls:
ca: placeholder
cert: placeholder
key: placeholder
network:
kubernetes_netloc: apiserver.kubernetes.promenade:6443
pod_cidr: 10.97.0.0/16
service_cidr: 10.96.0.0/16
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: controller_manager
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-scheduler
layeringDefinition:
abstract: false
layer: site
substitutions:
- src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes
path: $
dest:
path: $.values.tls.ca
- src:
schema: deckhand/Certificate/v1
name: scheduler
path: $
dest:
path: $.values.tls.cert
- src:
schema: deckhand/CertificateKey/v1
name: scheduler
path: $
dest:
path: $.values.tls.key
data:
chart_name: scheduler
release: kubernetes-scheduler
namespace: kube-system
timeout: 600
upgrade:
no_hooks: true
values:
tls:
ca: placeholder
cert: placeholder
key: placeholder
network:
kubernetes_netloc: apiserver.kubernetes.promenade:6443
images:
anchor: ${KUBE_ANCHOR_IMAGE}
scheduler: ${KUBE_SCHED_IMAGE}
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: scheduler
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-etcd
layeringDefinition:
abstract: false
layer: site
substitutions:
-
src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes-etcd
path: $
dest:
path: '$.values.tls.client.ca'
-
src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes-etcd-peer
path: $
dest:
path: '$.values.tls.peer.ca'
-
src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-anchor
path: $
dest:
path: '$.values.anchor.tls.cert'
-
src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-anchor
path: $
dest:
path: '$.values.anchor.tls.key'
-
src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-${GENESIS_NODE_NAME}
path: $
dest:
path: '$.values.nodes[0].tls.client.cert'
-
src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-${GENESIS_NODE_NAME}
path: $
dest:
path: '$.values.nodes[0].tls.client.key'
-
src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-${GENESIS_NODE_NAME}-peer
path: $
dest:
path: '$.values.nodes[0].tls.peer.cert'
-
src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-${GENESIS_NODE_NAME}-peer
path: $
dest:
path: '$.values.nodes[0].tls.peer.key'
-
src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-${MASTER_NODE_NAME}
path: $
dest:
path: '$.values.nodes[1].tls.client.cert'
-
src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-${MASTER_NODE_NAME}
path: $
dest:
path: '$.values.nodes[1].tls.client.key'
-
src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-${MASTER_NODE_NAME}-peer
path: $
dest:
path: '$.values.nodes[1].tls.peer.cert'
-
src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-${MASTER_NODE_NAME}-peer
path: $
dest:
path: '$.values.nodes[1].tls.peer.key'
data:
chart_name: etcd
release: kubernetes-etcd
namespace: kube-system
timeout: 600
upgrade:
no_hooks: true
values:
anchor:
etcdctl_endpoint: 10.96.0.2
node_selector:
key: kubernetes-etcd
value: enabled
tls:
cert: placeholder
key: placeholder
etcd:
host_data_path: ${ETCD_KUBE_DATA_PATH}
host_etc_path: ${ETCD_KUBE_ETC_PATH}
images:
etcd: ${KUBE_ETCD_IMAGE}
etcdctl: ${KUBE_ETCDCTL_IMAGE}
nodes:
- name: ${GENESIS_NODE_NAME}
tls:
client:
cert: placeholder
key: placeholder
peer:
cert: placeholder
key: placeholder
- name: ${MASTER_NODE_NAME}
tls:
client:
cert: placeholder
key: placeholder
peer:
cert: placeholder
key: placeholder
service:
name: kubernetes-etcd
ip: 10.96.0.2
client:
port: 2379
target_port: 2379
peer:
port: 2380
target_port: 2380
tls:
client:
ca: placeholder
peer:
ca: placeholder
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: etcd
dependencies:
- helm-toolkit
---
# RBAC chart, sourced from the chart assets baked into the genesis image.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-rbac
  layeringDefinition:
    abstract: false
    layer: site
data:
  chart_name: rbac
  release: rbac
  namespace: kube-system
  timeout: 600
  values: {}
  upgrade:
    no_hooks: true
  source:
    type: local
    location: /etc/genesis/armada/assets/charts
    subpath: rbac
  dependencies: []
...

View File

@ -1,655 +0,0 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: helm-toolkit
data:
chart_name: helm-toolkit
release: helm-toolkit
namespace: helm-toolkit
timeout: 100
values: {}
source:
type: git
location: https://github.com/openstack/openstack-helm
subpath: helm-toolkit
reference: master
dependencies: []
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ceph
data:
chart_name: ceph
release: ceph
namespace: ceph
timeout: 3600
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- name: ceph-bootstrap
type: job
labels:
- application: ceph
- component: bootstrap
- release_group: armada-ucp
- name: ceph-mds-keyring-generator
type: job
labels:
- application: ceph
- component: mds-keyring-generator
- release_group: armada-ucp
- name: ceph-mon-keyring-generator
type: job
labels:
- application: ceph
- component: mon-keyring-generator
- release_group: armada-ucp
- name: ceph-rgw-keyring-generator
type: job
labels:
- application: ceph
- component: rgw-keyring-generator
- release_group: armada-ucp
- name: ceph-storage-keys-generator
type: job
labels:
- application: ceph
- component: storage-keys-generator
- release_group: armada-ucp
- name: ceph-osd-keyring-generator
type: job
labels:
- application: ceph
- component: osd-keyring-generator
- release_group: armada-ucp
values:
labels:
jobs:
node_selector_key: ucp-control-plane
node_selector_value: enabled
endpoints:
identity:
namespace: ucp
object_store:
namespace: ceph
ceph_mon:
namespace: ceph
ceph:
rgw_keystone_auth: true
storage:
osd_directory: /var/lib/openstack-helm/ceph/osd
network:
public: 172.24.1.0/24
cluster: 172.24.1.0/24
deployment:
storage_secrets: true
ceph: true
rbd_provisioner: true
client_secrets: false
rgw_keystone_user_and_endpoints: false
bootstrap:
enabled: true
source:
type: git
location: https://github.com/openstack/openstack-helm
subpath: ceph
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-ceph-config
data:
chart_name: ucp-ceph-config
release: ucp-ceph-config
namespace: ucp
timeout: 3600
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- name: ceph-namespace-client-key-generator
type: job
labels:
- application: ceph
- component: namespace-client-key-generator
- release_group: armada-ucp
values:
labels:
jobs:
node_selector_key: ucp-control-plane
node_selector_value: enabled
endpoints:
identity:
namespace: ucp
object_store:
namespace: ceph
ceph_mon:
namespace: ceph
ceph:
rgw_keystone_auth: true
network:
public: 172.24.1.0/24
cluster: 172.24.1.0/24
deployment:
storage_secrets: false
ceph: false
rbd_provisioner: false
client_secrets: true
rgw_keystone_user_and_endpoints: false
source:
type: git
location: https://github.com/openstack/openstack-helm
subpath: ceph
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-mariadb
data:
chart_name: ucp-mariadb
release: ucp-mariadb
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
values:
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
replicas:
server: 1
source:
type: git
location: https://git.openstack.org/openstack/openstack-helm
subpath: mariadb
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-memcached
data:
chart_name: ucp-memcached
release: ucp-memcached
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
values:
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
source:
type: git
location: https://git.openstack.org/openstack/openstack-helm
subpath: memcached
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-keystone
data:
chart_name: ucp-keystone
release: keystone
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- name: keystone-db-sync
type: job
labels:
- job-name: keystone-db-sync
- name: keystone-db-init
type: job
labels:
- job-name: keystone-db-init
post:
delete: []
create: []
values:
conf:
keystone:
override:
paste:
override:
replicas: 2
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
source:
type: git
location: https://git.openstack.org/openstack/openstack-helm
subpath: keystone
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: maas-postgresql
data:
chart_name: maas-postgresql
release: maas-postgresql
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete: []
create: []
post:
delete: []
create: []
values:
development:
enabled: false
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
source:
type: git
location: https://git.openstack.org/openstack/openstack-helm
subpath: postgresql
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: maas
data:
chart_name: maas
release: maas
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
values:
bootdata_url: http://172.24.1.100:31000/api/v1.0/bootdata/
labels:
rack:
node_selector_key: ucp-control-plane
node_selector_value: enabled
region:
node_selector_key: ucp-control-plane
node_selector_value: enabled
network:
proxy:
node_port:
enabled: true
port: 31800
gui:
node_port:
enabled: true
port: 31900
conf:
maas:
credentials:
secret:
namespace: ucp
url:
maas_url: http://172.24.1.100:31900/MAAS
proxy:
proxy_enabled: 'false'
proxy_server: http://one.proxy.att.com:8080
ntp:
use_external_only: 'false'
ntp_servers: ntp.ubuntu.com
dns:
require_dnssec: 'no'
dns_servers: 8.8.8.8
secrets:
maas_region:
value: 3858a12230ac3c915f300c664f12063f
source:
type: git
location: https://github.com/att-comdev/maas
subpath: charts/maas
reference: refs/changes/79/384379/1
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: drydock
data:
chart_name: drydock
release: drydock
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
values:
images:
tags:
drydock: docker.io/sthussey/drydock:384384v1
drydock_db_sync: docker.io/sthussey/drydock:384384v1
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
network:
drydock:
node_port:
enabled: true
endpoints:
physicalprovisioner:
port:
api:
nodeport: 31000
conf:
drydock:
maasdriver:
maas_api_url: http://172.24.1.100:31900/MAAS/api/2.0/
source:
type: git
location: https://github.com/att-comdev/drydock
subpath: charts/drydock
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: deckhand
data:
chart_name: deckhand
release: deckhand
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
values:
images:
deckhand: quay.io/attcomdev/deckhand:master
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
conf:
deckhand:
DEFAULT:
debug: true
use_stderr: true
use_syslog: true
source:
type: git
location: https://github.com/att-comdev/deckhand
subpath: charts/deckhand
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-barbican
data:
chart_name: ucp-barbican
release: barbican
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete: []
post:
delete: []
create: []
values:
images:
tags:
api: docker.io/kolla/ubuntu-source-barbican-api:3.0.3
db_sync: docker.io/kolla/ubuntu-source-barbican-api:3.0.3
test: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
pod:
replicas:
api: 1
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
source:
type: git
location: https://git.openstack.org/openstack/openstack-helm
subpath: barbican
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: shipyard
data:
chart_name: shipyard
release: shipyard
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
values:
prod_environment: true
images:
airflow: quay.io/attcomdev/airflow:master
shipyard: quay.io/attcomdev/shipyard:master
airflow_db_sync: quay.io/attcomdev/airflow:master
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
network:
shipyard:
node_port: 31901
enable_node_port: true
airflow:
web:
node_port: 32080
enable_node_port: true
conf:
shipyard:
keystone_authtoken:
memcache_security_strategy: None
source:
type: git
location: https://github.com/att-comdev/shipyard
subpath: charts/shipyard
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: armada
data:
chart_name: armada
release: armada
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
values:
images:
tags:
api: quay.io/attcomdev/armada:v0.7.0
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
network:
api:
node_port:
enabled: true
port: 31903
source:
type: git
location: https://github.com/att-comdev/armada
subpath: charts/armada
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-etcd-rabbitmq
data:
chart_name: ucp-etcd-rabbitmq
release: etcd-rabbitmq
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete: []
post:
delete: []
create: []
values:
pod:
replicas:
etcd: 1
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
source:
type: git
location: https://git.openstack.org/openstack/openstack-helm
subpath: etcd
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-rabbitmq
data:
chart_name: ucp-rabbitmq
release: rabbitmq
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete: []
post:
delete: []
create: []
values:
pod:
replicas:
server: 1
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
source:
type: git
location: https://git.openstack.org/openstack/openstack-helm
subpath: rabbitmq
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ingress
data:
chart_name: ingress
release: ingress
namespace: ucp
timeout: 300
install:
no_hooks: false
upgrade:
no_hooks: false
values:
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
source:
type: git
location: https://github.com/openstack/openstack-helm
subpath: ingress
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Manifest/v1
metadata:
schema: metadata/Document/v1
name: ucp-basic
data:
release_prefix: armada-ucp
chart_groups:
- ceph
- ucp-infra
- ucp-services
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: ceph
data:
description: 'Storage Backend'
sequenced: true
chart_group:
- ceph
- ucp-ceph-config
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: ucp-infra
data:
description: 'UCP Infrastructure'
chart_group:
- ucp-mariadb
- ucp-memcached
- maas-postgresql
- ucp-etcd-rabbitmq
- ucp-rabbitmq
- ingress
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: ucp-services
data:
description: 'UCP Services'
chart_group:
- maas
- drydock
- deckhand
- shipyard
- armada
- ucp-keystone
- ucp-barbican
...

View File

@ -289,7 +289,6 @@ data:
upgrade:
no_hooks: false
values:
bootdata_url: http://${DRYDOCK_NODE_IP}:${DRYDOCK_NODE_PORT}/api/v1.0/bootdata/
labels:
rack:
node_selector_key: ucp-control-plane
@ -307,6 +306,8 @@ data:
enabled: true
port: 31900
conf:
drydock:
bootaction_url: http://${DRYDOCK_NODE_IP}:${DRYDOCK_NODE_PORT}/api/v1.0/bootactions/nodes/
maas:
credentials:
secret:
@ -367,6 +368,8 @@ data:
drydock:
maasdriver:
maas_api_url: http://${MAAS_NODE_IP}:${MAAS_NODE_PORT}/MAAS/api/2.0/
plugins:
ingester: drydock_provisioner.ingester.plugins.deckhand.DeckhandIngester
source:
type: git
location: ${DRYDOCK_CHART_REPO}
@ -602,17 +605,6 @@ data:
dependencies:
- helm-toolkit
---
schema: armada/Manifest/v1
metadata:
schema: metadata/Document/v1
name: ucp-basic
data:
release_prefix: armada-ucp
chart_groups:
- ceph
- ucp-infra
- ucp-services
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
@ -653,3 +645,18 @@ data:
- ucp-keystone
- ucp-barbican
...
---
schema: armada/Manifest/v1
metadata:
schema: metadata/Document/v1
name: ucp_deploy
layeringDefinition:
abstract: false
layer: site
data:
release_prefix: ucp
chart_groups:
- ceph
- ucp-infra
- ucp-services
...

View File

@ -31,6 +31,10 @@ export ARMADA_NODE_PORT=${ARMADA_NODE_PORT:-31903}
# Storage
export CEPH_OSD_DIR=${CEPH_OSD_DIR:-"/var/lib/openstack-helm/ceph/osd"}
export ETCD_KUBE_DATA_PATH=${ETCD_KUBE_DATA_PATH:-"/var/lib/etcd/kubernetes"}
export ETCD_KUBE_ETC_PATH=${ETCD_KUBE_ETC_PATH:-"/etc/etcd/kubernetes"}
export ETCD_CALICO_DATA_PATH=${ETCD_CALICO_DATA_PATH:-"/var/lib/etcd/calico"}
export ETCD_CALICO_ETC_PATH=${ETCD_CALICO_ETC_PATH:-"/etc/etcd/calico"}
# Hostnames
export GENESIS_NODE_NAME=${GENESIS_NODE_NAME:-"node1"}
@ -61,7 +65,26 @@ export ARMADA_CHART_REPO=${ARMADA_CHART_REPO:-"https://github.com/att-comdev/arm
export ARMADA_CHART_PATH=${ARMADA_CHART_PATH:-"charts/armada"}
export ARMADA_CHART_BRANCH=${ARMADA_CHART_BRANCH:-"master"}
#Kubernetes artifacts
export KUBE_PROXY_IMAGE=${KUBE_PROXY_IMAGE:-"gcr.io/google_containers/hyperkube-amd64:v1.8.0"}
export KUBE_ETCD_IMAGE=${KUBE_ETCD_IMAGE:-"quay.io/coreos/etcd:v3.0.17"}
export KUBE_ETCDCTL_IMAGE=${KUBE_ETCDCTL_IMAGE:-"quay.io/coreos/etcd:v3.0.17"}
export KUBE_ANCHOR_IMAGE=${KUBE_ANCHOR_IMAGE:-"gcr.io/google_containers/hyperkube-amd64:v1.8.0"}
export KUBE_COREDNS_IMAGE=${KUBE_COREDNS_IMAGE:-"coredns/coredns:0.9.9"}
export KUBE_APISERVER_IMAGE=${KUBE_APISERVER_IMAGE:-"gcr.io/google_containers/hyperkube-amd64:v1.8.0"}
export KUBE_CTLRMGR_IMAGE=${KUBE_CTLRMGR_IMAGE:-"gcr.io/google_containers/hyperkube-amd64:v1.8.0"}
export KUBE_SCHED_IMAGE=${KUBE_SCHED_IMAGE:-"gcr.io/google_containers/hyperkube-amd64:v1.8.0"}
export KUBECTL_IMAGE=${KUBECTL_IMAGE:-"gcr.io/google_containers/hyperkube-amd64:v1.8.0"}
export CALICO_CNI_IMAGE=${CALICO_CNI_IMAGE:-"quay.io/calico/cni:v1.11.0"}
export CALICO_CTL_IMAGE=${CALICO_CTL_IMAGE:-"quay.io/calico/ctl:v1.6.1"}
export CALICO_NODE_IMAGE=${CALICO_NODE_IMAGE:-"quay.io/calico/node:v2.6.1"}
export CALICO_POLICYCTLR_IMAGE=${CALICO_POLICYCTLR_IMAGE:-"quay.io/calico/kube-controllers:v1.0.0"}
export CALICO_ETCD_IMAGE=${CALICO_ETCD_IMAGE:-"quay.io/coreos/etcd:v3.0.17"}
export CALICO_ETCDCTL_IMAGE=${CALICO_ETCDCTL_IMAGE:-"quay.io/coreos/etcd:v3.0.17"}
export KUBE_KUBELET_TAR=${KUBE_KUBELET_TAR:-"https://dl.k8s.io/v1.8.0/kubernetes-node-linux-amd64.tar.gz"}
# Images
export TILLER_IMAGE=${TILLER_IMAGE:-"gcr.io/kubernetes-helm/tiller:v2.5.1"}
export DRYDOCK_IMAGE=${DRYDOCK_IMAGE:-"quay.io/attcomdev/drydock:master"}
export ARMADA_IMAGE=${ARMADA_IMAGE:-"quay.io/attcomdev/armada:master"}
export PROMENADE_IMAGE=${PROMENADE_IMAGE:-"quay.io/attcomdev/promenade:master"}
@ -69,10 +92,13 @@ export DECKHAND_IMAGE=${DECKHAND_IMAGE:-"quay.io/attcomdev/deckhand:master"}
export SHIPYARD_IMAGE=${SHIPYARD_IMAGE:-"quay.io/attcomdev/shipyard:master"}
export AIRFLOW_IMAGE=${AIRFLOW_IMAGE:-"quay.io/attcomdev/airflow:master"}
# Docker
export DOCKER_REPO_URL=${DOCKER_REPO_URL:-"http://apt.dockerproject.org/repo"}
export DOCKER_PACKAGE=${DOCKER_PACKAGE:-"docker-engine=1.13.1-0~ubuntu-xenial"}
# Filenames
export ARMADA_CONFIG=${ARMADA_CONFIG:-"armada.yaml"}
export PROMENADE_CONFIG=${PROMENADE_CONFIG:-"promenade.yaml"}
export UP_SCRIPT_FILE=${UP_SCRIPT_FILE:-"up.sh"}
export UP_SCRIPT_FILE=${UP_SCRIPT_FILE:-"genesis.sh"}
# Validate environment
if [[ $GENESIS_NODE_IP == "NA" || $MASTER_NODE_IP == "NA" ]]
@ -99,87 +125,67 @@ then
exit -1
fi
rm -rf configs
mkdir configs
chmod 777 configs
cat joining-host-config.yaml.sub | envsubst > configs/joining-host-config.yaml
cat armada-resources.yaml.sub | envsubst > configs/armada-resources.yaml
cat armada.yaml.sub | envsubst > ${ARMADA_CONFIG}
cat Genesis.yaml.sub | envsubst > configs/Genesis.yaml
cat HostSystem.yaml.sub | envsubst > configs/HostSystem.yaml
cp KubernetesNetwork.yaml.sub configs/KubernetesNetwork.yaml
cp Docker.yaml configs/
cp ArmadaManifest.yaml configs/
if [[ $PROXY_ENABLED == 'true' ]]
then
export http_proxy=$PROXY_ADDRESS
export https_proxy=$PROXY_ADDRESS
export HTTP_PROXY=$PROXY_ADDRESS
export HTTPS_PROXY=$PROXY_ADDRESS
echo ' proxy:' >> configs/KubernetesNetwork.yaml
echo " url: ${PROXY_ADDRESS}" >> configs/KubernetesNetwork.yaml
fi
# Install docker
apt -qq update
apt -y install docker.io jq
# Required inputs
# Promenade input-config.yaml
# Armada Manifest for integrated UCP services
# Generate certificates
docker run --rm -t -w /target -v $(pwd)/configs:/target ${PROMENADE_IMAGE} promenade generate-certs -o /target $(ls ./configs)
cat promenade.yaml.sub | envsubst > ${PROMENADE_CONFIG}
cat armada.yaml.sub | envsubst > ${ARMADA_CONFIG}
rm -rf configs
mkdir configs
if [[ $? -ne 0 ]]
then
echo "Promenade certificate generation failed."
exit
fi
# Generate Promenade configuration
docker run -t -v $(pwd):/target ${PROMENADE_IMAGE} promenade generate -c /target/${PROMENADE_CONFIG} -o /target/configs
# Generate promenade join artifacts
docker run --rm -t -w /target -v $(pwd)/configs:/target ${PROMENADE_IMAGE} promenade build-all -o /target --validators $(ls ./configs)
if [[ $? -ne 0 ]]
then
echo "Promenade join artifact generation failed."
exit
fi
# Do Promenade genesis process
cd configs
sudo bash ${UP_SCRIPT_FILE} ./${GENESIS_NODE_NAME}.yaml
. ${UP_SCRIPT_FILE}
cd ..
if [[ $? -ne 0 ]]
then
echo "Genesis process failed."
exit
fi
# Setup kubeconfig
mkdir ~/.kube
cp -r /etc/kubernetes/admin/pki ~/.kube/pki
cat /etc/kubernetes/admin/kubeconfig.yaml | sed -e 's/\/etc\/kubernetes\/admin/./' > ~/.kube/config
# Polling to ensure genesis is complete
while [[ -z $(kubectl get pods -n kube-system | grep 'kube-dns' | grep -e '3/3') ]]
do
sleep 5
done
docker run -t -v ~/.kube:/armada/.kube -v $(pwd):/target --net=host ${ARMADA_IMAGE} apply /target/${ARMADA_CONFIG}
# Squash Kubernetes RBAC to be compatible w/ OSH
kubectl update -f ./rbac-generous-permissions.yaml
# Do Armada deployment of UCP integrated services
docker run -t -v ~/.kube:/armada/.kube -v $(pwd):/target --net=host \
${ARMADA_IMAGE} apply /target/${ARMADA_CONFIG} --tiller-host=${GENESIS_NODE_IP} --tiller-port=44134
# Polling for UCP service deployment
deploy_counter=1
deploy_timeout=${1:-720}
check_timeout_counter() {
# Check total elapsed time
# The default time out is set to 1hr
# This value can be changed by setting $1
if [[ $deploy_counter -eq $deploy_timeout ]]; then
echo 'UCP control plane deployment timed out.'
break
fi
}
while true; do
# Check the status of drydock, deckhand, armada and shipyard api pod
# Ignore db or ks related pod
for i in drydock deckhand armada shipyard
do
while [[ -z $(kubectl get pods -n ucp | grep $i | grep -v db | grep -v ks | grep Running) ]]
do
((deploy_counter++))
check_timeout_counter
sleep 5
done
done
# Check that the total elapsed time is less than time out
# Print message stating that UCP Control Plane is deployed
if [[ $deploy_counter -lt $deploy_timeout ]]; then
echo 'UCP control plane deployed.'
fi
# Exit while loop
break
done
echo 'UCP control plane deployed.'

View File

@ -12,27 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Site/Region wide definitions. Each design part will be a constituent
# of the design for exactly one Region
apiVersion: 'drydock/v1'
kind: Region
schema: 'drydock/Region/v1'
metadata:
schema: 'metadata/Document/v1'
name: atl_foundry
date: 17-FEB-2017
description: Sample site design
author: sh8121@att.com
spec:
# List of query-based definitions for applying tags to deployed nodes
data:
tag_definitions:
- tag: 'high_memory'
# Tag to apply to nodes that qualify for the query
definition_type: 'lshw_xpath'
# Only support on type for now - 'lshw_xpath' used by MaaS
definition: //node[@id="memory"]/'size units="bytes"' > 137438953472
# an xpath query that is run against the output of 'lshw -xml' from the node
# Image and package repositories needed by Drydock drivers. Needs to be defined
repositories:
- name: 'ubuntu-main'
authorized_keys:
- |
ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAgqUTJwZEMjZCWOnXQw+FFdvnf/lYrGXm01
@ -40,194 +28,114 @@ spec:
8ZyDE3x1FYno5u3OB4rRDcvKe6J0ygPcu4Uec5ASsd58yGnE4zTl1D/J30rNa00si+s= r
sa-key-20120124
---
apiVersion: 'drydock/v1'
kind: NetworkLink
schema: 'drydock/Region/v1'
metadata:
schema: 'metadata/Document/v1'
name: oob
region: atl_foundry
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on
data:
labels:
- 'noconfig'
spec:
bonding:
# Mode can be 'disabled', '802.3ad', 'balanced-rr', 'active-backup'. Defaults to disabled
mode: 'disabled'
# Physical link default MTU size. No default
mtu: 1500
# Physical link speed. Supports 'auto', '100full'. Gigabit+ speeds require auto. No default
linkspeed: 'auto'
# Settings for using a link for multiple L2 networks
trunking:
# Trunking mode. Supports 'disabled', '802.1q'. Defaults to disabled
mode: disabled
# If disabled, what network is this port on. If '802.1q' what is the default network for the port. No default.
default_network: oob
allowed_networks:
- 'oob'
---
apiVersion: 'drydock/v1'
kind: NetworkLink
schema: 'drydock/NetworkLink/v1'
metadata:
schema: 'metadata/Document/v1'
name: pxe-rack1
region: atl_foundry
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on
spec:
data:
bonding:
# Mode can be 'disabled', '802.3ad', 'balanced-rr', 'active-backup'. Defaults to disabled
mode: 'disabled'
# Physical link default MTU size. No default
mtu: 1500
# Physical link speed. Supports 'auto', '100full'. Gigabit+ speeds require auto. No default
linkspeed: 'auto'
# Settings for using a link for multiple L2 networks
trunking:
# Trunking mode. Supports 'disabled', '802.1q'. Defaults to disabled
mode: disabled
# If disabled, what network is this port on. If '802.1q' what is the default network for the port. No default.
default_network: pxe-rack1
allowed_networks:
- 'pxe-rack1'
---
apiVersion: 'drydock/v1'
kind: Network
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: oob
region: atl_foundry
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 2 and 3 attributes. Primary key is 'name'.
data:
labels:
- 'noconfig'
spec:
# CIDR representation of network number and netmask
cidr: '172.24.10.0/24'
# How addresses are allocated on the network. Supports 'static', 'dhcp'. Defaults to 'static'
allocation: 'static'
---
apiVersion: 'drydock/v1'
kind: Network
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: pxe-rack1
region: atl_foundry
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 2 and 3 attributes. Primary key is 'name'.
spec:
# CIDR representation of network number and netmask
data:
cidr: '172.24.1.0/24'
# How addresses are allocated on the network. Supports 'static', 'dhcp'. Defaults to 'static'
allocation: 'static'
routes:
# The network being routed to in CIDR notation. Default gateway is 0.0.0.0/0.
- subnet: '0.0.0.0/0'
# Next hop for traffic using this route
gateway: '172.24.1.1'
# Selection metric for the host selecting this route. No default
metric: 100
ranges:
# Type of range. Supports 'reserved', 'static' or 'dhcp'. No default
- type: 'reserved'
# Start of the address range, inclusive. No default
start: '172.24.1.1'
# End of the address range, inclusive. No default
end: '172.24.1.100'
- type: 'dhcp'
start: '172.24.1.200'
end: '172.24.1.250'
---
apiVersion: 'drydock/v1'
kind: Network
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: pxe-rack2
region: atl_foundry
date: 17-FEB-2017
author: sh8121@att.com
description: Describe layer 2 and 3 attributes. Primary key is 'name'.
spec:
# CIDR representation of network number and netmask
data:
cidr: '172.24.2.0/24'
# How addresses are allocated on the network. Supports 'static', 'dhcp'. Defaults to 'static'
allocation: 'static'
routes:
# The network being routed to in CIDR notation. Default gateway is 0.0.0.0/0.
- subnet: '0.0.0.0/0'
# Next hop for traffic using this route
gateway: '172.24.2.1'
# Selection metric for the host selecting this route. No default
metric: 100
ranges:
# Type of range. Supports 'reserved', 'static' or 'dhcp'. No default
- type: 'reserved'
# Start of the address range, inclusive. No default
start: '172.24.2.1'
# End of the address range, inclusive. No default
end: '172.24.2.100'
- type: 'dhcp'
start: '172.24.2.200'
end: '172.24.2.250'
---
apiVersion: 'drydock/v1'
kind: HardwareProfile
schema: 'drydock/HardwareProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: DellR820v1
region: atl_foundry
date: 17-FEB-2017
author: sh8121@att.com
description: Describe server hardware attributes. Not a specific server, but profile adopted by a server definition.
spec:
# Chassis vendor
data:
vendor: 'Dell'
# Chassis model generation
generation: '1'
# Chassis model version
hw_version: '2'
# Certified BIOS version for this chassis
bios_version: '2.2.3'
# Boot mode. Supports 'bios' or 'uefi'
boot_mode: 'bios'
# How the node should be initially bootstrapped. Supports 'pxe'
bootstrap_protocol: 'pxe'
# What network interface to use for PXE booting
# for chassis that support selection
pxe_interface: '0'
# Mapping of hardware alias/role to physical address
device_aliases:
# the device alias that will be referenced in HostProfile or BaremetalNode design parts
pnic01:
# The hardware bus the device resides on. Supports 'pci' and 'scsi'. No default
bus_type: 'pci'
# The type of device as reported by lshw. Can be used to validate hardware manifest. No default
dev_type: 'Intel 10Gbps NIC'
# Physical address on the bus
address: '0000:00:03.0'
---
apiVersion: 'drydock/v1'
kind: HostProfile
schema: 'drydock/HostProfile/v1'
metadata:
name: defaults
region: atl_foundry
date: 17-FEB-2017
author: sh8121@att.com
description: Specify a physical server.
spec:
# The HardwareProfile describing the node hardware. No default.
schema: 'metadata/Document/v1'
data:
hardware_profile: 'DellR820v1'
primary_network: 'pxe-rack1'
# OOB access to node
oob:
# Type of OOB access. Supports 'ipmi'
type: 'ipmi'
# Which network - as defined in a Network design part - to access the OOB interface on
network: 'oob'
# Account name for authenticating on the OOB interface
account: 'admin'
# Credential for authentication on the OOB interface. The OOB driver will interpret this.
credential: 'password'
# How local node storage is configured
storage:
physical_devices:
sda:
@ -247,45 +155,27 @@ spec:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
# Physical and logical network interfaces
interfaces:
# What the interface should be named in the operating system. May not match a hardware device name
ens3:
# The NetworkLink connected to this interface. Must be the name of a NetworkLink design part
ens3f0:
device_link: 'pxe-rack1'
# Hardware devices that support this interface. For configurating a physical device, this would be a list of one
# For bonds, this would be a list of all the physical devices in the bond. These can refer to HardwareProfile device aliases
# or explicit device names
slaves:
- 'ens3'
# Network that will be accessed on this interface. These should each be to the name of a Network design part
# Multiple networks listed here assume that this interface is attached to a NetworkLink supporting trunking
- 'ens3f0'
networks:
- 'pxe-rack1'
platform:
# Which image to deploy on the node, must be available in the provisioner. Defaults to 'ubuntu/xenial'
image: 'ubuntu/xenial'
# Which kernel to enable. Defaults to generic, can also be hwe (hardware enablement)
kernel: 'generic'
# K/V list of kernel parameters to configure on boot. No default. Use value of true for params that are just flags
metadata:
# Explicit tags to propagate to Kubernetes. Simple strings of any value
rack: rack1
---
apiVersion: 'drydock/v1'
kind: BaremetalNode
schema: 'drydock/BaremetalNode/v1'
metadata:
schema: 'metadata/Document/v1'
name: node2
region: atl_foundry
date: 17-FEB-2017
author: sh8121@att.com
description: Specify a physical server.
spec:
data:
host_profile: defaults
addressing:
# The name of a defined Network design part also listed in the 'networks' section of a interface definition
- network: 'pxe-rack1'
# Address should be an explicit IP address assignment or 'dhcp'
address: '172.24.1.101'
- network: 'oob'
address: '172.24.10.101'

View File

@ -0,0 +1,220 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Drydock Region document (Deckhand format): region-wide settings
# for the atl_foundry sample site.
schema: 'drydock/Region/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: atl_foundry
data:
  # Query-based definitions for applying tags to deployed nodes.
  tag_definitions:
    - tag: 'high_memory'
      # 'lshw_xpath': the query below is run against `lshw -xml`
      # output from the node (used by the MaaS driver).
      definition_type: 'lshw_xpath'
      # NOTE(review): unquoted plain scalar with embedded quotes —
      # confirm it survives Deckhand's YAML parse as intended.
      definition: //node[@id="memory"]/'size units="bytes"' > 137438953472
  # SSH public keys installed on provisioned nodes; the literal block
  # scalar preserves the wrapped key lines (including newlines) verbatim.
  authorized_keys:
    - |
      ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAgqUTJwZEMjZCWOnXQw+FFdvnf/lYrGXm01
      rf/ZYUanoymkMWIK1/c8a3Ez9/HY3dyfWBcuzlIV4bNCvJcMg4UPuh6NQBJWAlfp7wfW9O
      8ZyDE3x1FYno5u3OB4rRDcvKe6J0ygPcu4Uec5ASsd58yGnE4zTl1D/J30rNa00si+s= r
      sa-key-20120124
---
# Layer-1 link attributes for the out-of-band (IPMI) network link.
# These settings are generally things the switch and server must
# agree on.
schema: 'drydock/NetworkLink/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: oob
data:
  labels:
    # presumably marks this link as not host-configured — TODO confirm
    # against Drydock's label handling.
    noconfig: 'enabled'
  bonding:
    # Supports 'disabled', '802.3ad', 'balanced-rr', 'active-backup'.
    mode: 'disabled'
  mtu: 1500
  # 'auto' required for gigabit+ speeds.
  linkspeed: 'auto'
  trunking:
    # With trunking disabled, default_network is the single network
    # carried on this link.
    mode: disabled
    default_network: oob
  allowed_networks:
    - 'oob'
---
# Layer-1 link attributes for the rack-1 PXE/provisioning link.
schema: 'drydock/NetworkLink/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: pxe-rack1
data:
  bonding:
    # Supports 'disabled', '802.3ad', 'balanced-rr', 'active-backup'.
    mode: 'disabled'
  mtu: 1500
  # 'auto' required for gigabit+ speeds.
  linkspeed: 'auto'
  trunking:
    mode: disabled
    default_network: pxe-rack1
  allowed_networks:
    - 'pxe-rack1'
---
# Layer-2/3 attributes for the out-of-band network (no routes or
# address ranges are defined for it here).
schema: 'drydock/Network/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: oob
data:
  labels:
    # presumably marks this network as not host-configured — TODO
    # confirm against Drydock's label handling.
    noconfig: 'enabled'
  cidr: '172.24.10.0/24'
---
# Layer-2/3 attributes for the rack-1 provisioning network, including
# the default route and address allocation ranges.
schema: 'drydock/Network/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: pxe-rack1
data:
  cidr: '172.24.1.0/24'
  routes:
    # 0.0.0.0/0 denotes the default gateway for hosts on this network.
    - subnet: '0.0.0.0/0'
      gateway: '172.24.1.1'
      metric: 100
  ranges:
    # Addresses held back from allocation (gateway, infrastructure).
    - type: 'reserved'
      start: '172.24.1.1'
      end: '172.24.1.100'
    # Pool handed out by DHCP during PXE boot.
    - type: 'dhcp'
      start: '172.24.1.200'
      end: '172.24.1.250'
---
# Layer-2/3 attributes for the rack-2 provisioning network; mirrors
# pxe-rack1 on the 172.24.2.0/24 subnet.
schema: 'drydock/Network/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: pxe-rack2
data:
  cidr: '172.24.2.0/24'
  routes:
    # 0.0.0.0/0 denotes the default gateway for hosts on this network.
    - subnet: '0.0.0.0/0'
      gateway: '172.24.2.1'
      metric: 100
  ranges:
    - type: 'reserved'
      start: '172.24.2.1'
      end: '172.24.2.100'
    - type: 'dhcp'
      start: '172.24.2.200'
      end: '172.24.2.250'
---
# Hardware profile describing a server model (not a specific server);
# adopted by HostProfile/BaremetalNode documents via hardware_profile.
schema: 'drydock/HardwareProfile/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: DellR820v1
data:
  vendor: 'Dell'
  generation: '1'
  hw_version: '2'
  # Certified BIOS version for this chassis.
  bios_version: '2.2.3'
  # 'bios' or 'uefi'.
  boot_mode: 'bios'
  bootstrap_protocol: 'pxe'
  # Which NIC to PXE-boot from, for chassis that support selection.
  # NOTE(review): unquoted integer here; the pre-refactor document used
  # the string '0' — confirm the Drydock schema accepts an int.
  pxe_interface: 0
  # Map of hardware alias/role -> physical bus address, referenced by
  # HostProfile/BaremetalNode documents.
  device_aliases:
    pnic01:
      # Bus the device resides on ('pci' or 'scsi').
      bus_type: 'pci'
      # As reported by lshw; can be used to validate the hardware manifest.
      dev_type: 'Intel 10Gbps NIC'
      address: '0000:00:03.0'
---
# Baseline host profile inherited by BaremetalNode documents
# (see node2 below, which sets host_profile: defaults).
schema: 'drydock/HostProfile/v1'
metadata:
  name: defaults
  schema: 'metadata/Document/v1'
data:
  # HardwareProfile document describing the node hardware.
  hardware_profile: 'DellR820v1'
  primary_network: 'pxe-rack1'
  # Out-of-band management access for the node.
  oob:
    type: 'ipmi'
    # Network document on which the OOB interface is reached.
    network: 'oob'
    account: 'admin'
    # Interpreted by the OOB driver.
    credential: 'password'
  # Local node storage layout.
  storage:
    physical_devices:
      sda:
        labels:
          bootdrive: 'true'
        partitions:
          - name: 'root'
            size: '10g'
            bootable: true
            filesystem:
              mountpoint: '/'
              fstype: 'ext4'
              mount_options: 'defaults'
          - name: 'boot'
            size: '1g'
            filesystem:
              mountpoint: '/boot'
              fstype: 'ext4'
              mount_options: 'defaults'
  # Physical/logical network interfaces; OS interface name keyed,
  # slaves refer to device aliases or explicit device names.
  interfaces:
    ens3f0:
      # NetworkLink document this interface attaches to.
      device_link: 'pxe-rack1'
      slaves:
        - 'ens3f0'
      # Network documents accessed via this interface.
      networks:
        - 'pxe-rack1'
  platform:
    # Image must be available in the provisioner (MaaS).
    image: 'ubuntu/xenial'
    # 'generic' or 'hwe' (hardware enablement).
    kernel: 'generic'
  # Node metadata; tags propagated onward (per the pre-refactor
  # document, to Kubernetes).
  metadata:
    rack: rack1
---
# A specific physical server; inherits settings from the 'defaults'
# HostProfile and pins its network addresses.
schema: 'drydock/BaremetalNode/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: node2
data:
  host_profile: defaults
  addressing:
    # Each entry names a Network document; address is an explicit IP
    # assignment (or 'dhcp').
    - network: 'pxe-rack1'
      address: '172.24.1.101'
    - network: 'oob'
      address: '172.24.10.101'
  metadata:
    tags:
      - 'masters'
...
---
# Drydock BootAction: assets pushed to a node at first boot so it
# joins the Kubernetes cluster via Promenade (the new bootaction_url
# integration replacing the old bootdata mechanism).
schema: 'drydock/BootAction/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: promjoin
  # Document contains no secrets, so cleartext storage is acceptable.
  storagePolicy: 'cleartext'
  labels:
    application: 'drydock'
data:
  # Restrict this boot action to node2 only.
  node_filter:
    filter_set_type: 'union'
    filter_set:
      - filter_type: 'union'
        node_names:
          - 'node2'
  assets:
    # Join script fetched from the Promenade artifact server; the
    # location is itself templated ({{node.hostname}}) before download.
    - path: /opt/promjoin.sh
      type: file
      permissions: '555'
      location: http://172.24.1.100:6880/{{node.hostname}}.join
      location_pipeline:
        - template
      data_pipeline:
        - utf8_decode
    # systemd unit that runs the join script once at boot; the payload
    # below is base64 (decoded by the data_pipeline).
    - path: /lib/systemd/system/promjoin.service
      type: unit
      permissions: '600'
      data: |-
        W1VuaXRdCkRlc2NyaXB0aW9uPVByb21lbmFkZSBJbml0aWFsaXphdGlvbiBTZXJ2aWNlCkFmdGVy
        PW5ldHdvcmstb25saW5lLnRhcmdldCBsb2NhbC1mcy50YXJnZXQKQ29uZGl0aW9uUGF0aEV4aXN0
        cz0hL3Zhci9saWIvcHJvbS5kb25lCgpbU2VydmljZV0KVHlwZT1zaW1wbGUKRXhlY1N0YXJ0PS9v
        cHQvcHJvbWpvaW4uc2gKCltJbnN0YWxsXQpXYW50ZWRCeT1tdWx0aS11c2VyLnRhcmdldAo=
      data_pipeline:
        - base64_decode
        - utf8_decode
...

View File

@ -0,0 +1,46 @@
---
# Promenade node definition for the genesis host.
# NOTE(review): ${...} placeholders are substituted by envsubst before
# this document is parsed as YAML (per the deployment script).
schema: promenade/KubernetesNode/v1
metadata:
  schema: metadata/Document/v1
  name: ${GENESIS_NODE_NAME}
  layeringDefinition:
    abstract: false
    layer: site
data:
  hostname: ${GENESIS_NODE_NAME}
  ip: ${GENESIS_NODE_IP}
  # Node the host contacts to join the cluster.
  join_ip: ${MASTER_NODE_IP}
  labels:
    dynamic:
      - ucp-control-plane=enabled
      - ceph-osd=enabled
      - ceph-mon=enabled
      - ceph-rgw=enabled
      - ceph-mds=enabled
---
# Promenade node definition for the additional master host; it joins
# via the genesis node and carries the full control-plane label set.
# NOTE(review): ${...} placeholders are substituted by envsubst before
# this document is parsed as YAML (per the deployment script).
schema: promenade/KubernetesNode/v1
metadata:
  schema: metadata/Document/v1
  name: ${MASTER_NODE_NAME}
  layeringDefinition:
    abstract: false
    layer: site
data:
  hostname: ${MASTER_NODE_NAME}
  ip: ${MASTER_NODE_IP}
  # Node the host contacts to join the cluster.
  join_ip: ${GENESIS_NODE_IP}
  labels:
    static:
      - node-role.kubernetes.io/master=
    dynamic:
      - calico-etcd=enabled
      - kubernetes-apiserver=enabled
      - kubernetes-controller-manager=enabled
      - kubernetes-etcd=enabled
      - kubernetes-scheduler=enabled
      - ucp-control-plane=enabled
      - ceph-osd=enabled
      - ceph-mon=enabled
      - ceph-rgw=enabled
      - ceph-mds=enabled
...

View File

@ -1,82 +0,0 @@
---
apiVersion: promenade/v1
kind: Cluster
metadata:
name: example
target: none
spec:
nodes:
${GENESIS_NODE_NAME}:
ip: ${GENESIS_NODE_IP}
roles:
- master
- genesis
additional_labels:
- beta.kubernetes.io/arch=amd64
- ucp-control-plane=enabled
- ceph-mon=enabled
- ceph-osd=enabled
- ceph-mds=enabled
${MASTER_NODE_NAME}:
ip: ${MASTER_NODE_IP}
roles:
- master
additional_labels:
- beta.kubernetes.io/arch=amd64
- ucp-control-plane=enabled
- ceph-mon=enabled
- ceph-osd=enabled
- ceph-mds=enabled
---
apiVersion: promenade/v1
kind: Network
metadata:
cluster: example
name: example
target: all
spec:
cluster_domain: cluster.local
cluster_dns: 10.96.0.10
kube_service_ip: 10.96.0.1
pod_ip_cidr: 10.97.0.0/16
service_ip_cidr: 10.96.0.0/16
calico_etcd_service_ip: 10.96.232.136
calico_interface: ${NODE_NET_IFACE}
dns_servers:
- 8.8.8.8
- 8.8.4.4
---
apiVersion: promenade/v1
kind: Versions
metadata:
cluster: example
name: example
target: all
spec:
images:
armada: ${ARMADA_IMAGE}
calico:
cni: quay.io/calico/cni:v1.9.1
etcd: quay.io/coreos/etcd:v3.2.1
node: quay.io/calico/node:v1.3.0
policy-controller: quay.io/calico/kube-policy-controller:v0.6.0
kubernetes:
apiserver: gcr.io/google_containers/hyperkube-amd64:v1.6.7
controller-manager: quay.io/attcomdev/kube-controller-manager:v1.6.7
dns:
dnsmasq: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.2
kubedns: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.2
sidecar: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.2
etcd: quay.io/coreos/etcd:v3.2.1
kubectl: gcr.io/google_containers/hyperkube-amd64:v1.6.7
proxy: gcr.io/google_containers/hyperkube-amd64:v1.6.7
scheduler: gcr.io/google_containers/hyperkube-amd64:v1.6.7
promenade: ${PROMENADE_IMAGE}
tiller: gcr.io/kubernetes-helm/tiller:v2.5.0
packages:
docker: docker.io=1.13.1-0ubuntu1~16.04.2
dnsmasq: dnsmasq=2.75-1ubuntu0.16.04.3
socat: socat=1.7.3.1-1
additional_packages:
- ceph-common=10.2.9-0ubuntu0.16.04.1
...

View File

@ -1,16 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1alpha1
kind: ClusterRoleBinding
metadata:
name: generous-permissions
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: Group
name: system:masters
- kind: Group
name: system:authenticated
- kind: Group
name: system:unauthenticated

View File

@ -4,6 +4,6 @@ export CEPH_PUBLIC_NET=172.24.1.0/24
export GENESIS_NODE_IP=172.24.1.100
export MASTER_NODE_IP=172.24.1.101
export NODE_NET_IFACE=ens3
export PROMENADE_IMAGE=quay.io/attcomdev/promenade:v0.2.2
export ARMADA_IMAGE=quay.io/attcomdev/armada:v0.7.0
export DRYDOCK_IMAGE=quay.io/attcomdev/drydock:v0.2.0
export PROMENADE_IMAGE=quay.io/attcomdev/promenade:master
export ARMADA_IMAGE=quay.io/attcomdev/armada:v0.7.1
export DRYDOCK_IMAGE=sthussey/drydock:386668v14