Add basic OpenStack components to single node dev

Change-Id: Idd0b54482970e6c773126507266d0bd501f55225
This commit is contained in:
Mark Burnett 2018-05-18 10:56:19 -05:00 committed by Bryan Strassner
parent ee7fc61fb3
commit 4de027a36f
42 changed files with 1265 additions and 3041 deletions

View File

@ -67,16 +67,15 @@ data:
labels: labels:
dynamic: dynamic:
- calico-etcd=enabled - calico-etcd=enabled
- ceph-mds=enabled - coredns=enabled
- ceph-mon=enabled
- ceph-osd=enabled
- ceph-rgw=enabled
- ceph-mgr=enabled
- kube-ingress=enabled - kube-ingress=enabled
- kubernetes-apiserver=enabled - kubernetes-apiserver=enabled
- kubernetes-controller-manager=enabled - kubernetes-controller-manager=enabled
- kubernetes-etcd=enabled - kubernetes-etcd=enabled
- kubernetes-scheduler=enabled - kubernetes-scheduler=enabled
- openstack-compute-node=enabled
- openstack-control-plane=enabled
- openvswitch=enabled
- promenade-genesis=enabled - promenade-genesis=enabled
- ucp-control-plane=enabled - ucp-control-plane=enabled
files: files:

View File

@ -55,7 +55,7 @@ metadata:
name: common-addresses name: common-addresses
path: .calico.ip_autodetection_method path: .calico.ip_autodetection_method
dest: dest:
path: .values.conf.node.ip_autodetection_method path: .values.conf.node.IP_AUTODETECTION_METHOD
# Certificates # Certificates
- -

View File

@ -0,0 +1,18 @@
---
# Armada chart group: shared infrastructure services (database, messaging,
# caching, and the compute-node virt/network daemons) that the OpenStack
# component charts depend on.
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-component-infrastructure
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
data:
  description: "OpenStack Component Infrastructure"
  # Charts in this group may be deployed in parallel.
  # Canonical lowercase boolean (was `False`, which YAML 1.2 core parsers
  # treat as a plain string rather than a boolean).
  sequenced: false
  chart_group:
    - openstack-mariadb
    - rabbitmq
    - memcached
    - libvirt
    - openvswitch

View File

@ -0,0 +1,17 @@
---
# Armada chart group: the shared/common OpenStack services deployed after
# the infrastructure group (identity, image, orchestration, dashboard).
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-component-shared
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
data:
  description: "OpenStack Shared and Common components"
  # Charts in this group may be deployed in parallel.
  # Canonical lowercase boolean (was `False`, which YAML 1.2 core parsers
  # treat as a plain string rather than a boolean).
  sequenced: false
  chart_group:
    - keystone
    - glance
    - heat
    - horizon

View File

@ -0,0 +1,92 @@
---
# Armada chart for the OpenStack Image service (glance).
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: glance
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source is substituted from the site-wide software-versions
    # document so repository/version pins are managed in one place.
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.openstack.glance
      dest:
        path: .source
data:
  chart_name: glance
  release: glance
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: airship-glance
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      # Delete prior job resources before upgrading, presumably so the
      # hooks can be re-created and re-run — TODO confirm against Armada
      # upgrade semantics.
      delete:
        - type: job
          labels:
            application: glance
            component: db-init
        - type: job
          labels:
            application: glance
            component: db-sync
        - type: job
          labels:
            application: glance
            component: ks-user
        - type: job
          labels:
            application: glance
            component: ks-service
        - type: job
          labels:
            application: glance
            component: ks-endpoints
        - type: job
          labels:
            application: glance
            component: storage-init
  values:
    labels:
      api:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      registry:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    # PVC-backed image storage (this commit removes the Ceph node labels
    # from the single-node dev environment).
    storage: pvc
  dependencies:
    - openstack-glance-helm-toolkit
---
# Per-chart helm-toolkit dependency, pinned via software-versions.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-glance-helm-toolkit
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.openstack.glance-htk
      dest:
        path: .source
data:
  chart_name: openstack-glance-helm-toolkit
  release: openstack-glance-helm-toolkit
  namespace: helm-toolkit
  values: {}
  dependencies: []

View File

@ -0,0 +1,97 @@
---
# Armada chart for the OpenStack Orchestration service (heat).
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: heat
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source pinned centrally in the software-versions document.
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.openstack.heat
      dest:
        path: .source
data:
  chart_name: heat
  release: heat
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: airship-heat
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      # Delete prior job/cronjob resources before upgrading so the chart
      # hooks can be re-created — TODO confirm against Armada semantics.
      delete:
        - type: job
          labels:
            application: heat
            component: db-init
        - type: job
          labels:
            application: heat
            component: db-sync
        - type: job
          labels:
            application: heat
            component: ks-user
        - type: job
          labels:
            application: heat
            component: ks-service
        - type: job
          labels:
            application: heat
            component: ks-endpoints
        - type: cronjob
          labels:
            application: heat
            component: engine-cleaner
  values:
    labels:
      api:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      cfn:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      cloudwatch:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      engine:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
  dependencies:
    - openstack-heat-helm-toolkit
---
# Per-chart helm-toolkit dependency, pinned via software-versions.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-heat-helm-toolkit
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.openstack.heat-htk
      dest:
        path: .source
data:
  chart_name: openstack-heat-helm-toolkit
  release: openstack-heat-helm-toolkit
  namespace: helm-toolkit
  values: {}
  dependencies: []

View File

@ -0,0 +1,72 @@
---
# Armada chart for the OpenStack Dashboard (horizon).
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: horizon
  # NOTE(review): horizon is the only chart here carrying a metadata-level
  # `component` label; presumably used for document selection elsewhere —
  # verify against the consuming manifests.
  labels:
    component: horizon
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source pinned centrally in the software-versions document.
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.openstack.horizon
      dest:
        path: .source
data:
  chart_name: horizon
  release: horizon
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: airship-horizon
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      # Delete prior job resources before upgrading so chart hooks can be
      # re-created.
      delete:
        - type: job
          labels:
            application: horizon
            component: db-init
        - type: job
          labels:
            application: horizon
            component: db-sync
    post:
      # Explicitly empty: no post-upgrade create actions.
      create: []
  values:
    labels:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
  dependencies:
    - openstack-horizon-helm-toolkit
---
# Per-chart helm-toolkit dependency, pinned via software-versions.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-horizon-helm-toolkit
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.openstack.horizon-htk
      dest:
        path: .source
data:
  chart_name: openstack-horizon-helm-toolkit
  release: openstack-horizon-helm-toolkit
  namespace: helm-toolkit
  values: {}
  dependencies: []

View File

@ -0,0 +1,97 @@
---
# Armada chart for the OpenStack Identity service (keystone).
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: keystone
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source pinned centrally in the software-versions document.
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.openstack.keystone
      dest:
        path: .source
data:
  chart_name: keystone
  # NOTE(review): release is prefixed (`openstack-keystone`) unlike the
  # sibling glance/heat/horizon charts, whose releases use the bare chart
  # name — confirm this asymmetry is intentional.
  release: openstack-keystone
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: airship-openstack-keystone
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      # Delete prior job/cronjob resources before upgrading so chart
      # hooks can be re-created.
      delete:
        - type: job
          labels:
            application: keystone
            component: bootstrap
        - type: job
          labels:
            application: keystone
            component: credential-setup
        - type: job
          labels:
            application: keystone
            component: db-init
        - type: job
          labels:
            application: keystone
            component: db-sync
        - type: job
          labels:
            application: keystone
            component: fernet-setup
        - type: job
          labels:
            application: keystone
            component: domain-manage
        - type: cronjob
          labels:
            application: keystone
            component: credential-rotate
        - type: cronjob
          labels:
            application: keystone
            component: fernet-rotate
  values:
    labels:
      api:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
  dependencies:
    - openstack-keystone-helm-toolkit
---
# Per-chart helm-toolkit dependency, pinned via software-versions.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-keystone-helm-toolkit
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.openstack.keystone-htk
      dest:
        path: .source
data:
  chart_name: openstack-keystone-helm-toolkit
  release: openstack-keystone-helm-toolkit
  namespace: helm-toolkit
  values: {}
  dependencies: []

View File

@ -0,0 +1,63 @@
---
# Armada chart for libvirt, the hypervisor daemon on compute nodes.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: libvirt
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source pinned centrally in the software-versions document.
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.openstack.libvirt
      dest:
        path: .source
data:
  chart_name: libvirt
  release: openstack-libvirt
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: airship-openstack-libvirt
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
  values:
    labels:
      agent:
        # Runs on compute nodes, not the control plane.
        libvirt:
          node_selector_key: openstack-compute-node
          node_selector_value: enabled
    conf:
      ceph:
        # No Ceph in this single-node dev environment.
        enabled: false
  dependencies:
    - openstack-libvirt-helm-toolkit
---
# Per-chart helm-toolkit dependency, pinned via software-versions.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-libvirt-helm-toolkit
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.openstack.libvirt-htk
      dest:
        path: .source
data:
  chart_name: openstack-libvirt-helm-toolkit
  release: openstack-libvirt-helm-toolkit
  namespace: helm-toolkit
  values: {}
  dependencies: []

View File

@ -0,0 +1,65 @@
---
# Armada chart for MariaDB, the shared OpenStack database backend.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-mariadb
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source pinned centrally in the software-versions document.
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.openstack.mariadb
      dest:
        path: .source
data:
  chart_name: mariadb
  release: openstack-mariadb
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: airship-openstack-mariadb
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
  values:
    volume:
      # Ephemeral storage for the dev environment: no persistent volume.
      enabled: false
    labels:
      server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      prometheus_mysql_exporter:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
  dependencies:
    - openstack-mariadb-helm-toolkit
---
# Per-chart helm-toolkit dependency, pinned via software-versions.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-mariadb-helm-toolkit
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.openstack.mariadb-htk
      dest:
        path: .source
data:
  chart_name: openstack-mariadb-helm-toolkit
  release: openstack-mariadb-helm-toolkit
  namespace: helm-toolkit
  values: {}
  dependencies: []

View File

@ -0,0 +1,60 @@
---
# Armada chart for memcached, the shared OpenStack caching layer.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: memcached
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source pinned centrally in the software-versions document.
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.openstack.memcached
      dest:
        path: .source
data:
  chart_name: memcached
  release: openstack-memcached
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: airship-openstack-memcached
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
  values:
    labels:
      server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
  dependencies:
    - openstack-memcached-helm-toolkit
---
# Per-chart helm-toolkit dependency, pinned via software-versions.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-memcached-helm-toolkit
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.openstack.memcached-htk
      dest:
        path: .source
data:
  chart_name: openstack-memcached-helm-toolkit
  release: openstack-memcached-helm-toolkit
  namespace: helm-toolkit
  values: {}
  dependencies: []

View File

@ -0,0 +1,60 @@
---
# Armada chart for Open vSwitch, the dataplane used by neutron.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openvswitch
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source pinned centrally in the software-versions document.
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.openstack.openvswitch
      dest:
        path: .source
data:
  chart_name: openvswitch
  release: openvswitch
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: airship-openvswitch
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
  values:
    labels:
      ovs:
        # Scheduled by the dedicated `openvswitch` node label rather than
        # the control-plane label.
        node_selector_key: openvswitch
        node_selector_value: enabled
  dependencies:
    - openstack-openvswitch-helm-toolkit
---
# Per-chart helm-toolkit dependency, pinned via software-versions.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-openvswitch-helm-toolkit
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.openstack.openvswitch-htk
      dest:
        path: .source
data:
  chart_name: openstack-openvswitch-helm-toolkit
  release: openstack-openvswitch-helm-toolkit
  namespace: helm-toolkit
  values: {}
  dependencies: []

View File

@ -0,0 +1,62 @@
---
# Armada chart for RabbitMQ, the shared OpenStack message bus.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: rabbitmq
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source pinned centrally in the software-versions document.
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.openstack.rabbitmq
      dest:
        path: .source
data:
  chart_name: rabbitmq
  release: rabbitmq
  namespace: openstack
  wait:
    timeout: 1800
    labels:
      release_group: airship-rabbitmq
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
  values:
    labels:
      server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    pod:
      replicas:
        # Single replica: this manifest targets a single-node dev host.
        server: 1
  dependencies:
    - openstack-rabbitmq-helm-toolkit
---
# Per-chart helm-toolkit dependency, pinned via software-versions.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-rabbitmq-helm-toolkit
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.openstack.rabbitmq-htk
      dest:
        path: .source
data:
  chart_name: openstack-rabbitmq-helm-toolkit
  release: openstack-rabbitmq-helm-toolkit
  namespace: helm-toolkit
  values: {}
  dependencies: []

View File

@ -0,0 +1,15 @@
---
# Armada chart group: the compute kit (nova + neutron), deployed after the
# shared OpenStack services.
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-compute-kit
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
data:
  description: "Deploy nova and neutron"
  # Charts in this group may be deployed in parallel.
  # Canonical lowercase boolean (was `False`, which YAML 1.2 core parsers
  # treat as a plain string rather than a boolean).
  sequenced: false
  chart_group:
    - nova
    - neutron

View File

@ -0,0 +1,122 @@
---
# Armada chart for the OpenStack Networking service (neutron).
# NOTE(review): unlike the shared-service charts, this document has no
# `wait`/`release_group` stanza — confirm whether that omission is
# intentional for the compute kit.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: neutron
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source pinned centrally in the software-versions document.
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.openstack.neutron
      dest:
        path: .source
data:
  chart_name: neutron
  release: neutron
  namespace: openstack
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      # Delete prior job resources before upgrading so chart hooks can be
      # re-created.
      delete:
        - type: job
          labels:
            application: neutron
            component: db-init
        - type: job
          labels:
            application: neutron
            component: db-sync
        - type: job
          labels:
            application: neutron
            component: ks-user
        - type: job
          labels:
            application: neutron
            component: ks-service
        - type: job
          labels:
            application: neutron
            component: ks-endpoints
  values:
    labels:
      agent:
        dhcp:
          node_selector_key: openstack-control-plane
          node_selector_value: enabled
        l3:
          node_selector_key: openstack-control-plane
          node_selector_value: enabled
        metadata:
          node_selector_key: openstack-control-plane
          node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      lb:
        node_selector_key: linuxbridge
        node_selector_value: enabled
      ovs:
        node_selector_key: openvswitch
        node_selector_value: enabled
      server:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      test:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    network:
      interface:
        # Tunnel traffic over docker0 in this single-node dev setup.
        tunnel: docker0
    conf:
      neutron:
        DEFAULT:
          # Single node: disable HA routers and run one agent per
          # router/network. Canonical lowercase boolean (was `False`,
          # which YAML 1.2 core parsers treat as a plain string).
          l3_ha: false
          min_l3_agents_per_router: 1
          max_l3_agents_per_router: 1
          l3_ha_network_type: vxlan
          dhcp_agents_per_network: 1
      plugins:
        ml2_conf:
          ml2_type_flat:
            flat_networks: public
        openvswitch_agent:
          agent:
            tunnel_types: vxlan
          ovs:
            # Quoted: the value contains a colon and should never be
            # re-parsed as a mapping by any tooling.
            bridge_mappings: "public:br-ex"
        linuxbridge_agent:
          linux_bridge:
            bridge_mappings: "public:br-ex"
  dependencies:
    - openstack-neutron-helm-toolkit
---
# Per-chart helm-toolkit dependency, pinned via software-versions.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-neutron-helm-toolkit
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.openstack.neutron-htk
      dest:
        path: .source
data:
  chart_name: openstack-neutron-helm-toolkit
  release: openstack-neutron-helm-toolkit
  namespace: helm-toolkit
  values: {}
  dependencies: []

View File

@ -0,0 +1,137 @@
---
# Armada chart for the OpenStack Compute service (nova).
# NOTE(review): like neutron, this document has no `wait`/`release_group`
# stanza — confirm whether that omission is intentional.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: nova
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source pinned centrally in the software-versions document.
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.openstack.nova
      dest:
        path: .source
data:
  chart_name: nova
  release: nova
  namespace: openstack
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      # Delete prior job/cronjob resources (including the placement
      # keystone jobs) before upgrading so chart hooks can be re-created.
      delete:
        - type: job
          labels:
            application: nova
            component: db-init
        - type: job
          labels:
            application: nova
            component: db-sync
        - type: job
          labels:
            application: nova
            component: ks-user
        - type: job
          labels:
            application: nova
            component: ks-service
        - type: job
          labels:
            application: nova
            component: ks-endpoints
        - type: job
          labels:
            application: placement
            component: ks-user
        - type: job
          labels:
            application: placement
            component: ks-service
        - type: job
          labels:
            application: placement
            component: ks-endpoints
        - type: job
          labels:
            application: nova
            component: cell-setup
        - type: cronjob
          labels:
            application: nova
            component: cell-setup
  values:
    labels:
      agent:
        # Compute agents run on compute nodes; everything else below is
        # pinned to the control plane.
        compute:
          node_selector_key: openstack-compute-node
          node_selector_value: enabled
        compute_ironic:
          node_selector_key: openstack-compute-node
          node_selector_value: enabled
      api_metadata:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      conductor:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      consoleauth:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      job:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      novncproxy:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      osapi:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      placement:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      scheduler:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      spiceproxy:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
      test:
        node_selector_key: openstack-control-plane
        node_selector_value: enabled
    conf:
      ceph:
        # No Ceph in this single-node dev environment.
        enabled: false
      nova:
        libvirt:
          # qemu (software emulation) — dev hosts may lack nested KVM.
          virt_type: qemu
  dependencies:
    - openstack-nova-helm-toolkit
---
# Per-chart helm-toolkit dependency, pinned via software-versions.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-nova-helm-toolkit
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
  substitutions:
    # Chart source
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.openstack.nova-htk
      dest:
        path: .source
data:
  chart_name: openstack-nova-helm-toolkit
  release: openstack-nova-helm-toolkit
  namespace: helm-toolkit
  values: {}
  dependencies: []

View File

@ -0,0 +1,14 @@
---
# Armada chart group: the ingress controller fronting the OpenStack
# namespace services.
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: openstack-ingress-controller
  layeringDefinition:
    abstract: false
    layer: global
  storagePolicy: cleartext
data:
  description: "OpenStack Ingress Controller"
  # Single chart, so sequencing is moot; written canonically lowercase
  # (was `False`, which YAML 1.2 core parsers treat as a plain string).
  sequenced: false
  chart_group:
    - openstack-ingress-controller

View File

@ -0,0 +1,62 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: openstack-ingress-controller
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.openstack.ingress
dest:
path: .source
data:
chart_name: openstack-ingress-controller
release: openstack-ingress-controller
namespace: openstack
wait:
timeout: 300
labels:
release_group: airship-openstack-ingress-controller
install:
no_hooks: false
upgrade:
no_hooks: false
values:
labels:
server:
node_selector_key: openstack-control-plane
node_selector_value: enabled
error_server:
node_selector_key: openstack-control-plane
node_selector_value: enabled
dependencies:
- openstack-ingress-helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: openstack-ingress-helm-toolkit
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.openstack.ingress-htk
dest:
path: .source
data:
chart_name: openstack-ingress-helm-toolkit
release: openstack-ingress-helm-toolkit
namespace: helm-toolkit
values: {}
dependencies: []

View File

@ -160,6 +160,9 @@ data:
maas_api_url: http://MAAS_IP:MAAS_PORT/MAAS/api/2.0/ maas_api_url: http://MAAS_IP:MAAS_PORT/MAAS/api/2.0/
plugins: plugins:
ingester: drydock_provisioner.ingester.plugins.deckhand.DeckhandIngester ingester: drydock_provisioner.ingester.plugins.deckhand.DeckhandIngester
oob_driver:
- 'drydock_provisioner.drivers.oob.pyghmi_driver.driver.PyghmiDriver'
- 'drydock_provisioner.drivers.oob.manual_driver.driver.ManualDriver'
dependencies: dependencies:
- drydock-htk - drydock-htk
... ...

View File

@ -18,7 +18,6 @@ metadata:
path: .charts.ucp.keystone path: .charts.ucp.keystone
dest: dest:
path: .source path: .source
# Images # Images
- src: - src:
schema: pegleg/SoftwareVersions/v1 schema: pegleg/SoftwareVersions/v1
@ -26,7 +25,6 @@ metadata:
path: .images.ucp.keystone path: .images.ucp.keystone
dest: dest:
path: .values.images.tags path: .values.images.tags
# Endpoints # Endpoints
- src: - src:
schema: pegleg/EndpointCatalogue/v1 schema: pegleg/EndpointCatalogue/v1
@ -77,7 +75,6 @@ metadata:
dest: dest:
path: .values.endpoints.oslo_db.path path: .values.endpoints.oslo_db.path
pattern: DB_NAME pattern: DB_NAME
# Secrets # Secrets
- dest: - dest:
path: .values.endpoints.identity.auth.admin.password path: .values.endpoints.identity.auth.admin.password
@ -138,8 +135,6 @@ data:
job: job:
node_selector_key: ucp-control-plane node_selector_key: ucp-control-plane
node_selector_value: enabled node_selector_value: enabled
dependencies: dependencies:
- ucp-keystone-htk - ucp-keystone-htk
... ...

View File

@ -9,13 +9,13 @@ metadata:
storagePolicy: cleartext storagePolicy: cleartext
data: data:
description: UCP Services description: UCP Services
sequenced: false sequenced: true
chart_group: chart_group:
- ucp-maas
- ucp-tiller - ucp-tiller
- ucp-armada - ucp-armada
- ucp-barbican - ucp-barbican
- ucp-deckhand - ucp-deckhand
- ucp-drydock
- ucp-maas
- ucp-promenade - ucp-promenade
- ucp-shipyard - ucp-shipyard
- ucp-drydock

View File

@ -132,7 +132,7 @@ data:
type: git type: git
location: https://github.com/att-comdev/armada location: https://github.com/att-comdev/armada
subpath: charts/armada subpath: charts/armada
reference: 2f1997c8b1acfc25b59275d10db0e3539e8b15b8 reference: 2b714888c490a9f7c5a11383eb18b7226d1b1dc8
armada-htk: armada-htk:
type: git type: git
location: https://github.com/openstack/openstack-helm location: https://github.com/openstack/openstack-helm
@ -162,7 +162,7 @@ data:
type: git type: git
location: https://github.com/att-comdev/deckhand location: https://github.com/att-comdev/deckhand
subpath: charts/deckhand subpath: charts/deckhand
reference: 99e3064eda9da0227780b57ee30baeb264b3040d reference: 7385d077395dde975200a071f7ea503a9522e32c
deckhand-htk: deckhand-htk:
type: git type: git
location: https://github.com/openstack/openstack-helm location: https://github.com/openstack/openstack-helm
@ -182,7 +182,7 @@ data:
type: git type: git
location: https://github.com/att-comdev/drydock location: https://github.com/att-comdev/drydock
subpath: charts/drydock subpath: charts/drydock
reference: be667ab3c01663693a18c679d283ce572023d376 reference: 506e06623a5f1c11c0d34f2089851cc8381f06ae
drydock-htk: drydock-htk:
type: git type: git
location: https://github.com/openstack/openstack-helm location: https://github.com/openstack/openstack-helm
@ -202,7 +202,7 @@ data:
type: git type: git
location: https://git.openstack.org/openstack/openstack-helm location: https://git.openstack.org/openstack/openstack-helm
subpath: postgresql subpath: postgresql
reference: refs/changes/80/569480/1 reference: refs/changes/80/569480/2
# Liveness probe is too aggressive right now, using ref above instead # Liveness probe is too aggressive right now, using ref above instead
# location: https://github.com/openstack/openstack-helm # location: https://github.com/openstack/openstack-helm
# subpath: postgresql # subpath: postgresql
@ -236,7 +236,7 @@ data:
type: git type: git
location: https://github.com/att-comdev/maas location: https://github.com/att-comdev/maas
subpath: charts/maas subpath: charts/maas
reference: 791e49c5193a3c69f23052acb17673fc3e59d0e2 reference: 3e4849e132353f22378fa7d10838f3e0c2f0eef2
maas-htk: maas-htk:
type: git type: git
location: https://github.com/openstack/openstack-helm location: https://github.com/openstack/openstack-helm
@ -276,7 +276,7 @@ data:
type: git type: git
location: https://github.com/att-comdev/shipyard location: https://github.com/att-comdev/shipyard
subpath: charts/shipyard subpath: charts/shipyard
reference: ac4dac972dfaee6b2c1b9878af4ca781193f9725 reference: afc2ea501deda27083b951f14791c5ed6141d8ae
shipyard-htk: shipyard-htk:
type: git type: git
location: https://github.com/openstack/openstack-helm location: https://github.com/openstack/openstack-helm
@ -292,12 +292,134 @@ data:
location: https://github.com/openstack/openstack-helm location: https://github.com/openstack/openstack-helm
subpath: helm-toolkit subpath: helm-toolkit
reference: f902cd14fac7de4c4c9f7d019191268a6b4e9601 reference: f902cd14fac7de4c4c9f7d019191268a6b4e9601
openstack:
glance:
type: git
location: https://github.com/openstack/openstack-helm
subpath: glance
reference: a4fa9b761e2de14df588c1e37f3693174f15ad36
glance-htk:
type: git
location: https://github.com/openstack/openstack-helm-infra
subpath: helm-toolkit
reference: 9f921f23fb2df702981a0b3d33062ff607350f75
heat:
type: git
location: https://github.com/openstack/openstack-helm
subpath: heat
reference: a4fa9b761e2de14df588c1e37f3693174f15ad36
heat-htk:
type: git
location: https://github.com/openstack/openstack-helm-infra
subpath: helm-toolkit
reference: 9f921f23fb2df702981a0b3d33062ff607350f75
horizon:
type: git
location: https://git.openstack.org/openstack/openstack-helm
subpath: horizon
reference: a4fa9b761e2de14df588c1e37f3693174f15ad36
horizon-htk:
type: git
location: https://github.com/openstack/openstack-helm-infra
subpath: helm-toolkit
reference: 9f921f23fb2df702981a0b3d33062ff607350f75
ingress:
type: git
location: https://github.com/openstack/openstack-helm
subpath: ingress
reference: a4fa9b761e2de14df588c1e37f3693174f15ad36
ingress-htk:
type: git
location: https://github.com/openstack/openstack-helm-infra
subpath: helm-toolkit
reference: 9f921f23fb2df702981a0b3d33062ff607350f75
keystone:
type: git
location: https://github.com/openstack/openstack-helm
subpath: keystone
reference: a4fa9b761e2de14df588c1e37f3693174f15ad36
keystone-htk:
type: git
location: https://github.com/openstack/openstack-helm-infra
subpath: helm-toolkit
reference: 9f921f23fb2df702981a0b3d33062ff607350f75
libvirt:
type: git
location: https://github.com/openstack/openstack-helm
subpath: libvirt
reference: a4fa9b761e2de14df588c1e37f3693174f15ad36
libvirt-htk:
type: git
location: https://github.com/openstack/openstack-helm-infra
subpath: helm-toolkit
reference: 9f921f23fb2df702981a0b3d33062ff607350f75
mariadb:
type: git
location: https://github.com/openstack/openstack-helm
subpath: mariadb
reference: a4fa9b761e2de14df588c1e37f3693174f15ad36
mariadb-htk:
type: git
location: https://github.com/openstack/openstack-helm-infra
subpath: helm-toolkit
reference: 9f921f23fb2df702981a0b3d33062ff607350f75
memcached:
type: git
location: https://github.com/openstack/openstack-helm
subpath: memcached
reference: a4fa9b761e2de14df588c1e37f3693174f15ad36
memcached-htk:
type: git
location: https://github.com/openstack/openstack-helm-infra
subpath: helm-toolkit
reference: 9f921f23fb2df702981a0b3d33062ff607350f75
neutron:
type: git
location: https://github.com/openstack/openstack-helm
subpath: neutron
reference: a4fa9b761e2de14df588c1e37f3693174f15ad36
neutron-htk:
type: git
location: https://github.com/openstack/openstack-helm-infra
subpath: helm-toolkit
reference: 9f921f23fb2df702981a0b3d33062ff607350f75
nova:
type: git
location: https://github.com/openstack/openstack-helm
subpath: nova
reference: a4fa9b761e2de14df588c1e37f3693174f15ad36
nova-htk:
type: git
location: https://github.com/openstack/openstack-helm-infra
subpath: helm-toolkit
reference: 9f921f23fb2df702981a0b3d33062ff607350f75
openvswitch:
type: git
location: https://github.com/openstack/openstack-helm
subpath: openvswitch
reference: a4fa9b761e2de14df588c1e37f3693174f15ad36
openvswitch-htk:
type: git
location: https://github.com/openstack/openstack-helm-infra
subpath: helm-toolkit
reference: 9f921f23fb2df702981a0b3d33062ff607350f75
rabbitmq:
type: git
location: https://github.com/openstack/openstack-helm
subpath: rabbitmq
reference: a4fa9b761e2de14df588c1e37f3693174f15ad36
rabbitmq-htk:
type: git
location: https://github.com/openstack/openstack-helm-infra
      subpath: helm-toolkit
      reference: 9f921f23fb2df702981a0b3d33062ff607350f75
  files:
    kubelet: https://dl.k8s.io/v1.10.2/kubernetes-node-linux-amd64.tar.gz
  images:
    ucp:
      armada:
        api: artifacts-aic.atlantafoundry.com/att-comdev/armada:2b714888c490a9f7c5a11383eb18b7226d1b1dc8
        dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.3.0
        ks_endpoints: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
        ks_service: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
# @@ -311,10 +433,10 @@ data:  (intervening context omitted in this view)
        ks_service: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
        ks_endpoints: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
      deckhand:
        deckhand: artifacts-aic.atlantafoundry.com/att-comdev/deckhand:7385d077395dde975200a071f7ea503a9522e32c
        dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.3.0
        db_init: docker.io/postgres:9.5
        db_sync: artifacts-aic.atlantafoundry.com/att-comdev/deckhand:7385d077395dde975200a071f7ea503a9522e32c
        ks_endpoints: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
        ks_service: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
        ks_user: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
# @@ -330,31 +452,31 @@ data:  (intervening context omitted in this view)
        ks_user: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
        barbican_api: docker.io/kolla/ubuntu-source-barbican-api:3.0.3
      drydock:
        drydock: artifacts-aic.atlantafoundry.com/att-comdev/drydock:506e06623a5f1c11c0d34f2089851cc8381f06ae
        dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.3.0
        ks_user: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
        ks_service: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
        ks_endpoints: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
        drydock_db_init: docker.io/postgres:9.5
        drydock_db_sync: artifacts-aic.atlantafoundry.com/att-comdev/drydock:506e06623a5f1c11c0d34f2089851cc8381f06ae
      shipyard:
        airflow: artifacts-aic.atlantafoundry.com/att-comdev/airflow:afc2ea501deda27083b951f14791c5ed6141d8ae
        shipyard: artifacts-aic.atlantafoundry.com/att-comdev/shipyard:afc2ea501deda27083b951f14791c5ed6141d8ae
        dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.3.0
        shipyard_db_init: docker.io/postgres:9.5
        shipyard_db_sync: artifacts-aic.atlantafoundry.com/att-comdev/shipyard:afc2ea501deda27083b951f14791c5ed6141d8ae
        airflow_db_init: docker.io/postgres:9.5
        airflow_db_sync: artifacts-aic.atlantafoundry.com/att-comdev/airflow:afc2ea501deda27083b951f14791c5ed6141d8ae
        ks_user: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
        ks_service: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
        ks_endpoints: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
      maas:
        db_init: docker.io/postgres:9.5
        db_sync: artifacts-aic.atlantafoundry.com/att-comdev/maas-region-controller:3e4849e132353f22378fa7d10838f3e0c2f0eef2
        maas_rack: artifacts-aic.atlantafoundry.com/att-comdev/maas-rack-controller:3e4849e132353f22378fa7d10838f3e0c2f0eef2
        maas_region: artifacts-aic.atlantafoundry.com/att-comdev/maas-region-controller:3e4849e132353f22378fa7d10838f3e0c2f0eef2
        bootstrap: artifacts-aic.atlantafoundry.com/att-comdev/maas-region-controller:3e4849e132353f22378fa7d10838f3e0c2f0eef2
        export_api_key: artifacts-aic.atlantafoundry.com/att-comdev/maas-region-controller:3e4849e132353f22378fa7d10838f3e0c2f0eef2
        maas_cache: artifacts-aic.atlantafoundry.com/att-comdev/sstream-cache@sha256:70aa6cc9cdf0d07ed933c99f232ecc82cb89048ffdb030811e44a537bdfad67e
        dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.3.0
      keystone:

View File

@ -8,7 +8,7 @@ metadata:
    layer: global
  storagePolicy: cleartext
data:
  release_prefix: airship
  chart_groups:
    - kubernetes-proxy
    - kubernetes-container-networking

View File

@ -8,7 +8,7 @@ metadata:
layer: global layer: global
storagePolicy: cleartext storagePolicy: cleartext
data: data:
release_prefix: aic release_prefix: airship
chart_groups: chart_groups:
- kubernetes-proxy - kubernetes-proxy
- kubernetes-container-networking - kubernetes-container-networking
@ -25,3 +25,7 @@ data:
# same change in bootstrap manifest. # same change in bootstrap manifest.
# - ucp-divingbell # - ucp-divingbell
- ucp-services - ucp-services
- openstack-ingress-controller
- openstack-component-infrastructure
- openstack-component-shared
- openstack-compute-kit

View File

@ -1,17 +0,0 @@
---
schema: armada/Manifest/v1
metadata:
schema: metadata/Document/v1
name: cluster-bootstrap
layeringDefinition:
abstract: false
layer: site
data:
release_prefix: ucp
chart_groups:
- kubernetes-proxy
- container-networking
- dns
- kubernetes
- ingress-system
...

View File

@ -1,16 +0,0 @@
---
schema: promenade/Docker/v1
metadata:
schema: metadata/Document/v1
name: docker
layeringDefinition:
abstract: false
layer: site
data:
config:
insecure-registries:
- registry:5000
- 10.24.20.19:30092
live-restore: true
storage-driver: overlay2
...

View File

@ -1,42 +0,0 @@
---
schema: promenade/Genesis/v1
metadata:
schema: metadata/Document/v1
name: genesis
layeringDefinition:
abstract: false
layer: site
data:
hostname: ${GENESIS_NODE_NAME}
ip: ${GENESIS_NODE_IP}
labels:
static:
- calico-etcd=enabled
- node-role.kubernetes.io/master=
dynamic:
- kubernetes-apiserver=enabled
- kubernetes-controller-manager=enabled
- kubernetes-etcd=enabled
- kubernetes-scheduler=enabled
- promenade-genesis=enabled
- ucp-control-plane=enabled
- ceph-osd=enabled
- ceph-mon=enabled
- ceph-rgw=enabled
- ceph-mds=enabled
- ceph-mgr=enabled
- kube-ingress=enabled
images:
armada: ${ARMADA_IMAGE}
helm:
tiller: ${TILLER_IMAGE}
kubernetes:
apiserver: ${KUBE_APISERVER_IMAGE}
controller-manager: ${KUBE_CTLRMGR_IMAGE}
etcd: ${KUBE_ETCD_IMAGE}
scheduler: ${KUBE_SCHED_IMAGE}
files:
- path: /var/lib/anchor/calico-etcd-bootstrap
content: "# placeholder for triggering calico etcd bootstrapping"
mode: 0644
...

View File

@ -1,62 +0,0 @@
---
schema: promenade/HostSystem/v1
metadata:
schema: metadata/Document/v1
name: host-system
layeringDefinition:
abstract: false
layer: site
data:
files:
- path: /opt/kubernetes/bin/kubelet
tar_url: ${KUBE_KUBELET_TAR}
tar_path: kubernetes/node/bin/kubelet
mode: 0555
images:
haproxy: ${HAPROXY_IMAGE}
helm:
helm: lachlanevenson/k8s-helm:v2.7.2
kubernetes:
kubectl: ${KUBECTL_IMAGE}
packages:
repositories:
- deb ${DOCKER_REPO_URL} ubuntu-xenial main
keys:
- |-
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBFWln24BEADrBl5p99uKh8+rpvqJ48u4eTtjeXAWbslJotmC/CakbNSqOb9o
ddfzRvGVeJVERt/Q/mlvEqgnyTQy+e6oEYN2Y2kqXceUhXagThnqCoxcEJ3+KM4R
mYdoe/BJ/J/6rHOjq7Omk24z2qB3RU1uAv57iY5VGw5p45uZB4C4pNNsBJXoCvPn
TGAs/7IrekFZDDgVraPx/hdiwopQ8NltSfZCyu/jPpWFK28TR8yfVlzYFwibj5WK
dHM7ZTqlA1tHIG+agyPf3Rae0jPMsHR6q+arXVwMccyOi+ULU0z8mHUJ3iEMIrpT
X+80KaN/ZjibfsBOCjcfiJSB/acn4nxQQgNZigna32velafhQivsNREFeJpzENiG
HOoyC6qVeOgKrRiKxzymj0FIMLru/iFF5pSWcBQB7PYlt8J0G80lAcPr6VCiN+4c
NKv03SdvA69dCOj79PuO9IIvQsJXsSq96HB+TeEmmL+xSdpGtGdCJHHM1fDeCqkZ
hT+RtBGQL2SEdWjxbF43oQopocT8cHvyX6Zaltn0svoGs+wX3Z/H6/8P5anog43U
65c0A+64Jj00rNDr8j31izhtQMRo892kGeQAaaxg4Pz6HnS7hRC+cOMHUU4HA7iM
zHrouAdYeTZeZEQOA7SxtCME9ZnGwe2grxPXh/U/80WJGkzLFNcTKdv+rwARAQAB
tDdEb2NrZXIgUmVsZWFzZSBUb29sIChyZWxlYXNlZG9ja2VyKSA8ZG9ja2VyQGRv
Y2tlci5jb20+iQI4BBMBAgAiBQJVpZ9uAhsvBgsJCAcDAgYVCAIJCgsEFgIDAQIe
AQIXgAAKCRD3YiFXLFJgnbRfEAC9Uai7Rv20QIDlDogRzd+Vebg4ahyoUdj0CH+n
Ak40RIoq6G26u1e+sdgjpCa8jF6vrx+smpgd1HeJdmpahUX0XN3X9f9qU9oj9A4I
1WDalRWJh+tP5WNv2ySy6AwcP9QnjuBMRTnTK27pk1sEMg9oJHK5p+ts8hlSC4Sl
uyMKH5NMVy9c+A9yqq9NF6M6d6/ehKfBFFLG9BX+XLBATvf1ZemGVHQusCQebTGv
0C0V9yqtdPdRWVIEhHxyNHATaVYOafTj/EF0lDxLl6zDT6trRV5n9F1VCEh4Aal8
L5MxVPcIZVO7NHT2EkQgn8CvWjV3oKl2GopZF8V4XdJRl90U/WDv/6cmfI08GkzD
YBHhS8ULWRFwGKobsSTyIvnbk4NtKdnTGyTJCQ8+6i52s+C54PiNgfj2ieNn6oOR
7d+bNCcG1CdOYY+ZXVOcsjl73UYvtJrO0Rl/NpYERkZ5d/tzw4jZ6FCXgggA/Zxc
jk6Y1ZvIm8Mt8wLRFH9Nww+FVsCtaCXJLP8DlJLASMD9rl5QS9Ku3u7ZNrr5HWXP
HXITX660jglyshch6CWeiUATqjIAzkEQom/kEnOrvJAtkypRJ59vYQOedZ1sFVEL
MXg2UCkD/FwojfnVtjzYaTCeGwFQeqzHmM241iuOmBYPeyTY5veF49aBJA1gEJOQ
TvBR8Q==
=Fm3p
-----END PGP PUBLIC KEY BLOCK-----
additional:
- curl
- jq
- ceph-common
required:
docker: ${DOCKER_PACKAGE}
socat: socat=1.7.3.1-1
...

View File

@ -1,19 +0,0 @@
---
schema: promenade/Kubelet/v1
metadata:
schema: metadata/Document/v1
name: kubelet
layeringDefinition:
abstract: false
layer: site
data:
arguments:
- --cni-bin-dir=/opt/cni/bin
- --cni-conf-dir=/etc/cni/net.d
- --eviction-max-pod-grace-period=-1
- --network-plugin=cni
- --node-status-update-frequency=5s
- --v=5
images:
pause: gcr.io/google_containers/pause-amd64:3.0
...

View File

@ -1,42 +0,0 @@
---
schema: promenade/KubernetesNetwork/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-network
layeringDefinition:
abstract: false
layer: site
data:
dns:
cluster_domain: cluster.local
service_ip: 10.96.0.10
bootstrap_validation_checks:
- calico-etcd.kube-system.svc.cluster.local
- google.com
- kubernetes-etcd.kube-system.svc.cluster.local
- kubernetes.default.svc.cluster.local
upstream_servers:
- 8.8.8.8
- 8.8.4.4
kubernetes:
apiserver_port: 6443
haproxy_port: 6553
pod_cidr: 10.97.0.0/16
service_cidr: 10.96.0.0/16
service_ip: 10.96.0.1
etcd:
container_port: 2379
haproxy_port: 2378
hosts_entries:
- ip: 192.168.77.1
names:
- registry
# proxy:
# url: http://proxy.example.com:8080
# additional_no_proxy:
# - 10.0.1.1
...

View File

@ -1,166 +0,0 @@
---
schema: promenade/PKICatalog/v1
metadata:
schema: metadata/Document/v1
name: cluster-certificates
layeringDefinition:
abstract: false
layer: site
data:
certificate_authorities:
kubernetes:
description: CA for Kubernetes components
certificates:
- document_name: apiserver
description: Service certificate for Kubernetes apiserver
common_name: apiserver
hosts:
- localhost
- 127.0.0.1
- 10.96.0.1
kubernetes_service_names:
- kubernetes.default.svc.cluster.local
- document_name: kubelet-genesis
common_name: system:node:${GENESIS_NODE_NAME}
hosts:
- ${GENESIS_NODE_NAME}
- ${GENESIS_NODE_IP}
groups:
- system:nodes
- document_name: kubelet-${GENESIS_NODE_NAME}
common_name: system:node:${GENESIS_NODE_NAME}
hosts:
- ${GENESIS_NODE_NAME}
- ${GENESIS_NODE_IP}
groups:
- system:nodes
- document_name: kubelet-${MASTER_NODE_NAME}
common_name: system:node:${MASTER_NODE_NAME}
hosts:
- ${MASTER_NODE_NAME}
- ${MASTER_NODE_IP}
groups:
- system:nodes
- document_name: scheduler
description: Service certificate for Kubernetes scheduler
common_name: system:kube-scheduler
- document_name: controller-manager
description: certificate for controller-manager
common_name: system:kube-controller-manager
- document_name: admin
common_name: admin
groups:
- system:masters
- document_name: armada
common_name: armada
groups:
- system:masters
kubernetes-etcd:
description: Certificates for Kubernetes's etcd servers
certificates:
- document_name: apiserver-etcd
description: etcd client certificate for use by Kubernetes apiserver
common_name: apiserver
# NOTE(mark-burnett): hosts not required for client certificates
- document_name: kubernetes-etcd-anchor
description: anchor
common_name: anchor
- document_name: kubernetes-etcd-genesis
common_name: kubernetes-etcd-genesis
hosts:
- ${GENESIS_NODE_NAME}
- ${GENESIS_NODE_IP}
- 127.0.0.1
- localhost
- kubernetes-etcd.kube-system.svc.cluster.local
- document_name: kubernetes-etcd-${GENESIS_NODE_NAME}
common_name: kubernetes-etcd-${GENESIS_NODE_NAME}
hosts:
- ${GENESIS_NODE_NAME}
- ${GENESIS_NODE_IP}
- 127.0.0.1
- localhost
- kubernetes-etcd.kube-system.svc.cluster.local
- document_name: kubernetes-etcd-${MASTER_NODE_NAME}
common_name: kubernetes-etcd-${MASTER_NODE_NAME}
hosts:
- ${MASTER_NODE_NAME}
- ${MASTER_NODE_IP}
- 127.0.0.1
- localhost
- kubernetes-etcd.kube-system.svc.cluster.local
kubernetes-etcd-peer:
certificates:
- document_name: kubernetes-etcd-genesis-peer
common_name: kubernetes-etcd-genesis-peer
hosts:
- ${GENESIS_NODE_NAME}
- ${GENESIS_NODE_IP}
- 127.0.0.1
- localhost
- kubernetes-etcd.kube-system.svc.cluster.local
- document_name: kubernetes-etcd-${GENESIS_NODE_NAME}-peer
common_name: kubernetes-etcd-${GENESIS_NODE_NAME}-peer
hosts:
- ${GENESIS_NODE_NAME}
- ${GENESIS_NODE_IP}
- 127.0.0.1
- localhost
- kubernetes-etcd.kube-system.svc.cluster.local
- document_name: kubernetes-etcd-${MASTER_NODE_NAME}-peer
common_name: kubernetes-etcd-${MASTER_NODE_NAME}-peer
hosts:
- ${MASTER_NODE_NAME}
- ${MASTER_NODE_IP}
- 127.0.0.1
- localhost
- kubernetes-etcd.kube-system.svc.cluster.local
calico-etcd:
description: Certificates for Calico etcd client traffic
certificates:
- document_name: calico-etcd-anchor
description: anchor
common_name: anchor
- document_name: calico-etcd-${GENESIS_NODE_NAME}
common_name: calico-etcd-${GENESIS_NODE_NAME}
hosts:
- ${GENESIS_NODE_NAME}
- ${GENESIS_NODE_IP}
- 127.0.0.1
- localhost
- 10.96.232.136
- document_name: calico-etcd-${MASTER_NODE_NAME}
common_name: calico-etcd-${MASTER_NODE_NAME}
hosts:
- ${MASTER_NODE_NAME}
- ${MASTER_NODE_IP}
- 127.0.0.1
- localhost
- 10.96.232.136
- document_name: calico-node
common_name: calcico-node
calico-etcd-peer:
description: Certificates for Calico etcd clients
certificates:
- document_name: calico-etcd-${GENESIS_NODE_NAME}-peer
common_name: calico-etcd-${GENESIS_NODE_NAME}-peer
hosts:
- ${GENESIS_NODE_NAME}
- ${GENESIS_NODE_IP}
- 127.0.0.1
- localhost
- 10.96.232.136
- document_name: calico-etcd-${MASTER_NODE_NAME}-peer
common_name: calico-etcd-${MASTER_NODE_NAME}-peer
hosts:
- ${MASTER_NODE_NAME}
- ${MASTER_NODE_IP}
- 127.0.0.1
- localhost
- 10.96.232.136
- document_name: calico-node-peer
common_name: calcico-node-peer
keypairs:
- name: service-account
description: Service account signing key for use by Kubernetes controller-manager.
...

View File

@ -1,869 +0,0 @@
---
schema: 'deckhand/LayeringPolicy/v1'
metadata:
schema: 'metadata/Control/v1'
name: layering-policy
data:
layerOrder:
- site
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-proxy
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
description: Kubernetes proxy
sequenced: true
chart_group:
- kubernetes-proxy
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: container-networking
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
description: Container networking via Calico
sequenced: true
chart_group:
- calico-etcd
- calico
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: dns
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
description: Cluster DNS
chart_group:
- coredns
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: kubernetes
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
description: Kubernetes components
chart_group:
- haproxy
- kubernetes-etcd
- kubernetes-apiserver
- kubernetes-controller-manager
- kubernetes-scheduler
- tiller
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: ingress-system
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
description: Ingress for the site
chart_group:
- ingress-kube-system
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: helm-toolkit
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
chart_name: helm-toolkit
release: helm-toolkit
namespace: helm-toolkit
timeout: 600
upgrade:
no_hooks: true
values: {}
source:
type: git
location: ${HTK_CHART_REPO}
subpath: ${HTK_CHART_PATH}
reference: ${HTK_CHART_BRANCH}
dependencies: []
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-proxy
layeringDefinition:
abstract: false
layer: site
data:
chart_name: proxy
release: kubernetes-proxy
namespace: kube-system
timeout: 600
upgrade:
no_hooks: true
values:
images:
tags:
proxy: ${KUBE_PROXY_IMAGE}
network:
kubernetes_netloc: 127.0.0.1:6553
pod_cidr: 10.97.0.0/16
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: proxy
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: calico-etcd
layeringDefinition:
abstract: false
layer: site
substitutions:
- src:
schema: deckhand/CertificateAuthority/v1
name: calico-etcd
path: .
dest:
path: '.values.secrets.tls.client.ca'
- src:
schema: deckhand/CertificateAuthority/v1
name: calico-etcd-peer
path: .
dest:
path: '.values.secrets.tls.peer.ca'
- src:
schema: deckhand/Certificate/v1
name: calico-etcd-anchor
path: .
dest:
path: '.values.secrets.anchor.tls.cert'
- src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-anchor
path: .
dest:
path: '.values.secrets.anchor.tls.key'
- src:
schema: deckhand/Certificate/v1
name: calico-etcd-${GENESIS_NODE_NAME}
path: .
dest:
path: '.values.nodes[0].tls.client.cert'
- src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-${GENESIS_NODE_NAME}
path: .
dest:
path: '.values.nodes[0].tls.client.key'
- src:
schema: deckhand/Certificate/v1
name: calico-etcd-${GENESIS_NODE_NAME}-peer
path: .
dest:
path: '.values.nodes[0].tls.peer.cert'
- src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-${GENESIS_NODE_NAME}-peer
path: .
dest:
path: '.values.nodes[0].tls.peer.key'
- src:
schema: deckhand/Certificate/v1
name: calico-etcd-${MASTER_NODE_NAME}
path: .
dest:
path: '.values.nodes[1].tls.client.cert'
- src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-${MASTER_NODE_NAME}
path: .
dest:
path: '.values.nodes[1].tls.client.key'
- src:
schema: deckhand/Certificate/v1
name: calico-etcd-${MASTER_NODE_NAME}-peer
path: .
dest:
path: '.values.nodes[1].tls.peer.cert'
- src:
schema: deckhand/CertificateKey/v1
name: calico-etcd-${MASTER_NODE_NAME}-peer
path: .
dest:
path: '.values.nodes[1].tls.peer.key'
data:
chart_name: etcd
release: calico-etcd
namespace: kube-system
timeout: 600
upgrade:
no_hooks: true
values:
anchor:
etcdctl_endpoint: 10.96.232.136
labels:
anchor:
node_selector_key: calico-etcd
node_selector_value: enabled
secrets:
anchor:
tls:
cert: placeholder
key: placeholder
tls:
client:
ca: placeholder
peer:
ca: placeholder
etcd:
host_data_path: ${ETCD_CALICO_DATA_PATH}
host_etc_path: ${ETCD_CALICO_ETC_PATH}
bootstrapping:
enabled: true
host_directory: /var/lib/anchor
filename: calico-etcd-bootstrap
images:
tags:
etcd: ${CALICO_ETCD_IMAGE}
etcdctl: ${CALICO_ETCDCTL_IMAGE}
nodes:
- name: ${GENESIS_NODE_NAME}
tls:
client:
cert: placeholder
key: placeholder
peer:
cert: placeholder
key: placeholder
- name: ${MASTER_NODE_NAME}
tls:
client:
cert: placeholder
key: placeholder
peer:
cert: placeholder
key: placeholder
service:
name: calico-etcd
ip: 10.96.232.136
network:
service_client:
name: service_client
port: 6666
target_port: 6666
service_peer:
name: service_peer
port: 6667
target_port: 6667
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: etcd
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: calico
layeringDefinition:
abstract: false
layer: site
substitutions:
- src:
schema: deckhand/CertificateAuthority/v1
name: calico-etcd
path: .
dest:
path: '.values.etcd.tls.ca'
- src:
schema: deckhand/Certificate/v1
name: calico-node
path: .
dest:
path: '.values.etcd.tls.cert'
- src:
schema: deckhand/CertificateKey/v1
name: calico-node
path: .
dest:
path: '.values.etcd.tls.key'
data:
chart_name: calico
release: calico
namespace: kube-system
timeout: 600
upgrade:
no_hooks: true
values:
calico:
ip_autodetection_method: interface=${NODE_NET_IFACE}
pod_ip_cidr: 10.97.0.0/16
ctl:
install_on_host: true
etcd:
service:
ip: 10.96.232.136
port: 6666
tls:
ca: placeholder
cert: placeholder
key: placeholder
images:
cni: ${CALICO_CNI_IMAGE}
ctl: ${CALICO_CTL_IMAGE}
node: ${CALICO_NODE_IMAGE}
policy_controller: ${CALICO_POLICYCTLR_IMAGE}
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: calico
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: coredns
layeringDefinition:
abstract: false
layer: site
data:
chart_name: coredns
release: coredns
namespace: kube-system
timeout: 600
upgrade:
no_hooks: true
values:
images:
tags:
coredns: ${KUBE_COREDNS_IMAGE}
test: ${KUBE_COREDNS_IMAGE}
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: coredns
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: haproxy
layeringDefinition:
abstract: false
layer: site
data:
chart_name: haproxy
release: haproxy
namespace: kube-system
timeout: 600
wait:
timeout: 600
upgrade:
no_hooks: true
values:
conf:
anchor:
kubernetes_url: https://kubernetes.default:443
services:
default:
kubernetes:
server_opts: "check"
conf_parts:
frontend:
- mode tcp
- option tcpka
- bind *:6553
backend:
- mode tcp
- option tcpka
kube-system:
kubernetes-etcd:
server_opts: "check"
conf_parts:
frontend:
- mode tcp
- option tcpka
- bind *:2378
backend:
- mode tcp
- option tcpka
images:
tags:
anchor: gcr.io/google_containers/hyperkube-amd64:v1.8.6
haproxy: haproxy:1.8.3
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: haproxy
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-apiserver
layeringDefinition:
abstract: false
layer: site
substitutions:
- src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes
path: .
dest:
path: .values.secrets.tls.ca
- src:
schema: deckhand/Certificate/v1
name: apiserver
path: .
dest:
path: .values.secrets.tls.cert
- src:
schema: deckhand/CertificateKey/v1
name: apiserver
path: .
dest:
path: .values.secrets.tls.key
- src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes-etcd
path: .
dest:
path: .values.secrets.etcd.tls.ca
- src:
schema: deckhand/Certificate/v1
name: apiserver-etcd
path: .
dest:
path: .values.secrets.etcd.tls.cert
- src:
schema: deckhand/CertificateKey/v1
name: apiserver-etcd
path: .
dest:
path: .values.secrets.etcd.tls.key
- src:
schema: deckhand/PublicKey/v1
name: service-account
path: .
dest:
path: .values.secrets.service_account.public_key
data:
chart_name: apiserver
release: kubernetes-apiserver
namespace: kube-system
timeout: 600
upgrade:
no_hooks: true
values:
apiserver:
etcd:
endpoints: https://127.0.0.1:2378
images:
tags:
anchor: ${KUBE_ANCHOR_IMAGE}
apiserver: ${KUBE_APISERVER_IMAGE}
secrets:
service_account:
public_key: placeholder
tls:
ca: placeholder
cert: placeholder
key: placeholder
etcd:
tls:
ca: placeholder
cert: placeholder
key: placeholder
network:
kubernetes_service_ip: 10.96.0.1
pod_cidr: 10.97.0.0/16
service_cidr: 10.96.0.0/16
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: apiserver
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-controller-manager
layeringDefinition:
abstract: false
layer: site
substitutions:
- src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes
path: .
dest:
path: .values.secrets.tls.ca
- src:
schema: deckhand/Certificate/v1
name: controller-manager
path: .
dest:
path: .values.secrets.tls.cert
- src:
schema: deckhand/CertificateKey/v1
name: controller-manager
path: .
dest:
path: .values.secrets.tls.key
- src:
schema: deckhand/PrivateKey/v1
name: service-account
path: .
dest:
path: .values.secrets.service_account.private_key
data:
chart_name: controller_manager
release: kubernetes-controller-manager
namespace: kube-system
timeout: 600
upgrade:
no_hooks: true
values:
images:
anchor: ${KUBE_ANCHOR_IMAGE}
controller_manager: ${KUBE_CTLRMGR_IMAGE}
secrets:
service_account:
private_key: placeholder
tls:
ca: placeholder
cert: placeholder
key: placeholder
network:
kubernetes_netloc: 127.0.0.1:6553
pod_cidr: 10.97.0.0/16
service_cidr: 10.96.0.0/16
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: controller_manager
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-scheduler
layeringDefinition:
abstract: false
layer: site
substitutions:
- src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes
path: .
dest:
path: .values.secrets.tls.ca
- src:
schema: deckhand/Certificate/v1
name: scheduler
path: .
dest:
path: .values.secrets.tls.cert
- src:
schema: deckhand/CertificateKey/v1
name: scheduler
path: .
dest:
path: .values.secrets.tls.key
data:
chart_name: scheduler
release: kubernetes-scheduler
namespace: kube-system
timeout: 600
upgrade:
no_hooks: true
values:
secrets:
tls:
ca: placeholder
cert: placeholder
key: placeholder
network:
kubernetes_netloc: 127.0.0.1:6553
images:
tags:
anchor: ${KUBE_ANCHOR_IMAGE}
scheduler: ${KUBE_SCHED_IMAGE}
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: scheduler
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kubernetes-etcd
layeringDefinition:
abstract: false
layer: site
substitutions:
-
src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes-etcd
path: .
dest:
path: '.values.secrets.tls.client.ca'
-
src:
schema: deckhand/CertificateAuthority/v1
name: kubernetes-etcd-peer
path: .
dest:
path: '.values.secrets.tls.peer.ca'
-
src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-anchor
path: .
dest:
path: '.values.secrets.anchor.tls.cert'
-
src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-anchor
path: .
dest:
path: '.values.secrets.anchor.tls.key'
-
src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-${GENESIS_NODE_NAME}
path: .
dest:
path: '.values.nodes[0].tls.client.cert'
-
src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-${GENESIS_NODE_NAME}
path: .
dest:
path: '.values.nodes[0].tls.client.key'
-
src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-${GENESIS_NODE_NAME}-peer
path: .
dest:
path: '.values.nodes[0].tls.peer.cert'
-
src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-${GENESIS_NODE_NAME}-peer
path: .
dest:
path: '.values.nodes[0].tls.peer.key'
-
src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-${MASTER_NODE_NAME}
path: .
dest:
path: '.values.nodes[1].tls.client.cert'
-
src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-${MASTER_NODE_NAME}
path: .
dest:
path: '.values.nodes[1].tls.client.key'
-
src:
schema: deckhand/Certificate/v1
name: kubernetes-etcd-${MASTER_NODE_NAME}-peer
path: .
dest:
path: '.values.nodes[1].tls.peer.cert'
-
src:
schema: deckhand/CertificateKey/v1
name: kubernetes-etcd-${MASTER_NODE_NAME}-peer
path: .
dest:
path: '.values.nodes[1].tls.peer.key'
data:
chart_name: etcd
release: kubernetes-etcd
namespace: kube-system
timeout: 600
upgrade:
no_hooks: true
values:
anchor:
etcdctl_endpoint: 10.96.0.2
labels:
anchor:
node_selector_key: kubernetes-etcd
node_selector_value: enabled
secrets:
anchor:
tls:
cert: placeholder
key: placeholder
tls:
client:
ca: placeholder
peer:
ca: placeholder
etcd:
host_data_path: ${ETCD_KUBE_DATA_PATH}
host_etc_path: ${ETCD_KUBE_ETC_PATH}
images:
tags:
etcd: ${KUBE_ETCD_IMAGE}
etcdctl: ${KUBE_ETCDCTL_IMAGE}
nodes:
- name: ${GENESIS_NODE_NAME}
tls:
client:
cert: placeholder
key: placeholder
peer:
cert: placeholder
key: placeholder
- name: ${MASTER_NODE_NAME}
tls:
client:
cert: placeholder
key: placeholder
peer:
cert: placeholder
key: placeholder
service:
name: kubernetes-etcd
ip: 10.96.0.2
network:
service_client:
name: service_client
port: 2379
target_port: 2379
service_peer:
name: service_peer
port: 2380
target_port: 2380
source:
type: local
location: /etc/genesis/armada/assets/charts
subpath: etcd
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ingress-kube-system
layeringDefinition:
abstract: false
layer: site
data:
chart_name: ingress-kube-system
release: ingress-kube-system
namespace: kube-system
timeout: 300
install:
no_hooks: false
upgrade:
no_hooks: false
values:
labels:
server:
node_selector_key: kube-ingress
node_selector_value: enabled
error_server:
node_selector_key: kube-ingress
node_selector_value: enabled
deployment:
mode: cluster
type: DaemonSet
network:
host_namespace: true
pod:
replicas:
error_page: 2
conf:
services:
udp:
53: 'kube-system/coredns:53'
source:
type: git
location: https://github.com/openstack/openstack-helm
subpath: ingress
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: tiller
layeringDefinition:
abstract: false
layer: site
data:
chart_name: tiller
release: tiller
namespace: kube-system
install:
no_hooks: false
upgrade:
no_hooks: false
values:
images:
tags:
tiller: ${TILLER_IMAGE}
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
source:
type: git
location: ${TILLER_CHART_REPO}
subpath: ${TILLER_CHART_PATH}
reference: ${TILLER_CHART_BRANCH}
dependencies:
- helm-toolkit
...

View File

@ -1,945 +0,0 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: helm-toolkit
layeringDefinition:
abstract: false
layer: site
data:
chart_name: helm-toolkit
release: helm-toolkit
namespace: helm-toolkit
timeout: 100
values: {}
source:
type: git
location: ${HTK_CHART_REPO}
subpath: ${HTK_CHART_PATH}
reference: ${HTK_CHART_BRANCH}
dependencies: []
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: divingbell
layeringDefinition:
abstract: false
layer: site
data:
chart_name: divingbell
release: divingbell
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
source:
type: git
location: ${DIVINGBELL_CHART_REPO}
subpath: ${DIVINGBELL_CHART_PATH}
reference: ${DIVINGBELL_CHART_BRANCH}
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ceph
layeringDefinition:
abstract: false
layer: site
data:
chart_name: ceph
release: ceph
namespace: ceph
timeout: 3600
wait:
timeout: 3600
labels:
release_group: ucp-ceph
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
application: ceph
component: bootstrap
- type: job
labels:
application: ceph
component: mds-keyring-generator
- type: job
labels:
application: ceph
component: mon-keyring-generator
- type: job
labels:
application: ceph
component: rgw-keyring-generator
- type: job
labels:
application: ceph
component: storage-keys-generator
- type: job
labels:
application: ceph
component: osd-keyring-generator
values:
endpoints:
identity:
namespace: ucp
object_store:
namespace: ceph
ceph_mon:
namespace: ceph
deployment:
storage_secrets: true
ceph: true
rbd_provisioner: true
cephfs_provisioner: true
client_secrets: false
rgw_keystone_user_and_endpoints: false
bootstrap:
enabled: true
conf:
rgw_ks:
enabled: true
ceph:
global:
fsid: 926602d0-11a7-11e8-9f47-080027ef795a
osd_pool_default_size: ${CEPH_OSD_POOL_SIZE}
osd:
osd_crush_chooseleaf_type: 0
pool:
crush:
tunables: ${CEPH_CRUSH_TUNABLES}
target:
osd: 1
pg_per_osd: 100
default:
crush_rule: same_host
spec:
# RBD pool
- name: rbd
application: rbd
replication: 1
percent_total_data: 40
# CephFS pools
- name: cephfs_metadata
application: cephfs
replication: 1
percent_total_data: 5
- name: cephfs_data
application: cephfs
replication: 1
percent_total_data: 10
# RadosGW pools
- name: .rgw.root
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.control
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.data.root
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.gc
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.log
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.intent-log
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.meta
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.usage
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.users.keys
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.users.email
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.users.swift
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.users.uid
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.buckets.extra
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.buckets.index
application: rgw
replication: 1
percent_total_data: 3
- name: default.rgw.buckets.data
application: rgw
replication: 1
percent_total_data: 34.8
storage:
osd:
- data:
type: directory
location: ${CEPH_OSD_DIR}/osd-one
journal:
type: directory
location: ${CEPH_OSD_DIR}/journal-one
labels:
job:
node_selector_key: ucp-control-plane
node_selector_value: enabled
provisioner:
node_selector_key: ucp-control-plane
node_selector_value: enabled
network:
public: ${CEPH_PUBLIC_NET}
cluster: ${CEPH_CLUSTER_NET}
source:
type: git
location: ${CEPH_CHART_REPO}
subpath: ${CEPH_CHART_PATH}
reference: ${CEPH_CHART_BRANCH}
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-ceph-config
layeringDefinition:
abstract: false
layer: site
data:
chart_name: ucp-ceph-config
release: ucp-ceph-config
namespace: ucp
timeout: 3600
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
application: ceph
component: namespace-client-key-generator
values:
labels:
job:
node_selector_key: ucp-control-plane
node_selector_value: enabled
endpoints:
identity:
namespace: ucp
object_store:
namespace: ceph
ceph_mon:
namespace: ceph
ceph:
rgw_keystone_auth: true
network:
public: ${CEPH_PUBLIC_NET}
cluster: ${CEPH_CLUSTER_NET}
deployment:
storage_secrets: false
ceph: false
rbd_provisioner: false
cephfs_provisioner: false
client_secrets: true
rgw_keystone_user_and_endpoints: false
source:
type: git
location: ${CEPH_CHART_REPO}
subpath: ${CEPH_CHART_PATH}
reference: ${CEPH_CHART_BRANCH}
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-mariadb
layeringDefinition:
abstract: false
layer: site
data:
chart_name: ucp-mariadb
release: ucp-mariadb
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
values:
labels:
server:
node_selector_key: ucp-control-plane
node_selector_value: enabled
prometheus_mysql_exporter:
node_selector_key: ucp-control-plane
node_selector_value: enabled
pod:
replicas:
server: 1
source:
type: git
location: https://git.openstack.org/openstack/openstack-helm
subpath: mariadb
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-memcached
layeringDefinition:
abstract: false
layer: site
data:
chart_name: ucp-memcached
release: ucp-memcached
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
values:
labels:
server:
node_selector_key: ucp-control-plane
node_selector_value: enabled
source:
type: git
location: https://git.openstack.org/openstack/openstack-helm
subpath: memcached
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: ucp-keystone
  layeringDefinition:
    abstract: false
    layer: site
data:
  chart_name: ucp-keystone
  release: keystone
  namespace: ucp
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      # Remove stale db jobs so the upgrade re-runs them cleanly.
      delete:
        - name: keystone-db-sync
          type: job
          labels:
            job-name: keystone-db-sync
        - name: keystone-db-init
          type: job
          labels:
            job-name: keystone-db-init
    post:
      create: []
  values:
    conf:
      keystone:
        override:
      paste:
        override:
    # NOTE(review): sibling charts in this manifest set replica counts under
    # pod.replicas; a bare values.replicas may be silently ignored by the
    # openstack-helm keystone chart -- confirm against the chart's values.yaml.
    replicas: 2
    labels:
      api:
        node_selector_key: ucp-control-plane
        node_selector_value: enabled
      job:
        node_selector_key: ucp-control-plane
        node_selector_value: enabled
  source:
    type: git
    location: https://git.openstack.org/openstack/openstack-helm
    subpath: keystone
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: maas-postgresql
layeringDefinition:
abstract: false
layer: site
data:
chart_name: maas-postgresql
release: maas-postgresql
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete: []
create: []
post:
create: []
values:
endpoints:
postgresql:
auth:
admin:
password: postgres
development:
enabled: false
labels:
server:
node_selector_key: ucp-control-plane
node_selector_value: enabled
source:
type: git
location: https://git.openstack.org/openstack/openstack-helm
subpath: postgresql
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: maas
layeringDefinition:
abstract: false
layer: site
data:
chart_name: maas
release: maas
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: 'job'
labels:
application: 'maas'
values:
endpoints:
maas_db:
auth:
admin:
password: postgres
images:
tags:
db_sync: ${MAAS_REGION_IMAGE}
maas_rack: ${MAAS_RACK_IMAGE}
maas_region: ${MAAS_REGION_IMAGE}
export_api_key: ${MAAS_REGION_IMAGE}
maas_cache: ${MAAS_CACHE_IMAGE}
labels:
rack:
node_selector_key: ucp-control-plane
node_selector_value: enabled
region:
node_selector_key: ucp-control-plane
node_selector_value: enabled
network:
proxy:
node_port:
enabled: true
port: 31800
gui:
node_port:
enabled: true
port: 31900
conf:
drydock:
bootaction_url: http://${DRYDOCK_NODE_IP}:${DRYDOCK_NODE_PORT}/api/v1.0/bootactions/nodes/
cache:
enabled: ${MAAS_CACHE_ENABLED}
maas:
credentials:
secret:
namespace: ucp
url:
maas_url: http://${MAAS_NODE_IP}:${MAAS_NODE_PORT}/MAAS
proxy:
proxy_enabled: '${PROXY_ENABLED}'
proxy_server: ${PROXY_ADDRESS}
ntp:
use_external_only: 'false'
ntp_servers: ntp.ubuntu.com
dns:
require_dnssec: 'no'
dns_servers: 8.8.8.8
secrets:
maas_region:
value: 3858a12230ac3c915f300c664f12063f
source:
type: git
location: ${MAAS_CHART_REPO}
subpath: ${MAAS_CHART_PATH}
reference: ${MAAS_CHART_BRANCH}
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: drydock
layeringDefinition:
abstract: false
layer: site
data:
chart_name: drydock
release: drydock
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: 'job'
labels:
application: 'drydock'
values:
images:
tags:
drydock: ${DRYDOCK_IMAGE}
drydock_db_sync: ${DRYDOCK_IMAGE}
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
network:
drydock:
node_port:
enabled: true
port: ${DRYDOCK_NODE_PORT}
endpoints:
physicalprovisioner:
port:
api:
nodeport: ${DRYDOCK_NODE_PORT}
postgresql:
auth:
admin:
password: postgres
replicas:
drydock: 1
conf:
drydock:
maasdriver:
maas_api_url: http://${MAAS_NODE_IP}:${MAAS_NODE_PORT}/MAAS/api/2.0/
plugins:
ingester: drydock_provisioner.ingester.plugins.deckhand.DeckhandIngester
source:
type: git
location: ${DRYDOCK_CHART_REPO}
subpath: ${DRYDOCK_CHART_PATH}
reference: ${DRYDOCK_CHART_BRANCH}
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: deckhand
layeringDefinition:
abstract: false
layer: site
data:
chart_name: deckhand
release: deckhand
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: 'job'
labels:
application: 'deckhand'
values:
endpoints:
postgresql:
auth:
admin:
password: postgres
images:
tags:
deckhand: ${DECKHAND_IMAGE}
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
conf:
deckhand:
DEFAULT:
debug: true
use_stderr: true
use_syslog: true
keystone_authtoken:
memcache_security_strategy: None
source:
type: git
location: ${DECKHAND_CHART_REPO}
subpath: ${DECKHAND_CHART_PATH}
reference: ${DECKHAND_CHART_BRANCH}
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-barbican
layeringDefinition:
abstract: false
layer: site
data:
chart_name: ucp-barbican
release: barbican
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete: []
post:
create: []
values:
pod:
replicas:
api: 1
labels:
job:
node_selector_key: ucp-control-plane
node_selector_value: enabled
api:
node_selector_key: ucp-control-plane
node_selector_value: enabled
source:
type: git
location: https://git.openstack.org/openstack/openstack-helm
subpath: barbican
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: shipyard
layeringDefinition:
abstract: false
layer: site
data:
chart_name: shipyard
release: shipyard
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: 'job'
labels:
application: 'shipyard'
- type: 'job'
labels:
application: 'airflow'
values:
endpoints:
postgresql_shipyard_db:
auth:
admin:
password: postgres
postgresql_airflow_db:
auth:
admin:
password: postgres
prod_environment: ${SHIPYARD_PROD_DEPLOY}
pod:
mounts:
dag_path: ${AIRFLOW_PATH_DAG}
plugin_path: ${AIRFLOW_PATH_PLUGIN}
log_path: ${AIRFLOW_PATH_LOG}
replicas:
shipyard:
api: 1
airflow:
web: 1
worker: 1
flower: 1
scheduler: 1
images:
tags:
airflow: ${AIRFLOW_IMAGE}
shipyard: ${SHIPYARD_IMAGE}
airflow_db_sync: ${AIRFLOW_IMAGE}
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
network:
shipyard:
node_port: ${SHIPYARD_NODE_PORT}
enable_node_port: true
airflow:
web:
node_port: ${AIRFLOW_NODE_PORT}
enable_node_port: true
conf:
shipyard:
keystone_authtoken:
memcache_security_strategy: None
source:
type: git
location: ${SHIPYARD_CHART_REPO}
subpath: ${SHIPYARD_CHART_PATH}
reference: ${SHIPYARD_CHART_BRANCH}
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: armada
layeringDefinition:
abstract: false
layer: site
data:
chart_name: armada
release: armada
namespace: ucp
install:
no_hooks: false
upgrade:
no_hooks: false
values:
images:
tags:
api: ${ARMADA_IMAGE}
labels:
node_selector_key: ucp-control-plane
node_selector_value: enabled
network:
api:
node_port:
enabled: true
port: ${ARMADA_NODE_PORT}
conf:
armada:
DEFAULT:
debug: true
tiller_namespace: kube-system
manifests:
service_tiller_deploy: false
deployment_tiller: false
source:
type: git
location: ${ARMADA_CHART_REPO}
subpath: ${ARMADA_CHART_PATH}
reference: ${ARMADA_CHART_BRANCH}
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: ucp-rabbitmq
  layeringDefinition:
    abstract: false
    layer: site
data:
  chart_name: ucp-rabbitmq
  release: rabbitmq
  namespace: ucp
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete: []
    post:
      create: []
  values:
    pod:
      replicas:
        server: 1
    labels:
      server:
        node_selector_key: ucp-control-plane
        node_selector_value: enabled
      # Fixed copy-paste from the mariadb chart: the rabbitmq chart's exporter
      # label key is prometheus_rabbitmq_exporter, not prometheus_mysql_exporter.
      prometheus_rabbitmq_exporter:
        node_selector_key: ucp-control-plane
        node_selector_value: enabled
  source:
    type: git
    location: https://git.openstack.org/openstack/openstack-helm
    subpath: rabbitmq
    # Pin the branch explicitly, matching the sibling ingress/postgresql charts.
    reference: master
  dependencies:
    - helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ingress
layeringDefinition:
abstract: false
layer: site
data:
chart_name: ingress
release: ingress
namespace: ucp
timeout: 300
install:
no_hooks: false
upgrade:
no_hooks: false
values:
labels:
server:
node_selector_key: ucp-control-plane
node_selector_value: enabled
error_server:
node_selector_key: ucp-control-plane
node_selector_value: enabled
pod:
replicas:
ingress: 2
error_page: 2
source:
type: git
location: https://github.com/openstack/openstack-helm
subpath: ingress
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: promenade
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
chart_name: promenade
release: promenade
namespace: ucp
timeout: 600
wait:
timeout: 600
values:
pod:
env:
promenade_api:
- name: PROMENADE_DEBUG
value: '1'
images:
tags:
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.2.1
ks_endpoints: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
ks_service: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
ks_user: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
promenade: ${PROMENADE_IMAGE}
upgrade:
no_hooks: true
source:
type: git
location: ${PROMENADE_CHART_REPO}
reference: ${PROMENADE_CHART_BRANCH}
subpath: ${PROMENADE_CHART_PATH}
dependencies:
- helm-toolkit
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: ceph
layeringDefinition:
abstract: false
layer: site
data:
description: 'Storage Backend'
sequenced: true
chart_group:
- ceph
- ucp-ceph-config
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: ucp-infra
layeringDefinition:
abstract: false
layer: site
data:
description: 'UCP Infrastructure'
chart_group:
- ucp-mariadb
- ucp-memcached
- maas-postgresql
- ucp-rabbitmq
- ingress
- divingbell
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: ucp-services
layeringDefinition:
abstract: false
layer: site
data:
description: 'UCP Services'
chart_group:
- maas
- drydock
- deckhand
- shipyard
- armada
- ucp-keystone
- ucp-barbican
- promenade
...
---
schema: armada/Manifest/v1
metadata:
schema: metadata/Document/v1
name: ucp_deploy
layeringDefinition:
abstract: false
layer: site
data:
release_prefix: ucp
chart_groups:
- ceph
- ucp-infra
- ucp-services
...

View File

@ -1,247 +0,0 @@
#!/bin/bash
set -x
# Abort unless running as root: the script installs packages, writes under
# /etc, and runs privileged docker containers.
if [[ $(whoami) != "root" ]]
then
  # exit takes 0-255; "exit -1" is non-portable (rejected by some shells).
  echo "Must be root to run $0" >&2
  exit 1
fi
function init_env {
  # Establish the deployment environment with stable defaults.
  # Every variable may be overridden by exporting it before running this
  # script; the resolved environment is saved to deploy-env.sh at the end.
  # Network
  export CEPH_CLUSTER_NET=${CEPH_CLUSTER_NET:-"NA"}
  export CEPH_PUBLIC_NET=${CEPH_PUBLIC_NET:-"NA"}
  export GENESIS_NODE_IP=${GENESIS_NODE_IP:-"NA"}
  export DRYDOCK_NODE_IP=${DRYDOCK_NODE_IP:-${GENESIS_NODE_IP}}
  export DRYDOCK_NODE_PORT=${DRYDOCK_NODE_PORT:-31000}
  export MAAS_NODE_IP=${MAAS_NODE_IP:-${GENESIS_NODE_IP}}
  export MAAS_NODE_PORT=${MAAS_NODE_PORT:-31900}
  export MASTER_NODE_IP=${MASTER_NODE_IP:-"NA"}
  export NODE_NET_IFACE=${NODE_NET_IFACE:-"eth0"}
  export PROXY_ADDRESS=${PROXY_ADDRESS:-"http://one.proxy.att.com:8080"}
  export PROXY_ENABLED=${PROXY_ENABLED:-"false"}
  export AIRFLOW_NODE_PORT=${AIRFLOW_NODE_PORT:-32080}
  export SHIPYARD_NODE_PORT=${SHIPYARD_NODE_PORT:-31901}
  export ARMADA_NODE_PORT=${ARMADA_NODE_PORT:-31903}
  # UCP Service Config
  export SHIPYARD_PROD_DEPLOY=${SHIPYARD_PROD_DEPLOY:-"true"}
  export AIRFLOW_PATH_DAG=${AIRFLOW_PATH_DAG:-"/var/tmp/airflow/dags"}
  export AIRFLOW_PATH_PLUGIN=${AIRFLOW_PATH_PLUGIN:-"/var/tmp/airflow/plugins"}
  export AIRFLOW_PATH_LOG=${AIRFLOW_PATH_LOG:-"/var/tmp/airflow/logs"}
  export MAAS_CACHE_ENABLED=${MAAS_CACHE_ENABLED:-"false"}
  # NOTE - Pool size of 1 is NOT production-like. Workaround for Ceph Luminous
  # until disk targeting is implemented to have multiple OSDs on Genesis
  export CEPH_OSD_POOL_SIZE=${CEPH_OSD_POOL_SIZE:-"1"}
  # Storage
  export CEPH_OSD_DIR=${CEPH_OSD_DIR:-"/var/lib/openstack-helm/ceph/osd"}
  export ETCD_KUBE_DATA_PATH=${ETCD_KUBE_DATA_PATH:-"/var/lib/etcd/kubernetes"}
  export ETCD_KUBE_ETC_PATH=${ETCD_KUBE_ETC_PATH:-"/etc/etcd/kubernetes"}
  export ETCD_CALICO_DATA_PATH=${ETCD_CALICO_DATA_PATH:-"/var/lib/etcd/calico"}
  export ETCD_CALICO_ETC_PATH=${ETCD_CALICO_ETC_PATH:-"/etc/etcd/calico"}
  # Hostnames (normalized to lower case; node names must match exactly)
  export GENESIS_NODE_NAME=${GENESIS_NODE_NAME:-"node1"}
  export GENESIS_NODE_NAME=$(echo $GENESIS_NODE_NAME | tr '[:upper:]' '[:lower:]')
  export MASTER_NODE_NAME=${MASTER_NODE_NAME:-"node2"}
  export MASTER_NODE_NAME=$(echo $MASTER_NODE_NAME | tr '[:upper:]' '[:lower:]')
  # Charts
  export HTK_CHART_REPO=${HTK_CHART_REPO:-"https://github.com/openstack/openstack-helm"}
  export HTK_CHART_PATH=${HTK_CHART_PATH:-"helm-toolkit"}
  export HTK_CHART_BRANCH=${HTK_CHART_BRANCH:-"master"}
  export CEPH_CHART_REPO=${CEPH_CHART_REPO:-"https://github.com/openstack/openstack-helm"}
  export CEPH_CHART_PATH=${CEPH_CHART_PATH:-"ceph"}
  export CEPH_CHART_BRANCH=${CEPH_CHART_BRANCH:-"master"}
  export DRYDOCK_CHART_REPO=${DRYDOCK_CHART_REPO:-"https://github.com/att-comdev/drydock"}
  export DRYDOCK_CHART_PATH=${DRYDOCK_CHART_PATH:-"charts/drydock"}
  export DRYDOCK_CHART_BRANCH=${DRYDOCK_CHART_BRANCH:-"master"}
  export MAAS_CHART_REPO=${MAAS_CHART_REPO:-"https://github.com/att-comdev/maas"}
  export MAAS_CHART_PATH=${MAAS_CHART_PATH:-"charts/maas"}
  export MAAS_CHART_BRANCH=${MAAS_CHART_BRANCH:-"master"}
  export DECKHAND_CHART_REPO=${DECKHAND_CHART_REPO:-"https://github.com/att-comdev/deckhand"}
  export DECKHAND_CHART_PATH=${DECKHAND_CHART_PATH:-"charts/deckhand"}
  export DECKHAND_CHART_BRANCH=${DECKHAND_CHART_BRANCH:-"master"}
  export SHIPYARD_CHART_REPO=${SHIPYARD_CHART_REPO:-"https://github.com/att-comdev/shipyard"}
  export SHIPYARD_CHART_PATH=${SHIPYARD_CHART_PATH:-"charts/shipyard"}
  export SHIPYARD_CHART_BRANCH=${SHIPYARD_CHART_BRANCH:-"master"}
  export ARMADA_CHART_REPO=${ARMADA_CHART_REPO:-"https://github.com/att-comdev/armada"}
  export ARMADA_CHART_PATH=${ARMADA_CHART_PATH:-"charts/armada"}
  export ARMADA_CHART_BRANCH=${ARMADA_CHART_BRANCH:-"master"}
  export DIVINGBELL_CHART_REPO=${DIVINGBELL_CHART_REPO:-"https://github.com/att-comdev/divingbell"}
  export DIVINGBELL_CHART_PATH=${DIVINGBELL_CHART_PATH:-"divingbell"}
  export DIVINGBELL_CHART_BRANCH=${DIVINGBELL_CHART_BRANCH:-"master"}
  export TILLER_CHART_REPO=${TILLER_CHART_REPO:-"https://github.com/att-comdev/armada"}
  export TILLER_CHART_PATH=${TILLER_CHART_PATH:-"charts/tiller"}
  export TILLER_CHART_BRANCH=${TILLER_CHART_BRANCH:-"master"}
  export PROMENADE_CHART_REPO=${PROMENADE_CHART_REPO:-"https://github.com/att-comdev/promenade"}
  export PROMENADE_CHART_PATH=${PROMENADE_CHART_PATH:-"charts/promenade"}
  export PROMENADE_CHART_BRANCH=${PROMENADE_CHART_BRANCH:-"master"}
  # Kubernetes artifacts
  export KUBE_PROXY_IMAGE=${KUBE_PROXY_IMAGE:-"gcr.io/google_containers/hyperkube-amd64:v1.8.6"}
  export KUBE_ETCD_IMAGE=${KUBE_ETCD_IMAGE:-"quay.io/coreos/etcd:v3.0.17"}
  export KUBE_ETCDCTL_IMAGE=${KUBE_ETCDCTL_IMAGE:-"quay.io/coreos/etcd:v3.0.17"}
  export KUBE_ANCHOR_IMAGE=${KUBE_ANCHOR_IMAGE:-"gcr.io/google_containers/hyperkube-amd64:v1.8.6"}
  export KUBE_COREDNS_IMAGE=${KUBE_COREDNS_IMAGE:-"coredns/coredns:1.0.5"}
  export KUBE_APISERVER_IMAGE=${KUBE_APISERVER_IMAGE:-"gcr.io/google_containers/hyperkube-amd64:v1.8.6"}
  export HAPROXY_IMAGE=${HAPROXY_IMAGE:-"haproxy:1.8.3"}
  export KUBE_CTLRMGR_IMAGE=${KUBE_CTLRMGR_IMAGE:-"gcr.io/google_containers/hyperkube-amd64:v1.8.6"}
  export KUBE_SCHED_IMAGE=${KUBE_SCHED_IMAGE:-"gcr.io/google_containers/hyperkube-amd64:v1.8.6"}
  export KUBECTL_IMAGE=${KUBECTL_IMAGE:-"gcr.io/google_containers/hyperkube-amd64:v1.8.6"}
  export CALICO_CNI_IMAGE=${CALICO_CNI_IMAGE:-"quay.io/calico/cni:v1.11.0"}
  export CALICO_CTL_IMAGE=${CALICO_CTL_IMAGE:-"quay.io/calico/ctl:v1.6.1"}
  export CALICO_NODE_IMAGE=${CALICO_NODE_IMAGE:-"quay.io/calico/node:v2.6.1"}
  export CALICO_POLICYCTLR_IMAGE=${CALICO_POLICYCTLR_IMAGE:-"quay.io/calico/kube-controllers:v1.0.0"}
  export CALICO_ETCD_IMAGE=${CALICO_ETCD_IMAGE:-"quay.io/coreos/etcd:v3.0.17"}
  export CALICO_ETCDCTL_IMAGE=${CALICO_ETCDCTL_IMAGE:-"quay.io/coreos/etcd:v3.0.17"}
  export KUBE_KUBELET_TAR=${KUBE_KUBELET_TAR:-"https://dl.k8s.io/v1.8.6/kubernetes-node-linux-amd64.tar.gz"}
  # Images
  export TILLER_IMAGE=${TILLER_IMAGE:-"gcr.io/kubernetes-helm/tiller:v2.7.2"}
  export DRYDOCK_IMAGE=${DRYDOCK_IMAGE:-"quay.io/attcomdev/drydock:latest"}
  export ARMADA_IMAGE=${ARMADA_IMAGE:-"quay.io/attcomdev/armada:latest"}
  export PROMENADE_IMAGE=${PROMENADE_IMAGE:-"quay.io/attcomdev/promenade:latest"}
  export DECKHAND_IMAGE=${DECKHAND_IMAGE:-"quay.io/attcomdev/deckhand:latest"}
  export SHIPYARD_IMAGE=${SHIPYARD_IMAGE:-"quay.io/attcomdev/shipyard:latest"}
  export AIRFLOW_IMAGE=${AIRFLOW_IMAGE:-"quay.io/attcomdev/airflow:latest"}
  export MAAS_CACHE_IMAGE=${MAAS_CACHE_IMAGE:-"quay.io/attcomdev/maas-cache:latest"}
  export MAAS_REGION_IMAGE=${MAAS_REGION_IMAGE:-"sthussey/maas-region-controller:2.3_patchv4"}
  export MAAS_RACK_IMAGE=${MAAS_RACK_IMAGE:-"sthussey/maas-rack-controller:2.3"}
  # Docker
  export DOCKER_REPO_URL=${DOCKER_REPO_URL:-"http://apt.dockerproject.org/repo"}
  export DOCKER_PACKAGE=${DOCKER_PACKAGE:-"docker-engine=1.13.1-0~ubuntu-xenial"}
  # Filenames
  export ARMADA_CONFIG=${ARMADA_CONFIG:-"armada.yaml"}
  export UP_SCRIPT_FILE=${UP_SCRIPT_FILE:-"genesis.sh"}
  # Detect the proper Ceph crush tunables for this kernel: kernels older than
  # x.5 get the 'hammer' profile.
  kern_minor=$(uname -a | cut -d ' ' -f 3 | cut -d '.' -f 2)
  if [[ $kern_minor -lt 5 ]]
  then
    CEPH_CRUSH_TUNABLES='hammer'
  else
    CEPH_CRUSH_TUNABLES='null'
  fi
  export CEPH_CRUSH_TUNABLES
  # Validate environment ("exit -1" replaced with portable "exit 1" throughout)
  if [[ $GENESIS_NODE_IP == "NA" || $MASTER_NODE_IP == "NA" ]]
  then
    echo "GENESIS_NODE_IP and MASTER_NODE_IP env vars must be set to correct IP addresses."
    exit 1
  fi
  if [[ $CEPH_CLUSTER_NET == "NA" || $CEPH_PUBLIC_NET == "NA" ]]
  then
    echo "CEPH_CLUSTER_NET and CEPH_PUBLIC_NET env vars must be set to correct IP subnet CIDRs."
    exit 1
  fi
  if [[ $(hostname) != $GENESIS_NODE_NAME ]]
  then
    echo "Local node hostname $(hostname) does not match GENESIS_NODE_NAME $GENESIS_NODE_NAME."
    exit 1
  fi
  if [[ -z $(grep $GENESIS_NODE_NAME /etc/hosts | grep $GENESIS_NODE_IP) ]]
  then
    echo "No /etc/hosts entry found for $GENESIS_NODE_NAME. Please add one."
    exit 1
  fi
  echo "Saving deployment environment to deploy-env.sh."
  # NOTE(review): values containing whitespace are written unquoted and may not
  # re-source cleanly; acceptable for the simple values used here.
  env | xargs -n 1 -d '\n' echo "export" >> deploy-env.sh
}
function genesis {
  # Render configuration templates, generate PKI/join artifacts via the
  # Promenade container, run the generated genesis bootstrap script, and
  # install an admin kubeconfig.
  rm -rf configs
  mkdir configs
  chmod 777 configs
  cat PKICatalog.yaml.sub | envsubst > configs/PKICatalog.yaml
  cat armada-resources.yaml.sub | envsubst > configs/armada-resources.yaml
  cat armada.yaml.sub | envsubst > ${ARMADA_CONFIG}
  cat Genesis.yaml.sub | envsubst > configs/Genesis.yaml
  cat HostSystem.yaml.sub | envsubst > configs/HostSystem.yaml
  cp Kubelet.yaml.sub configs/Kubelet.yaml
  cp KubernetesNetwork.yaml.sub configs/KubernetesNetwork.yaml
  cp Docker.yaml configs/
  cp ArmadaManifest.yaml configs/
  if [[ $PROXY_ENABLED == 'true' ]]
  then
    export http_proxy=$PROXY_ADDRESS
    export https_proxy=$PROXY_ADDRESS
    export HTTP_PROXY=$PROXY_ADDRESS
    export HTTPS_PROXY=$PROXY_ADDRESS
    # url must nest under proxy in the appended YAML, so it needs deeper indent.
    echo '  proxy:' >> configs/KubernetesNetwork.yaml
    echo "    url: ${PROXY_ADDRESS}" >> configs/KubernetesNetwork.yaml
  fi
  # Support a custom deployment for shipyard developers
  if [[ $SHIPYARD_PROD_DEPLOY == 'false' ]]
  then
    mkdir -p $AIRFLOW_PATH_DAG
    mkdir -p $AIRFLOW_PATH_PLUGIN
    mkdir -p $AIRFLOW_PATH_LOG
  fi
  # Install docker
  apt -qq update
  apt -y install docker.io jq
  # Generate certificates. (A bare "exit" after echo would exit 0, since $?
  # then reflects the echo -- use an explicit nonzero status.)
  docker run --rm -t -w /target -v $(pwd)/configs:/target ${PROMENADE_IMAGE} promenade generate-certs -o /target $(ls ./configs)
  if [[ $? -ne 0 ]]
  then
    echo "Promenade certificate generation failed."
    exit 1
  fi
  # Generate promenade join artifacts
  docker run --rm -t -w /target -v $(pwd)/configs:/target ${PROMENADE_IMAGE} promenade build-all -o /target --validators $(ls ./configs)
  if [[ $? -ne 0 ]]
  then
    echo "Promenade join artifact generation failed."
    exit 1
  fi
  # Do Promenade genesis process; capture its status before "cd .." clobbers $?
  cd configs
  . ${UP_SCRIPT_FILE}
  genesis_rc=$?
  cd ..
  if [[ $genesis_rc -ne 0 ]]
  then
    echo "Genesis process failed."
    exit 1
  fi
  # Setup kubeconfig (-p: don't fail if ~/.kube already exists)
  mkdir -p ~/.kube
  cp -r /etc/kubernetes/admin/pki ~/.kube/pki
  cat /etc/kubernetes/admin/kubeconfig.yaml | sed -e 's/\/etc\/kubernetes\/admin/./' > ~/.kube/config
}
function ucp_deploy {
  # Apply the rendered Armada manifest to deploy the UCP control plane.
  # Report failure instead of unconditionally claiming success.
  docker run -t -v ~/.kube:/armada/.kube -v $(pwd):/target --net=host ${ARMADA_IMAGE} apply /target/${ARMADA_CONFIG}
  if [[ $? -ne 0 ]]
  then
    echo "UCP control plane deployment failed." >&2
    exit 1
  fi
  echo 'UCP control plane deployed.'
}
init_env
genesis
ucp_deploy

View File

@ -1,185 +0,0 @@
#Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
schema: 'drydock/Region/v1'
metadata:
schema: 'metadata/Document/v1'
name: atl_foundry
data:
tag_definitions:
- tag: 'high_memory'
definition_type: 'lshw_xpath'
definition: //node[@id="memory"]/'size units="bytes"' > 137438953472
authorized_keys:
- |
ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAgqUTJwZEMjZCWOnXQw+FFdvnf/lYrGXm01
rf/ZYUanoymkMWIK1/c8a3Ez9/HY3dyfWBcuzlIV4bNCvJcMg4UPuh6NQBJWAlfp7wfW9O
8ZyDE3x1FYno5u3OB4rRDcvKe6J0ygPcu4Uec5ASsd58yGnE4zTl1D/J30rNa00si+s= r
sa-key-20120124
---
# Fixed copy-paste defect: this document's body (bonding, mtu, trunking,
# default_network, allowed_networks) is a NetworkLink definition, but it was
# declared with the drydock/Region/v1 schema. The corrected NetworkLink form
# appears verbatim in the layered variant of this site definition.
schema: 'drydock/NetworkLink/v1'
metadata:
  schema: 'metadata/Document/v1'
  name: oob
data:
  # NOTE(review): the layered variant expresses labels as a mapping
  # (noconfig: 'enabled'); kept as a list here to match this file's style --
  # confirm which form the consuming drydock version expects.
  labels:
    - 'noconfig'
  bonding:
    mode: 'disabled'
  mtu: 1500
  linkspeed: 'auto'
  trunking:
    mode: disabled
  default_network: oob
  allowed_networks:
    - 'oob'
---
schema: 'drydock/NetworkLink/v1'
metadata:
schema: 'metadata/Document/v1'
name: pxe-rack1
data:
bonding:
mode: 'disabled'
mtu: 1500
linkspeed: 'auto'
trunking:
mode: disabled
default_network: pxe-rack1
allowed_networks:
- 'pxe-rack1'
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: oob
data:
labels:
- 'noconfig'
cidr: '172.24.10.0/24'
allocation: 'static'
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: pxe-rack1
data:
cidr: '172.24.1.0/24'
routes:
- subnet: '0.0.0.0/0'
gateway: '172.24.1.1'
metric: 100
ranges:
- type: 'reserved'
start: '172.24.1.1'
end: '172.24.1.100'
- type: 'dhcp'
start: '172.24.1.200'
end: '172.24.1.250'
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: pxe-rack2
data:
cidr: '172.24.2.0/24'
routes:
- subnet: '0.0.0.0/0'
gateway: '172.24.2.1'
metric: 100
ranges:
- type: 'reserved'
start: '172.24.2.1'
end: '172.24.2.100'
- type: 'dhcp'
start: '172.24.2.200'
end: '172.24.2.250'
---
schema: 'drydock/HardwareProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: DellR820v1
data:
vendor: 'Dell'
generation: '1'
hw_version: '2'
bios_version: '2.2.3'
boot_mode: 'bios'
bootstrap_protocol: 'pxe'
pxe_interface: '0'
device_aliases:
pnic01:
bus_type: 'pci'
dev_type: 'Intel 10Gbps NIC'
address: '0000:00:03.0'
---
schema: 'drydock/HostProfile/v1'
metadata:
name: defaults
schema: 'metadata/Document/v1'
data:
hardware_profile: 'DellR820v1'
primary_network: 'pxe-rack1'
oob:
type: 'ipmi'
network: 'oob'
account: 'admin'
credential: 'password'
storage:
physical_devices:
sda:
labels:
bootdrive: true
partitions:
- name: 'root'
size: '10g'
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'boot'
size: '1g'
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
interfaces:
ens3f0:
device_link: 'pxe-rack1'
slaves:
- 'ens3f0'
networks:
- 'pxe-rack1'
platform:
image: 'ubuntu/xenial'
kernel: 'generic'
metadata:
rack: rack1
---
schema: 'drydock/BaremetalNode/v1'
metadata:
schema: 'metadata/Document/v1'
name: node2
data:
host_profile: defaults
addressing:
- network: 'pxe-rack1'
address: '172.24.1.101'
- network: 'oob'
address: '172.24.10.101'
metadata:
tags:
- 'masters'
...

View File

@ -1,276 +0,0 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
schema: deckhand/LayeringPolicy/v1
metadata:
schema: metadata/Control/v1
name: layering-policy
data:
layerOrder:
- site
---
schema: 'drydock/Region/v1'
metadata:
schema: 'metadata/Document/v1'
name: atl_foundry
layeringDefinition:
abstract: false
layer: site
data:
tag_definitions:
- tag: 'high_memory'
definition_type: 'lshw_xpath'
definition: //node[@id="memory"]/'size units="bytes"' > 137438953472
authorized_keys:
- |
ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAgqUTJwZEMjZCWOnXQw+FFdvnf/lYrGXm01
rf/ZYUanoymkMWIK1/c8a3Ez9/HY3dyfWBcuzlIV4bNCvJcMg4UPuh6NQBJWAlfp7wfW9O
8ZyDE3x1FYno5u3OB4rRDcvKe6J0ygPcu4Uec5ASsd58yGnE4zTl1D/J30rNa00si+s= r
sa-key-20120124
---
schema: 'drydock/NetworkLink/v1'
metadata:
schema: 'metadata/Document/v1'
name: oob
layeringDefinition:
abstract: false
layer: site
data:
labels:
noconfig: 'enabled'
bonding:
mode: 'disabled'
mtu: 1500
linkspeed: 'auto'
trunking:
mode: disabled
default_network: oob
allowed_networks:
- 'oob'
---
schema: 'drydock/NetworkLink/v1'
metadata:
schema: 'metadata/Document/v1'
name: pxe-rack1
layeringDefinition:
abstract: false
layer: site
data:
bonding:
mode: 'disabled'
mtu: 1500
linkspeed: 'auto'
trunking:
mode: disabled
default_network: pxe-rack1
allowed_networks:
- 'pxe-rack1'
---
schema: 'drydock/NetworkLink/v1'
metadata:
schema: 'metadata/Document/v1'
name: mgmt
layeringDefinition:
abstract: false
layer: site
data:
bonding:
mode: 'disabled'
mtu: 1500
linkspeed: 'auto'
trunking:
mode: disabled
default_network: mgmt
allowed_networks:
- 'mgmt'
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: oob
layeringDefinition:
abstract: false
layer: site
data:
labels:
noconfig: 'enabled'
cidr: '172.24.10.0/24'
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: pxe-rack1
layeringDefinition:
abstract: false
layer: site
data:
cidr: '172.24.1.0/24'
routes:
- subnet: '0.0.0.0/0'
gateway: '172.24.1.1'
metric: 100
ranges:
- type: 'reserved'
start: '172.24.1.1'
end: '172.24.1.100'
- type: 'dhcp'
start: '172.24.1.200'
end: '172.24.1.250'
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: mgmt
layeringDefinition:
abstract: false
layer: site
data:
cidr: '172.24.2.0/24'
routes:
- subnet: '0.0.0.0/0'
gateway: '172.24.2.1'
metric: 100
ranges:
- type: 'reserved'
start: '172.24.2.1'
end: '172.24.2.100'
- type: 'dhcp'
start: '172.24.2.200'
end: '172.24.2.250'
---
schema: 'drydock/HardwareProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: DellR820v1
layeringDefinition:
abstract: false
layer: site
data:
vendor: 'Dell'
generation: '1'
hw_version: '2'
bios_version: '2.2.3'
boot_mode: 'bios'
bootstrap_protocol: 'pxe'
pxe_interface: 0
device_aliases:
pnic01:
bus_type: 'pci'
dev_type: 'Intel 10Gbps NIC'
address: '0000:00:03.0'
---
schema: 'drydock/HostProfile/v1'
metadata:
name: defaults
schema: 'metadata/Document/v1'
layeringDefinition:
abstract: false
layer: site
data:
hardware_profile: 'DellR820v1'
primary_network: 'pxe-rack1'
oob:
type: 'ipmi'
network: 'oob'
account: 'admin'
credential: 'password'
storage:
physical_devices:
sda:
labels:
bootdrive: 'true'
partitions:
- name: 'root'
size: '20g'
bootable: true
filesystem:
mountpoint: '/'
fstype: 'ext4'
mount_options: 'defaults'
- name: 'boot'
size: '1g'
filesystem:
mountpoint: '/boot'
fstype: 'ext4'
mount_options: 'defaults'
interfaces:
ens3:
device_link: 'mgmt'
slaves:
- 'ens3f0'
networks:
- 'mgmt'
platform:
image: 'ubuntu/xenial'
kernel: 'generic'
metadata:
rack: rack1
---
schema: 'drydock/BaremetalNode/v1'
metadata:
schema: 'metadata/Document/v1'
name: node2
layeringDefinition:
abstract: false
layer: site
data:
host_profile: defaults
addressing:
- network: 'mgmt'
address: '172.24.2.101'
- network: 'oob'
address: '172.24.10.101'
metadata:
tags:
- 'masters'
...
---
schema: 'drydock/BootAction/v1'
metadata:
schema: 'metadata/Document/v1'
name: promjoin
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: site
labels:
application: 'drydock'
data:
node_filter:
filter_set_type: 'union'
filter_set:
- filter_type: 'union'
node_names:
- 'node2'
assets:
- path: /opt/promjoin.sh
type: file
permissions: '555'
location: promenade+http://promenade-api.ucp.svc.cluster.local/api/v1.0/join-scripts?design_ref={{ action.design_ref | urlencode }}&hostname={{ node.hostname }}&ip={{ node.network.mgmt.ip }}{% for k, v in node.labels.items() %}&labels.dynamic={{ k }}={{ v }}{% endfor %}
location_pipeline:
- template
data_pipeline:
- utf8_decode
- path: /lib/systemd/system/promjoin.service
type: unit
permissions: '600'
data: |-
W1VuaXRdCkRlc2NyaXB0aW9uPVByb21lbmFkZSBJbml0aWFsaXphdGlvbiBTZXJ2aWNlCkFmdGVy
PW5ldHdvcmstb25saW5lLnRhcmdldCBsb2NhbC1mcy50YXJnZXQKQ29uZGl0aW9uUGF0aEV4aXN0
cz0hL3Zhci9saWIvcHJvbS5kb25lCgpbU2VydmljZV0KVHlwZT1zaW1wbGUKRXhlY1N0YXJ0PS9v
cHQvcHJvbWpvaW4uc2gKCltJbnN0YWxsXQpXYW50ZWRCeT1tdWx0aS11c2VyLnRhcmdldAo=
data_pipeline:
- base64_decode
- utf8_decode
...

View File

@ -1,4 +0,0 @@
#!/bin/bash
TOKEN=$(docker run --rm --net=host -e 'OS_AUTH_URL=http://keystone-api.ucp.svc.cluster.local:80/v3' -e 'OS_PASSWORD=password' -e 'OS_PROJECT_DOMAIN_NAME=default' -e 'OS_PROJECT_NAME=service' -e 'OS_REGION_NAME=RegionOne' -e 'OS_USERNAME=drydock' -e 'OS_USER_DOMAIN_NAME=default' -e 'OS_IDENTITY_API_VERSION=3' kolla/ubuntu-source-keystone:3.0.3 openstack token issue -f shell | grep ^id | cut -d'=' -f2 | tr -d '"')
echo $TOKEN

View File

@ -1,88 +0,0 @@
# Artifacts to deploy a basic UCP control plane
The scripts and artifacts in this directory can be used to deploy
a basic UCP control plane on a single node.
1. Generate Promenade configuration and certificates
2. Run Promenade genesis process to bootstrap Kubernetes
3. Deploy Ceph using Armada
4. Deploy UCP backend services (MariaDB, Postgres) using Armada
5. Deploy Drydock and MaaS using Armada
## Setup environment for your environment
This deployment process utilizes several environment variables to
customize the deployment to your environment. The set-env.sh file has
an example environment known to work.
* CEPH\_CLUSTER\_NET
The CIDR of the network(s) that Ceph will utilize for storage replication and
other intra-cluster communication. Can be a comma-separated list of CIDRs.
* CEPH\_PUBLIC\_NET
The CIDR of the network(s) that Ceph will utilize for accepting requests
for storage provisioning. Can be a comma-separated list of CIDRs.
* CEPH\_OSD\_DIR
The directory Ceph will use for OSD storage
* GENESIS\_NODE\_IP
The IP address of the genesis node or VM.
* MASTER\_NODE\_IP
The IP address of the second node to be added to the cluster. Scripting does not yet
support deployment of this node, but it is *REQUIRED* to be included in the bootstrap
configuration
* DRYDOCK\_NODE\_IP
The IP address of the node that will host the Drydock container. Defaults to the genesis
node which is normally correct.
* MAAS\_NODE\_IP
  The IP address of the node that will host the MaaS container. Defaults to the genesis
node which is normally correct.
* NODE\_NET\_IFACE
The NIC interface on each node that Calico should use to access the underlay network. Defaults
to 'eth0'
* PROXY\_ADDRESS
If a HTTP/HTTPS proxy is needed for public access, specify the address here in URL format.
* PROXY\_ENABLED
Whether to enable proxy use. Should be 'true' or 'false', defaults to 'false'.
* GENESIS\_NODE\_NAME
The hostname of the genesis node. REQUIRED to be accurate. Defaults to 'node1'
* MASTER\_NODE\_NAME
The hostname of the master (or second) node. REQUIRED to be accurate. Defaults to 'node2'
* \*\_CHART\_REPO
The Git repository used for pulling charts. \* can be any of 'CEPH', 'DRYDOCK' or 'MAAS'
* \*\_CHART\_BRANCH
The Git branch used for pulling charts. \* can be any of 'CEPH', 'DRYDOCK' or 'MAAS'
* \*\_IMAGE
The Docker image file used for deployments and running commands. \* can be any of 'DRYDOCK',
'ARMADA', 'PROMENADE'.
## Run the deployment
Once all of the above environmental variables are correct, run `deploy_ucp.sh` as root.

View File

@ -1,9 +0,0 @@
# Example environment customization
export CEPH_CLUSTER_NET=172.24.1.0/24
export CEPH_PUBLIC_NET=172.24.1.0/24
export GENESIS_NODE_IP=172.24.1.100
export MASTER_NODE_IP=172.24.1.101
export NODE_NET_IFACE=ens3
export PROMENADE_IMAGE=quay.io/attcomdev/promenade:latest
export ARMADA_IMAGE=quay.io/attcomdev/armada:latest
export DRYDOCK_IMAGE=quay.io/attcomdev/drydock:latest

View File

@ -22,7 +22,9 @@
# # # #
############################################################################### ###############################################################################
echo ""
echo "Welcome to Airship in a Bottle" echo "Welcome to Airship in a Bottle"
echo ""
echo " /--------------------\\" echo " /--------------------\\"
echo "| \\" echo "| \\"
echo "| |---| \\----" echo "| |---| \\----"
@ -32,13 +34,27 @@ echo "| | /"
echo "| \____|____/ /----" echo "| \____|____/ /----"
echo "| /" echo "| /"
echo " \--------------------/" echo " \--------------------/"
echo ""
echo ""
echo "A prototype example of deploying the Airship suite on a single VM."
echo ""
sleep 1 sleep 1
echo "" echo ""
echo "The minimum recommended size of the Ubuntu 16.04 VM is 4 vCPUs, 16GB of RAM with 64GB disk space." echo "This example will run through:"
echo " - Setup"
echo " - Genesis of Airship (Kubernetes)"
echo " - Basic deployment of Openstack (including Nova, Neutron, and Horizon using Openstack Helm)"
echo " - VM creation automation using Heat"
echo ""
echo "The expected runtime of this script is greater than 1 hour"
echo ""
sleep 1
echo ""
echo "The minimum recommended size of the Ubuntu 16.04 VM is 4 vCPUs, 20GB of RAM with 64GB disk space."
CPU_COUNT=$(grep -c processor /proc/cpuinfo) CPU_COUNT=$(grep -c processor /proc/cpuinfo)
RAM_TOTAL=$(awk '/MemTotal/ {print $2}' /proc/meminfo) RAM_TOTAL=$(awk '/MemTotal/ {print $2}' /proc/meminfo)
source /etc/os-release source /etc/os-release
if [[ $CPU_COUNT -lt 4 || $RAM_TOTAL -lt 16777216 || $NAME != "Ubuntu" || $VERSION_ID != "16.04" ]]; then if [[ $CPU_COUNT -lt 4 || $RAM_TOTAL -lt 20000000 || $NAME != "Ubuntu" || $VERSION_ID != "16.04" ]]; then
echo "Error: minimum VM recommendations are not met. Exiting." echo "Error: minimum VM recommendations are not met. Exiting."
exit 1 exit 1
fi fi
@ -65,7 +81,7 @@ fi
# Shells out to get the hostname to avoid some config conflicts # Shells out to get the hostname to avoid some config conflicts
set -x set -x
SHORT_HOSTNAME=$(hostname -s) export SHORT_HOSTNAME=$(hostname -s)
set +x set +x
# Updates the /etc/hosts file # Updates the /etc/hosts file

View File

@ -44,6 +44,8 @@ HOSTIP=${HOSTIP:-""}
HOSTCIDR=${HOSTCIDR:-""} HOSTCIDR=${HOSTCIDR:-""}
# The interface on the host/genesis node. e.g.: 'ens3' # The interface on the host/genesis node. e.g.: 'ens3'
NODE_NET_IFACE=${NODE_NET_IFACE:-""} NODE_NET_IFACE=${NODE_NET_IFACE:-""}
# Allowance for Genesis/Armada to settle in seconds:
POST_GENESIS_DELAY=${POST_GENESIS_DELAY:-60}
# Repositories # Repositories
@ -100,7 +102,7 @@ function setup_workspace() {
# Setup workspace directories # Setup workspace directories
mkdir -p ${WORKSPACE}/collected mkdir -p ${WORKSPACE}/collected
mkdir -p ${WORKSPACE}/genesis mkdir -p ${WORKSPACE}/genesis
# Open permissions for output from promenade # Open permissions for output from Promenade
chmod -R 777 ${WORKSPACE}/genesis chmod -R 777 ${WORKSPACE}/genesis
} }
@ -144,7 +146,7 @@ EOF
function install_dependencies() { function install_dependencies() {
apt -qq update apt -qq update
# Install docker # Install docker
apt -y install docker.io jq apt -y install --no-install-recommends docker.io jq
} }
function run_pegleg_collect() { function run_pegleg_collect() {
@ -153,7 +155,7 @@ function run_pegleg_collect() {
} }
function generate_certs() { function generate_certs() {
# Runs the generation of certs by promenade and builds bootstrap scripts # Runs the generation of certs by Promenade and builds bootstrap scripts
# Note: In the really real world, CAs and certs would be provided as part of # Note: In the really real world, CAs and certs would be provided as part of
# the supplied design. In this dev/test environment, self signed is fine. # the supplied design. In this dev/test environment, self signed is fine.
# Moves the generated certificates from /genesis to the design, so that a # Moves the generated certificates from /genesis to the design, so that a
@ -219,8 +221,13 @@ function genesis_complete() {
cp -r /etc/kubernetes/admin/pki ~/.kube/pki cp -r /etc/kubernetes/admin/pki ~/.kube/pki
cat /etc/kubernetes/admin/kubeconfig.yaml | sed -e 's/\/etc\/kubernetes\/admin/./' > ~/.kube/config cat /etc/kubernetes/admin/kubeconfig.yaml | sed -e 's/\/etc\/kubernetes\/admin/./' > ~/.kube/config
# signals that genesis completed
set +x set +x
echo "-----------"
echo "Waiting ${POST_GENESIS_DELAY} seconds for Genesis process to settle. This is a good time to grab a coffee :)"
echo "-----------"
sleep ${POST_GENESIS_DELAY}
# signals that genesis completed
echo "Genesis complete. " echo "Genesis complete. "
echo "The .yaml files in ${WORKSPACE} contain the site design that may be suitable for use with Shipyard. " echo "The .yaml files in ${WORKSPACE} contain the site design that may be suitable for use with Shipyard. "
echo "The Shipyard Keystone password may be found in ${WORKSPACE}/airship-in-a-bottle/deployment_files/site/${TARGET_SITE}/secrets/passphrases/ucp_shipyard_keystone_password.yaml" echo "The Shipyard Keystone password may be found in ${WORKSPACE}/airship-in-a-bottle/deployment_files/site/${TARGET_SITE}/secrets/passphrases/ucp_shipyard_keystone_password.yaml"
@ -237,16 +244,16 @@ function setup_deploy_site() {
cp ${WORKSPACE}/genesis/*.yaml ${WORKSPACE}/site cp ${WORKSPACE}/genesis/*.yaml ${WORKSPACE}/site
cp ${WORKSPACE}/airship-shipyard/tools/run_shipyard.sh ${WORKSPACE}/site cp ${WORKSPACE}/airship-shipyard/tools/run_shipyard.sh ${WORKSPACE}/site
cp ${WORKSPACE}/airship-shipyard/tools/shipyard_docker_base_command.sh ${WORKSPACE}/site cp ${WORKSPACE}/airship-shipyard/tools/shipyard_docker_base_command.sh ${WORKSPACE}/site
cp ${WORKSPACE}/airship-shipyard/tools/execute_shipyard_action.sh ${WORKSPACE}/site
set +x set +x
echo " " echo " "
echo "${WORKSPACE}/site is now set up with creds.sh which can be sourced to set up credentials for use in running Shipyard" echo "${WORKSPACE}/site is now set up with creds.sh which can be sourced to set up credentials for use in running Shipyard"
echo "${WORKSPACE}/site contains .yaml files that represent the single-node site deployment. (deployment_files.yaml, certificats.yaml)" echo "${WORKSPACE}/site contains .yaml files that represent the single-node site deployment. (deployment_files.yaml, certificates.yaml)"
echo " " echo " "
echo "NOTE 2018-03-23: due to a bug in pegleg's document gathering, deployment_files.yaml may need to be updated to remove the duplicate SiteDefinition at the tail end of the file."
echo "NOTE: If you changed the Shipyard keystone password (see above printouts), the creds.sh file needs to be updated to match before use." echo "NOTE: If you changed the Shipyard keystone password (see above printouts), the creds.sh file needs to be updated to match before use."
echo " " echo " "
echo "----------------------------------------------------------------------------------" echo "----------------------------------------------------------------------------------"
echo "The following commands will execute shipyard to setup and run a deploy_site action" echo "The following commands will execute Shipyard to setup and run a deploy_site action"
echo "----------------------------------------------------------------------------------" echo "----------------------------------------------------------------------------------"
echo "cd ${WORKSPACE}/site" echo "cd ${WORKSPACE}/site"
echo "source creds.sh" echo "source creds.sh"
@ -258,7 +265,7 @@ function setup_deploy_site() {
echo "-----------" echo "-----------"
echo "Other Notes" echo "Other Notes"
echo "-----------" echo "-----------"
echo "If you need to run armada directly to deploy charts (fix something broken?), the following maybe of use:" echo "If you need to run Armada directly to deploy charts (fix something broken?), the following may be of use:"
echo "export ARMADA_IMAGE=artifacts-aic.atlantafoundry.com/att-comdev/armada" echo "export ARMADA_IMAGE=artifacts-aic.atlantafoundry.com/att-comdev/armada"
echo "docker run -t -v ~/.kube:/armada/.kube -v ${WORKSPACE}/site:/target --net=host '${ARMADA_IMAGE}' apply /target/your-yaml.yaml" echo "docker run -t -v ~/.kube:/armada/.kube -v ${WORKSPACE}/site:/target --net=host '${ARMADA_IMAGE}' apply /target/your-yaml.yaml"
echo " " echo " "
@ -266,13 +273,23 @@ function setup_deploy_site() {
} }
function execute_deploy_site() { function execute_deploy_site() {
set +x
echo " "
echo "This is an automated deployment using Shipyard, running commands noted previously"
echo "Please stand by while Shipyard deploys the site"
echo " "
set -x set -x
echo cd ${WORKSPACE}/site #Automate the steps of deploying a site.
echo source creds.sh cd ${WORKSPACE}/site
echo ./run_shipyard.sh create configdocs design --filename=/home/shipyard/host/deployment_files.yaml source creds.sh
echo ./run_shipyard.sh create configdocs secrets --filename=/home/shipyard/host/certificates.yaml --append ./run_shipyard.sh create configdocs design --filename=/home/shipyard/host/deployment_files.yaml
echo ./run_shipyard.sh commit configdocs ./run_shipyard.sh create configdocs secrets --filename=/home/shipyard/host/certificates.yaml --append
echo ./run_shipyard.sh create action deploy_site ./run_shipyard.sh commit configdocs
# set variables used in execute_shipyard_action.sh
export max_shipyard_count=${max_shipyard_count:-60}
export shipyard_query_time=${shipyard_query_time:-90}
# monitor the execution of deploy_site
bash execute_shipyard_action.sh 'deploy_site'
} }

View File

@ -32,8 +32,10 @@ sudo docker run -t --rm --net=host
-e OS_USER_DOMAIN_NAME=${OS_USER_DOMAIN_NAME:-default} -e OS_USER_DOMAIN_NAME=${OS_USER_DOMAIN_NAME:-default}
-e OS_PASSWORD=${OS_PASSWORD:-password} -e OS_PASSWORD=${OS_PASSWORD:-password}
-e OS_PROJECT_DOMAIN_NAME=${OS_PROJECT_DOMAIN_NAME:-default} -e OS_PROJECT_DOMAIN_NAME=${OS_PROJECT_DOMAIN_NAME:-default}
-e OS_PROJECT_NAME=${OS_PROJECT_NAME:-service} -e OS_PROJECT_NAME=${OS_PROJECT_NAME:-admin}
-e OS_REGION_NAME=${OS_REGION_NAME:-RegionOne} -e OS_REGION_NAME=${OS_REGION_NAME:-RegionOne}
-e OS_IDENTITY_API_VERSION=${OS_IDENTITY_API_VERSION:-3} -e OS_IDENTITY_API_VERSION=${OS_IDENTITY_API_VERSION:-3}
-w /host/$(pwd)
-v /:/host:rshared
EndOfCommand EndOfCommand
) )