Add cruiserlite type for the CNTT reference implementation deployment in OPNFV lab.

Change-Id: I83ae854fb991f904ce883378b07b37807192b0ea
Signed-off-by: James Gu <james.gu@att.com>
This commit is contained in:
James Gu 2020-07-22 09:17:20 -07:00
parent e4f3f81ec5
commit 760e5fdb13
94 changed files with 6681 additions and 27 deletions

View File

@ -29,14 +29,15 @@ data:
docker: docker.io
repo_urls:
artfactory_web_url: artifacts-example.com/artifactory
auth_key: changeme
infrastructure:
dns:
upstream_servers:
- 8.8.8.8
- 1.1.1.1
- 4.4.4.4
# Repeat the same values as above, but formatted as a comma-separated
# string
# NOTE(review): the joined value(s) below do not list the same servers as
# upstream_servers above (1.1.1.1 missing, stray space after the comma),
# and the key appears twice — confirm the intended final value.
upstream_servers_joined: 8.8.8.8,1.1.1.1
upstream_servers_joined: 8.8.8.8, 4.4.4.4
ntp:
# Verify that a minimum of three (3) ATT NTP servers are reachable in your
# environment; otherwise override them at the site level
@ -50,10 +51,8 @@ data:
# 2. A DNS failure scenario would not result in the common-mode failure of
# nodes to be able to maintain accurate time.
servers:
# time WWT
- 10.255.0.1
# cloudfare
- 1.1.1.1
- 0.ubuntu.pool.ntp.org
- 1.ubuntu.pool.ntp.org
smtp:
server: smtp.test.com
@ -117,6 +116,7 @@ data:
user_home: /home/aic-ord
ranger_agent_keystone_user: ranger-agent-admin
rds_listener_endpoint: https://ranger.example.com/v1/rds/status
keystone_password: changeme
calico:
calico_startup_loglevel: 'Debug'
felix_logseverity: 'Info'

View File

@ -0,0 +1,138 @@
---
schema: 'drydock/HardwareProfile/v1'
metadata:
schema: 'metadata/Document/v1'
name: intel-s2600wt
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data:
# Vendor of the server chassis
vendor: Intel
# Generation of the chassis model
generation: '4'
# Version of the chassis model within its generation - not version of the hardware definition
hw_version: '3'
# The certified version of the chassis BIOS
bios_version: 'SE5C610.86B.01.01.0019.101220160604'
# Mode of the default boot of hardware - bios, uefi
boot_mode: bios
# Protocol of boot of the hardware - pxe, usb, hdd
bootstrap_protocol: pxe
# Which interface to use for network booting within the OOB manager, not OS device
pxe_interface: 0
# Map hardware addresses to aliases/roles to allow a mix of hardware configs
# in a site to result in a consistent configuration
device_aliases:
## network
# $ sudo lspci |grep -i ethernet
# 03:00.0 Ethernet controller: Intel Corporation I350 Gigabit Network Connection (rev 01)
# 03:00.3 Ethernet controller: Intel Corporation I350 Gigabit Network Connection (rev 01)
# 05:00.0 Ethernet controller: Intel Corporation Ethernet Controller X710 for 10GbE SFP+ (rev 01)
# 05:00.1 Ethernet controller: Intel Corporation Ethernet Controller X710 for 10GbE SFP+ (rev 01)
# 05:00.2 Ethernet controller: Intel Corporation Ethernet Controller X710 for 10GbE SFP+ (rev 01)
# 05:00.3 Ethernet controller: Intel Corporation Ethernet Controller X710 for 10GbE SFP+ (rev 01)
# control networks
# eno1
ctrl_nic1:
address: '0000:03:00.0'
dev_type: 'I350 Gigabit Network Connection'
bus_type: 'pci'
# eno2
ctrl_nic2:
address: '0000:03:00.3'
dev_type: 'I350 Gigabit Network Connection'
bus_type: 'pci'
# data networks
# ens785f0
data_nic1:
address: '0000:05:00.0'
dev_type: 'Ethernet Controller X710 for 10GbE SFP+'
bus_type: 'pci'
# ens785f1
data_nic2:
address: '0000:05:00.1'
dev_type: 'Ethernet Controller X710 for 10GbE SFP+'
bus_type: 'pci'
# ens785f2
data_nic3:
address: '0000:05:00.2'
dev_type: 'Ethernet Controller X710 for 10GbE SFP+'
bus_type: 'pci'
# ens785f3
data_nic4:
address: '0000:05:00.3'
dev_type: 'Ethernet Controller X710 for 10GbE SFP+'
bus_type: 'pci'
## storage
# $ sudo lshw -c disk
# *-disk
# description: ATA Disk
# product: INTEL SSDSC2BB48
# physical id: 0.0.0
# bus info: scsi@4:0.0.0
# logical name: /dev/sda
# version: 0101
# serial: PHDV637602LL480BGN
# size: 447GiB (480GB)
# capabilities: gpt-1.00 partitioned partitioned:gpt
# configuration: ansiversion=5 guid=ea7d0b6a-c105-4409-8d4c-dc104cb38737 logicalsectorsize=512 sectorsize=4096
# *-disk
# description: ATA Disk
# product: ST91000640NS
# vendor: Seagate
# physical id: 0.0.0
# bus info: scsi@5:0.0.0
# logical name: /dev/sdb
# version: SN03
# serial: 9XG6LX48
# size: 931GiB (1TB)
# capabilities: gpt-1.00 partitioned partitioned:gpt
# configuration: ansiversion=5 guid=27f17348-e081-4b00-8d4c-5960513a40cd logicalsectorsize=512 sectorsize=512
# /dev/sda
bootdisk:
address: '4:0.0.0'
dev_type: 'INTEL SSDSC2BB48'
bus_type: 'scsi'
# /dev/sdb
datadisk:
address: '5:0.0.0'
dev_type: 'ST91000640NS'
bus_type: 'scsi'
cpu_sets:
# CPUS pinning
# Host OS CPUs are inferred, and will be the remaining cores
kvm: '2-21,24-43,46-65,68-87'
# Kernel config
# Reduce OS jitter on the offloaded CPUs.
rcu_nocbs: '2-21,24-43,46-65,68-87'
# Nova config
# CPUs dedicated to tenant workload.
vcpu_pin_set: '4-21,26-43,48-65,70-87'
# OVS config
# CPUs used by OVS-DPDK processes, same as CPUs used by host OS.
# For this lab, the CPU/NUMA allocation is as follows:
# NUMA node0 CPU(s): 0-21,44-65
# NUMA node1 CPU(s): 22-43,66-87
# So to spread them about both numa, the following is assigned:
# VCPUs 0,44,1,45,22,66,23,67 = first 4 CPU cores
dpdk-lcore-mask: '0x0C0000300000C00003'
# OVS config
# CPUs used by dpdk Poll Mode Drivers (PMD)
# OVS config parameter for DPDK.
# VCPUs 2,46,3,47,24,68,25,69 CPU cores 4-7
pmd-cpu-mask: '0x300000C0000300000C'
hugepages:
dpdk:
size: '1G'
# Referenced in the compute host profile. Control host profile sets its own value.
count: 32
socket-mem: '4096,4096'
...

View File

@ -43,11 +43,6 @@ data:
type: string
additionalProperties: false
required:
- auth_key
- hostnames
- artfactory_ip
- repo_urls
gstools:
type: object
@ -377,13 +372,6 @@ data:
debug_level:
type: string
additionalProperties: false
required:
- rds_listener_endpoint
- ranger_agent_keystone_user
- user
- user_home
- keystone_password
- debug_level
additionalProperties: false
required:

View File

@ -4,7 +4,7 @@ metadata:
schema: metadata/Document/v1
name: osh-infra-ceph-config
labels:
name: osh-infra-ceph-config-global
name: osh-infra-ceph-config-chart-group-global
layeringDefinition:
abstract: false
layer: global

View File

@ -4,7 +4,7 @@ metadata:
schema: metadata/Document/v1
name: osh-infra-dashboards
labels:
name: osh-infra-dashboards-global
name: osh-infra-dashboards-chart-group-global
layeringDefinition:
abstract: false
layer: global

View File

@ -4,7 +4,7 @@ metadata:
schema: metadata/Document/v1
name: osh-infra-ingress-controller
labels:
name: osh-infra-ingress-controller-global
name: osh-infra-ingress-controller-chart-group-global
layeringDefinition:
abstract: false
layer: global

View File

@ -4,7 +4,7 @@ metadata:
schema: metadata/Document/v1
name: osh-infra-radosgw
labels:
name: osh-infra-radosgw-global
name: osh-infra-radosgw-chart-group-global
layeringDefinition:
abstract: false
layer: global

View File

@ -4,7 +4,7 @@ metadata:
schema: metadata/Document/v1
name: openstack-ingress-controller
labels:
name: openstack-ingress-controller-global
name: openstack-ingress-controller-chart-group-global
layeringDefinition:
abstract: false
layer: global

View File

@ -4,7 +4,7 @@ metadata:
schema: metadata/Document/v1
name: openstack-mariadb
labels:
name: openstack-mariadb-global
name: openstack-mariadb-chart-group-global
layeringDefinition:
abstract: false
layer: global

View File

@ -4,7 +4,7 @@ metadata:
schema: metadata/Document/v1
name: openstack-rabbitmq
labels:
name: openstack-rabbitmq-global
name: openstack-rabbitmq-chart-group-global
layeringDefinition:
abstract: false
layer: global

View File

@ -138,6 +138,12 @@ data:
do
sleep 5
done
ceph_mgr_enabled_modules:
- restful
- status
- prometheus
- balancer
- iostat
jobs:
pool_checkPGs:
# Run once a month at midnight of the first day of the month

View File

@ -127,6 +127,12 @@ data:
do
sleep 5
done
ceph_mgr_enabled_modules:
- restful
- status
- prometheus
- balancer
- iostat
jobs:
pool_checkPGs:
# Run once a month at midnight of the first day of the month

View File

@ -111,6 +111,12 @@ data:
ceph: true
bootstrap:
enabled: true
ceph_mgr_enabled_modules:
- restful
- status
- prometheus
- balancer
- iostat
pod:
replicas:
mds: 1

View File

@ -3,6 +3,8 @@ schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-ceph-ingress
labels:
name: ucp-ceph-ingress-global
layeringDefinition:
abstract: false
layer: global

View File

@ -1294,8 +1294,8 @@ data:
neutron_metadata: *mos_neutron
neutron_openvswitch_agent: *mos_neutron_sriov
neutron_server: *mos_neutron
neutron_sriov_agent: *mos_neutron
neutron_sriov_agent_init: *mos_neutron
neutron_sriov_agent: *mos_neutron_sriov
neutron_sriov_agent_init: *mos_neutron_sriov
nova:
test: *rally_test
rabbit_init: *rabbitmq_mgmt

View File

@ -0,0 +1,69 @@
---
# This file defines a boot action for MaaS to deploy the calico-ip-rules script
# to nodes, register with systemd, and runs the script on all PXE booted nodes.
# On the genesis node, this is a manual step detailed in deployment documentation.
schema: 'drydock/BootAction/v1'
metadata:
schema: 'metadata/Document/v1'
name: calico-ip-rules
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: type
labels:
name: calico-ip-rules
application: 'drydock'
substitutions:
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .calico.ip_rule.gateway
dest:
path: .assets[0].data
pattern: DH_SUB_GATEWAY_IP
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .kubernetes.pod_cidr
dest:
path: .assets[0].data
pattern: DH_SUB_POD_CIDR
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .calico.bgp.ipv4.public_service_cidr
dest:
path: .assets[0].data
pattern: DH_SUB_INGRESS_CIDR
# Substitution of the configure-ip-rules script into this bootaction
- src:
schema: pegleg/Script/v1
name: configure-ip-rules
path: .
dest:
path: .assets[1].data
data:
signaling: false
assets:
- path: /etc/systemd/system/configure-ip-rules.service
type: unit
permissions: '444'
data: |-
[Unit]
Description=IP Rules Initialization Service
After=network-online.target local-fs.target
[Service]
Type=simple
ExecStart=/opt/configure-ip-rules.sh -g DH_SUB_GATEWAY_IP -c DH_SUB_POD_CIDR -s DH_SUB_INGRESS_CIDR
[Install]
WantedBy=multi-user.target
data_pipeline:
- utf8_decode
- path: /opt/configure-ip-rules.sh
type: file
permissions: '700'
data_pipeline:
- utf8_decode
...

View File

@ -0,0 +1,55 @@
---
schema: 'drydock/BootAction/v1'
metadata:
schema: 'metadata/Document/v1'
name: i40e-dkms-install
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: type
labels:
application: 'drydock'
name: i40e-dkms-install-type
substitutions:
- src:
schema: pegleg/Script/v1
name: i40e-dkms-install
path: .
dest:
path: .assets[1].data
data:
signaling: false
assets:
- path: /etc/systemd/system/i40e-dkms-install.service
type: unit
permissions: '444'
data: |
[Unit]
Description=Service for Installing i40e driver
DefaultDependencies=no
Before=promjoin.service node-reboot.service
After=network-online.target local-fs.target cloud-init.target
[Service]
Type=oneshot
ExecStart=/opt/i40e-dkms-install.sh -s -r
RemainAfterExit=true
[Install]
WantedBy=airship.target
data_pipeline:
- utf8_decode
- path: /opt/i40e-dkms-install.sh
type: file
permissions: '700'
data_pipeline:
- utf8_decode
- path: /etc/modprobe.d/sriov_blacklist.conf
type: file
permissions: '644'
data_pipeline:
- utf8_decode
data: |
blacklist i40evf
...

View File

@ -0,0 +1,38 @@
---
# This file defines a boot action which is responsible for fetching the node's
# promjoin script from the promenade API. This is the script responsible for
# installing kubernetes on the node and joining the kubernetes cluster.
schema: 'drydock/BootAction/v1'
metadata:
schema: 'metadata/Document/v1'
name: promjoin-compute
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: type
labels:
name: promjoin-compute
application: 'drydock'
data:
# signaling: false — presumably drydock will not wait for a completion
# signal from this boot action; confirm against the drydock BootAction docs.
signaling: false
node_filter:
filter_set_type: 'union'
filter_set:
- filter_type: 'union'
node_labels:
# execute boot action on compute nodes
openstack-nova-compute: enabled
assets:
- path: /opt/promjoin.sh
type: file
permissions: '555'
# The ip= parameter must match the MaaS network name of the network used
# to contact kubernetes. With a standard, reference NC deployment where
# L2 networks are shared between all racks, the network name (i.e. calico)
# should be correct.
# The 'template' location_pipeline below renders the Jinja expressions in
# this URL (hostname, calico/oam IPs, domain, and per-node labels) before
# the script is fetched from the Promenade API.
location: promenade+http://promenade-api.ucp.svc.cluster.local/api/v1.0/join-scripts?design_ref={{ action.design_ref | urlencode }}&hostname={{ node.hostname }}&ip={{ node.network.calico.ip }}&external_ip={{ node.network.oam.ip }}&domain={{ node.domain }}{% for k, v in node.labels.items() %}&labels.dynamic={{ k }}={{ v }}{% endfor %}
location_pipeline:
- template
data_pipeline:
- utf8_decode
...

View File

@ -0,0 +1,38 @@
---
# This file defines a boot action which is responsible for fetching the node's
# promjoin script from the promenade API. This is the script responsible for
# installing kubernetes on the node and joining the kubernetes cluster.
schema: 'drydock/BootAction/v1'
metadata:
schema: 'metadata/Document/v1'
name: promjoin-control
storagePolicy: 'cleartext'
layeringDefinition:
abstract: false
layer: type
labels:
name: promjoin-control
application: 'drydock'
data:
signaling: false
node_filter:
filter_set_type: 'union'
filter_set:
- filter_type: 'union'
node_labels:
# execute boot action on control nodes
ucp-control-plane: enabled
assets:
- path: /opt/promjoin.sh
type: file
permissions: '555'
# The ip= parameter must match the MaaS network name of the network used
# to contact kubernetes. With a standard, reference NC deployment where
# L2 networks are shared between all racks, the network name (i.e. calico)
# should be correct.
location: promenade+http://promenade-api.ucp.svc.cluster.local/api/v1.0/join-scripts?design_ref={{ action.design_ref | urlencode }}&hostname={{ node.hostname }}&ip={{ node.network.calico.ip }}&external_ip={{ node.network.oam.ip }}&domain={{ node.domain }}&leave_kubectl=true{% for k, v in node.labels.items() %}&labels.dynamic={{ k }}={{ v }}{% endfor %}
location_pipeline:
- template
data_pipeline:
- utf8_decode
...

View File

@ -0,0 +1,44 @@
---
# The purpose of this file is to provide shipyard related deployment config
# parameters. This should not require modification for a new site. However,
# shipyard deployment strategies can be very useful in getting around certain
# failures, like misbehaving nodes that hold up the deployment. See more at
# https://github.com/openstack/airship-shipyard/blob/master/docs/source/site-definition-documents.rst#using-a-deployment-strategy
schema: shipyard/DeploymentConfiguration/v1
metadata:
schema: metadata/Document/v1
name: deployment-configuration
layeringDefinition:
abstract: false
layer: type
labels:
# NOTE(review): label suffix is '-cruiser' while this change introduces the
# 'cruiserlite' type — confirm whether '-cruiserlite' was intended here.
name: deployment-configuration-cruiser
storagePolicy: cleartext
data:
physical_provisioner:
# Name of the deployment-strategy document (defined elsewhere in the site
# definition) that controls node grouping/ordering during the deploy.
deployment_strategy: deployment-strategy
# All *_interval / *_timeout values below are in seconds (e.g. 10800 is
# documented as 3 hours on post_apply_timeout further down).
deploy_interval: 30
deploy_timeout: 3600
destroy_interval: 30
destroy_timeout: 900
join_wait: 0
prepare_node_interval: 30
prepare_node_timeout: 1800
prepare_site_interval: 10
prepare_site_timeout: 300
verify_interval: 10
verify_timeout: 60
kubernetes_provisioner:
drain_timeout: 3600
drain_grace_period: 1800
clear_labels_timeout: 1800
remove_etcd_timeout: 1800
etcd_ready_timeout: 600
armada:
get_releases_timeout: 300
get_status_timeout: 300
# Name of the Armada manifest document applied during the deploy flow.
manifest: 'full-site'
# NOTE(mb874d): Set the Armada apply timeout to 3 hours.
post_apply_timeout: 10800
validate_design_timeout: 600
...

View File

@ -0,0 +1,252 @@
---
schema: 'drydock/NetworkLink/v1'
metadata:
schema: 'metadata/Document/v1'
name: oob
layeringDefinition:
abstract: false
layer: type
storagePolicy: cleartext
data:
# MaaS doesn't own this network like it does the others,
# so the noconfig label is specified.
labels:
noconfig: enabled
bonding:
mode: disabled
mtu: 1500
linkspeed: auto
trunking:
mode: disabled
default_network: oob
allowed_networks:
- oob
...
---
schema: 'drydock/NetworkLink/v1'
metadata:
schema: 'metadata/Document/v1'
name: pxe
layeringDefinition:
abstract: false
layer: type
storagePolicy: cleartext
data:
bonding:
mode: disabled
mtu: 1500
linkspeed: auto
trunking:
mode: disabled
default_network: pxe
allowed_networks:
- pxe
...
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
# NOTE(review): every sibling abstract Network document in this file is named
# '<role>-cruiserlite' (pxe-cruiserlite, oam-cruiserlite, storage-cruiserlite,
# calico-cruiserlite, overlay-cruiserlite, routable-cruiserlite), while this
# one is 'oob-cruiser' despite carrying topology: cruiserlite — confirm
# whether 'oob-cruiserlite' was intended before site layers reference it.
name: oob-cruiser
layeringDefinition:
abstract: true
layer: type
labels:
network_role: oob
topology: cruiserlite
storagePolicy: cleartext
data:
mtu: 1500
...
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: pxe-cruiserlite
layeringDefinition:
abstract: true
layer: type
labels:
network_role: pxe
topology: cruiserlite
substitutions:
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .dns.node_domain
dest:
path: .dns.domain
storagePolicy: cleartext
data:
mtu: 1500
...
---
schema: 'drydock/NetworkLink/v1'
metadata:
schema: 'metadata/Document/v1'
name: dmz
layeringDefinition:
abstract: false
layer: type
storagePolicy: cleartext
data:
bonding:
mode: disabled
mtu: 1500
linkspeed: auto
trunking:
mode: disabled
default_network: oam
allowed_networks:
- oam
...
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: oam-cruiserlite
layeringDefinition:
abstract: true
layer: type
labels:
network_role: oam
topology: cruiserlite
substitutions:
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .dns.node_domain
dest:
path: .dns.domain
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .dns.upstream_servers_joined
dest:
path: .dns.servers
storagePolicy: cleartext
data:
mtu: 1500
...
---
schema: 'drydock/NetworkLink/v1'
metadata:
schema: 'metadata/Document/v1'
name: data1
layeringDefinition:
abstract: false
layer: type
storagePolicy: cleartext
data:
bonding:
mode: disabled
# NEWSITE-CHANGEME: Ensure the network switches in the environment are
# configured for this MTU or greater.
mtu: 1500
linkspeed: auto
trunking:
mode: 802.1q
allowed_networks:
- calico
- overlay
...
---
schema: 'drydock/NetworkLink/v1'
metadata:
schema: 'metadata/Document/v1'
name: data2
layeringDefinition:
abstract: false
layer: type
storagePolicy: cleartext
data:
bonding:
mode: disabled
# NEWSITE-CHANGEME: Ensure the network switches in the environment are
# configured for this MTU or greater.
mtu: 1500
linkspeed: auto
trunking:
mode: 802.1q
default_network: storage
allowed_networks:
- storage
- routable
...
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: storage-cruiserlite
layeringDefinition:
abstract: true
layer: type
labels:
network_role: storage
topology: cruiserlite
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: Set the VLAN ID which the Storage network is on
# use '0' if the vlan is untagged
vlan: '0'
# NEWSITE-CHANGEME: Ensure the network switches in the environment are
# configured for this MTU or greater.
mtu: 1500
...
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: calico-cruiserlite
layeringDefinition:
abstract: true
layer: type
labels:
network_role: calico
topology: cruiserlite
storagePolicy: cleartext
data:
mtu: 1500
# NEWSITE-CHANGEME: Set the VLAN ID which the Private network is on
# use '0' if the vlan is untagged
vlan: '0'
...
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: overlay-cruiserlite
layeringDefinition:
abstract: true
layer: type
labels:
network_role: os-overlay
topology: cruiserlite
storagePolicy: cleartext
data:
mtu: 1500
# MaaS doesn't own this network like it does the others, so the noconfig label
# is specified.
labels:
noconfig: enabled
...
---
schema: 'drydock/Network/v1'
metadata:
schema: 'metadata/Document/v1'
name: routable-cruiserlite
layeringDefinition:
abstract: true
layer: type
labels:
network_role: os-routable
topology: cruiserlite
storagePolicy: cleartext
data:
# NEWSITE-CHANGEME: Ensure the network switches in the environment are
# configured for this MTU or greater.
mtu: 1500
# MaaS doesn't own this network like it does the others, so the noconfig label
# is specified.
labels:
noconfig: enabled
...

View File

@ -0,0 +1,512 @@
---
schema: pegleg/PassphraseCatalog/v1
metadata:
schema: metadata/Document/v1
name: passphrase-catalog
layeringDefinition:
abstract: false
layer: type
storagePolicy: cleartext
data:
passphrases:
- description: 'osh barbican oslo db password'
document_name: osh_barbican_oslo_db_password
encrypted: true
profile: hex_lower
- description: 'osh oslo messaging admin password'
document_name: osh_oslo_messaging_admin_password
encrypted: true
profile: hex_lower
- description: 'osh barbican oslo messaging password'
document_name: osh_barbican_oslo_messaging_password
encrypted: true
profile: hex_lower
- description: 'osh barbican password'
document_name: osh_barbican_password
encrypted: true
profile: hex_lower
- description: 'osh rabbitmq erlang cookie'
document_name: osh_rabbitmq_erlang_cookie
encrypted: true
profile: hex_lower
- description: 'osh cinder oslo db password'
document_name: osh_cinder_oslo_db_password
encrypted: true
profile: hex_lower
- description: 'osh cinder oslo messaging password'
document_name: osh_cinder_oslo_messaging_password
encrypted: true
profile: hex_lower
- description: 'osh cinder password'
document_name: osh_cinder_password
encrypted: true
profile: hex_lower
- description: 'osh cinder test password'
document_name: osh_cinder_test_password
encrypted: true
profile: hex_lower
- description: 'osh glance oslo db password'
document_name: osh_glance_oslo_db_password
encrypted: true
profile: hex_lower
- description: 'osh glance oslo messaging password'
document_name: osh_glance_oslo_messaging_password
encrypted: true
profile: hex_lower
- description: 'osh glance password'
document_name: osh_glance_password
encrypted: true
profile: hex_lower
- description: 'osh glance test password'
document_name: osh_glance_test_password
encrypted: true
profile: hex_lower
- description: 'osh heat oslo db password'
document_name: osh_heat_oslo_db_password
encrypted: true
profile: hex_lower
- description: 'osh heat oslo messaging password'
document_name: osh_heat_oslo_messaging_password
encrypted: true
profile: hex_lower
- description: 'osh heat password'
document_name: osh_heat_password
encrypted: true
profile: hex_lower
- description: 'osh heat test password'
document_name: osh_heat_test_password
encrypted: true
profile: hex_lower
- description: 'osh heat stack user password'
document_name: osh_heat_stack_user_password
encrypted: true
profile: hex_lower
- description: 'osh heat trustee password'
document_name: osh_heat_trustee_password
encrypted: true
profile: hex_lower
- description: 'osh horizon oslo db password'
document_name: osh_horizon_oslo_db_password
encrypted: true
profile: hex_lower
- description: 'osh horizon secret key'
document_name: osh_horizon_secret_key
encrypted: true
profile: hex_lower
- description: 'osh infra dmaap mechid password do_not_generate'
document_name: osh_infra_dmaap_mechid_password
encrypted: true
profile: hex_lower
type: passphrase
prompt: true
regenerable: false
- description: 'osh infra elasticsearch admin password'
document_name: osh_infra_elasticsearch_admin_password
encrypted: true
profile: hex_lower
- description: 'osh infra grafana admin password'
document_name: osh_infra_grafana_admin_password
encrypted: true
profile: hex_lower
- description: 'osh infra grafana oslo db password'
document_name: osh_infra_grafana_oslo_db_password
encrypted: true
profile: hex_lower
- description: 'osh infra grafana oslo db session password'
document_name: osh_infra_grafana_oslo_db_session_password
encrypted: true
profile: hex_lower
- description: 'osh infra kibana admin password'
document_name: osh_infra_kibana_admin_password
encrypted: true
profile: hex_lower
- description: 'osh infra nagios admin password'
document_name: osh_infra_nagios_admin_password
encrypted: true
profile: hex_lower
- description: 'osh infra openstack exporter password'
document_name: osh_infra_openstack_exporter_password
encrypted: true
profile: hex_lower
- description: 'osh infra oslo db admin password'
document_name: osh_infra_oslo_db_admin_password
encrypted: true
profile: hex_lower
- description: 'osh infra oslo db audit password'
document_name: osh_infra_oslo_db_audit_password
encrypted: true
profile: hex_lower
- description: 'osh infra oslo db exporter password'
document_name: osh_infra_oslo_db_exporter_password
encrypted: true
profile: hex_lower
- description: 'osh infra prometheus admin password'
document_name: osh_infra_prometheus_admin_password
encrypted: true
profile: hex_lower
- description: 'osh infra rgw ks password'
document_name: osh_infra_rgw_ks_password
encrypted: true
profile: hex_lower
- description: 'osh infra rgw s3 admin secret key'
document_name: osh_infra_rgw_s3_admin_secret_key
encrypted: true
profile: hex_lower
- description: 'osh infra rgw s3 admin access key'
document_name: osh_infra_rgw_s3_admin_access_key
encrypted: true
profile: hex_lower
- description: 'osh infra rgw s3 elasticsearch secret key'
document_name: osh_infra_rgw_s3_elasticsearch_secret_key
encrypted: true
profile: hex_lower
- description: 'osh infra rgw s3 elasticsearch access key'
document_name: osh_infra_rgw_s3_elasticsearch_access_key
encrypted: true
profile: hex_lower
- description: 'osh ironic password'
document_name: osh_ironic_password
encrypted: true
profile: hex_lower
- description: 'osh keystone admin password'
document_name: osh_keystone_admin_password
encrypted: true
profile: hex_lower
- description: 'osh keystone oslo db password'
document_name: osh_keystone_oslo_db_password
encrypted: true
profile: hex_lower
- description: 'osh keystone oslo messaging password'
document_name: osh_keystone_oslo_messaging_password
encrypted: true
profile: hex_lower
- description: 'osh keystone test password'
document_name: osh_keystone_test_password
encrypted: true
profile: hex_lower
- description: 'osh neutron oslo db password'
document_name: osh_neutron_oslo_db_password
encrypted: true
profile: hex_lower
- description: 'osh neutron oslo messaging password'
document_name: osh_neutron_oslo_messaging_password
encrypted: true
profile: hex_lower
- description: 'osh neutron password'
document_name: osh_neutron_password
encrypted: true
profile: hex_lower
- description: 'osh neutron test password'
document_name: osh_neutron_test_password
encrypted: true
profile: hex_lower
- description: 'osh nova metadata proxy shared secret'
document_name: osh_nova_metadata_proxy_shared_secret
encrypted: true
profile: hex_lower
- description: 'osh nova oslo db password'
document_name: osh_nova_oslo_db_password
encrypted: true
profile: hex_lower
- description: 'osh nova oslo messaging password'
document_name: osh_nova_oslo_messaging_password
encrypted: true
profile: hex_lower
- description: 'osh nova password'
document_name: osh_nova_password
encrypted: true
profile: hex_lower
- description: 'osh nova test password'
document_name: osh_nova_test_password
encrypted: true
profile: hex_lower
- description: 'osh oslo cache secret key'
document_name: osh_oslo_cache_secret_key
encrypted: true
profile: hex_lower
- description: 'osh oslo db admin password'
document_name: osh_oslo_db_admin_password
encrypted: true
profile: hex_lower
- description: 'osh oslo db exporter password'
document_name: osh_oslo_db_exporter_password
encrypted: true
profile: hex_lower
- description: 'osh placement password'
document_name: osh_placement_password
encrypted: true
profile: hex_lower
- description: 'ucp keystone admin password'
document_name: ucp_keystone_admin_password
encrypted: true
profile: hex_lower
- description: 'ucp keystone test password'
document_name: ucp_keystone_test_password
encrypted: true
profile: hex_lower
- description: 'ucp armada keystone password'
document_name: ucp_armada_keystone_password
encrypted: true
profile: hex_lower
- description: 'ceph swift keystone password'
document_name: ceph_swift_keystone_password
encrypted: true
profile: hex_lower
- description: 'ucp airflow oslo messaging password'
document_name: ucp_airflow_oslo_messaging_password
encrypted: true
profile: hex_lower
- description: 'ucp postgres admin password'
document_name: ucp_postgres_admin_password
encrypted: true
profile: hex_lower
prompt: false
regenerable: false
- description: 'ucp postgres audit password'
document_name: ucp_postgres_audit_password
encrypted: true
profile: hex_lower
- description: 'ucp oslo db admin password'
document_name: ucp_oslo_db_admin_password
encrypted: true
profile: hex_lower
- description: 'ucp deckhand postgres password'
document_name: ucp_deckhand_postgres_password
encrypted: true
profile: hex_lower
- description: 'ucp deckhand keystone password'
document_name: ucp_deckhand_keystone_password
encrypted: true
profile: hex_lower
- description: 'ucp barbican keystone password'
document_name: ucp_barbican_keystone_password
encrypted: true
profile: hex_lower
- description: 'ucp barbican oslo db password'
document_name: ucp_barbican_oslo_db_password
encrypted: true
profile: hex_lower
- description: 'ucp oslo messaging password'
document_name: ucp_oslo_messaging_password
encrypted: true
profile: hex_lower
- description: 'ucp drydock postgres password'
document_name: ucp_drydock_postgres_password
encrypted: true
profile: hex_lower
- description: 'ucp drydock keystone password'
document_name: ucp_drydock_keystone_password
encrypted: true
profile: hex_lower
- description: 'ucp maas admin password'
document_name: ucp_maas_admin_password
encrypted: true
profile: hex_lower
- description: 'ucp maas postgres password'
document_name: ucp_maas_postgres_password
encrypted: true
profile: hex_lower
- description: 'ucp keystone oslo db password'
document_name: ucp_keystone_oslo_db_password
encrypted: true
profile: hex_lower
- description: 'ucp promenade keystone password'
document_name: ucp_promenade_keystone_password
encrypted: true
profile: hex_lower
- description: 'ucp shipyard keystone password'
document_name: ucp_shipyard_keystone_password
encrypted: true
profile: hex_lower
- description: 'ucp shipyard postgres password'
document_name: ucp_shipyard_postgres_password
encrypted: true
profile: hex_lower
- description: 'ucp airflow postgres password'
document_name: ucp_airflow_postgres_password
encrypted: true
profile: hex_lower
prompt: false
regenerable: false
- description: 'ucp postgres exporter postgres password'
document_name: ucp_postgres_exporter_postgres_password
encrypted: true
profile: hex_lower
- description: 'ucp rabbitmq erlang cookie'
document_name: ucp_rabbitmq_erlang_cookie
encrypted: true
profile: hex_lower
- description: 'ucp openstack exporter keystone password'
document_name: ucp_openstack_exporter_keystone_password
encrypted: true
profile: hex_lower
- description: 'ucp oslo db exporter password'
document_name: ucp_oslo_db_exporter_password
encrypted: true
profile: hex_lower
- description: 'maas-region-key'
document_name: maas-region-key
encrypted: true
profile: hex_lower
prompt: false
regenerable: false
- description: 'artifactory_oslo_db_sst_password'
document_name: artifactory_oslo_db_sst_password
encrypted: true
profile: hex_lower
- description: 'artifactory_oslo_db_audit_password'
document_name: artifactory_oslo_db_audit_password
encrypted: true
profile: hex_lower
- description: 'artifactory rgw s3 admin access key'
document_name: artifactory_rgw_s3_admin_access_key
encrypted: true
profile: hex_lower
- description: 'osh_addons_keystone_ranger-agent_password'
document_name: osh_addons_keystone_ranger-agent_password
encrypted: true
profile: hex_lower
- description: 'osh_addons_ranger-agent_admin_oslo_messaging_password'
document_name: osh_addons_ranger-agent_admin_oslo_messaging_password
encrypted: true
profile: hex_lower
- description: 'osh_addons_ranger-agent_oslo_db_password'
document_name: osh_addons_ranger-agent_oslo_db_password
encrypted: true
profile: hex_lower
- description: 'osh_infra_oslo_db_sst_password'
document_name: osh_infra_oslo_db_sst_password
encrypted: true
profile: hex_lower
- description: 'osh_oslo_db_sst_password'
document_name: osh_oslo_db_sst_password
encrypted: true
profile: hex_lower
- description: 'osh_oslo_db_audit_password'
document_name: osh_oslo_db_audit_password
encrypted: true
profile: hex_lower
- description: 'ro_keystone_password'
document_name: ro_keystone_password
encrypted: true
profile: hex_lower
- description: 'ro_oslo_db_password'
document_name: ro_oslo_db_password
encrypted: true
profile: hex_lower
- description: 'ro_oslo_messaging_password'
document_name: ro_oslo_messaging_password
encrypted: true
profile: hex_lower
- description: 'ucp_oslo_db_sst_password'
document_name: ucp_oslo_db_sst_password
encrypted: true
profile: hex_lower
- description: 'ucp_oslo_db_audit_password'
document_name: ucp_oslo_db_audit_password
encrypted: true
profile: hex_lower
- description: 'ucp_postgres_replica_password'
document_name: ucp_postgres_replica_password
encrypted: true
profile: hex_lower
- description: 'ucp_webhook_keystone_password'
document_name: ucp_webhook_keystone_password
encrypted: true
profile: hex_lower
# - description: 'aqua_executor'
# document_name: aqua_executor
# encrypted: true
# profile: hex_lower
# - description: 'aqua_orchestrator'
# document_name: aqua_orchestrator
# encrypted: true
# profile: hex_lower
- description: 'apiserver-encryption-key-key1'
document_name: apiserver-encryption-key-key1
encrypted: true
profile: hex_lower
type: base64
length: 32
prompt: false
regenerable: false
- description: 'apiserver-encryption-key-key2'
document_name: apiserver-encryption-key-key2
encrypted: true
profile: hex_lower
type: base64
length: 32
prompt: false
regenerable: false
- description: 'osh_keystone_ldap_mechid_password do_not_generate'
document_name: osh_keystone_ldap_mechid_password
encrypted: true
profile: hex_lower
type: passphrase
prompt: true
regenerable: false
- description: 'ceph_fsid uuid'
document_name: ceph_fsid
type: uuid
prompt: false
profile: hex_lower
regenerable: false
encrypted: true
- description: 'tenant_ceph_fsid uuid'
document_name: tenant_ceph_fsid
type: uuid
prompt: false
regenerable: false
encrypted: true
profile: hex_lower
- description: 'ucp_keystone_ldap_mechid_password do_not_generate'
document_name: ucp_keystone_ldap_mechid_password
type: passphrase
encrypted: true
profile: hex_lower
prompt: true
regenerable: false
- description: 'ipmi_admin_password do_not_generate'
document_name: ipmi_admin_password
type: passphrase
regenerable: false
encrypted: true
prompt: true
profile: hex_lower
- description: 'ubuntu_crypt_password linux-crypt-sha-512'
document_name: ubuntu_crypt_password
type: passphrase
prompt: false
profile: hex_lower
encrypted: true
- description: 'osh addons ranger-agent oslo messaging password'
document_name: osh_addons_ranger-agent_oslo_messaging_password
type: passphrase
prompt: false
encrypted: true
profile: hex_lower
- description: 'osh_addons_ranger_oslo_db_password'
document_name: osh_addons_ranger_oslo_db_password
type: passphrase
prompt: false
encrypted: true
profile: hex_lower
- description: 'osh_addons_ranger_oslo_db_sst_password'
document_name: osh_addons_ranger_oslo_db_sst_password
type: passphrase
prompt: false
encrypted: true
profile: hex_lower
- description: 'osh_addons_ranger_admin_oslo_db_password'
document_name: osh_addons_ranger_admin_oslo_db_password
type: passphrase
prompt: false
encrypted: true
profile: hex_lower
- description: 'osh_addons_ranger_agent_rabbitmq_erlang_cookie'
document_name: osh_addons_ranger_agent_rabbitmq_erlang_cookie
type: passphrase
prompt: false
encrypted: true
profile: hex_lower
...

View File

@ -0,0 +1,193 @@
---
# PKI catalog for the calico-etcd cluster: one client/server certificate and
# one peer certificate per master node, plus anchor and calico-node client
# certs. Hostnames/IPs below are stubs replaced via Deckhand substitution.
schema: promenade/PKICatalog/v1
metadata:
  schema: metadata/Document/v1
  name: calico-etcd
  labels:
    name: calico-etcd-type
  layeringDefinition:
    abstract: false
    layer: type
  substitutions:
    # Service IP substitutions
    - src:
        schema: pegleg/CommonAddresses/v1
        name: common-addresses
        path: .calico.etcd.service_ip
      dest:
        - path: .certificate_authorities.calico-etcd.certificates[0].hosts[2]
        - path: .certificate_authorities.calico-etcd.certificates[1].hosts[2]
        - path: .certificate_authorities.calico-etcd.certificates[2].hosts[2]
    # Substitutions for master 0
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[0].hostname
      dest:
        - path: .certificate_authorities.calico-etcd.certificates[0].common_name
          pattern: HOSTNAME
        - path: .certificate_authorities.calico-etcd.certificates[0].hosts[3]
        - path: .certificate_authorities.calico-etcd-peer.certificates[0].common_name
          pattern: HOSTNAME
        - path: .certificate_authorities.calico-etcd-peer.certificates[0].hosts[2]
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[0].ip.oam
      dest:
        - path: .certificate_authorities.calico-etcd.certificates[0].hosts[4]
        - path: .certificate_authorities.calico-etcd-peer.certificates[0].hosts[3]
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[0].ip.ksn
      dest:
        - path: .certificate_authorities.calico-etcd.certificates[0].hosts[5]
        - path: .certificate_authorities.calico-etcd-peer.certificates[0].hosts[4]
    # Substitutions for master 1
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[1].hostname
      dest:
        - path: .certificate_authorities.calico-etcd.certificates[1].common_name
          pattern: HOSTNAME
        - path: .certificate_authorities.calico-etcd.certificates[1].hosts[3]
        - path: .certificate_authorities.calico-etcd-peer.certificates[1].common_name
          pattern: HOSTNAME
        - path: .certificate_authorities.calico-etcd-peer.certificates[1].hosts[2]
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[1].ip.oam
      dest:
        - path: .certificate_authorities.calico-etcd.certificates[1].hosts[4]
        - path: .certificate_authorities.calico-etcd-peer.certificates[1].hosts[3]
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[1].ip.ksn
      dest:
        - path: .certificate_authorities.calico-etcd.certificates[1].hosts[5]
        - path: .certificate_authorities.calico-etcd-peer.certificates[1].hosts[4]
    # Substitutions for master 2
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[2].hostname
      dest:
        - path: .certificate_authorities.calico-etcd.certificates[2].common_name
          pattern: HOSTNAME
        - path: .certificate_authorities.calico-etcd.certificates[2].hosts[3]
        - path: .certificate_authorities.calico-etcd-peer.certificates[2].common_name
          pattern: HOSTNAME
        - path: .certificate_authorities.calico-etcd-peer.certificates[2].hosts[2]
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[2].ip.oam
      dest:
        - path: .certificate_authorities.calico-etcd.certificates[2].hosts[4]
        - path: .certificate_authorities.calico-etcd-peer.certificates[2].hosts[3]
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[2].ip.ksn
      dest:
        - path: .certificate_authorities.calico-etcd.certificates[2].hosts[5]
        - path: .certificate_authorities.calico-etcd-peer.certificates[2].hosts[4]
  storagePolicy: cleartext
data:
  certificate_authorities:
    calico-etcd:
      # NOTE(review): description previously read "Kubernetes's etcd servers",
      # an apparent copy-paste from the kubernetes-etcd catalog.
      description: Certificates for Calico's etcd servers
      certificates:
        - document_name: calico-etcd-master-0
          common_name: calico-etcd-HOSTNAME
          hosts:
            - 127.0.0.1
            - localhost
            # NOTE(mb874d): These are stubs and get replaced via substitution
            - CALICO_ETCD_SERVICE_IP
            - HOSTNAME
            - OAM_IP
            - KSN_IP
          kubernetes_service_names:
            - calico-etcd.kube-system.svc.cluster.local
        - document_name: calico-etcd-master-1
          common_name: calico-etcd-HOSTNAME
          hosts:
            - 127.0.0.1
            - localhost
            # NOTE(mb874d): These are stubs and get replaced via substitution
            - CALICO_ETCD_SERVICE_IP
            - HOSTNAME
            - OAM_IP
            - KSN_IP
          kubernetes_service_names:
            - calico-etcd.kube-system.svc.cluster.local
        - document_name: calico-etcd-master-2
          common_name: calico-etcd-HOSTNAME
          hosts:
            - 127.0.0.1
            - localhost
            # NOTE(mb874d): These are stubs and get replaced via substitution
            - CALICO_ETCD_SERVICE_IP
            - HOSTNAME
            - OAM_IP
            - KSN_IP
          kubernetes_service_names:
            - calico-etcd.kube-system.svc.cluster.local
        - document_name: calico-etcd-anchor
          common_name: anchor
        - document_name: calico-node
          common_name: calico-node
    calico-etcd-peer:
      certificates:
        - document_name: calico-etcd-master-0-peer
          common_name: calico-etcd-HOSTNAME-peer
          hosts:
            - 127.0.0.1
            - localhost
            # NOTE(mb874d): These are stubs and get replaced via substitution
            - HOSTNAME
            - OAM_IP
            - KSN_IP
        - document_name: calico-etcd-master-1-peer
          common_name: calico-etcd-HOSTNAME-peer
          hosts:
            - 127.0.0.1
            - localhost
            # NOTE(mb874d): These are stubs and get replaced via substitution
            - HOSTNAME
            - OAM_IP
            - KSN_IP
        - document_name: calico-etcd-master-2-peer
          common_name: calico-etcd-HOSTNAME-peer
          hosts:
            - 127.0.0.1
            - localhost
            # NOTE(mb874d): These are stubs and get replaced via substitution
            - HOSTNAME
            - OAM_IP
            - KSN_IP
...

View File

@ -0,0 +1,14 @@
---
# Non-certificate keypairs generated by the PKI catalog.
schema: promenade/PKICatalog/v1
metadata:
  schema: metadata/Document/v1
  name: keypairs
  layeringDefinition:
    abstract: false
    layer: type
  storagePolicy: cleartext
data:
  keypairs:
    - name: service-account
      description: Service account signing key for use by Kubernetes controller-manager.
...

View File

@ -0,0 +1,248 @@
---
# PKI catalog for the kubernetes-etcd cluster: client/server and peer
# certificates for the genesis node and the three masters, plus apiserver,
# anchor, and apiserver-webhook etcd client certificates. Hostnames/IPs
# below are stubs replaced via Deckhand substitution.
schema: promenade/PKICatalog/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-etcd
  labels:
    name: kubernetes-etcd-type
  layeringDefinition:
    abstract: false
    layer: type
  substitutions:
    # Service IP substitutions
    - src:
        schema: pegleg/CommonAddresses/v1
        name: common-addresses
        path: .kubernetes.etcd_service_ip
      dest:
        - path: .certificate_authorities.kubernetes-etcd.certificates[0].hosts[2]
        - path: .certificate_authorities.kubernetes-etcd.certificates[1].hosts[2]
        - path: .certificate_authorities.kubernetes-etcd.certificates[2].hosts[2]
        - path: .certificate_authorities.kubernetes-etcd.certificates[3].hosts[2]
    # Substitutions for bootstrapping Genesis etcd
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .genesis.hostname
      dest:
        - path: .certificate_authorities.kubernetes-etcd.certificates[0].hosts[3]
        - path: .certificate_authorities.kubernetes-etcd-peer.certificates[0].hosts[2]
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .genesis.ip.oam
      dest:
        - path: .certificate_authorities.kubernetes-etcd.certificates[0].hosts[4]
        - path: .certificate_authorities.kubernetes-etcd-peer.certificates[0].hosts[3]
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .genesis.ip.ksn
      dest:
        - path: .certificate_authorities.kubernetes-etcd.certificates[0].hosts[5]
        - path: .certificate_authorities.kubernetes-etcd-peer.certificates[0].hosts[4]
    # Substitutions for master 0
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[0].hostname
      dest:
        - path: .certificate_authorities.kubernetes-etcd.certificates[1].common_name
          pattern: HOSTNAME
        - path: .certificate_authorities.kubernetes-etcd.certificates[1].hosts[3]
        - path: .certificate_authorities.kubernetes-etcd-peer.certificates[1].common_name
          pattern: HOSTNAME
        - path: .certificate_authorities.kubernetes-etcd-peer.certificates[1].hosts[2]
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[0].ip.oam
      dest:
        - path: .certificate_authorities.kubernetes-etcd.certificates[1].hosts[4]
        - path: .certificate_authorities.kubernetes-etcd-peer.certificates[1].hosts[3]
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[0].ip.ksn
      dest:
        - path: .certificate_authorities.kubernetes-etcd.certificates[1].hosts[5]
        - path: .certificate_authorities.kubernetes-etcd-peer.certificates[1].hosts[4]
    # Substitutions for master 1
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[1].hostname
      dest:
        - path: .certificate_authorities.kubernetes-etcd.certificates[2].common_name
          pattern: HOSTNAME
        - path: .certificate_authorities.kubernetes-etcd.certificates[2].hosts[3]
        - path: .certificate_authorities.kubernetes-etcd-peer.certificates[2].common_name
          pattern: HOSTNAME
        - path: .certificate_authorities.kubernetes-etcd-peer.certificates[2].hosts[2]
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[1].ip.oam
      dest:
        - path: .certificate_authorities.kubernetes-etcd.certificates[2].hosts[4]
        - path: .certificate_authorities.kubernetes-etcd-peer.certificates[2].hosts[3]
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[1].ip.ksn
      dest:
        - path: .certificate_authorities.kubernetes-etcd.certificates[2].hosts[5]
        - path: .certificate_authorities.kubernetes-etcd-peer.certificates[2].hosts[4]
    # Substitutions for master 2
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[2].hostname
      dest:
        - path: .certificate_authorities.kubernetes-etcd.certificates[3].common_name
          pattern: HOSTNAME
        - path: .certificate_authorities.kubernetes-etcd.certificates[3].hosts[3]
        - path: .certificate_authorities.kubernetes-etcd-peer.certificates[3].common_name
          pattern: HOSTNAME
        - path: .certificate_authorities.kubernetes-etcd-peer.certificates[3].hosts[2]
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[2].ip.oam
      dest:
        - path: .certificate_authorities.kubernetes-etcd.certificates[3].hosts[4]
        - path: .certificate_authorities.kubernetes-etcd-peer.certificates[3].hosts[3]
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[2].ip.ksn
      dest:
        - path: .certificate_authorities.kubernetes-etcd.certificates[3].hosts[5]
        - path: .certificate_authorities.kubernetes-etcd-peer.certificates[3].hosts[4]
  storagePolicy: cleartext
data:
  certificate_authorities:
    kubernetes-etcd:
      description: Certificates for Kubernetes's etcd servers
      certificates:
        - document_name: kubernetes-etcd-genesis
          common_name: kubernetes-etcd-genesis
          hosts:
            - 127.0.0.1
            - localhost
            # NOTE(mb874d): These are stubs and get replaced via substitution
            - KUBERNETES_ETCD_SERVICE_IP
            - GENESIS_HOSTNAME
            - GENESIS_OAM_IP
            - GENESIS_KSN_IP
          kubernetes_service_names:
            - kubernetes-etcd.kube-system.svc.cluster.local
        - document_name: kubernetes-etcd-master-0
          common_name: kubernetes-etcd-HOSTNAME
          hosts:
            - 127.0.0.1
            - localhost
            # NOTE(mb874d): These are stubs and get replaced via substitution
            - KUBERNETES_ETCD_SERVICE_IP
            - HOSTNAME
            - OAM_IP
            - KSN_IP
          kubernetes_service_names:
            - kubernetes-etcd.kube-system.svc.cluster.local
        - document_name: kubernetes-etcd-master-1
          common_name: kubernetes-etcd-HOSTNAME
          hosts:
            - 127.0.0.1
            - localhost
            # NOTE(mb874d): These are stubs and get replaced via substitution
            - KUBERNETES_ETCD_SERVICE_IP
            - HOSTNAME
            - OAM_IP
            - KSN_IP
          kubernetes_service_names:
            - kubernetes-etcd.kube-system.svc.cluster.local
        - document_name: kubernetes-etcd-master-2
          common_name: kubernetes-etcd-HOSTNAME
          hosts:
            - 127.0.0.1
            - localhost
            # NOTE(mb874d): These are stubs and get replaced via substitution
            - KUBERNETES_ETCD_SERVICE_IP
            - HOSTNAME
            - OAM_IP
            - KSN_IP
          kubernetes_service_names:
            - kubernetes-etcd.kube-system.svc.cluster.local
        - document_name: apiserver-etcd
          description: etcd client certificate for use by Kubernetes apiserver
          common_name: apiserver
        - document_name: kubernetes-etcd-anchor
          description: anchor
          common_name: anchor
        - document_name: apiserver-webhook-etcd
          description: etcd client certificate for use by Kubernetes apiserver with webhook
          common_name: apiserver_webhook
    kubernetes-etcd-peer:
      certificates:
        - document_name: kubernetes-etcd-genesis-peer
          common_name: kubernetes-etcd-genesis-peer
          hosts:
            - 127.0.0.1
            - localhost
            # NOTE(mb874d): These are stubs and get replaced via substitution
            - GENESIS_HOSTNAME
            - GENESIS_OAM_IP
            - GENESIS_KSN_IP
        - document_name: kubernetes-etcd-master-0-peer
          common_name: kubernetes-etcd-HOSTNAME-peer
          hosts:
            - 127.0.0.1
            - localhost
            # NOTE(mb874d): These are stubs and get replaced via substitution
            - HOSTNAME
            - OAM_IP
            - KSN_IP
        - document_name: kubernetes-etcd-master-1-peer
          common_name: kubernetes-etcd-HOSTNAME-peer
          hosts:
            - 127.0.0.1
            - localhost
            # NOTE(mb874d): These are stubs and get replaced via substitution
            - HOSTNAME
            - OAM_IP
            - KSN_IP
        - document_name: kubernetes-etcd-master-2-peer
          common_name: kubernetes-etcd-HOSTNAME-peer
          hosts:
            - 127.0.0.1
            - localhost
            # NOTE(mb874d): These are stubs and get replaced via substitution
            - HOSTNAME
            - OAM_IP
            - KSN_IP
...

View File

@ -0,0 +1,77 @@
---
# PKI catalog for Kubernetes control-plane components (apiserver, kubelet on
# genesis, scheduler, controller-manager, admin/armada clients, webhook).
schema: promenade/PKICatalog/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-control-plane
  labels:
    name: kubernetes-control-plane-type
  layeringDefinition:
    abstract: false
    layer: type
  substitutions:
    - src:
        schema: pegleg/CommonAddresses/v1
        name: common-addresses
        path: .kubernetes.api_service_ip
      dest:
        path: .certificate_authorities.kubernetes.certificates[0].hosts[2]
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .genesis.hostname
      dest:
        - path: .certificate_authorities.kubernetes.certificates[1].hosts[0]
        - path: .certificate_authorities.kubernetes.certificates[1].common_name
          pattern: HOSTNAME
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .genesis.ip.ksn
      dest:
        - path: .certificate_authorities.kubernetes.certificates[1].hosts[1]
  storagePolicy: cleartext
data:
  certificate_authorities:
    kubernetes:
      description: CA for Kubernetes components
      certificates:
        - document_name: apiserver
          description: Service certificate for Kubernetes apiserver
          common_name: apiserver
          hosts:
            - localhost
            - 127.0.0.1
            # NOTE(review): stub replaced via substitution above.
            - KUBERNETES_SERVICE_IP
          kubernetes_service_names:
            - kubernetes.default.svc.cluster.local
        - document_name: kubelet-genesis
          common_name: system:node:HOSTNAME
          hosts:
            - GENESIS_HOSTNAME
            - GENESIS_KSN_IP
          groups:
            - system:nodes
        - document_name: scheduler
          description: Service certificate for Kubernetes scheduler
          common_name: system:kube-scheduler
        - document_name: controller-manager
          description: certificate for controller-manager
          common_name: system:kube-controller-manager
        - document_name: admin
          common_name: admin
          groups:
            - system:masters
        - document_name: armada
          common_name: armada
          groups:
            - system:masters
        - document_name: apiserver-webhook-kubelet
          description: Cert for kubelet access from apiserver w/ webhook
          common_name: apiserver-webhook
...

View File

@ -0,0 +1,27 @@
---
# PKI catalog for the Keystone webhook and the webhook-enabled apiserver.
schema: promenade/PKICatalog/v1
metadata:
  schema: metadata/Document/v1
  name: webhook-apiserver
  layeringDefinition:
    abstract: false
    layer: type
  storagePolicy: cleartext
data:
  certificate_authorities:
    keystone-webhook:
      description: Certificates for the Keystone Webhook
      certificates:
        - document_name: keystone-webhook-server
          description: Server cert for Keystone Webhook
          common_name: keystone-webhook
          hosts:
            - 127.0.0.1
            - localhost
    apiserver-webhook:
      description: Certificates for APIservers w/ Webhook
      certificates:
        - document_name: apiserver-webhook-server
          description: Server cert for APIserver w/ webhook
          common_name: apiserver-webhook
...

View File

@ -0,0 +1,129 @@
---
# The primary control plane host profile for Airship for Intel S2600WT, and
# should not need to be altered if you are using matching HW. The active
# participants in the Ceph cluster run on this profile. Other control plane
# services are not affected by primary vs secondary designation.
schema: drydock/HostProfile/v1
metadata:
  schema: metadata/Document/v1
  name: cp-intel-s2600wt
  storagePolicy: cleartext
  layeringDefinition:
    abstract: false
    layer: type
    parentSelector:
      hosttype: nc-cp-adv
    actions:
      - method: replace
        path: .interfaces
      - method: replace
        path: .storage
      - method: merge
        path: .
  substitutions:
    - dest:
        path: .oob.credential
      src:
        schema: deckhand/Passphrase/v1
        name: ipmi_admin_password
        path: .
    # NOTE(review): .interfaces.data1 has no sriov block in .data below;
    # presumably this substitution creates it — confirm against drydock.
    - dest:
        path: .interfaces.data1.sriov.device
      src:
        schema: drydock/HardwareProfile/v1
        name: intel-s2600wt
        path: .device_aliases.data_nic1.address
    - dest:
        path: .interfaces.data2.sriov.device
      src:
        schema: drydock/HardwareProfile/v1
        name: intel-s2600wt
        path: .device_aliases.data_nic2.address
data:
  hardware_profile: intel-s2600wt
  oob:
    type: 'ipmi'
    network: 'oob'
    # Not used. Keep for lint purpose
    account: 'tier4'
  primary_network: dmz
  interfaces:
    dmz:
      device_link: dmz
      slaves:
        - ctrl_nic1
      networks:
        - oam
    pxe:
      device_link: pxe
      slaves:
        - ctrl_nic2
      networks:
        - pxe
    data1:
      device_link: data1
      slaves:
        - data_nic1
      networks:
        - calico
        - overlay
    data2:
      device_link: data2
      slaves:
        - data_nic2
      networks:
        - storage
        - routable
      sriov:
        num_vfs: 32
        promisc: false
      ovs_dpdk:
        vf_index: 0
      pci_whitelist:
        trusted: false
  storage:
    physical_devices:
      bootdisk:
        labels:
          bootdrive: 'true'
        partitions:
          - name: 'root'
            size: '30g'
            bootable: true
            filesystem:
              mountpoint: '/'
              fstype: 'ext4'
              mount_options: 'defaults'
          - name: 'boot'
            size: '1g'
            filesystem:
              mountpoint: '/boot'
              fstype: 'ext4'
              mount_options: 'defaults'
          - name: 'var_log'
            size: '100g'
            filesystem:
              mountpoint: '/var/log'
              fstype: 'ext4'
              mount_options: 'defaults'
          - name: 'var'
            size: '>100g'
            filesystem:
              mountpoint: '/var'
              fstype: 'ext4'
              mount_options: 'defaults'
  platform:
    kernel_params:
      # NOTE: For the hugepagez config, must use real value here
      # because this list is also consumed by pre-genesis.sh to
      # configure hugepages for genesis node, and unlike drydock
      # it does not have capability to perform the necessarily
      # substitution at run time. See nc-p1-adv profile for
      # preferred substitution pattern.
      hugepagesz: '1G'
      hugepages: '12'
  metadata:
    owner_data:
      openstack-ranger-agent: enabled
...

View File

@ -0,0 +1,128 @@
---
# The data plane host profile for Airship for Intel S2600WT, and should
# not need to be altered if you are using matching HW. The host profile is setup
# for cpu isolation (for nova pinning), hugepages, and sr-iov.
schema: drydock/HostProfile/v1
metadata:
  schema: metadata/Document/v1
  name: dp-intel-s2600wt
  storagePolicy: cleartext
  layeringDefinition:
    abstract: false
    layer: type
    parentSelector:
      hosttype: nc-p1-adv
    actions:
      - method: replace
        path: .interfaces
      - method: replace
        path: .storage
      - method: merge
        path: .
  substitutions:
    - dest:
        path: .oob.credential
      src:
        schema: deckhand/Passphrase/v1
        name: ipmi_admin_password
        path: .
    - dest:
        path: .interfaces.data2.sriov.device
      src:
        schema: drydock/HardwareProfile/v1
        name: intel-s2600wt
        path: .device_aliases.data_nic2.address
    - dest:
        - path: .platform.kernel_params.isolcpus
      src:
        schema: drydock/HardwareProfile/v1
        name: intel-s2600wt
        path: .cpu_sets.kvm
    - dest:
        - path: .platform.kernel_params.rcu_nocbs
      src:
        schema: drydock/HardwareProfile/v1
        name: intel-s2600wt
        path: .cpu_sets.rcu_nocbs
    - dest:
        path: .platform.kernel_params.dpdk-socket-mem
      src:
        schema: drydock/HardwareProfile/v1
        name: intel-s2600wt
        path: .hugepages.dpdk.socket-mem
data:
  hardware_profile: intel-s2600wt
  primary_network: dmz
  interfaces:
    dmz:
      device_link: dmz
      slaves:
        - ctrl_nic1
      networks:
        - oam
    pxe:
      device_link: pxe
      slaves:
        - ctrl_nic2
      networks:
        - pxe
    data1:
      device_link: data1
      slaves:
        - data_nic1
      networks:
        - calico
        - overlay
    data2:
      device_link: data2
      slaves:
        - data_nic2
      networks:
        - storage
        - routable
      sriov:
        num_vfs: 32
        promisc: false
      ovs_dpdk:
        vf_index: 0
      pci_whitelist:
        trusted: false
  storage:
    physical_devices:
      bootdisk:
        labels:
          bootdrive: 'true'
        partitions:
          - name: 'root'
            size: '30g'
            bootable: true
            filesystem:
              mountpoint: '/'
              fstype: 'ext4'
              mount_options: 'defaults'
          - name: 'boot'
            size: '1g'
            filesystem:
              mountpoint: '/boot'
              fstype: 'ext4'
              mount_options: 'defaults'
          - name: 'log'
            size: '100g'
            filesystem:
              mountpoint: '/var/log'
              fstype: 'ext4'
              mount_options: 'defaults'
          - name: 'var'
            size: '100g'
            filesystem:
              mountpoint: '/var'
              fstype: 'ext4'
              mount_options: 'defaults'
          - name: 'nova'
            size: '>200g'
            filesystem:
              mountpoint: '/var/lib/nova'
              fstype: 'ext4'
              mount_options: 'defaults'
...

View File

@ -0,0 +1,42 @@
---
# Adds marker files for the LMA data directories on top of the global
# host-system file list (merged into parent entries 11-13).
schema: promenade/HostSystem/v1
metadata:
  schema: metadata/Document/v1
  name: host-system
  replacement: true
  layeringDefinition:
    abstract: false
    layer: type
    parentSelector:
      name: host-system-global
    actions:
      - method: merge
        path: .data.files[11]
      - method: merge
        path: .data.files[12]
      - method: merge
        path: .data.files[13]
  storagePolicy: cleartext
data:
  files:
    # placeholders for merge with parent document.
    - {}  # 0
    - {}  # 1
    - {}  # 2
    - {}  # 3
    - {}  # 4
    - {}  # 5
    - {}  # 6
    - {}  # 7
    - {}  # 8
    - {}  # 9
    - {}  # 10
    # NOTE(review): 0640 relies on YAML 1.1 octal parsing (-> 0o640);
    # keep unquoted as the consumer expects a numeric mode.
    - path: /srv/elasticsearch-data/.info
      content: "elasticsearch-data folder"
      mode: 0640
    - path: /srv/elasticsearch-master/.info
      content: "elasticsearch-master folder"
      mode: 0640
    - path: /srv/prometheus-data/.info
      content: "prometheus-data folder"
      mode: 0640

View File

@ -0,0 +1,37 @@
---
# Type-layer overrides for the calico chart: node logging/cluster settings
# and IP-in-IP pool configuration.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-calico
  labels:
    name: kubernetes-calico
    layer: type
  layeringDefinition:
    abstract: false
    layer: type
    parentSelector:
      name: kubernetes-calico-global
    actions:
      - method: merge
        path: .
  storagePolicy: cleartext
data:
  values:
    conf:
      node:
        CALICO_STARTUP_LOGLEVEL: INFO
        CLUSTER_TYPE: "k8s,bgp"
        ETCD_CA_CERT_FILE: /etc/calico/pki/ca
        ETCD_CERT_FILE: /etc/calico/pki/crt
        ETCD_KEY_FILE: /etc/calico/pki/key
        WAIT_FOR_STORAGE: "true"
    networking:
      settings:
        mesh: "on"
        ippool:
          ipip:
            enabled: "true"
            mode: "Always"
          nat_outgoing: "true"
          disabled: "false"
...

View File

@ -0,0 +1,113 @@
---
# Type-layer overrides for the calico-etcd chart: wires each master's
# hostname and client/peer TLS material into .values.nodes[].
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-calico-etcd
  layeringDefinition:
    abstract: false
    layer: type
    parentSelector:
      name: kubernetes-calico-etcd-global
    actions:
      - method: merge
        path: .
  storagePolicy: cleartext
  substitutions:
    # Master 0
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[0].hostname
      dest:
        path: .values.nodes[0].name
    - src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-master-0
        path: .
      dest:
        path: .values.nodes[0].tls.client.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-master-0
        path: .
      dest:
        path: .values.nodes[0].tls.client.key
    - src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-master-0-peer
        path: .
      dest:
        path: .values.nodes[0].tls.peer.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-master-0-peer
        path: .
      dest:
        path: .values.nodes[0].tls.peer.key
    # Master 1
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[1].hostname
      dest:
        path: .values.nodes[1].name
    - src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-master-1
        path: .
      dest:
        path: .values.nodes[1].tls.client.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-master-1
        path: .
      dest:
        path: .values.nodes[1].tls.client.key
    - src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-master-1-peer
        path: .
      dest:
        path: .values.nodes[1].tls.peer.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-master-1-peer
        path: .
      dest:
        path: .values.nodes[1].tls.peer.key
    # Master 2
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[2].hostname
      dest:
        path: .values.nodes[2].name
    - src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-master-2
        path: .
      dest:
        path: .values.nodes[2].tls.client.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-master-2
        path: .
      dest:
        path: .values.nodes[2].tls.client.key
    - src:
        schema: deckhand/Certificate/v1
        name: calico-etcd-master-2-peer
        path: .
      dest:
        path: .values.nodes[2].tls.peer.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: calico-etcd-master-2-peer
        path: .
      dest:
        path: .values.nodes[2].tls.peer.key
data: {}
...

View File

@ -0,0 +1,136 @@
---
# Type-layer network policy: Calico GlobalNetworkPolicy rules allowing UCP
# egress to the calico-etcd and Kubernetes apiserver endpoints. BGP-peering
# rules are retained below as comments for sites that need them.
schema: nc/Policy/v1
metadata:
  schema: metadata/Document/v1
  name: type-policy
  labels:
    name: type-policy
  layeringDefinition:
    abstract: false
    layer: type
    parentSelector:
      name: global-policy
    actions:
      - method: merge
        path: .
  substitutions:
    # bgp peering only
    # - src:
    #     schema: drydock/Network/v1
    #     name: oam
    #     path: .cidr
    #   dest:
    #     - path: .policy.sitelevel.rules[0].spec.egress[0].source.nets[0]
    #       pattern: OAM_CIDR
    #     - path: .policy.sitelevel.rules[1].spec.ingress[0].destination.nets[0]
    #       pattern: OAM_CIDR
    # bgp peering only
    # - src:
    #     schema: pegleg/CommonAddresses/v1
    #     name: common-addresses
    #     path: .calico.bgp.ipv4.peers[0]
    #   dest:
    #     - path: .policy.sitelevel.rules[0].spec.egress[0].destination.nets[0]
    #       pattern: CALICO_BGP0_IP
    #     - path: .policy.sitelevel.rules[1].spec.ingress[0].source.nets[0]
    #       pattern: CALICO_BGP0_IP
    # - src:
    #     schema: pegleg/CommonAddresses/v1
    #     name: common-addresses
    #     path: .calico.bgp.ipv4.peers[1]
    #   dest:
    #     - path: .policy.sitelevel.rules[0].spec.egress[0].destination.nets[1]
    #       pattern: CALICO_BGP1_IP
    #     - path: .policy.sitelevel.rules[1].spec.ingress[0].source.nets[1]
    #       pattern: CALICO_BGP1_IP
    - src:
        schema: drydock/Network/v1
        name: calico
        path: .cidr
      dest:
        - path: .policy.sitelevel.rules[0].spec.egress[0].destination.nets[0]
          pattern: CALICO_CIDR
        - path: .policy.sitelevel.rules[1].spec.egress[0].destination.nets[0]
          pattern: CALICO_CIDR
  storagePolicy: cleartext
data:
  policy:
    sitelevel:
      priority: 5
      rules:
        # bgp peering only
        # - apiVersion: projectcalico.org/v3
        #   kind: GlobalNetworkPolicy
        #   metadata:
        #     name: nc1-bgp-peering-egress
        #   spec:
        #     order: 21
        #     selector: host in { 'nc-control', 'nc-compute' } && intf-alias == 'oam'
        #     egress:
        #       - action: Allow
        #         protocol: TCP
        #         source:
        #           nets: ["OAM_CIDR"]
        #         destination:
        #           nets: ["CALICO_BGP0_IP/32","CALICO_BGP1_IP/32"]
        #           ports:
        #             - 179
        #     doNotTrack: false
        #     preDNAT: false
        #     applyOnForward: true
        # - apiVersion: projectcalico.org/v3
        #   kind: GlobalNetworkPolicy
        #   metadata:
        #     name: nc1-bgp-peering-ingress
        #   spec:
        #     order: 22
        #     selector: host in { 'nc-control', 'nc-compute' } && intf-alias == 'oam'
        #     ingress:
        #       - action: Allow
        #         protocol: TCP
        #         source:
        #           nets: ["CALICO_BGP0_IP/32","CALICO_BGP1_IP/32"]
        #         destination:
        #           nets: ["OAM_CIDR"]
        #           ports:
        #             - 179
        #     doNotTrack: false
        #     preDNAT: false
        #     applyOnForward: true
        # rule 2: all UCP containers should allow outgoing TCP connections to the calico etcd IP / ports
        - apiVersion: projectcalico.org/v3
          kind: GlobalNetworkPolicy
          metadata:
            name: ucp-calico-etcd
          spec:
            selector: "host in { 'nc-control', 'nc-compute' } && intf-alias == 'oam'"
            order: 0
            egress:
              - action: Allow
                protocol: TCP
                destination:
                  nets:
                    - "CALICO_CIDR"
                  ports: [6666, 6667]
            # doNotTrack/preDNAT/applyOnForward are spec-level fields in the
            # Calico v3 GlobalNetworkPolicy schema.
            doNotTrack: false
            preDNAT: false
            applyOnForward: true
        # rule 3: allow egress to the Kubernetes apiservers
        - apiVersion: projectcalico.org/v3
          kind: GlobalNetworkPolicy
          metadata:
            name: kubernetes-apiserver-service
          spec:
            selector: "host in { 'nc-control', 'nc-compute' } && intf-alias == 'oam'"
            order: 0
            egress:
              - action: Allow
                protocol: TCP
                destination:
                  nets:
                    - "CALICO_CIDR"
                  ports: [6443]
            doNotTrack: false
            preDNAT: false
...

View File

@ -0,0 +1,14 @@
---
# Chart group that deploys coredns as a daemonset (scaled-out variant).
schema: armada/ChartGroup/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-dns-scaled
  layeringDefinition:
    abstract: false
    layer: type
  storagePolicy: cleartext
data:
  description: Scale out coredns to a daemonset
  chart_group:
    - coredns-scaled
...

View File

@ -0,0 +1,43 @@
---
# Overrides the global coredns chart to deploy a daemonset instead of a
# deployment; the sample Corefile override is retained below as comments.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: coredns-scaled
  layeringDefinition:
    abstract: false
    layer: type
    parentSelector:
      name: coredns-global
    actions:
      - method: merge
        path: .
  labels:
    name: coredns-scaled-type
  storagePolicy: cleartext
data:
  values:
    # conf:
    #   coredns:
    #     corefile: |
    #       .:53 {
    #         errors
    #         loadbalance round_robin
    #         health
    #         autopath @kubernetes
    #         kubernetes CLUSTER_DOMAIN SERVICE_CIDR POD_CIDR {
    #           pods insecure
    #           fallthrough in-addr.arpa ip6.arpa
    #           upstream UPSTREAM1
    #         }
    #         prometheus :9253
    #         forward . UPSTREAM1
    #         cache 30
    #         log . {
    #           class denial error
    #         }
    #       }
    manifests:
      # scale out a daemonset in addition to the deployment
      daemonset: true
      deployment: false
...

View File

@ -0,0 +1,112 @@
---
# Type-layer overrides for the kubernetes-etcd chart: wires each master's
# hostname and client/peer TLS material into .values.nodes[].
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: kubernetes-etcd
  layeringDefinition:
    abstract: false
    layer: type
    parentSelector:
      name: kubernetes-etcd-global
    actions:
      - method: merge
        path: .
  storagePolicy: cleartext
  substitutions:
    # Master 0
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[0].hostname
      dest:
        path: .values.nodes[0].name
    - src:
        schema: deckhand/Certificate/v1
        name: kubernetes-etcd-master-0
        path: .
      dest:
        path: .values.nodes[0].tls.client.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: kubernetes-etcd-master-0
        path: .
      dest:
        path: .values.nodes[0].tls.client.key
    - src:
        schema: deckhand/Certificate/v1
        name: kubernetes-etcd-master-0-peer
        path: .
      dest:
        path: .values.nodes[0].tls.peer.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: kubernetes-etcd-master-0-peer
        path: .
      dest:
        path: .values.nodes[0].tls.peer.key
    # Master 1
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[1].hostname
      dest:
        path: .values.nodes[1].name
    - src:
        schema: deckhand/Certificate/v1
        name: kubernetes-etcd-master-1
        path: .
      dest:
        path: .values.nodes[1].tls.client.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: kubernetes-etcd-master-1
        path: .
      dest:
        path: .values.nodes[1].tls.client.key
    - src:
        schema: deckhand/Certificate/v1
        name: kubernetes-etcd-master-1-peer
        path: .
      dest:
        path: .values.nodes[1].tls.peer.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: kubernetes-etcd-master-1-peer
        path: .
      dest:
        path: .values.nodes[1].tls.peer.key
    # Master 2
    - src:
        schema: nc/ControlPlaneAddresses/v1
        name: control-plane-addresses
        path: .masters[2].hostname
      dest:
        path: .values.nodes[2].name
    - src:
        schema: deckhand/Certificate/v1
        name: kubernetes-etcd-master-2
        path: .
      dest:
        path: .values.nodes[2].tls.client.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: kubernetes-etcd-master-2
        path: .
      dest:
        path: .values.nodes[2].tls.client.key
    - src:
        schema: deckhand/Certificate/v1
        name: kubernetes-etcd-master-2-peer
        path: .
      dest:
        path: .values.nodes[2].tls.peer.cert
    - src:
        schema: deckhand/CertificateKey/v1
        name: kubernetes-etcd-master-2-peer
        path: .
      dest:
        path: .values.nodes[2].tls.peer.key
data: {}
...

View File

@ -0,0 +1,37 @@
---
# The purpose of this file is to define the environment-specific public-facing
# VIP for the ingress controller
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  replacement: true
  name: ingress-kube-system
  layeringDefinition:
    abstract: false
    layer: type
    parentSelector:
      ingress: kube-system
    actions:
      - method: merge
        path: .
  substitutions:
    - src:
        schema: pegleg/CommonAddresses/v1
        name: common-addresses
        path: .calico.bgp.ipv4.ingress_vip
      dest:
        path: .values.network.vip.addr
  storagePolicy: cleartext
data:
  values:
    network:
      ingress:
        disable-ipv6: 'true'
      vip:
        manage: true
        interface: ingress0
    conf:
      ingress:
        # The default number is set to the number of cpus, which is 88 in the Intel lab.
        worker-processes: "10"
...

View File

@ -0,0 +1,29 @@
---
# Cruiserlite resource overrides for the grafana chart.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  replacement: true
  name: grafana
  labels:
    name: grafana-cruiserlite
  layeringDefinition:
    abstract: false
    layer: type
    parentSelector:
      name: grafana-global
    actions:
      - method: merge
        path: .
  storagePolicy: cleartext
data:
  values:
    pod:
      resources:
        grafana:
          requests:
            memory: "128Mi"
            cpu: "100m"
          limits:
            memory: "1024Mi"
            cpu: "2000m"
...

View File

@ -0,0 +1,36 @@
---
# Cruiserlite resource overrides for the kibana chart.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  replacement: true
  name: kibana
  labels:
    name: kibana-cruiserlite
  layeringDefinition:
    abstract: false
    layer: type
    parentSelector:
      name: kibana-global
    actions:
      - method: merge
        path: .
  storagePolicy: cleartext
data:
  values:
    pod:
      resources:
        apache_proxy:
          limits:
            memory: "1024Mi"
            cpu: "2000m"
          requests:
            memory: "128Mi"
            cpu: "100m"
        kibana:
          requests:
            memory: "512Mi"
            cpu: "100m"
          limits:
            memory: "2048Mi"
            cpu: "2000m"
...

View File

@ -0,0 +1,26 @@
---
# The purpose of this file is to define the environment-specific public-facing
# VIP for the ingress controller
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: osh-infra-ingress-controller
labels:
name: osh-infra-ingress-controller-global
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: osh-infra-ingress-controller-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
values:
conf:
ingress:
# The default number is set to the number of cpus, which is 88 in the Intel lab.
worker-processes: "10"
...

View File

@ -0,0 +1,21 @@
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: osh-infra-logging
labels:
name: osh-infra-logging-cruiserlite
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: osh-infra-logging-global
actions:
- method: replace
path: .chart_group
storagePolicy: cleartext
data:
chart_group:
- elasticsearch
- fluentd

View File

@ -0,0 +1,515 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: elasticsearch
labels:
name: elasticsearch-cruiserlite
layeringDefinition:
abstract: false
layer: type
parentSelector:
hosttype: elasticsearch-global
actions:
- method: replace
path: .values.conf.curator.action_file
- method: merge
path: .
storagePolicy: cleartext
data:
values:
pod:
replicas:
master: 3
data: 3
client: 3
resources:
client:
requests:
memory: "2Gi"
cpu: "1000m"
limits:
memory: "2Gi"
cpu: "2000m"
master:
requests:
memory: "2Gi"
cpu: "1000m"
limits:
memory: "2Gi"
cpu: "2000m"
data:
requests:
memory: "2Gi"
cpu: "1000m"
limits:
memory: "2Gi"
cpu: "2000m"
storage:
data:
requests:
storage: 200G
storage_class: general
master:
requests:
storage: 5G
storage_class: general
conf:
jvm_options: |
-Xms1g
-Xmx1g
-XX:+UseConcMarkSweepGC
-XX:CMSInitiatingOccupancyFraction=75
-XX:+UseCMSInitiatingOccupancyOnly
-Des.networkaddress.cache.ttl=60
-Des.networkaddress.cache.negative.ttl=10
-XX:+AlwaysPreTouch
-Xss1m
-Djava.awt.headless=true
-Dfile.encoding=UTF-8
-Djna.nosys=true
-XX:-OmitStackTraceInFastThrow
-Dio.netty.noUnsafe=true
-Dio.netty.noKeySetOptimization=true
-Dio.netty.recycler.maxCapacityPerThread=0
-Dlog4j.shutdownHookEnabled=false
-Dlog4j2.disable.jmx=true
-Djava.io.tmpdir=${ES_TMPDIR}
-XX:+HeapDumpOnOutOfMemoryError
-XX:HeapDumpPath=data
-XX:ErrorFile=logs/hs_err_pid%p.log
8:-XX:+PrintGCDetails
8:-XX:+PrintGCDateStamps
8:-XX:+PrintTenuringDistribution
8:-XX:+PrintGCApplicationStoppedTime
8:-Xloggc:logs/gc.log
8:-XX:+UseGCLogFileRotation
8:-XX:NumberOfGCLogFiles=32
8:-XX:GCLogFileSize=64m
9-:-Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m
9-:-Djava.locale.providers=COMPAT
10-:-XX:UseAVX=2
elasticsearch:
env:
java_opts:
client: "-Xms1g -Xmx1g"
data: "-Xms1g -Xmx1g"
master: "-Xms1g -Xmx1g"
curator:
# Run every 12th hour
schedule: "0 */12 * * *"
action_file:
actions:
1:
# Below indices for non-security-logs-snapshot
# libvirt-|qemu- #libvirt and qemu indices
# |journal-|kernel-|syslog-|kernel_syslog- #journald, kernel, and syslog indices
# |ceph-|tenant_ceph- #ceph and tenant_ceph indices
# |calico- #calico indices
# |armada- #armada indices
# |logstash- #generic logstash indices
# |flows- #flowlogs indices
# |utility_access- #utility container indices
# |openvswitch- #openvswitch log indices
action: snapshot
description: >-
"Snapshot all non-security logs indices older than 10 days"
options:
repository: logstash_snapshots
name: "non-security-logs-snapshot-%Y.%m.%d.%H.%M"
wait_for_completion: True
max_wait: -1
wait_interval: 30
ignore_empty_list: True
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: '^(libvirt-|qemu-|journal-|kernel-|syslog-|kernel_syslog-|ceph-|tenant_ceph-|calico-|armada-|logstash-|flows-|utility_access-|openvswitch-).*$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: 2
2:
# Below indices for security-logs-snapshot
# keystone-|glance-|cinder-|nova-|neutron-|heat-|horizon-|openstack-|ro-|ranger- #openstack indices
# |grafana-|prometheus-|nagios-|kibana-|elasticsearch-|lma- #grafana, prometheus, nagios, kibana and elasticsearch indices
# |auth- #auth indices
# |shipyard- #shipyard indices
# |jenkins- #jenkins indices
# |airship- #airship indices
# |audit_tsee- #tsee audit logs
action: snapshot
description: >-
"Snapshot all security logs indices older than 10 days"
options:
repository: logstash_snapshots
name: "security-logs-snapshot-%Y.%m.%d.%H.%M"
wait_for_completion: True
max_wait: -1
wait_interval: 30
ignore_empty_list: True
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: '^(keystone-|glance-|cinder-|nova-|neutron-|heat-|horizon-|openstack-|ro-|ranger-|grafana-|prometheus-|nagios-|kibana-|elasticsearch-|lma-|auth-|shipyard-|jenkins-|airship-|audit_tsee-).*$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: 2
3:
action: delete_indices
description: >-
"Delete libvirt and qemu indices older than 14 days"
options:
timeout_override: 7200
continue_if_exception: False
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: '^(libvirt-|qemu-).*$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: 2
4:
action: delete_indices
description: >-
"Delete journald, kernel, and syslog indices older than 14 days"
options:
timeout_override: 7200
continue_if_exception: False
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: '^(journal-|kernel-|syslog-|kernel_syslog-).*$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: 2
5:
action: delete_indices
description: >-
"Delete ceph and tenant_ceph indices older than 14 days"
options:
timeout_override: 7200
continue_if_exception: False
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: '^(ceph-|tenant_ceph-).*$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: 2
6:
action: delete_indices
description: >-
"Delete Openvswitch log indices older than 14 days"
options:
timeout_override: 7200
continue_if_exception: False
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: 'openvswitch-'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: 2
7:
action: delete_indices
description: >-
"Delete calico indices older than 14 days"
options:
timeout_override: 7200
continue_if_exception: False
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: 'calico-'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: 2
8:
action: delete_indices
description: >-
"Delete armada indices older than 14 days"
options:
timeout_override: 7200
continue_if_exception: False
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: 'armada-'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: 2
9:
action: delete_indices
description: >-
"Delete generic logstash indices older than 14 days"
options:
timeout_override: 7200
continue_if_exception: False
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: 'logstash-'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: 2
10:
action: delete_indices
description: >-
"Delete openstack service indices older than 14 days"
options:
timeout_override: 7200
continue_if_exception: False
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: '^(keystone-|glance-|cinder-|nova-|neutron-|heat-|horizon-|openstack-).*$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: 2
11:
action: delete_indices
description: >-
"Delete grafana, prometheus, nagios, kibana and elasticsearch indices older than 14 days"
options:
timeout_override: 7200
continue_if_exception: False
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: '^(grafana-|prometheus-|nagios-|kibana-|elasticsearch-|lma-).*$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: 2
12:
action: delete_indices
description: >-
"Delete auth indices older than 14 days"
options:
timeout_override: 7200
continue_if_exception: False
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: 'auth-'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: 2
13:
action: delete_indices
description: >-
"Delete shipyard indices older than 14 days"
options:
timeout_override: 7200
continue_if_exception: False
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: 'shipyard-'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: 2
14:
action: delete_indices
description: >-
"Delete jenkins indices older than 14 days"
options:
timeout_override: 7200
continue_if_exception: False
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: 'jenkins-'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: 2
15:
action: delete_indices
description: >-
"Delete flows indices older than 14 days"
options:
timeout_override: 7200
continue_if_exception: False
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: 'flows-'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: 2
16:
action: delete_indices
description: >-
"Delete airship indices older than 14 days"
options:
timeout_override: 7200
continue_if_exception: False
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: 'airship-'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: 2
17:
action: delete_indices
description: >-
"Delete tsee audit indices older than 14 days"
options:
timeout_override: 7200
continue_if_exception: False
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: 'audit_tsee-'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: 2
18:
action: delete_indices
description: >-
"Delete utility container log indices older than 14 days"
options:
timeout_override: 7200
continue_if_exception: False
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: 'utility_access-'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: 2
19:
action: delete_snapshots
description: >-
"Delete non-security logs snapshots older than 14 days"
options:
repository: logstash_snapshots
timeout_override: 21600
retry_interval: 120
retry_count: 5
ignore_empty_list: True
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: 'non-security-logs-snapshot-'
- filtertype: age
source: creation_date
direction: older
unit: days
unit_count: 2
20:
action: delete_snapshots
description: >-
"Delete security logs snapshots older than 180 days"
options:
repository: logstash_snapshots
timeout_override: 21600
retry_interval: 120
retry_count: 5
ignore_empty_list: True
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: 'security-logs-snapshot-'
- filtertype: age
source: creation_date
direction: older
unit: days
unit_count: 2
...

View File

@ -0,0 +1,30 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: fluentd
labels:
hosttype: fluentd-cruiserlite
layeringDefinition:
abstract: false
layer: type
parentSelector:
hosttype: fluentd-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
values:
pod:
resources:
fluentd:
limits:
memory: '4Gi'
cpu: '2000m'
requests:
memory: '2Gi'
cpu: '1000m'
...

View File

@ -0,0 +1,29 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: osh-infra-mariadb
labels:
name: openstack-infra-mariadb-cruiserlite
component: mariadb
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: openstack-infra-mariadb-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
values:
volume:
size: 30Gi
backup:
size: 50Gi
pod:
replicas:
error_page: 2
...

View File

@ -0,0 +1,24 @@
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: osh-infra-monitoring
labels:
name: osh-infra-monitoring-cruiserlite
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: osh-infra-monitoring-global
actions:
- method: replace
path: .chart_group
storagePolicy: cleartext
data:
chart_group:
- prometheus
- prometheus-node-exporter
- prometheus-kube-state-metrics
- prometheus-process-exporter
- nagios

View File

@ -0,0 +1,36 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: nagios
labels:
name: nagios-cruiserlite
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: nagios-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
values:
pod:
resources:
nagios:
limits:
memory: "1024Mi"
cpu: "2000m"
requests:
memory: "128Mi"
cpu: "100m"
apache_proxy:
limits:
memory: "1024Mi"
cpu: "2000m"
requests:
memory: "128Mi"
cpu: "100m"
...

View File

@ -0,0 +1,30 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: prometheus-kube-state-metrics
labels:
name: prometheus-kube-state-metrics-cruiserlite
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: prometheus-kube-state-metrics-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
values:
pod:
resources:
enabled: true
kube_state_metrics:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
...

View File

@ -0,0 +1,29 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: prometheus-node-exporter
labels:
name: prometheus-node-exporter-cruiserlite
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: prometheus-node-exporter-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
values:
pod:
resources:
node_exporter:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
...

View File

@ -0,0 +1,29 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: prometheus-process-exporter
labels:
name: prometheus-process-exporter-cruiser-lite
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: prometheus-process-exporter-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
values:
pod:
resources:
process_exporter:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
...

View File

@ -0,0 +1,34 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: prometheus
labels:
name: prometheus-type
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: osh-infra-prometheus-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
values:
pod:
resources:
enabled: true
prometheus:
limits:
memory: "4Gi"
cpu: "2000m"
requests:
memory: "2Gi"
cpu: "1000m"
storage:
requests:
storage: 100G
storage_class: general
...

View File

@ -0,0 +1,25 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: osh-infra-radosgw
labels:
name: osh-infra-radosgw-cruiserlite
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: osh-infra-radosgw-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
values:
pod:
replicas:
rgw: 2
...

View File

@ -0,0 +1,26 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: libvirt
replacement: true
labels:
name: libvirt-cruiserlite
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: libvirt-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
wait:
timeout: 12000
values:
network:
backend:
- openvswitch
- sriov
...

View File

@ -0,0 +1,391 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: neutron
labels:
name: neutron-cruiserlite
component: neutron
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: neutron-global
actions:
- method: merge
path: .
storagePolicy: cleartext
substitutions:
# OVS-DPDK settings for neutron
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .ovs-dpdk.ovs_bridge_dpdk
dest:
- path: .values.conf.ovs_dpdk.bridges[0].name
pattern: TUNNEL_BRIDGE
- path: .values.conf.ovs_dpdk.bonds[0].bridge
pattern: TUNNEL_BRIDGE
- path: .values.conf.plugins.openvswitch_agent.ovs.bridge_mappings
pattern: TUNNEL_BRIDGE
## NOTE: setting MTU same as overlay(9150) for now
- src:
schema: pegleg/NetworkSettings/v1
name: network-settings
path: .mtu.primary_bond
dest:
path: .values.conf.ovs_dpdk.bonds[0].mtu
- src:
schema: drydock/HardwareProfile/v1
name: intel-s2600wt
path: .device_aliases.data_nic2.address
dest:
path: .values.conf.ovs_dpdk.bonds[0].nics[0].pci_id
- src:
schema: drydock/HardwareProfile/v1
name: intel-s2600wt
path: .device_aliases.data_nic2.address
dest:
path: .values.conf.ovs_dpdk.bonds[0].nics[1].pci_id
# Site data mappings
# routable network
- src:
schema: drydock/Network/v1
name: routable
path: .vlan
dest:
path: .values.bootstrap.script
pattern: PUBLIC_SEGMENTATION_ID_VALUE
- src:
schema: drydock/Network/v1
name: routable
path: .labels.enabled
dest:
path: .values.bootstrap.script
pattern: PUBLIC_NETWORK_ENABLED_VALUE
- src:
schema: drydock/Network/v1
name: routable
path: .ranges[0].start
dest:
path: .values.bootstrap.script
pattern: PUBLIC_START_IP_ADDRESS_VALUE
- src:
schema: drydock/Network/v1
name: routable
path: ranges[0].end
dest:
path: .values.bootstrap.script
pattern: PUBLIC_END_IP_ADDRESS_VALUE
- src:
schema: drydock/Network/v1
name: routable
path: .routes[0].gateway
dest:
path: .values.bootstrap.script
pattern: PUBLIC_NETWORK_GATEWAY_VALUE
- src:
schema: drydock/Network/v1
name: routable
path: .cidr
dest:
path: .values.bootstrap.script
pattern: PUBLIC_NETWORK_CIDR_VALUE
# overlay network
- src:
schema: drydock/Network/v1
name: overlay
path: .vlan
dest:
path: .values.bootstrap.script
pattern: OVERLAY_SEGMENTATION_ID_VALUE
- src:
schema: drydock/Network/v1
name: overlay
path: .labels.enabled
dest:
path: .values.bootstrap.script
pattern: OVERLAY_NETWORK_ENABLED_VALUE
- src:
schema: drydock/Network/v1
name: overlay
path: .ranges[1].start
dest:
path: .values.bootstrap.script
pattern: OVERLAY_START_IP_ADDRESS_VALUE
- src:
schema: drydock/Network/v1
name: overlay
path: .ranges[1].end
dest:
path: .values.bootstrap.script
pattern: OVERLAY_END_IP_ADDRESS_VALUE
- src:
schema: drydock/Network/v1
name: overlay
path: .routes[0].gateway
dest:
path: .values.bootstrap.script
pattern: OVERLAY_NETWORK_GATEWAY_VALUE
- src:
schema: drydock/Network/v1
name: overlay
path: .cidr
dest:
path: .values.bootstrap.script
pattern: OVERLAY_NETWORK_CIDR_VALUE
data:
wait:
timeout: 12000
test:
timeout: 1200
values:
pod:
use_fqdn:
neutron_agent: false
replicas:
server: 3
labels:
sriov:
node_selector_key: sriov
node_selector_value: enabled
network:
interface:
sriov:
- device: ens785f1
num_vfs: 32
promisc: false
qos:
- vf_num: 0
share: 20
backend:
- openvswitch
- sriov
conf:
# FIXME(je808k) confirm with NCD
auto_bridge_add:
br-bond1: null
dhcp_agent:
DEFAULT:
ovs_use_veth: true
ovs_dpdk:
enabled: true
driver: vfio-pci
nics: []
bridges:
- name: TUNNEL_BRIDGE
bonds:
- name: dpdkbond0
bridge: TUNNEL_BRIDGE
# The IP from the first nic in nics list shall be used
migrate_ip: false
n_rxq: 4
n_rxq_size: 4096
n_txq_size: 4096
ovs_options: "bond_mode=active-backup"
nics:
- name: dpdk_b0s0
vf_index: 0
- name: dpdk_b0s1
vf_index: 1
neutron:
DEFAULT:
#service_plugins: router,taas,trunk
service_plugins: router,trunk,neutron.services.qos.qos_plugin.QoSPlugin
plugins:
openvswitch_agent:
default:
ovs_vsctl_timeout: 30
agent:
tunnel_types: ""
securitygroup:
enable_security_group: False
firewall_driver: neutron.agent.firewall.NoopFirewallDriver
ovs:
bridge_mappings: ovsnet:TUNNEL_BRIDGE
datapath_type: netdev
of_connect_timeout: 60
of_request_timeout: 30
vhostuser_socket_dir: /var/run/openvswitch/vhostuser
ml2_conf:
ml2:
mechanism_drivers: l2population,openvswitch,sriovnicswitch
extension_drivers: port_security, qos
agent:
extensions: qos
ml2_type_vlan:
## NOTE: Must have at least 1 sriov network defined
network_vlan_ranges: external,sriovnet1:100:4000,ovsnet:2:4094
sriov_agent:
securitygroup:
firewall_driver: neutron.agent.firewall.NoopFirewallDriver
sriov_nic:
physical_device_mappings: sriovnet1:ens785f1
exclude_devices: ens785f1:0000:05:06.0; 0000:05:06.1
taas:
taas:
enabled: True
taas_plugin:
service_providers:
service_provider: TAAS:TAAS:neutron_taas.services.taas.service_drivers.taas_rpc.TaasRpcDriver:default
# Need to wait for sriov agent to come up and configure VFs first
dependencies:
dynamic:
targeted:
openvswitch:
ovs_agent:
pod:
- requireSameNode: true
labels:
application: neutron
component: neutron-sriov-agent
# Disable the bootstrap script for now because there is no longer an oam network
# on vlan45 to put the public network on.
bootstrap:
enabled: false
ks_user: neutron
script: |
# TODO this should be moved out of neutron into an ending chart group that config
# a running openstack site to the targeted tenet's needs.
set +e
# Static Values
export PUBLIC_PHYSICAL_NETWORK="ovsnet"
export RESOURCE_DOMAIN=qa
export RESOURCE_PROJECT=aqua-admin
# Non-site data
export DNS_UPSTREAM_SERVERS="DNS_UPSTREAM_SERVERS_JOINED_VALUE"
# Site Data
# Declare associative arrays with each network data
declare -A PUBLIC_NET=(
[NETWORK_NAME]="routable"
[NETWORK_ENABLED]="PUBLIC_NETWORK_ENABLED_VALUE"
[SEGMENTATION_ID]="PUBLIC_SEGMENTATION_ID_VALUE"
[START_IP_ADDRESS]="PUBLIC_START_IP_ADDRESS_VALUE"
[END_IP_ADDRESS]="PUBLIC_END_IP_ADDRESS_VALUE"
[NETWORK_GATEWAY]="PUBLIC_NETWORK_GATEWAY_VALUE"
[NETWORK_CIDR]="PUBLIC_NETWORK_CIDR_VALUE"
[EXTERNAL]="true"
)
export PUBLIC_NET
declare -A OVERLAY_NET=(
[NETWORK_NAME]="default-private"
[NETWORK_ENABLED]="OVERLAY_NETWORK_ENABLED_VALUE"
[SEGMENTATION_ID]="OVERLAY_SEGMENTATION_ID_VALUE"
[START_IP_ADDRESS]="OVERLAY_START_IP_ADDRESS_VALUE"
[END_IP_ADDRESS]="OVERLAY_END_IP_ADDRESS_VALUE"
[NETWORK_GATEWAY]="OVERLAY_NETWORK_GATEWAY_VALUE"
[NETWORK_CIDR]="OVERLAY_NETWORK_CIDR_VALUE"
[EXTERNAL]="false"
)
export OVERLAY_NET
# Defining function for validating ip address
valid_ip() {
local ip=$1
if expr "$ip" : '[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*$' >/dev/null; then
for i in 1 2 3 4; do
if [ $(echo "$ip" | cut -d. -f$i) -gt 255 ]; then
return 1
fi
done
return 0
else
return 1
fi
}
# Declare array of names of associative arrays
export networks=("${!PUBLIC_NET@}" "${!OVERLAY_NET@}")
# Declare loop variable as nameref
declare -n net_ref
# Loop over networks
for net_ref in "${networks[@]}"; do
# NOTE: If the network is not found, just assume this is a new deployment
# and create all the essentials if enabled.
NET_ENABLED=$(echo "${net_ref[NETWORK_ENABLED]}" | tr [:upper:] [:lower:])
if [ "${NET_ENABLED}" = "true" ]; then
EXISTING_NET=($(openstack network list \
--provider-physical-network "${PUBLIC_PHYSICAL_NETWORK}" \
--provider-network-type vlan \
--provider-segment "${net_ref[SEGMENTATION_ID]}" \
--long -f value -c ID -c Name -c Project))
if [ ! -z "${EXISTING_NET[0]}" ]; then
echo "WARN network [${EXISTING_NET[0]}], [${EXISTING_NET[1]}] already exists in project ${EXISTING_NET[2]}"
CURNET="${EXISTING_NET[0]}"
else
## NOTE: Due to this nova RBAC rule "network:attach_external_network:!"
## the routable network cannot be created with --external
## otherwise will not be able to create VM directly off of it.
echo "INFO Creating network with name [${net_ref[NETWORK_NAME]}]"
CURNET=$(openstack network create "${net_ref[NETWORK_NAME]}" \
--enable \
--provider-physical-network "${PUBLIC_PHYSICAL_NETWORK}" \
--provider-network-type vlan \
--provider-segment "${net_ref[SEGMENTATION_ID]}" \
--enable-port-security \
--project-domain "${RESOURCE_DOMAIN}" \
--project "${RESOURCE_PROJECT}" -f value -c id)
if [ -z "${CURNET}" ]; then
echo "ERROR Failed to create network"
exit 0
fi
fi
SUBNET=$(openstack subnet list --name "${net_ref[NETWORK_NAME]}-subnet" \
--network "${CURNET}" -f value -c ID)
if [ "${SUBNET}" = "" ]; then
export EXTRA_SUBNET_ARGS=()
# we should have dns servers only for routable network
if [ "${net_ref[EXTERNAL]}" = "true" ]; then
for dns_server in $(echo "${DNS_UPSTREAM_SERVERS}" | tr ',' ' '); do
EXTRA_SUBNET_ARGS+=(--dns-nameserver "${dns_server}")
done
fi
if ! valid_ip "${net_ref[NETWORK_GATEWAY]}"; then
net_ref[NETWORK_GATEWAY]=none
fi
EXTRA_SUBNET_ARGS+=(
--gateway "${net_ref[NETWORK_GATEWAY]}"
)
echo "INFO Creating subnet for network ${net_ref[NETWORK_NAME]}"
SUBNET=$(openstack subnet create "${net_ref[NETWORK_NAME]}-subnet" \
--network "${CURNET}" \
--allocation-pool start="${net_ref[START_IP_ADDRESS]}",end="${net_ref[END_IP_ADDRESS]}" \
"${EXTRA_SUBNET_ARGS[@]}" \
--subnet-range "${net_ref[NETWORK_CIDR]}" \
--dhcp \
--project-domain "${RESOURCE_DOMAIN}" \
--project "${RESOURCE_PROJECT}" -f value -c id)
if [ -z "${SUBNET}" ]; then
echo "ERROR Failed to create subnet [${net_ref[NETWORK_NAME]}-subnet]"
exit 0
fi
else
echo "WARN Subnet [${net_ref[NETWORK_NAME]}-subnet] for [${net_ref[NETWORK_NAME]}] already exists, not re-creating."
fi
else
echo "INFO The creation of a public network [${net_ref[NETWORK_NAME]}] was disabled, no attempt to create related resources was made."
fi
done
exit 0
...

View File

@ -0,0 +1,62 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: nova
labels:
name: nova-cruiserlite
component: nova
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: nova-global
actions:
- method: merge
path: .
storagePolicy: cleartext
substitutions:
# Nova config for OVS-DPDK
- src:
schema: drydock/HardwareProfile/v1
name: intel-s2600wt
path: .cpu_sets.vcpu_pin_set
dest:
path: .values.conf.nova.DEFAULT.vcpu_pin_set
data:
wait:
timeout: 12000
test:
timeout: 1200
values:
pod:
use_fqdn:
compute: false
replicas:
api_metadata: 3
compute_ironic: 3
placement: 3
osapi: 3
conductor: 3
consoleauth: 3
scheduler: 3
novncproxy: 3
spiceproxy: 3
network:
backend:
- openvswitch
- sriov
conf:
nova:
libvirt:
virt_type: kvm
DEFAULT:
force_config_drive: true
cpu_allocation_ratio: 1.0
pci:
alias: '{"name": "numa0", "capability_type": "pci", "product_id": "154c", "vendor_id": "8086", "device_type": "type-PCI", "numa_policy": "required"}'
## NOTE: Exclude vf0 from each of the sriov nic
passthrough_whitelist: |
[{"address": {"domain":"0000","bus":"05","slot":"06","function":"[2-7]"}, "physical_network": "sriovnet1"},{"address": "0000:05:07.*", "physical_network": "sriovnet1"},{"address": "0000:05:08.*", "physical_network": "sriovnet1"},{"address": "0000:05:09.*", "physical_network": "sriovnet1"}]
...

View File

@ -0,0 +1,73 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: openvswitch
labels:
name: openvswitch-cruiserlite
component: openvswitch
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: openvswitch-global
actions:
- method: merge
path: .
storagePolicy: cleartext
replacement: true
substitutions:
# OVS-DPDK config
- src:
schema: drydock/HardwareProfile/v1
name: intel-s2600wt
path: .cpu_sets.dpdk-lcore-mask
dest:
path: .values.conf.ovs_dpdk.lcore_mask
- src:
schema: drydock/HardwareProfile/v1
name: intel-s2600wt
path: .cpu_sets.pmd-cpu-mask
dest:
path: .values.conf.ovs_dpdk.pmd_cpu_mask
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .ovs-dpdk.ovs_bridge_dpdk
dest:
- path: .values.network.external_bridge
- path: .values.network.interface.external
data:
values:
pod:
resources:
enabled: true
ovs:
vswitchd:
requests:
memory: "2Gi"
cpu: "2"
limits:
memory: "2Gi"
hugepages-1Gi: "1Gi"
cpu: "2"
conf:
ovs_dpdk:
enabled: true
socket_memory: 4096,4096
vhostuser_socket_dir: vhostuser
hugepages_mountpath: /dev/hugepages
# WIP(pk734q) upstream: https://review.opendev.org/#/c/692390/
vhost_iommu_support: true
driver: vfio-pci
# Need to wait for sriov agent to come up and configure VFs first
dependencies:
static:
vswitchd:
pod:
- requireSameNode: true
labels:
application: neutron
component: neutron-sriov-agent
...

View File

@ -0,0 +1,29 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: glance
labels:
name: glance-type
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: glance-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
test:
enabled: false
values:
pod:
replicas:
api: 3
registry: 3
conf:
rally_tests:
run_tempest: false
...

View File

@ -0,0 +1,28 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: heat
labels:
name: heat-cruiserlite
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: heat-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
test:
timeout: 1200
values:
pod:
replicas:
api: 2
cfn: 2
cloudwatch: 2
engine: 2
...

View File

@ -0,0 +1,45 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: horizon
labels:
name: horizon-cruiserlite
component: horizon
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: horizon-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
wait:
timeout: 1800
test:
timeout: 1200
values:
pod:
replicas:
server: 2
conf:
software:
apache2:
site_dir: /etc/apache2/sites-available
conf_dir: /etc/apache2/conf-available
mods_dir: /etc/apache2/mods-available
horizon:
local_settings:
config:
secure_proxy_ssl_header: false
csrf_cookie_secure: "False"
session_cookie_secure: "False"
session_cookie_httponly: "True"
allowed_hosts:
- '*'
- .DOMAIN
- '.cluster.local'
...

View File

@ -0,0 +1,26 @@
---
# The purpose of this file is to define the environment-specific public-facing
# VIP for the ingress controller
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: openstack-ingress-controller
labels:
name: openstack-ingress-controller-cruiserlite
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: openstack-ingress-controller-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
values:
conf:
ingress:
# The default number is set to the number of cpus, which is 88 in the Intel lab.
worker-processes: "10"
...

View File

@ -0,0 +1,24 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: keystone
labels:
name: keystone-cruiserlite
component: keystone
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: keystone-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
values:
pod:
replicas:
api: 6
...

View File

@ -0,0 +1,33 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: openstack-mariadb
labels:
name: openstack-mariadb-cruiserlite
component: mariadb
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: openstack-mariadb-global
component: mariadb
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
values:
volume:
size: 10Gi
backup:
size: 15Gi
monitoring:
prometheus:
enabled: true
pod:
replicas:
error_page: 2
...

View File

@ -0,0 +1,22 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: openstack-rabbitmq
labels:
name: openstack-rabbitmq-cruiserlite
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: openstack-rabbitmq-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
values:
volume:
size: 5Gi
...

View File

@ -0,0 +1,103 @@
---
# The purpose of this file is to define environment-specific parameters for the
# ceph client
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: tenant-ceph-client
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: tenant-ceph-client-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
values:
conf:
ceph:
global:
osd_pool_default_size: 1
      # TODO: add docs explaining reduced redundancy
      # we only have 2 computes, i.e. 2 OSDs
pool:
target:
osd: 2
default:
crush_rule: same_host
spec:
# RBD pool
- name: rbd
application: rbd
replication: 1
percent_total_data: 10
# Cinder volumes pool
- name: cinder.volumes
application: cinder-volume
replication: 1
percent_total_data: 40
# RadosGW pools
- name: .rgw.root
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.control
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.data.root
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.gc
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.log
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.intent-log
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.meta
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.usage
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.users.keys
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.users.email
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.users.swift
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.users.uid
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.buckets.extra
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.buckets.index
application: rgw
replication: 1
percent_total_data: 3
- name: default.rgw.buckets.data
application: rgw
replication: 1
percent_total_data: 30
...

View File

@ -0,0 +1,26 @@
---
# The purpose of this file is to define the environment-specific public-facing
# VIP for the ingress controller
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: tenant-ceph-ingress
labels:
name: tenant-ceph-ingress-cruiserlite
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: tenant-ceph-ingress-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
values:
conf:
ingress:
        # The default number is set to the number of CPUs, which is 88 in the Intel lab.
worker-processes: "10"
...

View File

@ -0,0 +1,37 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: tenant-ceph-osd-sdb
labels:
name: tenant-ceph-osd-sdb-nc
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: tenant-ceph-osd-global
actions:
- method: replace
path: .values.conf.storage.osd
- method: merge
path: .
storagePolicy: cleartext
data:
chart_name: tenant-ceph-osd-sdb
release: tenant-ceph-osd-sdb
wait:
labels:
release_group: clcp-tenant-ceph-osd-sdb
values:
daemonset:
prefix_name: "clcp-tenant-ceph-osd-sdb"
conf:
storage:
failure_domain_by_hostname: 1-11
osd:
- data:
type: bluestore
location: /dev/sdb
...

View File

@ -0,0 +1,30 @@
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: openstack-tenant-ceph
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: openstack-tenant-ceph-global
actions:
- method: replace
path: .chart_group
- method: merge
path: .
storagePolicy: cleartext
labels:
name: openstack-tenant-ceph-nc
data:
description: Ceph Storage
sequenced: true
chart_group:
- tenant-ceph-ingress
- tenant-ceph-mon
- tenant-ceph-osd-sdb
- tenant-ceph-client
- tenant-ceph-config
- tenant-ceph-utility-config
- tenant-ceph-utility
...

View File

@ -0,0 +1,22 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-apiserver-webhook-scaled
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: ucp-apiserver-webhook-global
actions:
- method: merge
path: .
labels:
name: ucp-apiserver-webhook-scaled-type
storagePolicy: cleartext
data:
values:
pod:
replicas:
api: 2
...

View File

@ -0,0 +1,22 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-armada-scaled
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: ucp-armada-global
actions:
- method: merge
path: .
labels:
name: ucp-armada-scaled-type
storagePolicy: cleartext
data:
values:
pod:
replicas:
api: 2
...

View File

@ -0,0 +1,21 @@
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: ucp-armada-scaled
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: chartgroup-ucp-armada-global
actions:
- method: merge
path: .
labels:
name: chartgroup-ucp-armada-scaled-type
storagePolicy: cleartext
data:
chart_group:
- ucp-tiller
- ucp-armada-scaled
...

View File

@ -0,0 +1,88 @@
---
# The purpose of this file is to define environment-specific parameters for ceph
# client update
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-ceph-client-update
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: ucp-ceph-client-update-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
values:
pod:
replicas:
        # (jamesgu) adjust down to 2 to work around the issue that a fourth pod
        # is stuck at pending during the Armada phase. The Intel pod has one genesis
        # and two controller nodes vs 3 controller nodes in the WWT lab.
mds: 2
conf:
pool:
spec:
# RBD pool
- name: rbd
application: rbd
replication: 2
percent_total_data: 40
# CephFS pools
- name: cephfs_metadata
application: cephfs
replication: 2
percent_total_data: 1
- name: cephfs_data
application: cephfs
replication: 2
percent_total_data: 2.5
# RadosGW pools
- name: .rgw.root
application: rgw
replication: 2
percent_total_data: 0.1
- name: default.rgw.control
application: rgw
replication: 2
percent_total_data: 0.1
- name: default.rgw.log
application: rgw
replication: 2
percent_total_data: 5
- name: default.rgw.intent-log
application: rgw
replication: 2
percent_total_data: 0.1
- name: default.rgw.meta
application: rgw
replication: 2
percent_total_data: 0.1
- name: default.rgw.usage
application: rgw
replication: 2
percent_total_data: 0.1
- name: default.rgw.users.uid
application: rgw
replication: 2
percent_total_data: 0.1
- name: default.rgw.buckets.non-ec
application: rgw
replication: 2
percent_total_data: 0.1
- name: default.rgw.buckets.index
application: rgw
replication: 2
percent_total_data: 3
- name: default.rgw.buckets.data
application: rgw
replication: 2
percent_total_data: 34.8
target:
        # NEWSITE-CHANGEME: Total number of OSDs. Does not need to change if
        # your HW matches this site's HW. Verify for your environment.
        # 1 OSD (/dev/sdb) per node x 3 nodes = 3
osd: 3
...

View File

@ -0,0 +1,81 @@
---
# The purpose of this file is to define environment-specific parameters for the
# ceph client
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-ceph-client
layeringDefinition:
abstract: false
layer: site
parentSelector:
name: ucp-ceph-client-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
values:
conf:
pool:
target:
# NEWSITE-CHANGEME: The number of OSDs per ceph node. Does not need to
# change if your deployment HW matches this site's HW.
osd: 1
spec:
# RBD pool
- name: rbd
application: rbd
replication: 1
percent_total_data: 15
# CephFS pools
- name: cephfs_metadata
application: cephfs
replication: 1
percent_total_data: 1
- name: cephfs_data
application: cephfs
replication: 1
percent_total_data: 2.5
# RadosGW pools
- name: .rgw.root
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.control
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.log
application: rgw
replication: 1
percent_total_data: 5
- name: default.rgw.intent-log
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.meta
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.usage
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.users.uid
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.buckets.non-ec
application: rgw
replication: 1
percent_total_data: 0.1
- name: default.rgw.buckets.index
application: rgw
replication: 1
percent_total_data: 3
- name: default.rgw.buckets.data
application: rgw
replication: 1
percent_total_data: 34.8
...

View File

@ -0,0 +1,26 @@
---
# The purpose of this file is to define the environment-specific public-facing
# VIP for the ingress controller
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: ucp-ceph-ingress
labels:
name: ucp-ceph-ingress-cruiserlite
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: ucp-ceph-ingress-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
values:
conf:
ingress:
        # The default number is set to the number of CPUs, which is 88 in the Intel lab.
worker-processes: "10"
...

View File

@ -0,0 +1,35 @@
---
# The purpose of this file is to define environment-specific parameters for
# ceph-osd
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-ceph-osd-sdb
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: ucp-ceph-osd-global
actions:
- method: replace
path: .values.conf.storage.osd
- method: merge
path: .
storagePolicy: cleartext
data:
chart_name: ucp-ceph-osd-sdb
release: ucp-ceph-osd-sdb
wait:
labels:
release_group: clcp-ucp-ceph-osd-sdb
values:
daemonset:
prefix_name: "clcp-ucp-ceph-osd-sdb"
conf:
storage:
failure_domain_by_hostname: 1-11
osd:
- data:
type: bluestore
location: /dev/sdb
...

View File

@ -0,0 +1,26 @@
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: ucp-ceph-update
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: ucp-ceph-update-global
actions:
- method: merge
path: .
storagePolicy: cleartext
labels:
name: ucp-ceph-update-nc
data:
description: Ceph Storage
sequenced: true
chart_group:
- ucp-ceph-ingress
- ucp-ceph-mon
- ucp-ceph-osd-sdb
- ucp-ceph-client-update
- ucp-ceph-provisioners-update
...

View File

@ -0,0 +1,26 @@
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: ucp-ceph
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: ucp-ceph-global
actions:
- method: merge
path: .
storagePolicy: cleartext
labels:
name: ucp-ceph-nc
data:
description: Ceph Storage
sequenced: true
chart_group:
- ucp-ceph-ingress
- ucp-ceph-mon
- ucp-ceph-osd-sdb
- ucp-ceph-client
- ucp-ceph-provisioners
...

View File

@ -0,0 +1,21 @@
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: ucp-services-scaled
layeringDefinition:
abstract: false
layer: type
storagePolicy: cleartext
data:
description: UCP Services
sequenced: false
chart_group:
- ucp-barbican
- ucp-deckhand
- ucp-maas
- ucp-drydock
- ucp-promenade
- ucp-shipyard
- ucp-apiserver-webhook
...

View File

@ -0,0 +1,18 @@
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: ucp-infra-scaled
layeringDefinition:
abstract: false
layer: type
storagePolicy: cleartext
data:
description: Common UCP Components
chart_group:
- ucp-ingress-scaled
- ucp-mariadb-scaled
- ucp-postgresql-scaled
- ucp-rabbitmq-scaled
- ucp-keystone-memcached
...

View File

@ -0,0 +1,26 @@
---
# The purpose of this file is to define the environment-specific public-facing
# VIP for the ingress controller
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: ucp-ingress
labels:
name: ucp-ingress-cruiserlite
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: ucp-ingress-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
values:
conf:
ingress:
        # The default number is set to the number of CPUs, which is 88 in the Intel lab.
worker-processes: "10"
...

View File

@ -0,0 +1,91 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-ingress-scaled
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: ucp-ingress-global
actions:
- method: merge
path: .
labels:
name: ucp-ingress-scaled-type
storagePolicy: cleartext
data:
values:
pod:
replicas:
ingress: 2
error_page: 2
...
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-mariadb-scaled
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: ucp-mariadb-global
actions:
- method: merge
path: .
labels:
name: ucp-mariadb-scaled-type
storagePolicy: cleartext
data:
values:
pod:
replicas:
server: 3
ingress: 2
error_page: 2
...
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-rabbitmq-scaled
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: ucp-rabbitmq-global
actions:
- method: merge
path: .
labels:
name: ucp-rabbitmq-scaled-type
storagePolicy: cleartext
data:
values:
pod:
replicas:
server: 2
...
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-postgresql-scaled
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: ucp-postgresql-global
actions:
- method: merge
path: .
labels:
name: ucp-postgresql-scaled-type
storagePolicy: cleartext
data:
values:
pod:
replicas:
server: 3
...

View File

@ -0,0 +1,26 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-barbican-scaled
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: ucp-barbican-global
actions:
- method: merge
path: .
labels:
name: ucp-barbican-scaled-type
storagePolicy: cleartext
data:
values:
pod:
replicas:
api: 2
endpoints:
oslo_messaging:
statefulset:
replicas: 2
...

View File

@ -0,0 +1,22 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-deckhand-scaled
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: ucp-deckhand-global
actions:
- method: merge
path: .
labels:
name: ucp-deckhand-scaled-type
storagePolicy: cleartext
data:
values:
pod:
replicas:
deckhand: 2
...

View File

@ -0,0 +1,16 @@
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: ucp-drydock-scaled
layeringDefinition:
abstract: false
layer: type
storagePolicy: cleartext
data:
description: Drydock
sequenced: true
chart_group:
- ucp-maas-scaled
- ucp-drydock
...

View File

@ -0,0 +1,21 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-drydock-scaled
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: ucp-drydock-global
actions:
- method: merge
path: .
labels:
name: ucp-drydock-scaled-type
storagePolicy: cleartext
data:
values:
replicas:
drydock: 2
...

View File

@ -0,0 +1,30 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-maas-scaled
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: ucp-maas-global
actions:
- method: merge
path: .
labels:
name: ucp-maas-scaled-type
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .calico.bgp.ipv4.maas_vip
dest:
path: .values.network.maas_ingress.addr
data:
values:
pod:
replicas:
region: 2
rack: 2
...

View File

@ -0,0 +1,30 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-maas
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: ucp-maas-global
actions:
- method: merge
path: .
labels:
name: ucp-maas-type
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .calico.bgp.ipv4.maas_vip
dest:
path: .values.network.maas_ingress.addr
data:
values:
pod:
replicas:
region: 1
rack: 1
...

View File

@ -0,0 +1,20 @@
---
schema: armada/ChartGroup/v1
metadata:
schema: metadata/Document/v1
name: ucp-keystone-scaled
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: chartgroup-ucp-keystone-global
actions:
- method: merge
path: .
labels:
name: chartgroup-ucp-keystone-scaled-type
storagePolicy: cleartext
data:
chart_group:
- ucp-keystone-scaled
...

View File

@ -0,0 +1,26 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-keystone-scaled
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: ucp-keystone-global
actions:
- method: merge
path: .
labels:
name: ucp-keystone-scaled-type
storagePolicy: cleartext
data:
values:
pod:
replicas:
api: 2
endpoints:
oslo_messaging:
statefulset:
replicas: 2
...

View File

@ -0,0 +1,18 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-keystone
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: ucp-keystone-global
actions:
- method: merge
path: .
labels:
name: ucp-keystone-type
storagePolicy: cleartext
data: {}
...

View File

@ -0,0 +1,22 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-promenade-scaled
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: ucp-promenade-global
actions:
- method: merge
path: .
labels:
name: ucp-promenade-scaled-type
storagePolicy: cleartext
data:
values:
pod:
replicas:
api: 2
...

View File

@ -0,0 +1,27 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-shipyard-scaled
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: ucp-shipyard-global
actions:
- method: merge
path: .
labels:
name: ucp-shipyard-scaled-type
storagePolicy: cleartext
data:
values:
pod:
replicas:
shipyard:
api: 2
airflow:
worker: 2
# Set replicas to 0 to disable the standalone scheduler
scheduler: 0
...

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,411 @@
---
# The purpose of this file is to define the account catalog for the site. This
# mostly contains service usernames, but also contains some information which
# should be changed, like the region (site) name.
schema: pegleg/AccountCatalogue/v1
metadata:
schema: metadata/Document/v1
name: osh_service_accounts
layeringDefinition:
abstract: false
layer: type
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/CommonSoftwareConfig/v1
name: common-software-config
path: .osh.region_name
dest:
- path: .osh.keystone.admin.region_name
- path: .osh.keystone.test.region_name
- path: .osh.cinder.cinder.region_name
- path: .osh.cinder.test.region_name
- path: .osh.glance.glance.region_name
- path: .osh.glance.test.region_name
- path: .osh.heat.heat.region_name
- path: .osh.heat.test.region_name
- path: .osh.heat.heat_trustee.region_name
- path: .osh.heat.heat_stack_user.region_name
- path: .osh.swift.keystone.region_name
- path: .osh.neutron.neutron.region_name
- path: .osh.neutron.test.region_name
- path: .osh.nova.nova.region_name
- path: .osh.nova.test.region_name
- path: .osh.nova.placement.region_name
- path: .osh.barbican.barbican.region_name
- path: .osh.horizon.admin.region_name
data:
osh:
keystone:
admin:
username: admin
project_name: admin
user_domain_name: default
project_domain_name: default
test:
role: admin
username: keystone-test
project_name: test
user_domain_name: service
project_domain_name: service
oslo_db:
username: keystone
database: keystone
oslo_messaging:
keystone:
username: keystone-rabbitmq-user
cinder:
cinder:
role: admin
username: cinder
project_name: service
user_domain_name: default
project_domain_name: default
test:
role: admin
username: cinder-test
project_name: test
user_domain_name: service
project_domain_name: service
oslo_db:
username: cinder
database: cinder
oslo_messaging:
cinder:
username: cinder-rabbitmq-user
glance:
glance:
role: admin
username: glance
project_name: service
user_domain_name: default
project_domain_name: default
test:
role: admin
username: glance-test
project_name: test
user_domain_name: service
project_domain_name: service
oslo_db:
username: glance
database: glance
oslo_messaging:
glance:
username: glance-rabbitmq-user
ceph_object_store:
username: glance
heat:
heat:
role: admin
username: heat
project_name: service
user_domain_name: default
project_domain_name: default
test:
role: admin
username: heat-test
project_name: test
user_domain_name: service
project_domain_name: service
heat_trustee:
role: admin
username: heat-trust
project_name: service
user_domain_name: default
project_domain_name: default
heat_stack_user:
role: admin
username: heat-domain
domain_name: heat
oslo_db:
username: heat
database: heat
oslo_messaging:
heat:
username: heat-rabbitmq-user
swift:
keystone:
role: admin
username: swift
project_name: service
user_domain_name: default
project_domain_name: default
oslo_db:
admin:
username: root
sst:
username: sst
audit:
username: audit
prometheus_mysql_exporter:
user:
username: osh-oslodb-exporter
neutron:
neutron:
role: admin
username: neutron
project_name: service
user_domain_name: default
project_domain_name: default
test:
role: admin
username: neutron-test
project_name: test
user_domain_name: service
project_domain_name: service
oslo_db:
username: neutron
database: neutron
oslo_messaging:
neutron:
username: neutron-rabbitmq-user
nova:
nova:
role: admin
username: nova
project_name: service
user_domain_name: default
project_domain_name: default
test:
role: admin
username: nova-test
project_name: test
user_domain_name: service
project_domain_name: service
placement:
role: admin
username: placement
project_name: service
user_domain_name: default
project_domain_name: default
oslo_db:
username: nova
database: nova
oslo_db_api:
username: nova
database: nova_api
oslo_db_cell0:
username: nova
database: "nova_cell0"
oslo_messaging:
nova:
username: nova-rabbitmq-user
horizon:
admin:
username: admin
project_name: admin
user_domain_name: default
project_domain_name: default
oslo_db:
username: horizon
database: horizon
barbican:
barbican:
role: admin
username: barbican
project_name: service
user_domain_name: default
project_domain_name: default
oslo_db:
username: barbican
database: barbican
oslo_messaging:
barbican:
username: barbican-rabbitmq-user
oslo_messaging:
admin:
username: admin
...
---
schema: pegleg/AccountCatalogue/v1
metadata:
schema: metadata/Document/v1
name: osh_infra_service_accounts
layeringDefinition:
abstract: false
layer: type
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/CommonSoftwareConfig/v1
name: common-software-config
path: .osh.region_name
dest:
path: .osh_infra.prometheus_openstack_exporter.user.region_name
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .ldap.username
dest:
path: .osh_infra.ldap.admin.bind
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .infrastructure.dmaap.user
dest:
path: .osh_infra.kafka.admin.username
data:
osh_infra:
ceph_object_store:
admin:
username: s3_admin
elasticsearch:
username: elasticsearch
grafana:
admin:
username: grafana
oslo_db:
username: grafana
database: grafana
oslo_db_session:
username: grafana_session
database: grafana_session
elasticsearch:
admin:
username: elasticsearch
oslo_messaging:
admin:
username: admin
oslo_db:
admin:
username: root
sst:
username: sst
audit:
username: audit
prometheus_mysql_exporter:
user:
username: osh-infra-oslodb-exporter
prometheus_openstack_exporter:
user:
role: admin
username: prometheus-openstack-exporter
project_name: service
user_domain_name: default
project_domain_name: default
nagios:
admin:
username: nagios
prometheus:
admin:
username: prometheus
...
---
schema: pegleg/AccountCatalogue/v1
metadata:
schema: metadata/Document/v1
name: ro_service_accounts
layeringDefinition:
abstract: false
layer: type
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/CommonSoftwareConfig/v1
name: common-software-config
path: .osh.region_name
dest:
- path: .ro.keystone.admin.region_name
- path: .ro.keystone.ro.region_name
data:
ro:
keystone:
ro:
username: ro_admin
project_name: admin
user_domain_name: default
project_domain_name: default
oslo_db:
username: ro_admin
database: ro_admin
oslo_messaging:
ro:
username: ro_admin
...
---
schema: pegleg/AccountCatalogue/v1
metadata:
schema: metadata/Document/v1
name: osh_addons_service_accounts
layeringDefinition:
abstract: false
layer: type
storagePolicy: cleartext
substitutions:
- src:
schema: pegleg/CommonSoftwareConfig/v1
name: common-software-config
path: .osh.region_name
dest:
path: .osh_addons.ranger-agent.ranger_agent.region_name
- src:
schema: pegleg/CommonSoftwareConfig/v1
name: common-software-config
path: .osh.region_name
dest:
path: .osh_addons.ranger-agent.ranger.region_name
- src:
schema: pegleg/CommonSoftwareConfig/v1
name: common-software-config
path: .osh.region_name
dest:
path: .osh_addons.ranger-agent.ranger_admin.region_name
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .ranger.ranger-agent.ranger_agent_keystone_user
dest:
path: .osh_addons.ranger-agent.ranger_agent.username
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .ranger.ranger.ranger_keystone_user
dest:
path: .osh_addons.ranger-agent.ranger.username
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .ranger.ranger.ranger_keystone_user
dest:
path: .osh_addons.ranger-agent.ranger_admin.username
data:
osh_addons:
ranger-agent:
ranger_agent:
role: admin
project_name: service
user_domain_name: default
project_domain_name: default
oslo_db:
admin:
username: root
sst:
username: sst
audit:
username: audit
ranger_agent:
username: ranger-agent
database: ord
oslo_messaging:
admin:
username: admin
ranger-agent:
username: ranger-agent-rabbitmq-user
ranger:
role:
- admin
- admin_support
- admin_viewer
- ranger_customer_creator
- ranger_flavor_creator
- admin_image
project_name: service
user_domain_name: default
project_domain_name: default
ranger_admin:
role: admin
project_name: admin
user_domain_name: default
project_domain_name: default
...

View File

@ -0,0 +1,25 @@
---
# OVS-DPDK NOTE:
# Add ovs-dpdk section to specify images and charts needed
# to support deployment of ovs-dpdk
schema: pegleg/SoftwareVersions/v1
metadata:
schema: metadata/Document/v1
replacement: true
name: software-versions
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: software-versions-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
kernel_drivers:
ixgbe_driver:
type: tar
location: https://sourceforge.net/projects/e1000/files/ixgbe%20stable/5.6.3/ixgbe-5.6.3.tar.gz
...

View File

@ -0,0 +1,57 @@
---
schema: armada/Manifest/v1
metadata:
schema: metadata/Document/v1
name: full-site
labels:
name: full-site
layer: type
layeringDefinition:
abstract: false
layer: type
parentSelector:
name: full-site-global
actions:
- method: merge
path: .
storagePolicy: cleartext
data:
release_prefix: clcp
chart_groups:
- podsecuritypolicy
- kubernetes-proxy
- kubernetes-container-networking
- kubernetes-dns-scaled
- kubernetes-etcd
- kubernetes-haproxy
- kubernetes-core
- ingress-kube-system
- ucp-ceph-update
- ucp-ceph-config
- ucp-divingbell
- ucp-infra
- ucp-keystone
- ucp-armada
- ucp-services-scaled
- osh-infra-ingress-controller
- osh-infra-ceph-config
- osh-infra-radosgw
- osh-infra-logging
- osh-infra-monitoring
- osh-infra-mariadb
- osh-infra-dashboards
- openstack-ingress-controller
- openstack-ceph-config
- openstack-tenant-ceph
- openstack-mariadb
- openstack-rabbitmq
- openstack-memcached
- openstack-keystone
- openstack-radosgw
- openstack-glance
- openstack-cinder
- openstack-compute-kit
- openstack-heat
- openstack-horizon
    # Depends on all pods being deployed, up and running, and thus needs to run last
...