initial push for 2.7 alignment

Change-Id: Ibe39992bfadc0ff958bbc84948741311e721d1f7
Sreejith Punnapuzha 2021-08-27 10:42:22 -05:00
parent 5e4e4d37d9
commit 6e1a814b25
49 changed files with 1513 additions and 448 deletions

View File

@ -135,8 +135,8 @@ data:
hugepagesz: '1G'
hugepages: '20'
transparent_hugepage: 'never'
kernel_package: 'linux-image-4.15.0-64-generic'
console: 'ttyS1,115200n8'
kernel_package: 'linux-image-4.15.0-140-generic'
console: 'ttyS0,115200n8 console=tty0'
cgroup_disable: 'hugetlb'
amd_iommu: 'on'
intel_iommu: 'on'
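
Drydock and MAAS flatten these kernel_params key/value pairs into the node's boot command line; kernel_package is consumed separately, since it selects the installed kernel image rather than a boot argument. A rough Python sketch of that flattening, assuming simple key=value rendering (the real logic lives in Drydock/MAAS):

# Illustrative only: assumed key=value rendering of kernel_params.
kernel_params = {
    "hugepagesz": "1G",
    "hugepages": "20",
    "transparent_hugepage": "never",
    "console": "ttyS0,115200n8 console=tty0",
    "cgroup_disable": "hugetlb",
    "amd_iommu": "on",
    "intel_iommu": "on",
}
cmdline = " ".join(f"{key}={value}" for key, value in kernel_params.items())
print(cmdline)
# hugepagesz=1G hugepages=20 transparent_hugepage=never
# console=ttyS0,115200n8 console=tty0 cgroup_disable=hugetlb ...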

View File

@ -87,8 +87,8 @@ data:
image: 'bionic'
kernel: 'ga-18.04'
kernel_params:
kernel_package: 'linux-image-4.15.0-64-generic'
console: 'ttyS1,115200n8'
kernel_package: 'linux-image-4.15.0-140-generic'
console: 'ttyS0,115200n8 console=tty0'
cgroup_disable: 'hugetlb'
metadata:
owner_data:

View File

@ -135,8 +135,8 @@ data:
hugepagesz: 'hardwareprofile:hugepages.dpdk.size'
hugepages: 'hardwareprofile:hugepages.dpdk.count'
transparent_hugepage: 'never'
kernel_package: 'linux-image-4.15.0-64-generic'
console: 'ttyS1,115200n8'
kernel_package: 'linux-image-4.15.0-140-generic'
console: 'ttyS0,115200n8 console=tty0'
amd_iommu: 'on'
intel_iommu: 'on'
iommu: 'pt'

View File

@ -101,8 +101,8 @@ data:
image: 'bionic'
kernel: 'ga-18.04'
kernel_params:
kernel_package: 'linux-image-4.15.0-64-generic'
console: 'ttyS1,115200n8'
kernel_package: 'linux-image-4.15.0-140-generic'
console: 'ttyS0,115200n8 console=tty0'
intel_iommu: 'on'
iommu: 'pt'
amd_iommu: 'on'

View File

@ -216,12 +216,12 @@ metadata:
pattern: PAUSE_IMAGE
# CRI tool
# - src:
# schema: pegleg/SoftwareVersions/v1
# name: software-versions
# path: .files.crictl
# dest:
# path: .files[9].tar_url
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .files.crictl
dest:
path: .files[9].tar_url
data:
# see (and update if needed)
# type/cruiser/profiles/kubernetes-host.yaml (data.files)
@ -336,12 +336,17 @@ data:
mode: 0400
content: |-
version = 2
disabled_plugins = ["restart"]
[plugins.cri]
systemd_cgroup = true
[debug]
level = "warn"
[plugins."io.containerd.grpc.v1.cri"]
sandbox_image = "PAUSE_IMAGE"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
# .files[9]
- path: /usr/bin/crictl
tar_path: crictl
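
The hunk above moves containerd to the v2 config schema: the old [plugins.cri] table with systemd_cgroup becomes [plugins."io.containerd.grpc.v1.cri"] with SystemdCgroup set under the runc runtime options, and the crictl substitution block is uncommented so the tool ships as .files[9]. One way to spot-check the result on a node is to ask the CRI endpoint for its effective config; a sketch assuming crictl is installed and pointed at the default containerd socket (exact JSON layout varies by version):

import json
import subprocess

# `crictl info` prints runtime status plus the effective CRI plugin config.
out = subprocess.run(["crictl", "info"], capture_output=True, text=True, check=True)
info = json.loads(out.stdout)

# Confirm the PAUSE_IMAGE substitution took effect (field name assumed
# from containerd's CRI plugin; adjust if your version nests it differently).
print(info.get("config", {}).get("sandboxImage"))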

View File

@ -168,6 +168,15 @@ data:
release_group: clcp-kubernetes-apiserver
values:
pod:
mandatory_access_control:
type: apparmor
kubernetes_apiserver_anchor:
anchor: runtime/default
kube-apiserver:
init: runtime/default
apiserver-key-rotate: runtime/default
apiserver:
apiserver: runtime/default
lifecycle:
upgrades:
daemonsets:
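
The mandatory_access_control blocks introduced throughout this commit are rendered by helm-toolkit into per-container AppArmor annotations on the named pods. A sketch of that mapping, using the upstream Kubernetes annotation key (the dict mirrors the apiserver values above; helm-toolkit does the real rendering):

# Mirror of the apiserver values above; illustrative only.
profiles = {
    "kubernetes_apiserver_anchor": {"anchor": "runtime/default"},
    "kube-apiserver": {
        "init": "runtime/default",
        "apiserver-key-rotate": "runtime/default",
    },
    "apiserver": {"apiserver": "runtime/default"},
}

for pod, containers in profiles.items():
    for container, profile in containers.items():
        # Pre-1.30 Kubernetes AppArmor annotation key format.
        key = f"container.apparmor.security.beta.kubernetes.io/{container}"
        print(f"{pod} -> {key}: {profile}")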

View File

@ -9,6 +9,7 @@ metadata:
storagePolicy: cleartext
data:
description: Kubernetes components
sequenced: true
chart_group:
- kubernetes-apiserver
- kubernetes-controller-manager

View File

@ -120,6 +120,12 @@ data:
network:
kubernetes_netloc: 127.0.0.1:6553
pod:
mandatory_access_control:
type: apparmor
kubernetes-controller-manager-anchor:
anchor: runtime/default
controller-manager:
controller-manager: runtime/default
lifecycle:
upgrades:
daemonsets:

View File

@ -125,6 +125,12 @@ data:
# and Deployment-managed pods as we transition to DaemonSet.
replicas:
coredns: 2
mandatory_access_control:
type: apparmor
coredns:
coredns: runtime/default
coredns-health: runtime/default
coredns-test: runtime/default
labels:
coredns:
# NOTE(mb874d): This is the label specified in the IPDD

View File

@ -98,6 +98,14 @@ data:
release_group: clcp-kubernetes-etcd
values:
pod:
mandatory_access_control:
type: apparmor
etcd-anchor:
etcdctl: runtime/default
etcd:
etcd-test: runtime/default
etcd-backup:
etcd-backup: runtime/default
lifecycle:
upgrades:
daemonsets:

View File

@ -63,6 +63,15 @@ data:
enabled: true
min_ready_seconds: 0
max_unavailable: '50%'
mandatory_access_control:
type: apparmor
haproxy-anchor:
haproxy-perms: runtime/default
anchor: runtime/default
kubernetes:
haproxy-haproxy-test: runtime/default
haproxy:
haproxy: runtime/default
security_context:
haproxy:
pod:

View File

@ -74,6 +74,15 @@ data:
test:
enabled: true
values:
pod:
mandatory_access_control:
type: apparmor
clcp-openstack-ceph-config-ceph-ns-key-generator:
ceph-storage-keys-generator: runtime/default
init: runtime/default
clcp-openstack-ceph-config-test:
ceph-provisioner-helm-test: runtime/default
init: runtime/default
labels:
job:
node_selector_key: openstack-control-plane

View File

@ -225,6 +225,38 @@ data:
jobs:
- clcp-openstack-rabbitmq-cluster-wait
pod:
mandatory_access_control:
type: apparmor
cinder-api:
cinder-api: runtime/default
ceph-coordination-volume-perms: runtime/default
init: runtime/default
cinder-backup:
cinder-backup: runtime/default
ceph-coordination-volume-perms: runtime/default
init: runtime/default
cinder-scheduler:
cinder-scheduler: runtime/default
ceph-coordination-volume-perms: runtime/default
init: runtime/default
cinder-volume:
cinder-volume: runtime/default
ceph-coordination-volume-perms: runtime/default
init-cinder-conf: runtime/default
init: runtime/default
cinder-backup-storage-init:
cinder-backup-storage-init: runtime/default
init: runtime/default
cinder-test:
init: runtime/default
cinder-test: runtime/default
cinder-test-ks-user: runtime/default
cinder-create-internal-tenant:
init: runtime/default
create-internal-tenant: runtime/default
cinder-volume-usage-audit:
cinder-volume-usage-audit: runtime/default
init: runtime/default
replicas:
api: 3
volume: 3
@ -243,11 +275,25 @@ data:
pod_replacement_strategy: RollingUpdate
rolling_update:
max_unavailable: 50%
useHostNetwork:
volume: true
backup: true
security_context:
cinder_volume:
container:
cinder_volume:
allowPrivilegeEscalation: true
privileged: true
readOnlyRootFilesystem: false
cinder_backup:
container:
cinder_backup:
privileged: true
cinder_api:
container:
cinder_api:
runAsUser: 0
readOnlyRootFilesystem: false
labels:
api:
node_selector_key: openstack-control-plane
@ -270,7 +316,69 @@ data:
ceph_client:
configmap: tenant-ceph-etc
user_secret_name: pvc-tceph-client-key
network:
api:
ingress:
annotations:
nginx.ingress.kubernetes.io/backend-protocol: "https"
nginx.ingress.kubernetes.io/configuration-snippet: |
more_set_headers "X-Content-Type-Options: nosniff";
more_set_headers "X-Frame-Options: deny";
more_set_headers "Content-Security-Policy: default-src 'self'";
more_set_headers "X-Permitted-Cross-Domain-Policies: none";
more_set_headers "X-XSS-Protection: 1; mode=block";
endpoints:
ingress:
port:
ingress:
default: 443
conf:
software:
apache2:
binary: apache2
start_parameters: -DFOREGROUND
site_dir: /etc/apache2/sites-enabled
conf_dir: /etc/apache2/conf-enabled
mods_dir: /etc/apache2/mods-available
a2enmod:
- ssl
a2dismod: null
mpm_event: |
<IfModule mpm_event_module>
ServerLimit 1024
StartServers 32
MinSpareThreads 32
MaxSpareThreads 256
ThreadsPerChild 25
MaxRequestsPerChild 128
ThreadLimit 720
</IfModule>
wsgi_cinder: |
{{- $portInt := tuple "volume" "internal" "api" $ | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
Listen {{ $portInt }}
<VirtualHost *:{{ $portInt }}>
ServerName {{ printf "%s.%s.svc.%s" "cinder-api" .Release.Namespace .Values.endpoints.cluster_domain_suffix }}
WSGIDaemonProcess cinder-api processes=1 threads=1 user=cinder display-name=%{GROUP}
WSGIProcessGroup cinder-api
WSGIScriptAlias / /var/www/cgi-bin/cinder/cinder-wsgi
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
AllowEncodedSlashes On
<IfVersion >= 2.4>
ErrorLogFormat "%{cu}t %M"
</IfVersion>
SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded
ErrorLog /dev/stdout
CustomLog /dev/stdout combined env=!forwarded
CustomLog /dev/stdout proxy env=forwarded
SSLEngine on
SSLCertificateFile /etc/cinder/certs/tls.crt
SSLCertificateKeyFile /etc/cinder/certs/tls.key
SSLProtocol all -SSLv3 -TLSv1 -TLSv1.1
SSLCipherSuite ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256
SSLHonorCipherOrder on
</VirtualHost>
ceph:
pools:
backup:
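
The cinder diff above terminates TLS in Apache and has the ingress inject hardening headers via more_set_headers. After a deploy, those headers can be spot-checked against the public endpoint; a sketch using the third-party requests library, with a placeholder URL:

import requests

URL = "https://cinder.example.com/"  # placeholder; use the real ingress FQDN

resp = requests.get(URL, timeout=10)
for header in ("X-Content-Type-Options", "X-Frame-Options",
               "Content-Security-Policy",
               "X-Permitted-Cross-Domain-Policies", "X-XSS-Protection"):
    # A missing header prints None, flagging an annotation that did not apply.
    print(header, "=>", resp.headers.get(header))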

View File

@ -64,6 +64,15 @@ data:
type: apparmor
libvirt-libvirt-default:
libvirt: localhost/libvirt-v1
resources:
enabled: true
libvirt:
requests:
memory: "4096Mi"
cpu: "4000m"
limits:
memory: "4096Mi"
cpu: "4000m"
ceph_client:
configmap: tenant-ceph-etc
user_secret_name: pvc-tceph-client-key

View File

@ -60,6 +60,14 @@ data:
node_selector_key: openvswitch
node_selector_value: enabled
pod:
mandatory_access_control:
type: apparmor
openvswitch-vswitchd:
openvswitch-vswitchd: runtime/default
openvswitch-vswitchd-modules: runtime/default
openvswitch-db:
openvswitch-db: runtime/default
openvswitch-db-perms: runtime/default
probes:
ovs_db:
ovs_db:

View File

@ -55,6 +55,14 @@ data:
node_selector_key: openstack-control-plane
node_selector_value: enabled
pod:
mandatory_access_control:
type: apparmor
ingress-error-pages:
init: runtime/default
ingress-error-pages: runtime/default
ingress-server:
init: runtime/default
ingress: runtime/default
replicas:
ingress: 2
error_page: 2

View File

@ -135,6 +135,120 @@ data:
size: 30Gi
backup:
size: 50Gi
conf:
database:
00_base: |
[mysqld]
# Charset
character_set_server=utf8
collation_server=utf8_general_ci
skip-character-set-client-handshake
# Logging
slow_query_log=off
slow_query_log_file=/var/log/mysql/mariadb-slow.log
log_warnings=2
# General logging has a huge performance penalty and is therefore disabled by default
general_log=off
general_log_file=/var/log/mysql/mariadb-error.log
long_query_time=3
log_queries_not_using_indexes=on
# Networking
bind_address=0.0.0.0
port={{ tuple "oslo_db" "direct" "mysql" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
# When a client connects, the server will perform hostname resolution,
# and when DNS is slow, establishing the connection will become slow as well.
# It is therefore recommended to start the server with skip-name-resolve to
# disable all DNS lookups. The only limitation is that the GRANT statements
# must then use IP addresses only.
skip_name_resolve
# Tuning
user=mysql
max_allowed_packet=256M
open_files_limit=10240
max_connections=8192
max-connect-errors=1000000
# General security settings
# Reference link below:
# https://dev.mysql.com/doc/mysql-security-excerpt/8.0/en/general-security-issues.html
# secure_file_priv is set to '/home' because it is read-only, which will
# disable this feature completely.
secure_file_priv=/home
local_infile=0
symbolic_links=0
sql_mode="STRICT_ALL_TABLES,STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION"
## Generally, it is unwise to set the query cache to be larger than 64-128M
## as the costs associated with maintaining the cache outweigh the performance
## gains.
## The query cache is a well known bottleneck that can be seen even when
## concurrency is moderate. The best option is to disable it from day 1
## by setting query_cache_size=0 (now the default on MySQL 5.6)
## and to use other ways to speed up read queries: good indexing, adding
## replicas to spread the read load or using an external cache.
query_cache_size=0
query_cache_type=0
sync_binlog=0
thread_cache_size=16
table_open_cache=2048
table_definition_cache=1024
#
# InnoDB
#
# The buffer pool is where data and indexes are cached: having it as large as possible
# will ensure you use memory and not disks for most read operations.
# Typical values are 50..75% of available RAM.
# TODO(tomasz.paszkowski): This needs to be dynamic based on available RAM.
innodb_buffer_pool_size=1024M
innodb_doublewrite=0
innodb_file_format=Barracuda
innodb_file_per_table=1
innodb_flush_method=O_DIRECT
innodb_io_capacity=500
innodb_locks_unsafe_for_binlog=1
innodb_log_file_size=128M
innodb_old_blocks_time=1000
innodb_read_io_threads=8
innodb_write_io_threads=8
# Clustering
binlog_format=ROW
default-storage-engine=InnoDB
innodb_autoinc_lock_mode=2
innodb_flush_log_at_trx_commit=2
wsrep_cluster_name={{ tuple "oslo_db" "direct" . | include "helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup" | replace "." "_" }}
wsrep_on=1
wsrep_provider=/usr/lib/galera/libgalera_smm.so
wsrep_provider_options="evs.suspect_timeout=PT30S; gmcast.peer_timeout=PT15S; gmcast.listen_addr=tcp://0.0.0.0:{{ tuple "oslo_db" "direct" "wsrep" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}"
wsrep_slave_threads=12
wsrep_sst_auth={{ .Values.endpoints.oslo_db.auth.sst.username }}:{{ .Values.endpoints.oslo_db.auth.sst.password }}
wsrep_sst_method=mariabackup
{{ if .Values.manifests.certificates }}
# TLS
ssl_ca=/etc/mysql/certs/ca.crt
ssl_key=/etc/mysql/certs/tls.key
ssl_cert=/etc/mysql/certs/tls.crt
# tls_version = TLSv1.2,TLSv1.3
{{ end }}
[mysqldump]
max-allowed-packet=16M
[client]
default_character_set=utf8
protocol=tcp
port={{ tuple "oslo_db" "direct" "mysql" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
{{ if .Values.manifests.certificates }}
# TLS
ssl_ca=/etc/mysql/certs/ca.crt
ssl_key=/etc/mysql/certs/tls.key
ssl_cert=/etc/mysql/certs/tls.crt
# tls_version = TLSv1.2,TLSv1.3
ssl-verify-server-cert=false
{{ end }}
backup:
enabled: true
days_to_keep: 3
remote_backup:
enabled: true
container_name: DOMAIN
days_to_keep: 14
storage_policy: ncbackup_pt
monitoring:
prometheus:
enabled: true
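
With wsrep enabled as configured above, basic Galera health reduces to every member reporting the same wsrep_cluster_size. A minimal probe using the PyMySQL driver; host and credentials are placeholders:

import pymysql

# Placeholders; in-cluster this would target the mariadb service with
# monitoring credentials.
conn = pymysql.connect(host="mariadb.openstack.svc.cluster.local",
                       user="monitor", password="secret")
try:
    with conn.cursor() as cur:
        cur.execute("SHOW GLOBAL STATUS LIKE 'wsrep_cluster_size'")
        name, value = cur.fetchone()
        print(name, "=", value)  # expect the configured server replica count
finally:
    conn.close()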

View File

@ -70,6 +70,14 @@ data:
node_selector_key: openstack-control-plane
node_selector_value: enabled
pod:
mandatory_access_control:
type: apparmor
prometheus_memcached_exporter:
init: runtime/default
memcached-exporter: runtime/default
memcached:
init: runtime/default
memcached: runtime/default
lifecycle:
upgrades:
deployments:

View File

@ -101,6 +101,23 @@ data:
anti:
type:
server: requiredDuringSchedulingIgnoredDuringExecution
resources:
enabled: true
server:
requests:
memory: "8192Mi"
cpu: "12000m"
limits:
memory: "8192Mi"
cpu: "12000m"
network:
management:
ingress:
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
nginx.ingress.kubernetes.io/configuration-snippet: |
more_set_headers "X-XSS-Protection: 1; mode=block";
more_set_headers "X-Frame-Options: deny";
labels:
server:
node_selector_key: openstack-control-plane

View File

@ -160,6 +160,58 @@ data:
pod:
replicas:
mgr: 3
resources:
enabled: true
mds:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "2Gi"
cpu: "2000m"
mgr:
requests:
memory: "2Gi"
cpu: "2000m"
limits:
memory: "8Gi"
cpu: "4000m"
checkdns:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "1Gi"
cpu: "1000m"
jobs:
bootstrap:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "1Gi"
cpu: "1000m"
image_repo_sync:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "1Gi"
cpu: "1000m"
rbd_pool:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "1Gi"
cpu: "1000m"
tests:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "1Gi"
cpu: "1000m"
conf:
features:
mds: false

View File

@ -53,6 +53,14 @@ data:
node_selector_key: tenant-ceph-control-plane
node_selector_value: enabled
pod:
mandatory_access_control:
type: apparmor
ingress-error-pages:
init: runtime/default
ingress-error-pages: runtime/default
ingress-server:
init: runtime/default
ingress: runtime/default
replicas:
ingress: 2
error_page: 2

View File

@ -254,6 +254,11 @@ data:
release_group: clcp-ucp-apiserver-webhook
values:
pod:
mandatory_access_control:
type: apparmor
apiserver-webhook:
apiserver: runtime/default
webhook: runtime/default
replicas:
api: 1
affinity:
@ -271,6 +276,18 @@ data:
kubernetes_apiserver:
node_selector_key: kubernetes-apiserver
node_selector_value: enabled
network:
api:
ingress:
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
nginx.ingress.kubernetes.io/proxy-read-timeout: "120"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/secure-backends: "true"
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
nginx.ingress.kubernetes.io/configuration-snippet: |
more_set_headers "X-XSS-Protection: 1; mode=block";
more_set_headers "X-Frame-Options: deny";
conf:
policy:
- resource:
@ -298,38 +315,6 @@ data:
- type: role
values:
- utility_exec
- nonresource:
verbs:
- get
path: "/api"
match:
- type: role
values:
- utility_exec
- nonresource:
verbs:
- get
path: "/api/*"
match:
- type: role
values:
- utility_exec
- nonresource:
verbs:
- get
path: "/apis"
match:
- type: role
values:
- utility_exec
- nonresource:
verbs:
- get
path: "/apis/*"
match:
- type: role
values:
- utility_exec
- resource:
verbs:
- "*"
@ -389,14 +374,6 @@ data:
- type: role
values:
- admin_k8scluster
- nonresource:
verbs:
- "*"
path: "*"
match:
- type: role
values:
- admin_k8scluster
- resource:
resources:
- pods
@ -706,6 +683,141 @@ data:
- type: role
values:
- admin_k8scluster_viewer
# gatekeeper constraints can be viewed
- resource:
verbs:
- get
- list
resources:
- "*"
version: constraints.gatekeeper.sh
namespace: "*"
match:
- type: role
values:
- gatekeeper_constraint_reader
# argo workflows can be viewed, created, and deleted in the aqua namespace
- resource:
verbs:
- create
- get
- list
- delete
- watch
resources:
- workflows
version: argoproj.io
namespace: aqua
match:
- type: role
values:
- aqua_bathysphere
# Every built-in or custom resource can be get/list in aqua namespace
- resource:
verbs:
- get
- list
resources:
- '*'
version: '*'
namespace: aqua
match:
- type: role
values:
- aqua_bathysphere
# To enable discovery when using Kubectl get/list command on nodes
- resource:
verbs:
- get
- list
resources:
- nodes
version: ''
namespace: "*"
match:
- type: role
values:
- aqua_bathysphere
# TestResultRef custom resources can be fully managed in the aqua namespace
- resource:
verbs:
- get
- list
- create
- delete
- watch
- update
- patch
resources:
- testresultrefs
version: 'aqua.att.com'
namespace: aqua
match:
- type: role
values:
- aqua_bathysphere
- nonresource:
verbs:
- get
path: "/api"
match:
- type: role
values:
- utility_exec
- admin_k8scluster_viewer
- kube-system-viewer
- nonresource:
verbs:
- get
path: "/api/*"
match:
- type: role
values:
- utility_exec
- admin_k8scluster_viewer
- kube-system-viewer
- nonresource:
verbs:
- get
path: "/apis"
match:
- type: role
values:
- utility_exec
- admin_k8scluster_viewer
- kube-system-viewer
- nonresource:
verbs:
- get
path: "/apis/*"
match:
- type: role
values:
- utility_exec
- admin_k8scluster_viewer
- kube-system-viewer
- nonresource:
verbs:
- get
path: "/version"
match:
- type: role
values:
- utility_exec
- admin_k8scluster_viewer
- kube-system-viewer
- nonresource:
verbs:
- "*"
path: "*"
match:
- type: role
values:
- admin
- admin_k8scluster
- admin_k8scluster_editor
- aqua_bathysphere
- kube-system-admin
apiserver:
encryption_provider:
file: encryption_provider.yaml

View File

@ -96,6 +96,14 @@ data:
release_group: clcp-ucp-armada
values:
pod:
mandatory_access_control:
type: apparmor
armada-api:
init: runtime/default
armada-api: runtime/default
tiller: runtime/default
armada-api-test:
armada-api-test: runtime/default
affinity:
anti:
type:

View File

@ -7,6 +7,8 @@ metadata:
abstract: false
layer: global
storagePolicy: cleartext
labels:
name: ucp-ceph-config
substitutions:
# Chart source
- src:
@ -75,6 +77,15 @@ data:
test:
enabled: true
values:
pod:
mandatory_access_control:
type: apparmor
clcp-ucp-ceph-config-ceph-ns-key-generator:
ceph-storage-keys-generator: runtime/default
init: runtime/default
clcp-ucp-ceph-config-test:
ceph-provisioner-helm-test: runtime/default
init: runtime/default
labels:
job:
node_selector_key: ucp-control-plane

View File

@ -56,7 +56,7 @@ metadata:
# Secrets
- dest:
path: .values.conf.ceph.global.fsid
src:
schema: deckhand/Passphrase/v1
name: ceph_fsid
@ -112,27 +112,27 @@ data:
namespace: ceph
deployment:
ceph: true
bootstrap:
# Placed here to run after main ceph components are laid down/updated
enabled: true
script: |
set -x
DESIRED_VERSION="nautilus"
ADDITIONAL_VERSIONS=1
while [ $ADDITIONAL_VERSIONS -gt 0 ]; do
sleep 5
ADDITIONAL_VERSIONS=$(ceph tell osd.* version --format json | awk -F 'osd.[0-9]*: ' '/^osd/ { print $2}' | jq -r '.version' | awk '{ print $(NF-1) }' | uniq | grep -v ${DESIRED_VERSION} | wc -l )
done
while [[ `ceph pg ls | tail -n +2 | grep -v "active+\|NOTE"` ]]
do
sleep 5
done
ceph_mgr_enabled_modules:
- restful
- status
- prometheus
- balancer
- iostat
bootstrap:
# Placed here to run after main ceph components are laid down/updated
enabled: true
script: |
set -x
DESIRED_VERSION="octopus"
ADDITIONAL_VERSIONS=1
while [ $ADDITIONAL_VERSIONS -gt 0 ]; do
sleep 5
ADDITIONAL_VERSIONS=$(ceph tell osd.* version --format json | awk -F 'osd.[0-9]*: ' '/^osd/ { print $2}' | jq -r '.release' | uniq | grep -v ${DESIRED_VERSION} | wc -l )
done
while [[ `ceph pg stat -f json | jq -r ".pg_summary.num_pgs"` -eq 0 ]] || [[ `ceph pg ls -f json | jq -r '.pg_stats[].state' | grep -v "active"` ]]
do
sleep 5
done
jobs:
pool_checkPGs:
# Run once a month at midnight of the first day of the month
@ -146,7 +146,29 @@ data:
# Skip new job if previous job still active
execPolicy: Forbid
startingDeadlineSecs: 60
rbd_pool:
restartPolicy: Never
pod:
mandatory_access_control:
type: apparmor
ceph-checkdns:
ceph-checkdns: runtime/default
init: runtime/default
ceph-mds:
ceph-mds: runtime/default
ceph-init-dirs: runtime/default
ceph-mgr:
ceph-mgr: runtime/default
ceph-init-dirs: runtime/default
ceph-rbd-pool:
ceph-rbd-pool: runtime/default
init: runtime/default
ceph-client-bootstrap:
ceph-client-bootstrap: runtime/default
init: runtime/default
ceph-client-test:
ceph-cluster-helm-test: runtime/default
init: runtime/default
affinity:
anti:
type:
@ -154,12 +176,79 @@ data:
replicas:
mds: 3
mgr: 3
lifecycle:
upgrades:
deployments:
pod_replacement_strategy: RollingUpdate
rolling_update:
max_unavailable: 50%
resources:
enabled: true
mds:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "2Gi"
cpu: "2000m"
mgr:
requests:
memory: "2Gi"
cpu: "2000m"
limits:
memory: "8Gi"
cpu: "4000m"
checkdns:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "1Gi"
cpu: "1000m"
jobs:
bootstrap:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "1Gi"
cpu: "1000m"
image_repo_sync:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "1Gi"
cpu: "1000m"
rbd_pool:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "1Gi"
cpu: "1000m"
tests:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "1Gi"
cpu: "1000m"
conf:
features:
pg_autoscaler: false
# NOTE(st053q): If autoscaler is disabled, then
# mon_pg_warn_max_object_skew should not be 0 in ceph.conf
pg_autoscaler: true
cluster_flags:
unset: "noup"
pool:
spec:
# Health metrics pool
- name: device_health_metrics
application: mgr_devicehealth
replication: 3
percent_total_data: 5
# RBD pool
- name: rbd
application: rbd
@ -225,10 +314,15 @@ data:
target:
osd: 1
pg_per_osd: 100
pg_num_min: 1
protected: true
# Quota at 10000 to ensure that all pools have quotas greater
# than the cluster's capacity. 1000 would get some to about 85%
quota: 10000
# NOTE: 'quota' represents a scaler for effective capacity of cluster
# as a percent value. Setting it to anything more than 100 will mean
# that if all pools completely use their quotas, total data stored is
# more than capacity. For example quota at 10000 would ensure that
# EVERY pool can exceed cluster capacity. Set to 85 in order to maintain
# healthy state and allow data move in case of failures.
quota: 85
default:
# NOTE(alanmeadows): This is required ATM for bootstrapping a Ceph
@ -242,6 +336,8 @@ data:
# cluster with only one OSD. Depending on OSD targeting & site
# configuration this can be changed.
osd_pool_default_size: 1
mon_pg_warn_min_per_osd: 0
mon_pg_warn_max_object_skew: 0
dependencies:
- ceph-htk
...
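
To make the new quota comment concrete: reading quota as a percentage of effective cluster capacity, each pool's byte quota scales with its percent_total_data. A back-of-the-envelope sketch; the exact formula lives in the ceph-client chart, so the shape below is an assumption:

# Assumed shape of the chart's per-pool quota math, for illustration only.
cluster_capacity_bytes = 100 * 2**40   # say, a 100 TiB cluster
quota_percent = 85                     # the 'quota: 85' set above
pool_percent_total_data = 5            # e.g. device_health_metrics

pool_quota_bytes = (cluster_capacity_bytes
                    * quota_percent / 100
                    * pool_percent_total_data / 100)
print(f"{pool_quota_bytes / 2**40:.2f} TiB")  # 4.25 TiB for this pool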

View File

@ -56,7 +56,7 @@ metadata:
# Secrets
- dest:
path: .values.conf.ceph.global.fsid
src:
schema: deckhand/Passphrase/v1
name: ceph_fsid
@ -118,9 +118,81 @@ data:
- balancer
- iostat
pod:
mandatory_access_control:
type: apparmor
ceph-checkdns:
ceph-checkdns: runtime/default
init: runtime/default
ceph-mds:
ceph-mds: runtime/default
ceph-init-dirs: runtime/default
ceph-mgr:
ceph-mgr: runtime/default
ceph-init-dirs: runtime/default
ceph-rbd-pool:
ceph-rbd-pool: runtime/default
init: runtime/default
ceph-client-bootstrap:
ceph-client-bootstrap: runtime/default
init: runtime/default
ceph-client-test:
ceph-cluster-helm-test: runtime/default
init: runtime/default
replicas:
mds: 1
mgr: 1
resources:
enabled: true
mds:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "2Gi"
cpu: "2000m"
mgr:
requests:
memory: "2Gi"
cpu: "2000m"
limits:
memory: "8Gi"
cpu: "4000m"
checkdns:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "1Gi"
cpu: "1000m"
jobs:
bootstrap:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "1Gi"
cpu: "1000m"
image_repo_sync:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "1Gi"
cpu: "1000m"
rbd_pool:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "1Gi"
cpu: "1000m"
tests:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "1Gi"
cpu: "1000m"
jobs:
pool_checkPGs:
# Run once a month at midnight of the first day of the month
@ -134,11 +206,20 @@ data:
# Skip new job if previous job still active
execPolicy: Forbid
startingDeadlineSecs: 60
rbd_pool:
restartPolicy: Never
conf:
features:
pg_autoscaler: false
cluster_flags:
set: "noup"
pool:
spec:
# Health metrics pool
- name: device_health_metrics
application: mgr_devicehealth
replication: 3
percent_total_data: 5
# RBD pool
- name: rbd
application: rbd
@ -205,9 +286,13 @@ data:
osd: 1
pg_per_osd: 100
protected: true
# Quota at 10000 to ensure that all pools have quotas greater
# than the cluster's capacity. 1000 would get some to about 85%
quota: 10000
# NOTE: 'quota' represents a scaler for effective capacity of cluster
# as a percent value. Setting it to anything more than 100 will mean
# that if all pools completely use their quotas, total data stored is
# more than capacity. For example quota at 10000 would ensure that
# EVERY pool can exceed cluster capacity. Set to 85 in order to maintain
# healthy state and allow data move in case of failures.
quota: 85
default:
# NOTE(alanmeadows): This is required ATM for bootstrapping a Ceph
# cluster with only one OSD. Depending on OSD targeting & site
@ -220,6 +305,9 @@ data:
# cluster with only one OSD. Depending on OSD targeting & site
# configuration this can be changed.
osd_pool_default_size: 1
mon_pg_warn_min_per_osd: 0
mon_pg_warn_max_object_skew: 0
manifests:
cronjob_defragosds: false
dependencies:

View File

@ -3,8 +3,6 @@ schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ucp-ceph-ingress
labels:
name: ucp-ceph-ingress-global
layeringDefinition:
abstract: false
layer: global
@ -53,6 +51,14 @@ data:
node_selector_key: ucp-control-plane
node_selector_value: enabled
pod:
mandatory_access_control:
type: apparmor
ingress-error-pages:
init: runtime/default
ingress-error-pages: runtime/default
ingress-server:
init: runtime/default
ingress: runtime/default
replicas:
ingress: 2
error_page: 2
@ -61,6 +67,9 @@ data:
annotations:
nginx.ingress.kubernetes.io/proxy-body-size: 20m
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
nginx.ingress.kubernetes.io/configuration-snippet: |
more_set_headers "X-XSS-Protection: 1; mode=block";
more_set_headers "X-Frame-Options: deny";
dependencies:
- ucp-ingress-htk
...

View File

@ -54,7 +54,7 @@ metadata:
# Secrets
- dest:
path: .values.conf.ceph.global.fsid
src:
schema: deckhand/Passphrase/v1
name: ceph_fsid
@ -84,6 +84,42 @@ data:
labels:
release_group: clcp-ucp-ceph-mon
values:
pod:
mandatory_access_control:
type: apparmor
ceph-mon:
ceph-init-dirs: runtime/default
ceph-mon: runtime/default
ceph-log-ownership: runtime/default
ceph-mon-check:
ceph-mon: runtime/default
init: runtime/default
ceph-bootstrap:
ceph-bootstrap: runtime/default
init: runtime/default
ceph-storage-keys-generator:
ceph-storage-keys-generator: runtime/default
init: runtime/default
ceph-mon-keyring-generator:
ceph-mon-keyring-generator: runtime/default
init: runtime/default
ceph-mgr-keyring-generator:
init: runtime/default
ceph-mgr-keyring-generator: runtime/default
ceph-mds-keyring-generator:
init: runtime/default
ceph-mds-keyring-generator: runtime/default
ceph-osd-keyring-generator:
ceph-osd-keyring-generator: runtime/default
init: runtime/default
lifecycle:
upgrades:
daemonsets:
pod_replacement_strategy: RollingUpdate
mon:
enabled: true
min_ready_seconds: 0
max_unavailable: 1
labels:
job:
node_selector_key: ucp-control-plane
@ -103,6 +139,8 @@ data:
ceph:
global:
mon_data_avail_warn: 30
mon_pg_warn_min_per_osd: 0
mon_pg_warn_max_object_skew: 0
dependencies:
- ceph-htk
...

View File

@ -49,7 +49,7 @@ metadata:
# Secrets
- dest:
path: .values.conf.ceph.global.fsid
src:
schema: deckhand/Passphrase/v1
name: ceph_fsid
@ -62,11 +62,10 @@ data:
protected:
continue_processing: false
wait:
timeout: 7200
timeout: 900
labels:
release_group: clcp-ucp-ceph-osd
resources:
- type: daemonset
resources: []
native:
enabled: false
install:
@ -79,18 +78,37 @@ data:
labels:
release_group: clcp-ucp-ceph-osd
component: test
- type: job
labels:
release_group: clcp-ucp-ceph-osd
component: post-apply
test:
enabled: true
enabled: false
values:
# Custom Apparmor Profile needs to be fixed in NC 2.2
pod:
mandatory_access_control:
type: apparmor
ceph-osd-default:
ceph-osd-default: unconfined
ceph-init-dirs: unconfined
ceph-log-ownership: unconfined
osd-init: unconfined
ceph-osd-test:
init: unconfined
ceph-cluster-helm-test: unconfined
ceph-osd-post-apply:
ceph-osd-post-apply: runtime/default
init: runtime/default
lifecycle:
upgrades:
daemonsets:
pod_replacement_strategy: OnDelete
deploy:
tool: "ceph-volume"
manifests:
cronjob_defragosds: false
job_post_apply: false
labels:
osd:
node_selector_key: ceph-osd
@ -120,9 +138,10 @@ data:
osd_crush_chooseleaf_type: 0
ceph:
osd:
osd_op_num_threads_per_hdd: 2
osd_op_num_threads_per_ssd: 4
osd_op_num_threads_per_shard_hdd: 1
osd_op_num_threads_per_shard_ssd: 2
osd_max_backfills: 10
osd_scrub_auto_repair: true
dependencies:
- ceph-htk
...

View File

@ -88,6 +88,12 @@ data:
provisioner:
node_selector_key: ucp-control-plane
node_selector_value: enabled
csi_rbd_plugin:
node_selector_key: ceph-osd
node_selector_value: enabled
test:
node_selector_key: ceph-osd
node_selector_value: enabled
endpoints:
ceph_mon:
namespace: ceph
@ -96,9 +102,34 @@ data:
client_secrets: false
rbd_provisioner: true
cephfs_provisioner: false
csi: true
bootstrap:
enabled: true
pod:
mandatory_access_control:
type: apparmor
ceph-cephfs-provisioner:
ceph-cephfs-provisioner: runtime/default
init: runtime/default
ceph-cepfs-client-key-generator:
ceph-storage-keys-generator: runtime/default
ceph-rbd-provisioner:
ceph-rbd-provisioner: runtime/default
init: runtime/default
ceph-provisioner-test:
init: runtime/default
ceph-provisioner-helm-test: runtime/default
ceph-rbd-csi-provisioner:
ceph-rbd-provisioner: runtime/default
init: runtime/default
ceph-rbd-snapshotter: runtime/default
ceph-rbd-attacher: runtime/default
csi-resizer: runtime/default
csi-rbdplugin: runtime/default
ceph-rbd-plugin:
driver-registrar: runtime/default
csi-rbdplugin: runtime/default
init: runtime/default
affinity:
anti:
type:

View File

@ -50,7 +50,7 @@ metadata:
# Secrets
- dest:
path: .values.conf.ceph.global.fsid
src:
schema: deckhand/Passphrase/v1
name: ceph_fsid
@ -88,6 +88,12 @@ data:
provisioner:
node_selector_key: ucp-control-plane
node_selector_value: enabled
csi_rbd_plugin:
node_selector_key: ceph-osd
node_selector_value: enabled
test:
node_selector_key: ceph-osd
node_selector_value: enabled
endpoints:
ceph_mon:
namespace: ceph
@ -96,12 +102,36 @@ data:
client_secrets: false
rbd_provisioner: true
cephfs_provisioner: false
csi: true
bootstrap:
enabled: true
pod:
mandatory_access_control:
type: apparmor
ceph-cephfs-provisioner:
ceph-cephfs-provisioner: runtime/default
init: runtime/default
ceph-cepfs-client-key-generator:
ceph-storage-keys-generator: runtime/default
ceph-rbd-provisioner:
ceph-rbd-provisioner: runtime/default
init: runtime/default
ceph-rbd-csi-provisioner:
ceph-rbd-provisioner: runtime/default
init: runtime/default
ceph-rbd-snapshotter: runtime/default
ceph-rbd-attacher: runtime/default
csi-resizer: runtime/default
csi-rbdplugin: runtime/default
ceph-rbd-plugin:
driver-registrar: runtime/default
csi-rbdplugin: runtime/default
init: runtime/default
ceph-provisioner-test:
init: runtime/default
ceph-provisioner-helm-test: runtime/default
replicas:
rbd_provisioner: 1
conf:
ceph:
global:

View File

@ -136,6 +136,27 @@ data:
bootstrap:
enabled: false
pod:
mandatory_access_control:
type: apparmor
ceph-rgw:
init: runtime/default
ceph-rgw: runtime/default
ceph-init-dirs: runtime/default
ceph-rgw-init: runtime/default
ceph-rgw-bootstrap:
ceph-keyring-placement: runtime/default
init: runtime/default
ceph-rgw-bootstrap: runtime/default
ceph-rgw-storage-init:
ceph-keyring-placement: runtime/default
init: runtime/default
ceph-rgw-storage-init: runtime/default
ceph-rgw-s3-admin:
ceph-keyring-placement: runtime/default
init: runtime/default
create-s3-admin: runtime/default
ceph-rgw-test:
ceph-rgw-ks-validation: runtime/default
affinity:
anti:
type:
@ -146,12 +167,22 @@ data:
rgw_ks:
enabled: true
config:
rgw_keystone_accepted_roles: "admin, _member_, member"
rgw_keystone_accepted_roles: "admin, member, swift_pool_ro, swift_pool_admin, swift_admin"
ceph_client:
configmap: ceph-etc
secrets:
keyrings:
admin: pvc-ceph-client-key
network:
api:
ingress:
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/proxy-max-temp-file-size: "0"
nginx.ingress.kubernetes.io/configuration-snippet: |
more_set_headers "X-Content-Type-Options: nosniff";
more_set_headers "X-Frame-Options: deny";
dependencies:
- ceph-htk
...

View File

@ -53,6 +53,14 @@ data:
node_selector_key: ucp-control-plane
node_selector_value: enabled
pod:
mandatory_access_control:
type: apparmor
ingress-error-pages:
init: runtime/default
ingress-error-pages: runtime/default
ingress-server:
init: runtime/default
ingress: runtime/default
affinity:
anti:
type:
@ -65,5 +73,8 @@ data:
annotations:
nginx.ingress.kubernetes.io/proxy-body-size: 20m
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
nginx.ingress.kubernetes.io/configuration-snippet: |
more_set_headers "X-XSS-Protection: 1; mode=block";
more_set_headers "X-Frame-Options: deny";
dependencies:
- ucp-ingress-htk

View File

@ -39,6 +39,7 @@ metadata:
path: .ucp.prometheus_mysql_exporter
dest:
path: .values.endpoints.prometheus_mysql_exporter
# Accounts
- src:
schema: pegleg/AccountCatalogue/v1
@ -103,6 +104,15 @@ metadata:
name: ucp_oslo_db_exporter_password
path: .
# Forming the container name for database backups to go into
- dest:
- path: .values.conf.backup.remote_backup.container_name
pattern: DOMAIN
src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .dns.ingress_domain
data:
chart_name: ucp-mariadb
release: ucp-mariadb
@ -129,6 +139,15 @@ data:
size: 30Gi
backup:
size: 50Gi
conf:
backup:
enabled: true
days_to_keep: 3
remote_backup:
enabled: true
container_name: DOMAIN
days_to_keep: 14
storage_policy: ncbackup_pt
monitoring:
prometheus:
enabled: true
@ -146,6 +165,36 @@ data:
node_selector_key: ucp-control-plane
node_selector_value: enabled
pod:
security_context:
server:
container:
mariadb:
allowPrivilegeEscalation: false
mandatory_access_control:
type: apparmor
mariadb-ingress-error-pages:
init: runtime/default
ingress-error-pages: runtime/default
mariadb-ingress:
init: runtime/default
ingress: runtime/default
mariadb-server:
init: runtime/default
mariadb-perms: runtime/default
mariadb: runtime/default
mariadb-backup:
init: runtime/default
backup-perms: runtime/default
mariadb-backup: runtime/default
mariadb-test:
init: runtime/default
mariadb-test: runtime/default
prometheus-mysql-exporter:
init: runtime/default
mysql-exporter: runtime/default
create-sql-user:
init: runtime/default
exporter-create-sql-user: runtime/default
affinity:
anti:
type:
@ -154,8 +203,14 @@ data:
server: 1
ingress: 1
manifests:
# Enable automated backups
cron_job_mariadb_backup: true
# No need to create a keystone user - it should already be created on CH
job_ks_user: false
# Backing up to local PVC in addition to CH backups
pvc_backup: true
# Enable backup/restore secrets
secret_backup_restore: true
dependencies:
- mariadb-htk
...

View File

@ -26,14 +26,6 @@ metadata:
dest:
path: .values.images.tags
# PostgreSQL log level
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .airship.log_level
dest:
path: .values.conf.postgresql.logging.log_level
# Endpoints
- src:
schema: pegleg/EndpointCatalogue/v1
@ -61,12 +53,6 @@ metadata:
path: .ucp.postgres.admin
dest:
path: .values.endpoints.postgresql.auth.admin
- src:
schema: pegleg/AccountCatalogue/v1
name: ucp_service_accounts
path: .ucp.postgres.replica
dest:
path: .values.endpoints.postgresql.auth.replica
- src:
schema: pegleg/AccountCatalogue/v1
name: ucp_service_accounts
@ -79,12 +65,6 @@ metadata:
path: .ucp.prometheus_postgresql_exporter.user
dest:
path: .values.endpoints.prometheus_postgresql_exporter.auth.user
- src:
schema: pegleg/AccountCatalogue/v1
name: ucp_service_accounts
path: .ucp.postgres.replica.username
dest:
path: .values.secrets.pki.replication.hosts.names[0]
- src:
schema: pegleg/AccountCatalogue/v1
name: ucp_service_accounts
@ -99,12 +79,6 @@ metadata:
schema: deckhand/Passphrase/v1
name: ucp_postgres_admin_password
path: .
- dest:
path: .values.endpoints.postgresql.auth.replica.password
src:
schema: deckhand/Passphrase/v1
name: ucp_postgres_replica_password
path: .
- dest:
path: .values.endpoints.postgresql.auth.exporter.password
src:
@ -123,20 +97,6 @@ metadata:
schema: deckhand/Passphrase/v1
name: ucp_postgres_audit_password
path: .
- dest:
- path: .values.secrets.pki.replication.ca.crt
- path: .values.secrets.pki.server.ca.crt
src:
schema: deckhand/CertificateAuthority/v1
name: patroni-replication
path: .
- dest:
- path: .values.secrets.pki.replication.ca.key
- path: .values.secrets.pki.server.ca.key
src:
schema: deckhand/CertificateAuthorityKey/v1
name: patroni-replication
path: .
# POD IPs
- src:
@ -146,6 +106,15 @@ metadata:
dest:
path: .values.secrets.pki.pod_cidr
# Forming the container name for database backups to go into
- src:
schema: pegleg/CommonAddresses/v1
name: common-addresses
path: .dns.ingress_domain
dest:
- path: .values.conf.backup.remote_backup.container_name
pattern: DOMAIN
data:
chart_name: ucp-postgresql
release: ucp-postgresql
@ -167,11 +136,30 @@ data:
- type: job
labels:
release_group: clcp-ucp-postgresql
- type: cronjob
labels:
release_group: clcp-ucp-postgresql
create: []
post:
create: []
values:
pod:
mandatory_access_control:
type: apparmor
postgresql:
postgresql: runtime/default
set-volume-perms: runtime/default
init: runtime/default
postgresql-backup:
postgresql-backup: runtime/default
backup-perms: runtime/default
init: runtime/default
prometheus-postgresql-exporter:
postgresql-exporter: runtime/default
init: runtime/default
prometheus-postgresql-exporter-create-user:
prometheus-postgresql-exporter-create-user: runtime/default
init: runtime/default
affinity:
anti:
type:
@ -188,8 +176,44 @@ data:
postgresql:
max_connections: 1000
shared_buffers: 2GB
log_connections: 'off'
log_disconnections: 'off'
# disable archiving
archive_mode: 'off'
# disable wal senders (required with wal_level minimal)
max_wal_senders: 0
# to avoid filling up pgdata/pg_xlog, limit to 32 wal files (16 MB each), i.e. 512MB
max_wal_size: 32
# to avoid filling up pgdata/pg_commit_ts, don't track commit timestamps
track_commit_timestamp: 'off'
# don't explicitly force a minimum # of wal files to keep
wal_keep_segments: 0
# retain enough data to recover from a crash or immediate shutdown
wal_level: minimal
# don't force writes for hint bit modifications
wal_log_hints: 'off'
pg_hba: |
host all all 127.0.0.1/32 trust
host all postgresql-admin 0.0.0.0/0 md5
host all postgres 0.0.0.0/0 md5
host all psql_exporter 0.0.0.0/0 md5
host postgres postgresql_exporter 0.0.0.0/0 md5
host deckhand deckhand 0.0.0.0/0 md5
host maasdb maas 0.0.0.0/0 md5
host airflow airflow 0.0.0.0/0 md5
host shipyard shipyard 0.0.0.0/0 md5
host drydock drydock 0.0.0.0/0 md5
local all all trust
host all all 0.0.0.0/0 reject
backup:
pg_dumpall_options: --inserts
pg_dumpall_options: '--inserts --clean'
enabled: true
days_to_keep: 3
remote_backup:
enabled: true
container_name: DOMAIN
days_to_keep: 14
storage_policy: ncbackup_pt
development:
enabled: false
labels:
@ -206,8 +230,14 @@ data:
node_selector_key: ucp-control-plane
node_selector_value: enabled
manifests:
# Enable automated backups
cron_job_postgresql_backup: true
# No need to create a keystone user - it should already be created on CH
job_ks_user: false
# Still backing up to local PVC in addition to CH backups
pvc_backup: true
# Enable backup/restore secrets
secret_backup_restore: true
secrets:
pki:
server:
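
A note on the pg_hba block added above: PostgreSQL evaluates pg_hba.conf top-down and the first matching rule wins, which is why the catch-all "host all all 0.0.0.0/0 reject" must stay last. A toy first-match evaluator over a few of those rules (simplified to database/user matching only; real matching also covers address CIDRs and connection types):

# Simplified first-match semantics of pg_hba.conf; illustrative only.
RULES = [
    ("deckhand", "deckhand", "md5"),
    ("maasdb", "maas", "md5"),
    ("airflow", "airflow", "md5"),
    ("all", "all", "reject"),  # catch-all, must come last
]

def method_for(database: str, user: str) -> str:
    for rule_db, rule_user, method in RULES:
        if rule_db in ("all", database) and rule_user in ("all", user):
            return method
    return "reject"

print(method_for("deckhand", "deckhand"))  # md5
print(method_for("nova", "nova"))          # reject, falls through to catch-all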

View File

@ -86,12 +86,44 @@ data:
volume:
size: 5Gi
pod:
mandatory_access_control:
type: apparmor
rabbitmq-cluster-wait:
init: runtime/default
rabbitmq-cookie: runtime/default
rabbitmq-rabbitmq-cluster-wait: runtime/default
rabbitmq:
init: runtime/default
rabbitmq-password: runtime/default
rabbitmq-cookie: runtime/default
rabbitmq-perms: runtime/default
rabbitmq: runtime/default
prometheus-rabbitmq-exporter:
init: runtime/default
rabbitmq-exporter: runtime/default
rabbitmq-rabbitmq-test:
rabbitmq-rabbitmq-test: runtime/default
init: runtime/default
affinity:
anti:
type:
default: requiredDuringSchedulingIgnoredDuringExecution
replicas:
server: 1
resources:
enabled: true
server:
requests:
memory: "8192Mi"
cpu: "12000m"
limits:
memory: "8192Mi"
cpu: "12000m"
security_context:
server:
container:
rabbitmq:
allowPrivilegeEscalation: false
labels:
server:
node_selector_key: ucp-control-plane

View File

@ -127,6 +127,12 @@ metadata:
schema: deckhand/Passphrase/v1
name: ucp_oslo_messaging_password
path: .
- dest:
path: .values.conf.barbican.simple_crypto_plugin.kek
src:
schema: deckhand/Passphrase/v1
name: ucp_barbican_kek
path: .
data:
chart_name: ucp-barbican
release: ucp-barbican
@ -151,11 +157,14 @@ data:
post:
create: []
values:
helm3_hook: false
conf:
barbican:
DEFAULT:
max_allowed_request_size_in_bytes: 256000
max_allowed_secret_in_bytes: 256000
simple_crypto_plugin:
kek: ""
policy:
secret_project_match: project_id:%(target.secret.project_id)s
secret_creator_user: user_id:%(target.secret.creator_id)s
@ -231,10 +240,14 @@ data:
node_selector_key: ucp-control-plane
node_selector_value: enabled
pod:
apparmor:
barbican:
barbican: localhost/docker-default
init: localhost/docker-default
mandatory_access_control:
type: apparmor
barbican-api:
barbican-api: runtime/default
init: runtime/default
barbican-test:
init: runtime/default
barbican-test: runtime/default
affinity:
anti:
type:

View File

@ -154,6 +154,19 @@ data:
anti:
type:
default: requiredDuringSchedulingIgnoredDuringExecution
mandatory_access_control:
type: apparmor
deckhand-api:
init: runtime/default
deckhand-api: runtime/default
deckhand-db-init:
init: runtime/default
deckhand-db-init: runtime/default
deckhand-db-sync:
init: runtime/default
deckhand-db-sync: runtime/default
deckhand-api-test:
deckhand-api-test: runtime/default
replicas:
deckhand: 1
security_context:

View File

@ -144,6 +144,21 @@ data:
# TODO(sh8121): Refactor chart to support stricter security
# but still support libvirt+ssh for virtual testing
pod:
mandatory_access_control:
type: apparmor
drydock-api:
init: runtime/default
drydock-api: runtime/default
drydock-db-init:
init: runtime/default
drydock-db-init: runtime/default
drydock-db-sync:
init: runtime/default
drydock-db-sync: runtime/default
drydock-api-test:
drydock-api-test: runtime/default
drydock-auth-test:
drydock-auth-test: runtime/default
security_context:
drydock:
pod:
@ -156,8 +171,9 @@ data:
ingress:
annotations:
nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
classes:
cluster: "maas-ingress"
nginx.ingress.kubernetes.io/configuration-snippet: |
more_set_headers "X-XSS-Protection: 1; mode=block";
more_set_headers "X-Frame-Options: deny";
drydock:
node_port:
enabled: false
@ -166,6 +182,8 @@ data:
threads: 1
workers: 1
drydock:
DEFAULT:
poll_interval: 30
database:
pool_size: 200
plugins:

View File

@ -51,6 +51,12 @@ metadata:
path: .
dest:
path: .values.secrets.maas_region.value
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .airship.log_level
dest:
path: .values.conf.syslog.log_level
# Endpoint substitutions
- src:
@ -147,6 +153,43 @@ data:
release_group: clcp-maas
values:
pod:
mandatory_access_control:
type: apparmor
maas-rack:
maas-rack: runtime/default
init: runtime/default
maas-region:
maas-region: runtime/default
maas-cache: runtime/default
init: runtime/default
maas-syslog:
syslog: runtime/default
logrotate: runtime/default
init: runtime/default
maas-ingress:
maas-ingress-vip: runtime/default
maas-ingress: runtime/default
init: runtime/default
maas-ingress-vip-init: runtime/default
maas-ingress-errors:
maas-ingress-errors: runtime/default
maas-bootstrap-admin-user:
maas-bootstrap-admin-user: runtime/default
init: runtime/default
maas-db-init:
maas-db-init: runtime/default
init: runtime/default
maas-db-sync:
maas-db-sync: runtime/default
init: runtime/default
maas-export-api-key:
exporter: runtime/default
init: runtime/default
maas-import-resources:
region-import-resources: runtime/default
init: runtime/default
maas-api-test:
maas-api-test: runtime/default
affinity:
anti:
type:
@ -171,7 +214,12 @@ data:
enabled: false
conf:
cache:
enabled: false
enabled: true
cloudconfig:
override: true
sections:
bootcmd:
- "rm -fr /var/lib/apt/lists"
maas:
credentials:
secret:
@ -189,6 +237,14 @@ data:
default_os: 'ubuntu'
default_image: 'bionic'
default_kernel: 'ga-18.04'
force_gpt: true
extra_settings:
# disable network discovery completely
network_discovery: disabled
active_discovery_interval: 0
# disable creation of root account with default password
system_user: null
system_passwd: null
dependencies:
- maas-htk
...

View File

@ -34,13 +34,6 @@ metadata:
dest:
path: .values.images.tags.monitoring_image
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.kubernetes.hyperkube
dest:
path: .values.images.tags.hyperkube
# Endpoints
- src:
@ -118,6 +111,14 @@ data:
anti:
type:
default: requiredDuringSchedulingIgnoredDuringExecution
mandatory_access_control:
type: apparmor
promenade-api:
promenade-util: runtime/default
promenade-api: runtime/default
promenade:
init: runtime/default
promenade-api-test: runtime/default
replicas:
api: 1
security_context:

View File

@ -276,6 +276,38 @@ data:
anti:
type:
default: requiredDuringSchedulingIgnoredDuringExecution
mandatory_access_control:
type: apparmor
shipyard-api:
init: runtime/default
shipyard-api: runtime/default
airflow-web: runtime/default
airflow-worker:
init: runtime/default
worker-perms: runtime/default
airflow-worker: runtime/default
airflow-scheduler: runtime/default
airflow-logrotate: runtime/default
airflow-scheduler:
init: runtime/default
airflow-scheduler: runtime/default
shipyard-db-auxiliary:
init: runtime/default
shipyard-db-auxiliary: runtime/default
shipyard-db-init:
init: runtime/default
shipyard-db-init: runtime/default
shipyard-db-sync:
init: runtime/default
shipyard-db-sync: runtime/default
airflow-db-init:
init: runtime/default
airflow-db-init: runtime/default
airflow-db-sync:
init: runtime/default
airflow-db-sync: runtime/default
shipyard-api-test:
shipyard-api-test: runtime/default
replicas:
shipyard:
api: 1
@ -293,6 +325,9 @@ data:
ingress:
annotations:
nginx.ingress.kubernetes.io/proxy-body-size: "10m"
nginx.ingress.kubernetes.io/configuration-snippet: |
more_set_headers "X-XSS-Protection: 1; mode=block";
more_set_headers "X-Frame-Options: deny";
airflow:
web:
enable_node_port: false
@ -300,6 +335,8 @@ data:
shipyard:
keystone_authtoken:
memcache_security_strategy: None
requests_config:
deckhand_client_read_timeout: 300
rabbitmq:
# adding rmq policy to mirror messages from celery queues
# TODO: the vhost should be sourced from the same place as the config of the queue (endpoints)

View File

@ -84,6 +84,14 @@ data:
release_group: clcp-ucp-prometheus-openstack-exporter
values:
pod:
mandatory_access_control:
type: apparmor
prometheus-openstack-exporter:
openstack-metrics-exporter: runtime/default
init: runtime/default
prometheus-openstack-exporter-ks-user:
prometheus-openstack-exporter-ks-user: runtime/default
init: runtime/default
resources:
enabled: true
prometheus_openstack_exporter:

View File

@ -18,6 +18,5 @@ data:
- compute-utility
- etcdctl-utility
- mysqlclient-utility
- ncct-utility
- openstack-utility
- postgresql-utility

View File

@ -1,59 +0,0 @@
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: ncct-utility
layeringDefinition:
abstract: false
layer: global
labels:
name: ncct-utility-global
component: utility
storagePolicy: cleartext
substitutions:
# Chart source
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .charts.utility.ncct-utility
dest:
path: .source
# Images
- src:
schema: pegleg/SoftwareVersions/v1
name: software-versions
path: .images.utility.ncct-utility
dest:
path: .values.images.tags
# Location corridor
- src:
schema: nc/CorridorConfig/v1
name: corridor-config
path: .utility.always_log_user
dest:
path: .values.conf.utility.always_log_user
data:
chart_name: ncct-utility
release: ncct-utility
namespace: utility
protected:
continue_processing: false
wait:
timeout: 600
labels:
release_group: clcp-ncct-utility
install:
no_hooks: false
upgrade:
no_hooks: false
values:
labels:
utility:
node_selector_key: utility
node_selector_value: enabled
manifests:
configmap_bin: true
configmap_ncct_etc: true
deployment_ncct_utility: true
job_image_repo_sync: false
...

View File

@ -79,7 +79,7 @@ data:
kubernetes:
apiserver:
location: https://opendev.org/airship/promenade
reference: b65930f205b77f28507dee9296aead00e24f2a18
reference: 27f181a9d30294030d695b747b2e4560ffbd29be
subpath: charts/apiserver
type: git
apiserver-htk:
@ -110,62 +110,32 @@ data:
type: git
controller-manager:
location: https://opendev.org/airship/promenade
reference: b65930f205b77f28507dee9296aead00e24f2a18
reference: 27f181a9d30294030d695b747b2e4560ffbd29be
subpath: charts/controller_manager
type: git
controller-manager-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 844d2cd16d865df1779524a0623503a6c92e12ec
reference: 96b751465abac477517e9ec2f7fca0b64c9dd22a
subpath: helm-toolkit
type: git
coredns:
location: https://opendev.org/airship/promenade
reference: b65930f205b77f28507dee9296aead00e24f2a18
reference: 27f181a9d30294030d695b747b2e4560ffbd29be
subpath: charts/coredns
type: git
coredns-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 844d2cd16d865df1779524a0623503a6c92e12ec
reference: 96b751465abac477517e9ec2f7fca0b64c9dd22a
subpath: helm-toolkit
type: git
etcd:
location: https://opendev.org/airship/promenade
reference: c83f7b8a3e2a61c76422c75a163260e56817852a
reference: 27f181a9d30294030d695b747b2e4560ffbd29be
subpath: charts/etcd
type: git
etcd-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 844d2cd16d865df1779524a0623503a6c92e12ec
subpath: helm-toolkit
type: git
haproxy:
location: https://opendev.org/airship/promenade
reference: 9db70cea913443855578736d0d640989c8453b5e
subpath: charts/haproxy
type: git
haproxy-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 844d2cd16d865df1779524a0623503a6c92e12ec
subpath: helm-toolkit
type: git
ingress:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 781e65ac5dde325b3771859c41709633cff32a42
subpath: ingress
type: git
ingress-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 3dd0eb0cdf54387447f07a4ec6be9582c457a1a8
subpath: helm-toolkit
type: git
proxy:
location: https://opendev.org/airship/promenade
reference: b65930f205b77f28507dee9296aead00e24f2a18
subpath: charts/proxy
type: git
proxy-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 844d2cd16d865df1779524a0623503a6c92e12ec
reference: 96b751465abac477517e9ec2f7fca0b64c9dd22a
subpath: helm-toolkit
type: git
falco:
@ -178,9 +148,39 @@ data:
reference: 22ef25ab295d6b7c6797cfffaa77cf181c673e9b
subpath: helm-toolkit
type: git
haproxy:
location: https://opendev.org/airship/promenade
reference: 27f181a9d30294030d695b747b2e4560ffbd29be
subpath: charts/haproxy
type: git
haproxy-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 96b751465abac477517e9ec2f7fca0b64c9dd22a
subpath: helm-toolkit
type: git
ingress:
location: https://opendev.org/openstack/openstack-helm-infra
reference: f08d30df6b871044135534eedf76c8f2760d9923
subpath: ingress
type: git
ingress-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 3dd0eb0cdf54387447f07a4ec6be9582c457a1a8
subpath: helm-toolkit
type: git
proxy:
location: https://opendev.org/airship/promenade
reference: 27f181a9d30294030d695b747b2e4560ffbd29be
subpath: charts/proxy
type: git
proxy-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 844d2cd16d865df1779524a0623503a6c92e12ec
subpath: helm-toolkit
type: git
scheduler:
location: https://opendev.org/airship/promenade
reference: b65930f205b77f28507dee9296aead00e24f2a18
reference: 27f181a9d30294030d695b747b2e4560ffbd29be
subpath: charts/scheduler
type: git
scheduler-htk:
@ -191,12 +191,12 @@ data:
osh:
barbican:
location: https://opendev.org/openstack/openstack-helm
reference: d467d685a3863edc5fed7c352961b3235a5e3a99
reference: 9bdd4fe96f46e3a4fb30ae7910dd970ca0a82c76
subpath: barbican
type: git
cinder:
location: https://opendev.org/openstack/openstack-helm
reference: d467d685a3863edc5fed7c352961b3235a5e3a99
reference: 2002b9368dc8f3f9d9737803c147534ae3e5258c
subpath: cinder
type: git
cinder-htk:
@ -206,7 +206,7 @@ data:
type: git
glance:
location: https://opendev.org/openstack/openstack-helm
reference: d467d685a3863edc5fed7c352961b3235a5e3a99
reference: 2002b9368dc8f3f9d9737803c147534ae3e5258c
subpath: glance
type: git
glance-htk:
@ -216,7 +216,7 @@ data:
type: git
heat:
location: https://opendev.org/openstack/openstack-helm
reference: d467d685a3863edc5fed7c352961b3235a5e3a99
reference: 4aa7a3026706758d22dc3e7002a2a0bffcfd84ea
subpath: heat
type: git
heat-htk:
@ -231,7 +231,7 @@ data:
type: git
horizon:
location: https://opendev.org/openstack/openstack-helm
reference: d467d685a3863edc5fed7c352961b3235a5e3a99
reference: 4aa7a3026706758d22dc3e7002a2a0bffcfd84ea
subpath: horizon
type: git
horizon-htk:
@ -241,7 +241,7 @@ data:
type: git
ingress:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 781e65ac5dde325b3771859c41709633cff32a42
reference: f08d30df6b871044135534eedf76c8f2760d9923
subpath: ingress
type: git
ingress-htk:
@ -251,7 +251,7 @@ data:
type: git
keystone:
location: https://opendev.org/openstack/openstack-helm
reference: d467d685a3863edc5fed7c352961b3235a5e3a99
reference: 28d0a56e46129276559a4ddb2dea32c35b10b58e
subpath: keystone
type: git
keystone-htk:
@ -261,7 +261,7 @@ data:
type: git
libvirt:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 781e65ac5dde325b3771859c41709633cff32a42
reference: 167b9eb1a8f063174864bd9d12f327d8f0c34885
subpath: libvirt
type: git
libvirt-htk:
@ -271,7 +271,7 @@ data:
type: git
mariadb:
location: https://opendev.org/openstack/openstack-helm-infra
reference: b482b57e6ea85eceef02787cb2e8eb884cceb80c
reference: 6dd39da6ad7ffd744ecff423e99637aadd9c0414
subpath: mariadb
type: git
mariadb-htk:
@ -281,7 +281,7 @@ data:
type: git
memcached:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 781e65ac5dde325b3771859c41709633cff32a42
reference: c60c138777edc0cc35ccb7fcd8a13b57093a7ee1
subpath: memcached
type: git
memcached-htk:
@ -291,8 +291,7 @@ data:
type: git
neutron:
location: https://opendev.org/openstack/openstack-helm
#reference: 915276f4f75422f707418f71422c7383bbffe728
reference: d2abe39d498f48c4721e26aca19e81189bc8891b
reference: 2a9e91589d1d03279cafb4f836056b228c2bc425
subpath: neutron
type: git
neutron-htk:
@ -302,7 +301,7 @@ data:
type: git
nova:
location: https://opendev.org/openstack/openstack-helm
reference: f9dbba70438e065de784075408666abb08414e00
reference: 4aa7a3026706758d22dc3e7002a2a0bffcfd84ea
subpath: nova
type: git
nova-htk:
@ -312,7 +311,7 @@ data:
type: git
openvswitch:
location: https://opendev.org/openstack/openstack-helm-infra
reference: d0b32ed88ad652d9c2226466a13bac8b28038399
reference: f08d30df6b871044135534eedf76c8f2760d9923
subpath: openvswitch
type: git
openvswitch-htk:
@ -322,7 +321,7 @@ data:
type: git
rabbitmq:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 749e2be9f5245ff0ad634a593820c353061b6544
reference: 1a8536fd5d9c3c05a0c4b5cd0e6e4a502c9c53bc
subpath: rabbitmq
type: git
rabbitmq-htk:
@ -341,39 +340,9 @@ data:
subpath: helm-toolkit
type: git
osh_addons:
ranger:
type: git
location: https://opendev.org/openstack/openstack-helm-addons
reference: 80b8ec444afb6be4861c1a536420b02bb5a1bb40
subpath: ranger
ranger-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: b0ee64420436d3acf239446a7b0b3300bb624f9a
subpath: helm-toolkit
type: git
ranger-agent:
type: git
location: https://opendev.org/openstack/openstack-helm-addons
reference: 80b8ec444afb6be4861c1a536420b02bb5a1bb40
subpath: ranger-agent
ranger-agent-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: b0ee64420436d3acf239446a7b0b3300bb624f9a
subpath: helm-toolkit
type: git
sonobuoy:
type: git
location: https://opendev.org/openstack/openstack-helm-addons
reference: b60acc330716eb1c6325bfa6ca8080f9d5528c4c
subpath: sonobuoy
sonobuoy-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 992e82fc1dbcca989f171b7679cee96a924259de
subpath: helm-toolkit
type: git
ingress:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 2ac08b59b4b2658175e7ad83708e254a4dc4ffbd
reference: f08d30df6b871044135534eedf76c8f2760d9923
subpath: ingress
type: git
ingress-htk:
@ -383,7 +352,7 @@ data:
type: git
mariadb:
location: https://opendev.org/openstack/openstack-helm-infra
reference: a4568f31e2e9d09c8f318a9d938c9a16f0da59ef
reference: ba601e0cba1bb584fe3bce07f96f8e5b98b12889
subpath: mariadb
type: git
mariadb-htk:
@ -393,7 +362,7 @@ data:
type: git
mini-mirror:
location: https://opendev.org/openstack/openstack-helm-addons
reference: aad032a9b567775cff2b82abb98004d124bba203
reference: 1c2d9373f2c6f39ef2aaeff83799906f1aae2f2a
subpath: mini-mirror
type: git
mini-mirror-htk:
@ -401,10 +370,50 @@ data:
reference: af270934d44ab3f0eb2462cde7626eb2c6a1f967
subpath: helm-toolkit
type: git
ranger:
location: https://opendev.org/openstack/openstack-helm-addons
reference: b5cbc370781417cda449ba008c747ccf6d9794b9
subpath: ranger
type: git
ranger-agent:
location: https://opendev.org/openstack/openstack-helm-addons
reference: fc9b28b3e2e1be0a2b5171074c5dd8f43e3a0a6f
subpath: ranger-agent
type: git
ranger-agent-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: b0ee64420436d3acf239446a7b0b3300bb624f9a
subpath: helm-toolkit
type: git
ranger-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: b0ee64420436d3acf239446a7b0b3300bb624f9a
subpath: helm-toolkit
type: git
sonobuoy:
location: https://opendev.org/openstack/openstack-helm-addons
reference: b60acc330716eb1c6325bfa6ca8080f9d5528c4c
subpath: sonobuoy
type: git
sonobuoy-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 992e82fc1dbcca989f171b7679cee96a924259de
subpath: helm-toolkit
type: git
osh_infra:
ceph-rgw:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 20cf2db961a05285a751b3bc96b8fa4e51e3b412
subpath: ceph-rgw
type: git
ceph-rgw-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: eacf93722136636dcfbd2b68c59b71f071ffc085
subpath: helm-toolkit
type: git
elasticsearch:
location: https://opendev.org/openstack/openstack-helm-infra
reference: ddd5a74319142b14d42271da6727e76637133869
reference: a2c1eea8a982de777fa430efdf188d556630f3f0
subpath: elasticsearch
type: git
elasticsearch-htk:
@ -414,7 +423,7 @@ data:
type: git
fluentd:
location: https://opendev.org/openstack/openstack-helm-infra
reference: ddd5a74319142b14d42271da6727e76637133869
reference: 59164428d305bf72dd67733a184266fd1217c153
subpath: fluentd
type: git
fluentd-htk:
@ -424,7 +433,7 @@ data:
type: git
grafana:
location: https://opendev.org/openstack/openstack-helm-infra
reference: ddd5a74319142b14d42271da6727e76637133869
reference: 128c3f748aee3ae72cccca0ff17a71ac1ed5c2fb
subpath: grafana
type: git
grafana-htk:
@ -434,7 +443,7 @@ data:
type: git
kibana:
location: https://opendev.org/openstack/openstack-helm-infra
reference: ddd5a74319142b14d42271da6727e76637133869
reference: b62a46336c1b6ca86e48710ba85c9320f3075fd5
subpath: kibana
type: git
kibana-htk:
@ -454,7 +463,7 @@ data:
type: git
nagios:
location: https://opendev.org/openstack/openstack-helm-infra
reference: ddd5a74319142b14d42271da6727e76637133869
reference: a6a9146de4996aafb7148009de8ee584fb44dfa0
subpath: nagios
type: git
nagios-htk:
@ -464,7 +473,7 @@ data:
type: git
podsecuritypolicy:
location: https://opendev.org/openstack/openstack-helm-infra
reference: a0315caffaa7327cc3c8b4e8d60f6fb46b57e4a6
reference: b19c7f21c9aab072fd101228737b3cec0e0b0963
subpath: podsecuritypolicy
type: git
podsecuritypolicy-htk:
@ -474,7 +483,7 @@ data:
type: git
prometheus:
location: https://opendev.org/openstack/openstack-helm-infra
reference: ddd5a74319142b14d42271da6727e76637133869
reference: daca15441ba750d78d0e698e4a89016060d03bd0
subpath: prometheus
type: git
prometheus-htk:
@ -484,7 +493,7 @@ data:
type: git
prometheus_kube_state_metrics:
location: https://opendev.org/openstack/openstack-helm-infra
reference: ddd5a74319142b14d42271da6727e76637133869
reference: af712da8635c22962ff5cd205918a13a66989913
subpath: prometheus-kube-state-metrics
type: git
prometheus_kube_state_metrics-htk:
@ -494,7 +503,7 @@ data:
type: git
prometheus_node_exporter:
location: https://opendev.org/openstack/openstack-helm-infra
reference: ddd5a74319142b14d42271da6727e76637133869
reference: c61fc590fbca886bf86288c03e78727e9889cb33
subpath: prometheus-node-exporter
type: git
prometheus_node_exporter-htk:
@ -502,19 +511,9 @@ data:
reference: a78ee39dcb9209a4ba6cc87f2989481a9fa63fc1
subpath: helm-toolkit
type: git
prometheus_process_exporter:
location: https://opendev.org/openstack/openstack-helm-infra
reference: ddd5a74319142b14d42271da6727e76637133869
subpath: prometheus-process-exporter
type: git
prometheus_process_exporter-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: a78ee39dcb9209a4ba6cc87f2989481a9fa63fc1
subpath: helm-toolkit
type: git
prometheus_openstack_exporter:
location: https://opendev.org/openstack/openstack-helm-infra
reference: ddd5a74319142b14d42271da6727e76637133869
reference: a6a9146de4996aafb7148009de8ee584fb44dfa0
subpath: prometheus-openstack-exporter
type: git
prometheus_openstack_exporter-htk:
@ -522,20 +521,20 @@ data:
reference: a78ee39dcb9209a4ba6cc87f2989481a9fa63fc1
subpath: helm-toolkit
type: git
ceph-rgw:
prometheus_process_exporter:
location: https://opendev.org/openstack/openstack-helm-infra
reference: eacf93722136636dcfbd2b68c59b71f071ffc085
subpath: ceph-rgw
reference: f8c5d0676904cdb05de80234cb8659bde856420a
subpath: prometheus-process-exporter
type: git
ceph-rgw-htk:
prometheus_process_exporter-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: eacf93722136636dcfbd2b68c59b71f071ffc085
reference: a78ee39dcb9209a4ba6cc87f2989481a9fa63fc1
subpath: helm-toolkit
type: git
ucp:
apiserver-webhook:
location: https://opendev.org/airship/promenade
reference: b65930f205b77f28507dee9296aead00e24f2a18
reference: 27f181a9d30294030d695b747b2e4560ffbd29be
subpath: charts/apiserver-webhook
type: git
apiserver-webhook-htk:
@ -545,7 +544,7 @@ data:
type: git
armada:
location: https://opendev.org/airship/armada
reference: e88cbafd62e8a4528e05450607399d2216654841
reference: a3f11e5873bc5b97de579c627d7b57e3bc9f655e
subpath: charts/armada
type: git
armada-htk:
@ -555,7 +554,7 @@ data:
type: git
barbican:
location: https://opendev.org/openstack/openstack-helm
reference: def68865a28f0819d08812d02839ba70483d0e10
reference: 9bdd4fe96f46e3a4fb30ae7910dd970ca0a82c76
subpath: barbican
type: git
barbican-htk:
@ -565,7 +564,7 @@ data:
type: git
ceph-client:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 8d9b7fdd2b146dc8a1578d6ee23c79aa3116f87e
reference: dd39a0f7e08c9eee861b9fa23e9587d440b76c43
subpath: ceph-client
type: git
ceph-htk:
@ -575,57 +574,27 @@ data:
type: git
ceph-mon:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 8d9b7fdd2b146dc8a1578d6ee23c79aa3116f87e
reference: 4eee89ccba75a34b222d2f828129d0d5f26ffd23
subpath: ceph-mon
type: git
ceph-osd:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 8d9b7fdd2b146dc8a1578d6ee23c79aa3116f87e
reference: 4ed24de14b562607742a246906b48fba647d3ce8
subpath: ceph-osd
type: git
ceph-provisioners:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 8d9b7fdd2b146dc8a1578d6ee23c79aa3116f87e
reference: 4ed24de14b562607742a246906b48fba647d3ce8
subpath: ceph-provisioners
type: git
ceph-rgw:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 2f8ea3977ba31a4576ffb9bf98282d0586d1aa25
subpath: ceph-rgw
type: git
tenant-ceph-client:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 8d9b7fdd2b146dc8a1578d6ee23c79aa3116f87e
subpath: ceph-client
type: git
tenant-ceph-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 8d9b7fdd2b146dc8a1578d6ee23c79aa3116f87e
subpath: helm-toolkit
type: git
tenant-ceph-mon:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 8d9b7fdd2b146dc8a1578d6ee23c79aa3116f87e
subpath: ceph-mon
type: git
tenant-ceph-osd:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 8d9b7fdd2b146dc8a1578d6ee23c79aa3116f87e
subpath: ceph-osd
type: git
tenant-ceph-provisioners:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 8d9b7fdd2b146dc8a1578d6ee23c79aa3116f87e
subpath: ceph-provisioners
type: git
tenant-ceph-rgw:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 2f8ea3977ba31a4576ffb9bf98282d0586d1aa25
reference: 20cf2db961a05285a751b3bc96b8fa4e51e3b412
subpath: ceph-rgw
type: git
deckhand:
location: https://opendev.org/airship/deckhand
reference: b35371e241d10637659895b53b81be5fa91343c1
reference: 0545625da9f83c123de854fa2c6ff05b941d56e7
subpath: charts/deckhand
type: git
deckhand-htk:
@ -635,7 +604,7 @@ data:
type: git
divingbell:
location: https://opendev.org/airship/divingbell
reference: fe0a034ec7d60351c1600de76eb6e28df6e59882
reference: d657f7968c296cfb7565e144b2420eabf47ef9f9
subpath: divingbell
type: git
divingbell-htk:
@ -645,7 +614,7 @@ data:
type: git
drydock:
location: https://opendev.org/airship/drydock
reference: 576f1b04886c30f3779c856abeded1c171873fc9
reference: 63f7783da2824709f86bbc56ee2140ed293c450f
subpath: charts/drydock
type: git
drydock-htk:
@ -655,7 +624,7 @@ data:
type: git
ingress:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 781e65ac5dde325b3771859c41709633cff32a42
reference: f08d30df6b871044135534eedf76c8f2760d9923
subpath: ingress
type: git
ingress-htk:
@ -665,7 +634,7 @@ data:
type: git
keystone:
location: https://opendev.org/openstack/openstack-helm
reference: d467d685a3863edc5fed7c352961b3235a5e3a99
reference: 28d0a56e46129276559a4ddb2dea32c35b10b58e
subpath: keystone
type: git
keystone-htk:
@ -675,7 +644,7 @@ data:
type: git
maas:
location: https://opendev.org/airship/maas
reference: 9acebfc2b4e8fab651aec7a9f16407c2d5023f0a
reference: 35fa3175e3d2873d3a7e21cdc793e9d153df7886
subpath: charts/maas
type: git
maas-htk:
@ -685,17 +654,17 @@ data:
type: git
mariadb:
location: https://opendev.org/openstack/openstack-helm-infra
reference: b482b57e6ea85eceef02787cb2e8eb884cceb80c
reference: 6dd39da6ad7ffd744ecff423e99637aadd9c0414
subpath: mariadb
type: git
mariadb-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: b482b57e6ea85eceef02787cb2e8eb884cceb80c
reference: 27f181a9d30294030d695b747b2e4560ffbd29be
subpath: helm-toolkit
type: git
memcached:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 781e65ac5dde325b3771859c41709633cff32a42
reference: c60c138777edc0cc35ccb7fcd8a13b57093a7ee1
subpath: memcached
type: git
memcached-htk:
@ -705,7 +674,7 @@ data:
type: git
postgresql:
location: https://opendev.org/openstack/openstack-helm-infra
reference: c18ee59aff4481d7c4d7a1a074c4bd4184602bd6
reference: 25aa3690252c605d237c187524d0adf73a8424e5
subpath: postgresql
type: git
postgresql-htk:
@ -715,7 +684,7 @@ data:
type: git
promenade:
location: https://opendev.org/airship/promenade
reference: b65930f205b77f28507dee9296aead00e24f2a18
reference: 27f181a9d30294030d695b747b2e4560ffbd29be
subpath: charts/promenade
type: git
promenade-htk:
@ -725,7 +694,7 @@ data:
type: git
rabbitmq:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 749e2be9f5245ff0ad634a593820c353061b6544
reference: 1a8536fd5d9c3c05a0c4b5cd0e6e4a502c9c53bc
subpath: rabbitmq
type: git
rabbitmq-htk:
@ -735,7 +704,7 @@ data:
type: git
shipyard:
location: https://opendev.org/airship/shipyard
reference: 0cbb5cf9b7ea7e517e034e3ccb105b183ada08d9
reference: 796f2ddcca35ad168c5c6c08221d5724dadfffba
subpath: charts/shipyard
type: git
shipyard-htk:
@ -743,9 +712,39 @@ data:
reference: 926348fe2476051f9ca825916db333f81c0139b7
subpath: helm-toolkit
type: git
tenant-ceph-client:
location: https://opendev.org/openstack/openstack-helm-infra
reference: dd39a0f7e08c9eee861b9fa23e9587d440b76c43
subpath: ceph-client
type: git
tenant-ceph-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 8d9b7fdd2b146dc8a1578d6ee23c79aa3116f87e
subpath: helm-toolkit
type: git
tenant-ceph-mon:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 4eee89ccba75a34b222d2f828129d0d5f26ffd23
subpath: ceph-mon
type: git
tenant-ceph-osd:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 4ed24de14b562607742a246906b48fba647d3ce8
subpath: ceph-osd
type: git
tenant-ceph-provisioners:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 4ed24de14b562607742a246906b48fba647d3ce8
subpath: ceph-provisioners
type: git
tenant-ceph-rgw:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 20cf2db961a05285a751b3bc96b8fa4e51e3b412
subpath: ceph-rgw
type: git
tiller:
location: https://opendev.org/airship/armada
reference: da0f6e61ba9e8d40e96db616c0fafb0fdbcb9417
reference: a3f11e5873bc5b97de579c627d7b57e3bc9f655e
subpath: charts/tiller
type: git
tiller-htk:
@ -756,7 +755,7 @@ data:
utility:
calicoctl-utility:
location: https://opendev.org/airship/porthole
reference: 744e4bb10acccaa8722f9e05fb0a39a1b5b35731
reference: 88d87ff37338b2fbf09baec78f3d73160eaa14c3
subpath: charts/calicoctl-utility
type: git
calicoctl-utility-htk:
@ -764,19 +763,9 @@ data:
reference: a78ee39dcb9209a4ba6cc87f2989481a9fa63fc1
subpath: helm-toolkit
type: git
ncct-utility:
location: https://opendev.org/airship/porthole
reference: 744e4bb10acccaa8722f9e05fb0a39a1b5b35731
subpath: charts/calicoctl-utility
type: git
ncct-utility-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: a78ee39dcb9209a4ba6cc87f2989481a9fa63fc1
subpath: helm-toolkit
type: git
ceph-utility:
location: https://opendev.org/airship/porthole
reference: 744e4bb10acccaa8722f9e05fb0a39a1b5b35731
reference: a8b0517a4f2be0c81495167df63f065fc1e4db88
subpath: charts/ceph-utility
type: git
ceph-utility-htk:
@ -786,7 +775,7 @@ data:
type: git
compute-utility:
location: https://opendev.org/airship/porthole
reference: 744e4bb10acccaa8722f9e05fb0a39a1b5b35731
reference: a8b0517a4f2be0c81495167df63f065fc1e4db88
subpath: charts/compute-utility
type: git
compute-utility-htk:
@ -796,7 +785,7 @@ data:
type: git
etcdctl-utility:
location: https://opendev.org/airship/porthole
reference: 744e4bb10acccaa8722f9e05fb0a39a1b5b35731
reference: 002e070eb7d69d483cc3909f9e13c370ca9d84d9
subpath: charts/etcdctl-utility
type: git
etcdctl-utility-htk:
@ -806,27 +795,27 @@ data:
type: git
mysqlclient-utility:
location: https://opendev.org/airship/porthole
reference: 744e4bb10acccaa8722f9e05fb0a39a1b5b35731
reference: 002e070eb7d69d483cc3909f9e13c370ca9d84d9
subpath: charts/mysqlclient-utility
type: git
mysqlclient-utility-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 22ef25ab295d6b7c6797cfffaa77cf181c673e9b
reference: 486ad9a25f03ba888a247fb8d856bb9a33a29937
subpath: helm-toolkit
type: git
openstack-utility:
location: https://opendev.org/airship/porthole
reference: 9f9d8d39be8708e18d4d3d2a518f09fa890b050e
reference: 1f478c6c5d09809ecad491d5e55ccbd708c41217
subpath: charts/openstack-utility
type: git
openstack-utility-htk:
location: https://opendev.org/openstack/openstack-helm-infra
reference: 22ef25ab295d6b7c6797cfffaa77cf181c673e9b
reference: 486ad9a25f03ba888a247fb8d856bb9a33a29937
subpath: helm-toolkit
type: git
postgresql-utility:
location: https://opendev.org/airship/porthole
reference: 0546a3aff7af4541e07630589a15a3d172c8ff5c
reference: 002e070eb7d69d483cc3909f9e13c370ca9d84d9
subpath: charts/postgresql-utility
type: git
postgresql-utility-htk:
@ -836,32 +825,31 @@ data:
type: git
files:
kubernetes: https://dl.k8s.io/v1.17.2/kubernetes-node-linux-amd64.tar.gz
crictl: https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.22.0/crictl-v1.22.0-linux-amd64.tar.gz
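# The `files` tarballs are fetched verbatim from the URLs above. A quick
# sanity check of the newly pinned crictl release might look like this
# (illustrative only):
#   curl -LO https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.22.0/crictl-v1.22.0-linux-amd64.tar.gz
#   tar -tzf crictl-v1.22.0-linux-amd64.tar.gz   # should list the crictl binary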
images_refs:
images:
alpine: &alpine docker.io/alpine:3.6
busybox: &busybox docker.io/busybox:1.28.3
alpine-38: &alpine-38 docker.io/alpine:3.8
airflow: &airflow quay.io/airshipit/airflow:f0f876afef07f62cd38fbce8b775cfc741e0bf52-ubuntu_xenial
armada: &armada quay.io/airshipit/armada:268d7a39580407911a0a39e34eebcc4540c1deaa-ubuntu_bionic
armada: &armada quay.io/airshipit/armada:7ef4b8643b5ec5216a8f6726841e156c0aa54a1a-ubuntu_bionic
barbican: &barbican docker.io/openstackhelm/barbican@sha256:4c44bb9e5d29224007b2ef52c1ce4ff756e655efe9c6e9a95b2e98f4f501499e
#ceph_daemon: &ceph_daemon docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20191216
ceph_daemon: &ceph_daemon docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521
ceph_daemon: &ceph_daemon docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20210902
ceph-utility: &ceph-utility quay.io/airshipit/porthole-ceph-utility:744e4bb10acccaa8722f9e05fb0a39a1b5b35731-ubuntu_bionic
ceph-utility: &ceph-utility quay.io/airshipit/porthole-ceph-utility:e7a6644b3af8bddd94d20ca35f388ae605203416-ubuntu_bionic
calicoctl-utility: &calicoctl-utility quay.io/airshipit/porthole-calicoctl-utility:744e4bb10acccaa8722f9e05fb0a39a1b5b35731-ubuntu_bionic
calico: &calico quay.io/calico/cni:v3.4.0
calico_ctl: &calico_ctl docker.io/calico/ctl:v3.4.0
calico_kube_controllers: &calico_kube_controllers quay.io/calico/kube-controllers:v3.4.0
calico_node: &calico_node quay.io/calico/node:v3.4.0
calicoctl-utility: &calicoctl-utility quay.io/airshipit/porthole-calicoctl-utility:e7a6644b3af8bddd94d20ca35f388ae605203416-ubuntu_bionic
calico: &calico quay.io/calico/cni:v3.8.9
calico_ctl: &calico_ctl docker.io/calico/ctl:v3.8.9
calico_kube_controllers: &calico_kube_controllers quay.io/calico/kube-controllers:v3.8.9
calico_node: &calico_node quay.io/calico/node:v3.8.9
calico_dep_check: &calico_dep_check quay.io/stackanetes/kubernetes-entrypoint:v0.3.1
ncct-utility: &ncct-utility quay.io/airshipit/porthole-calicoctl-utility:744e4bb10acccaa8722f9e05fb0a39a1b5b35731-ubuntu_bionic
compute-utility: &compute-utility quay.io/airshipit/porthole-compute-utility:744e4bb10acccaa8722f9e05fb0a39a1b5b35731-ubuntu_bionic
compute-utility: &compute-utility quay.io/airshipit/porthole-compute-utility:e7a6644b3af8bddd94d20ca35f388ae605203416-ubuntu_bionic
postgresql-utility: &postgresql-utility quay.io/airshipit/porthole-postgresql-utility:744e4bb10acccaa8722f9e05fb0a39a1b5b35731-ubuntu_bionic
postgresql-utility: &postgresql-utility quay.io/airshipit/porthole-postgresql-utility:e7a6644b3af8bddd94d20ca35f388ae605203416-ubuntu_bionic
coredns: &coredns docker.io/coredns/coredns:1.6.4
@ -875,82 +863,82 @@ data:
elasticsearch: &elasticsearch docker.io/openstackhelm/elasticsearch-s3:7_1_0-20191119
etcd: &etcd quay.io/coreos/etcd:v3.4.6
etcd: &etcd quay.io/coreos/etcd:v3.4.8
etcdctl: &etcdctl-utility quay.io/airshipit/porthole-etcdctl-utility:744e4bb10acccaa8722f9e05fb0a39a1b5b35731-ubuntu_bionic
etcdctl: &etcdctl-utility quay.io/airshipit/porthole-etcdctl-utility:e7a6644b3af8bddd94d20ca35f388ae605203416-ubuntu_bionic
falco: &falco docker.io/falcosecurity/falco:0.12.1
fluentd: &fluentd docker.io/openstackhelm/fluentd:debian-20190903
helm: &helm docker.io/lachlanevenson/k8s-helm:v2.14.1
helm: &helm docker.io/lachlanevenson/k8s-helm:v2.16.9
httpd: &httpd docker.io/library/httpd:2.4
httpd: &httpd docker.io/library/httpd:2.4.46
hyperkube_amd64: &hyperkube_amd64 gcr.io/google-containers/hyperkube-amd64:v1.17.2
image_repo_sync: &image_repo_sync docker.io/docker:17.07.0
ingress_controller: &ingress_controller quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.23.0
ingress_controller: &ingress_controller quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.32.0
ingress_module_init: &ingress_module_init docker.io/openstackhelm/neutron:ocata-ubuntu_xenial-20200521
ingress_module_init: &ingress_module_init docker.io/openstackhelm/neutron:train-ubuntu_bionic-20210414
ingress_routed_vip: &ingress_routed_vip docker.io/openstackhelm/neutron:ocata-ubuntu_xenial-20200521
ingress_routed_vip: &ingress_routed_vip docker.io/openstackhelm/neutron:train-ubuntu_bionic-20210414
ingress_error_pages: &ingress_error_pages gcr.io/google-containers/defaultbackend-amd64@sha256:0b07d6e1652b97d6f68fb63e42342921cd2c7bb4560881353a04e827cb65cf80
ingress_error_pages: &ingress_error_pages gcr.io/google-containers/defaultbackend-amd64@sha256:865b0c35e6da393b8e80b7e3799f777572399a4cff047eb02a81fa6e7a48ed4b
ingress_error_pages_404: &ingress_error_pages_404 gcr.io/google-containers/ingress-gce-404-server-with-metrics-amd64@sha256:d83d8a481145d0eb71f8bd71ae236d1c6a931dd3bdcaf80919a8ec4a4d8aff74
ingress_error_pages_404: &ingress_error_pages_404 gcr.io/google-containers/ingress-gce-404-server-with-metrics-amd64:v1.6.0@sha256:d83d8a481145d0eb71f8bd71ae236d1c6a931dd3bdcaf80919a8ec4a4d8aff74
ingress_controller_mariadb: &ingress_controller_mariadb quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0
ingress_controller_mariadb: &ingress_controller_mariadb quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.32.0
k8s-keystone-auth: &k8s-keystone-auth docker.io/k8scloudprovider/k8s-keystone-auth:v0.3.0
k8s-keystone-auth: &k8s-keystone-auth docker.io/k8scloudprovider/k8s-keystone-auth:v1.18.0
kibana: &kibana docker.elastic.co/kibana/kibana-oss:7.1.0
kibana: &kibana docker.elastic.co/kibana/kibana-oss:7.9.2
libvirt: &libvirt docker.io/openstackhelm/libvirt:ubuntu_bionic-20200623
libvirt: &libvirt docker.io/openstackhelm/libvirt:ubuntu_bionic-20210227
mariadb_db: &mariadb_db docker.io/openstackhelm/mariadb:10.2.18
maas_rack_controller: &maas_rack_controller quay.io/airshipit/maas-rack-controller:03ebbaaca3f38ab49a5993b506761a4abfd19d89
maas_rack_controller: &maas_rack_controller quay.io/airshipit/maas-rack-controller:840b482373df163ba088b91c278f2a58fd599b7b
maas_region_controller: &maas_region_controller quay.io/airshipit/maas-region-controller:03ebbaaca3f38ab49a5993b506761a4abfd19d89
maas_region_controller: &maas_region_controller quay.io/airshipit/maas-region-controller:840b482373df163ba088b91c278f2a58fd599b7b
maas_ingress: &maas_ingress docker-prod-local.artifactory.mirantis.com/mirantis/kubernetes-ingress-nginx/nginx-ingress-controller-amd64@sha256:fe510f97b02566e21685b7b108422370ca67a8a44099ce7e968a778c0ba06e0b
memcached: &memcached docker.io/library/memcached:1.5.17
memcached: &memcached docker.io/library/memcached:1.6.6-alpine
#mos_cinder: &mos_cinder openstackhelm/cinder:stein-ubuntu_bionic-20200513
mos_cinder: &mos_cinder openstackhelm/cinder:stein-ubuntu_bionic-20200306
mos_cinder_storage_init: &mos_cinder_storage_init docker.io/openstackhelm/ceph-config-helper:ubuntu_xenial-20191119
mos_cinder: &mos_cinder docker.io/openstackhelm/cinder:train-ubuntu_bionic-20200612
mos_cinder_storage_init: &mos_cinder_storage_init docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200416
#mos_glance: &mos_glance openstackhelm/glance:stein-ubuntu_bionic-20200513
mos_glance: &mos_glance quay.io/attcomdev/glance-nc:stein-ubuntu_bionic
mos_glance: &mos_glance docker.io/openstackhelm/glance:train-ubuntu_bionic-20200612
#mos_heat: &mos_heat docker.io/deepakdt/heat:stein-ubuntu_bionic-taas
mos_heat: &mos_heat quay.io/attcomdev/heat-nc:stein-ubuntu_bionic
mos_heat: &mos_heat docker.io/openstackhelm/heat:train-ubuntu_bionic-20200612
mos_heat_nova: &mos_heat_nova docker.io/openstackhelm/heat:stein-ubuntu_bionic-20200612
mos_horizon: &mos_horizon docker.io/openstackhelm/horizon:stein-ubuntu_bionic-20200612
mos_heat_nova: &mos_heat_nova docker.io/openstackhelm/heat:train-ubuntu_bionic-20200612
mos_horizon: &mos_horizon docker.io/openstackhelm/horizon:train-ubuntu_bionic-20200612
mos_keystone: &mos_keystone docker.io/openstackhelm/keystone:stein-ubuntu_bionic-20200612
mos_keystone: &mos_keystone docker.io/openstackhelm/keystone:train-ubuntu_bionic-20200612
#mos_neutron: &mos_neutron docker.io/imarijitbose/stein_neutron_bionic
mos_neutron: &mos_neutron quay.io/attcomdev/neutron-nc:stein-ubuntu_bionic
mos_neutron: &mos_neutron docker.io/openstackhelm/neutron:train-ubuntu_bionic-20200612
mos_neutron_sriov: &mos_neutron_sriov docker.io/openstackhelm/neutron:stein-ubuntu_bionic-20200515
mos_nova: &mos_nova quay.io/attcomdev/nova-nc:stein-ubuntu_bionic
mos_neutron_sriov: &mos_neutron_sriov docker.io/openstackhelm/neutron:train-ubuntu_bionic-20200612
mos_nova: &mos_nova docker.io/openstackhelm/nova:train-ubuntu_bionic-20200612
mysqlclient-utility: &mysqlclient-utility quay.io/airshipit/porthole-mysqlclient-utility:744e4bb10acccaa8722f9e05fb0a39a1b5b35731-ubuntu_bionic
mysqlclient-utility: &mysqlclient-utility quay.io/airshipit/porthole-mysqlclient-utility:e7a6644b3af8bddd94d20ca35f388ae605203416-ubuntu_bionic
nova_novncproxy: &nova_novncproxy docker.io/openstackhelm/nova:stein-ubuntu_bionic-20200612
nova_novncproxy: &nova_novncproxy docker.io/openstackhelm/nova:train-ubuntu_bionic-20200612
nova_spiceproxy: &nova_spiceproxy docker.io/openstackhelm/nova:stein-ubuntu_bionic-20200612
nova_spiceproxy: &nova_spiceproxy docker.io/openstackhelm/nova:train-ubuntu_bionic-20200612
#openvswitch: &openvswitch docker.io/deepakdt/openvswitch:ovs2.11.1-dpdk18.11.3-ubuntu_bionic-dpdk
openvswitch: &openvswitch quay.io/attcomdev/openvswitch-nc:ubuntu_bionic
openvswitch: &openvswitch docker.io/openstackhelm/openvswitch:ubuntu_bionic-20200722
openstack-utility: &openstack-utility quay.io/airshipit/porthole-openstack-utility:744e4bb10acccaa8722f9e05fb0a39a1b5b35731-ubuntu_bionic
openstack-utility: &openstack-utility quay.io/airshipit/porthole-openstack-utility:e7a6644b3af8bddd94d20ca35f388ae605203416-ubuntu_bionic
pegleg: &pegleg quay.io/airshipit/pegleg@sha256:fc61ffb6a1456edef6c251371467f424c65521be63bfe79db173ece509f7d197
nc-swissknife: &nc-swissknife quay.io/airshipit/pegleg@sha256:fc61ffb6a1456edef6c251371467f424c65521be63bfe79db173ece509f7d197
pegleg: &pegleg quay.io/airshipit/pegleg@sha256:0ca3b708c37425e4e05627443dbd39d2ed27cf06193d2cc3bcb5b985b260d79d
nc-swissknife: &nc-swissknife quay.io/airshipit/pegleg@sha256:0ca3b708c37425e4e05627443dbd39d2ed27cf06193d2cc3bcb5b985b260d79d
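# Unlike the tag-pinned images in this section, pegleg and nc-swissknife are
# pinned by immutable sha256 digest, so the reference cannot drift even if a
# tag is re-pushed. Pulling by digest (illustrative):
#   docker pull quay.io/airshipit/pegleg@sha256:0ca3b708c37425e4e05627443dbd39d2ed27cf06193d2cc3bcb5b985b260d79d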
patroni: &patroni docker-prod-local.artifactory.mirantis.com/nc/patroni:9.6.15-bionic-1567439806207
@ -969,7 +957,7 @@ data:
prometheus_postgresql_exporter_create_user: &prometheus_postgresql_exporter_create_user docker.io/library/postgres:9.5
rabbitmq_mgmt: &rabbitmq_mgmt docker.io/library/rabbitmq:3.7.15-management
rabbitmq: &rabbitmq docker.io/library/rabbitmq:3.7.15
rabbitmq_exporter: &rabbitmq_exporter docker.io/kbudde/rabbitmq-exporter:v0.21.0
rabbitmq_exporter: &rabbitmq_exporter docker.io/kbudde/rabbitmq-exporter:v1.0.0-RC7.1
rally_test: &rally_test docker.io/xrally/xrally-openstack:1.3.0
#rbd_provisioner: &rbd_provisioner docker.io/openstackhelm/ceph-rbd-provisioner:ubuntu_bionic-20191216
rbd_provisioner: &rbd_provisioner docker.io/openstackhelm/ceph-rbd-provisioner:ubuntu_bionic-20200521
@ -977,11 +965,11 @@ data:
shipyard: &shipyard quay.io/airshipit/shipyard:0b29f89d3fc284fe483c58f102574c687034468d-ubuntu_xenial
sstream_cache: &sstream_cache quay.io/airshipit/sstream-cache:03ebbaaca3f38ab49a5993b506761a4abfd19d89
storage_init: &storage_init docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20191216
#storage_init: &storage_init docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521
#storage_init: &storage_init docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20191216
storage_init: &storage_init docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521
tiller: &tiller gcr.io/kubernetes-helm/tiller@sha256:4554a65fb8278d93f1c2c1f335ddbfcd6faa016c24b97e8de46c6b8fc1e9e7f5
osh_heat: &osh_heat docker.io/openstackhelm/heat:stein-ubuntu_bionic-20200612
tiller: &tiller ghcr.io/helm/tiller@sha256:fffff22d7b397fa5d5461c0c554584aa9d1d3631952244bfb9795f2e094d10dd
osh_heat: &osh_heat docker.io/openstackhelm/heat:train-ubuntu_bionic-20200612
node_exporter: &node_exporter docker.io/prom/node-exporter:v0.15.0
process_exporter: &process_exporter docker.io/ncabatoff/process-exporter:0.2.11
@ -1153,9 +1141,6 @@ data:
calicoctl-utility:
calicoctl_utility: *calicoctl-utility
image_repo_sync: *image_repo_sync
ncct-utility:
ncct_utility: *ncct-utility
image_repo_sync: *image_repo_sync
compute-utility:
compute_utility: *compute-utility
image_repo_sync: *image_repo_sync
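# The image URLs above are declared once with YAML anchors (&name) and
# consumed here through aliases (*name), so a single tag bump propagates to
# every chart that references the image. A minimal sketch of the pattern,
# reusing the busybox entry defined earlier (the `values` consumer key below
# is hypothetical):
#   images:
#     busybox: &busybox docker.io/busybox:1.28.3
#   values:
#     test_image: *busybox   # resolves to docker.io/busybox:1.28.3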
@ -1737,14 +1722,14 @@ data:
- main
gpgkey: ""
named:
runtime: containerd=1.2.10-1~u18.04+mcp
runtime: containerd=1.5.2-0ubuntu1~18.04.2
socat: socat
unnamed:
- bsd-mailx
- ceph-common
- chrony
- docker-ce=18.09.9-1~u18.04+mcp
- linux-modules-extra-4.15.0-64
- linux-modules-extra-4.15.0-140-generic
- jq
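# Entries of the form name=version pin an exact apt package version. On a
# node, the equivalent manual command would be (illustrative only; installs
# are driven by the deployment tooling):
#   apt-get install -y containerd=1.5.2-0ubuntu1~18.04.2 docker-ce=18.09.9-1~u18.04+mcp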
validation:
@ -1759,6 +1744,5 @@ data:
# NC release to reboot the nodes after the completion of the Shipyard site-update. In
# the MOP update, you should also link to the following wiki page:
# https://wiki.web.att.com/display/CCPdev/i40e+Driver+upgrades
#location: https://ARTIFACTS_DOMAIN/artifactory/soureforgeie40/i40e/i40e-2.9.24.tar.gz
location: https://downloads.sourceforge.net/project/e1000/i40e%20stable/2.11.25/i40e-2.11.25.tar.gz
location: https://netactuate.dl.sourceforge.net/project/e1000/i40e%20stable/2.12.6/i40e-2.12.6.tar.gz
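# A hedged sketch of fetching and unpacking the pinned driver source; the
# actual build/install on the nodes follows the MOP referenced above:
#   curl -LO https://netactuate.dl.sourceforge.net/project/e1000/i40e%20stable/2.12.6/i40e-2.12.6.tar.gz
#   tar -xzf i40e-2.12.6.tar.gz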
...

View File

@ -164,6 +164,7 @@ NO_PROXY=${NO_PROXY:-}
# Promenade specific variables for downloading hyperkube image to generate genesis.sh
PROMENADE_TMP=${PROMENADE_TMP:-}
PROMENADE_TMP_LOCAL=${PROMENADE_TMP_LOCAL:-}
PROMENADE_ENCRYPTION_KEY=${PROMENADE_ENCRYPTION_KEY:-}
EOF
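# Assuming the EOF above closes an unquoted heredoc, each ${VAR:-} expands
# at write time, so an unset variable is recorded as an empty value. A
# hypothetical way to supply the new key before running this script:
#   export PROMENADE_ENCRYPTION_KEY=$(openssl rand -hex 32)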
# extract Hyperkube binary before running Promenade container

View File

@ -15,6 +15,10 @@ metadata:
- method: merge
path: .
data:
platform:
kernel_params:
# isolcpus reserved for OVS-DPDK PMD use
isolcpus: 4-7,48-51
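# With this setting the rendered kernel command line gains
# isolcpus=4-7,48-51, removing those cores from the general scheduler so
# OVS-DPDK PMD threads can be pinned to them; that cores 48-51 are the SMT
# siblings of 4-7 is an assumption about this hardware profile.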
metadata:
owner_data:
openstack-ranger-agent: enabled

View File

@ -17,6 +17,10 @@ metadata:
- method: replace
path: .metadata.owner_data
data:
platform:
kernel_params:
# isolcpus reserved for OVS-DPDK PMD use
isolcpus: 4-7,48-51
metadata:
owner_data:
# Additional label to global