# treasuremap/global/software/charts/osh/openstack-tenant-ceph/ceph-client.yaml

---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: tenant-ceph-client-global
  layeringDefinition:
    abstract: true
    layer: global
  storagePolicy: cleartext
  labels:
    name: tenant-ceph-client-global
  substitutions:
    # Chart source
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.ucp.tenant-ceph-client
      dest:
        path: .source
    # Images
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .images.ceph.tenant-ceph-client
      dest:
        path: .values.images.tags
    # IP addresses
    - src:
        schema: pegleg/CommonAddresses/v1
        name: common-addresses
        path: .storage.ceph.public_cidr
      dest:
        path: .values.network.public
    - src:
        schema: pegleg/CommonAddresses/v1
        name: common-addresses
        path: .storage.ceph.cluster_cidr
      dest:
        path: .values.network.cluster
    # Endpoints
    - src:
        schema: pegleg/EndpointCatalogue/v1
        name: ucp_endpoints
        path: .ceph.tenant_ceph_mon
      dest:
        path: .values.endpoints.ceph_mon
    - src:
        schema: pegleg/EndpointCatalogue/v1
        name: ucp_endpoints
        path: .ceph.tenant_ceph_mgr
      dest:
        path: .values.endpoints.ceph_mgr
    # Secrets
    - dest:
        path: .values.conf.ceph.global.fsid
      src:
        schema: deckhand/Passphrase/v1
        name: tenant_ceph_fsid
        path: .
data:
  chart_name: tenant-ceph-client
  release: tenant-ceph-client
  namespace: tenant-ceph
  protected:
    continue_processing: false
  wait:
    timeout: 7200
    labels:
      release_group: clcp-tenant-ceph-client
    resources:
      - type: deployment
        min_ready: 1
      - type: job
    native:
      enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: job
          labels:
            release_group: clcp-tenant-ceph-client
        - type: pod
          labels:
            release_group: clcp-tenant-ceph-client
            component: test
  test:
    enabled: true
  values:
    manifests:
      deployment_mds: false
      job_bootstrap: true
      cronjob_defragosds: false
    labels:
      job:
        node_selector_key: tenant-ceph-control-plane
        node_selector_value: enabled
      mds:
        node_selector_key: tenant-ceph-mds
        node_selector_value: enabled
      mgr:
        node_selector_key: tenant-ceph-mgr
        node_selector_value: enabled
      checkdns:
        node_selector_key: tenant-ceph-mon
        node_selector_value: enabled
    endpoints:
      ceph_mon:
        namespace: tenant-ceph
      ceph_mgr:
        namespace: tenant-ceph
    monitoring:
      prometheus:
        ceph_mgr:
          port: 9284
    ceph_mgr_modules_config:
      prometheus:
        server_port: 9284
    deployment:
      ceph: true
    bootstrap:
      # Placed here to run after the main Ceph components are laid down/updated
      enabled: true
      script: |
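        # Poll until every OSD reports the desired Ceph release, then poll until
        # no placement group is left in a state other than "active+...".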
        set -x
        DESIRED_VERSION="mimic"
        ADDITIONAL_VERSIONS=1
        while [ $ADDITIONAL_VERSIONS -gt 0 ]; do
          sleep 5
          ADDITIONAL_VERSIONS=$(ceph tell osd.* version --format json | awk -F 'osd.[0-9]*: ' '/^osd/ { print $2}' | jq -r '.version' | awk '{ print $(NF-1) }' | uniq | grep -v ${DESIRED_VERSION} | wc -l)
        done
        while [[ `ceph pg ls | tail -n +2 | grep -v "active+"` ]]; do
          sleep 5
        done
    ceph_mgr_enabled_modules:
      - restful
      - status
      - prometheus
      - balancer
      - iostat
    jobs:
      pool_checkPGs:
        # Run once a month at midnight of the first day of the month
        cron: "0 0 1 * *"
        history:
          # Number of successful jobs to keep
          successJob: 1
          # Number of failed jobs to keep
          failJob: 1
        concurrency:
          # Skip a new job if the previous job is still active
          execPolicy: Forbid
        startingDeadlineSecs: 60
    pod:
      replicas:
        mgr: 3
      resources:
        enabled: true
        mds:
          requests:
            memory: "512Mi"
            cpu: "500m"
          limits:
            memory: "2Gi"
            cpu: "2000m"
        mgr:
          requests:
            memory: "2Gi"
            cpu: "2000m"
          limits:
            memory: "8Gi"
            cpu: "4000m"
        checkdns:
          requests:
            memory: "512Mi"
            cpu: "500m"
          limits:
            memory: "1Gi"
            cpu: "1000m"
        jobs:
          bootstrap:
            requests:
              memory: "512Mi"
              cpu: "500m"
            limits:
              memory: "1Gi"
              cpu: "1000m"
          image_repo_sync:
            requests:
              memory: "512Mi"
              cpu: "500m"
            limits:
              memory: "1Gi"
              cpu: "1000m"
          rbd_pool:
            requests:
              memory: "512Mi"
              cpu: "500m"
            limits:
              memory: "1Gi"
              cpu: "1000m"
          tests:
            requests:
              memory: "512Mi"
              cpu: "500m"
            limits:
              memory: "1Gi"
              cpu: "1000m"
    conf:
      features:
        mds: false
        pg_autoscaler: false
      pool:
        spec:
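          # Note (informational, not in the original): percent_total_data expresses
          # each pool's expected share of cluster data; together with
          # target.pg_per_osd below it drives the chart's per-pool PG sizing.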
          # RBD pool
          - name: rbd
            application: rbd
            replication: 3
            percent_total_data: 0.1
          # Cinder volumes pool
          - name: cinder.volumes
            application: cinder-volume
            replication: 3
            percent_total_data: 15
          # RadosGW pools
          - name: .rgw.root
            application: rgw
            replication: 3
            percent_total_data: 0.1
          - name: default.rgw.control
            application: rgw
            replication: 3
            percent_total_data: 0.1
          - name: default.rgw.log
            application: rgw
            replication: 3
            percent_total_data: 5
          - name: default.rgw.intent-log
            application: rgw
            replication: 3
            percent_total_data: 0.1
          - name: default.rgw.meta
            application: rgw
            replication: 3
            percent_total_data: 0.1
          - name: default.rgw.usage
            application: rgw
            replication: 3
            percent_total_data: 0.1
          - name: default.rgw.users.uid
            application: rgw
            replication: 3
            percent_total_data: 0.1
          - name: default.rgw.buckets.non-ec
            application: rgw
            replication: 3
            percent_total_data: 0.1
          - name: default.rgw.buckets.index
            application: rgw
            replication: 3
            percent_total_data: 3
          - name: default.rgw.buckets.data
            application: rgw
            replication: 3
            percent_total_data: 50
        # NOTE(alanmeadows): support 4.x 16.04 kernels (non-HWE)
        crush:
          tunables: 'hammer'
        # NOTE(alanmeadows): This is required ATM for bootstrapping a Ceph
        # cluster with only one OSD. Depending on OSD targeting & site
        # configuration this can be changed.
        target:
          osd: 1
          pg_per_osd: 100
          protected: true
          # The quota is set to 10000 so that every pool's quota exceeds the
          # cluster's capacity; at 1000, some pools would be capped at about 85%.
          quota: 10000
        default:
          # NOTE(alanmeadows): This is required ATM for bootstrapping a Ceph
          # cluster with only one OSD. Depending on OSD targeting & site
          # configuration this can be changed.
          crush_rule: rack_replicated_rule
      ceph:
        global:
          # NOTE(mb874d): This is required ATM for bootstrapping a Ceph
          # cluster with only one OSD. Depending on OSD targeting & site
          # configuration this can be changed.
          osd_pool_default_size: 1
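          # Example (hypothetical site-layer override): a site with three or more
          # OSD hosts would typically raise this, e.g. "osd_pool_default_size: 3".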
  dependencies:
    - ceph-htk
...