# treasuremap/global/software/charts/ucp/ceph/ceph-osd.yaml
---
# Armada chart document for the UCP Ceph OSD release.
# Abstract global-layer document: site/type layers refine it via layering.
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: ucp-ceph-osd-global
  layeringDefinition:
    abstract: true
    layer: global
  storagePolicy: cleartext
  labels:
    name: ucp-ceph-osd-global
  substitutions:
    # Chart source
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .charts.ucp.ceph-osd
      dest:
        path: .source
    # Images
    - src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .images.ceph.ceph-osd
      dest:
        path: .values.images.tags
    # IP addresses
    - src:
        schema: pegleg/CommonAddresses/v1
        name: common-addresses
        path: .storage.ceph.public_cidr
      dest:
        path: .values.network.public
    - src:
        schema: pegleg/CommonAddresses/v1
        name: common-addresses
        path: .storage.ceph.cluster_cidr
      dest:
        path: .values.network.cluster
    # Endpoints
    - src:
        schema: pegleg/EndpointCatalogue/v1
        name: ucp_endpoints
        path: .ceph.ceph_mon
      dest:
        path: .values.endpoints.ceph_mon
    # Secrets
    - dest:
        path: .values.conf.ceph.global.fsid
      src:
        schema: deckhand/Passphrase/v1
        name: ceph_fsid
        path: .
data:
  chart_name: ucp-ceph-osd
  release: ucp-ceph-osd
  namespace: ceph
  protected:
    continue_processing: false
  wait:
    timeout: 900
    labels:
      release_group: clcp-ucp-ceph-osd
    resources: []
    native:
      enabled: false
  install:
    no_hooks: false
  upgrade:
    no_hooks: false
    pre:
      delete:
        - type: pod
          labels:
            release_group: clcp-ucp-ceph-osd
            component: test
        - type: job
          labels:
            release_group: clcp-ucp-ceph-osd
            component: post-apply
  test:
    enabled: false
  values:
    # Custom Apparmor Profile needs to be fixed in NC 2.2
    pod:
      mandatory_access_control:
        type: apparmor
        ceph-osd-default:
          ceph-osd-default: unconfined
          ceph-init-dirs: unconfined
          ceph-log-ownership: unconfined
          osd-init: unconfined
        ceph-osd-test:
          init: unconfined
          ceph-cluster-helm-test: unconfined
        ceph-osd-post-apply:
          ceph-osd-post-apply: runtime/default
          init: runtime/default
      lifecycle:
        upgrades:
          daemonsets:
            pod_replacement_strategy: OnDelete
    deploy:
      tool: "ceph-volume"
    manifests:
      cronjob_defragosds: false
      job_post_apply: false
    labels:
      osd:
        node_selector_key: ceph-osd
        node_selector_value: enabled
    endpoints:
      ceph_mon:
        namespace: ceph
      fluentd:
        namespace: osh-infra
    bootstrap:
      enabled: true
    conf:
      storage:
        failure_domain: rack
        # NOTE(review): value is the string "1-7" (a hostname-suffix range),
        # not a number — leave unquoted-safe as-is.
        failure_domain_by_hostname: 1-7
        osd:
          - data:
              type: directory
              location: /var/lib/openstack-helm/ceph/osd/osd-one
            journal:
              type: directory
              location: /var/lib/openstack-helm/ceph/osd/journal-one
      osd:
        # NOTE(alanmeadows): This is required ATM for bootstrapping a Ceph
        # cluster with only one OSD. Depending on OSD targeting & site
        # configuration this can be changed.
        osd_crush_chooseleaf_type: 0
      ceph:
        osd:
          osd_op_num_threads_per_shard_hdd: 1
          osd_op_num_threads_per_shard_ssd: 2
          osd_max_backfills: 10
          osd_scrub_auto_repair: true
  dependencies:
    - ceph-htk
...