#################################################################################################################
# Create a filesystem with replication enabled, suitable for a production environment.
# A minimum of 3 OSDs on different nodes are required in this example.
#   kubectl create -f filesystem.yaml
#################################################################################################################
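# After the manifest is applied, the filesystem can be checked with something like the commands below.
# This is a minimal sketch: it assumes the names used in this file and that the optional rook-ceph
# toolbox deployment ("rook-ceph-tools") is running in the same cluster.
#   kubectl -n rook-ceph get cephfilesystem myfs
#   kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph fs status myfs
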
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: myfs
  namespace: rook-ceph # namespace:cluster
spec:
  # The metadata pool spec. Must use replication.
  metadataPool:
    replicated:
      size: 3
      requireSafeReplicaSize: true
    parameters:
      # Inline compression mode for the data pool
      # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
      compression_mode: none
      # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
      # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
      #target_size_ratio: ".5"
  # The list of data pool specs. Can use replication or erasure coding
  # (see the commented erasure-coded example below this list).
  dataPools:
    - failureDomain: host
      replicated:
        size: 3
        # Disallow setting a pool with replica size 1, since this could lead to data loss without recovery.
        # Make sure you're *ABSOLUTELY CERTAIN* that is what you want
        requireSafeReplicaSize: true
      parameters:
        # Inline compression mode for the data pool
        # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
        compression_mode: none
        # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
        # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
        #target_size_ratio: ".5"
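    # A minimal sketch of an erasure-coded data pool that could be used instead of (or alongside)
    # the replicated pool above. The chunk counts are illustrative only, and the metadata pool
    # must still use replication:
    # - failureDomain: host
    #   erasureCoded:
    #     dataChunks: 2
    #     codingChunks: 1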
  # Whether to preserve the filesystem after the CephFilesystem CRD is deleted
  preserveFilesystemOnDelete: true
  # The metadata service (mds) configuration
  metadataServer:
    # The number of active MDS instances
    activeCount: 1
    # Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover.
    # If false, standbys will be available, but will not have a warm cache.
    activeStandby: true
    # The affinity rules to apply to the mds deployment
    placement:
      #  nodeAffinity:
      #    requiredDuringSchedulingIgnoredDuringExecution:
      #      nodeSelectorTerms:
      #        - matchExpressions:
      #            - key: role
      #              operator: In
      #              values:
      #                - mds-node
      #  topologySpreadConstraints:
      #  tolerations:
      #    - key: mds-node
      #      operator: Exists
      #  podAffinity:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
                - key: app
                  operator: In
                  values:
                    - rook-ceph-mds
            # topologyKey: kubernetes.io/hostname will place MDS across different hosts
            topologyKey: kubernetes.io/hostname
        preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - rook-ceph-mds
              # topologyKey: */zone can be used to spread MDS across different AZs
              # Use <topologyKey: failure-domain.beta.kubernetes.io/zone> if your cluster runs Kubernetes v1.16 or lower
              # Use <topologyKey: topology.kubernetes.io/zone> if your cluster runs Kubernetes v1.17 or higher
              topologyKey: topology.kubernetes.io/zone
    # A key/value list of annotations
    annotations:
    #  key: value
    # A key/value list of labels
    labels:
    #  key: value
    resources:
    # The requests and limits set here allow the filesystem MDS Pod(s) to use half of one CPU core and 1 gigabyte of memory
    #  limits:
    #    cpu: "500m"
    #    memory: "1024Mi"
    #  requests:
    #    cpu: "500m"
    #    memory: "1024Mi"
    # priorityClassName: my-priority-class
  # Filesystem mirroring settings
  # mirroring:
  #   enabled: true
  #   # list of Kubernetes Secrets containing the peer token
  #   # for more details see: https://docs.ceph.com/en/latest/dev/cephfs-mirroring/#bootstrap-peers
  #   peers:
  #     secretNames:
  #       - secondary-cluster-peer
  #   # specify the schedule(s) on which snapshots should be taken
  #   # see the official syntax here: https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-schedules
  #   snapshotSchedules:
  #     - path: /
  #       interval: 24h # daily snapshots
  #       startTime: 11:55
  #   # manage retention policies
  #   # see the duration syntax here: https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-retention-policies
  #   snapshotRetention:
  #     - path: /
  #       duration: "h 24"