Ceph Non-Resilient Pools - Power platform testing
Container Storage On Power / CSOP-3 ODF 4.12 Test / CSOP-62
Verify Replica 1 - Non-resilient pool - Dev Preview
[root@rdr-res3-lon06-bastion-0 ~]# oc get clusterversion
NAME      VERSION                                      AVAILABLE   PROGRESSING   SINCE   STATUS
version   4.12.0-0.nightly-ppc64le-2022-11-24-203625   True        False         11h     Cluster version is 4.12.0-0.nightly-ppc64le-2022-11-24-203625
[root@rdr-res3-lon06-bastion-0 ~]#
[root@rdr-res3-lon06-bastion-0 ~]# oc get storagecluster
NAME                 AGE     PHASE   EXTERNAL   CREATED AT             VERSION
ocs-storagecluster   5m20s   Ready              2022-12-15T07:01:33Z   4.12.0
[root@rdr-res3-lon06-bastion-0 ~]# oc patch storagecluster ocs-storagecluster -n openshift-storage --type json --patch '[{ "op": "replace", "path": "/spec/managedResources/cephNonResilientPools/enable", "value": true }]'
storagecluster.ocs.openshift.io/ocs-storagecluster patched
[root@rdr-res3-lon06-bastion-0 ~]# oc patch cm rook-ceph-operator-config -n openshift-storage -p $'data:\n "CSI_ENABLE_TOPOLOGY": "true"'
configmap/rook-ceph-operator-config patched
[root@rdr-res3-lon06-bastion-0 ~]#
[root@rdr-res3-lon06-bastion-0 ~]# oc patch cm rook-ceph-operator-config -n openshift-storage -p $'data:\n "CSI_TOPOLOGY_DOMAIN_LABELS": "topology.kubernetes.io/zone"'
configmap/rook-ceph-operator-config patched
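#Optional cross-check (not captured in this run): read the settings back before proceeding; the three values should come back as true, true and topology.kubernetes.io/zone respectively
oc get storagecluster ocs-storagecluster -n openshift-storage -o jsonpath='{.spec.managedResources.cephNonResilientPools.enable}{"\n"}'
oc get cm rook-ceph-operator-config -n openshift-storage -o jsonpath='{.data.CSI_ENABLE_TOPOLOGY}{"\n"}{.data.CSI_TOPOLOGY_DOMAIN_LABELS}{"\n"}'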
[root@rdr-res3-lon06-bastion-0 ~]# oc get cephblockpools
NAME                                        PHASE
ocs-storagecluster-cephblockpool            Ready
ocs-storagecluster-cephblockpool-worker-0   Ready
ocs-storagecluster-cephblockpool-worker-1   Ready
ocs-storagecluster-cephblockpool-worker-2   Ready
[root@rdr-res3-lon06-bastion-0 ~]#
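#Optional helper (not part of the original output): compare device class, replica size and failure domain across all four pools in one listing
oc get cephblockpools -n openshift-storage -o custom-columns=NAME:.metadata.name,DEVICECLASS:.spec.deviceClass,SIZE:.spec.replicated.size,FAILUREDOMAIN:.spec.failureDomain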
##Check that the default pool has the "replicated" device class
[root@rdr-res3-lon06-bastion-0 ~]# oc get cephblockpools ocs-storagecluster-cephblockpool -o yaml
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  creationTimestamp: "2022-12-15T07:01:33Z"
  finalizers:
  - cephblockpool.ceph.rook.io
  generation: 2
  name: ocs-storagecluster-cephblockpool
  namespace: openshift-storage
  ownerReferences:
  - apiVersion: ocs.openshift.io/v1
    blockOwnerDeletion: true
    controller: true
    kind: StorageCluster
    name: ocs-storagecluster
    uid: 4b3315df-7e73-48e3-8106-76b41363cd3e
  resourceVersion: "334149"
  uid: 47ff62e9-ac94-4849-9683-e369dd6c3e1d
spec:
  deviceClass: replicated
  enableRBDStats: true
  erasureCoded:
    codingChunks: 0
    dataChunks: 0
  failureDomain: host
  mirroring: {}
  quotas: {}
  replicated:
    replicasPerFailureDomain: 1
    size: 3
    targetSizeRatio: 0.49
  statusCheck:
    mirror: {}
status:
  observedGeneration: 2
  phase: Ready
[root@rdr-res3-lon06-bastion-0 ~]#
##Check that the non-resilient pools use the failure domain (node/zone) as the device class
[root@rdr-res3-lon06-bastion-0 ~]# oc get cephblockpool ocs-storagecluster-cephblockpool-worker-0 -o yaml
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  creationTimestamp: "2022-12-15T07:07:46Z"
  finalizers:
  - cephblockpool.ceph.rook.io
  generation: 1
  name: ocs-storagecluster-cephblockpool-worker-0
  namespace: openshift-storage
  ownerReferences:
  - apiVersion: ocs.openshift.io/v1
    blockOwnerDeletion: true
    controller: true
    kind: StorageCluster
    name: ocs-storagecluster
    uid: 4b3315df-7e73-48e3-8106-76b41363cd3e
  resourceVersion: "335089"
  uid: 330caaa5-4249-4367-ba60-62690fe4f874
spec:
  deviceClass: worker-0
  enableRBDStats: true
  erasureCoded:
    codingChunks: 0
    dataChunks: 0
  failureDomain: host
  mirroring: {}
  quotas: {}
  replicated:
    size: 1
  statusCheck:
    mirror: {}
status:
  observedGeneration: 1
  phase: Ready
[root@rdr-res3-lon06-bastion-0 ~]#
##storageclass
[root@rdr-res3-lon06-bastion-0 ~]# oc get storageclass
NAME                                        PROVISIONER                             RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
localblock                                  kubernetes.io/no-provisioner            Delete          WaitForFirstConsumer   false                  28m
ocs-storagecluster-ceph-non-resilient-rbd   openshift-storage.rbd.csi.ceph.com      Delete          WaitForFirstConsumer   true                   15m
ocs-storagecluster-ceph-rbd                 openshift-storage.rbd.csi.ceph.com      Delete          Immediate              true                   20m
ocs-storagecluster-ceph-rgw                 openshift-storage.ceph.rook.io/bucket   Delete          Immediate              false                  22m
ocs-storagecluster-cephfs                   openshift-storage.cephfs.csi.ceph.com   Delete          Immediate              true                   20m
openshift-storage.noobaa.io                 openshift-storage.noobaa.io/obc         Delete          Immediate              false                  18m
[root@rdr-res3-lon06-bastion-0 ~]#
##Check the new storageclass for non-resilient topology-constrained pools
[root@rdr-res3-lon06-bastion-0 ~]# oc get storageclass ocs-storagecluster-ceph-non-resilient-rbd -o yaml
allowVolumeExpansion: true
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  annotations:
    description: 'Ceph Non Resilient Pools : Provides RWO Filesystem volumes, and
      RWO and RWX Block volumes'
  creationTimestamp: "2022-12-15T07:09:10Z"
  name: ocs-storagecluster-ceph-non-resilient-rbd
  resourceVersion: "335168"
  uid: e5ae2d13-a6db-4f2c-a4f8-812e48235307
parameters:
  clusterID: openshift-storage
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: openshift-storage
  csi.storage.k8s.io/fstype: ext4
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
  csi.storage.k8s.io/node-stage-secret-namespace: openshift-storage
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: openshift-storage
  imageFeatures: layering,deep-flatten,exclusive-lock,object-map,fast-diff
  imageFormat: "2"
  pool: ocs-storagecluster-cephblockpool
  topologyConstrainedPools: |-
    [
      {
        "poolName": "ocs-storagecluster-cephblockpool-worker-1",
        "domainSegments": [
          {
            "domainLabel": "host",
            "value": "worker-1"
          }
        ]
      },
      {
        "poolName": "ocs-storagecluster-cephblockpool-worker-2",
        "domainSegments": [
          {
            "domainLabel": "host",
            "value": "worker-2"
          }
        ]
      },
      {
        "poolName": "ocs-storagecluster-cephblockpool-worker-0",
        "domainSegments": [
          {
            "domainLabel": "host",
            "value": "worker-0"
          }
        ]
      }
    ]
provisioner: openshift-storage.rbd.csi.ceph.com
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
[root@rdr-res3-lon06-bastion-0 ~]#
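#The pool names in topologyConstrainedPools embed the worker hostnames; a quick way to confirm they match the node names and topology labels on this cluster (hypothetical check, output not captured here)
oc get nodes -L kubernetes.io/hostname -L topology.kubernetes.io/zone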
##OSD PODS
[root@rdr-res3-lon06-bastion-0 ~]# oc get pods | grep osd
rook-ceph-osd-0-6fd49bf8dd-2kxf5 2/2 Running 0 23m
rook-ceph-osd-1-cb965d6fd-zb7zv 2/2 Running 0 23m
rook-ceph-osd-2-75d89c9dbd-f8649 2/2 Running 0 23m
rook-ceph-osd-3-7f8d9986d8-k7gcx 2/2 Running 0 18m
rook-ceph-osd-4-d9fb99cc5-hsqbh 2/2 Running 0 18m
rook-ceph-osd-5-8674ccc494-mlwjn 2/2 Running 0 18m
rook-ceph-osd-prepare-21340fc0b3eaaa13c7ff7bde3747282d-5mdzh 0/1 Completed 0 23m
rook-ceph-osd-prepare-42c210093a02c8e4b6af91e249a8649d-k8jcd 0/1 Completed 0 23m
rook-ceph-osd-prepare-ad5556d5edf290c46b62a6b32a0f5aba-nqgzn 0/1 Completed 0 23m
rook-ceph-osd-prepare-worker-0-data-0rtcwv-fsddm 0/1 Completed 0 18m
rook-ceph-osd-prepare-worker-1-data-0xwf6n-q6kqm 0/1 Completed 0 18m
rook-ceph-osd-prepare-worker-2-data-0nxg78-vjdxb 0/1 Completed 0 18m
[root@rdr-res3-lon06-bastion-0 ~]#
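#The three rook-ceph-osd-prepare-worker-* jobs correspond to one extra replica-1 OSD per worker; listing the OSD pods with -o wide (not shown above) is a simple way to confirm osd 3/4/5 each landed on a different node
oc get pods -n openshift-storage -o wide | grep rook-ceph-osd-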
##Data PVCs
[root@rdr-res3-lon06-bastion-0 ~]# oc get pvc | grep data
ocs-deviceset-localblock-0-data-06l6cm Bound local-pv-7854945e 500Gi RWO localblock 24m
ocs-deviceset-localblock-0-data-1tx46r Bound local-pv-ceaba4b7 500Gi RWO localblock 24m
ocs-deviceset-localblock-0-data-2764zg Bound local-pv-7353786a 500Gi RWO localblock 24m
worker-0-data-0rtcwv Bound local-pv-b32d86e2 500Gi RWO localblock 18m
worker-1-data-0xwf6n Bound local-pv-f1711088 500Gi RWO localblock 18m
worker-2-data-0nxg78 Bound local-pv-bc3fb692 500Gi RWO localblock 18m
[root@rdr-res3-lon06-bastion-0 ~]#
##Data PVs
[root@rdr-res3-lon06-bastion-0 ~]# oc get pv | grep data
local-pv-7353786a 500Gi RWO Delete Bound openshift-storage/ocs-deviceset-localblock-0-data-2764zg localblock 31m
local-pv-7854945e 500Gi RWO Delete Bound openshift-storage/ocs-deviceset-localblock-0-data-06l6cm localblock 31m
local-pv-b32d86e2 500Gi RWO Delete Bound openshift-storage/worker-0-data-0rtcwv localblock 31m
local-pv-bc3fb692 500Gi RWO Delete Bound openshift-storage/worker-2-data-0nxg78 localblock 31m
local-pv-ceaba4b7 500Gi RWO Delete Bound openshift-storage/ocs-deviceset-localblock-0-data-1tx46r localblock 31m
local-pv-f1711088 500Gi RWO Delete Bound openshift-storage/worker-1-data-0xwf6n localblock 31m
[root@rdr-res3-lon06-bastion-0 ~]#
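#Each worker-N-data PVC is backed by a local PV pinned to that worker; the node affinity of one of them can be spot-checked like this (hypothetical example using the worker-0 PV from the listing above)
oc get pv local-pv-b32d86e2 -o jsonpath='{.spec.nodeAffinity.required.nodeSelectorTerms}{"\n"}'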
##Ceph OSD Tree
[root@rdr-res3-lon06-bastion-0 ~]# oc rsh rook-ceph-tools-67bb47d98b-jhskk
sh-4.4$ ceph osd tree
ID   CLASS     WEIGHT   TYPE NAME                 STATUS  REWEIGHT  PRI-AFF
 -1            2.92978  root default
 -5            2.92978      region lon
 -4            2.92978          zone lon06
 -3            0.97659              host worker-0
  0  hdd       0.48830                  osd.0         up   1.00000  1.00000
  4  worker-0  0.48830                  osd.4         up   1.00000  1.00000
-11            0.97659              host worker-1
  1  hdd       0.48830                  osd.1         up   1.00000  1.00000
  5  worker-1  0.48830                  osd.5         up   1.00000  1.00000
 -9            0.97659              host worker-2
  2  hdd       0.48830                  osd.2         up   1.00000  1.00000
  3  worker-2  0.48830                  osd.3         up   1.00000  1.00000
sh-4.4$
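#osd.3/4/5 carry per-worker device classes (worker-0/1/2) instead of hdd; the classes and the shadow CRUSH trees built from them can be inspected from the same toolbox session (optional, not captured in this log)
ceph osd crush class ls
ceph osd crush tree --show-shadow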
##Ceph Pool details
sh-4.4$ ceph osd pool ls detail
pool 1 'ocs-storagecluster-cephblockpool' replicated size 3 min_size 2 crush_rule 1 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 31 flags hashpspool,selfmanaged_snaps stripe_width 0 target_size_ratio 0.49 application rbd
pool 2 'device_health_metrics' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 1 pgp_num 1 autoscale_mode on last_change 16 flags hashpspool stripe_width 0 pg_num_max 32 pg_num_min 1 application mgr_devicehealth
pool 3 'ocs-storagecluster-cephobjectstore.rgw.meta' replicated size 3 min_size 2 crush_rule 2 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 20 flags hashpspool stripe_width 0 application rook-ceph-rgw
pool 4 'ocs-storagecluster-cephobjectstore.rgw.buckets.non-ec' replicated size 3 min_size 2 crush_rule 3 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 20 flags hashpspool stripe_width 0 application rook-ceph-rgw
pool 5 '.rgw.root' replicated size 3 min_size 2 crush_rule 5 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 21 flags hashpspool stripe_width 0 application rook-ceph-rgw
pool 6 'ocs-storagecluster-cephobjectstore.rgw.control' replicated size 3 min_size 2 crush_rule 7 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 20 flags hashpspool stripe_width 0 application rook-ceph-rgw
pool 7 'ocs-storagecluster-cephobjectstore.rgw.buckets.index' replicated size 3 min_size 2 crush_rule 6 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 20 flags hashpspool stripe_width 0 application rook-ceph-rgw
pool 8 'ocs-storagecluster-cephobjectstore.rgw.otp' replicated size 3 min_size 2 crush_rule 8 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 21 flags hashpspool stripe_width 0 application rook-ceph-rgw
pool 9 'ocs-storagecluster-cephobjectstore.rgw.log' replicated size 3 min_size 2 crush_rule 4 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 20 flags hashpspool stripe_width 0 application rook-ceph-rgw
pool 10 'ocs-storagecluster-cephfilesystem-metadata' replicated size 3 min_size 2 crush_rule 9 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 29 flags hashpspool stripe_width 0 pg_autoscale_bias 4 pg_num_min 16 recovery_priority 5 application cephfs
pool 11 'ocs-storagecluster-cephfilesystem-data0' replicated size 3 min_size 2 crush_rule 11 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 33 flags hashpspool stripe_width 0 target_size_ratio 0.49 application cephfs
pool 12 'ocs-storagecluster-cephobjectstore.rgw.buckets.data' replicated size 3 min_size 2 crush_rule 10 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 32 flags hashpspool stripe_width 0 target_size_ratio 0.49 application rook-ceph-rgw
pool 13 'ocs-storagecluster-cephblockpool-worker-2' replicated size 1 min_size 1 crush_rule 12 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 45 flags hashpspool,selfmanaged_snaps stripe_width 0 application rbd
pool 14 'ocs-storagecluster-cephblockpool-worker-0' replicated size 1 min_size 1 crush_rule 13 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 52 flags hashpspool,selfmanaged_snaps stripe_width 0 application rbd
pool 15 'ocs-storagecluster-cephblockpool-worker-1' replicated size 1 min_size 1 crush_rule 14 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 58 flags hashpspool,selfmanaged_snaps stripe_width 0 application rbd
sh-4.4$
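#The three ocs-storagecluster-cephblockpool-worker-* pools show "replicated size 1 min_size 1", which is the non-resilient behaviour under test; an individual pool can also be spot-checked (optional)
ceph osd pool get ocs-storagecluster-cephblockpool-worker-0 size
ceph osd pool get ocs-storagecluster-cephblockpool-worker-0 min_size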
#######################
##Testing the non-resilient storageclass topology-aware provisioning
#Create a PVC
[root@rdr-res3-lon06-bastion-0 ~]# cat <<EOF | oc create -f -
> apiVersion: v1
> kind: PersistentVolumeClaim
> metadata:
>   name: non-resilient-rbd-pvc
> spec:
>   accessModes:
>   - ReadWriteOnce
>   resources:
>     requests:
>       storage: 1Gi
>   storageClassName: ocs-storagecluster-ceph-non-resilient-rbd
> EOF
persistentvolumeclaim/non-resilient-rbd-pvc created
#The PVC stays in Pending state until a consumer pod is scheduled (the storageclass uses WaitForFirstConsumer)
[root@rdr-res3-lon06-bastion-0 ~]# oc get pvc | grep non-resilient-rbd-pvc
non-resilient-rbd-pvc Pending ocs-storagecluster-ceph-non-resilient-rbd 22s
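#Because the storageclass uses volumeBindingMode: WaitForFirstConsumer, Pending is expected here; describing the PVC (not captured in this run) should show a "waiting for first consumer to be created before binding" event
oc describe pvc non-resilient-rbd-pvc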
#Create a pod to consume the PVC
[root@rdr-res3-lon06-bastion-0 ~]# cat <<EOF | oc create -f -
> apiVersion: v1
> kind: Pod
> metadata:
>   name: task-pv-pod
> spec:
>   nodeSelector:
>     topology.kubernetes.io/zone: us-east-1a
>   volumes:
>   - name: task-pv-storage
>     persistentVolumeClaim:
>       claimName: non-resilient-rbd-pvc
>   containers:
>   - name: task-pv-container
>     image: nginx
>     ports:
>     - containerPort: 80
>       name: "http-server"
>     volumeMounts:
>     - mountPath: "/usr/share/nginx/html"
>       name: task-pv-storage
> EOF
pod/task-pv-pod created
[root@rdr-res3-lon06-bastion-0 ~]#
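#Optional follow-up (output not captured here): check where the pod was scheduled and whether the PVC has bound as a result
oc get pod task-pv-pod -o wide
oc get pvc non-resilient-rbd-pvc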
[root@rdr-res3-lon06-bastion-0 ~]# cat <<EOF | oc create -f -
> apiVersion: v1
> kind: Pod
> metadata:
>   name: task-pv-pod
> spec:
>   nodeSelector:
>     kubernetes.io/hostname: worker-0
>   volumes:
>   - name: task-pv-storage
>     persistentVolumeClaim:
>       claimName: non-resilient-rbd-pvc
>   containers:
>   - name: task-pv-container
>     image: nginx
>     ports:
>     - containerPort: 80
>       name: "http-server"
>     volumeMounts:
>     - mountPath: "/usr/share/nginx/html"
>       name: task-pv-storage
> EOF
pod/task-pv-pod created
[root@rdr-res3-lon06-bastion-0 ~]#
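#Once the PVC binds, the provisioned PV can be inspected to confirm the image was carved out of the expected per-worker pool and carries node affinity for that topology segment (hedged sketch; exact volumeAttributes keys can vary by CSI driver version)
PV=$(oc get pvc non-resilient-rbd-pvc -o jsonpath='{.spec.volumeName}')
oc get pv "$PV" -o jsonpath='{.spec.csi.volumeAttributes.pool}{"\n"}'
oc get pv "$PV" -o jsonpath='{.spec.nodeAffinity}{"\n"}'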