StorageCluster ready state issue debug

The ocs-storagecluster StorageCluster is stuck in the Progressing phase: the ReconcileComplete condition reports that the ocs-storagecluster-ceph-non-resilient-rbd StorageClass was skipped while waiting for prerequisites, and the per-host CephBlockPools behind it are in Failure. The captured output below walks through the relevant resources.

[root@rdr-cicd-odf-69bf-bastion-0 ~]# oc get storagecluster ocs-storagecluster -o yaml
apiVersion: ocs.openshift.io/v1
kind: StorageCluster
metadata:
  annotations:
    cluster.ocs.openshift.io/local-devices: "true"
    uninstall.ocs.openshift.io/cleanup-policy: delete
    uninstall.ocs.openshift.io/mode: graceful
  creationTimestamp: "2022-11-08T12:08:27Z"
  finalizers:
  - storagecluster.ocs.openshift.io
  generation: 3
  name: ocs-storagecluster
  namespace: openshift-storage
  ownerReferences:
  - apiVersion: odf.openshift.io/v1alpha1
    kind: StorageSystem
    name: ocs-storagecluster-storagesystem
    uid: ebd7bb6e-e051-4837-b2ee-3f30e9bdc8d4
  resourceVersion: "983940"
  uid: 5508f168-bc46-4b84-89c5-b8d64a06776c
spec:
  arbiter: {}
  encryption:
    kms: {}
  externalStorage: {}
  flexibleScaling: true
  managedResources:
    cephBlockPools: {}
    cephCluster: {}
    cephConfig: {}
    cephDashboard: {}
    cephFilesystems: {}
    cephNonResilientPools:
      enable: true
    cephObjectStoreUsers: {}
    cephObjectStores: {}
    cephToolbox: {}
  mirroring: {}
  monDataDirHostPath: /var/lib/rook
  storageDeviceSets:
  - config: {}
    count: 3
    dataPVCTemplate:
      metadata: {}
      spec:
        accessModes:
        - ReadWriteOnce
        resources:
          requests:
            storage: 100Gi
        storageClassName: localblock
        volumeMode: Block
      status: {}
    name: ocs-deviceset-localblock
    placement: {}
    preparePlacement: {}
    replica: 1
    resources: {}
status:
  conditions:
  - lastHeartbeatTime: "2022-11-09T05:55:47Z"
    lastTransitionTime: "2022-11-08T12:30:32Z"
    message: 'Error while reconciling: some StorageClasses were skipped while waiting for pre-requisites to be met: [ocs-storagecluster-ceph-non-resilient-rbd]'
    reason: ReconcileFailed
    status: "False"
    type: ReconcileComplete
  - lastHeartbeatTime: "2022-11-08T12:30:30Z"
    lastTransitionTime: "2022-11-08T12:16:39Z"
    message: Reconcile completed successfully
    reason: ReconcileCompleted
    status: "True"
    type: Available
  - lastHeartbeatTime: "2022-11-08T12:30:30Z"
    lastTransitionTime: "2022-11-08T12:16:39Z"
    message: Reconcile completed successfully
    reason: ReconcileCompleted
    status: "False"
    type: Progressing
  - lastHeartbeatTime: "2022-11-08T12:30:30Z"
    lastTransitionTime: "2022-11-08T12:08:28Z"
    message: Reconcile completed successfully
    reason: ReconcileCompleted
    status: "False"
    type: Degraded
  - lastHeartbeatTime: "2022-11-08T12:30:32Z"
    lastTransitionTime: "2022-11-08T12:30:31Z"
    message: StorageCluster is expanding
    reason: Expanding
    status: "False"
    type: Upgradeable
  externalStorage:
    grantedCapacity: "0"
  failureDomain: host
  failureDomainKey: kubernetes.io/hostname
  failureDomainValues:
  - worker-2
  - worker-0
  - worker-1
  images:
    ceph:
      actualImage: quay.io/rhceph-dev/rhceph@sha256:9b9d1dffa2254ee04f6d7628daa244e805637cf03420bad89545495fadb491d7
      desiredImage: quay.io/rhceph-dev/rhceph@sha256:9b9d1dffa2254ee04f6d7628daa244e805637cf03420bad89545495fadb491d7
    noobaaCore:
      actualImage: quay.io/rhceph-dev/odf4-mcg-core-rhel8@sha256:ee1bc56dc3cf3b7f0136184668700caca835712f3252bb79c6c745e772850e25
      desiredImage: quay.io/rhceph-dev/odf4-mcg-core-rhel8@sha256:ee1bc56dc3cf3b7f0136184668700caca835712f3252bb79c6c745e772850e25
    noobaaDB:
      actualImage: quay.io/rhceph-dev/rhel8-postgresql-12@sha256:f9393bef938580aa39aacf94bc56fd6f2ac515173f770c75f7fac9650eff62ba
      desiredImage: quay.io/rhceph-dev/rhel8-postgresql-12@sha256:f9393bef938580aa39aacf94bc56fd6f2ac515173f770c75f7fac9650eff62ba
  kmsServerConnection: {}
  nodeTopologies:
    labels:
      kubernetes.io/hostname:
      - worker-2
      - worker-0
      - worker-1
  phase: Progressing
  relatedObjects:
  - apiVersion: ceph.rook.io/v1
    kind: CephCluster
    name: ocs-storagecluster-cephcluster
    namespace: openshift-storage
    resourceVersion: "983639"
    uid: 4e3d64c0-ee31-49d7-9bfd-2d7c70a60db4
  - apiVersion: noobaa.io/v1alpha1
    kind: NooBaa
    name: noobaa
    namespace: openshift-storage
    resourceVersion: "133883"
    uid: 85666aa5-138a-47c4-93cb-23f3f4e62b91
  version: 4.12.0
[root@rdr-cicd-odf-69bf-bastion-0 ~]#
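The ReconcileComplete condition fails only on the ocs-storagecluster-ceph-non-resilient-rbd StorageClass, i.e. on the per-host replica-1 pools created because managedResources.cephNonResilientPools.enable is true. A useful next step (not captured above) is to pull the skip reason out of the ocs-operator log; the deployment name ocs-operator is assumed to be the default here, and the grep pattern is only a suggestion:

    oc logs -n openshift-storage deployment/ocs-operator | grep -iE 'non-resilient|skipped'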
[root@rdr-cicd-odf-69bf-bastion-0 ~]# oc get cephblockpools
NAME                                        PHASE
ocs-storagecluster-cephblockpool            Ready
ocs-storagecluster-cephblockpool-worker-0   Failure
ocs-storagecluster-cephblockpool-worker-1   Failure
ocs-storagecluster-cephblockpool-worker-2   Failure
[root@rdr-cicd-odf-69bf-bastion-0 ~]#
[root@rdr-cicd-odf-69bf-bastion-0 ~]# oc get cephblockpools ocs-storagecluster-cephblockpool-worker-0 -o yaml
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  creationTimestamp: "2022-11-08T12:30:31Z"
  finalizers:
  - cephblockpool.ceph.rook.io
  generation: 1
  name: ocs-storagecluster-cephblockpool-worker-0
  namespace: openshift-storage
  ownerReferences:
  - apiVersion: ocs.openshift.io/v1
    blockOwnerDeletion: true
    controller: true
    kind: StorageCluster
    name: ocs-storagecluster
    uid: 5508f168-bc46-4b84-89c5-b8d64a06776c
  resourceVersion: "134070"
  uid: e6836b78-d825-4a74-a003-9d68df4fec39
spec:
  deviceClass: worker-0
  enableRBDStats: true
  erasureCoded:
    codingChunks: 0
    dataChunks: 0
  failureDomain: host
  mirroring: {}
  quotas: {}
  replicated:
    size: 1
  statusCheck:
    mirror: {}
status:
  phase: Failure
[root@rdr-cicd-odf-69bf-bastion-0 ~]#
[root@rdr-cicd-odf-69bf-bastion-0 ~]# oc get storageclass
NAME                          PROVISIONER                             RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
localblock                    kubernetes.io/no-provisioner            Delete          WaitForFirstConsumer   false                  17h
ocs-storagecluster-ceph-rbd   openshift-storage.rbd.csi.ceph.com      Delete          Immediate              true                   17h
ocs-storagecluster-ceph-rgw   openshift-storage.ceph.rook.io/bucket   Delete          Immediate              false                  17h
ocs-storagecluster-cephfs     openshift-storage.cephfs.csi.ceph.com   Delete          Immediate              true                   17h
openshift-storage.noobaa.io   openshift-storage.noobaa.io/obc         Delete          Immediate              false                  17h
[root@rdr-cicd-odf-69bf-bastion-0 ~]#
[root@rdr-cicd-odf-69bf-bastion-0 ~]# oc get pods | grep osd
rook-ceph-osd-0-748f6f8897-ww995                               2/2   Running     0   17h
rook-ceph-osd-1-7f9585774-ldg2d                                2/2   Running     0   17h
rook-ceph-osd-2-b8cf8cd6-z8dzb                                 2/2   Running     0   17h
rook-ceph-osd-prepare-40704edebd520f1ff9d6d8f09e8a5545-mltnm   0/1   Completed   0   17h
rook-ceph-osd-prepare-42fdf53e28e5f8f91945f982560011a3-5mlqn   0/1   Completed   0   17h
rook-ceph-osd-prepare-90c417e325953a4bb1a96ea237e474e2-hl8gs   0/1   Completed   0   17h
rook-ceph-osd-prepare-worker-0-data-0jtpn7-bt7ql               0/1   Completed   0   17h
rook-ceph-osd-prepare-worker-1-data-0kxwn9-ld6t9               0/1   Completed   0   17h
rook-ceph-osd-prepare-worker-2-data-05jq7k-sqqlj               0/1   Completed   0   17h
[root@rdr-cicd-odf-69bf-bastion-0 ~]#
[root@rdr-cicd-odf-69bf-bastion-0 ~]# oc get pvc | grep data
ocs-deviceset-localblock-0-data-07qcxj   Bound   local-pv-d215812c   500Gi   RWO   localblock   17h
ocs-deviceset-localblock-0-data-1wkjdp   Bound   local-pv-49015b6b   500Gi   RWO   localblock   17h
ocs-deviceset-localblock-0-data-2crrhx   Bound   local-pv-3ac6d77f   500Gi   RWO   localblock   17h
worker-0-data-0jtpn7                     Bound   local-pv-8a3b2355   500Gi   RWO   localblock   17h
worker-1-data-0kxwn9                     Bound   local-pv-e5de8aa9   500Gi   RWO   localblock   17h
worker-2-data-05jq7k                     Bound   local-pv-13390437   500Gi   RWO   localblock   17h
[root@rdr-cicd-odf-69bf-bastion-0 ~]#
[root@rdr-cicd-odf-69bf-bastion-0 ~]# oc rsh rook-ceph-tools-868cff5cf6-vszmr
sh-4.4$ ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME          STATUS  REWEIGHT  PRI-AFF
-1         1.46489  root default
-5         0.48830      host worker-0
 2    hdd  0.48830          osd.2          up   1.00000  1.00000
-7         0.48830      host worker-1
 0    hdd  0.48830          osd.0          up   1.00000  1.00000
-3         0.48830      host worker-2
 1    hdd  0.48830          osd.1          up   1.00000  1.00000
sh-4.4$
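The failing pool asks for deviceClass: worker-0 with replicated size 1, but ceph osd tree shows only three OSDs, all in the hdd device class, and no worker-0/1/2 classes at all. From the toolbox pod, the existing CRUSH device classes and rules can be listed to confirm this; these are standard Ceph commands, shown as a suggested check rather than output that was captured:

    ceph osd crush class ls
    ceph osd crush rule ls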
sh-4.4$ ceph osd pool ls detail
pool 1 'device_health_metrics' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 1 pgp_num 1 autoscale_mode on last_change 13 flags hashpspool stripe_width 0 pg_num_max 32 pg_num_min 1 application mgr_devicehealth
pool 2 'ocs-storagecluster-cephblockpool' replicated size 3 min_size 2 crush_rule 1 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 468 lfor 0/465/463 flags hashpspool,selfmanaged_snaps stripe_width 0 target_size_ratio 0.49 application rbd
pool 3 'ocs-storagecluster-cephobjectstore.rgw.buckets.index' replicated size 3 min_size 2 crush_rule 3 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 26 flags hashpspool stripe_width 0 application rook-ceph-rgw
pool 4 'ocs-storagecluster-cephobjectstore.rgw.meta' replicated size 3 min_size 2 crush_rule 4 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 25 flags hashpspool stripe_width 0 application rook-ceph-rgw
pool 5 'ocs-storagecluster-cephobjectstore.rgw.control' replicated size 3 min_size 2 crush_rule 8 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 26 flags hashpspool stripe_width 0 application rook-ceph-rgw
pool 6 '.rgw.root' replicated size 3 min_size 2 crush_rule 5 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 26 flags hashpspool stripe_width 0 application rook-ceph-rgw
pool 7 'ocs-storagecluster-cephobjectstore.rgw.otp' replicated size 3 min_size 2 crush_rule 6 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 26 flags hashpspool stripe_width 0 application rook-ceph-rgw
pool 8 'ocs-storagecluster-cephobjectstore.rgw.log' replicated size 3 min_size 2 crush_rule 7 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 26 flags hashpspool stripe_width 0 application rook-ceph-rgw
pool 9 'ocs-storagecluster-cephobjectstore.rgw.buckets.non-ec' replicated size 3 min_size 2 crush_rule 2 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 26 flags hashpspool stripe_width 0 application rook-ceph-rgw
pool 10 'ocs-storagecluster-cephfilesystem-metadata' replicated size 3 min_size 2 crush_rule 9 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 38 flags hashpspool stripe_width 0 pg_autoscale_bias 4 pg_num_min 16 recovery_priority 5 application cephfs
pool 11 'ocs-storagecluster-cephobjectstore.rgw.buckets.data' replicated size 3 min_size 2 crush_rule 10 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 469 flags hashpspool stripe_width 0 target_size_ratio 0.49 application rook-ceph-rgw
pool 12 'ocs-storagecluster-cephfilesystem-data0' replicated size 3 min_size 2 crush_rule 11 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 470 flags hashpspool stripe_width 0 target_size_ratio 0.49 application cephfs
sh-4.4$
[root@rdr-cicd-odf-69bf-bastion-0 ~]# oc get cm rook-ceph-operator-config -n openshift-storage -o yaml
apiVersion: v1
kind: ConfigMap
metadata:
  creationTimestamp: "2022-11-08T12:06:15Z"
  name: rook-ceph-operator-config
  namespace: openshift-storage
  resourceVersion: "111551"
  uid: e365976c-bc79-464d-aa04-d9816970b525
[root@rdr-cicd-odf-69bf-bastion-0 ~]#
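Two observations at this point: the pool list contains none of the per-host pools (only the default replica-3 pools exist), and the rook-ceph-operator-config ConfigMap has no data section, so the Rook operator is running with default settings. Six osd-prepare jobs completed but only three OSDs exist, which suggests the worker-N prepare jobs did not actually create OSDs for the new per-host device sets. The prepare pod log and the operator log should say why; the pod name below is taken from the listing above, and the log filters are only suggestions:

    oc logs -n openshift-storage rook-ceph-osd-prepare-worker-0-data-0jtpn7-bt7ql | tail -n 50
    oc logs -n openshift-storage deployment/rook-ceph-operator | grep -iE 'worker-0|cephblockpool'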
[root@rdr-cicd-odf-69bf-bastion-0 ~]# oc describe cephcluster ocs-storagecluster-cephcluster -n openshift-storage
Name:         ocs-storagecluster-cephcluster
Namespace:    openshift-storage
Labels:       app=ocs-storagecluster
Annotations:  <none>
API Version:  ceph.rook.io/v1
Kind:         CephCluster
Metadata:
  Creation Timestamp:  2022-11-08T12:08:27Z
  Finalizers:
    cephcluster.ceph.rook.io
  Generation:  2
  Managed Fields:
    API Version:  ceph.rook.io/v1
    Fields Type:  FieldsV1
    fieldsV1:
      f:metadata:
        f:finalizers:
          .:
          v:"cephcluster.ceph.rook.io":
    Manager:      rook
    Operation:    Update
    Time:         2022-11-08T12:08:27Z
    API Version:  ceph.rook.io/v1
    Fields Type:  FieldsV1
    fieldsV1:
      f:metadata:
        f:labels:
          .:
          f:app:
        f:ownerReferences:
          .:
          k:{"uid":"5508f168-bc46-4b84-89c5-b8d64a06776c"}:
      f:spec:
        .:
        f:cephVersion:
          .:
          f:image:
        f:cleanupPolicy:
          .:
          f:sanitizeDisks:
        f:continueUpgradeAfterChecksEvenIfNotHealthy:
        f:crashCollector:
        f:dashboard:
        f:dataDirHostPath:
        f:disruptionManagement:
          .:
          f:machineDisruptionBudgetNamespace:
          f:managePodBudgets:
        f:external:
        f:healthCheck:
          .:
          f:daemonHealth:
            .:
            f:mon:
            f:osd:
            f:status:
        f:labels:
          .:
          f:monitoring:
            .:
            f:rook.io/managedBy:
        f:logCollector:
          .:
          f:enabled:
          f:maxLogSize:
          f:periodicity:
        f:mgr:
          .:
          f:modules:
        f:mon:
          .:
          f:count:
        f:monitoring:
          .:
          f:enabled:
        f:network:
        f:placement:
          .:
          f:all:
            .:
            f:nodeAffinity:
              .:
              f:requiredDuringSchedulingIgnoredDuringExecution:
                .:
                f:nodeSelectorTerms:
            f:tolerations:
          f:arbiter:
            .:
            f:tolerations:
          f:mon:
            .:
            f:nodeAffinity:
              .:
              f:requiredDuringSchedulingIgnoredDuringExecution:
                .:
                f:nodeSelectorTerms:
            f:podAntiAffinity:
              .:
              f:requiredDuringSchedulingIgnoredDuringExecution:
        f:priorityClassNames:
          .:
          f:mgr:
          f:mon:
          f:osd:
        f:resources:
          .:
          f:mds:
            .:
            f:limits:
              .:
              f:cpu:
              f:memory:
            f:requests:
              .:
              f:cpu:
              f:memory:
          f:mgr:
            .:
            f:limits:
              .:
              f:cpu:
              f:memory:
            f:requests:
              .:
              f:cpu:
              f:memory:
          f:mon:
            .:
            f:limits:
              .:
              f:cpu:
              f:memory:
            f:requests:
              .:
              f:cpu:
              f:memory:
          f:rgw:
            .:
            f:limits:
              .:
              f:cpu:
              f:memory:
            f:requests:
              .:
              f:cpu:
              f:memory:
        f:security:
          .:
          f:kms:
        f:storage:
          .:
          f:storageClassDeviceSets:
    Manager:      ocs-operator
    Operation:    Update
    Time:         2022-11-08T12:30:31Z
    API Version:  ceph.rook.io/v1
    Fields Type:  FieldsV1
    fieldsV1:
      f:status:
        .:
        f:ceph:
          .:
          f:capacity:
            .:
            f:bytesAvailable:
            f:bytesTotal:
            f:bytesUsed:
            f:lastUpdated:
          f:fsid:
          f:health:
          f:lastChecked:
          f:versions:
            .:
            f:mds:
              .:
              f:ceph version 16.2.10-50.el8cp (f311fa3856a155d4cd9b658e25a78def0ae7a7c3) pacific (stable):
            f:mgr:
              .:
              f:ceph version 16.2.10-50.el8cp (f311fa3856a155d4cd9b658e25a78def0ae7a7c3) pacific (stable):
            f:mon:
              .:
              f:ceph version 16.2.10-50.el8cp (f311fa3856a155d4cd9b658e25a78def0ae7a7c3) pacific (stable):
            f:osd:
              .:
              f:ceph version 16.2.10-50.el8cp (f311fa3856a155d4cd9b658e25a78def0ae7a7c3) pacific (stable):
            f:overall:
              .:
              f:ceph version 16.2.10-50.el8cp (f311fa3856a155d4cd9b658e25a78def0ae7a7c3) pacific (stable):
            f:rgw:
              .:
              f:ceph version 16.2.10-50.el8cp (f311fa3856a155d4cd9b658e25a78def0ae7a7c3) pacific (stable):
        f:conditions:
        f:message:
        f:observedGeneration:
        f:phase:
        f:state:
        f:storage:
          .:
          f:deviceClasses:
        f:version:
          .:
          f:image:
          f:version:
    Manager:      rook
    Operation:    Update
    Subresource:  status
    Time:         2022-11-09T06:55:09Z
  Owner References:
    API Version:           ocs.openshift.io/v1
    Block Owner Deletion:  true
    Controller:            true
    Kind:                  StorageCluster
    Name:                  ocs-storagecluster
    UID:                   5508f168-bc46-4b84-89c5-b8d64a06776c
  Resource Version:        1033769
  UID:                     4e3d64c0-ee31-49d7-9bfd-2d7c70a60db4
Spec:
  Ceph Version:
    Image:  quay.io/rhceph-dev/rhceph@sha256:9b9d1dffa2254ee04f6d7628daa244e805637cf03420bad89545495fadb491d7
  Cleanup Policy:
    Sanitize Disks:
  Continue Upgrade After Checks Even If Not Healthy:  true
  Crash Collector:
  Dashboard:
  Data Dir Host Path:  /var/lib/rook
  Disruption Management:
    Machine Disruption Budget Namespace:  openshift-machine-api
    Manage Pod Budgets:                   true
  External:
  Health Check:
    Daemon Health:
      Mon:
      Osd:
      Status:
  Labels:
    Monitoring:
      rook.io/managedBy:  ocs-storagecluster
  Log Collector:
    Enabled:       true
    Max Log Size:  500Mi
    Periodicity:   daily
  Mgr:
    Modules:
      Enabled:  true
      Name:     pg_autoscaler
      Enabled:  true
      Name:     balancer
  Mon:
    Count:  3
  Monitoring:
    Enabled:  true
  Network:
  Placement:
    All:
      Node Affinity:
        Required During Scheduling Ignored During Execution:
          Node Selector Terms:
            Match Expressions:
              Key:       cluster.ocs.openshift.io/openshift-storage
              Operator:  Exists
      Tolerations:
        Effect:    NoSchedule
        Key:       node.ocs.openshift.io/storage
        Operator:  Equal
        Value:     true
    Arbiter:
      Tolerations:
        Effect:    NoSchedule
        Key:       node-role.kubernetes.io/master
        Operator:  Exists
    Mon:
      Node Affinity:
        Required During Scheduling Ignored During Execution:
          Node Selector Terms:
            Match Expressions:
              Key:       cluster.ocs.openshift.io/openshift-storage
              Operator:  Exists
      Pod Anti Affinity:
        Required During Scheduling Ignored During Execution:
          Label Selector:
            Match Expressions:
              Key:       app
              Operator:  In
              Values:
                rook-ceph-mon
          Topology Key:  kubernetes.io/hostname
  Priority Class Names:
    Mgr:  system-node-critical
    Mon:  system-node-critical
    Osd:  system-node-critical
  Resources:
    Mds:
      Limits:
        Cpu:     3
        Memory:  8Gi
      Requests:
        Cpu:     3
        Memory:  8Gi
    Mgr:
      Limits:
        Cpu:     1
        Memory:  3Gi
      Requests:
        Cpu:     1
        Memory:  3Gi
    Mon:
      Limits:
        Cpu:     1
        Memory:  2Gi
      Requests:
        Cpu:     1
        Memory:  2Gi
    Rgw:
      Limits:
        Cpu:     2
        Memory:  4Gi
      Requests:
        Cpu:     2
        Memory:  4Gi
  Security:
    Kms:
  Storage:
    Storage Class Device Sets:
      Count:  3
      Name:   ocs-deviceset-localblock-0
      Placement:
        Node Affinity:
          Required During Scheduling Ignored During Execution:
            Node Selector Terms:
              Match Expressions:
                Key:       cluster.ocs.openshift.io/openshift-storage
                Operator:  Exists
        Tolerations:
          Effect:    NoSchedule
          Key:       node.ocs.openshift.io/storage
          Operator:  Equal
          Value:     true
        Topology Spread Constraints:
          Label Selector:
            Match Expressions:
              Key:       ceph.rook.io/pvc
              Operator:  Exists
          Max Skew:            1
          Topology Key:        kubernetes.io/hostname
          When Unsatisfiable:  ScheduleAnyway
      Prepare Placement:
        Node Affinity:
          Required During Scheduling Ignored During Execution:
            Node Selector Terms:
              Match Expressions:
                Key:       cluster.ocs.openshift.io/openshift-storage
                Operator:  Exists
        Tolerations:
          Effect:    NoSchedule
          Key:       node.ocs.openshift.io/storage
          Operator:  Equal
          Value:     true
        Topology Spread Constraints:
          Label Selector:
            Match Expressions:
              Key:       ceph.rook.io/pvc
              Operator:  Exists
          Max Skew:            1
          Topology Key:        kubernetes.io/hostname
          When Unsatisfiable:  ScheduleAnyway
      Resources:
        Limits:
          Cpu:     2
          Memory:  5Gi
        Requests:
          Cpu:     2
          Memory:  5Gi
      Volume Claim Templates:
        Metadata:
          Annotations:
            Crush Device Class:  replicated
        Spec:
          Access Modes:
            ReadWriteOnce
          Resources:
            Requests:
              Storage:  100Gi
          Storage Class Name:  localblock
          Volume Mode:         Block
        Status:
      Count:  1
      Name:   worker-2
      Placement:
        Node Affinity:
          Required During Scheduling Ignored During Execution:
            Node Selector Terms:
              Match Expressions:
                Key:       kubernetes.io/hostname
                Operator:  In
                Values:
                  worker-2
        Tolerations:
          Effect:    NoSchedule
          Key:       node.ocs.openshift.io/storage
          Operator:  Equal
          Value:     true
      Prepare Placement:
        Node Affinity:
          Required During Scheduling Ignored During Execution:
            Node Selector Terms:
              Match Expressions:
                Key:       kubernetes.io/hostname
                Operator:  In
                Values:
                  worker-2
        Tolerations:
          Effect:    NoSchedule
          Key:       node.ocs.openshift.io/storage
          Operator:  Equal
          Value:     true
      Resources:
        Limits:
          Cpu:     2
          Memory:  5Gi
        Requests:
          Cpu:     2
          Memory:  5Gi
      Volume Claim Templates:
        Metadata:
          Annotations:
            Crush Device Class:  worker-2
        Spec:
          Access Modes:
            ReadWriteOnce
          Resources:
            Requests:
              Storage:  100Gi
          Storage Class Name:  localblock
          Volume Mode:         Block
        Status:
      Count:  1
      Name:   worker-0
      Placement:
        Node Affinity:
          Required During Scheduling Ignored During Execution:
            Node Selector Terms:
              Match Expressions:
                Key:       kubernetes.io/hostname
                Operator:  In
                Values:
                  worker-0
        Tolerations:
          Effect:    NoSchedule
          Key:       node.ocs.openshift.io/storage
          Operator:  Equal
          Value:     true
      Prepare Placement:
        Node Affinity:
          Required During Scheduling Ignored During Execution:
            Node Selector Terms:
              Match Expressions:
                Key:       kubernetes.io/hostname
                Operator:  In
                Values:
                  worker-0
        Tolerations:
          Effect:    NoSchedule
          Key:       node.ocs.openshift.io/storage
          Operator:  Equal
          Value:     true
      Resources:
        Limits:
          Cpu:     2
          Memory:  5Gi
        Requests:
          Cpu:     2
          Memory:  5Gi
      Volume Claim Templates:
        Metadata:
          Annotations:
            Crush Device Class:  worker-0
        Spec:
          Access Modes:
            ReadWriteOnce
          Resources:
            Requests:
              Storage:  100Gi
          Storage Class Name:  localblock
          Volume Mode:         Block
        Status:
      Count:  1
      Name:   worker-1
      Placement:
        Node Affinity:
          Required During Scheduling Ignored During Execution:
            Node Selector Terms:
              Match Expressions:
                Key:       kubernetes.io/hostname
                Operator:  In
                Values:
                  worker-1
        Tolerations:
          Effect:    NoSchedule
          Key:       node.ocs.openshift.io/storage
          Operator:  Equal
          Value:     true
      Prepare Placement:
        Node Affinity:
          Required During Scheduling Ignored During Execution:
            Node Selector Terms:
              Match Expressions:
                Key:       kubernetes.io/hostname
                Operator:  In
                Values:
                  worker-1
        Tolerations:
          Effect:    NoSchedule
          Key:       node.ocs.openshift.io/storage
          Operator:  Equal
          Value:     true
      Resources:
        Limits:
          Cpu:     2
          Memory:  5Gi
        Requests:
          Cpu:     2
          Memory:  5Gi
      Volume Claim Templates:
        Metadata:
          Annotations:
            Crush Device Class:  worker-1
        Spec:
          Access Modes:
            ReadWriteOnce
          Resources:
            Requests:
              Storage:  100Gi
          Storage Class Name:  localblock
          Volume Mode:         Block
        Status:
Status:
  Ceph:
    Capacity:
      Bytes Available:  1570786713600
      Bytes Total:      1610612736000
      Bytes Used:       39826022400
      Last Updated:     2022-11-09T06:55:07Z
    Fsid:          b8ab4bab-769b-495a-ab68-26cf669644e4
    Health:        HEALTH_OK
    Last Checked:  2022-11-09T06:55:07Z
    Versions:
      Mds:
        ceph version 16.2.10-50.el8cp (f311fa3856a155d4cd9b658e25a78def0ae7a7c3) pacific (stable):  2
      Mgr:
        ceph version 16.2.10-50.el8cp (f311fa3856a155d4cd9b658e25a78def0ae7a7c3) pacific (stable):  1
      Mon:
        ceph version 16.2.10-50.el8cp (f311fa3856a155d4cd9b658e25a78def0ae7a7c3) pacific (stable):  3
      Osd:
        ceph version 16.2.10-50.el8cp (f311fa3856a155d4cd9b658e25a78def0ae7a7c3) pacific (stable):  3
      Overall:
        ceph version 16.2.10-50.el8cp (f311fa3856a155d4cd9b658e25a78def0ae7a7c3) pacific (stable):  10
      Rgw:
        ceph version 16.2.10-50.el8cp (f311fa3856a155d4cd9b658e25a78def0ae7a7c3) pacific (stable):  1
  Conditions:
    Last Heartbeat Time:   2022-11-09T06:55:09Z
    Last Transition Time:  2022-11-08T12:11:35Z
    Message:               Cluster created successfully
    Reason:                ClusterCreated
    Status:                True
    Type:                  Ready
  Message:              Cluster created successfully
  Observed Generation:  2
  Phase:                Ready
  State:                Created
  Storage:
    Device Classes:
      Name:  hdd
  Version:
    Image:    quay.io/rhceph-dev/rhceph@sha256:9b9d1dffa2254ee04f6d7628daa244e805637cf03420bad89545495fadb491d7
    Version:  16.2.10-50
Events:  <none>
[root@rdr-cicd-odf-69bf-bastion-0 ~]#
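Summary of the state captured above: the CephCluster itself is Ready and HEALTH_OK, but status.storage.deviceClasses lists only hdd, even though the spec carries three extra per-host storageClassDeviceSets (worker-0/1/2) whose volume claim templates set the crushDeviceClass annotation to the node name. No OSDs were created in those device classes, which is the most likely reason the replica-1 CephBlockPools sit in Failure, the ocs-storagecluster-ceph-non-resilient-rbd StorageClass is skipped, and the StorageCluster stays in Progressing. Suggested follow-up checks (not captured output): from the toolbox,

    ceph osd df tree
    ceph osd crush class ls

and on the bastion, verify which local device backs each worker-N PV, in case it overlaps with a disk already consumed by osd.0-2; the PV name is taken from the PVC listing above, and this assumes the PVs are local-storage-operator volumes with a spec.local.path:

    oc get pv local-pv-8a3b2355 -o yaml | grep -A2 'local:'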