# Enabling and Validating Metadata on CephFS and RBD Volumes
# Global Setup
```
$ cd ceph-csi
$ CONTAINER_CMD="docker" VM_DRIVER="kvm2" ./scripts/minikube.sh up
$ ./scripts/minikube.sh deploy-rook
$ ./scripts/minikube.sh install-snapshotter
```
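Once the scripts finish, it is worth confirming that the Rook cluster is healthy before enabling anything. A quick check (this assumes the default `rook-ceph` namespace used throughout this walkthrough):
```
$ kubectl -n rook-ceph get cephcluster
$ kubectl -n rook-ceph get pods
```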
# CephFS Metadata
## Enabling configurations
Edit the Rook operator config to enable the CephFS CSI driver
```
$ EDITOR=vim kubectl edit cm rook-ceph-operator-config -nrook-ceph
ROOK_CSI_ENABLE_CEPHFS: "true"
```
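If you prefer a non-interactive change over an editor session, the same key can be set with a merge patch, for example:
```
$ kubectl -n rook-ceph patch cm rook-ceph-operator-config --type merge \
    -p '{"data":{"ROOK_CSI_ENABLE_CEPHFS":"true"}}'
```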
Wait for the CSI CephFS plugin pods to reach the Running state
```
$ kubectl -n rook-ceph get pods
NAME READY STATUS RESTARTS AGE
csi-cephfsplugin-kt88c 3/3 Running 0 95s
csi-cephfsplugin-provisioner-d98db5784-zg8xw 6/6 Running 0 95s
rook-ceph-mds-myfs-a-595bd569c5-4xb55 1/1 Running 0 36m
rook-ceph-mds-myfs-b-bdd9d8dd9-cdqg5 1/1 Running 0 36m
rook-ceph-mgr-a-7fffbc888-jc5sl 1/1 Running 0 37m
rook-ceph-mon-a-6997644d74-r4bb2 1/1 Running 0 37m
rook-ceph-operator-5fb667dc7-dhspb 1/1 Running 0 40m
rook-ceph-osd-0-5cff4c4664-9lvj8 1/1 Running 0 37m
rook-ceph-osd-prepare-minikube-cjw49 0/1 Completed 0 37m
rook-ceph-tools-756db8d758-rpddx 1/1 Running 0 40m
```
Edit the CephFS provisioner deployment to add the `--clustername`, `--setmetadata` and `--extra-create-metadata` options
```
$ KUBE_EDITOR=vim kubectl -n rook-ceph edit deployment/csi-cephfsplugin-provisioner
```
Add the below args to the `csi-provisioner` and `csi-snapshotter` containers
```
- args:
- --extra-create-metadata=true
```
Add the below args to the `csi-cephfsplugin` container
```
- args:
- --setmetadata=true
- --clustername=K8s-cluster1
```
Note: make sure the `csi-cephfsplugin-provisioner` pod restarts successfully.
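One way to wait for the rollout to complete:
```
$ kubectl -n rook-ceph rollout status deployment/csi-cephfsplugin-provisioner
```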
## New CephFS Subvolume
Now create a PVC
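For reference, the example `pvc.yaml` used below is roughly the following (a sketch; check the file in your `rook` checkout, as the upstream example may differ slightly):
```
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cephfs-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: rook-cephfs
```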
```
$ cd rook
$ cd deploy/examples/csi/cephfs/
$ kubectl create -f storageclass.yaml
storageclass.storage.k8s.io/rook-cephfs created
$ kubectl create -f pvc.yaml
persistentvolumeclaim/cephfs-pvc created
$ kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
cephfs-pvc Bound pvc-bffee445-bfac-4ad7-8b45-85866cbbf45d 1Gi RWO rook-cephfs 15s
```
Exec into the toolbox pod and verify the metadata created on the subvolume
```
$ kubectl -n rook-ceph exec -it pod/rook-ceph-tools-756db8d758-rpddx -- sh
sh-4.4$ ceph fs volume ls
[
{
"name": "myfs"
}
]
sh-4.4$ ceph fs subvolumegroup ls myfs
[
{
"name": "csi"
}
]
sh-4.4$ ceph fs subvolume ls myfs --group_name csi
[
{
"name": "csi-vol-fb5570c3-0eff-11ed-8b84-12bcc2015d90"
}
]
sh-4.4$ ceph fs subvolume metadata ls myfs csi-vol-fb5570c3-0eff-11ed-8b84-12bcc2015d90 --group_name=csi --format=json
{
"csi.ceph.com/cluster/name": "K8s-cluster1",
"csi.storage.k8s.io/pv/name": "pvc-bffee445-bfac-4ad7-8b45-85866cbbf45d",
"csi.storage.k8s.io/pvc/name": "cephfs-pvc",
"csi.storage.k8s.io/pvc/namespace": "default"
}
```
## CephFS Clones
Create a clone
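A clone is simply a new PVC whose `dataSource` points at an existing PVC; the example `pvc-clone.yaml` is roughly (a sketch, verify against your checkout):
```
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cephfs-pvc-clone
spec:
  storageClassName: rook-cephfs
  dataSource:
    name: cephfs-pvc
    kind: PersistentVolumeClaim
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
```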
```
$ kubectl create -f ./pvc-clone.yaml
persistentvolumeclaim/cephfs-pvc-clone created
$ kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
cephfs-pvc Bound pvc-bffee445-bfac-4ad7-8b45-85866cbbf45d 1Gi RWO rook-cephfs 10m
cephfs-pvc-clone Bound pvc-f265f2f6-f53b-4a09-92d9-2898244970a4 1Gi RWX rook-cephfs 1m37s
```
Validate the metadata on the cloned subvolume
```
sh-4.4$ ceph fs subvolume ls myfs --group_name csi
[
{
"name": "csi-vol-fb5570c3-0eff-11ed-8b84-12bcc2015d90"
},
{
"name": "csi-vol-2e0b90a9-0f01-11ed-8b84-12bcc2015d90"
}
]
sh-4.4$ ceph fs subvolume metadata ls myfs csi-vol-2e0b90a9-0f01-11ed-8b84-12bcc2015d90 --group_name=csi --format=json
{
"csi.ceph.com/cluster/name": "K8s-cluster1",
"csi.storage.k8s.io/pv/name": "pvc-f265f2f6-f53b-4a09-92d9-2898244970a4",
"csi.storage.k8s.io/pvc/name": "cephfs-pvc-clone",
"csi.storage.k8s.io/pvc/namespace": "default"
}
```
## CephFS Snapshots
Create a snapshot
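The example `snapshot.yaml` references the snapshot class created first and the source PVC; it is roughly (a sketch, verify against your checkout):
```
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: cephfs-pvc-snapshot
spec:
  volumeSnapshotClassName: csi-cephfsplugin-snapclass
  source:
    persistentVolumeClaimName: cephfs-pvc
```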
```
$ kubectl create -f ./snapshotclass.yaml
volumesnapshotclass.snapshot.storage.k8s.io/csi-cephfsplugin-snapclass created
$ kubectl create -f ./snapshot.yaml
volumesnapshot.snapshot.storage.k8s.io/cephfs-pvc-snapshot created
$ kubectl get volumesnapshot
NAME READYTOUSE SOURCEPVC SOURCESNAPSHOTCONTENT RESTORESIZE SNAPSHOTCLASS SNAPSHOTCONTENT CREATIONTIME AGE
cephfs-pvc-snapshot true cephfs-pvc 1Gi csi-cephfsplugin-snapclass snapcontent-9f31b8f4-3d13-4ca7-96e0-26e14cdc31ad 9s 10s
```
Exec into the toolbox pod and verify the metadata created on the subvolume snapshot
```
$ kubectl -n rook-ceph exec -it pod/rook-ceph-tools-756db8d758-rpddx -- sh
sh-4.4$ ceph fs subvolume snapshot ls myfs csi-vol-fb5570c3-0eff-11ed-8b84-12bcc2015d90 --group_name csi
[
{
"name": "csi-snap-809abdaa-0f01-11ed-8b84-12bcc2015d90"
}
]
sh-4.4$ ceph fs subvolume snapshot metadata ls myfs csi-vol-fb5570c3-0eff-11ed-8b84-12bcc2015d90 csi-snap-809abdaa-0f01-11ed-8b84-12bcc2015d90 --group_name=csi --format=json
{
"csi.ceph.com/cluster/name": "K8s-cluster1",
"csi.storage.k8s.io/volumesnapshot/name": "cephfs-pvc-snapshot",
"csi.storage.k8s.io/volumesnapshot/namespace": "default",
"csi.storage.k8s.io/volumesnapshotcontent/name": "snapcontent-9f31b8f4-3d13-4ca7-96e0-26e14cdc31ad"
}
```
## CephFS Restore
Restore volume from snapshot
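A restore is a new PVC whose `dataSource` points at the VolumeSnapshot; the example `pvc-restore.yaml` is roughly (a sketch, verify against your checkout):
```
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cephfs-pvc-restore
spec:
  storageClassName: rook-cephfs
  dataSource:
    name: cephfs-pvc-snapshot
    kind: VolumeSnapshot
    apiGroup: snapshot.storage.k8s.io
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
```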
```
$ kubectl create -f pvc-restore.yaml
persistentvolumeclaim/cephfs-pvc-restore created
$ kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
cephfs-pvc Bound pvc-bffee445-bfac-4ad7-8b45-85866cbbf45d 1Gi RWO rook-cephfs 15m
cephfs-pvc-clone Bound pvc-f265f2f6-f53b-4a09-92d9-2898244970a4 1Gi RWX rook-cephfs 6m37s
cephfs-pvc-restore Bound pvc-00c8c65c-bd7b-4c04-9fb7-04f8d863268f 1Gi RWX rook-cephfs 80s
```
Exec into the toolbox pod and validate the metadata on the restored subvolume
```
$ kubectl -n rook-ceph exec -it pod/rook-ceph-tools-756db8d758-rpddx -- sh
sh-4.4$ ceph fs subvolume ls myfs --group_name csi
[
{
"name": "csi-vol-fb5570c3-0eff-11ed-8b84-12bcc2015d90"
},
{
"name": "csi-vol-2e0b90a9-0f01-11ed-8b84-12bcc2015d90"
},
{
"name": "csi-vol-eb22f1e4-0f01-11ed-8b84-12bcc2015d90"
}
]
sh-4.4$ ceph fs subvolume metadata ls myfs csi-vol-eb22f1e4-0f01-11ed-8b84-12bcc2015d90 --group_name=csi --format=json
{
"csi.ceph.com/cluster/name": "K8s-cluster1",
"csi.storage.k8s.io/pv/name": "pvc-00c8c65c-bd7b-4c04-9fb7-04f8d863268f",
"csi.storage.k8s.io/pvc/name": "cephfs-pvc-restore",
"csi.storage.k8s.io/pvc/namespace": "default"
}
```
# RBD Metadata
## Enabling configurations
Edit the Rook operator config to enable the RBD CSI driver
```
$ EDITOR=vim kubectl edit cm rook-ceph-operator-config -nrook-ceph
ROOK_CSI_ENABLE_RBD: "true"
```
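To double-check that the key was applied without opening the editor again:
```
$ kubectl -n rook-ceph get cm rook-ceph-operator-config \
    -o jsonpath='{.data.ROOK_CSI_ENABLE_RBD}'
```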
Wait for the CSI RBD plugin pods to reach the Running state
```
$ kubectl -n rook-ceph get pods
NAME READY STATUS RESTARTS AGE
csi-rbdplugin-provisioner-85776b65fb-dfpdq 6/6 Running 0 35s
csi-rbdplugin-r2ck2 3/3 Running 0 2m
rook-ceph-mds-myfs-a-74c57f75f5-7bgqr 1/1 Running 0 24m
rook-ceph-mds-myfs-b-778f8d7f65-xhxlk 1/1 Running 0 24m
rook-ceph-mgr-a-68c468fc68-jp26g 1/1 Running 0 25m
rook-ceph-mon-a-56cc95c6b-47xbd 1/1 Running 0 25m
rook-ceph-operator-5fb667dc7-77cgk 1/1 Running 0 27m
rook-ceph-osd-0-67799d9bc6-dc26k 1/1 Running 0 25m
rook-ceph-osd-prepare-minikube-2mxnq 0/1 Completed 0 25m
rook-ceph-tools-756db8d758-7cqml 1/1 Running 0 27m
```
Edit the RBD provisioner deployment to add the `--clustername`, `--setmetadata` and `--extra-create-metadata` options
```
$ KUBE_EDITOR=vim kubectl -n rook-ceph edit deployment/csi-rbdplugin-provisioner
```
Add the below args to the `csi-provisioner` and `csi-snapshotter` containers
```
- args:
- --extra-create-metadata=true
```
Add the below args to the `csi-rbdplugin` container
```
- args:
- --setmetadata=true
- --clustername=K8s-cluster1
```
Note: make sure the `csi-rbdplugin-provisioner` pod restarts successfully.
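To confirm the new arguments actually landed on the `csi-rbdplugin` container, one way is:
```
$ kubectl -n rook-ceph get deployment csi-rbdplugin-provisioner \
    -o jsonpath='{.spec.template.spec.containers[?(@.name=="csi-rbdplugin")].args}'
```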
## New RBD Image
Now create a PVC
```
$ cd rook
$ cd deploy/examples/csi/rbd/
$ kubectl create -f storageclass.yaml
storageclass.storage.k8s.io/rook-ceph-block created
$ kubectl create -f pvc.yaml
persistentvolumeclaim/rbd-pvc created
$ kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
rbd-pvc Bound pvc-8a6ba1ff-064e-455d-95b6-87dbe40996f7 1Gi RWO rook-ceph-block 7s
```
Exec into the toolbox pod and verify the metadata created on the RBD image
```
$ kubectl -n rook-ceph exec -it pod/rook-ceph-tools-756db8d758-7cqml -- sh
sh-4.4$ rbd ls replicapool
csi-vol-ba0a7ccc-0f24-11ed-ab4d-5e339a195498
sh-4.4$ rbd image-meta ls replicapool/csi-vol-ba0a7ccc-0f24-11ed-ab4d-5e339a195498
There are 4 metadata on this image:
Key Value
csi.ceph.com/cluster/name "K8s-cluster1"
csi.storage.k8s.io/pv/name pvc-8a6ba1ff-064e-455d-95b6-87dbe40996f7
csi.storage.k8s.io/pvc/name rbd-pvc
csi.storage.k8s.io/pvc/namespace default
sh-4.4$
```
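A single key can also be read directly with `rbd image-meta get`, which should print `rbd-pvc` for the key below:
```
sh-4.4$ rbd image-meta get replicapool/csi-vol-ba0a7ccc-0f24-11ed-ab4d-5e339a195498 csi.storage.k8s.io/pvc/name
rbd-pvc
```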
## RBD Clones
Create a clone
```
$ kubectl create -f ./pvc-clone.yaml
persistentvolumeclaim/rbd-pvc-clone created
$ kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
rbd-pvc Bound pvc-8a6ba1ff-064e-455d-95b6-87dbe40996f7 1Gi RWO rook-ceph-block 2m21s
rbd-pvc-clone Bound pvc-a7cc5bd2-6674-4693-8a5d-47928c1407f9 1Gi RWO rook-ceph-block 9s
```
Exec into the toolbox pod and validate the metadata on the cloned image
```
$ kubectl -n rook-ceph exec -it pod/rook-ceph-tools-756db8d758-7cqml -- sh
sh-4.4$ rbd ls replicapool
csi-vol-08baab3d-0f25-11ed-ab4d-5e339a195498
csi-vol-08baab3d-0f25-11ed-ab4d-5e339a195498-temp
csi-vol-ba0a7ccc-0f24-11ed-ab4d-5e339a195498
sh-4.4$ rbd image-meta ls replicapool/csi-vol-08baab3d-0f25-11ed-ab4d-5e339a195498
There are 4 metadata on this image:
Key Value
csi.ceph.com/cluster/name "K8s-cluster1"
csi.storage.k8s.io/pv/name pvc-a7cc5bd2-6674-4693-8a5d-47928c1407f9
csi.storage.k8s.io/pvc/name rbd-pvc-clone
csi.storage.k8s.io/pvc/namespace default
```
## RBD Snapshots
Create a snapshot
```
$ kubectl create -f ./snapshotclass.yaml
volumesnapshotclass.snapshot.storage.k8s.io/csi-rbdplugin-snapclass created
$ kubectl create -f ./snapshot.yaml
volumesnapshot.snapshot.storage.k8s.io/rbd-pvc-snapshot created
$ kubectl get volumesnapshot
NAME READYTOUSE SOURCEPVC SOURCESNAPSHOTCONTENT RESTORESIZE SNAPSHOTCLASS SNAPSHOTCONTENT CREATIONTIME AGE
rbd-pvc-snapshot true rbd-pvc 1Gi csi-rbdplugin-snapclass snapcontent-bd30b942-1f6c-4157-aa0b-f2264d589bbd 5s 6s
```
Exec into the toolbox pod and verify the metadata created on the RBD snapshot
```
$ kubectl -n rook-ceph exec -it pod/rook-ceph-tools-756db8d758-7cqml -- sh
sh-4.4$ rbd ls replicapool
csi-snap-b40fd669-0f25-11ed-ab4d-5e339a195498
csi-vol-08baab3d-0f25-11ed-ab4d-5e339a195498
csi-vol-08baab3d-0f25-11ed-ab4d-5e339a195498-temp
csi-vol-ba0a7ccc-0f24-11ed-ab4d-5e339a195498
sh-4.4$ rbd image-meta ls replicapool/csi-snap-b40fd669-0f25-11ed-ab4d-5e339a195498
There are 4 metadata on this image:
Key Value
csi.ceph.com/cluster/name "K8s-cluster1"
csi.storage.k8s.io/volumesnapshot/name rbd-pvc-snapshot
csi.storage.k8s.io/volumesnapshot/namespace default
csi.storage.k8s.io/volumesnapshotcontent/name snapcontent-bd30b942-1f6c-4157-aa0b-f2264d589bbd
```
## RBD Restore
Restore volume from snapshot
```
$ kubectl create -f ./pvc-restore.yaml
persistentvolumeclaim/rbd-pvc-restore created
$ kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
rbd-pvc Bound pvc-8a6ba1ff-064e-455d-95b6-87dbe40996f7 1Gi RWO rook-ceph-block 12m
rbd-pvc-clone Bound pvc-a7cc5bd2-6674-4693-8a5d-47928c1407f9 1Gi RWO rook-ceph-block 10m
rbd-pvc-restore Bound pvc-1855a4ca-b548-4544-b09b-4d5405319ad9 1Gi RWO rook-ceph-block 6s
```
Exec into the toolbox pod and validate the metadata on the restored RBD image
```
$ kubectl -n rook-ceph exec -it pod/rook-ceph-tools-756db8d758-7cqml -- sh
sh-4.4$ rbd ls replicapool
csi-snap-b40fd669-0f25-11ed-ab4d-5e339a195498
csi-vol-08baab3d-0f25-11ed-ab4d-5e339a195498
csi-vol-08baab3d-0f25-11ed-ab4d-5e339a195498-temp
csi-vol-8694d230-0f26-11ed-ab4d-5e339a195498
csi-vol-ba0a7ccc-0f24-11ed-ab4d-5e339a195498
sh-4.4$ rbd image-meta ls replicapool/csi-vol-8694d230-0f26-11ed-ab4d-5e339a195498
There are 4 metadata on this image:
Key Value
csi.ceph.com/cluster/name "K8s-cluster1"
csi.storage.k8s.io/pv/name pvc-1855a4ca-b548-4544-b09b-4d5405319ad9
csi.storage.k8s.io/pvc/name rbd-pvc-restore
csi.storage.k8s.io/pvc/namespace default
```
## RBD Reattach functionality
This feature updates the image metadata when a PVC is deleted while its PV is retained (`persistentVolumeReclaimPolicy: Retain` set on the PV) and a freshly created PVC is attached to the old PV.
To avoid further confusion, delete all the PVCs/snapshots created so far and start fresh
```
$ kubectl delete -f ./pvc-restore.yaml
persistentvolumeclaim "rbd-pvc-restore" deleted
$ kubectl delete -f ./snapshot.yaml
volumesnapshot.snapshot.storage.k8s.io "rbd-pvc-snapshot" deleted
$ kubectl delete -f ./pvc-clone.yaml
persistentvolumeclaim "rbd-pvc-clone" deleted
$ kubectl delete -f ./pvc.yaml
persistentvolumeclaim "rbd-pvc" deleted
```
Edit the Rook Ceph config to enable the OMAP generator, which is required for reattach to work.
```
$ EDITOR=vim kubectl edit cm rook-ceph-operator-config -nrook-ceph
CSI_ENABLE_OMAP_GENERATOR: "true"
```
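Once the operator reconciles, the `csi-rbdplugin-provisioner` deployment should gain an OMAP-generator sidecar. One way to check the container list (the sidecar is named `csi-omap-generator` in the Rook versions I have seen, so verify against yours):
```
$ kubectl -n rook-ceph get deployment csi-rbdplugin-provisioner \
    -o jsonpath='{.spec.template.spec.containers[*].name}'
```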
Create a PVC
```
$ kubectl create -f ./pvc.yaml
persistentvolumeclaim/rbd-pvc created
$ kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
rbd-pvc Bound pvc-8bd008ac-dfda-47f8-8076-93fb09f6b268 1Gi RWO rook-ceph-block 6s
```
Note the metadata details carefully
```
sh-4.4$ rbd ls replicapool
csi-vol-31a24ba6-0f2c-11ed-950e-32969afc11f4
sh-4.4$ rbd image-meta ls replicapool/csi-vol-31a24ba6-0f2c-11ed-950e-32969afc11f4
There are 4 metadata on this image:
Key Value
csi.ceph.com/cluster/name K8s-cluster1
csi.storage.k8s.io/pv/name pvc-8bd008ac-dfda-47f8-8076-93fb09f6b268
csi.storage.k8s.io/pvc/name rbd-pvc
csi.storage.k8s.io/pvc/namespace default
sh-4.4$
```
Make sure to set `persistentVolumeReclaimPolicy: Retain` on the PV
```
$ KUBE_EDITOR=vim kubectl edit pv/pvc-8bd008ac-dfda-47f8-8076-93fb09f6b268
persistentVolumeReclaimPolicy: Retain
$ kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-8bd008ac-dfda-47f8-8076-93fb09f6b268 1Gi RWO Retain Bound default/rbd-pvc rook-ceph-block 110s
```
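The same change can also be applied with a patch instead of an editor:
```
$ kubectl patch pv pvc-8bd008ac-dfda-47f8-8076-93fb09f6b268 \
    -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
```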
Delete the PVC now
```
$ kubectl delete -f ./pvc.yaml
persistentvolumeclaim "rbd-pvc" deleted
```
The PV still persists because we set the reclaim policy to Retain; note that the PV is in the Released state
```
$ kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-8bd008ac-dfda-47f8-8076-93fb09f6b268 1Gi RWO Retain Released default/rbd-pvc rook-ceph-block 4m4s
```
Edit the PV and manually remove the entire `claimRef` section from the PV object
```
$ KUBE_EDITOR=vim kubectl edit pv/pvc-8bd008ac-dfda-47f8-8076-93fb09f6b268
- claimRef:
- apiVersion: v1
- kind: PersistentVolumeClaim
- name: rbd-pvc
- namespace: default
- resourceVersion: "9150"
- uid: 8bd008ac-dfda-47f8-8076-93fb09f6b268
```
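Alternatively, the `claimRef` can be dropped with a JSON patch instead of editing by hand:
```
$ kubectl patch pv pvc-8bd008ac-dfda-47f8-8076-93fb09f6b268 \
    --type json -p '[{"op": "remove", "path": "/spec/claimRef"}]'
```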
Edit the PVC YAML and give it a new name; in my case I changed it to `rbd-pvc2`.
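A sketch of the renamed claim, assuming the same size and storage class as the original example:
```
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-pvc2
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: rook-ceph-block
```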
Now create the new PVC and note that the same old PV is bound to the newly created PVC
```
$ kubectl create -f ./pvc.yaml
persistentvolumeclaim/rbd-pvc2 created
$ kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
rbd-pvc2 Bound pvc-8bd008ac-dfda-47f8-8076-93fb09f6b268 1Gi RWO rook-ceph-block 6s
$ kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-8bd008ac-dfda-47f8-8076-93fb09f6b268 1Gi RWO Retain Bound default/rbd-pvc2 rook-ceph-block 12m
```
Validate the metadata and confirm that `pvc/name` has been updated to `rbd-pvc2`
```
$ kubectl -n rook-ceph exec -it pod/rook-ceph-tools-756db8d758-7cqml -- sh
sh-4.4$ rbd image-meta ls replicapool/csi-vol-31a24ba6-0f2c-11ed-950e-32969afc11f4
There are 4 metadata on this image:
Key Value
csi.ceph.com/cluster/name K8s-cluster1
csi.storage.k8s.io/pv/name pvc-8bd008ac-dfda-47f8-8076-93fb09f6b268
csi.storage.k8s.io/pvc/name rbd-pvc2
csi.storage.k8s.io/pvc/namespace default
sh-4.4$
```