# CSI operator CRD
# Single CRD/struct for everything
```go
package v1
type CSIDriverConfig struct {
Common struct{
EnableHostNetwork bool `json:"enableHostNetwork"`
DisableHolderPods bool `json:"disableHolderPods"`
EnableMetadata bool `json:"enableMetadata"`
ClusterName string `json:"clusterName"`
LogLevel string `json:"logLevel"`
SidecarLogLevel string `json:"sidecarLogLevel"`
DriverNamePrefix string `json:"driverNamePrefix"`
ProvisionerReplicas int `json:"provisionerReplicas"`
EnableSnapshotter bool `json:"enableSnapshotter"`
EnableVolumeGroupSnapshot bool `json:"enableVolumeGroupSnapshot"`
ForceKernelClient bool `json:"forceKernelClient"`
FsgroupPolicy string `json:"fsgroupPolicy"`
PluginPriorityClassName string `json:"pluginPriorityClassName"`
ProvisionerPriorityClassName string `json:"provisionerPriorityClassName"`
ProvisionerResource []struct {
Name string `json:"name"`
Resource struct {
Requests struct {
Memory string `json:"memory"`
CPU string `json:"cpu"`
} `json:"requests"`
Limits struct {
Memory string `json:"memory"`
CPU string `json:"cpu"`
} `json:"limits"`
} `json:"resource"`
} `json:"provisionerResource"`
}`json:"common"`
CephFS struct {
Enabled bool `json:"enabled"`
EnableEncryption bool `json:"enableEncryption"`
EnableHostNetwork bool `json:"enableHostNetwork"`
DisableHolderPods bool `json:"disableHolderPods"`
EnableMetadata bool `json:"enableMetadata"`
ClusterName string `json:"clusterName"`
LogLevel string `json:"logLevel"`
SidecarLogLevel string `json:"sidecarLogLevel"`
DriverNamePrefix string `json:"driverNamePrefix"`
ProvisionerReplicas int `json:"provisionerReplicas"`
EnableSnapshotter bool `json:"enableSnapshotter"`
EnableVolumeGroupSnapshot bool `json:"enableVolumeGroupSnapshot"`
ForceKernelClient bool `json:"forceKernelClient"`
FsgroupPolicy string `json:"fsgroupPolicy"`
PluginPriorityClassName string `json:"pluginPriorityClassName"`
ProvisionerPriorityClassName string `json:"provisionerPriorityClassName"`
ProvisionerResource []struct {
Name string `json:"name"`
Resource struct {
Requests struct {
Memory string `json:"memory"`
CPU string `json:"cpu"`
} `json:"requests"`
Limits struct {
Memory string `json:"memory"`
CPU string `json:"cpu"`
} `json:"limits"`
} `json:"resource"`
} `json:"provisionerResource"`
} `json:"cephfs"`
RBD struct {
Enabled bool `json:"enabled"`
EnableSnapshotter bool `json:"enableSnapshotter"`
ProvisionerReplicas int `json:"provisionerReplicas"`
OmapGeneratorEnabled bool `json:"omapGeneratorEnabled"`
EnableVolumeGroupSnapshot bool `json:"enableVolumeGroupSnapshot"`
FsgroupPolicy string `json:"fsgroupPolicy"`
PluginPriorityClassName string `json:"pluginPriorityClassName"`
ProvisionerPriorityClassName string `json:"provisionerPriorityClassName"`
ProvisionerResource []struct {
Name string `json:"name"`
Resource struct {
Requests struct {
Memory string `json:"memory"`
CPU string `json:"cpu"`
} `json:"requests"`
Limits struct {
Memory string `json:"memory"`
CPU string `json:"cpu"`
} `json:"limits"`
} `json:"resource"`
} `json:"provisionerResource"`
} `json:"rbd"`
NFS struct {
Enabled bool `json:"enabled"`
ProvisionerReplicas int `json:"provisionerReplicas"`
EnableSnapshotter bool `json:"enableSnapshotter"`
FsgroupPolicy string `json:"fsgroupPolicy"`
PluginPriorityClassName string `json:"pluginPriorityClassName"`
ProvisionerPriorityClassName string `json:"provisionerPriorityClassName"`
ProvisionerResource []struct {
Name string `json:"name"`
Resource struct {
Requests struct {
Memory string `json:"memory"`
CPU string `json:"cpu"`
} `json:"requests"`
Limits struct {
Memory string `json:"memory"`
CPU string `json:"cpu"`
} `json:"limits"`
} `json:"resource"`
} `json:"provisionerResource"`
} `json:"nfs"`
}
```
# Separate CRD/struct for everything
```go
package v1
type CephFSDriverConfig struct {
EnableEncryption bool `json:"enableEncryption"`
EnableHostNetwork bool `json:"enableHostNetwork"`
DisableHolderPods bool `json:"disableHolderPods"`
EnableMetadata bool `json:"enableMetadata"`
ClusterName string `json:"clusterName"`
LogLevel string `json:"logLevel"`
SidecarLogLevel string `json:"sidecarLogLevel"`
DriverNamePrefix string `json:"driverNamePrefix"`
ProvisionerReplicas int `json:"provisionerReplicas"`
EnableSnapshotter bool `json:"enableSnapshotter"`
EnableVolumeGroupSnapshot bool `json:"enableVolumeGroupSnapshot"`
ForceKernelClient bool `json:"forceKernelClient"`
FsgroupPolicy string `json:"fsgroupPolicy"`
PluginPriorityClassName string `json:"pluginPriorityClassName"`
ProvisionerPriorityClassName string `json:"provisionerPriorityClassName"`
ProvisionerResource []struct {
Name string `json:"name"`
Resource struct {
Requests struct {
Memory string `json:"memory"`
CPU string `json:"cpu"`
} `json:"requests"`
Limits struct {
Memory string `json:"memory"`
CPU string `json:"cpu"`
} `json:"limits"`
} `json:"resource"`
} `json:"provisionerResource"`
}
}
type RBDDriverConfig struct {
EnableSnapshotter bool `json:"enableSnapshotter"`
ProvisionerReplicas int `json:"provisionerReplicas"`
OmapGeneratorEnabled bool `json:"omapGeneratorEnabled"`
EnableVolumeGroupSnapshot bool `json:"enableVolumeGroupSnapshot"`
FsgroupPolicy string `json:"fsgroupPolicy"`
PluginPriorityClassName string `json:"pluginPriorityClassName"`
ProvisionerPriorityClassName string `json:"provisionerPriorityClassName"`
ProvisionerResource []struct {
Name string `json:"name"`
Resource struct {
Requests struct {
Memory string `json:"memory"`
CPU string `json:"cpu"`
} `json:"requests"`
Limits struct {
Memory string `json:"memory"`
CPU string `json:"cpu"`
} `json:"limits"`
} `json:"resource"`
} `json:"provisionerResource"`
}
}
type NFSDriverConfig struct{
ProvisionerReplicas int `json:"provisionerReplicas"`
EnableSnapshotter bool `json:"enableSnapshotter"`
FsgroupPolicy string `json:"fsgroupPolicy"`
PluginPriorityClassName string `json:"pluginPriorityClassName"`
ProvisionerPriorityClassName string `json:"provisionerPriorityClassName"`
ProvisionerResource []struct {
Name string `json:"name"`
Resource struct {
Requests struct {
Memory string `json:"memory"`
CPU string `json:"cpu"`
} `json:"requests"`
Limits struct {
Memory string `json:"memory"`
CPU string `json:"cpu"`
} `json:"limits"`
} `json:"resource"`
} `json:"provisionerResource"`
}
}
```
```go
package main
import (
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// CSIDriver is the top-level custom resource reconciled by the operator.
// NOTE(review): the draft embedded metav1.ListMeta here; a single custom
// resource object must embed metav1.ObjectMeta (name, namespace, labels,
// ...) — ListMeta belongs only on List types.
type CSIDriver struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec is the desired state of the CSI drivers.
	Spec Spec `json:"spec"`
	// Status reports the observed per-driver state.
	Status DriverStatus `json:"status,omitempty"`
}
// Spec holds the operator-wide defaults plus per-driver sections.
type Spec struct {
	// EnableMetadata toggles volume-metadata support; bool for consistency
	// with the other CRD sketches in this document (the draft had string).
	EnableMetadata            bool   `json:"enableMetadata"`
	ClusterName               string `json:"clusterName"`
	LogLevel                  int    `json:"logLevel"`
	SidecarLogLevel           int    `json:"sidecarLogLevel"`
	DriverNamePrefix          string `json:"driverNamePrefix"`
	ProvisionerReplica        int    `json:"provisionerReplica"`
	OmapGenerator             bool   `json:"omapGenerator"`
	EnableSnapshotter         bool   `json:"enableSnapshotter"`
	EnableVolumeGroupSnapshot bool   `json:"enableVolumeGroupSnapshot"`
	// FsGroupPolicy — fixed the "FsGoupPolicy"/"fsGoupPolicy" typo; the
	// YAML examples in this document use "fsGroupPolicy".
	FsGroupPolicy    string `json:"fsGroupPolicy"`
	AllowUnsupported bool   `json:"allowUnsupported"`
	SelinuxHostMount bool   `json:"selinuxHostMount"`
	GrpcTimeout      string `json:"grpcTimeout"`

	// Default provisioner/plugin/addons settings applied to all drivers.
	Provisioner Provisioner `json:"provisioner"`
	Plugin      Plugin      `json:"plugin"`
	Csiaddons   CSIAddons   `json:"csiaddons"`

	// Per-driver sections; tags normalized to all-lowercase ("cephfs",
	// "rbd", "nfs") to match the YAML examples elsewhere in this design
	// (the draft had "cephFS" and "nFS").
	CephFS CephFS `json:"cephfs"`
	RBD    RBD    `json:"rbd"`
	NFS    NFS    `json:"nfs"`
}
// CSIAddons configures the optional csi-addons component.
type CSIAddons struct {
// Enable toggles csi-addons support.
Enable bool `json:"enable"`
// Port used by csi-addons — presumably the sidecar endpoint port; the
// exact semantics are not shown in this sketch, confirm before freezing.
Port int `json:"port"`
}
// CephFS holds the CephFS-specific driver settings within Spec.
type CephFS struct {
// Provisioner and Plugin override the Spec-level defaults for this driver.
Provisioner Provisioner `json:"provisioner"`
Plugin Plugin `json:"plugin"`
// Enable turns the CephFS driver deployment on or off.
Enable bool `json:"enable"`
// UseKernelClient selects the kernel CephFS client — presumably as opposed
// to FUSE; confirm against cephcsi's forceCephFsKernelClient option.
UseKernelClient bool `json:"useKernelClient"`
// KernelMountOption is passed to the kernel mount (see the YAML example
// "kernelMountOptions: ms_mode=secure" later in this document).
KernelMountOption string `json:"kernelMountOption"`
}
// RBD holds the RBD-specific driver settings within Spec.
type RBD struct {
	// Provisioner and Plugin override the Spec-level defaults for this driver.
	Provisioner Provisioner `json:"provisioner"`
	Plugin      Plugin      `json:"plugin"`
	// Enable turns the RBD driver deployment on or off. Changed to bool for
	// consistency with the CephFS and NFS sections (the draft had string).
	Enable bool `json:"enable"`
}
// NFS holds the NFS-specific driver settings within Spec.
type NFS struct {
// Provisioner and Plugin override the Spec-level defaults for this driver.
Provisioner Provisioner `json:"provisioner"`
Plugin Plugin `json:"plugin"`
// Enable turns the NFS driver deployment on or off.
Enable bool `json:"enable"`
}
// Provisioner describes the provisioner Deployment: sidecar images,
// scheduling constraints, and per-sidecar overrides.
type Provisioner struct {
	ServiceAccountName string        `json:"serviceAccountName,omitempty"`
	CephCSIImage       string        `json:"cephCSIImage"`
	ResizerImage       string        `json:"resizerImage"`
	ProvisionerImage   string        `json:"provisionerImage"`
	SnapshotterImage   string        `json:"snapshotterImage"`
	AttacherImage      string        `json:"attacherImage"`
	PullPolicy         v1.PullPolicy `json:"pullPolicy"`
	PriorityClassName  string        `json:"priorityClassName"`

	SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"`
	Labels          map[string]string      `json:"labels,omitempty"`
	Toleration      []v1.Toleration        `json:"tolerations,omitempty"`
	// Affinity is a pointer so that `omitempty` can actually elide it and
	// to match the Plugin struct; encoding/json never treats a non-pointer
	// struct as empty.
	Affinity       *v1.Affinity            `json:"affinity,omitempty"`
	Resources      v1.ResourceRequirements `json:"resources,omitempty"`
	LeaderElection LeaderElection          `json:"leaderElection,omitempty"`

	// Per-sidecar overrides for security context / resources.
	CSIProvisioner sideCarContainer `json:"csiProvisioner,omitempty"`
	CSIAttacher    sideCarContainer `json:"csiAttacher,omitempty"`
	CSISnapshotter sideCarContainer `json:"csiSnapshotter,omitempty"`
	CSIResizer     sideCarContainer `json:"csiResizer,omitempty"`
	CephCSI        container        `json:"cephcsi,omitempty"`
}
// container holds the tunables exposed for a single container in a CSI
// pod: its security context and its resource requirements.
type container struct {
SecurityContext *v1.SecurityContext `json:"securityContext,omitempty"`
Resources v1.ResourceRequirements `json:"resources,omitempty"`
}
type LeaderElection struct {
Enable bool `json:"enable"`
Duration string `json:"Duration"`
RenewDeadline string `json:"Deadline"`
RetryPeriod string `json:"RetryPeriod"`
}
// sideCarContainer extends the per-container tunables with leader-election
// settings, for sidecars that run an election (used by the CSIProvisioner,
// CSIAttacher, CSISnapshotter and CSIResizer fields of Provisioner).
type sideCarContainer struct {
LeaderElection LeaderElection `json:"leaderElection,omitempty"`
SecurityContext *v1.SecurityContext `json:"securityContext,omitempty"`
Resources v1.ResourceRequirements `json:"resources,omitempty"`
}
// Plugin describes the per-node plugin DaemonSet.
type Plugin struct {
	ServiceAccountName string `json:"serviceAccountName,omitempty"`
	// RegistrarImage is the node-driver-registrar sidecar image — fixed the
	// "RegistratImage"/"registratImage" typo from the draft.
	RegistrarImage    string                         `json:"registrarImage"`
	CephCSIImage      string                         `json:"cephCSIImage"`
	PullPolicy        v1.PullPolicy                  `json:"pullPolicy"`
	PriorityClassName string                         `json:"priorityClassName"`
	UpdateStrategy    appsv1.DaemonSetUpdateStrategy `json:"updateStrategy,omitempty"`
	KubeletDirPath    string                         `json:"kubeletDirPath"`
	// TopologyLabels tag normalized to lowerCamel ("topologyLabels"; the
	// draft had "topologylabels").
	TopologyLabels map[string]string `json:"topologyLabels,omitempty"`
	Annotations    map[string]string `json:"annotations,omitempty"`
	Labels         map[string]string `json:"labels,omitempty"`

	SecurityContext *v1.PodSecurityContext  `json:"securityContext,omitempty"`
	Volumes         []v1.Volume             `json:"volumes,omitempty"`
	VolumeMount     []v1.VolumeMount        `json:"volumeMounts,omitempty"`
	Toleration      []v1.Toleration         `json:"tolerations,omitempty"`
	Affinity        *v1.Affinity            `json:"affinity,omitempty"`
	Resources       v1.ResourceRequirements `json:"resources,omitempty"`

	// Per-container overrides.
	CSIDriverRegistrar container `json:"csiDriverRegistrar,omitempty"`
	CephCSI            container `json:"cephcsi,omitempty"`
}
// Phase is a label for the condition of a driver at the current time.
// NOTE(review): these constants are exported at package level under very
// generic names (Pending, Failed, Unknown); consider a DriverPhase-style
// prefix to avoid collisions — names are subject to change per the design
// notes above.
// +enum
type Phase string
// These are the valid statuses of driver.
const (
// Pending means the driver has been accepted by the system, but driver
// has not been started. This includes time before being bound to a node,
// as well as time spent pulling images onto the host.
Pending Phase = "Pending"
// Succeeded means that the driver is successfully created/started.
Succeeded Phase = "Succeeded"
// Failed means that the driver is not able to get into running state.
Failed Phase = "Failed"
// Unknown means that for some reason the state of the driver could not be obtained, typically due
// to an error in communicating with the operator and driver.
Unknown Phase = "Unknown"
)
// Status captures the observed condition of a single driver deployment.
type Status struct {
// Phase is the coarse state (Pending/Succeeded/Failed/Unknown).
// +optional
Phase Phase `json:"phase,omitempty"`
// Conditions []Condition `json:"conditions,omitempty"` // TODO: define the Condition type before enabling this field.
// A human readable message indicating details about why the driver is in this condition.
// +optional
Message string `json:"message,omitempty"`
// A brief CamelCase message indicating details about why the driver is in this state.
// e.g. 'Failed'
// +optional
Reason string `json:"reason,omitempty"`
}
// DriverStatus aggregates the per-driver Status blocks reported by the
// operator.
// NOTE(review): `omitempty` has no effect on non-pointer struct fields in
// encoding/json; use *Status if empty sections should be elided from the
// serialized status.
type DriverStatus struct {
RBD Status `json:"rbd,omitempty"`
CephFS Status `json:"cephfs,omitempty"`
NFS Status `json:"nfs,omitempty"`
}
```
* 1 CR v/s multiple CR
* 1 CRD v/s multiple CRD
Pros:
* Having a single CRD simplifies the management and deployment process. Users only need to deal with one CRD definition.
* Deployment automation becomes easier as there's only one CRD to create and manage.
* One single configuration to configure all the drivers, for example a single log level
* If more customization is required, we will end up having duplicate values per driver in a single CR
Cons:
* If the configuration for different CSI drivers varies significantly, managing a single CRD might become complex and harder to understand.
* Users might have less flexibility in customizing configurations for individual drivers if they are constrained within a single CRD.
* Can reduce extra configuration at higher level which specifies enable/disable any specific driver etc
*
Single CRD
* Contains all the details about cephfs/nfs/rbd
* Complicated CRD as we need to provide options to set and override it as well specific to the csi driver.
* CRD keeps on growing if we want to add
* Single Controller to handle everything
Single CR v/s Multiple CR
Single CR
* More configurations need to be provided at the CRD to override each csi drivers or duplicate details need to be added per csi drivers.
Multiple CR
* Create multiple instances of the CR specific to the type
* Naming the csi driver can be problematic
* We need to add new field inside the spec to name the csi driver as we cannot take the CR name (if someone wants to name the csi drivers with same name prefix)
* we need to have more checks inside the operator to handle all cases
Multiple CRD
* CRD specific to the csi driver (cephfs/rbd/nfs)
* New CRD's need to be added specific to the new csi drivers (if we add any)
* Multiple controllers need to run specific to the csi driver
* More complexity in management as we need to have secrets/configmap created (duplicate) per csi drivers.
* If someone needs to deploy cephcsi with the same configuration for all the drivers, duplicate details are needed.
Do we need a new CRD for the configmap for monitor and the RDR? or we can add it to the existing CR if we go with single CR
# Write Design document (Require help)
# Presentation preparation (Done)
* The names of kind and fields are subjected to change
```yaml
apiVersion: csi.ceph.io/v1alpha1
kind: OperatorConfig
metadata:
name: "default-csi-operator"
spec:
operator:
logLevel: "debug"
csi:
logRotation:
enabled: true
maximumCount: 3
periodicity: daily # one of: hourly, daily, weekly, monthly
maxLogSize: 10M
clusterConfigName: ""
enableMetadata: false
clusterName: ""
logLevel: ""
sidecarLogLevel: ""
provisionerReplicas: 2
enableSnapshotter: false
enableVolumeGroupSnapshot: false
...
```
* This will be one CR created with specific name
* The csi section contains the common defaults need to be applied for all the csi deployments done by this operator
```yaml
apiVersion: csi.ceph.io/v1alpha1
kind: Driver
metadata:
name: "openshift-storage.cephfs" (the name will end with specific type) that can be enfored with CEL
spec:
csiDriverConfigName: ""
driverType: cephfs (subjected to change)
# Either one of the driver section need to be filled
cephfs:
-
rbd:
-
nfs:
-
```
or
```yaml
apiVersion: csi.ceph.io/v1alpha1
kind: Driver
metadata:
name: "openshift-storage.cephfs" (the name will end with specific type) that can be enfored with CEL
spec:
driverType: cephfs (subjected to change)
# Either one of the driver section need to be filled
cephfs:
enabled: true (subjected to change)
... this contains all the driver specific keys and also option to override the default
rbd:
... this contains all the driver specific keys and also option to override the default
nfs:
... this contains all the driver specific keys and also option to override the default
```
```yaml
apiVersion: csi.ceph.io/v1alpha1
kind: CSIDriverConfig
name: csi-config-map
spec:
spec:
- clusterID: rook-ceph
monitors:
- 10.98.44.171:6789
cephFS:
subvolumeGroup: ""
kernelMountOptions: ""
fuseMountOptions: ""
rbd:
radosNamespace: ""
nfs:
readAffinity:
enabled: false
crushLocationLabels:
- kubernetes.io/hostname
- topology.kubernetes.io/region
mappingForRecovery:
- clusterIDMapping:
clusterID on site1: clusterID on site2
RBDPoolIDMapping:
- poolID on site1: poolID on site2
CephFSFscIDMapping:
- CephFS FscID on site1: CephFS FscID on site2
customCephConf:
ceph.conf: ""
keyring: ""
encryption:
config(configmap key config.json): ""
```
Tasks
* New operator
* License
* Community
* CI/CD
* Security Tools
* static tools
* github actions
* kubernetes cluster
* Documentation
* Developer guide
* Design plan
* Architecture overview
* Deployment/Installation guide
* Release
* Integration tests
* Unit tests
* Upgrade testing
* ocs-client-operator
* Removing the dead code and refactoring
* Update required documents
* RBAC changes
* Creation of required CR for deployment
* Update configuration related to clusterID
* SVG,Radosnamespace,Encryption etc
* Upgrade testing
* upgrades
* changing the subscription
* Rook
* Integration
* Removing the dead code and refactoring
* RBAC changes
* Creation of required CR for deployment
* Update configuration related to clusterID
* SVG,Radosnamespace,Encryption etc
* Upgrade testing
* Documentation
* Update required documents
* odf-operator
* Same as csi-addons
* CRD ownership, CSV
* upgrades
* changing the subscription
* UI work
* Encryption configmap creation
* cephcsi
* Reuse the E2E workflow
* csi-addons
* Need to check if operator can own csi-addons in downstream?
* The names or kind and fields are subjected to change
```yaml
apiVersion: csi.ceph.io/v1alpha1
kind: CephCSIOperatorConfig
metadata:
name: ceph-csi-operator-config
namespace: operator-ns
spec:
logging:
operatorlogLevel: "debug"
driverDefault:
clusterName: ""
fsGroupPolicy: ""
generateOMapInfo: true
logging:
driverLogLevel: ""
sidecarLogLevel: ""
logRotation:
enabled: true
maximumCount: 3
periodicity: daily # one of: hourly, daily, weekly, monthly
maxLogSize: 10M
plugin:
resources:
plugin: ""
driver-registrar: ""
...
kubeletPath: "var/lib/kubelet"
provisioner:
resources:
csi-provisioner: ""
attacher: ""
plugin: ""
snapshotter:""
...
clusterConfigName: ""
enableMetadata: false
clusterName: ""
provisionerReplicas: 2
enableSnapshotter: false
enableVolumeGroupSnapshot: false
encryption:
configMapRef: ""
...
```
* This will be one CR created with specific name
* The csi section contains the common defaults need to be applied for all the csi deployments done by this operator
```yaml
apiVersion: csi.ceph.io/v1alpha1
kind: CephCSIDriver
metadata:
name: "openshift-storage.cephfs" (the name will end with specific type) that can be enfored with CEL
spec:
clusterName: ""
fsGroupPolicy: ""
generateOMapInfo: true
logging:
driverLogLevel: ""
sidecarLogLevel: ""
logRotation:
enabled: true
maximumCount: 3
periodicity: daily # one of: hourly, daily, weekly, monthly
maxLogSize: 10M
plugin:
resources:
plugin: ""
driver-registrar: ""
...
kubeletPath: "var/lib/kubelet"
provisioner:
resources:
csi-provisioner: ""
attacher: ""
plugin: ""
snapshotter:""
...
clusterConfigName: ""
enableMetadata: false
clusterName: ""
provisionerReplicas: 2
enableSnapshotter: false
enableVolumeGroupSnapshot: false
encryption:
configMapRef: ""
# Either one of the driver section need to be filled
cephfs:
--forcekernel: true
rbd:
nfs:
```
```yaml
apiVersion: csi.ceph.io/v1alpha1
kind: CephClusterConnection
name: cephclusterName (identifier)
spec:
# Can be used to map cluster ID in RDR failover scenarios
aliases:
-
-
-
monitors:
- 10.98.44.171:6789
readAffinity:
enabled: true
crushLocationLabels:
- kubernetes.io/hostname
- topology.kubernetes.io/region
# a map for generic ceph configuration
# Future enhancements
# config:
# ceph.conf: ""
```
```yaml
apiVersion: csi.ceph.io/v1alpha1
kind: CephCSIClusterConnection (Binding similar to Roles/ClusterRole)
name: openshift-storage (clusterID)
spec:
Ref:
CRName: cephclusterName
subvolumeGroup: "csi-test"
fuseMountOptions: ""
radosNamespace: "rados-test"
```
#TO BE DISCUSSED
```yaml
# The cephfs subvolume group to be used for drivers configured to consume CephFS storage
subvolumeGroup: ""
#
fuseMountOptions: ""
#
radosNamespace: ""
- clusterIDMapping:
clusterID on site1: clusterID on site2
RBDPoolIDMapping:
- poolID on site1: poolID on site2
CephFSFscIDMapping:
- CephFS FscID on site1: CephFS FscID on site2
```

```yaml
---
kind: CephCSIOperatorConfig
apiVersion: csi.ceph.io/v1alpha1
metadata:
name: csioperatorconfig
spec:
logLevel: 1
driverSpecDefaults:
logging:
logLevel: 5
maxfiles: 5
maxLogSize: 10M
clusterName: 5c63ad7e-74fe-4724-a511-4ccdc560da56
enableMetadata: true
SnapshotPolicy: auto-detect
generateOMapInfo: true
fsGroupPolicy: File
plugin:
priorityClassName: system-node-critical
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
Labels:
app: csi
annotations:
k8s.v1.cni.cncf.io/networks: macvlan-conf-1
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: storage
operator: In
values:
- node
tolerations:
- key: storage
operator: Exists
resources:
registrar:
limits:
cpu: '200'
memory: '500'
requests:
cpu: '100'
memory: '250'
liveness:
limits:
cpu: '200'
memory: '500'
requests:
cpu: '100'
memory: '250'
plugin:
limits:
cpu: '200'
memory: '500'
requests:
cpu: '100'
memory: '250'
pluginVolumes:
- name: host-run
Volumes:
hostPath:
path: "/run"
type: Directory
VolumeMounts:
name: ''
readOnly: true
mountPath: "/run"
mountPropagation: Bidirectional
kubeletDirPath: "/var/lib/kubelet"
imagePullPolicy: IfNotPresent
provisioner:
priorityClassName: system-cluster-critical
Labels:
app: provisioner
annotations:
k8s.v1.cni.cncf.io/networks: macvlan-conf-1
provisionerReplicas: 2
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: storage
operator: In
values:
- node
tolerations:
- key: storage
operator: Exists
resources:
attacher:
limits:
cpu: '200'
memory: '500'
requests:
cpu: '100'
memory: '250'
snapshotter:
limits:
cpu: '200'
memory: '500'
requests:
cpu: '100'
memory: '250'
resizer:
limits:
cpu: '200'
memory: '500'
requests:
cpu: '100'
memory: '250'
provisioner:
limits:
cpu: '200'
memory: '500'
requests:
cpu: '100'
memory: '250'
omapGenerator:
limits:
cpu: '200'
memory: '500'
requests:
cpu: '100'
memory: '250'
liveness:
limits:
cpu: '200'
memory: '500'
requests:
cpu: '100'
memory: '250'
plugin:
limits:
cpu: '200'
memory: '500'
requests:
cpu: '100'
memory: '250'
liveness:
metricsPort: 8000
leaderElection:
leaseDuration: 100
renewDeadline: 100
retryPeriod: 10
deployCSIAddons: true
cephfs:
status:
phase: Succeeded
message: operator config successfully created
```
```yaml
---
kind: CephCSIDriver
apiVersion: csi.ceph.io/v1alpha1
metadata:
name: "<prefix>.cephfs.csi.ceph.com"
creationTimestamp:
spec:
DeploymentName: csi-cephfsplugin-provisioner
DaemonsetName: csi-cephfsplugin
Spec:
fsGroupPolicy: File
plugin:
priorityClassName: system-node-critical
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
Labels:
app: cephfs-plugin
annotations:
k8s.v1.cni.cncf.io/networks: macvlan-conf-1
kubeletDirPath: ''
imagePullPolicy: ''
provisioner:
Labels:
app: ceph-fs-provisioner
annotations:
k8s.v1.cni.cncf.io/networks: macvlan-conf-1
provisionerReplicas: 2
liveness:
metricsPort: 8000
leaderElection:
leaseDuration: 100
renewDeadline: 100
retryPeriod: 10
deployCSIAddons: false
cephfs:
forceCephFsKernelClient: true
kernelMountOptions: ms_mode=secure
status:
phase: Failed
message: Failed to create cephfs csi driver
reason: csi driver with same name already exists in the cluster
```
```yaml
---
kind: CephCSICephConnection
apiVersion: csi.ceph.io/v1alpha1
metadata:
name: ceph-cluster-1
creationTimestamp:
spec:
monitors:
- 10.98.44.171:6789
- 10.98.44.172:6789
- 10.98.44.173:6789
ReadAffinity:
crushLocationLabels:
- kubernetes.io/hostname
- topology.kubernetes.io/region
- topology.kubernetes.io/zone
cephFS:
kernelMountOptions: readdir_max_bytes=1048576,norbytes
fuseMountOptions: debug
rbd:
mirrorDaemonCount: 2
config: |-
[global]
auth_cluster_required = none
auth_service_required = none
auth_client_required = none
rbd_validate_pool = false
status: {}
```
```yaml
---
kind: CephCSICluster
apiVersion: csi.ceph.io/v1alpha1
metadata:
name: storage
creationTimestamp:
spec:
cephCSICephConnectionRef:
name: ceph-cluster-1
cephFS:
subvolumeGroup: csi
rbd:
radosNamespace: rados-test
status:
phase: Succeeded
message: successfully linked to CephCSICephConnection
```