Kubernetes Dynamic Volume Provisioning with Ceph RBD

You can use Ceph RBD to create volumes on demand through the StorageClass concept; the full list of supported provisioners is in the Storage Classes docs. Make sure you have the following in place:

  • Kubernetes cluster; you can build a high-performance Kubernetes cluster with Ansible, as I did earlier in this project.
  • Ceph Cluster (luminous)
root@zu-master1:~# kubectl get nodes
NAME            STATUS   ROLES    AGE   VERSION
10.202.202.40   Ready    master   24d   v1.13.3
10.202.202.50   Ready    master   24d   v1.13.3
10.202.202.60   Ready    master   24d   v1.13.3
10.202.202.70   Ready    <none>   24d   v1.13.3
10.202.202.80   Ready    <none>   24d   v1.13.3
10.202.202.90   Ready    <none>   24d   v1.13.3
root@zu-master1:~# kubectl get componentstatus
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-2               Healthy   {"health":"true"}   
etcd-1               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"}   
[root@zu-ceph-mon ~]# ceph status
  cluster:
    id:     cde3d179-eebf-4c60-b375-78e1426a792b
    health: HEALTH_OK
 
  services:
    mon: 1 daemons, quorum zu-ceph-mon
    mgr: zu-ceph-mon(active)
    osd: 6 osds: 6 up, 6 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0B
    usage:   7.56GiB used, 292GiB / 299GiB avail
    pgs:

Install ceph-common on all Kubernetes worker nodes

sudo apt install ceph-common
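
To confirm the client tools are installed and check which release the distribution ships (this matters for the troubleshooting note at the end), you can run on each worker node:

ceph --version
rbd --version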

Create a new pool for dynamic provisioning in Ceph

ceph osd pool create kube 512
ceph auth get-or-create client.kube mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=kube' -o ceph.client.kube.keyring
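
Since Luminous, Ceph warns about pools that have no application tag. This step is not in the original commands, but tagging the new pool for RBD use avoids that HEALTH_WARN:

ceph osd pool application enable kube rbd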
[root@zu-ceph-mon ~]# ceph auth get client.admin
exported keyring for client.admin
[client.admin]
        key = AQDyCOBcmLU/JxAAw1wBxR9Rf1fj9xCWerCXCQ==
        caps mds = "allow *"
        caps mgr = "allow *"
        caps mon = "allow *"
        caps osd = "allow *"
[root@zu-ceph-mon ~]# ceph auth get client.kube
exported keyring for client.kube
[client.kube]
        key = AQBrAOVcmya5KRAAMDbmqSJW673oNFo5GN5DOQ==
        caps mon = "allow r"
        caps osd = "allow class-read object_prefix rbd_children, allow rwx pool=kube"

Copy ceph.client.kube.keyring to every worker node. (My kubelet runs as a service directly on the worker hosts, so I copy the keyring onto each node; if your kubelet runs as a pod, as with kubeadm, I don't know how this works.)

[root@zu-ceph-mon ~]# ls
anaconda-ks.cfg  ceph.client.kube.keyring

scp ceph.client.kube.keyring root@10.202.202.70:/etc/ceph/
scp ceph.client.kube.keyring root@10.202.202.80:/etc/ceph/
scp ceph.client.kube.keyring root@10.202.202.90:/etc/ceph/
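
To check that a worker node can actually reach the cluster with the kube credentials, you can run an rbd command from the node; the monitor address below is the one used in the StorageClass later on, adjust it to your ceph-mon IP:

rbd ls kube --id kube --keyring /etc/ceph/ceph.client.kube.keyring -m 10.200.200.10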

Generate the client.admin base64-encoded key

[root@zu-ceph-mon ~]# ceph auth get-key client.admin | base64
QVFEeUNPQmNtTFUvSnhBQXcxd0J4UjlSZjFmajl4Q1dlckNYQ1E9PQ==

Create a Secret for the Ceph admin user inside Kubernetes

apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
  namespace: kube-system
data:
  key: QVFEeUNPQmNtTFUvSnhBQXcxd0J4UjlSZjFmajl4Q1dlckNYQ1E9PQ==
type: kubernetes.io/rbd
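
Assuming the manifest above is saved as ceph-secret.yaml (the filename is my choice), apply and verify it with:

kubectl apply -f ceph-secret.yaml
kubectl get secret ceph-secret -n kube-system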

Create the Ceph StorageClass

apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
  name: ceph
provisioner: kubernetes.io/rbd
parameters:
  monitors: 10.200.200.10:6789 
  adminId: admin 
  adminSecretName: ceph-secret 
  adminSecretNamespace: kube-system 
  pool: kube  
  userId: kube  
  userSecretName: ceph-user-secret
  • monitors: the list of ceph-mon IP addresses, separated by commas
  • adminId: the Ceph user able to create images in the pool, default admin
  • adminSecretName: the name of the Secret for the Ceph admin user created above
  • adminSecretNamespace: the namespace where the admin Secret lives
  • pool: the Ceph pool to use
  • userId: the Ceph kube user
  • userSecretName: the name of the Secret for the kube user; note that every Kubernetes namespace that wants to use the ceph StorageClass must have this Secret object created in it.
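
Save the StorageClass manifest above (here as ceph-storageclass.yaml, a name chosen for illustration), apply it, and confirm it is registered:

kubectl apply -f ceph-storageclass.yaml
kubectl get storageclass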

Create a Secret for the Ceph kube user in the default namespace

[root@zu-ceph-mon ~]# ceph auth get-key client.kube | base64
QVFCckFPVmNteWE1S1JBQU1EYm1xU0pXNjczb05GbzVHTjVET1E9PQ==
apiVersion: v1
kind: Secret
metadata:
  name: ceph-user-secret
data:
  key: QVFCckFPVmNteWE1S1JBQU1EYm1xU0pXNjczb05GbzVHTjVET1E9PQ==
type: kubernetes.io/rbd
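
As before, apply the manifest (saved here as ceph-user-secret.yaml) into the default namespace and check it:

kubectl apply -f ceph-user-secret.yaml
kubectl get secret ceph-user-secret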

Create a PVC in the default namespace

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
 name: test-pvc
 annotations:
   volume.beta.kubernetes.io/storage-class: ceph
spec:
 accessModes:
  - ReadWriteOnce
 resources:
   requests:
     storage: 2Gi
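
Apply the claim (saved here as test-pvc.yaml, a filename chosen for this example) and watch it bind:

kubectl apply -f test-pvc.yaml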
root@zu-master1:~# kubectl get pvc
NAME       STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
test-pvc   Bound    pvc-2ba0a7dc-7c6c-11e9-8a4e-525400b186ef   2Gi        RWO            ceph           5m28s
root@zu-master1:~# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM              STORAGECLASS   REASON   AGE
pvc-2ba0a7dc-7c6c-11e9-8a4e-525400b186ef   2Gi        RWO            Delete           Bound    default/test-pvc   ceph                    3m44s
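
On the Ceph side you can confirm that the provisioner created an image in the kube pool; dynamically provisioned images are typically named kubernetes-dynamic-pvc-<uuid>, so the exact name will differ:

rbd ls kube
rbd info kube/<image-name>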

Create a Pod and check the volume

apiVersion: v1
kind: Pod
metadata:
  name: ceph-pod 
spec:
  containers:
  - name: ceph-busybox
    image: busybox 
    command: ["sleep", "60000"]
    volumeMounts:
    - name: ceph-vol
      mountPath: /usr/share/busybox 
      readOnly: false
  volumes:
  - name: ceph-vol
    persistentVolumeClaim:
      claimName: test-pvc
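
Apply the Pod manifest (saved here as ceph-pod.yaml, name chosen for illustration):

kubectl apply -f ceph-pod.yaml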
root@zu-master1:~# kubectl get pod
NAME       READY   STATUS    RESTARTS   AGE
ceph-pod   1/1     Running   0          89s
root@zu-master1:~# kubectl exec -it ceph-pod -- ls /usr/share/busybox
lost+found
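
To make sure the RBD volume is actually writable, write a file into the mount and read it back:

kubectl exec -it ceph-pod -- sh -c 'echo hello-ceph > /usr/share/busybox/test.txt'
kubectl exec -it ceph-pod -- cat /usr/share/busybox/test.txt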

Troubleshooting

  • The ceph-common package installed on the Kubernetes worker nodes is version 10.2.11 (Jewel) while the Ceph cluster runs Luminous. If you get a feature mismatch error with code 400000000000000, run this command on ceph-mon (https://silvenga.com/ceph-feature-mismatch/):
ceph osd crush tunables hammer
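
To confirm the mismatch first, you can check the daemon versions and the feature bits reported by connected clients on ceph-mon (both commands are available from Luminous onward):

ceph versions
ceph features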

 

 
