Kubernetes decouples storage by design: PVs are managed by the storage administrator, and applications only consume them through PVCs. So far, though, we have had to manage both the PV and the PVC ourselves. In real work, we (as the storage administrators) can configure a StorageClass for dynamic PV provisioning ahead of time, so that PVs are generated automatically as PVCs consume them.
    We use nfs-client-provisioner to build an NFS-backed StorageClass. The deployment YAML is below; save it as nfs-sc.yaml:

    cat nfs-sc.yaml
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: nfs-client-provisioner
      namespace: kube-system
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: nfs-client-provisioner-runner
    rules:
      - apiGroups: [""]
        resources: ["persistentvolumes"]
        verbs: ["get", "list", "watch", "create", "delete"]
      - apiGroups: [""]
        resources: ["persistentvolumeclaims"]
        verbs: ["get", "list", "watch", "update"]
      - apiGroups: ["storage.k8s.io"]
        resources: ["storageclasses"]
        verbs: ["get", "list", "watch"]
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["list", "watch", "create", "update", "patch"]
      - apiGroups: [""]
        resources: ["endpoints"]
        verbs: ["get", "list", "watch", "create", "update", "patch"]
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: run-nfs-client-provisioner
    subjects:
      - kind: ServiceAccount
        name: nfs-client-provisioner
        namespace: kube-system
    roleRef:
      kind: ClusterRole
      name: nfs-client-provisioner-runner
      apiGroup: rbac.authorization.k8s.io
    ---
    kind: Deployment
    apiVersion: apps/v1
    metadata:
      name: nfs-provisioner-01
      namespace: kube-system
    spec:
      replicas: 1
      strategy:
        type: Recreate
      selector:
        matchLabels:
          app: nfs-provisioner-01
      template:
        metadata:
          labels:
            app: nfs-provisioner-01
        spec:
          serviceAccountName: nfs-client-provisioner
          containers:
            - name: nfs-client-provisioner
              image: jmgao1983/nfs-client-provisioner:latest
              imagePullPolicy: IfNotPresent
              volumeMounts:
                - name: nfs-client-root
                  mountPath: /persistentvolumes
              env:
                - name: PROVISIONER_NAME
                  value: nfs-provisioner-01   # this provisioner name is referenced by the StorageClass below
                - name: NFS_SERVER
                  value: 10.0.1.201           # fill in your NFS server address
                - name: NFS_PATH
                  value: /nfs_dir             # fill in the exported NFS directory
          volumes:
            - name: nfs-client-root
              nfs:
                server: 10.0.1.201            # fill in your NFS server address
                path: /nfs_dir                # fill in the exported NFS directory
    ---
    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: nfs-boge
    provisioner: nfs-provisioner-01
    # Supported policies: Delete, Retain (default is Delete)
    reclaimPolicy: Retain
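
    Note that the StorageClass's provisioner field must match the PROVISIONER_NAME env var set on the Deployment above, otherwise no PV will ever be created for its PVCs. If you prefer reclaimPolicy: Delete, the nfs-client provisioner (depending on the image version) also understands an archiveOnDelete parameter that controls whether the backing directory is archived or removed when the PV is deleted. A minimal sketch of that variant:

    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: nfs-boge
    provisioner: nfs-provisioner-01   # must match PROVISIONER_NAME above
    reclaimPolicy: Delete
    parameters:
      archiveOnDelete: "true"         # keep an archived copy of the data instead of deleting it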

    Now create the StorageClass:

    # kubectl apply -f nfs-sc.yaml
    serviceaccount/nfs-client-provisioner created
    clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
    clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
    deployment.apps/nfs-provisioner-01 created
    storageclass.storage.k8s.io/nfs-boge created

    # Note that this is deployed in the kube-system namespace, where the more system-level services live
    # kubectl -n kube-system get pod -w
    NAME                                       READY   STATUS              RESTARTS   AGE
    calico-kube-controllers-7fdc86d8ff-dpdm5   1/1     Running             1          24h
    calico-node-8jcp5                          1/1     Running             1          24h
    calico-node-m92rn                          1/1     Running             1          24h
    calico-node-xg5n4                          1/1     Running             1          24h
    calico-node-xrfqq                          1/1     Running             1          24h
    coredns-d9b6857b5-5zwgf                    1/1     Running             1          24h
    metrics-server-869ffc99cd-wfj44            1/1     Running             2          24h
    nfs-provisioner-01-5db96d9cc9-qxlgk        0/1     ContainerCreating   0          9s
    nfs-provisioner-01-5db96d9cc9-qxlgk        1/1     Running             0          21s

    # The StorageClass has been created
    # kubectl get sc
    NAME       PROVISIONER          RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
    nfs-boge   nfs-provisioner-01   Retain          Immediate           false                  37s
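
    Optionally, nfs-boge can be marked as the cluster's default StorageClass, so that PVCs which omit storageClassName are also provisioned from it. This is not required for the steps below, just a common convenience; the annotation is the standard Kubernetes one:

    # kubectl patch storageclass nfs-boge -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'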

    Let's create a PVC based on this StorageClass and see what the dynamically generated PV looks like:

    # vim pvc-sc.yaml 
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: pvc-sc
    spec:
      storageClassName: nfs-boge
      accessModes:
        - ReadWriteMany
      resources:
        requests:
          storage: 1Mi
    
    # kubectl  apply -f pvc-sc.yaml 
    persistentvolumeclaim/pvc-sc created
    
    # kubectl  get pvc
    NAME     STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
    pvc-sc   Bound    pvc-63eee4c7-90fd-4c7e-abf9-d803c3204623   1Mi        RWX            nfs-boge       3s
    pvc1     Bound    pv1                                        1Gi        RWO            nfs            24m
    
    # kubectl  get pv
    NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM            STORAGECLASS   REASON   AGE
    pv1                                        1Gi        RWO            Recycle          Bound    default/pvc1     nfs                     49m
    pvc-63eee4c7-90fd-4c7e-abf9-d803c3204623   1Mi        RWX            Retain
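
    If you want to see exactly where a dynamically generated PV lives on the NFS server, kubectl describe shows its NFS source path (using the PV name generated above):

    # kubectl describe pv pvc-63eee4c7-90fd-4c7e-abf9-d803c3204623 | grep -A3 Source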
    

    Now let's modify the nginx YAML and switch the PVC name to the pvc-sc created above:

    # vim nginx.yaml 
    ---
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      labels:
        app: nginx
      name: nginx
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: nginx
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
          - image: nginx
            name: nginx
            volumeMounts:    # mount the volume at the nginx container's default page directory
              - name: html-files
                mountPath: "/usr/share/nginx/html"
          volumes:
            - name: html-files
              persistentVolumeClaim:
                claimName: pvc-sc
    
    
    # kubectl apply -f nginx.yaml 
    service/nginx unchanged
    deployment.apps/nginx configured
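
    # (optional check) the pod template changed, so the Deployment rolls out a new pod; wait for it and get the new pod name before exec'ing in
    # kubectl rollout status deploy/nginx
    # kubectl get pod -l app=nginx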
    
    # Note: because the PV is dynamically generated, its backing directory name is based on a random string, so this time we go straight into the pod to create the page
    # kubectl exec -it nginx-57cdc6d9b4-n497g -- bash
    root@nginx-57cdc6d9b4-n497g:/# echo 'storageClass used' > /usr/share/nginx/html/index.html
    root@nginx-57cdc6d9b4-n497g:/# exit
    
    # curl 10.68.238.54                              
    storageClass used
    
    # Let's look at the NFS export directory
    # ll /nfs_dir/
    total 0
    drwxrwxrwx 2 root root 24 Nov 27 17:52 default-pvc-sc-pvc-63eee4c7-90fd-4c7e-abf9-d803c3204623
    drwxr-xr-x 2 root root  6 Nov 27 17:25 pv1
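
    Because the StorageClass uses reclaimPolicy: Retain, deleting the claim does not wipe the data on NFS. A quick way to verify this (a sketch using the objects created above):

    # kubectl delete -f nginx.yaml     # delete the consumer first, otherwise the in-use PVC stays in Terminating
    # kubectl delete -f pvc-sc.yaml
    # kubectl get pv                   # the dynamically created PV becomes Released instead of being removed
    # ll /nfs_dir/                     # the default-pvc-sc-... directory and the data inside it are still there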