0
点赞
收藏
分享

微信扫一扫

k8s挂载存储模式---NFS

烟中雯城 2021-09-28 阅读 53

1.存储到临时目录

  spec:
      nodeSelector:
        kubernetes.io/hostname: k8s-node2  # pin the pod to node2
      containers:
      - name: nginx-web
        image: nginx:latest
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /usr/share/nginx/html  # mount point inside the container
          name: html
      volumes:
        - name: html
          # emptyDir must be indented to the same column as "name" above
          # (the original had it two columns deeper, which is invalid YAML).
          # Ephemeral scratch space: created with the pod, deleted with it.
          emptyDir: {}

这种模式数据存储将随着pod的创建与销毁生命周期存在,数据将不持久化存储。

2.存储到宿主机目录

  spec:
      nodeSelector:
        kubernetes.io/hostname: k8s-node2  # pin the pod to node2 (hostPath data lives on that specific node)
      containers:
      - name: nginx-web
        image: nginx:latest
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /usr/share/nginx/html  # mount point inside the container
          name: html
      volumes:
      - name: html
        hostPath:  # hostPath volume: a directory on the node's own filesystem
          path: /data/nginx/html   # directory on the host
          type:  DirectoryOrCreate

优点:简单易用,无需额外支持
缺点:依赖宿主机磁盘容量,pod与宿主机存在强耦合,不利于管理。当pod部署多个副本并分配到不同host时,数据不共享;当pod漂移时,数据不同步;当node故障时,数据易丢失;

3.存储到NFS中

3.1安装NFS

#master节点安装nfs
[root@k8s-master nginx]# yum -y install nfs-utils
#创建nfs目录
[root@k8s-master nginx]# mkdir -p /nfs/data/
#修改权限
[root@k8s-master nginx]# chmod -R 777 /nfs/data
#编辑export文件,这个文件就是nfs默认的配置文件
[root@k8s-master nginx]# vim /etc/exports
/nfs/data *(rw,no_root_squash,sync)
#配置生效
[root@k8s-master nginx]# exportfs -r
#查看生效
[root@k8s-master nginx]# exportfs
/nfs/data       <world>
#启动rpcbind、nfs服务
[root@k8s-master nginx]# systemctl restart rpcbind && systemctl enable rpcbind
[root@k8s-master nginx]# systemctl restart nfs && systemctl enable nfs
Created symlink from /etc/systemd/system/multi-user.target.wants/nfs-server.service to /usr/lib/systemd/system/nfs-server.service.
#查看 RPC 服务的注册状况
[root@k8s-master nginx]# rpcinfo -p localhost
   program vers proto   port  service
    100000    4   tcp    111  portmapper
    100000    3   tcp    111  portmapper
    100000    2   tcp    111  portmapper
    100000    4   udp    111  portmapper

#showmount测试
[root@k8s-master nginx]# showmount -e 192.168.0.66
 Export list for 192.168.0.66:
 /nfs/data *

3.2创建PV
创建前我们先在master节点 mkdir /nfs/data/nginx 创建出一个nginx子目录供pv使用

# PersistentVolume backed by the NFS export set up in section 3.1.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv
  namespace: default  # NOTE(review): PVs are cluster-scoped, so this field has no effect
  labels:
    pv: nfs-pv  # label the PVC's selector matches to bind this specific PV
spec:
  capacity:
    storage: 100Mi
  accessModes:
    - ReadWriteMany  # RWX: can be mounted read-write by many nodes (NFS supports this)
  persistentVolumeReclaimPolicy: Retain  # keep the data after the claim is released
  storageClassName: nfs
  nfs:  
    server: 192.168.0.66
    path: "/nfs/data/nginx"   # NFS directory; must already exist on the NFS server

然后执行创建

[root@k8s-master nfs]# kubectl apply -f pv.yaml 
persistentvolume/nfs-pv created
[root@k8s-master nfs]# kubectl get pv
NAME     CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
nfs-pv   100Mi      RWX            Retain           Available                                   7s
# 3.3 Create the PVC that binds to the PV above.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-pvc
  namespace: default
spec:
  accessModes:
    - ReadWriteMany  # must be compatible with the PV's access modes
  resources:
    requests:
      storage: 50Mi  # requested capacity; the 100Mi PV satisfies it (claim shows the PV's full size)
  selector:
    matchLabels:
      pv: nfs-pv   # must match the PV's label key/value exactly

执行创建命令

[root@k8s-master nfs]# kubectl apply -f pvc.yaml 
persistentvolumeclaim/nfs-pvc created
[root@k8s-master nfs]# kubectl get pvc
NAME      STATUS   VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
nfs-pvc   Bound    nfs-pv   100Mi      RWX 
[root@k8s-master nfs]# kubectl get pv
NAME     CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM             STORAGECLASS   REASON   AGE
nfs-pv   100Mi      RWX            Retain           Bound    default/nfs-pvc
此时pv状态已经从Available变成Bound状态。

3.4 创建pod并使用pvc存储资源
vim nginx.yaml #我们用nginx镜像进行验证,将html目录映射到nfs目录中

#deploy
# Deployment: two nginx replicas sharing the same NFS-backed volume via the PVC.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-nginx
  namespace: default
spec:
  selector:
    matchLabels:
      app: nfs-nginx
  replicas: 2  # two pods may land on different nodes; NFS keeps their html dir in sync
  template:
    metadata:
      labels:
        app: nfs-nginx
    spec:
      containers:
      - name: nginx-web
        image: nginx:latest
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /usr/share/nginx/html  # nginx document root, served from NFS
          name: html
      volumes:
      - name: html
        persistentVolumeClaim:
          claimName: nfs-pvc  # bind to the PVC created in 3.3
---
# Service: expose the deployment on a fixed NodePort for external access.
apiVersion: v1
kind: Service
metadata:
  name: nfs-nginx
  namespace: default
spec:
  type: NodePort
  ports:
  - port: 80          # service port inside the cluster
    protocol: TCP
    targetPort: 80    # container port
    nodePort: 31681   # reachable as http://<any-node-ip>:31681
  selector:
    app: nfs-nginx

创建pod容器

[root@k8s-master nfs]# kubectl apply -f nginx.yaml
[root@k8s-master nfs]# kubectl get pods  -o wide
NAME                         READY   STATUS    RESTARTS   AGE   IP            NODE        NOMINATED NODE   READINESS GATES
nfs-nginx-7695b95db6-l74zx   1/1     Running   0          12s   10.244.2.93   k8s-node1   <none>           <none>
nfs-nginx-7695b95db6-qcqp8   1/1     Running   0          12s   10.244.1.22   k8s-node2   <none>           <none>

如果kubectl describe pods xxx 发现有如下报错,则在各节点服务器上安装nfs-utils

Output: Running scope as unit run-20005.scope.
mount: wrong fs type, bad option, bad superblock on 192.168.0.66:/nfs/data/nginx,
       missing codepage or helper program, or other error
各节点安装并启用nfs(注意用 && 顺序执行,单个 & 会把前一条命令放入后台)
yum install nfs-utils
systemctl start nfs && systemctl enable nfs
systemctl start rpcbind && systemctl enable rpcbind

3.5验证
3.5.1直接放文件到NFS的/nfs/data/nginx目录
我们在/nfs/data/nginx目录创建了一个1.html文件

<html>
<body>Test01</body>
</html>


3.5.2 在容器1的/usr/share/nginx/html目录创建文件2.html

<html>
<body>Test02</body>
</html>

3.5.3 在容器2的/usr/share/nginx/html目录创建文件3.html

<html>
<body>Test03</body>
</html>

分别测试访问2.html和3.html




此外我们进入容器查看,目录中文件是共享的:

root@nfs-nginx-7695b95db6-l74zx:/usr/share/nginx/html# ls
1.html  2.html  3.html

3.5.4 pod销毁重建
kubectl delete -f nginx.yaml
kubectl apply -f nginx.yaml
再次访问1.html/2.html/3.html,依旧可以访问到,说明文件未丢失。

root@nfs-nginx-7695b95db6-78wml:/usr/share/nginx/html# ls
1.html  2.html  3.html
#新创建的容器,依旧可以看到这些文件

4.结语

NFS挂载有静态与动态两种不同模式,动态挂载模式需要创建StorageClass,使用过程相对复杂,本文采用的是静态模式。
另外对于k8s集群来讲,NFS并不是最理想存储模式,建议优先采用分布式存储方案,如cephfs存储。

举报

相关推荐

0 条评论