83 - Cloud-Native OS: Kubernetes Resource Object Management and Examples II

In this chapter:

  1. PVC dynamic storage
  2. ConfigMap
  3. Secret
    1. Opaque
    2. Mounting a Secret
    3. kubernetes.io/tls - providing a certificate for nginx
    4. kubernetes.io/dockerconfigjson
  4. StatefulSet
  5. DaemonSet

Dynamic Storage Use Case

------------------------------------Dynamic storage use case-------------------------------------
#Project used:
https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner

#1. Create the RBAC account:
[root@K8s-ansible Dynamic-storage-volumes]#cat user-create.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: nfs
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: nfs
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: nfs
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io

[root@K8s-ansible Dynamic-storage-volumes]#kubectl apply -f user-create.yaml 
namespace/nfs created
serviceaccount/nfs-client-provisioner created
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
role.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
rolebinding.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created

#2. Create the StorageClass
[root@K8s-ansible Dynamic-storage-volumes]#cat storageclass-create.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: mooreyxia-nfs-storage
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner # or choose another name, must match deployment's env PROVISIONER_NAME
reclaimPolicy: Retain #PV reclaim policy; the default Delete removes the NFS data when the PV is deleted
mountOptions:
  #- vers=4.1 #can resolve a containerd mount-parameter warning
  #- noresvport #make the NFS client use a new TCP source port when reconnecting
  - noatime #do not update inode access times on reads, improving performance
parameters:
  #mountOptions: "vers=4.1,noresvport,noatime"
  archiveOnDelete: "true" #archive (keep) the data when the PVC is deleted; default false

[root@K8s-ansible Dynamic-storage-volumes]#kubectl apply -f storageclass-create.yaml 
storageclass.storage.k8s.io/mooreyxia-nfs-storage created

[root@K8s-ansible Dynamic-storage-volumes]#kubectl get storageclasses.storage.k8s.io 
NAME                    PROVISIONER                                   RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
mooreyxia-nfs-storage   k8s-sigs.io/nfs-subdir-external-provisioner   Retain          Immediate           false                  78s
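
As a side note (not part of the original setup): a StorageClass can be marked as the cluster default with the standard is-default-class annotation, so PVCs that omit storageClassName still bind to it. A minimal sketch:

#Optional: mark the class as the cluster default
kubectl annotate storageclass mooreyxia-nfs-storage storageclass.kubernetes.io/is-default-class="true"
kubectl get storageclass   #the default class is listed with "(default)"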

#3. Create the shared directory on the NFS server
[root@K8s-haproxy01 ~]#mkdir -p /data/volumes
[root@K8s-haproxy01 ~]#vim /etc/exports 
[root@K8s-haproxy01 ~]#cat /etc/exports 
# /etc/exports: the access control list for filesystems which may be exported
#		to NFS clients.  See exports(5).
#
# Example for NFSv2 and NFSv3:
# /srv/homes       hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
#
# Example for NFSv4:
# /srv/nfs4        gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
# /srv/nfs4/homes  gss/krb5i(rw,sync,no_subtree_check)
#

/data/k8sdata *(rw,no_root_squash)
/data/volumes *(rw,no_root_squash)

[root@K8s-haproxy01 ~]#exportfs -arv #re-export the shares; do not restart the NFS service
exportfs: /etc/exports [2]: Neither 'subtree_check' or 'no_subtree_check' specified for export "*:/data/k8sdata".
  Assuming default behaviour ('no_subtree_check').
  NOTE: this default has changed since nfs-utils version 1.0.x

exportfs: /etc/exports [3]: Neither 'subtree_check' or 'no_subtree_check' specified for export "*:/data/volumes".
  Assuming default behaviour ('no_subtree_check').
  NOTE: this default has changed since nfs-utils version 1.0.x

exporting *:/data/volumes
exporting *:/data/k8sdata

#Verify the NFS exports are visible
[root@K8s-ansible Dynamic-storage-volumes]#showmount -e 192.168.11.203
Export list for 192.168.11.203:
/data/volumes *
/data/k8sdata *

#4. Create the NFS provisioner
[root@K8s-ansible Dynamic-storage-volumes]#cat nfsProvisioner-define.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs
spec:
  replicas: 1
  strategy: #deployment strategy
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner #account used to dynamically provision volumes on NFS
      containers:
        - name: nfs-client-provisioner
          #image: k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2 
          image: K8s-harbor01.mooreyxia.com/kubernetes/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 192.168.11.203
            - name: NFS_PATH
              value: /data/volumes
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.11.203
            path: /data/volumes

[root@K8s-ansible Dynamic-storage-volumes]#kubectl apply -f nfsProvisioner-define.yaml 
deployment.apps/nfs-client-provisioner created

[root@K8s-ansible Dynamic-storage-volumes]#kubectl get pod -A |grep nfs-client-provisioner
nfs                    nfs-client-provisioner-7d964749dd-rpztv             1/1     Running   0              2m23s

[root@K8s-ansible Dynamic-storage-volumes]#kubectl describe pod nfs-client-provisioner-7d964749dd-rpztv -n nfs
Name:             nfs-client-provisioner-7d964749dd-rpztv
Namespace:        nfs
Priority:         0
Service Account:  nfs-client-provisioner
Node:             192.168.11.216/192.168.11.216
Start Time:       Wed, 05 Apr 2023 04:51:44 +0000
Labels:           app=nfs-client-provisioner
                  pod-template-hash=7d964749dd
Annotations:      <none>
Status:           Running
IP:               10.200.128.146
IPs:
  IP:           10.200.128.146
Controlled By:  ReplicaSet/nfs-client-provisioner-7d964749dd
Containers:
  nfs-client-provisioner:
    Container ID:   containerd://8872d8cbf00f04447eae6b047a7a33b9796ca636908fcd561a05eb79042796b8
    Image:          K8s-harbor01.mooreyxia.com/kubernetes/nfs-subdir-external-provisioner:v4.0.2
    Image ID:       K8s-harbor01.mooreyxia.com/kubernetes/nfs-subdir-external-provisioner@sha256:f741e403b3ca161e784163de3ebde9190905fdbf7dfaa463620ab8f16c0f6423
    Port:           <none>
    Host Port:      <none>
    State:          Running
      Started:      Wed, 05 Apr 2023 04:51:50 +0000
    Ready:          True
    Restart Count:  0
    Environment:
      PROVISIONER_NAME:  k8s-sigs.io/nfs-subdir-external-provisioner
      NFS_SERVER:        192.168.11.203
      NFS_PATH:          /data/volumes
    Mounts:
      /persistentvolumes from nfs-client-root (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-6lvkh (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  nfs-client-root:
    Type:      NFS (an NFS mount that lasts the lifetime of a pod)
    Server:    192.168.11.203
    Path:      /data/volumes
    ReadOnly:  false
  kube-api-access-6lvkh:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  81s   default-scheduler  Successfully assigned nfs/nfs-client-provisioner-7d964749dd-rpztv to 192.168.11.216
  Normal  Pulling    79s   kubelet            Pulling image "K8s-harbor01.mooreyxia.com/kubernetes/nfs-subdir-external-provisioner:v4.0.2"
  Normal  Pulled     76s   kubelet            Successfully pulled image "K8s-harbor01.mooreyxia.com/kubernetes/nfs-subdir-external-provisioner:v4.0.2" in 3.076598952s (3.076624098s including waiting)
  Normal  Created    76s   kubelet            Created container nfs-client-provisioner
  Normal  Started    75s   kubelet            Started container nfs-client-provisioner


#5. Create the PVC:
[root@K8s-ansible Dynamic-storage-volumes]#cat pvc-create.yaml 
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: myserver-myapp-dynamic-pvc
  namespace: myserver
spec:
  storageClassName: mooreyxia-nfs-storage #StorageClass created above
  accessModes:
    - ReadWriteMany #access mode
  resources:
    requests:
      storage: 500Mi #requested capacity
     
[root@K8s-ansible Dynamic-storage-volumes]#kubectl apply -f pvc-create.yaml 
persistentvolumeclaim/myserver-myapp-dynamic-pvc created

[root@K8s-ansible ~]#kubectl get pvc -n myserver
NAME                         STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS            AGE
myserver-myapp-dynamic-pvc   Bound    pvc-b5ae1f9c-8569-4645-8398-0571b6defa6c   500Mi      RWX            mooreyxia-nfs-storage   2m17s

#Confirm on the NFS server that the volume directory was created (the provisioner names it ${namespace}-${pvcName}-${pvName})
[root@K8s-haproxy01 ~]#ls /data/volumes
myserver-myserver-myapp-dynamic-pvc-pvc-b5ae1f9c-8569-4645-8398-0571b6defa6c

#Seed the volume with data for testing
[root@K8s-haproxy01 myserver-myserver-myapp-dynamic-pvc-pvc-b5ae1f9c-8569-4645-8398-0571b6defa6c]#cat index.html 
Hello,this is mooreyxia-dynamic-pvc test 

#Create a web service and mount the PVC
[root@K8s-ansible Dynamic-storage-volumes]#cat nginx-server.yaml
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: myserver-myapp 
  name: myserver-myapp-deployment-name
  namespace: myserver
spec:
  replicas: 1 
  selector:
    matchLabels:
      app: myserver-myapp-frontend
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend
    spec:
      containers:
        - name: myserver-myapp-container
          image: nginx:1.20.0 
          #imagePullPolicy: Always
          volumeMounts:
          - mountPath: "/usr/share/nginx/html/statics"
            name: statics-datadir
      volumes:
        - name: statics-datadir
          persistentVolumeClaim:
            claimName: myserver-myapp-dynamic-pvc 

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: myserver-myapp-service
  name: myserver-myapp-service-name
  namespace: myserver
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    targetPort: 80
    nodePort: 30080
  selector:
    app: myserver-myapp-frontend

[root@K8s-ansible Dynamic-storage-volumes]#kubectl apply -f nginx-server.yaml 
deployment.apps/myserver-myapp-deployment-name created
service/myserver-myapp-service-name created

#Confirm the Pod is running
[root@K8s-ansible Dynamic-storage-volumes]#kubectl get pod -A |grep myserver-myapp-deployment
myserver               myserver-myapp-deployment-name-65ff65446f-sr494     1/1     Running   0              105s
[root@K8s-ansible Dynamic-storage-volumes]#kubectl describe pod myserver-myapp-deployment-name-65ff65446f-sr494 -n myserver
Name:             myserver-myapp-deployment-name-65ff65446f-sr494
Namespace:        myserver
Priority:         0
Service Account:  default
Node:             192.168.11.215/192.168.11.215
Start Time:       Wed, 05 Apr 2023 05:10:06 +0000
Labels:           app=myserver-myapp-frontend
                  pod-template-hash=65ff65446f
Annotations:      <none>
Status:           Running
IP:               10.200.67.14
IPs:
  IP:           10.200.67.14
Controlled By:  ReplicaSet/myserver-myapp-deployment-name-65ff65446f
Containers:
  myserver-myapp-container:
    Container ID:   containerd://2e29e2b96865c93b8ee83375cfbb067eae358ba89e662ec1741a9d9c0b6a17aa
    Image:          nginx:1.20.0
    Image ID:       docker.io/library/nginx@sha256:ea4560b87ff03479670d15df426f7d02e30cb6340dcd3004cdfc048d6a1d54b4
    Port:           <none>
    Host Port:      <none>
    State:          Running
      Started:      Wed, 05 Apr 2023 05:10:35 +0000
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /usr/share/nginx/html/statics from statics-datadir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-bjjst (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  statics-datadir:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  myserver-myapp-dynamic-pvc
    ReadOnly:   false
  kube-api-access-bjjst:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  2m7s  default-scheduler  Successfully assigned myserver/myserver-myapp-deployment-name-65ff65446f-sr494 to 192.168.11.215
  Normal  Pulling    2m4s  kubelet            Pulling image "nginx:1.20.0"
  Normal  Pulled     99s   kubelet            Successfully pulled image "nginx:1.20.0" in 25.786403295s (25.786428156s including waiting)
  Normal  Created    98s   kubelet            Created container myserver-myapp-container
  Normal  Started    96s   kubelet            Started container myserver-myapp-container
  
#Update the load balancer listen address and port
[root@K8s-haproxy01 ~]#cat /etc/haproxy/haproxy.cfg 
 ...
listen myserver-80
    bind 192.168.11.242:80
    mode tcp
    server K8s-master01 192.168.11.211:30080 check inter 3000 fall 2 rise 5
    server K8s-master02 192.168.11.212:30080 check inter 3000 fall 2 rise 5
    server K8s-master03 192.168.11.213:30080 check inter 3000 fall 2 rise 5

#Test access to the content on the dynamic volume
http://192.168.11.242/statics/index.html


  • ConfigMap


  • ConfigMap decouples configuration from the image: configuration is stored in a ConfigMap object and then mounted into the Pod as a volume (or injected as environment variables), importing the configuration into the Pod. A quick imperative-creation sketch follows this list.
  • Use cases:
  • Define global environment variables for a Pod via a ConfigMap.
  • Pass command-line arguments to a Pod; for example, the username and password of mysql -u -p can be supplied via a ConfigMap.
  • Provide configuration files to container services in a Pod; the files are used by mounting them into the container.
  • Notes:
  • A ConfigMap must be created before the Pods that use it.
  • A Pod can only use ConfigMaps in its own namespace, i.e. a ConfigMap cannot be used across namespaces.
  • Typically used for non-sensitive, unencrypted configuration.
  • A ConfigMap usually holds less than 1MB of configuration.
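
For quick experiments, a ConfigMap can also be created imperatively instead of from a YAML file; a minimal sketch (the names demo-config/demo-files and the literal values are illustrative):

#create from literal key/value pairs
kubectl create configmap demo-config --from-literal=username=user1 --from-literal=password=12345678
#create from a file; the key defaults to the file name
kubectl create configmap demo-files --from-file=mysite.conf=./mysite.conf
#inspect the result
kubectl get configmap demo-config -o yaml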

#Case: provide a configuration file to containers in a Pod via a ConfigMap
[root@K8s-ansible yaml-case]#cat configmap-test-case.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
data:
 default: |
    server {
       listen       80;
       server_name  www.mysite.com;
       index        index.html index.php index.htm;

       location / {
           root /data/nginx/html;
           if (!-e $request_filename) {
               rewrite ^/(.*) /index.html last;
           }
       }
    }


---
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.20.0
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /data/nginx/html
          name: nginx-static-dir
        - name: nginx-config
          mountPath:  /etc/nginx/conf.d
      volumes:
      - name: nginx-static-dir
        hostPath:
          path: /data/nginx
      - name: nginx-config
        configMap:
          name: nginx-config
          items:
             - key: default
               path: mysite.conf

---
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30019
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-80

[root@K8s-ansible yaml-case]#kubectl apply -f configmap-test-case.yaml 
configmap/nginx-config created
deployment.apps/nginx-deployment created
service/ng-deploy-80 created
[root@K8s-ansible yaml-case]#kubectl get pod 
NAME                                READY   STATUS    RESTARTS   AGE
nginx-deployment-865fd99f75-76x24   1/1     Running   0          6m29s

[root@K8s-ansible yaml-case]#kubectl describe pod nginx-deployment-865fd99f75-76x24
Name:             nginx-deployment-865fd99f75-76x24
Namespace:        default
Priority:         0
Service Account:  default
Node:             192.168.11.216/192.168.11.216
Start Time:       Wed, 05 Apr 2023 08:59:35 +0000
Labels:           app=ng-deploy-80
                  pod-template-hash=865fd99f75
Annotations:      <none>
Status:           Running
IP:               10.200.128.150
IPs:
  IP:           10.200.128.150
Controlled By:  ReplicaSet/nginx-deployment-865fd99f75
Containers:
  ng-deploy-80:
    Container ID:   containerd://255e64893f3884fdfe2b4ee51377462abc26f6367e771ca5894603e3ad8b21ef
    Image:          nginx:1.20.0
    Image ID:       docker.io/library/nginx@sha256:ea4560b87ff03479670d15df426f7d02e30cb6340dcd3004cdfc048d6a1d54b4
    Port:           80/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Wed, 05 Apr 2023 09:00:07 +0000
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /data/nginx/html from nginx-static-dir (rw)
      /etc/nginx/conf.d from nginx-config (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-sl2hs (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  nginx-static-dir:
    Type:          HostPath (bare host directory volume)
    Path:          /data/nginx
    HostPathType:  
  nginx-config:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      nginx-config
    Optional:  false
  kube-api-access-sl2hs:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type    Reason     Age    From               Message
  ----    ------     ----   ----               -------
  Normal  Scheduled  6m49s  default-scheduler  Successfully assigned default/nginx-deployment-865fd99f75-76x24 to 192.168.11.216
  Normal  Pulling    6m48s  kubelet            Pulling image "nginx:1.20.0"
  Normal  Pulled     6m17s  kubelet            Successfully pulled image "nginx:1.20.0" in 30.415107412s (30.415129969s including waiting)
  Normal  Created    6m17s  kubelet            Created container ng-deploy-80
  Normal  Started    6m17s  kubelet            Started container ng-deploy-80

[root@K8s-ansible yaml-case]#kubectl exec -it  nginx-deployment-865fd99f75-76x24 bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-deployment-865fd99f75-76x24:/# cat /etc/nginx/conf.d/mysite.conf 
server {
   listen       80;
   server_name  www.mysite.com;
   index        index.html index.php index.htm;

   location / {
       root /data/nginx/html;
       if (!-e $request_filename) {
           rewrite ^/(.*) /index.html last;
       }
   }
}


#Case: define global environment variables for a Pod via a ConfigMap
[root@K8s-ansible yaml-case]#cat configmap-env-test-case.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
data:
  username: "user1"
  password: "12345678"

---
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx 
        env:
        - name: MY_USERNAME
          valueFrom:
            configMapKeyRef:
              name: nginx-config
              key: username
        - name: MY_PASSWORD
          valueFrom:
            configMapKeyRef:
              name: nginx-config
              key: password
        ###### setting a literal value directly is simpler
        - name: "password"
          value: "123456"
        ports:
        - containerPort: 80

[root@K8s-ansible yaml-case]#kubectl apply -f configmap-env-test-case.yaml 
configmap/nginx-config created
deployment.apps/nginx-deployment created

[root@K8s-ansible yaml-case]#kubectl get pod
NAME                                READY   STATUS    RESTARTS   AGE
nginx-deployment-6d7799d478-rcnn8   1/1     Running   0          27s
[root@K8s-ansible yaml-case]#kubectl describe pod nginx-deployment-6d7799d478-rcnn8
Name:             nginx-deployment-6d7799d478-rcnn8
Namespace:        default
Priority:         0
Service Account:  default
Node:             192.168.11.216/192.168.11.216
Start Time:       Wed, 05 Apr 2023 09:17:22 +0000
Labels:           app=ng-deploy-80
                  pod-template-hash=6d7799d478
Annotations:      <none>
Status:           Running
IP:               10.200.128.151
IPs:
  IP:           10.200.128.151
Controlled By:  ReplicaSet/nginx-deployment-6d7799d478
Containers:
  ng-deploy-80:
    Container ID:   containerd://78c7ac006ae282abb9920e01c8e0c61e93f4af6623f1a61172a6fbaa86e79ddb
    Image:          nginx
    Image ID:       docker.io/library/nginx@sha256:0d17b565c37bcbd895e9d92315a05c1c3c9a29f762b011a10c54a66cd53c9b31
    Port:           80/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Wed, 05 Apr 2023 09:17:40 +0000
    Ready:          True
    Restart Count:  0
    Environment:
      MY_USERNAME:  <set to the key 'username' of config map 'nginx-config'>  Optional: false
      MY_PASSWORD:  <set to the key 'password' of config map 'nginx-config'>  Optional: false
      password:     123456
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-s44xd (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  kube-api-access-s44xd:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  38s   default-scheduler  Successfully assigned default/nginx-deployment-6d7799d478-rcnn8 to 192.168.11.216
  Normal  Pulling    37s   kubelet            Pulling image "nginx"
  Normal  Pulled     21s   kubelet            Successfully pulled image "nginx" in 15.691316716s (15.691376518s including waiting)
  Normal  Created    21s   kubelet            Created container ng-deploy-80
  Normal  Started    20s   kubelet            Started container ng-deploy-80

[root@K8s-ansible yaml-case]#kubectl exec -it nginx-deployment-6d7799d478-rcnn8 bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-deployment-6d7799d478-rcnn8:/# env |grep password
password=123456
root@nginx-deployment-6d7799d478-rcnn8:/# env |grep MY_USERNAME
MY_USERNAME=user1
root@nginx-deployment-6d7799d478-rcnn8:/# env |grep MY_PASSWORD
MY_PASSWORD=12345678
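
If every key of a ConfigMap should become an environment variable, envFrom imports them all at once instead of listing each key; a sketch of just the container spec, reusing the nginx-config ConfigMap above:

      containers:
      - name: ng-deploy-80
        image: nginx
        envFrom:
        - configMapRef:
            name: nginx-config #imports username and password as env vars of the same names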

  • Secret


  • A Secret, like a ConfigMap, provides additional configuration to Pods, but a Secret is an object that holds a small amount of sensitive data such as passwords, tokens, or keys.
  • A Secret's name must be a valid DNS subdomain.
  • Each Secret may be at most 1MiB, mainly to keep very large Secrets from exhausting API server and kubelet memory; creating many small Secrets can also exhaust memory, and resource quotas can cap the number of Secrets per namespace.
  • When creating a Secret from a YAML file you may set the data and/or stringData fields; both are optional. Every value under data must be a base64-encoded string. If you would rather skip the base64 conversion, use stringData instead, which accepts arbitrary plain (unencoded) strings.
  • Secret types


  • Opaque format

#Case - Opaque
[root@K8s-ansible ~]#echo admin | base64
YWRtaW4K
[root@K8s-ansible ~]#echo YWRtaW4K | base64 -d
admin
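
Note that plain echo appends a trailing newline, which gets encoded into the value (YWRtaW4K decodes to "admin\n"); when the value must not contain the newline, use echo -n:

[root@K8s-ansible ~]#echo -n admin | base64
YWRtaW4=
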
[root@K8s-ansible secret]#cat 1-secret-Opaque-data.yaml
apiVersion: v1
kind: Secret
metadata:
  name: mysecret-data
  namespace: myserver
type: Opaque
data: #Opaque - data values must be base64-encoded (encoded, not encrypted) beforehand
  user: YWRtaW4K 
  password: MTIzNDU2Cg==
  age: MTgK

[root@K8s-ansible secret]#kubectl apply -f 1-secret-Opaque-data.yaml 
secret/mysecret-data created

[root@K8s-ansible secret]#kubectl get secrets -n myserver
NAME            TYPE     DATA   AGE
mysecret-data   Opaque   3      35s

#Values are decoded automatically when mounted into a container (kubectl get still shows them base64-encoded)
[root@K8s-ansible secret]#kubectl get secrets -n myserver -o yaml
apiVersion: v1
items:
- apiVersion: v1
  data:
    age: MTgK
    password: MTIzNDU2Cg==
    user: YWRtaW4K
  kind: Secret
  metadata:
    annotations:
      kubectl.kubernetes.io/last-applied-configuration: |
        {"apiVersion":"v1","data":{"age":"MTgK","password":"MTIzNDU2Cg==","user":"YWRtaW4K"},"kind":"Secret","metadata":{"annotations":{},"name":"mysecret-data","namespace":"myserver"},"type":"Opaque"}
    creationTimestamp: "2023-04-05T10:47:08Z"
    name: mysecret-data
    namespace: myserver
    resourceVersion: "277525"
    uid: e1848052-05a3-4326-a8f1-57cafd8c71ef
  type: Opaque
kind: List
metadata:
  resourceVersion: ""

#Case - stringData
[root@K8s-ansible secret]#cat 2-secret-Opaque-stringData.yaml
apiVersion: v1
kind: Secret
metadata:
  name: mysecret-stringdata
  namespace: myserver
type: Opaque
stringData:
  user: 'admin'
  password: '123456'
[root@K8s-ansible secret]#kubectl apply -f 2-secret-Opaque-stringData.yaml 
secret/mysecret-stringdata created
[root@K8s-ansible secret]#kubectl get secrets -n myserver -o yaml
apiVersion: v1
items:
- apiVersion: v1
  data:
    password: MTIzNDU2
    user: YWRtaW4=
  kind: Secret
  metadata:
    annotations:
      kubectl.kubernetes.io/last-applied-configuration: |
        {"apiVersion":"v1","kind":"Secret","metadata":{"annotations":{},"name":"mysecret-stringdata","namespace":"myserver"},"stringData":{"password":"123456","user":"admin"},"type":"Opaque"}
    creationTimestamp: "2023-04-05T10:57:04Z"
    name: mysecret-stringdata
    namespace: myserver
    resourceVersion: "279188"
    uid: 8d505f9f-5821-4974-a29d-b7f58848bdf6
  type: Opaque
kind: List
metadata:
  resourceVersion: ""
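
Equivalent Secrets can also be created imperatively; a minimal sketch (mysecret-cli is an illustrative name):

kubectl create secret generic mysecret-cli --from-literal=user=admin --from-literal=password=123456 -n myserver
#read one key back, decoded
kubectl get secret mysecret-cli -n myserver -o jsonpath='{.data.user}' | base64 -d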

  • Secret mount workflow



#Create a service and mount the Secret
[root@K8s-ansible secret]#kubectl get secrets -n myserver 
NAME                  TYPE     DATA   AGE
mysecret-data         Opaque   3      2s
mysecret-stringdata   Opaque   2      6m45s
[root@K8s-ansible secret]#vim 3-secret-Opaquemount.yaml
[root@K8s-ansible secret]#cat 3-secret-Opaquemount.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myserver-myapp-app1-deployment
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myserver-myapp-app1
  template:
    metadata:
      labels:
        app: myserver-myapp-app1
    spec:
      containers:
      - name: myserver-myapp-app1
        image: tomcat:7.0.94-alpine
        ports:
        - containerPort: 8080
        volumeMounts:
        - mountPath: /data/myserver/auth
          name: myserver-auth-secret 
      volumes:
      - name: myserver-auth-secret
        secret:
          secretName: mysecret-data #mount the specified Secret; values are base64-decoded when mounted
---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-app1
  namespace: myserver
spec:
  ports:
  - name: http
    port: 8080
    targetPort: 8080
    nodePort: 30018
    protocol: TCP
  type: NodePort
  selector:
    app: myserver-myapp-app1

[root@K8s-ansible secret]#kubectl apply -f 3-secret-Opaquemount.yaml 
deployment.apps/myserver-myapp-app1-deployment created
service/myserver-myapp-app1 created

[root@K8s-ansible secret]#kubectl describe pod myserver-myapp-app1-deployment-6f68468b89-7wdgt -n myserver
Name:             myserver-myapp-app1-deployment-6f68468b89-7wdgt
Namespace:        myserver
Priority:         0
Service Account:  default
Node:             192.168.11.216/192.168.11.216
Start Time:       Wed, 05 Apr 2023 11:10:24 +0000
Labels:           app=myserver-myapp-app1
                  pod-template-hash=6f68468b89
Annotations:      <none>
Status:           Running
IP:               10.200.128.152
IPs:
  IP:           10.200.128.152
Controlled By:  ReplicaSet/myserver-myapp-app1-deployment-6f68468b89
Containers:
  myserver-myapp-app1:
    Container ID:   containerd://645805cc57b94e744ceaa4eb6c34cb766d13dd08df091b0b0e0d1d559cbb888b
    Image:          tomcat:7.0.94-alpine
    Image ID:       docker.io/library/tomcat@sha256:8eaa7fb99223ad7d00503080adf6de6f1da02993050d7a43ed2f84ab06d79ef8
    Port:           8080/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Wed, 05 Apr 2023 11:10:45 +0000
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /data/myserver/auth from myserver-auth-secret (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-plfh9 (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  myserver-auth-secret:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  mysecret-data
    Optional:    false
  kube-api-access-plfh9:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason       Age    From               Message
  ----     ------       ----   ----               -------
  Normal   Scheduled    8m29s  default-scheduler  Successfully assigned myserver/myserver-myapp-app1-deployment-6f68468b89-7wdgt to 192.168.11.216
  Warning  FailedMount  8m28s  kubelet            MountVolume.SetUp failed for volume "myserver-auth-secret" : failed to sync secret cache: timed out waiting for the condition
  Normal   Pulling      8m27s  kubelet            Pulling image "tomcat:7.0.94-alpine"
  Normal   Pulled       8m9s   kubelet            Successfully pulled image "tomcat:7.0.94-alpine" in 17.723782118s (17.723803328s including waiting)
  Normal   Created      8m9s   kubelet            Created container myserver-myapp-app1
  Normal   Started      8m8s   kubelet            Started container myserver-myapp-app1

#Enter the Pod and verify the mounted data
[root@K8s-ansible secret]#kubectl exec -it myserver-myapp-app1-deployment-6f68468b89-7wdgt bash  -n myserver
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
bash-4.4# ls /data/myserver/auth
age       password  user
bash-4.4# cat /data/myserver/auth/age 
18
bash-4.4# cat /data/myserver/auth/password 
123456
bash-4.4# cat /data/myserver/auth/user 
admin

#Locate the mounted files on the host
[root@K8s-noded03 ~]#find /var/lib/kubelet/ -name password
/var/lib/kubelet/pods/858da009-cd68-4f48-acda-ef745614cd79/volumes/kubernetes.io~secret/myserver-auth-secret/password
/var/lib/kubelet/pods/858da009-cd68-4f48-acda-ef745614cd79/volumes/kubernetes.io~secret/myserver-auth-secret/..2023_04_05_11_10_25.2570199853/password
[root@K8s-noded03 ~]#cd /var/lib/kubelet/pods/858da009-cd68-4f48-acda-ef745614cd79/volumes/kubernetes.io~secret/myserver-auth-secret/
[root@K8s-noded03 myserver-auth-secret]#ls
age  password  user
[root@K8s-noded03 myserver-auth-secret]#cat age password user 
18
123456
admin

  • kubernetes.io/tls

#Case: Secret type kubernetes.io/tls - providing a certificate for nginx
#Self-signed certificates:
[root@K8s-ansible ~]#mkdir certs
[root@K8s-ansible ~]#cd certs/
[root@K8s-ansible certs]#openssl req -x509 -sha256 -newkey rsa:4096 -keyout ca.key -out ca.crt -days 3560 -nodes -subj '/CN=www.ca.com'
...(RSA key generation output trimmed)
-----
[root@K8s-ansible certs]#openssl req -new -newkey rsa:4096 -keyout server.key -out server.csr -nodes -subj '/CN=www.mysite.com'
...+.........+......+.+.....+...............+...+.+......+...+...............+.....+...+....+......+.........+........+...+..........+..+...+....+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*.+................+...+..+...+.......+...........+....+...+...+..+.+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*............+.+.....+.+...........+.......+...........+......+.........+......+....+..+.......+......+..+..........+........+.......+........+......+.........................+.....+....+.....+....+...............+........+...+.+...+...+...+..+.......+.................+....+..........................+...................+..+............+.+.....................+.....+....+.....+...............+.+......+.....+...+.+...+...............+......+........................+.................+.........+.......+...+...........+.............+...........+......+...............+...+.............+........+......................+...+.....+.+......+.....+.............+..+...+....+......+......+...+..+......+....+..................+...+...+.....+......+...+......+.......+.....+...+......+..................+.......+...........+....+...+...........+......................+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
....+........+......+....+......+............+..+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*...+...........+.......+..+...+.+.....+......+.+..+.+..+...+.......+........+.+...+...+..............+...+.......+...+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*.+...+...............+...+..+.+.........+............+......+........+.......+...........................+...+...+...........+...+...+..........+............+.....+.......+.....+.+.....+...+......+.+.........+...+......+.....+....+...+..+.+......+.........+............+...+.....+....+...........+.+.....+.+.........+........+...+....+.........+.......................................+...+.....+...+....+...........+.......+.........+..............+......+....+.....+......+.+............+..+....+............+..+............+...............+.........+.....................+.........+.....................+....+..+..........+...+.....+.+..............+......+....+.....+....+........+...+.+.........+..+..........+............+..+...+.+......+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-----
[root@K8s-ansible certs]#openssl x509 -req -sha256 -days 3650 -in server.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out server.crt
Certificate request self-signature ok
subject=CN = www.mysite.com
[root@K8s-ansible certs]#kubectl create secret tls myserver-tls-key --cert=./server.crt --key=./server.key -n myserver
secret/myserver-tls-key created
[root@K8s-ansible certs]#kubectl get secrets -n myserver |grep myserver-tls-key
myserver-tls-key      kubernetes.io/tls   2      6m18s
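
A tls Secret stores the certificate and key under the fixed keys tls.crt and tls.key, which is why the nginx config below references those file names; a quick verification sketch:

kubectl get secret myserver-tls-key -n myserver -o yaml   #data keys: tls.crt, tls.key
kubectl get secret myserver-tls-key -n myserver -o jsonpath='{.data.tls\.crt}' | base64 -d | openssl x509 -noout -subject -dates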

#Create the nginx web service and use the certificate:
[root@K8s-ansible secret]#cat 4-secret-tls.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
  namespace: myserver
data:
 default: |
    server {
       listen       80;
       server_name  www.mysite.com;
       listen 443 ssl;
       ssl_certificate /etc/nginx/conf.d/certs/tls.crt;
       ssl_certificate_key /etc/nginx/conf.d/certs/tls.key;

       location / {
           root /usr/share/nginx/html; 
           index index.html;
           if ($scheme = http ){ 
              rewrite / https://www.mysite.com permanent;
           }  

           if (!-e $request_filename) {
               rewrite ^/(.*) /index.html last;
           }
       }
    }

---
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myserver-myapp-frontend-deployment
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myserver-myapp-frontend
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend
    spec:
      containers:
      - name: myserver-myapp-frontend
        image: nginx:1.20.2-alpine 
        ports:
          - containerPort: 80
        volumeMounts:
          - name: nginx-config
            mountPath:  /etc/nginx/conf.d/myserver
          - name: myserver-tls-key
            mountPath:  /etc/nginx/conf.d/certs
      volumes:
      - name: nginx-config
        configMap:
          name: nginx-config
          items:
             - key: default
               path: mysite.conf
      - name: myserver-tls-key
        secret:
          secretName: myserver-tls-key 


---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-frontend
  namespace: myserver
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    targetPort: 80
    nodePort: 30020
    protocol: TCP
  - name: https
    port: 443
    targetPort: 443
    nodePort: 30019
    protocol: TCP
  selector:
    app: myserver-myapp-frontend 

[root@K8s-ansible secret]#kubectl apply -f 4-secret-tls.yaml 
configmap/nginx-config created
deployment.apps/myserver-myapp-frontend-deployment created
service/myserver-myapp-frontend created

[root@K8s-ansible secret]#kubectl describe pod myserver-myapp-frontend-deployment-5cf6b65d59-25rbs -n myserver
Name:             myserver-myapp-frontend-deployment-5cf6b65d59-25rbs
Namespace:        myserver
Priority:         0
Service Account:  default
Node:             192.168.11.215/192.168.11.215
Start Time:       Wed, 05 Apr 2023 12:27:36 +0000
Labels:           app=myserver-myapp-frontend
                  pod-template-hash=5cf6b65d59
Annotations:      <none>
Status:           Running
IP:               10.200.67.18
IPs:
  IP:           10.200.67.18
Controlled By:  ReplicaSet/myserver-myapp-frontend-deployment-5cf6b65d59
Containers:
  myserver-myapp-frontend:
    Container ID:   containerd://138494836cbd3106ec2fff6ccc0ca07766439ef9c3826c275dd347d253449465
    Image:          nginx:1.20.2-alpine
    Image ID:       docker.io/library/nginx@sha256:74694f2de64c44787a81f0554aa45b281e468c0c58b8665fafceda624d31e556
    Port:           80/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Wed, 05 Apr 2023 12:27:59 +0000
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /etc/nginx/conf.d/certs from myserver-tls-key (rw)
      /etc/nginx/conf.d/myserver from nginx-config (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-ptmbv (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  nginx-config:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      nginx-config
    Optional:  false
  myserver-tls-key:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  myserver-tls-key
    Optional:    false
  kube-api-access-ptmbv:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type    Reason     Age    From               Message
  ----    ------     ----   ----               -------
  Normal  Scheduled  9m40s  default-scheduler  Successfully assigned myserver/myserver-myapp-frontend-deployment-5cf6b65d59-25rbs to 192.168.11.215
  Normal  Pulling    9m38s  kubelet            Pulling image "nginx:1.20.2-alpine"
  Normal  Pulled     9m17s  kubelet            Successfully pulled image "nginx:1.20.2-alpine" in 20.521643664s (20.521660994s including waiting)
  Normal  Created    9m17s  kubelet            Created container myserver-myapp-frontend
  Normal  Started    9m17s  kubelet            Started container myserver-myapp-frontend

#Enter the Pod, make nginx include the SSL config file, and open port 443
[root@K8s-ansible secret]#kubectl exec -it myserver-myapp-frontend-deployment-5cf6b65d59-25rbs sh  -n myserver
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # ls /etc/nginx/conf.d/
certs         default.conf  myserver
/ # cat /etc/nginx/conf.d/myserver/mysite.conf 
server {
   listen       80;
   server_name  www.mysite.com;
   listen 443 ssl;
   ssl_certificate /etc/nginx/conf.d/certs/tls.crt;
   ssl_certificate_key /etc/nginx/conf.d/certs/tls.key;

   location / {
       root /usr/share/nginx/html; 
       index index.html;
       if ($scheme = http ){ 
          rewrite / https://www.mysite.com permanent;
       }  

       if (!-e $request_filename) {
           rewrite ^/(.*) /index.html last;
       }
   }
}
/ # vi /etc/nginx/nginx.conf 
/ # cat /etc/nginx/nginx.conf 

user  nginx;
worker_processes  auto;

error_log  /var/log/nginx/error.log notice;
pid        /var/run/nginx.pid;


events {
    worker_connections  1024;
}


http {
    include       /etc/nginx/mime.types;
    default_type  application/octet-stream;

    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  /var/log/nginx/access.log  main;

    sendfile        on;
    #tcp_nopush     on;

    keepalive_timeout  65;

    #gzip  on;

    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/conf.d/myserver/*.conf; #this include must be added
}

/ # nginx -s reload
2023/04/05 12:49:17 [notice] 56#56: signal process started

#Confirm port 443 is open
/ # netstat -tanlp
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 0.0.0.0:80              0.0.0.0:*               LISTEN      1/nginx: master pro
tcp        0      0 0.0.0.0:443             0.0.0.0:*               LISTEN      1/nginx: master pro
tcp        0      0 :::80                   :::*                    LISTEN      1/nginx: master pro

#Configure the load balancer to forward to nodePort 30019
[root@K8s-haproxy01 ~]#cat /etc/haproxy/haproxy.cfg 
...

listen myserver-443
    bind 192.168.11.242:443
    mode tcp
    server K8s-master01 192.168.11.211:30019 check inter 3000 fall 2 rise 5
    server K8s-master02 192.168.11.212:30019 check inter 3000 fall 2 rise 5
    server K8s-master03 192.168.11.213:30019 check inter 3000 fall 2 rise 5

[root@K8s-haproxy01 ~]#systemctl restart haproxy
[root@K8s-haproxy01 ~]#ss -nltp|grep 443
LISTEN 0      4096   192.168.11.241:6443       0.0.0.0:*    users:(("haproxy",pid=1211,fd=10))                      
LISTEN 0      4096   192.168.11.242:443        0.0.0.0:*    users:(("haproxy",pid=1211,fd=12))                      
LISTEN 0      4096   192.168.11.241:443        0.0.0.0:*    users:(("haproxy",pid=1211,fd=9))              
#Test access
#Name resolution (e.g. /etc/hosts)
192.168.11.242 www.mysite.com
https://www.mysite.com/

#Inspect the site certificate
curl -vk https://www.mysite.com/

Confirm the certificate in the browser.

  • kubernetes.io/dockerconfigjson
  • Stores docker registry credentials for image pulls, so that every node can pull private images without logging in to the registry.

#Case:
#Create the secret:
Method 1: create it with a command
kubectl create secret docker-registry keyName \
--docker-server=registry.myserver.com \
--docker-username=USER \
--docker-password=PASSWORD

Method 2: create it from a docker auth file:
root@k8s-master1:~# docker login --username=rooroot@aliyun.com registry.cn-qingdao.aliyuncs.com
root@k8s-master1:~# kubectl create secret generic aliyun-registry-image-pull-key \
--from-file=.dockerconfigjson=/root/.docker/config.json \
--type=kubernetes.io/dockerconfigjson \
-n myserver
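
Either way, the resulting Secret stores the registry auth under the .dockerconfigjson key; a quick check sketch:

kubectl get secret aliyun-registry-image-pull-key -n myserver -o jsonpath='{.data.\.dockerconfigjson}' | base64 -d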

#Create the Pod
[root@K8s-ansible certs]#cat 5-secret-imagePull.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myserver-myapp-frontend-deployment
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myserver-myapp-frontend
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend
    spec:
      containers:
      - name: myserver-myapp-frontend
        image: registry.cn-qingdao.aliyuncs.com/zhangshijie/nginx:1.16.1-alpine-perl 
        ports:
          - containerPort: 80
      imagePullSecrets:
        - name: aliyun-registry-image-pull-key #the image-pull secret created above

---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-frontend
  namespace: myserver
spec:
  ports:
  - name: http
    port: 80
    targetPort: 80
    nodePort: 30022
    protocol: TCP
  type: NodePort
  selector:
    app: myserver-myapp-frontend
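
As an alternative to listing imagePullSecrets in every Pod spec, the secret can be attached to the namespace's default ServiceAccount so Pods pick it up automatically; a sketch:

kubectl patch serviceaccount default -n myserver \
  -p '{"imagePullSecrets":[{"name":"aliyun-registry-image-pull-key"}]}'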

  • StatefulSet
  • StatefulSet addresses clustered deployment of stateful services and data synchronization between cluster members (MySQL primary/replica, etc.)
  • Pods managed by a StatefulSet have unique, stable Pod names
  • A StatefulSet starts, stops, scales, and reclaims Pods in order
  • Headless Service (requests resolve directly to Pod IPs)
  • Services on Kubernetes can cost roughly 20%~30% in performance; the upside is easy elastic scaling, and the loss can be offset with additional replicas

https://kubernetes.io/zh/docs/concepts/workloads/controllers/statefulset/

#Case
[root@K8s-ansible ~]#cat Statefulset-test-case.yaml
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: StatefulSet 
metadata:
  name: myserver-myapp
  namespace: myserver
spec:
  replicas: 3 #changing the replica count scales Pods up or down in order
  serviceName: "myserver-myapp-service"
  selector:
    matchLabels:
      app: myserver-myapp-frontend
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend
    spec:
      containers:
      - name: myserver-myapp-frontend
        image: nginx:1.20.2-alpine 
        ports:
          - containerPort: 80

---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-service
  namespace: myserver
spec:
  clusterIP: None
  ports:
  - name: http
    port: 80
  selector:
    app: myserver-myapp-frontend 
[root@K8s-ansible ~]#kubectl apply -f Statefulset-test-case.yaml 
statefulset.apps/myserver-myapp created
service/myserver-myapp-service created

[root@K8s-ansible ~]#kubectl get svc -n myserver
NAME                            TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                      AGE
myserver-myapp-service          ClusterIP   None             <none>        80/TCP                       2m30s

[root@K8s-ansible ~]#kubectl get pod -n myserver -o wide
NAME                                                  READY   STATUS    RESTARTS        AGE     IP               NODE             NOMINATED NODE   READINESS GATES
myserver-myapp-0                                      1/1     Running   0               10m     10.200.128.153   192.168.11.216   <none>           <none>
myserver-myapp-1                                      1/1     Running   0               9m46s   10.200.209.25    192.168.11.214   <none>           <none>
myserver-myapp-2                                      1/1     Running   0               9m23s   10.200.128.154   192.168.11.216   <none>           <none>

#From inside a Pod, resolving the Service name returns the backend Pod IPs in rotation
[root@K8s-ansible ~]#kubectl exec -it net-test1 bash -n myserver
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
[root@net-test1 /]# ping myserver-myapp-service 
PING myserver-myapp-service.myserver.svc.mooreyxia.local (10.200.209.25) 56(84) bytes of data.
64 bytes from 10-200-209-25.myserver-myapp-frontend.myserver.svc.mooreyxia.local (10.200.209.25): icmp_seq=1 ttl=62 time=1.33 ms

[root@net-test1 /]# ping myserver-myapp-service 
PING myserver-myapp-service.myserver.svc.mooreyxia.local (10.200.67.18) 56(84) bytes of data.
64 bytes from 10-200-67-18.myserver-myapp-frontend.myserver.svc.mooreyxia.local (10.200.67.18): icmp_seq=1 ttl=62 time=0.835 ms


[root@net-test1 /]# ping myserver-myapp-service 
PING myserver-myapp-service.myserver.svc.mooreyxia.local (10.200.128.153) 56(84) bytes of data.
64 bytes from 10-200-128-153.myserver-myapp-service-name.myserver.svc.mooreyxia.local (10.200.128.153): icmp_seq=1 ttl=63 time=0.140 ms
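
Besides this round-robin resolution of the headless Service name, each StatefulSet Pod also gets a stable per-Pod DNS record of the form <pod-name>.<service-name>.<namespace>.svc.<cluster-domain>, which is how stateful clusters address a specific member; for example, with the cluster domain mooreyxia.local seen above:

#resolves only to the fixed Pod myserver-myapp-0
ping myserver-myapp-0.myserver-myapp-service.myserver.svc.mooreyxia.local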

  • DaemonSet


  • A DaemonSet runs one copy of the same Pod on every node of the current cluster
  • When a new node joins the cluster, the same Pod is created on it as well
  • When a node is removed from the cluster, its Pod is reclaimed by Kubernetes
  • Deleting a DaemonSet deletes all Pods it created

https://kubernetes.io/zh/docs/concepts/workloads/controllers/daemonset/
#The cases below follow the official examples

#Case 1 - deploy nginx as a DaemonSet
[root@K8s-ansible DaemonSet]#cat 1-DaemonSet-webserver.yaml
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: DaemonSet 
metadata:
  name: myserver-myapp
  namespace: myserver
spec:
  selector:
    matchLabels:
      app: myserver-myapp-frontend
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend
    spec:
      tolerations:
      # this toleration is to have the daemonset runnable on master nodes
      # remove it if your masters can't run pods
      - key: node-role.kubernetes.io/master
        operator: Exists #tolerate if the taint exists
        effect: NoSchedule #the tolerated effect
      hostNetwork: true #use the host network, skipping Service forwarding to avoid the performance overhead
      hostPID: true #use the host PID namespace
      containers:
      - name: myserver-myapp-frontend
        image: nginx:1.20.2-alpine 
        ports:
          - containerPort: 80

---
apiVersion: v1 #this Service is optional, since hostNetwork is used above
kind: Service
metadata:
  name: myserver-myapp-frontend
  namespace: myserver
spec:
  ports:
  - name: http
    port: 80
    targetPort: 80
    nodePort: 30028
    protocol: TCP
  type: NodePort
  selector:
    app: myserver-myapp-frontend

[root@K8s-ansible DaemonSet]#kubectl apply -f 1-DaemonSet-webserver.yaml 
daemonset.apps/myserver-myapp created
service/myserver-myapp-frontend created

#Confirm the Pods on all nodes use the host network for external communication
[root@K8s-ansible DaemonSet]#kubectl get pod  -o wide -n myserver 
NAME                                                READY   STATUS    RESTARTS        AGE     IP               NODE             NOMINATED NODE   READINESS GATES
mooreyxia-nginx-deployment-789dfdcb7b-4ccpp         1/1     Running   3 (5h54m ago)   45h     10.200.67.16     192.168.11.215   <none>           <none>
mooreyxia-tomcat-app1-deployment-78699df478-n77xn   1/1     Running   4 (5h53m ago)   2d9h    10.200.128.148   192.168.11.216   <none>           <none>
myserver-myapp-2n7zl                                1/1     Running   0               2m55s   192.168.11.216   192.168.11.216   <none>           <none>
myserver-myapp-45nzk                                1/1     Running   0               2m55s   192.168.11.215   192.168.11.215   <none>           <none>
myserver-myapp-6bdjm                                1/1     Running   0               2m55s   192.168.11.212   192.168.11.212   <none>           <none>
myserver-myapp-7jzzb                                1/1     Running   0               2m55s   192.168.11.214   192.168.11.214   <none>           <none>
myserver-myapp-9jrp7                                1/1     Running   0               2m55s   192.168.11.213   192.168.11.213   <none>           <none>
myserver-myapp-app1-deployment-6f68468b89-7wdgt     1/1     Running   0               3h26m   10.200.128.152   192.168.11.216   <none>           <none>
myserver-myapp-deployment-name-65ff65446f-sr494     1/1     Running   1 (5h54m ago)   9h      10.200.67.17     192.168.11.215   <none>           <none>
myserver-myapp-xm2wn                                1/1     Running   0               2m55s   192.168.11.211   192.168.11.211   <none>           <none>
net-test1                                           1/1     Running   6 (5h53m ago)   6d10h   10.200.128.149   192.168.11.216   <none>           <none>

[root@K8s-noded01 ~]#lsof -i:80
COMMAND    PID            USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
nginx   182902            root    7u  IPv4 666229      0t0  TCP *:http (LISTEN)
nginx   182902            root    8u  IPv6 666230      0t0  TCP *:http (LISTEN)
nginx   182945 systemd-network    7u  IPv4 666229      0t0  TCP *:http (LISTEN)
nginx   182945 systemd-network    8u  IPv6 666230      0t0  TCP *:http (LISTEN)
nginx   182946 systemd-network    7u  IPv4 666229      0t0  TCP *:http (LISTEN)
nginx   182946 systemd-network    8u  IPv6 666230      0t0  TCP *:http (LISTEN)

#The service can be reached directly via the host IP and port
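
Since the Pods bind to the host network, any node IP answers on port 80 directly; for example, using a node IP from the listing above:

curl http://192.168.11.211/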



#Case 2 - deploy fluentd as a DaemonSet for log collection
[root@K8s-ansible DaemonSet]#cat 2-DaemonSet-fluentd.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd-elasticsearch
  namespace: kube-system
  labels:
    k8s-app: fluentd-logging
spec:
  selector:
    matchLabels:
      name: fluentd-elasticsearch
  template:
    metadata:
      labels:
        name: fluentd-elasticsearch
    spec:
      tolerations:
      # this toleration is to have the daemonset runnable on master nodes
      # remove it if your masters can't run pods
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      containers:
      - name: fluentd-elasticsearch
        image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts: #mount host directories into the log-collection app
        - name: varlog
          mountPath: /var/log #host log files
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers #container log files
          readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers

#Case 3 - deploy node-exporter as a DaemonSet for Prometheus monitoring
[root@K8s-ansible DaemonSet]#cat 3-DaemonSet-prometheus.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: node-exporter
  namespace: monitoring 
  labels:
    k8s-app: node-exporter
spec:
  selector:
    matchLabels:
        k8s-app: node-exporter
  template:
    metadata:
      labels:
        k8s-app: node-exporter
    spec:
      tolerations:
        - effect: NoSchedule
          key: node-role.kubernetes.io/master
      containers:
      - image: prom/node-exporter:v1.3.1 
        imagePullPolicy: IfNotPresent
        name: prometheus-node-exporter
        ports:
        - containerPort: 9100
          hostPort: 9100
          protocol: TCP
          name: metrics
        volumeMounts:
        - mountPath: /host/proc
          name: proc
        - mountPath: /host/sys
          name: sys
        - mountPath: /host
          name: rootfs
        args:
        - --path.procfs=/host/proc
        - --path.sysfs=/host/sys
        - --path.rootfs=/host
      volumes:
        - name: proc
          hostPath:
            path: /proc
        - name: sys
          hostPath:
            path: /sys
        - name: rootfs
          hostPath:
            path: /
      hostNetwork: true
      hostPID: true
---
apiVersion: v1
kind: Service
metadata:
  annotations:
    prometheus.io/scrape: "true"
  labels:
    k8s-app: node-exporter
  name: node-exporter
  namespace: monitoring 
spec:
  type: NodePort
  ports:
  - name: http
    port: 9100
    nodePort: 39100
    protocol: TCP
  selector:
    k8s-app: node-exporter
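
Once applied, every node should expose metrics on hostPort 9100 (and via NodePort 39100); a quick check sketch, using one of the node IPs above:

kubectl get daemonset node-exporter -n monitoring
curl -s http://192.168.11.211:9100/metrics | head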

