86 - Cloud-Native Operating System - Production Case: Containerizing a ZooKeeper Cluster

  • Case business logic

(Diagram: case business logic - a three-node zookeeper ensemble running as pods on Kubernetes, exposed through per-instance Services and backed by NFS persistent volumes)

  • Implementation steps
  • Build the zookeeper image

#Prepare the files needed to build the image
[root@K8s-ansible zookeeper]#chmod a+x *.sh
[root@K8s-ansible zookeeper]#ll
total 36900
drwxr-xr-x  4 root root     4096 Apr  9 13:47 ./
drwxr-xr-x 11 root root     4096 Apr  9 02:59 ../
-rw-r--r--  1 root root     1758 Apr  9 13:11 Dockerfile
-rw-r--r--  1 root root    63587 Apr  9 02:59 KEYS
drwxr-xr-x  2 root root     4096 Apr  9 02:59 bin/
-rwxr-xr-x  1 root root      264 Apr  9 02:59 build-command.sh*
drwxr-xr-x  2 root root     4096 Apr  9 02:59 conf/
-rwxr-xr-x  1 root root      278 Apr  9 13:47 entrypoint.sh*
-rw-r--r--  1 root root       91 Apr  9 02:59 repositories
-rw-r--r--  1 root root     2270 Apr  9 02:59 zookeeper-3.12-Dockerfile.tar.gz
-rw-r--r--  1 root root 37676320 Apr  9 02:59 zookeeper-3.4.14.tar.gz
-rw-r--r--  1 root root      836 Apr  9 02:59 zookeeper-3.4.14.tar.gz.asc

#Configure the Alpine package mirrors
[root@K8s-ansible zookeeper]#cat repositories 
http://mirrors.aliyun.com/alpine/v3.6/main
http://mirrors.aliyun.com/alpine/v3.6/community
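
A quick reachability check of the mirror before building can save a failed build later; this is my own addition (the x86_64 index path is an assumption about the build host's architecture):
#Optional: confirm the apk mirror answers from the build host
curl -sI http://mirrors.aliyun.com/alpine/v3.6/main/x86_64/APKINDEX.tar.gz | head -n 1
# expect an HTTP 200 line; anything else means "apk add" will fail during the build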

#Prepare the JDK base image - only about 31MB
[root@K8s-ansible zookeeper]#docker pull elevy/slim_java:8
8: Pulling from elevy/slim_java
88286f41530e: Pull complete 
7141511c4dad: Pull complete 
fd529fe251b3: Pull complete 
Digest: sha256:044e42fb89cda51e83701349a9b79e8117300f4841511ed853f73caf7fc98a51
Status: Downloaded newer image for elevy/slim_java:8
docker.io/elevy/slim_java:8
#Push it to Harbor
[root@K8s-ansible zookeeper]#docker tag elevy/slim_java:8 K8s-harbor01.mooreyxia.com/baseimages/slim_java:8
[root@K8s-ansible zookeeper]#docker push K8s-harbor01.mooreyxia.com/baseimages/slim_java:8
The push refers to repository [K8s-harbor01.mooreyxia.com/baseimages/slim_java]
e053edd72ca6: Pushed 
aba783efb1a4: Pushed 
5bef08742407: Pushed 
8: digest: sha256:817d0af5d4f16c29509b8397784f5d4ec3accb1bfde4e474244ed3be7f41a604 size: 952
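
Before building on top of it, the base image can be sanity-checked; a minimal sketch of mine, assuming the image exposes java on its PATH:
#Optional: verify the JDK inside the base image actually runs
docker run --rm K8s-harbor01.mooreyxia.com/baseimages/slim_java:8 java -version
# expect something like: java version "1.8.0_144"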

#Prepare the zookeeper configuration file - the same base config on every node
[root@K8s-ansible zookeeper]#cat conf/zoo.cfg 
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/zookeeper/data
dataLogDir=/zookeeper/wal
#snapCount=100000
autopurge.purgeInterval=1
clientPort=2181
quorumListenOnAllIPs=true

#Prepare the cluster entrypoint - writes the myid file and appends the server list (ports 2888/3888)
[root@K8s-ansible zookeeper]#cat entrypoint.sh 
#!/bin/bash

echo ${MYID:-1} > /zookeeper/data/myid

if [ -n "$SERVERS" ]; then
    IFS=\, read -a servers <<<"$SERVERS"
    for i in "${!servers[@]}"; do 
        printf "\nserver.%i=%s:2888:3888" "$((1 + $i))" "${servers[$i]}" >> /zookeeper/conf/zoo.cfg
        # emits server.1~3=zookeeper1~3:2888:3888
    done
fi

cd /zookeeper
exec "$@"

#zookeeper - log4j logging configuration
[root@K8s-ansible zookeeper]#cat conf/log4j.properties 
# Define some default values that can be overridden by system properties
zookeeper.root.logger=INFO, CONSOLE, ROLLINGFILE
zookeeper.console.threshold=INFO
zookeeper.log.dir=/zookeeper/log
zookeeper.log.file=zookeeper.log
zookeeper.log.threshold=INFO
zookeeper.tracelog.dir=/zookeeper/log
zookeeper.tracelog.file=zookeeper_trace.log

#
# ZooKeeper Logging Configuration
#

# Format is "<default threshold> (, <appender>)+

# DEFAULT: console appender only
log4j.rootLogger=${zookeeper.root.logger}

# Example with rolling log file
#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE

# Example with rolling log file and tracing
#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE

#
# Log INFO level and above messages to the console
#
log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
log4j.appender.CONSOLE.Threshold=${zookeeper.console.threshold}
log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n

#
# Add ROLLINGFILE to rootLogger to get log file output
#    Log DEBUG level and above messages to a log file
log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
log4j.appender.ROLLINGFILE.Threshold=${zookeeper.log.threshold}
log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/${zookeeper.log.file}

# Max log file size of 10MB
log4j.appender.ROLLINGFILE.MaxFileSize=10MB
# uncomment the next line to limit number of backup files
log4j.appender.ROLLINGFILE.MaxBackupIndex=5

log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n


#
# Add TRACEFILE to rootLogger to get log file output
#    Log DEBUG level and above messages to a log file
log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
log4j.appender.TRACEFILE.Threshold=TRACE
log4j.appender.TRACEFILE.File=${zookeeper.tracelog.dir}/${zookeeper.tracelog.file}

log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
### Notice we are including log4j's NDC here (%x)
log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L][%x] - %m%n


#Prepare the Dockerfile
[root@K8s-ansible zookeeper]#cat Dockerfile 
#FROM harbor-linux38.local.com/linux38/slim_java:8 
# JDK base image pushed to Harbor earlier
FROM K8s-harbor01.mooreyxia.com/baseimages/slim_java:8

# zookeeper version to install
ENV ZK_VERSION 3.4.14
# Alpine is used as the base system; add the pre-configured apk mirror list
ADD repositories /etc/apk/repositories
# Download Zookeeper
COPY zookeeper-3.4.14.tar.gz /tmp/zk.tgz
COPY zookeeper-3.4.14.tar.gz.asc /tmp/zk.tgz.asc
COPY KEYS /tmp/KEYS
RUN apk add --no-cache --virtual .build-deps \
      ca-certificates   \
      gnupg             \
      tar               \
      wget &&           \
    #
    # Install dependencies
    apk add --no-cache  \
      bash &&           \
    #
    #
    # Verify the signature
    export GNUPGHOME="$(mktemp -d)" && \
    gpg -q --batch --import /tmp/KEYS && \
    gpg -q --batch --no-auto-key-retrieve --verify /tmp/zk.tgz.asc /tmp/zk.tgz && \
    #
    # Set up directories
    #
    mkdir -p /zookeeper/data /zookeeper/wal /zookeeper/log && \
    #
    # Install
    tar -x -C /zookeeper --strip-components=1 --no-same-owner -f /tmp/zk.tgz && \
    #
    # Slim down
    cd /zookeeper && \
    cp dist-maven/zookeeper-${ZK_VERSION}.jar . && \
    rm -rf \
      *.txt \
      *.xml \
      bin/README.txt \
      bin/*.cmd \
      conf/* \
      contrib \
      dist-maven \
      docs \
      lib/*.txt \
      lib/cobertura \
      lib/jdiff \
      recipes \
      src \
      zookeeper-*.asc \
      zookeeper-*.md5 \
      zookeeper-*.sha1 && \
    #
    # Clean up
    apk del .build-deps && \
    rm -rf /tmp/* "$GNUPGHOME"

COPY conf /zookeeper/conf/
COPY bin/zkReady.sh /zookeeper/bin/
COPY entrypoint.sh /

ENV PATH=/zookeeper/bin:${PATH} \
    ZOO_LOG_DIR=/zookeeper/log \
    ZOO_LOG4J_PROP="INFO, CONSOLE, ROLLINGFILE" \
    JMXPORT=9010

ENTRYPOINT [ "/entrypoint.sh" ]

CMD [ "zkServer.sh", "start-foreground" ]

EXPOSE 2181 2888 3888 9010

#Build the image and push it to Harbor
[root@K8s-ansible zookeeper]#cat build-command.sh 
#!/bin/bash
TAG=$1
docker build -t K8s-harbor01.mooreyxia.com/demo/zookeeper:${TAG} .
sleep 1
docker push  K8s-harbor01.mooreyxia.com/demo/zookeeper:${TAG}

[root@K8s-ansible zookeeper]#bash build-command.sh v3.4.14
...
Successfully built 4be1c51f39dd
Successfully tagged K8s-harbor01.mooreyxia.com/demo/zookeeper:v3.4.14
The push refers to repository [K8s-harbor01.mooreyxia.com/demo/zookeeper]
e562b485e113: Pushed 
471c0a089ec7: Pushed 
e9c1d174b408: Pushed 
bd3506eb3fca: Pushed 
479b1f22723a: Pushed 
0fdd215d56a7: Pushed 
240cfb0dce70: Pushed 
2c1db90485e1: Pushed 
e053edd72ca6: Mounted from baseimages/slim_java 
aba783efb1a4: Mounted from baseimages/slim_java 
5bef08742407: Mounted from baseimages/slim_java 
v3.4.14: digest: sha256:b6e3fe808f5740371d02b7755b0dc610fad5cea0eb127fe550c0fff33d81e54c size: 2621
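
Before handing the image to Kubernetes, a quick standalone smoke test on the build host is worthwhile; a sketch of mine (the container name zk-smoke and host port 12181 are arbitrary, and nc is assumed to be installed):
#Run the image standalone; without SERVERS the entrypoint appends nothing, so zookeeper starts in single-node mode
docker run -d --name zk-smoke -p 12181:2181 K8s-harbor01.mooreyxia.com/demo/zookeeper:v3.4.14
sleep 5
docker exec zk-smoke /zookeeper/bin/zkServer.sh status   # expect "Mode: standalone"
echo ruok | nc 127.0.0.1 12181                           # expect "imok"
docker rm -f zk-smoke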

  • Test the zookeeper image - skipped in this walkthrough (a minimal smoke test is sketched above); in production, make sure the image actually works before rolling it out
  • Create the PV/PVC

#Prepare the backing storage - NFS in this case
[root@K8s-haproxy01 ~]#mkdir -p /data/k8sdata/mooreyxia/zookeeper-datadir-1 
[root@K8s-haproxy01 ~]#mkdir -p /data/k8sdata/mooreyxia/zookeeper-datadir-2
[root@K8s-haproxy01 ~]#mkdir -p /data/k8sdata/mooreyxia/zookeeper-datadir-3
[root@K8s-haproxy01 ~]#cat /etc/exports 
# /etc/exports: the access control list for filesystems which may be exported
#       to NFS clients.  See exports(5).
#
# Example for NFSv2 and NFSv3:
# /srv/homes       hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
#
# Example for NFSv4:
# /srv/nfs4        gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
# /srv/nfs4/homes  gss/krb5i(rw,sync,no_subtree_check)
#

/data/k8sdata *(rw,no_root_squash)
/data/volumes *(rw,no_root_squash)
[root@K8s-haproxy01 ~]#exportfs -avs
exportfs: /etc/exports [2]: Neither 'subtree_check' or 'no_subtree_check' specified for export "*:/data/k8sdata".
  Assuming default behaviour ('no_subtree_check').
  NOTE: this default has changed since nfs-utils version 1.0.x

exportfs: /etc/exports [3]: Neither 'subtree_check' or 'no_subtree_check' specified for export "*:/data/volumes".
  Assuming default behaviour ('no_subtree_check').
  NOTE: this default has changed since nfs-utils version 1.0.x

exporting *:/data/volumes
exporting *:/data/k8sdata
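
Before wiring these exports into PVs, it helps to confirm they are visible from a Kubernetes worker node; a small check of my own, assuming the NFS client utilities are installed on the node:
#On any k8s node: confirm the NFS exports are visible and mountable
showmount -e 192.168.11.203
# expected to list /data/k8sdata and /data/volumes
mount -t nfs 192.168.11.203:/data/k8sdata/mooreyxia/zookeeper-datadir-1 /mnt && umount /mnt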

#Prepare the PVs - map the NFS exports to PersistentVolumes
[root@K8s-ansible pv]#cat zookeeper-persistentvolume.yaml 
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-1
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce 
  nfs:
    server: 192.168.11.203
    path: /data/k8sdata/mooreyxia/zookeeper-datadir-1 

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-2
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 192.168.11.203 
    path: /data/k8sdata/mooreyxia/zookeeper-datadir-2 

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-3
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 192.168.11.203  
    path: /data/k8sdata/mooreyxia/zookeeper-datadir-3 

#Create the PVs
[root@K8s-ansible pv]#kubectl apply -f zookeeper-persistentvolume.yaml 
persistentvolume/zookeeper-datadir-pv-1 created
persistentvolume/zookeeper-datadir-pv-2 created
persistentvolume/zookeeper-datadir-pv-3 created
#Confirm the PVs are usable - STATUS should show Available
[root@K8s-ansible pv]#kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                                 STORAGECLASS            REASON   AGE
pvc-b5ae1f9c-8569-4645-8398-0571b6defa6c   500Mi      RWX            Retain           Bound       myserver/myserver-myapp-dynamic-pvc   mooreyxia-nfs-storage            4d9h
zookeeper-datadir-pv-1                     20Gi       RWO            Retain           Available                                                                          46s
zookeeper-datadir-pv-2                     20Gi       RWO            Retain           Available                                                                          46s
zookeeper-datadir-pv-3                     20Gi       RWO            Retain           Available                                                                          46s

#Create PVCs for the application pods to use for storage
[root@K8s-ansible pv]#cat zookeeper-persistentvolumeclaim.yaml 
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-1
  namespace: mooreyxia
spec:
  accessModes:
    - ReadWriteOnce # access mode
  volumeName: zookeeper-datadir-pv-1
  resources:
    requests:
      storage: 10Gi # requested storage size
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-2
  namespace: mooreyxia
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-pv-2
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-3
  namespace: mooreyxia
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-pv-3
  resources:
    requests:
      storage: 10Gi

#Create the PVCs
[root@K8s-ansible pv]#kubectl apply -f zookeeper-persistentvolumeclaim.yaml 
persistentvolumeclaim/zookeeper-datadir-pvc-1 created
persistentvolumeclaim/zookeeper-datadir-pvc-2 created
persistentvolumeclaim/zookeeper-datadir-pvc-3 created
#Confirm the PVCs - STATUS shows Bound to the PVs
[root@K8s-ansible pv]#kubectl get pvc -n mooreyxia
NAME                      STATUS   VOLUME                   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
zookeeper-datadir-pvc-1   Bound    zookeeper-datadir-pv-1   20Gi       RWO                           30s
zookeeper-datadir-pvc-2   Bound    zookeeper-datadir-pv-2   20Gi       RWO                           30s
zookeeper-datadir-pvc-3   Bound    zookeeper-datadir-pv-3   20Gi       RWO                           30s
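
Because each claim pins its volume through spec.volumeName, a 10Gi request still binds the whole 20Gi PV, which is why CAPACITY shows 20Gi above; the pairing can be double-checked in one line (my addition):
#Show which PV each claim is bound to and the bound capacity
kubectl get pvc -n mooreyxia -o custom-columns=NAME:.metadata.name,PV:.spec.volumeName,CAPACITY:.status.capacity.storage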

  • Run the zookeeper cluster

#Prepare the Kubernetes manifests - NodePort Services make the cluster reachable from outside the cluster
#Since zookeeper replicates data on its own, it is enough for the zookeeper pods to reach each other through their Services to stay in sync, so a StatefulSet is not strictly required here
[root@K8s-ansible zookeeper]#cat zookeeper1.yaml 
apiVersion: v1
kind: Service
metadata:
  name: zookeeper
  namespace: mooreyxia
spec:
  ports:
    - name: client
      port: 2181 # load-balancing entry point
  selector:
    app: zookeeper # matches all three zookeeper pods, so requests are spread across them
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper1
  namespace: mooreyxia
spec:
  type: NodePort        
  ports:
    - name: client
      port: 2181
      nodePort: 32181
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "1"
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper2
  namespace: mooreyxia
spec:
  type: NodePort        
  ports:
    - name: client
      port: 2181
      nodePort: 32182
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "2"
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper3
  namespace: mooreyxia
spec:
  type: NodePort        
  ports:
    - name: client
      port: 2181
      nodePort: 32183
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "3"
---
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  name: zookeeper1
  namespace: mooreyxia
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "1"
    spec:
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir:
            medium: Memory
        - name: zookeeper-datadir-pvc-1
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-1
      containers:
        - name: server
          image: K8s-harbor01.mooreyxia.com/demo/zookeeper:v3.4.14 
          imagePullPolicy: Always
          env: # environment variables used by the entrypoint/config
            - name: MYID
              value: "1"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3" #每个servers后面都有一个zookeeper
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
          - mountPath: "/zookeeper/data"
            name: zookeeper-datadir-pvc-1 
---
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  name: zookeeper2
  namespace: mooreyxia
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "2"
    spec:
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir:
            medium: Memory
        - name: zookeeper-datadir-pvc-2
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-2
      containers:
        - name: server
          image: K8s-harbor01.mooreyxia.com/demo/zookeeper:v3.4.14 
          imagePullPolicy: Always
          env:
            - name: MYID
              value: "2"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
          - mountPath: "/zookeeper/data"
            name: zookeeper-datadir-pvc-2 
---
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  name: zookeeper3
  namespace: mooreyxia
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "3"
    spec:
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir:
            medium: Memory
        - name: zookeeper-datadir-pvc-3
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-3
      containers:
        - name: server
          image: K8s-harbor01.mooreyxia.com/demo/zookeeper:v3.4.14 
          imagePullPolicy: Always
          env:
            - name: MYID
              value: "3"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
          - mountPath: "/zookeeper/data"
            name: zookeeper-datadir-pvc-3
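
The manifest can be validated client-side before anything is created; an optional step of mine (requires a kubectl version that supports --dry-run=client):
#Optional: validate the manifest without creating any objects
kubectl apply --dry-run=client -f zookeeper1.yaml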

#Create the zookeeper pods
[root@K8s-ansible zookeeper]#kubectl apply -f zookeeper1.yaml 
service/zookeeper created
service/zookeeper1 created
service/zookeeper2 created
service/zookeeper3 created
deployment.apps/zookeeper1 created
deployment.apps/zookeeper2 created
deployment.apps/zookeeper3 created

#Confirm the zookeeper pods are running
[root@K8s-ansible zookeeper]#kubectl get pod -n mooreyxia|grep zookeeper
zookeeper1-67db986b9f-lxhlf                         1/1     Running   1 (3m2s ago)    3m28s
zookeeper2-6786d47d66-7kvql                         1/1     Running   1 (2m45s ago)   3m28s
zookeeper3-56b4f54865-xd2k8                         1/1     Running   1 (2m59s ago)   3m28s
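
Since zookeeper1-3 are NodePort Services, the members can also be probed from outside the cluster with ZooKeeper's four-letter-word commands; a quick sketch of mine against one node IP from this environment (assumes nc is available on the client):
#Probe each member through its NodePort from outside the cluster
echo ruok | nc 192.168.11.211 32181                  # expect "imok"
echo srvr | nc 192.168.11.211 32182 | grep Mode      # follower or leader
echo srvr | nc 192.168.11.211 32183 | grep Mode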

#Check the report collected by kubelet - describe
[root@K8s-ansible zookeeper]#kubectl describe pod zookeeper1-67db986b9f-lxhlf -n mooreyxia
Name:             zookeeper1-67db986b9f-lxhlf
Namespace:        mooreyxia
Priority:         0
Service Account:  default
Node:             192.168.11.215/192.168.11.215
Start Time:       Sun, 09 Apr 2023 14:39:40 +0000
Labels:           app=zookeeper
                  pod-template-hash=67db986b9f
                  server-id=1
Annotations:      <none>
Status:           Running
IP:               10.200.67.33
IPs:
  IP:           10.200.67.33
Controlled By:  ReplicaSet/zookeeper1-67db986b9f
Containers:
  server:
    Container ID:   containerd://79b0be34ddb9df62727282da761f80b7c4ec0ce37cf53bec1c8e5a2e0adc1613
    Image:          K8s-harbor01.mooreyxia.com/demo/zookeeper:v3.4.14
    Image ID:       K8s-harbor01.mooreyxia.com/demo/zookeeper@sha256:b6e3fe808f5740371d02b7755b0dc610fad5cea0eb127fe550c0fff33d81e54c
    Ports:          2181/TCP, 2888/TCP, 3888/TCP
    Host Ports:     0/TCP, 0/TCP, 0/TCP
    State:          Running
      Started:      Sun, 09 Apr 2023 14:40:13 +0000
    Last State:     Terminated
      Reason:       Error
      Exit Code:    1
      Started:      Sun, 09 Apr 2023 14:40:04 +0000
      Finished:     Sun, 09 Apr 2023 14:40:06 +0000
    Ready:          True
    Restart Count:  1
    Environment:
      MYID:      1
      SERVERS:   zookeeper1,zookeeper2,zookeeper3
      JVMFLAGS:  -Xmx2G
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-hd9r7 (ro)
      /zookeeper/data from zookeeper-datadir-pvc-1 (rw)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  zookeeper-datadir-pvc-1:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  zookeeper-datadir-pvc-1
    ReadOnly:   false
  kube-api-access-hd9r7:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type    Reason     Age                    From               Message
  ----    ------     ----                   ----               -------
  Normal  Scheduled  4m13s                  default-scheduler  Successfully assigned mooreyxia/zookeeper1-67db986b9f-lxhlf to 192.168.11.215
  Normal  Pulled     3m49s                  kubelet            Successfully pulled image "K8s-harbor01.mooreyxia.com/demo/zookeeper:v3.4.14" in 21.024280603s (21.025309321s including waiting)
  Normal  Pulling    3m42s (x2 over 4m10s)  kubelet            Pulling image "K8s-harbor01.mooreyxia.com/demo/zookeeper:v3.4.14"
  Normal  Pulled     3m41s                  kubelet            Successfully pulled image "K8s-harbor01.mooreyxia.com/demo/zookeeper:v3.4.14" in 1.169990499s (1.170009649s including waiting)
  Normal  Created    3m40s (x2 over 3m49s)  kubelet            Created container server
  Normal  Started    3m40s (x2 over 3m48s)  kubelet            Started container server

#Confirm the logs show no errors
[root@K8s-ansible zookeeper]#kubectl logs zookeeper1-67db986b9f-lxhlf -n mooreyxia
ZooKeeper JMX enabled by default
ZooKeeper remote JMX Port set to 9010
ZooKeeper remote JMX authenticate set to false
ZooKeeper remote JMX ssl set to false
ZooKeeper remote JMX log4j set to true
Using config: /zookeeper/bin/../conf/zoo.cfg
...

  • Verify the cluster state

#Confirm the zookeeper pods have joined the ensemble - check several to verify the leader/follower roles
[root@K8s-ansible zookeeper]#kubectl exec -it zookeeper1-67db986b9f-lxhlf bash -n mooreyxia
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
bash-4.3# /zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
ZooKeeper remote JMX Port set to 9010
ZooKeeper remote JMX authenticate set to false
ZooKeeper remote JMX ssl set to false
ZooKeeper remote JMX log4j set to true
Using config: /zookeeper/bin/../conf/zoo.cfg
Mode: follower # this member is a follower; "standalone" here would indicate single-node mode

#Inspect the generated configuration
bash-4.3# cat /zookeeper/conf/zoo.cfg 
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/zookeeper/data
dataLogDir=/zookeeper/wal
#snapCount=100000
autopurge.purgeInterval=1
clientPort=2181
quorumListenOnAllIPs=true
server.1=zookeeper1:2888:3888 # these cluster entries were appended by entrypoint.sh
server.2=zookeeper2:2888:3888
server.3=zookeeper3:2888:3888

[root@K8s-ansible zookeeper]#kubectl exec -it zookeeper2-6786d47d66-7kvql bash -n mooreyxia
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
bash-4.3# /zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
ZooKeeper remote JMX Port set to 9010
ZooKeeper remote JMX authenticate set to false
ZooKeeper remote JMX ssl set to false
ZooKeeper remote JMX log4j set to true
Using config: /zookeeper/bin/../conf/zoo.cfg
Mode: follower # follower role
bash-4.3# exit
exit

[root@K8s-ansible zookeeper]#kubectl exec -it zookeeper3-56b4f54865-xd2k8 bash -n mooreyxia
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
bash-4.3# /zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
ZooKeeper remote JMX Port set to 9010
ZooKeeper remote JMX authenticate set to false
ZooKeeper remote JMX ssl set to false
ZooKeeper remote JMX log4j set to true
Using config: /zookeeper/bin/../conf/zoo.cfg
Mode: leader # leader role
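
Instead of exec'ing into each pod interactively, the three roles can also be collected in one pass (my addition):
#Print each member's role in one loop
for p in $(kubectl get pods -n mooreyxia -l app=zookeeper -o name); do
    echo "== $p"
    kubectl exec -n mooreyxia "$p" -- /zookeeper/bin/zkServer.sh status 2>/dev/null | grep Mode
done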


#Try connecting to zookeeper
#The following can be run against any node of the zookeeper cluster; zkCli.sh connects to the local node by default, and once connected you can read and update data
#For more detail on zookeeper operations, see my dedicated zookeeper posts
bash-4.3# zkCli.sh -server 192.168.11.211:32181
Connecting to 192.168.11.211:32181
2023-04-09 15:03:57,442 [myid:] - INFO  [main:Environment@100] - Client environment:zookeeper.version=3.4.14-4c25d480e66aadd371de8bd2fd8da255ac140bcf, built on 03/06/2019 16:18 GMT
2023-04-09 15:03:57,447 [myid:] - INFO  [main:Environment@100] - Client environment:host.name=zookeeper3-56b4f54865-xd2k8
2023-04-09 15:03:57,447 [myid:] - INFO  [main:Environment@100] - Client environment:java.version=1.8.0_144
2023-04-09 15:03:57,451 [myid:] - INFO  [main:Environment@100] - Client environment:java.vendor=Oracle Corporation
2023-04-09 15:03:57,452 [myid:] - INFO  [main:Environment@100] - Client environment:java.home=/usr/lib/jvm/java-8-oracle
2023-04-09 15:03:57,453 [myid:] - INFO  [main:Environment@100] - Client environment:java.class.path=/zookeeper/bin/../zookeeper-server/target/classes:/zookeeper/bin/../build/classes:/zookeeper/bin/../zookeeper-server/target/lib/*.jar:/zookeeper/bin/../build/lib/*.jar:/zookeeper/bin/../lib/slf4j-log4j12-1.7.25.jar:/zookeeper/bin/../lib/slf4j-api-1.7.25.jar:/zookeeper/bin/../lib/netty-3.10.6.Final.jar:/zookeeper/bin/../lib/log4j-1.2.17.jar:/zookeeper/bin/../lib/jline-0.9.94.jar:/zookeeper/bin/../lib/audience-annotations-0.5.0.jar:/zookeeper/bin/../zookeeper-3.4.14.jar:/zookeeper/bin/../zookeeper-server/src/main/resources/lib/*.jar:/zookeeper/bin/../conf:
2023-04-09 15:03:57,453 [myid:] - INFO  [main:Environment@100] - Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
2023-04-09 15:03:57,454 [myid:] - INFO  [main:Environment@100] - Client environment:java.io.tmpdir=/tmp
2023-04-09 15:03:57,454 [myid:] - INFO  [main:Environment@100] - Client environment:java.compiler=<NA>
2023-04-09 15:03:57,455 [myid:] - INFO  [main:Environment@100] - Client environment:os.name=Linux
2023-04-09 15:03:57,455 [myid:] - INFO  [main:Environment@100] - Client environment:os.arch=amd64
2023-04-09 15:03:57,456 [myid:] - INFO  [main:Environment@100] - Client environment:os.version=5.15.0-69-generic
2023-04-09 15:03:57,456 [myid:] - INFO  [main:Environment@100] - Client environment:user.name=root
2023-04-09 15:03:57,457 [myid:] - INFO  [main:Environment@100] - Client environment:user.home=/root
2023-04-09 15:03:57,457 [myid:] - INFO  [main:Environment@100] - Client environment:user.dir=/
2023-04-09 15:03:57,459 [myid:] - INFO  [main:ZooKeeper@442] - Initiating client connection, connectString=192.168.11.211:32181 sessionTimeout=30000 watcher=org.apache.zookeeper.ZooKeeperMain$MyWatcher@1de0aca6
Welcome to ZooKeeper!
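
Once connected, a write on one member followed by a read through another confirms replication; a hypothetical check of mine (the /smoke-test znode exists only for this test):
#Write through one member, read back through another (run from the ansible host)
kubectl exec -n mooreyxia zookeeper1-67db986b9f-lxhlf -- /zookeeper/bin/zkCli.sh -server zookeeper2:2181 create /smoke-test hello
kubectl exec -n mooreyxia zookeeper1-67db986b9f-lxhlf -- /zookeeper/bin/zkCli.sh -server zookeeper3:2181 get /smoke-test
# the second command should print "hello", proving the write was replicated
kubectl exec -n mooreyxia zookeeper1-67db986b9f-lxhlf -- /zookeeper/bin/zkCli.sh -server zookeeper1:2181 delete /smoke-test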

You can also take one pod of the zookeeper cluster offline (and disable Harbor so Kubernetes cannot pull the image to recreate it automatically), then watch how zookeeper re-elects a leader; that exercise is omitted here.

I'm moore - let's keep pushing forward together!
