Debian 11 / kubeadm 1.23.5 highly available cluster deployment

曾宝月 2022-04-05

Machine plan
#masterx3
192.168.3.201 k8s-master01
192.168.3.202 k8s-master02
192.168.3.203 k8s-master03
#master-lb
192.168.3.236 k8s-master-lb
#nodex2
192.168.3.204 k8s-node01
192.168.3.205 k8s-node02

Pod CIDR
172.16.0.0/12

Service CIDR
10.96.0.0/12

Edit hosts

Append the following to /etc/hosts on every node:
#masterx3
192.168.3.201	k8s-master01
192.168.3.202	k8s-master02
192.168.3.203	k8s-master03
#master-lb
192.168.3.236	k8s-master-lb
#nodex2
192.168.3.204	k8s-node01
192.168.3.205	k8s-node02
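
The same entries can be pushed out from k8s-master01 instead of editing each node by hand — a minimal sketch, assuming root SSH access to the other four machines:

for h in k8s-master02 k8s-master03 k8s-node01 k8s-node02; do
  scp /etc/hosts root@$h:/etc/hosts   # copy the already-edited hosts file to each node
done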

Disable the swap partition and set system limits

swapoff -a
# comment out the swap entry in /etc/fstab so swap stays disabled after reboot

Append to /etc/profile:
ulimit -HSn 65535
export HISTTIMEFORMAT='%F %T '
export HISTSIZE=1000
export HISTFILESIZE=10000
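
The fstab edit can be scripted — a sketch, assuming the swap entry has a whitespace-delimited "swap" field (an already-commented line just gets a second, harmless "#"):

sed -ri '/\sswap\s/ s/^/#/' /etc/fstab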

Configure the required kernel modules to load at boot:

cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
cat <<EOF | sudo tee /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
br_netfilter
EOF
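
The modules-load.d files only take effect at boot; to load the same modules right away on the running system:

modprobe br_netfilter
for m in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack; do modprobe $m; done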

Add the remaining kernel parameters (for example, append them to /etc/sysctl.d/k8s.conf):

net.ipv4.ip_local_port_range = 1024 65535
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_fin_timeout = 30
net.core.somaxconn = 20480
net.core.netdev_max_backlog = 20480
net.ipv4.tcp_max_syn_backlog = 20480
net.ipv4.tcp_max_tw_buckets = 800000
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1

sysctl --system   # loads /etc/sysctl.conf plus everything under /etc/sysctl.d/, unlike plain sysctl -p
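
To spot-check that the parameters took effect:

sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables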

After the kernel settings are configured on all nodes, reboot the servers and verify that the modules are still loaded after the reboot:

reboot
lsmod | grep --color=auto -e ip_vs -e nf_conntrack

Install Docker

curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
vim /etc/docker/daemon.json
{
  "graph": "/data/docker",
  "storage-driver": "overlay2",
  "registry-mirrors": ["https://q2ddddke.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {"max-size": "100m"}
}

Note: "graph" relocates the Docker data directory and is deprecated on current releases ("data-root" is its replacement); "exec-opts" sets the systemd cgroup driver so Docker matches the kubelet's cgroup driver.

systemctl daemon-reload
systemctl enable docker
systemctl restart docker
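
A quick check that the daemon picked up the settings:

docker info | grep -iE 'cgroup driver|docker root dir'   # expect "Cgroup Driver: systemd" and "Docker Root Dir: /data/docker"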

Install Kubernetes

curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -

cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF

apt-get update

apt-get install kubelet=1.23* kubeadm=1.23* kubectl=1.23*   # or pin the exact build, e.g. kubelet=1.23.5-00

systemctl enable kubelet
systemctl start kubelet
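
Optionally hold the packages so a routine apt upgrade does not pull the cluster onto a newer version:

apt-mark hold kubelet kubeadm kubectl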

Install haproxy and keepalived on the three masters for control-plane high availability

apt-get install haproxy keepalived

The haproxy configuration (/etc/haproxy/haproxy.cfg) is identical on all three masters:

global
  maxconn  2000
  ulimit-n  16384
  log  127.0.0.1 local0 err
  stats timeout 30s

defaults
  log global
  mode  http
  option  httplog
  timeout connect 5000
  timeout client  50000
  timeout server  50000
  timeout http-request 15s
  timeout http-keep-alive 15s

frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor

frontend k8s-master
  bind 0.0.0.0:16443
  bind 127.0.0.1:16443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master

backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server k8s-master01	192.168.3.201:6443  check
  server k8s-master02	192.168.3.202:6443  check
  server k8s-master03	192.168.3.203:6443  check

systemctl enable haproxy
systemctl start haproxy
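
The monitor-in frontend exposes a simple health endpoint; once haproxy is up it should answer on port 33305:

curl -s -o /dev/null -w '%{http_code}\n' http://127.0.0.1:33305/monitor   # expect 200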

Keepalived configuration (/etc/keepalived/keepalived.conf). The three masters differ only in state, priority, and mcast_src_ip.

k8s-master01:

! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state MASTER
    interface ens32
    mcast_src_ip 192.168.3.201
    virtual_router_id 51
    priority 101
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.3.236
    }
    track_script {
        chk_apiserver
    }
}

k8s-master02:

! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens32
    mcast_src_ip 192.168.3.202
    virtual_router_id 51
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.3.236
    }
    track_script {
        chk_apiserver
    }
}

k8s-master03:

! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens32
    mcast_src_ip 192.168.3.203
    virtual_router_id 51
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.3.236
    }
    track_script {
        chk_apiserver
    }
}

The health-check script, /etc/keepalived/check_apiserver.sh, is the same on all three masters:
#!/bin/bash

err=0
for k in $(seq 1 3)
do
    check_code=$(pgrep haproxy)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
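
keepalived can only run the check script if it is executable:

chmod +x /etc/keepalived/check_apiserver.sh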

systemctl enable keepalived
systemctl start keepalived
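
With all three keepalived instances running, the VIP should land on k8s-master01 (the MASTER node). Interface name as configured above:

ip addr show ens32 | grep 192.168.3.236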

Prepare a kubeadm init configuration

kubeadm-config.yaml:

apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: 7t2weq.bjbawausm0jaxury
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.3.201
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock  # use this when Docker is the container runtime
  #criSocket: /run/containerd/containerd.sock # use this when containerd is the container runtime
  name: k8s-master01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  certSANs:
  - 192.168.3.236
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.3.236:16443
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.23.5 # keep this in sync with the output of kubeadm version
networking:
  dnsDomain: cluster.local
  podSubnet: 172.16.0.0/12
  serviceSubnet: 10.96.0.0/12
scheduler: {}

Convert the file to the config API version used by kubeadm 1.23 (v1beta3):

kubeadm config migrate --old-config kubeadm-config.yaml --new-config new.yaml

kubeadm config images pull --config /root/new.yaml

kubeadm init --config /root/new.yaml  --upload-certs
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.3.236:16443 --token 7t2weq.bjbawausm0jaxury \
	--discovery-token-ca-cert-hash sha256:2b46dd44bf45c3d1645780a32d3f2357853ec94a0c5d6b6dc5829b02314e8b9c \
	--control-plane --certificate-key bc8174e8426a0412b311c083f265241169b63d1c337128d935944cd601359069

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.3.236:16443 --token 7t2weq.bjbawausm0jaxury \
	--discovery-token-ca-cert-hash sha256:2b46dd44bf45c3d1645780a32d3f2357853ec94a0c5d6b6dc5829b02314e8b9c 

Set up the Pod network

Download Calico

curl https://docs.projectcalico.org/manifests/calico.yaml -O

Edit the manifest: around lines 4222-4223 of calico.yaml, find

            # - name: CALICO_IPV4POOL_CIDR
            #   value: "192.168.0.0/16"

Uncomment both lines and set the value to the Pod CIDR used above (172.16.0.0/12).

kubectl apply -f calico.yaml
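
The nodes go Ready once Calico is running; progress can be watched with (label selector as used in the upstream manifest):

kubectl -n kube-system get pods -l k8s-app=calico-node -w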

Then join the remaining nodes with the commands printed by kubeadm init — the other control-plane nodes first, then the workers:

kubeadm join 192.168.3.236:16443 --token 7t2weq.bjbawausm0jaxury \
	--discovery-token-ca-cert-hash sha256:2b46dd44bf45c3d1645780a32d3f2357853ec94a0c5d6b6dc5829b02314e8b9c \
	--control-plane --certificate-key bc8174e8426a0412b311c083f265241169b63d1c337128d935944cd601359069

kubeadm join 192.168.3.236:16443 --token 7t2weq.bjbawausm0jaxury \
	--discovery-token-ca-cert-hash sha256:2b46dd44bf45c3d1645780a32d3f2357853ec94a0c5d6b6dc5829b02314e8b9c 

Switch the kube-proxy mode to ipvs

kubectl edit configmap kube-proxy -n kube-system

Find the mode field (around line 44 of the ConfigMap) and set:

    mode: "ipvs"

Then delete the existing kube-proxy pods so the DaemonSet recreates them with the new mode:

for i in `kubectl get pods -n kube-system | grep kube-proxy | awk '{print $1}'`; do kubectl delete pod $i -n kube-system ;done
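
An equivalent, slightly tidier restart (kube-proxy runs as a DaemonSet):

kubectl -n kube-system rollout restart daemonset kube-proxy

ipvsadm now shows the Service rules being handled by IPVS: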
root@k8s-master01:~# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.96.0.1:443 rr
  -> 192.168.3.201:6443           Masq    1      3          0         
  -> 192.168.3.202:6443           Masq    1      2          1         
  -> 192.168.3.203:6443           Masq    1      4          0         
TCP  10.96.0.10:53 rr
  -> 172.25.244.199:53            Masq    1      0          0         
  -> 172.25.244.200:53            Masq    1      0          0         
TCP  10.96.0.10:9153 rr
  -> 172.25.244.199:9153          Masq    1      0          0         
  -> 172.25.244.200:9153          Masq    1      0          0         
UDP  10.96.0.10:53 rr
  -> 172.25.244.199:53            Masq    1      0          0         
  -> 172.25.244.200:53            Masq    1      0          0    
root@k8s-master01:~# kubectl get pods -A -o wide
NAMESPACE     NAME                                       READY   STATUS    RESTARTS      AGE   IP               NODE           NOMINATED NODE   READINESS GATES
kube-system   calico-kube-controllers-56fcbf9d6b-795kp   1/1     Running   4 (54s ago)   8h    172.25.244.201   k8s-master01   <none>           <none>
kube-system   calico-node-55tpp                          1/1     Running   2 (8h ago)    8h    192.168.3.201    k8s-master01   <none>           <none>
kube-system   calico-node-8x92f                          1/1     Running   2 (8h ago)    8h    192.168.3.204    k8s-node01     <none>           <none>
kube-system   calico-node-c6hl7                          1/1     Running   2 (8h ago)    8h    192.168.3.202    k8s-master02   <none>           <none>
kube-system   calico-node-rgsgt                          1/1     Running   2 (8h ago)    8h    192.168.3.203    k8s-master03   <none>           <none>
kube-system   calico-node-wqbzz                          1/1     Running   2 (8h ago)    8h    192.168.3.205    k8s-node02     <none>           <none>
kube-system   coredns-65c54cc984-bfkrr                   1/1     Running   2 (8h ago)    8h    172.25.244.200   k8s-master01   <none>           <none>
kube-system   coredns-65c54cc984-jlwr2                   1/1     Running   2 (8h ago)    8h    172.25.244.199   k8s-master01   <none>           <none>
kube-system   etcd-k8s-master01                          1/1     Running   2 (8h ago)    8h    192.168.3.201    k8s-master01   <none>           <none>
kube-system   etcd-k8s-master02                          1/1     Running   2 (8h ago)    8h    192.168.3.202    k8s-master02   <none>           <none>
kube-system   etcd-k8s-master03                          1/1     Running   2 (8h ago)    8h    192.168.3.203    k8s-master03   <none>           <none>
kube-system   kube-apiserver-k8s-master01                1/1     Running   2 (8h ago)    8h    192.168.3.201    k8s-master01   <none>           <none>
kube-system   kube-apiserver-k8s-master02                1/1     Running   3 (8h ago)    8h    192.168.3.202    k8s-master02   <none>           <none>
kube-system   kube-apiserver-k8s-master03                1/1     Running   2 (8h ago)    8h    192.168.3.203    k8s-master03   <none>           <none>
kube-system   kube-controller-manager-k8s-master01       1/1     Running   3 (8h ago)    8h    192.168.3.201    k8s-master01   <none>           <none>
kube-system   kube-controller-manager-k8s-master02       1/1     Running   2 (8h ago)    8h    192.168.3.202    k8s-master02   <none>           <none>
kube-system   kube-controller-manager-k8s-master03       1/1     Running   2 (8h ago)    8h    192.168.3.203    k8s-master03   <none>           <none>
kube-system   kube-proxy-69ddj                           1/1     Running   2 (8h ago)    8h    192.168.3.203    k8s-master03   <none>           <none>
kube-system   kube-proxy-9s65w                           1/1     Running   2 (8h ago)    8h    192.168.3.205    k8s-node02     <none>           <none>
kube-system   kube-proxy-fph5c                           1/1     Running   2 (8h ago)    8h    192.168.3.202    k8s-master02   <none>           <none>
kube-system   kube-proxy-tzsqt                           1/1     Running   2 (8h ago)    8h    192.168.3.201    k8s-master01   <none>           <none>
kube-system   kube-proxy-xn4tz                           1/1     Running   2 (8h ago)    8h    192.168.3.204    k8s-node01     <none>           <none>
kube-system   kube-scheduler-k8s-master01                1/1     Running   3 (8h ago)    8h    192.168.3.201    k8s-master01   <none>           <none>
kube-system   kube-scheduler-k8s-master02                1/1     Running   2 (8h ago)    8h    192.168.3.202    k8s-master02   <none>           <none>
kube-system   kube-scheduler-k8s-master03                1/1     Running   2 (8h ago)    8h    192.168.3.203    k8s-master03   <none>           <none>
root@k8s-master01:~# kubectl get nodes
NAME           STATUS   ROLES                  AGE   VERSION
k8s-master01   Ready    control-plane,master   8h    v1.23.5
k8s-master02   Ready    control-plane,master   8h    v1.23.5
k8s-master03   Ready    control-plane,master   8h    v1.23.5
k8s-node01     Ready    <none>                 8h    v1.23.5
k8s-node02     Ready    <none>                 8h    v1.23.5