K8S Cluster Deployment

刘彦辉 2022-02-10

Install Ansible

yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm  #add the EPEL repo
yum install -y ansible.noarch    #install ansible

Configure the Ansible hosts file

cat >/etc/ansible/hosts <<EOF
[master]
master1 ansible_ssh_host="192.168.137.100"
master2 ansible_ssh_host="192.168.137.110"
master3 ansible_ssh_host="192.168.137.120"
EOF
ssh-keygen -t rsa     #generate an SSH key pair
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:2iC7vtK7x1XEdXhvQMhHTAJ/4XfP1WyFGfID5siGKA0 root@localhost.localdomain
The key's randomart image is:
+---[RSA 2048]----+
|    E    ..+*BB+.|
|     o . oo=+BBoo|
|    . o ..+ oo=oB|
|     .   ..  . =*|
|    . . S.     .o|
|     o +.        |
|   ......        |
|  . ..o          |
|   o*=           |
+----[SHA256]-----+

Copy the public key to every node to enable passwordless login

ssh-copy-id -i /root/.ssh/id_rsa.pub 192.168.137.100
ssh-copy-id -i /root/.ssh/id_rsa.pub 192.168.137.110
ssh-copy-id -i /root/.ssh/id_rsa.pub 192.168.137.120
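
With the keys in place, Ansible connectivity can be verified before going any further. This is only a sanity check; the ping module tests SSH and Python on each host rather than ICMP:

ansible all -m ping    #every host should answer "pong"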

Set the hostnames

hostnamectl set-hostname master1
ssh 192.168.137.110 "hostnamectl set-hostname master2"
ssh 192.168.137.120 "hostnamectl set-hostname master3"

Add entries to /etc/hosts

ansible all -m shell -a "echo '192.168.137.100 master1' >> /etc/hosts"
ansible all -m shell -a "echo '192.168.137.110 master2' >> /etc/hosts"
ansible all -m shell -a "echo '192.168.137.120 master3' >> /etc/hosts"
ansible all -m shell -a "echo '192.168.137.200 vip' >> /etc/hosts"

Disable SELinux and the firewall

ansible all -m shell -a "setenforce 0 &&systemctl --now disable firewalld && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config"

Install dependency packages

ansible all -m  shell -a  "yum install yum-utils device-mapper-persistent-data lvm2 vim wget yum-utils lrzsz lsof ipvsadm git net-tools libseccomp ipset jq iptables sysstat chrony -y "
ansible all -m shell -a "yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save"   #iptables策略放空
ansible all -m shell -a "systemctl stop postfix.service && systemctl disable postfix.service"

Install and enable chrony for time synchronization

ansible all -m shell -a "yum install -y chrony && systemctl --now enable chronyd"

Check time synchronization

ansible all -m shell -a "timedatectl"
master3 | CHANGED | rc=0 >>
      Local time: Thu 2020-09-10 20:10:47 CST
  Universal time: Thu 2020-09-10 12:10:47 UTC
        RTC time: Thu 2020-09-10 12:10:47
       Time zone: Asia/Shanghai (CST, +0800)
     NTP enabled: yes
NTP synchronized: yes
 RTC in local TZ: no

### Modify kernel parameters for K8S

cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0 
vm.overcommit_memory=1 
vm.panic_on_oom=0 
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
vm.max_map_count=262144
EOF

Copy it to the other hosts

ansible all -m copy -a "src=/etc/sysctl.d/k8s.conf dest=/etc/sysctl.d/k8s.conf"
ansible all -m shell -a "sysctl -p /etc/sysctl.d/k8s.conf"

Configure rsyslogd and systemd journald

ansible all -m shell -a "mkdir /var/log/journal"
ansible all -m shell -a "mkdir /etc/systemd/journald.conf.d"
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# Persist logs to disk
Storage=persistent
# Compress historical logs
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# Maximum disk space used: 10G
SystemMaxUse=10G
# Maximum size of a single log file: 200M
SystemMaxFileSize=200M
# Keep logs for 2 weeks
MaxRetentionSec=2week
# Do not forward logs to syslog
ForwardToSyslog=no
EOF
ansible all -m copy -a "src=/etc/systemd/journald.conf.d/99-prophet.conf dest=/etc/systemd/journald.conf.d/99-prophet.conf"
ansible all -m shell -a "systemctl restart systemd-journald"

### Upgrade the kernel

ansible all -m shell -a "rpm -import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org"   #导入key
ansible all -m shell -a "rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm"  #安装elrepo源
yum --diablerepo="*" --enablerepo="elrepo-kernel" list available  #查看可用内核包
ansible all -m shell -a "yum -y --enablerepo=elrepo-kernel install kernel-ml.x86_64 kernel-ml-devel.x86_64 “  #安装最新内核
awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg  #查看新内核顺序为0
vim /etc/default/grub 
GRUB_TIMEOUT=5
GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)"
GRUB_DEFAULT=0    #change this to 0
GRUB_DISABLE_SUBMENU=true
GRUB_TERMINAL_OUTPUT="console"
GRUB_CMDLINE_LINUX="crashkernel=auto rd.lvm.lv=centos/root rd.lvm.lv=centos/swap rhgb quiet numa=off"   #append numa=off at the end to disable NUMA
GRUB_DISABLE_RECOVERY="true"
ansible all -m shell -a "cp /boot/grub2/grub.cfg{,.bak}"           #备份grub.cfg文件
ansible all -m shell -a "grub2-mkconfig -o /boot/grub2/grub.cfg"   #创建内核配置
ansible all -m shell -a "reboot "                                  #重启电脑
ansible all -m shell -a "uname -a "                                #查看内核
Linux master2 5.8.8-1.el7.elrepo.x86_64 #1 SMP Wed Sep 9 14:47:37 EDT 2020 x86_64 x86_64 x86_64 GNU/Linux
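
Optionally, confirm that the new kernel is the default boot entry on every node (grubby ships with CentOS 7). Note that /etc/default/grub was only edited on master1; if any node still boots the old kernel, copying that file over and re-running grub2-mkconfig there should fix it:

ansible all -m shell -a "grubby --default-kernel"

Next, configure the Kubernetes and Docker yum repositories (using the Aliyun mirrors) and distribute them to every node:
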
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
curl -o /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
curl -o /tmp/rpm-package-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
ansible all -m copy -a "src=/tmp/rpm-package-key.gpg dest=/tmp/rpm-package-key.gpg"
ansible all -m copy -a "src=/etc/yum.repos.d/docker-ce.repo dest=/etc/yum.repos.d/docker-ce.repo"
ansible all -m copy -a "src=/etc/yum.repos.d/kubernetes.repo dest=/etc/yum.repos.d/kubernetes.repo"
yum update -y     #run on every node
yum install -y docker-ce     #run on every node
ansible all -m shell -a "systemctl --now enable docker"
ansible all -m  shell -a  "mkdir -p /etc/docker &&mkdir -p /etc/systemd/system/docker.service.d"
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "registry-mirrors": [
  "https://v2ltjwbg.mirror.aliyuncs.com",
  "https://docker.mirrors.ustc.edu.cn",
  "http://f1361db2.m.daocloud.io",
  "https://registry.docker-cn.com"
  ]

}
EOF
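
The daemon.json above was only written on master1; presumably it also needs to reach the other masters, and Docker has to be restarted for the cgroup driver and registry-mirror settings to take effect. A sketch using the same ansible pattern as before:

ansible all -m copy -a "src=/etc/docker/daemon.json dest=/etc/docker/daemon.json"
ansible all -m shell -a "systemctl daemon-reload && systemctl restart docker"
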
modprobe br_netfilter
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
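
Two caveats here: the module script was only created on master1, so it should be distributed to and run on the other masters as well; and on the 5.x kernel installed earlier, nf_conntrack_ipv4 has been merged into nf_conntrack, so the last modprobe line may need to be changed to modprobe -- nf_conntrack. A sketch:

ansible all -m copy -a "src=/etc/sysconfig/modules/ipvs.modules dest=/etc/sysconfig/modules/ipvs.modules mode=0755"
ansible all -m shell -a "bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs"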
yum install -y kubectl kubeadm kubelet    #run on every node
ansible all -m shell -a "systemctl enable kubelet"

Get the list of required images

[root@master1 ~]# kubeadm config images list 
W0917 19:42:42.158905    1700 configset.go:348] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
k8s.gcr.io/kube-apiserver:v1.19.2
k8s.gcr.io/kube-controller-manager:v1.19.2
k8s.gcr.io/kube-scheduler:v1.19.2
k8s.gcr.io/kube-proxy:v1.19.2
k8s.gcr.io/pause:3.2
k8s.gcr.io/etcd:3.4.13-0
k8s.gcr.io/coredns:1.7.0

### Because the nodes cannot reach k8s.gcr.io directly, the images were first pushed to Docker Hub so that they can be pulled from there. Pulling them from Docker Hub works as follows:

[root@master1 ~]# docker login
Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one.
Username: linben1985
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded

Pull every required image from Docker Hub; the pull commands can be copied straight from the Docker Hub page.

[root@master1 ~]# docker pull linben1985/k8sofben:v1.19.2
v1.19.2: Pulling from linben1985/k8sofben
b9cd0ea6c874: Pull complete 
a84ff2cd01b7: Pull complete 
f5db63e1da64: Pull complete 
Digest: sha256:b119baef2a60b537c264c0ea009f63095169af089e1a36fb4167693f1b60cd1e
Status: Downloaded newer image for linben1985/k8sofben:v1.19.2
docker.io/linben1985/k8sofben:v1.19.2
[root@master1 ~]# docker images
REPOSITORY              TAG                 IMAGE ID            CREATED             SIZE
linben1985/k8sofben     v1.19.2             607331163122        24 hours ago        119MB
wise2c/keepalived-k8s   latest              0ba6a7862982        2 years ago         14MB
wise2c/haproxy-k8s      latest              fde31577093d        2 years ago         71.1MB

Re-tag the pulled images with the names reported by kubeadm config images list. Since those are the default image names, the image repository does not need to be changed during initialization.

[root@master1 ~]# docker tag linben1985/k8sofben:v1.19.2 k8s.gcr.io/kube-apiserver:v1.19.2
[root@master1 ~]# docker images
REPOSITORY                  TAG                 IMAGE ID            CREATED             SIZE
linben1985/k8sofben         v1.19.2             607331163122        24 hours ago        119MB
k8s.gcr.io/kube-apiserver   v1.19.2             607331163122        24 hours ago        119MB
wise2c/keepalived-k8s       latest              0ba6a7862982        2 years ago         14MB
wise2c/haproxy-k8s          latest              fde31577093d        2 years ago         71.1MB
[root@master1 ~]# docker rmi linben1985/k8sofben:v1.19.2
Untagged: linben1985/k8sofben:v1.19.2
Untagged: linben1985/k8sofben@sha256:b119baef2a60b537c264c0ea009f63095169af089e1a36fb4167693f1b60cd1e
[root@master1 ~]# docker images
REPOSITORY                  TAG                 IMAGE ID            CREATED             SIZE
k8s.gcr.io/kube-apiserver   v1.19.2             607331163122        24 hours ago        119MB
wise2c/keepalived-k8s       latest              0ba6a7862982        2 years ago         14MB
wise2c/haproxy-k8s          latest              fde31577093d        2 years ago         71.1MB
docker images
REPOSITORY                  TAG                 IMAGE ID            CREATED             SIZE
linben1985/k8sofben         proxy               d373dd5a8593        47 hours ago        118MB
linben1985/k8sofben         controller          8603821e1a7a        47 hours ago        111MB
k8s.gcr.io/kube-apiserver   v1.19.2             607331163122        47 hours ago        119MB
linben1985/k8sofben         scheduler           2f32d66b884f        47 hours ago        45.7MB
linben1985/k8sofben         etcd                0369cf4303ff        3 weeks ago         253MB
linben1985/k8sofben         coredns             bfe3a36ebd25        3 months ago        45.2MB
linben1985/k8sofben         pause               80d28bedfe5d        7 months ago        683kB
wise2c/keepalived-k8s       latest              0ba6a7862982        2 years ago         14MB
wise2c/haproxy-k8s          latest              fde31577093d        2 years ago         71.1MB
[root@master1 ~]# kubeadm config images list
W0918 22:58:50.017625    1835 configset.go:348] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
k8s.gcr.io/kube-apiserver:v1.19.2
k8s.gcr.io/kube-controller-manager:v1.19.2
k8s.gcr.io/kube-scheduler:v1.19.2
k8s.gcr.io/kube-proxy:v1.19.2
k8s.gcr.io/pause:3.2
k8s.gcr.io/etcd:3.4.13-0
k8s.gcr.io/coredns:1.7.0
[root@master1 ~]# docker tag d373dd5a8593 k8s.gcr.io/kube-proxy:v1.19.2
[root@master1 ~]# docker tag 8603821e1a7a k8s.gcr.io/kube-controller-manager:v1.19.2
[root@master1 ~]# docker tag 2f32d66b884f k8s.gcr.io/kube-scheduler:v1.19.2
[root@master1 ~]# docker tag 0369cf4303ff k8s.gcr.io/etcd:3.4.13-0
[root@master1 ~]# docker tag bfe3a36ebd25 k8s.gcr.io/coredns:1.7.0
[root@master1 ~]# docker tag 80d28bedfe5d k8s.gcr.io/pause:3.2

Save the re-tagged images to local tar archives and copy them to the other two nodes

mkdir images && cd images
[root@master1 images]# docker save -o api.tar k8s.gcr.io/kube-apiserver:v1.19.2
[root@master1 images]# docker save -o controller.tar k8s.gcr.io/kube-controller-manager:v1.19.2
[root@master1 images]# docker save -o scheduler.tar k8s.gcr.io/kube-scheduler:v1.19.2
[root@master1 images]# docker save -o proxy.tar k8s.gcr.io/kube-proxy:v1.19.2
[root@master1 images]# docker save -o pause.tar k8s.gcr.io/pause:3.2
[root@master1 images]# docker save -o etcd.tar k8s.gcr.io/etcd:3.4.13-0
[root@master1 images]# docker save -o coredns.tar k8s.gcr.io/coredns:1.7.0
[root@master1 images]# ls
api.tar  controller.tar  coredns.tar  etcd.tar  pause.tar  proxy.tar  scheduler.tar
[root@master1 k8s]# scp -r images/ root@192.168.137.110:/root
[root@master1 k8s]# scp -r images/ root@192.168.137.120:/root
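
The same transfer could also be scripted with ansible's copy module instead of scp (a sketch, assuming the images directory created above lives under /root/k8s/images on master1; recursive copies of large archives this way can be slow):

ansible master2:master3 -m copy -a "src=/root/k8s/images/ dest=/root/images/"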

Run the following on master2 and master3

[root@master2 images]# vim load-images.sh 
#!/bin/bash

cd /root/images/      #change this to the image directory

ls /root/images/ | grep -v load-images.sh > /tmp/k8s-images.txt  #adjust if the images are stored in a different path

for i in $( cat  /tmp/k8s-images.txt )
do
    docker load -i $i
done

rm -rf /tmp/k8s-images.txt
[root@master2 images]# sh load-images.sh
[root@master2 images]# docker images
REPOSITORY                           TAG                 IMAGE ID            CREATED             SIZE
k8s.gcr.io/kube-proxy                v1.19.2             d373dd5a8593        2 days ago          118MB
k8s.gcr.io/kube-apiserver            v1.19.2             607331163122        2 days ago          119MB
k8s.gcr.io/kube-controller-manager   v1.19.2             8603821e1a7a        2 days ago          111MB
k8s.gcr.io/kube-scheduler            v1.19.2             2f32d66b884f        2 days ago          45.7MB
k8s.gcr.io/etcd                      3.4.13-0            0369cf4303ff        3 weeks ago         253MB
k8s.gcr.io/coredns                   1.7.0               bfe3a36ebd25        3 months ago        45.2MB
k8s.gcr.io/pause                     3.2                 80d28bedfe5d        7 months ago        683kB

Next, deploy HAProxy and Keepalived as containers on the masters to provide a highly available VIP (192.168.137.200) in front of the API servers. Create the HAProxy configuration under /data/lb/etc (the path mounted into the container below):

[root@master1 etc]# vi haproxy.cfg 

global
log 127.0.0.1 local0
log 127.0.0.1 local1 notice
maxconn 4096
#chroot /usr/share/haproxy
#user haproxy
#group haproxy
daemon

defaults
    log     global
    mode    http
    option  httplog
    option  dontlognull
    retries 3
    option redispatch
    timeout connect  5000
    timeout client  50000
    timeout server  50000

frontend stats-front
  bind *:8081
  mode http
  default_backend stats-back

frontend fe_k8s_6444
  bind *:6444
  mode tcp
  timeout client 1h
  log global
  option tcplog
  default_backend be_k8s_6443
  acl is_websocket hdr(Upgrade) -i WebSocket
  acl is_websocket hdr_beg(Host) -i ws

backend stats-back
  mode http
  balance roundrobin
  stats uri /haproxy/stats
  stats auth pxcstats:secret

backend be_k8s_6443
  mode tcp
  timeout queue 1h
  timeout server 1h
  timeout connect 1h
  log global
  balance roundrobin
  server rancher01 192.168.137.100:6443   #list the IPs of the three masters
  server rancher02 192.168.137.110:6443
  server rancher03 192.168.137.120:6443
[root@master1 lb]# vi start-haproxy.sh 

#!/bin/bash
MasterIP1=192.168.137.100     #the IPs of the three masters
MasterIP2=192.168.137.110
MasterIP3=192.168.137.120
MasterPort=6443

docker run -d --restart=always --name HAProxy-K8S -p 6444:6444 \
        -e MasterIP1=$MasterIP1 \
        -e MasterIP2=$MasterIP2 \
        -e MasterIP3=$MasterIP3 \
        -e MasterPort=$MasterPort \
        -v /data/lb/etc/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg \
        wise2c/haproxy-k8s    #/data/lb/etc/haproxy.cfg is the path of the config file created above
[root@master1 lb]# vi start-keepalived.sh 

#!/bin/bash
VIRTUAL_IP=192.168.137.200           #the VIP address
INTERFACE=ens33                      #the network interface in use
NETMASK_BIT=24
CHECK_PORT=6444
RID=10
VRID=160
MCAST_GROUP=224.0.0.18

docker run -itd --restart=always --name=Keepalived-K8S \
        --net=host --cap-add=NET_ADMIN \
        -e VIRTUAL_IP=$VIRTUAL_IP \
        -e INTERFACE=$INTERFACE \
        -e CHECK_PORT=$CHECK_PORT \
        -e RID=$RID \
        -e VRID=$VRID \
        -e NETMASK_BIT=$NETMASK_BIT \
        -e MCAST_GROUP=$MCAST_GROUP \
        wise2c/keepalived-k8s
[root@master1 lb]# sh start-haproxy.sh 
[root@master1 lb]# netstat -anp|grep proxy
tcp6       0      0 :::6444                 :::*                    LISTEN      2098/docker-proxy
[root@master1 lb]# sh start-keepalived.sh 
[root@master1 lb]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:46:1f:63 brd ff:ff:ff:ff:ff:ff
    inet 192.168.137.100/24 brd 192.168.137.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet 192.168.137.200/24 scope global secondary ens33    #VIP IP
       valid_lft forever preferred_lft forever
    inet6 fe80::c8c1:ce38:9736:7440/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever

[root@master1 lb]# docker ps
CONTAINER ID        IMAGE                   COMMAND                  CREATED              STATUS              PORTS                    NAMES
0bf63e8f9e7e        wise2c/keepalived-k8s   "/usr/bin/keepalived…"   About a minute ago   Up About a minute                            Keepalived-K8S
1de5062184ea        wise2c/haproxy-k8s      "/docker-entrypoint.…"   3 minutes ago        Up 3 minutes        0.0.0.0:6444->6444/tcp   HAProxy-K8S
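
HAProxy and Keepalived were only started on master1 above. For real high availability they should run on all three masters so the VIP can fail over; a sketch, assuming the scripts and config live under /data/lb as the prompts suggest:

ssh 192.168.137.110 "mkdir -p /data" && scp -r /data/lb root@192.168.137.110:/data/
ssh 192.168.137.120 "mkdir -p /data" && scp -r /data/lb root@192.168.137.120:/data/
ssh 192.168.137.110 "cd /data/lb && sh start-haproxy.sh && sh start-keepalived.sh"
ssh 192.168.137.120 "cd /data/lb && sh start-haproxy.sh && sh start-keepalived.sh"
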
kubeadm config print init-defaults > kubeadm-config.yaml   #generate the default init configuration
[root@master1 k8s]# vi kubeadm-config.yaml 

apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.137.100               #change to this node's IP
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: master1
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "192.168.137.200:6444"      #add this line: the VIP endpoint
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.19.2                        #match the installed kubeadm version (the init log below shows v1.19.2)
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: "10.244.0.0/16"                      #add this line: the pod network CIDR (must match the flannel default)
scheduler: {}
---                                               #append the section below to switch kube-proxy to IPVS mode
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
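
Before the real init, the configuration can be exercised against kubeadm's preflight checks on their own (a sketch; this phase runs the checks and may also pre-pull any images it finds missing):

kubeadm init phase preflight --config=kubeadm-config.yaml
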
[root@master1 k8s]# kubeadm init --config=kubeadm-config.yaml --upload-certs --ignore-preflight-errors=all | tee kubeadm-init.log  #initialize the first control-plane node; omit --ignore-preflight-errors=all on the first attempt and add it only when re-running init after a failed attempt with adjusted parameters
W0919 21:00:30.096513  110409 configset.go:348] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[init] Using Kubernetes version: v1.19.2
[preflight] Running pre-flight checks
    [WARNING Port-10259]: Port 10259 is in use
    [WARNING Port-10257]: Port 10257 is in use
    [WARNING FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml]: /etc/kubernetes/manifests/kube-apiserver.yaml already exists
    [WARNING FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml]: /etc/kubernetes/manifests/kube-controller-manager.yaml already exists
    [WARNING FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml]: /etc/kubernetes/manifests/kube-scheduler.yaml already exists
    [WARNING FileAvailable--etc-kubernetes-manifests-etcd.yaml]: /etc/kubernetes/manifests/etcd.yaml already exists
    [WARNING Port-10250]: Port 10250 is in use
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] Using existing etcd/ca certificate authority
[certs] Using existing etcd/server certificate and key on disk
[certs] Using existing etcd/peer certificate and key on disk
[certs] Using existing etcd/healthcheck-client certificate and key on disk
[certs] Using existing apiserver-etcd-client certificate and key on disk
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/admin.conf"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/kubelet.conf"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/controller-manager.conf"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/scheduler.conf"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 20.503460 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.19" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
a79e48f9a158b8bad7a55672c8d21bef7307aedb10fb9f003739f2e7b5abc41d
[mark-control-plane] Marking the node master1 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master1 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube                 #the next three lines grant kubectl admin access
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.137.200:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:e8da3dfb6d186081f2a42dcf078d880a6a364e1d89e3c51434be602b124d9941 \
    --control-plane --certificate-key a79e48f9a158b8bad7a55672c8d21bef7307aedb10fb9f003739f2e7b5abc41d    #command for joining additional control-plane nodes

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.137.200:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:e8da3dfb6d186081f2a42dcf078d880a6a364e1d89e3c51434be602b124d9941  #command for joining worker nodes

Run the following on master2 and master3 to join them as additional control-plane nodes

kubeadm join 192.168.137.200:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:96d2cfbcc77d10550aa05b98c092bc15be9693d784aa9ed48e4a4b6c20a777c6 \
    --control-plane --certificate-key 41b6cc1edd842af85f9ce7186dd00af3f1c5f5e1db954a4d33381b65e511b6b1

Run the following on every node to grant kubectl admin access

    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@master1 k8s]# kubectl get node
NAME      STATUS     ROLES    AGE     VERSION
master1   NotReady   master   8m34s   v1.19.1
master2   NotReady   master   7m30s   v1.19.1
master3   NotReady   master   3m56s   v1.19.1
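
To confirm that kubectl really talks to the API server through the VIP, check cluster-info; it should report the 192.168.137.200:6444 endpoint configured as controlPlaneEndpoint:

kubectl cluster-info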

You can see that every control-plane component now has three replicas

[root@master1 k8s]# kubectl get pod --all-namespaces
NAMESPACE     NAME                              READY   STATUS    RESTARTS   AGE
kube-system   coredns-f9fd979d6-fscvz           0/1     Pending   0          8m56s
kube-system   coredns-f9fd979d6-rprfl           0/1     Pending   0          8m56s
kube-system   etcd-master1                      1/1     Running   0          9m4s
kube-system   etcd-master2                      1/1     Running   0          8m8s
kube-system   etcd-master3                      1/1     Running   0          4m34s
kube-system   kube-apiserver-master1            1/1     Running   0          9m4s
kube-system   kube-apiserver-master2            1/1     Running   0          8m8s
kube-system   kube-apiserver-master3            1/1     Running   0          4m34s
kube-system   kube-controller-manager-master1   1/1     Running   1          9m4s
kube-system   kube-controller-manager-master2   1/1     Running   0          8m8s
kube-system   kube-controller-manager-master3   1/1     Running   0          4m34s
kube-system   kube-proxy-8tnrp                  1/1     Running   0          4m36s
kube-system   kube-proxy-dktgb                  1/1     Running   0          8m10s
kube-system   kube-proxy-jkxfj                  1/1     Running   0          8m55s
kube-system   kube-scheduler-master1            1/1     Running   1          9m4s
kube-system   kube-scheduler-master2            1/1     Running   0          8m9s
kube-system   kube-scheduler-master3            1/1     Running   0          4m34s
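
Because kube-proxy was switched to IPVS mode in kubeadm-config.yaml, the virtual servers it programs can be inspected with ipvsadm (installed earlier); an entry for the service network, e.g. 10.96.0.1:443, should be listed:

ipvsadm -Ln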

Enable tab completion for kubectl

ansible all -m shell -a "source /usr/share/bash-completion/bash_completion"
ansible all -m shell -a “echo ‘source /usr/share/bash-completion/bash_completion’ >> ~/.bashrc”
ansible all -m shell -a ”echo ‘source <(kubectl completion bash)’ >> ~/.bashrc“

Deploy the flannel network

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
[root@master1 manifests]# kubectl get node    #check node status; all nodes are now Ready
NAME      STATUS   ROLES    AGE   VERSION
master1   Ready    master   64d   v1.19.1
master2   Ready    master   64d   v1.19.1
master3   Ready    master   64d   v1.19.1
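
The flannel DaemonSet itself can also be checked; one kube-flannel pod should be running on each master (the label below matches the app label used in kube-flannel.yml, which at the time deployed into kube-system):

kubectl get pod -n kube-system -l app=flannel -o wide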