29. Pipeline Job Advanced: Deploying an Application to a Kubernetes Cluster


On the Kubernetes cloud node configured in Jenkins, add a container on top of the original maven-and-docker pod template. Alternatively, add a new pod template and, through inheritance, define a maven-docker-kubectl template.
Add the pod template:


Add a container that uses the kubesphere/kubectl:latest image.

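
A quick sanity check before wiring the image into the pod template (a sketch; it only confirms the image ships a usable kubectl binary, and the --entrypoint override is just a precaution in case the image's default entrypoint is not a shell):

# Pull the image and print the kubectl client version it bundles
docker pull kubesphere/kubectl:latest
docker run --rm --entrypoint kubectl kubesphere/kubectl:latest version --client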

Install the plugins used for authenticating to the Kubernetes cluster: Kubernetes Credentials Plugin and Kubernetes CLI.
Kubernetes CLI: this plugin loads and configures the authentication environment that kubectl needs.
This step supplies the credentials that Jenkins, running the kubectl command, will use to authenticate to the Kubernetes cluster and deploy.
Any of the following work: a token, a username and password, a certificate, or a kubeconfig file that kubectl loads for authentication. (The simplest option is token authentication with the kube-apiserver address specified directly.)
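
If you prefer the kubeconfig route mentioned above, a minimal sketch of building a standalone kubeconfig from a token looks like this (the file path, cluster name, and context name are illustrative; the token and API server address are the ones used in the test further below):

# Write a self-contained kubeconfig that authenticates with a bearer token
KCFG=/tmp/jenkins-deploy.kubeconfig
kubectl config set-cluster mycluster --server='https://kubeapi.magedu.com:6443' --insecure-skip-tls-verify=true --kubeconfig=$KCFG
kubectl config set-credentials tom --token='83d07d.d1d200dd0c85c694' --kubeconfig=$KCFG
kubectl config set-context tom@mycluster --cluster=mycluster --user=tom --kubeconfig=$KCFG
kubectl config use-context tom@mycluster --kubeconfig=$KCFG
# Any kubectl invocation can now load it explicitly
kubectl --kubeconfig=$KCFG get nodes
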
Method 1: add a user account authenticated with a static token on the Kubernetes cluster

[root@ubuntu2004 ~]#cd learning-jenkins-cicd/08-jenkins-on-kubernetes/static-token-auth-example/
[root@ubuntu2004 static-token-auth-example]#ls
auth  kube-apiserver.yaml  README.md

The auth directory contains the file defining the static-token user accounts:
[root@ubuntu2004 static-token-auth-example]#cat auth/tokens.csv 
83d07d.d1d200dd0c85c694,tom,1001,"kube-users"
7dbe3f.03d7e8f69d576210,jerry,1002,"kube-admins"
07c31c.17bddf45d355d902,mageedu,1003,"kube-admins"

To get these accounts onto the Kubernetes cluster and working, the kube-apiserver.yaml manifest also has to be modified:
add the flag - --token-auth-file=/etc/kubernetes/auth/tokens.csv,
and make sure the auth directory is mounted into the pod through the corresponding volume definitions, so that the apiserver can use the static-token file for authentication. Also check whether the version referenced in the file below matches the installed version of the cluster.

[root@ubuntu2004 static-token-auth-example]#cat kube-apiserver.yaml 
apiVersion: v1
kind: Pod
metadata:
  annotations:
    kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 172.29.6.1:6443
  creationTimestamp: null
  labels:
    component: kube-apiserver
    tier: control-plane
  name: kube-apiserver
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-apiserver
    - --advertise-address=172.29.6.1
    - --allow-privileged=true
    - --authorization-mode=Node,RBAC
    - --token-auth-file=/etc/kubernetes/auth/tokens.csv
    - --client-ca-file=/etc/kubernetes/pki/ca.crt
    - --enable-admission-plugins=NodeRestriction
    - --enable-bootstrap-token-auth=true
    - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
    - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
    - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
    - --etcd-servers=https://127.0.0.1:2379
    - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
    - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
    - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
    - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
    - --requestheader-allowed-names=front-proxy-client
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
    - --requestheader-extra-headers-prefix=X-Remote-Extra-
    - --requestheader-group-headers=X-Remote-Group
    - --requestheader-username-headers=X-Remote-User
    - --secure-port=6443
    - --service-account-issuer=https://kubernetes.default.svc.cluster.local
    - --service-account-key-file=/etc/kubernetes/pki/sa.pub
    - --service-account-signing-key-file=/etc/kubernetes/pki/sa.key
    - --service-cluster-ip-range=10.96.0.0/12
    - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
    - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
    image: registry.k8s.io/kube-apiserver:v1.25.4
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 172.29.6.1
        path: /livez
        port: 6443
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    name: kube-apiserver
    readinessProbe:
      failureThreshold: 3
      httpGet:
        host: 172.29.6.1
        path: /readyz
        port: 6443
        scheme: HTTPS
      periodSeconds: 1
      timeoutSeconds: 15
    resources:
      requests:
        cpu: 250m
    startupProbe:
      failureThreshold: 24
      httpGet:
        host: 172.29.6.1
        path: /livez
        port: 6443
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: ca-certs
      readOnly: true
    - mountPath: /etc/ca-certificates
      name: etc-ca-certificates
      readOnly: true
    - mountPath: /etc/pki
      name: etc-pki
      readOnly: true
    - mountPath: /etc/kubernetes/pki
      name: k8s-certs
      readOnly: true
    - mountPath: /usr/local/share/ca-certificates
      name: usr-local-share-ca-certificates
      readOnly: true
    - mountPath: /usr/share/ca-certificates
      name: usr-share-ca-certificates
      readOnly: true
    - mountPath: /etc/kubernetes/auth
      name: users-static-token
      readOnly: true
  hostNetwork: true
  priorityClassName: system-node-critical
  securityContext:
    seccompProfile:
      type: RuntimeDefault
  volumes:
  - hostPath:
      path: /etc/ssl/certs
      type: DirectoryOrCreate
    name: ca-certs
  - hostPath:
      path: /etc/ca-certificates
      type: DirectoryOrCreate
    name: etc-ca-certificates
  - hostPath:
      path: /etc/pki
      type: DirectoryOrCreate
    name: etc-pki
  - hostPath:
      path: /etc/kubernetes/pki
      type: DirectoryOrCreate
    name: k8s-certs
  - hostPath:
      path: /usr/local/share/ca-certificates
      type: DirectoryOrCreate
    name: usr-local-share-ca-certificates
  - hostPath:
      path: /usr/share/ca-certificates
      type: DirectoryOrCreate
    name: usr-share-ca-certificates
  - hostPath:
      path: /etc/kubernetes/auth
      type: DirectoryOrCreate
    name: users-static-token
status: {}

If the version defined in the file matches the current cluster version, the file can simply be copied into place.
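
A quick way to compare the two versions (the grep simply pulls the image tag out of the prepared manifest):

# Version of the running cluster
kubectl get nodes
kubectl version
# Version baked into the prepared manifest, e.g. registry.k8s.io/kube-apiserver:v1.25.4
grep 'image:' kube-apiserver.yaml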

Step 1: copy the auth directory into /etc/kubernetes
cp -rp auth/ /etc/kubernetes

Step 2: to avoid breaking the cluster, back up the existing kube-apiserver.yaml first
cp /etc/kubernetes/manifests/kube-apiserver.yaml /root/

Step 3: replace /etc/kubernetes/manifests/kube-apiserver.yaml with the prepared kube-apiserver.yaml
cp kube-apiserver.yaml /etc/kubernetes/manifests/kube-apiserver.yaml

Step 4: the kube-apiserver pod re-initializes.
Once it comes back up and kubectl get pods responds again, the user accounts are ready to use.
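
Because kube-apiserver runs as a static pod, the kubelet recreates it automatically when the manifest under /etc/kubernetes/manifests changes, so the API is briefly unavailable. A sketch of how to confirm it has come back with the new flag:

# Wait for the static pod to be recreated, then confirm the flag is in effect
kubectl -n kube-system get pods -l component=kube-apiserver
kubectl -n kube-system get pods -l component=kube-apiserver -o yaml | grep token-auth-file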

Step 5: test authentication against the kube-apiserver manually with the kubectl command
cd /etc/kubernetes/auth
cat tokens.csv 
83d07d.d1d200dd0c85c694,tom,1001,"kube-users"
7dbe3f.03d7e8f69d576210,jerry,1002,"kube-admins"
07c31c.17bddf45d355d902,mageedu,1003,"kube-admins"

kubectl --token="83d07d.d1d200dd0c85c694" --insecure-skip-tls-verify=true --server='https://kubeapi.magedu.com:6443' get pods

Use the tom account so that the kubectl command inside the kubectl container can connect to the Kubernetes cluster and complete the application deployment.

Step 1: create the cluster role binding tom-cluster-admin, binding the tom user to the cluster-admin role at the cluster level
kubectl create clusterrolebinding tom-cluster-admin --clusterrole=cluster-admin --user=tom
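
Optionally, verify the binding from the command line with tom's token before moving it into Jenkins (a sketch reusing the test flags from above):

# Should print "yes" once tom is bound to cluster-admin
kubectl auth can-i create deployments --all-namespaces --token="83d07d.d1d200dd0c85c694" --insecure-skip-tls-verify=true --server='https://kubeapi.magedu.com:6443'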

Step 2: copy tom's token and create it as a credential in Jenkins

In Jenkins: Manage Jenkins → Manage Credentials → Add Credentials


In the kubectl container on the Kubernetes cluster, load the token to authenticate to the specified cluster and complete the application deployment:

pipeline {
  agent { 
    kubernetes {
      inheritFrom 'maven-docker-kubectl'  
      }
    }  
  triggers {
    GenericTrigger(
      genericVariables: [
        [key: 'ref', value: '$.ref']
      ],
      token: 'fClZ0e/kTcqL2ARh7YqxW/3ndOCZA2SqfKnRTLat',
      causeString: 'Triggered on $ref',
      printContributedVariables: true,
      printPostContent: true
    )
  }   
  environment {
    codeRepo="http://gitlab.mengfanchao.com/root/spring-boot-helloWorld.git"
    registry='harbor.meng.org'
    registryCredential='harbor-user-credential'
    projectName='spring-boot-helloworld'
    imageUrl="${registry}/ikubernetes/${projectName}"
    imageTag="${BUILD_ID}"
  }
  stages {
    stage('Source') {
      steps {
        git branch: 'main', credentialsId: 'gitlab-root-credential', url: "${codeRepo}"
      }
    }
    stage('Build') {
      steps {
        container('maven') {
          sh 'mvn -B -DskipTests clean package'
        }
      }
    }
    stage('Test') {
      steps {
        container('maven') {
          sh 'mvn test'
        }
      }
    }
    stage("SonarQube Analysis") {
      steps {
        container('maven') {                
          withSonarQubeEnv('SonarQube-Server') {
            sh 'mvn sonar:sonar'
          }
        }
      }
    }
    stage("Quality Gate") {
      steps {
        timeout(time: 30, unit: 'MINUTES') {
          waitForQualityGate abortPipeline: true
        }
      }
    } 
    stage('Build Image') {
      steps {
        container('dind') {
          script {
            dockerImage = docker.build("${imageUrl}:${imageTag}")  
          }
        }
      }
    }
    stage('Push Image') {
      steps {
        container('dind') {
          script {
            docker.withRegistry("https://${registry}", registryCredential) {
              dockerImage.push()
              dockerImage.push('latest')
            }  
          }
        }
      }
    }
    stage('Update-manifests') {
      steps {
        container('jnlp') {
          sh 'sed -i "s#__IMAGE__#${imageUrl}:${imageTag}#gi" deploy/all-in-one.yaml'
        }
      }
    }
    stage('Deploy') {
      steps {
        container('kubectl') {
          withKubeConfig([credentialsId: 'k8s-uesr-tom-cluster-admin-token', serverUrl: 'https://kubernetes.default.svc']) {
            sh '''
               kubectl apply -f deploy/01-namespace.yaml   # create the namespace from a manifest kept in the code repository
               kubectl apply -f deploy/all-in-one.yaml -n hello
            '''
          }
        }
      }
    }
  }             
  post {
    always {
      mail to: '1153454651@qq.com',
      subject: "Status of pipeline: ${currentBuild.fullDisplayName}",
      body: "${env.BUILD_URL} has result ${currentBuild.result}"
    }
  }      
}
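
The Update-manifests and Deploy stages assume the application repository keeps its deployment manifests next to the source: a deploy/ directory containing 01-namespace.yaml (which creates the hello namespace) and all-in-one.yaml, whose image field holds the literal placeholder __IMAGE__ that the sed command rewrites with the freshly built tag. A quick check of that assumption, run in the repository root (illustrative):

# The Deploy stage expects these two manifests to exist
ls deploy/
# The placeholder the Update-manifests stage substitutes via sed
grep -n '__IMAGE__' deploy/all-in-one.yaml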

Configure the pipeline job accordingly and trigger it through push events from the project's webhook on GitLab.
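
With the Generic Webhook Trigger plugin, GitLab's webhook should point at the plugin's invoke endpoint using the token defined in the triggers block; for testing, the pipeline can also be triggered by hand with a request like the following (the Jenkins host name is illustrative):

# Simulate a GitLab push event against the Generic Webhook Trigger endpoint
curl -X POST -H "Content-Type: application/json" -d '{"ref": "refs/heads/main"}' "http://jenkins.example.com/generic-webhook-trigger/invoke?token=fClZ0e/kTcqL2ARh7YqxW/3ndOCZA2SqfKnRTLat"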

In the jenkins namespace of the Kubernetes cluster you will see that one pod was created with four containers: jnlp, maven, dind, and kubectl.
kubectl get pods -n jenkins

Method 2: add an authentication identity based on a ServiceAccount token

Step 1: create the ServiceAccount
kubectl create sa k8s-cicd-admin -n kube-system

Step 2: bind it to the corresponding cluster role
kubectl create clusterrolebinding k8s-cicd-admin --clusterrole=cluster-admin --serviceaccount=kube-system:k8s-cicd-admin
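
A sketch to confirm the binding took effect, using kubectl's impersonation support (run as a cluster administrator):

# Should print "yes" once the cluster-admin binding is in place
kubectl auth can-i '*' '*' --all-namespaces --as=system:serviceaccount:kube-system:k8s-cicd-admin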

Step 3: the k8s-cicd-admin ServiceAccount now has administrator privileges for the entire cluster. Create a pod based on it so that its ServiceAccount token can be extracted.
kubectl run mypod --image=ikubernetes/demoapp:v1.0 --dry-run=client -o yaml -n kube-system >mypod.yaml 

Step 4: edit mypod.yaml
vim mypod.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: mypod
  name: mypod
  namespace: kube-system
spec:
  containers:
  - image: ikubernetes/demoapp:v1.0
    name: mypod
    resources: {}
  dnsPolicy: ClusterFirst
  serviceAccountName: k8s-cicd-admin

kubectl apply -f mypod.yaml

kubectl exec -it mypod -n kube-system -- /bin/sh
cd /var/run/secrets/kubernetes.io/serviceaccount/
Extract the token:
cat token && echo
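
Note that on Kubernetes v1.24 and later (this cluster runs v1.25.4) the token mounted into the pod is a projected, time-limited ServiceAccount token rather than a long-lived secret, so it will eventually expire. If you want a token minted for Jenkins with an explicit lifetime, a sketch (the duration is illustrative and may be capped by the API server's configuration):

# Request a token for the ServiceAccount directly, without going through a pod
kubectl create token k8s-cicd-admin -n kube-system --duration=8760h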

Define the credential in Jenkins:


In the kubectl container on the Kubernetes cluster, load the token to authenticate to the specified cluster and complete the application deployment; this time switch the credential to k8s-cicd-admin-credential:

pipeline {
  agent { 
    kubernetes {
      inheritFrom 'maven-docker-kubectl'  
      }
    }  
  triggers {
    GenericTrigger(
      genericVariables: [
        [key: 'ref', value: '$.ref']
      ],
      token: 'fClZ0e/kTcqL2ARh7YqxW/3ndOCZA2SqfKnRTLat',
      causeString: 'Triggered on $ref',
      printContributedVariables: true,
      printPostContent: true
    )
  }   
  environment {
    codeRepo="http://gitlab.mengfanchao.com/root/spring-boot-helloWorld.git"
    registry='harbor.meng.org'
    registryCredential='harbor-user-credential'
    projectName='spring-boot-helloworld'
    imageUrl="${registry}/ikubernetes/${projectName}"
    imageTag="${BUILD_ID}"
  }
  stages {
    stage('Source') {
      steps {
        git branch: 'main', credentialsId: 'gitlab-root-credential', url: "${codeRepo}"
      }
    }
    stage('Build') {
      steps {
        container('maven') {
          sh 'mvn -B -DskipTests clean package'
        }
      }
    }
    stage('Test') {
      steps {
        container('maven') {
          sh 'mvn test'
        }
      }
    }
    stage("SonarQube Analysis") {
      steps {
        container('maven') {                
          withSonarQubeEnv('SonarQube-Server') {
            sh 'mvn sonar:sonar'
          }
        }
      }
    }
    stage("Quality Gate") {
      steps {
        timeout(time: 30, unit: 'MINUTES') {
          waitForQualityGate abortPipeline: true
        }
      }
    } 
    stage('Build Image') {
      steps {
        container('dind') {
          script {
            dockerImage = docker.build("${imageUrl}:${imageTag}")  
          }
        }
      }
    }
    stage('Push Image') {
      steps {
        container('dind') {
          script {
            docker.withRegistry("https://${registry}", registryCredential) {
              dockerImage.push()
              dockerImage.push('latest')
            }  
          }
        }
      }
    }
    stage('Update-manifests') {
      steps {
        container('jnlp') {
          sh 'sed -i "s#__IMAGE__#${imageUrl}:${imageTag}#gi" deploy/all-in-one.yaml'
        }
      }
    }
    stage('Deploy') {
      steps {
        container('kubectl') {
          withKubeConfig([credentialsId: 'k8s-cicd-admin-credential', serverUrl: 'https://kubernetes.default.svc']) {
            sh '''
               kubectl apply -f deploy/01-namespace.yaml   # create the namespace from a manifest kept in the code repository
               kubectl apply -f deploy/all-in-one.yaml -n hello
            '''
          }
        }
      }
    }
  }               
  post {
    always {
      mail to: '1153454651@qq.com',
      subject: "Status of pipeline: ${currentBuild.fullDisplayName}",
      body: "${env.BUILD_URL} has result ${currentBuild.result}"
    }
  }      
}

Click Build Now to run the pipeline.
