Why upgrade:
A. To fix the Nacos Raft protocol deserialization code-execution vulnerability.
1. Upgrading from Nacos 2.0 to Nacos 2.2 changes the database schema; the encrypted_data_key column must be added:
use nacos;
ALTER TABLE config_info ADD encrypted_data_key TEXT NOT NULL;
ALTER TABLE config_info_beta ADD encrypted_data_key TEXT NOT NULL;
ALTER TABLE his_config_info ADD encrypted_data_key TEXT NOT NULL;
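A quick check that the new column landed on each table (a sketch, assuming the MySQL host and nacos user from the nacos-cm ConfigMap below; repeat for config_info_beta and his_config_info):
mysql -h 192.168.0.2 -u nacos -p nacos -e "SHOW COLUMNS FROM config_info LIKE 'encrypted_data_key';"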
2. One new environment variable is required:
- name: SPRING_DATASOURCE_PLATFORM
  value: "mysql"
3. Upgrade production only after thorough testing.
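One simple pre/post-upgrade check is the console readiness endpoint on every node (a sketch; the FQDN assumes the nacos-headless Service defined below, and each member should return HTTP 200):
curl -i http://nacos-cluster-0.nacos-headless.default.svc.cluster.local:8848/nacos/v1/console/health/readiness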
The actual configuration files:
pvc.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.mirrors.ustc.edu.cn/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 192.168.0.2
            - name: NFS_PATH
              value: /data/nfs/kubernetes
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.0.2
            path: /data/nfs/kubernetes
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs # or choose another name, must match the deployment's env PROVISIONER_NAME
parameters:
  archiveOnDelete: "false"
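Apply the provisioner and confirm the StorageClass is registered before creating the Nacos StatefulSet:
kubectl apply -f pvc.yaml
kubectl -n default get pods -l app=nfs-client-provisioner
kubectl get storageclass managed-nfs-storage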
configMap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: nacos-cm
  namespace: default
data:
  mysql.db.name: "nacos"
  mysql.db.host: "192.168.0.2"
  mysql.port: "3306"
  mysql.user: "nacos"
  mysql.password: "nacos"
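Apply it and double-check the rendered keys before starting the cluster:
kubectl apply -f configMap.yaml
kubectl -n default get configmap nacos-cm -o yaml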
deploy.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: nacos-cluster
  namespace: default
spec:
  serviceName: nacos-headless
  selector:
    matchLabels:
      app: nacos-cluster
  replicas: 3
  template:
    metadata:
      labels:
        app: nacos-cluster
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                      - nacos-cluster
              topologyKey: "kubernetes.io/hostname"
      imagePullSecrets:
        - name: aliyun-registry-secret
      initContainers:
        - name: peer-finder-plugin-install
          image: nacos/nacos-peer-finder-plugin:1.1
          imagePullPolicy: Always
          volumeMounts:
            - mountPath: /home/nacos/plugins/peer-finder
              name: nacos-storage
              subPath: peer-finder
      containers:
        - name: nacos
          image: nacos/nacos-server:v2.2.3
          resources:
            requests:
              memory: "1024Mi"
              cpu: "512m"
            limits:
              memory: "2048Mi"
              cpu: "2048m"
          ports:
            - containerPort: 8848
              name: nacos-http
            - containerPort: 9848
              name: nacos-rpc
            - containerPort: 9849
              name: raft-rpc
            - containerPort: 7848
              name: old-raft-rpc
          env:
            - name: JVM_XMS
              value: "1g"
            - name: JVM_XMX
              value: "1g"
            - name: MODE
              value: "cluster"
            - name: NACOS_REPLICAS
              value: "3"
            - name: SPRING_DATASOURCE_PLATFORM
              value: "mysql"
            - name: SERVICE_NAME
              value: "nacos-headless"
            - name: DOMAIN_NAME
              value: "cluster.local"
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: MYSQL_SERVICE_HOST
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.db.host
            - name: MYSQL_SERVICE_DB_NAME
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.db.name
            - name: MYSQL_SERVICE_PORT
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.port
            - name: MYSQL_SERVICE_USER
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.user
            - name: MYSQL_SERVICE_PASSWORD
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.password
            - name: NACOS_SERVER_PORT
              value: "8848"
            - name: NACOS_APPLICATION_PORT
              value: "8848"
            - name: PREFER_HOST_MODE
              value: "hostname"
            - name: NACOS_SERVERS
              value: "nacos-cluster-0.nacos-headless.default.svc.cluster.local:8848 nacos-cluster-1.nacos-headless.default.svc.cluster.local:8848 nacos-cluster-2.nacos-headless.default.svc.cluster.local:8848"
            - name: MANAGEMENT_ENDPOINTS_WEB_EXPOSURE_INCLUDE
              value: "*"
          volumeMounts:
            - name: nacos-storage
              mountPath: /home/nacos/plugins/peer-finder
              subPath: peer-finder
            - name: nacos-storage
              mountPath: /home/nacos/data
              subPath: data
            - name: nacos-storage
              mountPath: /home/nacos/logs
              subPath: logs
  volumeClaimTemplates:
    - metadata:
        name: nacos-storage
      spec:
        accessModes:
          - ReadWriteMany
        storageClassName: "managed-nfs-storage"
        resources:
          requests:
            storage: 100Gi
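After applying the StatefulSet, watch the rollout until all three replicas are ready:
kubectl apply -f deploy.yaml
kubectl -n default rollout status statefulset/nacos-cluster
kubectl -n default get pods -l app=nacos-cluster -o wide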
svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: nacos-headless
  namespace: default
  labels:
    app: nacos-cluster
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  ports:
    - protocol: TCP
      port: 8848
      targetPort: 8848
      name: nacos-http
    - protocol: TCP
      port: 9848
      targetPort: 9848
      name: nacos-rpc
    - protocol: TCP
      port: 9849
      targetPort: 9849
      name: raft-rpc
    - protocol: TCP
      port: 7848
      targetPort: 7848
      name: old-raft-rpc
  clusterIP: None
  selector:
    app: nacos-cluster
---
apiVersion: v1
kind: Service
metadata:
  name: nacos-cluster
  namespace: default
  labels:
    app: nacos-cluster
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  ports:
    - protocol: TCP
      port: 8848
      targetPort: 8848
      name: nacos-http
    - protocol: TCP
      port: 9848
      targetPort: 9848
      name: nacos-rpc
    - protocol: TCP
      port: 9849
      targetPort: 9849
      name: raft-rpc
    - protocol: TCP
      port: 7848
      targetPort: 7848
      name: old-raft-rpc
  selector:
    app: nacos-cluster
---
apiVersion: v1
kind: Service
metadata:
  name: nacos
  namespace: default
  labels:
    app: nacos-cluster
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  ports:
    - protocol: TCP
      port: 8848
      targetPort: 8848
      name: nacos-http
    - protocol: TCP
      port: 9848
      targetPort: 9848
      name: nacos-rpc
    - protocol: TCP
      port: 9849
      targetPort: 9849
      name: raft-rpc
    - protocol: TCP
      port: 7848
      targetPort: 7848
      name: old-raft-rpc
  selector:
    app: nacos-cluster
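nacos-headless drives peer discovery for the StatefulSet, while nacos-cluster (referenced by the Ingress below) and nacos expose the same ports as regular ClusterIP Services. After applying, the headless Service should list all three pod endpoints:
kubectl apply -f svc.yaml
kubectl -n default get endpoints nacos-headless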
ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nacos
  namespace: default
spec:
  rules:
    - host: nacos.abc.com
      http: &http_rules
        paths:
          - backend:
              service:
                name: nacos-cluster
                port:
                  number: 8848
            path: /
            pathType: ImplementationSpecific
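Once DNS (or a local hosts entry) points nacos.abc.com at the ingress controller, the console should answer under /nacos/; the controller address below is a placeholder:
kubectl apply -f ingress.yaml
curl -i -H "Host: nacos.abc.com" http://<ingress-controller-ip>/nacos/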