K8s kubelet: a source walkthrough of kubelet_node_status.go

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"context"
	"fmt"
	"net"
	goruntime "runtime"
	"sort"
	"strings"
	"time"

	v1 "k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	cloudprovider "k8s.io/cloud-provider"
	cloudproviderapi "k8s.io/cloud-provider/api"
	nodeutil "k8s.io/component-helpers/node/util"
	kubeletapis "k8s.io/kubelet/pkg/apis"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
	"k8s.io/kubernetes/pkg/kubelet/events"
	"k8s.io/kubernetes/pkg/kubelet/nodestatus"
	"k8s.io/kubernetes/pkg/kubelet/util"
	taintutil "k8s.io/kubernetes/pkg/util/taints"
	volutil "k8s.io/kubernetes/pkg/volume/util"
)

// registerWithAPIServer registers the node with the cluster master. It is safe
// to call multiple times, but not concurrently (kl.registrationCompleted is
// not locked).
func (kl *Kubelet) registerWithAPIServer() {
	if kl.registrationCompleted {
		return
	}

	kl.nodeStartupLatencyTracker.RecordAttemptRegisterNode()

	step := 100 * time.Millisecond

	for {
		time.Sleep(step)
		step = step * 2
		if step >= 7*time.Second {
			step = 7 * time.Second
		}

		node, err := kl.initialNode(context.TODO())
		if err != nil {
			klog.ErrorS(err, "Unable to construct v1.Node object for kubelet")
			continue
		}

		klog.InfoS("Attempting to register node", "node", klog.KObj(node))
		registered := kl.tryRegisterWithAPIServer(node)
		if registered {
			klog.InfoS("Successfully registered node", "node", klog.KObj(node))
			kl.registrationCompleted = true
			return
		}
	}
}

//This function registers the node with the cluster master. It first checks whether registration
//has already completed and returns immediately if so.
//It then records a registration attempt and starts with an initial retry delay of 100 milliseconds.
//Inside an infinite loop it sleeps, doubles the delay on each iteration, and caps it at 7 seconds.
//On every iteration it constructs a v1.Node object and tries to register it with the API server
//via tryRegisterWithAPIServer.
//On success it marks registration as completed and returns; on failure it keeps retrying.
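
//As a standalone illustration (not part of kubelet_node_status.go), the following minimal sketch
//reproduces the capped exponential backoff used above; doRegister is a hypothetical stand-in for
//tryRegisterWithAPIServer that succeeds on the fourth attempt.

package main

import (
	"fmt"
	"time"
)

// doRegister is a hypothetical stand-in for tryRegisterWithAPIServer:
// it fails three times and then succeeds.
func doRegister(attempt int) bool { return attempt >= 4 }

func main() {
	step := 100 * time.Millisecond
	for attempt := 1; ; attempt++ {
		time.Sleep(step) // sleep first, exactly as registerWithAPIServer does
		step = step * 2
		if step >= 7*time.Second {
			step = 7 * time.Second // cap the backoff at 7 seconds
		}
		if doRegister(attempt) {
			fmt.Printf("registered on attempt %d\n", attempt)
			return
		}
	}
}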

// tryRegisterWithAPIServer makes an attempt to register the given node with
// the API server, returning a boolean indicating whether the attempt was
// successful.  If a node with the same name already exists, it reconciles the
// value of the annotation for controller-managed attach-detach of attachable
// persistent volumes for the node.
func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool {
	_, err := kl.kubeClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{})
	if err == nil {
		kl.nodeStartupLatencyTracker.RecordRegisteredNewNode()
		return true
	}

	if !apierrors.IsAlreadyExists(err) {
		klog.ErrorS(err, "Unable to register node with API server", "node", klog.KObj(node))
		return false
	}

	existingNode, err := kl.kubeClient.CoreV1().Nodes().Get(context.TODO(), string(kl.nodeName), metav1.GetOptions{})
	if err != nil {
		klog.ErrorS(err, "Unable to register node with API server, error getting existing node", "node", klog.KObj(node))
		return false
	}
	if existingNode == nil {
		klog.InfoS("Unable to register node with API server, no node instance returned", "node", klog.KObj(node))
		return false
	}

	originalNode := existingNode.DeepCopy()

	klog.InfoS("Node was previously registered", "node", klog.KObj(node))

	// Edge case: the node was previously registered; reconcile
	// the value of the controller-managed attach-detach
	// annotation.
	requiresUpdate := kl.reconcileCMADAnnotationWithExistingNode(node, existingNode)
	requiresUpdate = kl.updateDefaultLabels(node, existingNode) || requiresUpdate
	requiresUpdate = kl.reconcileExtendedResource(node, existingNode) || requiresUpdate
	requiresUpdate = kl.reconcileHugePageResource(node, existingNode) || requiresUpdate
	if requiresUpdate {
		if _, _, err := nodeutil.PatchNodeStatus(kl.kubeClient.CoreV1(), types.NodeName(kl.nodeName), originalNode, existingNode); err != nil {
			klog.ErrorS(err, "Unable to reconcile node with API server,error updating node", "node", klog.KObj(node))
			return false
		}
	}

	return true
}

//This Kubelet method attempts to register the given node with the API server and returns a boolean
//indicating whether the attempt succeeded.
//If a node with the same name already exists, it reconciles the controller-managed attach-detach
//annotation for attachable persistent volumes on the existing node.
//- It first calls kl.kubeClient.CoreV1().Nodes().Create(); if that succeeds, it records that a new
//  node was registered and returns true.
//- If creation fails with anything other than an "already exists" error, it logs the error and returns false.
//- If the node already exists, it fetches the existing node; an error while fetching, or a nil result,
//  is logged and false is returned.
//- For a previously registered node, kl.reconcileCMADAnnotationWithExistingNode() reconciles the
//  controller-managed attach-detach annotation, and the default labels, extended resources, and huge
//  page resources are reconciled as well.
//- If any of these require an update, nodeutil.PatchNodeStatus() patches the node; on failure the
//  error is logged and false is returned.
//- Finally, true is returned to indicate the node is registered.
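
//A minimal sketch of the same "create, and on AlreadyExists fetch and reconcile" flow, run against
//client-go's fake clientset so it is self-contained; the node name and the seeded object are
//illustrative, and the reconcile step is reduced to a Get.

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	ctx := context.TODO()
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-a"}}

	// Seed the fake clientset with the node so that Create below hits the
	// "already exists" branch, as it would for a re-registering kubelet.
	client := fake.NewSimpleClientset(node.DeepCopy())

	_, err := client.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{})
	switch {
	case err == nil:
		fmt.Println("registered a new node")
	case apierrors.IsAlreadyExists(err):
		// Same path as tryRegisterWithAPIServer: fetch the existing node,
		// then reconcile annotations/labels/resources and patch if needed.
		existing, getErr := client.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{})
		if getErr != nil {
			fmt.Println("could not get existing node:", getErr)
			return
		}
		fmt.Println("node was previously registered:", existing.Name)
	default:
		fmt.Println("registration failed:", err)
	}
}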

// reconcileHugePageResource will update huge page capacity for each page size and remove huge page sizes no longer supported
func (kl *Kubelet) reconcileHugePageResource(initialNode, existingNode *v1.Node) bool {
	requiresUpdate := updateDefaultResources(initialNode, existingNode)
	supportedHugePageResources := sets.String{}

	for resourceName := range initialNode.Status.Capacity {
		if !v1helper.IsHugePageResourceName(resourceName) {
			continue
		}
		supportedHugePageResources.Insert(string(resourceName))

		initialCapacity := initialNode.Status.Capacity[resourceName]
		initialAllocatable := initialNode.Status.Allocatable[resourceName]

		capacity, resourceIsSupported := existingNode.Status.Capacity[resourceName]
		allocatable := existingNode.Status.Allocatable[resourceName]

		// Add or update capacity if the size was previously unsupported or has changed
		if !resourceIsSupported || capacity.Cmp(initialCapacity) != 0 {
			existingNode.Status.Capacity[resourceName] = initialCapacity.DeepCopy()
			requiresUpdate = true
		}

		// Add or update allocatable if the size was previously unsupported or has changed
		if !resourceIsSupported || allocatable.Cmp(initialAllocatable) != 0 {
			existingNode.Status.Allocatable[resourceName] = initialAllocatable.DeepCopy()
			requiresUpdate = true
		}

	}

	for resourceName := range existingNode.Status.Capacity {
		if !v1helper.IsHugePageResourceName(resourceName) {
			continue
		}

		// If huge page size no longer is supported, we remove it from the node
		if !supportedHugePageResources.Has(string(resourceName)) {
			delete(existingNode.Status.Capacity, resourceName)
			delete(existingNode.Status.Allocatable, resourceName)
			klog.InfoS("Removing huge page resource which is no longer supported", "resourceName", resourceName)
			requiresUpdate = true
		}
	}
	return requiresUpdate
}

//This function reconciles the node's huge page resources: it updates the capacity for each page size
//and removes page sizes that are no longer supported. The steps are:
//1. Call updateDefaultResources to fill in the node's default resource maps and record whether an
//   update is needed.
//2. Iterate over the initial node's capacity; for each huge page resource, add its name to the set of
//   supported huge page resources and read the initial capacity and allocatable values.
//3. Compare them with the existing node's capacity and allocatable; if the size was previously
//   unsupported or the quantity changed, update the existing node and mark it as requiring an update.
//4. Iterate over the existing node's capacity and delete any huge page resource that is no longer
//   supported, logging the removal and marking the node as requiring an update.
//5. Return whether the node status needs to be updated.
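
//A small standalone sketch of the quantities involved: huge page resources are named with the
//"hugepages-" prefix, and the Cmp check below is the same test reconcileHugePageResource uses to
//decide whether the existing node's capacity must be rewritten (the 1Gi/2Gi values are made up).

package main

import (
	"fmt"
	"strings"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Huge page resources are named "hugepages-<size>"; the prefix check below
	// approximates v1helper.IsHugePageResourceName for the sake of the example.
	name := v1.ResourceName("hugepages-2Mi")
	fmt.Println("huge page resource:", strings.HasPrefix(string(name), v1.ResourceHugePagesPrefix))

	existing := resource.MustParse("1Gi") // capacity currently stored on the Node object
	initial := resource.MustParse("2Gi")  // capacity reported locally at startup

	// Cmp != 0 is the "capacity has changed" test used by reconcileHugePageResource.
	if existing.Cmp(initial) != 0 {
		fmt.Printf("capacity changed from %s to %s, node status needs an update\n",
			existing.String(), initial.String())
	}
}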

// Zeros out extended resource capacity during reconciliation.
func (kl *Kubelet) reconcileExtendedResource(initialNode, node *v1.Node) bool {
	requiresUpdate := updateDefaultResources(initialNode, node)
	// Check with the device manager to see if node has been recreated, in which case extended resources should be zeroed until they are available
	if kl.containerManager.ShouldResetExtendedResourceCapacity() {
		for k := range node.Status.Capacity {
			if v1helper.IsExtendedResourceName(k) {
				klog.InfoS("Zero out resource capacity in existing node", "resourceName", k, "node", klog.KObj(node))
				node.Status.Capacity[k] = *resource.NewQuantity(int64(0), resource.DecimalSI)
				node.Status.Allocatable[k] = *resource.NewQuantity(int64(0), resource.DecimalSI)
				requiresUpdate = true
			}
		}
	}
	return requiresUpdate
}

//This function zeroes out the node's extended resource capacity during reconciliation.
//It first calls updateDefaultResources to fill in the node's default resources.
//It then asks the device manager whether extended resource capacity should be reset (for example
//because the node object was recreated).
//If so, it walks the node's capacity, sets the capacity and allocatable of every extended resource to
//0, and records that an update is needed.
//Finally it returns whether the node status needs to be updated.

// updateDefaultResources will set the default resources on the existing node according to the initial node
func updateDefaultResources(initialNode, existingNode *v1.Node) bool {
	requiresUpdate := false
	if existingNode.Status.Capacity == nil {
		if initialNode.Status.Capacity != nil {
			existingNode.Status.Capacity = initialNode.Status.Capacity.DeepCopy()
			requiresUpdate = true
		} else {
			existingNode.Status.Capacity = make(map[v1.ResourceName]resource.Quantity)
		}
	}

	if existingNode.Status.Allocatable == nil {
		if initialNode.Status.Allocatable != nil {
			existingNode.Status.Allocatable = initialNode.Status.Allocatable.DeepCopy()
			requiresUpdate = true
		} else {
			existingNode.Status.Allocatable = make(map[v1.ResourceName]resource.Quantity)
		}
	}
	return requiresUpdate
}

//This function sets the default resources on the existing node based on the initial node.
//If the existing node's Capacity or Allocatable map is nil, it copies the corresponding map from the
//initial node, or initializes an empty map if the initial node has none; maps that are already set
//are left untouched.
//It returns a boolean indicating whether the node resources were changed.

// updateDefaultLabels will set the default labels on the node
func (kl *Kubelet) updateDefaultLabels(initialNode, existingNode *v1.Node) bool {
	defaultLabels := []string{
		v1.LabelHostname,
		v1.LabelTopologyZone,
		v1.LabelTopologyRegion,
		v1.LabelFailureDomainBetaZone,
		v1.LabelFailureDomainBetaRegion,
		v1.LabelInstanceTypeStable,
		v1.LabelInstanceType,
		v1.LabelOSStable,
		v1.LabelArchStable,
		v1.LabelWindowsBuild,
		kubeletapis.LabelOS,
		kubeletapis.LabelArch,
	}

	needsUpdate := false
	if existingNode.Labels == nil {
		existingNode.Labels = make(map[string]string)
	}
	//Set default labels but make sure to not set labels with empty values
	for _, label := range defaultLabels {
		if _, hasInitialValue := initialNode.Labels[label]; !hasInitialValue {
			continue
		}

		if existingNode.Labels[label] != initialNode.Labels[label] {
			existingNode.Labels[label] = initialNode.Labels[label]
			needsUpdate = true
		}

		if existingNode.Labels[label] == "" {
			delete(existingNode.Labels, label)
		}
	}

	return needsUpdate
}

//This function updates the node's default labels.
//It compares the initial labels with the existing ones: any default label that is present on the
//initial node but missing or different on the existing node is copied over, and labels whose value
//ends up empty are deleted from the existing node.
//It returns a boolean indicating whether the node labels need to be updated.

// reconcileCMADAnnotationWithExistingNode reconciles the controller-managed
// attach-detach annotation on a new node and the existing node, returning
// whether the existing node must be updated.
func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *v1.Node) bool {
	var (
		existingCMAAnnotation    = existingNode.Annotations[volutil.ControllerManagedAttachAnnotation]
		newCMAAnnotation, newSet = node.Annotations[volutil.ControllerManagedAttachAnnotation]
	)

	if newCMAAnnotation == existingCMAAnnotation {
		return false
	}

	// If the just-constructed node and the existing node do
	// not have the same value, update the existing node with
	// the correct value of the annotation.
	if !newSet {
		klog.InfoS("Controller attach-detach setting changed to false; updating existing Node")
		delete(existingNode.Annotations, volutil.ControllerManagedAttachAnnotation)
	} else {
		klog.InfoS("Controller attach-detach setting changed to true; updating existing Node")
		if existingNode.Annotations == nil {
			existingNode.Annotations = make(map[string]string)
		}
		existingNode.Annotations[volutil.ControllerManagedAttachAnnotation] = newCMAAnnotation
	}

	return true
}

//This function reconciles the controller-managed attach-detach annotation between the freshly
//constructed node and the existing node, returning whether the existing node must be updated.
//If both nodes carry the same annotation value, no update is needed and false is returned.
//Otherwise the existing node is brought in line with the new node: if the annotation is not set on
//the new node, it is deleted from the existing node; if it is set, its value is copied onto the
//existing node (creating the annotation map if necessary).
//Finally, true is returned to indicate that the existing node must be updated.

// initialNode constructs the initial v1.Node for this Kubelet, incorporating node
// labels, information from the cloud provider, and Kubelet configuration.
func (kl *Kubelet) initialNode(ctx context.Context) (*v1.Node, error) {
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: string(kl.nodeName),
			Labels: map[string]string{
				v1.LabelHostname:      kl.hostname,
				v1.LabelOSStable:      goruntime.GOOS,
				v1.LabelArchStable:    goruntime.GOARCH,
				kubeletapis.LabelOS:   goruntime.GOOS,
				kubeletapis.LabelArch: goruntime.GOARCH,
			},
		},
		Spec: v1.NodeSpec{
			Unschedulable: !kl.registerSchedulable,
		},
	}
	osLabels, err := getOSSpecificLabels()
	if err != nil {
		return nil, err
	}
	for label, value := range osLabels {
		node.Labels[label] = value
	}
	//This part builds the Kubelet's initial v1.Node object, including node labels, cloud provider
	//information, and Kubelet configuration.
	//It first creates a v1.Node and sets its Name, Labels, and Unschedulable spec field.
	//It then calls getOSSpecificLabels to obtain OS-specific labels and merges them into the node labels.
	//If getOSSpecificLabels returns an error, the function returns nil and the error.

	nodeTaints := make([]v1.Taint, len(kl.registerWithTaints))
	copy(nodeTaints, kl.registerWithTaints)
	unschedulableTaint := v1.Taint{
		Key:    v1.TaintNodeUnschedulable,
		Effect: v1.TaintEffectNoSchedule,
	}

	// Taint node with TaintNodeUnschedulable when initializing
	// node to avoid race condition; refer to #63897 for more detail.
	if node.Spec.Unschedulable &&
		!taintutil.TaintExists(nodeTaints, &unschedulableTaint) {
		nodeTaints = append(nodeTaints, unschedulableTaint)
	}
	//This block taints the node as unschedulable during initialization to avoid a race condition
	//(see issue #63897 for details).
	//It first creates a nodeTaints slice of the same length as kl.registerWithTaints and copies
	//kl.registerWithTaints into it.
	//It then defines unschedulableTaint with key TaintNodeUnschedulable and effect NoSchedule.
	//If node.Spec.Unschedulable is true and nodeTaints does not already contain unschedulableTaint,
	//the taint is appended, so a node registered as unschedulable carries the matching taint from the start.

	if kl.externalCloudProvider {
		taint := v1.Taint{
			Key:    cloudproviderapi.TaintExternalCloudProvider,
			Value:  "true",
			Effect: v1.TaintEffectNoSchedule,
		}

		nodeTaints = append(nodeTaints, taint)
	}
	if len(nodeTaints) > 0 {
		node.Spec.Taints = nodeTaints
	}
	// Initially, set NodeNetworkUnavailable to true.
	if kl.providerRequiresNetworkingConfiguration() {
		node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{
			Type:               v1.NodeNetworkUnavailable,
			Status:             v1.ConditionTrue,
			Reason:             "NoRouteCreated",
			Message:            "Node created without a route",
			LastTransitionTime: metav1.NewTime(kl.clock.Now()),
		})
	}
	//This block does the following:
	//1. If kl.externalCloudProvider is true, it creates the external cloud provider taint (value
	//   "true", effect NoSchedule) and appends it to nodeTaints.
	//2. If nodeTaints is non-empty, it assigns the slice to node.Spec.Taints.
	//3. If kl.providerRequiresNetworkingConfiguration() returns true, it appends a
	//   NodeNetworkUnavailable condition with status True, reason "NoRouteCreated", message
	//   "Node created without a route", and LastTransitionTime set to the current time.
	//In short, it applies provider-related taints and marks the node network as unavailable until the
	//cloud provider sets up routes.

	if kl.enableControllerAttachDetach {
		if node.Annotations == nil {
			node.Annotations = make(map[string]string)
		}

		klog.V(2).InfoS("Setting node annotation to enable volume controller attach/detach")
		node.Annotations[volutil.ControllerManagedAttachAnnotation] = "true"
	} else {
		klog.V(2).InfoS("Controller attach/detach is disabled for this node; Kubelet will attach and detach volumes")
	}
	//This block decides, based on kl.enableControllerAttachDetach, whether to annotate the node so
	//that the attach/detach controller manages volume attach and detach.
	//- If kl.enableControllerAttachDetach is true, it initializes node.Annotations if necessary, logs
	//  that the annotation is being set, and sets volutil.ControllerManagedAttachAnnotation to "true".
	//- If it is false, it logs that controller attach/detach is disabled and that the Kubelet itself
	//  will attach and detach volumes.

	if kl.keepTerminatedPodVolumes {
		if node.Annotations == nil {
			node.Annotations = make(map[string]string)
		}
		klog.V(2).InfoS("Setting node annotation to keep pod volumes of terminated pods attached to the node")
		node.Annotations[volutil.KeepTerminatedPodVolumesAnnotation] = "true"
	}

	// @question: should this be placed after the call to the cloud provider? which also applies labels
	for k, v := range kl.nodeLabels {
		if cv, found := node.ObjectMeta.Labels[k]; found {
			klog.InfoS("the node label will overwrite default setting", "labelKey", k, "labelValue", v, "default", cv)
		}
		node.ObjectMeta.Labels[k] = v
	}
	//This block performs two steps:
	//1. If kl.keepTerminatedPodVolumes is true, it initializes node.Annotations if necessary, logs
	//   that the annotation is being set, and sets volutil.KeepTerminatedPodVolumesAnnotation to "true".
	//2. It then iterates over kl.nodeLabels; for each key it logs that the node label will overwrite
	//   the default setting if a label with the same key already exists, and assigns the value to
	//   node.ObjectMeta.Labels.
	//The @question comment above asks whether this should run after the cloud provider call, which
	//also applies labels; the code leaves that question open.

	if kl.providerID != "" {
		node.Spec.ProviderID = kl.providerID
	}

	if kl.cloud != nil {
		instances, ok := kl.cloud.Instances()
		if !ok {
			return nil, fmt.Errorf("failed to get instances from cloud provider")
		}

		// TODO: We can't assume that the node has credentials to talk to the
		// cloudprovider from arbitrary nodes. At most, we should talk to a
		// local metadata server here.
		var err error
		if node.Spec.ProviderID == "" {
			node.Spec.ProviderID, err = cloudprovider.GetInstanceProviderID(ctx, kl.cloud, kl.nodeName)
			if err != nil {
				return nil, err
			}
		}

		instanceType, err := instances.InstanceType(ctx, kl.nodeName)
		if err != nil {
			return nil, err
		}
		if instanceType != "" {
			klog.InfoS("Adding label from cloud provider", "labelKey", v1.LabelInstanceType, "labelValue", instanceType)
			node.ObjectMeta.Labels[v1.LabelInstanceType] = instanceType
			klog.InfoS("Adding node label from cloud provider", "labelKey", v1.LabelInstanceTypeStable, "labelValue", instanceType)
			node.ObjectMeta.Labels[v1.LabelInstanceTypeStable] = instanceType
		}
		// If the cloud has zone information, label the node with the zone information
		zones, ok := kl.cloud.Zones()
		if ok {
			zone, err := zones.GetZone(ctx)
			if err != nil {
				return nil, fmt.Errorf("failed to get zone from cloud provider: %v", err)
			}
			if zone.FailureDomain != "" {
				klog.InfoS("Adding node label from cloud provider", "labelKey", v1.LabelFailureDomainBetaZone, "labelValue", zone.FailureDomain)
				node.ObjectMeta.Labels[v1.LabelFailureDomainBetaZone] = zone.FailureDomain
				klog.InfoS("Adding node label from cloud provider", "labelKey", v1.LabelTopologyZone, "labelValue", zone.FailureDomain)
				node.ObjectMeta.Labels[v1.LabelTopologyZone] = zone.FailureDomain
			}
			if zone.Region != "" {
				klog.InfoS("Adding node label from cloud provider", "labelKey", v1.LabelFailureDomainBetaRegion, "labelValue", zone.Region)
				node.ObjectMeta.Labels[v1.LabelFailureDomainBetaRegion] = zone.Region
				klog.InfoS("Adding node label from cloud provider", "labelKey", v1.LabelTopologyRegion, "labelValue", zone.Region)
				node.ObjectMeta.Labels[v1.LabelTopologyRegion] = zone.Region
			}
		}
	}
	//This block queries the cloud provider for the node's provider ID, instance type, and zone
	//information and records them on the node:
	//- If kl.providerID is non-empty, it is assigned to node.Spec.ProviderID.
	//- If kl.cloud is non-nil, kl.cloud.Instances() must succeed, otherwise an error is returned.
	//- If node.Spec.ProviderID is still empty, cloudprovider.GetInstanceProviderID() is used to look it up.
	//- instances.InstanceType() is then called; on error the function returns, and a non-empty
	//  instance type is added as the v1.LabelInstanceType and v1.LabelInstanceTypeStable labels.
	//- If kl.cloud.Zones() is available, zones.GetZone() is called; on error the function returns.
	//  Zone handling is skipped entirely when the provider exposes no zone support.
	//- A non-empty FailureDomain is added as the v1.LabelFailureDomainBetaZone and v1.LabelTopologyZone
	//  labels, and a non-empty Region as v1.LabelFailureDomainBetaRegion and v1.LabelTopologyRegion.
	//The goal is to label the node with cloud provider metadata so the cluster can use it for
	//scheduling and node management.

	kl.setNodeStatus(ctx, node)

	return node, nil
}

//In summary, initialNode queries the cloud provider for the node's instance type and zone information
//and adds them as labels on the node object.
//If the node's ProviderID is empty, GetInstanceProviderID fills it in; Instances.InstanceType supplies
//the instance type labels; and when zone information is available, Zones.GetZone supplies the zone and
//region labels.
//Finally, setNodeStatus fills in the node's status and the completed node object is returned.
//Any error encountered along the way is returned to the caller.
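
//A minimal sketch (under illustrative names and values) of the kind of Node object initialNode
//produces for a node registered as unschedulable with controller-managed attach/detach enabled;
//taintExists is a local stand-in for taintutil.TaintExists, and the annotation key is written out
//literally rather than via volutil.ControllerManagedAttachAnnotation.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// taintExists is a local stand-in for taintutil.TaintExists: it reports whether
// the slice already contains a taint with the same key and effect.
func taintExists(taints []v1.Taint, t *v1.Taint) bool {
	for i := range taints {
		if taints[i].MatchTaint(t) {
			return true
		}
	}
	return false
}

func main() {
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "node-a",
			Labels: map[string]string{v1.LabelHostname: "node-a"},
			Annotations: map[string]string{
				// Literal value of volutil.ControllerManagedAttachAnnotation.
				"volumes.kubernetes.io/controller-managed-attach-detach": "true",
			},
		},
		Spec: v1.NodeSpec{Unschedulable: true},
	}

	// Same dedup-and-append logic as initialNode uses for the unschedulable taint.
	unschedulable := v1.Taint{Key: v1.TaintNodeUnschedulable, Effect: v1.TaintEffectNoSchedule}
	if node.Spec.Unschedulable && !taintExists(node.Spec.Taints, &unschedulable) {
		node.Spec.Taints = append(node.Spec.Taints, unschedulable)
	}

	fmt.Printf("node %s taints=%v annotations=%v\n", node.Name, node.Spec.Taints, node.Annotations)
}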

// fastNodeStatusUpdate is a "lightweight" version of syncNodeStatus which doesn't hit the
// apiserver except for the final run, to be called by fastStatusUpdateOnce in each loop.
// It holds the same lock as syncNodeStatus and is thread-safe when called concurrently with
// syncNodeStatus. Its return value indicates whether the loop running it should exit
// (final run), and it also sets kl.containerRuntimeReadyExpected.
func (kl *Kubelet) fastNodeStatusUpdate(ctx context.Context, timeout bool) (completed bool) {
	kl.syncNodeStatusMux.Lock()
	defer func() {
		kl.syncNodeStatusMux.Unlock()

		if completed {
			// containerRuntimeReadyExpected is read by updateRuntimeUp().
			// Not going for a more granular mutex as this path runs only once.
			kl.updateRuntimeMux.Lock()
			defer kl.updateRuntimeMux.Unlock()
			kl.containerRuntimeReadyExpected = true
		}
	}()

	if timeout {
		klog.ErrorS(nil, "Node not becoming ready in time after startup")
		return true
	}

	originalNode, err := kl.GetNode()
	if err != nil {
		klog.ErrorS(err, "Error getting the current node from lister")
		return false
	}

	readyIdx, originalNodeReady := nodeutil.GetNodeCondition(&originalNode.Status, v1.NodeReady)
	if readyIdx == -1 {
		klog.ErrorS(nil, "Node does not have NodeReady condition", "originalNode", originalNode)
		return false
	}

	if originalNodeReady.Status == v1.ConditionTrue {
		return true
	}

	// This is in addition to the regular syncNodeStatus logic so we can get the container runtime status earlier.
	// This function itself has a mutex and it doesn't recursively call fastNodeStatusUpdate or syncNodeStatus.
	kl.updateRuntimeUp()

	node, changed := kl.updateNode(ctx, originalNode)

	if !changed {
		// We don't do markVolumesFromNode(node) here and leave it to the regular syncNodeStatus().
		return false
	}

	readyIdx, nodeReady := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)
	if readyIdx == -1 {
		klog.ErrorS(nil, "Node does not have NodeReady condition", "node", node)
		return false
	}

	if nodeReady.Status == v1.ConditionFalse {
		return false
	}

	klog.InfoS("Fast updating node status as it just became ready")
	if _, err := kl.patchNodeStatus(originalNode, node); err != nil {
		// The originalNode is probably stale, but we know that the current state of kubelet would turn
		// the node to be ready. Retry using syncNodeStatus() which fetches from the apiserver.
		klog.ErrorS(err, "Error updating node status, will retry with syncNodeStatus")

		// The reversed kl.syncNodeStatusMux.Unlock/Lock() below to allow kl.syncNodeStatus() execution.
		kl.syncNodeStatusMux.Unlock()
		kl.syncNodeStatus()
		// This lock action is unnecessary if we add a flag to check in the defer before unlocking it,
		// but having it here makes the logic a bit easier to read.
		kl.syncNodeStatusMux.Lock()
	}

	// We don't do markVolumesFromNode(node) here and leave it to the regular syncNodeStatus().
	return true
}

//This Kubelet method is a fast path for updating node status; it only contacts the API server on its
//final run.
//It holds the same lock as syncNodeStatus and is safe to call concurrently with it.
//Its return value tells the calling loop whether to exit (final run), and it also sets
//kl.containerRuntimeReadyExpected. The steps are:
//1. Acquire the lock and defer the unlock.
//2. On timeout, log an error and return true.
//3. Fetch the current node from the lister.
//4. Look up the NodeReady condition; if it is missing, log an error and return false.
//5. If the NodeReady condition is already True, return true.
//6. Call updateRuntimeUp to refresh the container runtime status early.
//7. Compute the updated node with updateNode.
//8. Look up NodeReady on the updated node; if it is missing, log an error and return false.
//9. If NodeReady is False, return false.
//10. Otherwise log, patch the node status, and fall back to syncNodeStatus if the patch fails.
//11. Return true.
//The lock keeps the fast path safe against concurrent syncNodeStatus calls, and the condition checks
//and error handling keep the status update accurate.

// syncNodeStatus should be called periodically from a goroutine.
// It synchronizes node status to master if there is any change or enough time
// passed from the last sync, registering the kubelet first if necessary.
func (kl *Kubelet) syncNodeStatus() {
	kl.syncNodeStatusMux.Lock()
	defer kl.syncNodeStatusMux.Unlock()
	ctx := context.Background()

	if kl.kubeClient == nil || kl.heartbeatClient == nil {
		return
	}
	if kl.registerNode {
		// This will exit immediately if it doesn't need to do anything.
		kl.registerWithAPIServer()
	}
	if err := kl.updateNodeStatus(ctx); err != nil {
		klog.ErrorS(err, "Unable to update node status")
	}
}

//syncNodeStatus is a Kubelet method that should be called periodically from a goroutine.
//It synchronizes the node status to the master whenever something changed or enough time has passed
//since the last sync, registering the kubelet first if necessary.
//It locks syncNodeStatusMux to keep the sync atomic and creates a background context.
//If kl.kubeClient or kl.heartbeatClient is nil, it returns immediately.
//If kl.registerNode is true, it calls kl.registerWithAPIServer(), which exits immediately once
//registration has completed.
//Finally, it calls kl.updateNodeStatus(ctx) and logs an error if the update fails.
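
//Outside this file, the kubelet drives syncNodeStatus from a goroutine roughly as sketched below
//(wait.JitterUntil with the configured nodeStatusUpdateFrequency, 10s by default); the 1s period and
//the dummy sync function here are only for demonstration.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Stand-in for kl.syncNodeStatus; here it just prints a timestamp.
	syncNodeStatus := func() {
		fmt.Println("syncing node status at", time.Now().Format(time.RFC3339))
	}

	stopCh := make(chan struct{})
	// Run the sync periodically with a little jitter; the kubelet uses its
	// configured nodeStatusUpdateFrequency (10s by default) as the period.
	go wait.JitterUntil(syncNodeStatus, 1*time.Second, 0.04, true, stopCh)

	time.Sleep(3500 * time.Millisecond)
	close(stopCh)
}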

// updateNodeStatus updates node status to master with retries if there is any
// change or enough time passed from the last sync.
func (kl *Kubelet) updateNodeStatus(ctx context.Context) error {
	klog.V(5).InfoS("Updating node status")
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := kl.tryUpdateNodeStatus(ctx, i); err != nil {
			if i > 0 && kl.onRepeatedHeartbeatFailure != nil {
				kl.onRepeatedHeartbeatFailure()
			}
			klog.ErrorS(err, "Error updating node status, will retry")
		} else {
			return nil
		}
	}
	return fmt.Errorf("update node status exceeds retry count")
}

//This Kubelet method pushes the node status to the master when something changed or enough time has
//passed since the last sync, retrying up to nodeStatusUpdateRetry times.
//It logs the attempt and then loops calling tryUpdateNodeStatus.
//If an attempt fails and it is not the first one, the onRepeatedHeartbeatFailure callback is invoked,
//and the error is logged.
//A successful attempt returns nil; if every retry fails, an error is returned.

// tryUpdateNodeStatus tries to update node status to master if there is any
// change or enough time passed from the last sync.
func (kl *Kubelet) tryUpdateNodeStatus(ctx context.Context, tryNumber int) error {
	// In large clusters, GET and PUT operations on Node objects coming
	// from here are the majority of load on apiserver and etcd.
	// To reduce the load on etcd, we are serving GET operations from
	// apiserver cache (the data might be slightly delayed but it doesn't
	// seem to cause more conflict - the delays are pretty small).
	// If it results in a conflict, all retries are served directly from etcd.
	opts := metav1.GetOptions{}
	if tryNumber == 0 {
		util.FromApiserverCache(&opts)
	}
	originalNode, err := kl.heartbeatClient.CoreV1().Nodes().Get(ctx, string(kl.nodeName), opts)
	if err != nil {
		return fmt.Errorf("error getting node %q: %v", kl.nodeName, err)
	}
	if originalNode == nil {
		return fmt.Errorf("nil %q node object", kl.nodeName)
	}

	node, changed := kl.updateNode(ctx, originalNode)
	shouldPatchNodeStatus := changed || kl.clock.Since(kl.lastStatusReportTime) >= kl.nodeStatusReportFrequency

	if !shouldPatchNodeStatus {
		kl.markVolumesFromNode(node)
		return nil
	}

	updatedNode, err := kl.patchNodeStatus(originalNode, node)
	if err == nil {
		kl.markVolumesFromNode(updatedNode)
	}
	return err
}

//This function tries to update the node status on the master when something changed or enough time
//has passed since the last sync.
//To reduce the load on etcd, the first attempt serves the GET from the apiserver cache; if that leads
//to a conflict, the retries go directly to etcd.
//It compares the computed node status with the original and checks the report interval to decide
//whether a patch is needed.
//When a patch is needed, patchNodeStatus updates the node status and markVolumesFromNode records the
//node's in-use volumes.
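
//util.FromApiserverCache sets ResourceVersion to "0" on the GetOptions, which allows the apiserver
//to answer the GET from its watch cache instead of doing a quorum read from etcd; a self-contained
//sketch of that first-attempt behaviour (fromApiserverCache is a local copy of the helper):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// fromApiserverCache is a local copy of util.FromApiserverCache: ResourceVersion
// "0" means "any version is acceptable", so the apiserver may serve the GET from
// its cache instead of reading through to etcd.
func fromApiserverCache(opts *metav1.GetOptions) {
	opts.ResourceVersion = "0"
}

func main() {
	for tryNumber := 0; tryNumber < 2; tryNumber++ {
		opts := metav1.GetOptions{}
		if tryNumber == 0 {
			fromApiserverCache(&opts) // first attempt: a cached read is acceptable
		}
		// Later attempts leave ResourceVersion empty, forcing a quorum read.
		fmt.Printf("try %d -> GetOptions%+v\n", tryNumber, opts)
	}
}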

// updateNode creates a copy of originalNode and runs update logic on it.
// It returns the updated node object and a bool indicating if anything has been changed.
func (kl *Kubelet) updateNode(ctx context.Context, originalNode *v1.Node) (*v1.Node, bool) {
	node := originalNode.DeepCopy()

	podCIDRChanged := false
	if len(node.Spec.PodCIDRs) != 0 {
		// Pod CIDR could have been updated before, so we cannot rely on
		// node.Spec.PodCIDR being non-empty. We also need to know if pod CIDR is
		// actually changed.
		var err error
		podCIDRs := strings.Join(node.Spec.PodCIDRs, ",")
		if podCIDRChanged, err = kl.updatePodCIDR(ctx, podCIDRs); err != nil {
			klog.ErrorS(err, "Error updating pod CIDR")
		}
	}

	areRequiredLabelsNotPresent := false
	osName, osLabelExists := node.Labels[v1.LabelOSStable]
	if !osLabelExists || osName != goruntime.GOOS {
		if len(node.Labels) == 0 {
			node.Labels = make(map[string]string)
		}
		node.Labels[v1.LabelOSStable] = goruntime.GOOS
		areRequiredLabelsNotPresent = true
	}
	// Set the arch if there is a mismatch
	arch, archLabelExists := node.Labels[v1.LabelArchStable]
	if !archLabelExists || arch != goruntime.GOARCH {
		if len(node.Labels) == 0 {
			node.Labels = make(map[string]string)
		}
		node.Labels[v1.LabelArchStable] = goruntime.GOARCH
		areRequiredLabelsNotPresent = true
	}

	kl.setNodeStatus(ctx, node)

	changed := podCIDRChanged || nodeStatusHasChanged(&originalNode.Status, &node.Status) || areRequiredLabelsNotPresent
	return node, changed
}

//This function computes the updated node object.
//It first deep-copies the original node and then runs the update logic on the copy: it checks and
//updates the pod CIDR, the OS label, and the architecture label, and calls setNodeStatus to fill in
//the node status.
//It returns the updated node object and a bool indicating whether anything changed.

// patchNodeStatus patches node on the API server based on originalNode.
// It returns any potential error, or an updatedNode and refreshes the state of kubelet when successful.
func (kl *Kubelet) patchNodeStatus(originalNode, node *v1.Node) (*v1.Node, error) {
	// Patch the current status on the API server
	updatedNode, _, err := nodeutil.PatchNodeStatus(kl.heartbeatClient.CoreV1(), types.NodeName(kl.nodeName), originalNode, node)
	if err != nil {
		return nil, err
	}
	kl.lastStatusReportTime = kl.clock.Now()
	kl.setLastObservedNodeAddresses(updatedNode.Status.Addresses)

	readyIdx, readyCondition := nodeutil.GetNodeCondition(&updatedNode.Status, v1.NodeReady)
	if readyIdx >= 0 && readyCondition.Status == v1.ConditionTrue {
		kl.nodeStartupLatencyTracker.RecordNodeReady()
	}

	return updatedNode, nil
}

//This function patches the node status on the API server based on originalNode and the updated node,
//sending the patch request via nodeutil.PatchNodeStatus.
//On success it returns the updated node with a nil error and refreshes the kubelet's state; on
//failure it returns nil and the error.
//It also updates kl.lastStatusReportTime and lastObservedNodeAddresses, and records the node-ready
//time when the patched node reports a Ready condition of True.
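
//Under the hood, nodeutil.PatchNodeStatus computes a strategic merge patch between originalNode and
//node and sends it to the node's status subresource; the following standalone sketch shows roughly
//how such a patch is built (the condition values are illustrative, and the real helper additionally
//guards the spec and the address list).

package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	original := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-a"}}
	updated := original.DeepCopy()
	updated.Status.Conditions = []v1.NodeCondition{{
		Type:   v1.NodeReady,
		Status: v1.ConditionTrue,
		Reason: "KubeletReady",
	}}

	oldData, err := json.Marshal(original)
	if err != nil {
		panic(err)
	}
	newData, err := json.Marshal(updated)
	if err != nil {
		panic(err)
	}

	// Build the strategic merge patch that turns original into updated; a patch
	// of this shape is what ends up on the node's status subresource.
	patch, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch))
}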

// markVolumesFromNode updates volumeManager with VolumesInUse status from node.
//
// In the case of node status update being unnecessary, call with the fetched node.
// We must mark the volumes as ReportedInUse in volume manager's dsw even
// if no changes were made to the node status (no volumes were added or removed
// from the VolumesInUse list).
//
// The reason is that on a kubelet restart, the volume manager's dsw is
// repopulated and the volume ReportedInUse is initialized to false, while the
// VolumesInUse list from the Node object still contains the state from the
// previous kubelet instantiation.
//
// Once the volumes are added to the dsw, the ReportedInUse field needs to be
// synced from the VolumesInUse list in the Node.Status.
//
// The MarkVolumesAsReportedInUse() call cannot be performed in dsw directly
// because it does not have access to the Node object.
// This also cannot be populated on node status manager init because the volume
// may not have been added to dsw at that time.
//
// Or, after a successful node status update, call with updatedNode returned from
// the patch call, to mark the volumeInUse as reportedInUse to indicate
// those volumes are already updated in the node's status
func (kl *Kubelet) markVolumesFromNode(node *v1.Node) {
	kl.volumeManager.MarkVolumesAsReportedInUse(node.Status.VolumesInUse)
}

// recordNodeStatusEvent records an event of the given type with the given
// message for the node.
func (kl *Kubelet) recordNodeStatusEvent(eventType, event string) {
	klog.V(2).InfoS("Recording event message for node", "node", klog.KRef("", string(kl.nodeName)), "event", event)
	kl.recorder.Eventf(kl.nodeRef, eventType, event, "Node %s status is now: %s", kl.nodeName, event)
}

// recordEvent records an event for this node, the Kubelet's nodeRef is passed to the recorder
func (kl *Kubelet) recordEvent(eventType, event, message string) {
	kl.recorder.Eventf(kl.nodeRef, eventType, event, message)
}

//1. markVolumesFromNode marks the volumes reported in the node's status as in use in the volume manager.
//2. recordNodeStatusEvent records an event of the given type with the given message for the node.
//3. recordEvent records an event for this node, passing the Kubelet's nodeRef to the recorder.

// record if node schedulable change.
func (kl *Kubelet) recordNodeSchedulableEvent(ctx context.Context, node *v1.Node) error {
	kl.lastNodeUnschedulableLock.Lock()
	defer kl.lastNodeUnschedulableLock.Unlock()
	if kl.lastNodeUnschedulable != node.Spec.Unschedulable {
		if node.Spec.Unschedulable {
			kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeNotSchedulable)
		} else {
			kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeSchedulable)
		}
		kl.lastNodeUnschedulable = node.Spec.Unschedulable
	}
	return nil
}

//This function records an event when the node's schedulability changes.
//It locks lastNodeUnschedulableLock for concurrency safety and compares kl.lastNodeUnschedulable with
//node.Spec.Unschedulable.
//If they differ, it records a NodeNotSchedulable or NodeSchedulable event accordingly and updates
//kl.lastNodeUnschedulable.
//Finally it returns nil.

// setNodeStatus fills in the Status fields of the given Node, overwriting
// any fields that are currently set.
// TODO(madhusudancs): Simplify the logic for setting node conditions and
// refactor the node status condition code out to a different file.
func (kl *Kubelet) setNodeStatus(ctx context.Context, node *v1.Node) {
	for i, f := range kl.setNodeStatusFuncs {
		klog.V(5).InfoS("Setting node status condition code", "position", i, "node", klog.KObj(node))
		if err := f(ctx, node); err != nil {
			klog.ErrorS(err, "Failed to set some node status fields", "node", klog.KObj(node))
		}
	}
}

func (kl *Kubelet) setLastObservedNodeAddresses(addresses []v1.NodeAddress) {
	kl.lastObservedNodeAddressesMux.Lock()
	defer kl.lastObservedNodeAddressesMux.Unlock()
	kl.lastObservedNodeAddresses = addresses
}
func (kl *Kubelet) getLastObservedNodeAddresses() []v1.NodeAddress {
	kl.lastObservedNodeAddressesMux.RLock()
	defer kl.lastObservedNodeAddressesMux.RUnlock()
	return kl.lastObservedNodeAddresses
}

// defaultNodeStatusFuncs is a factory that generates the default set of
// setNodeStatus funcs
func (kl *Kubelet) defaultNodeStatusFuncs() []func(context.Context, *v1.Node) error {
	// if cloud is not nil, we expect the cloud resource sync manager to exist
	var nodeAddressesFunc func() ([]v1.NodeAddress, error)
	if kl.cloud != nil {
		nodeAddressesFunc = kl.cloudResourceSyncManager.NodeAddresses
	}
	var setters []func(ctx context.Context, n *v1.Node) error
	setters = append(setters,
		nodestatus.NodeAddress(kl.nodeIPs, kl.nodeIPValidator, kl.hostname, kl.hostnameOverridden, kl.externalCloudProvider, kl.cloud, nodeAddressesFunc),
		nodestatus.MachineInfo(string(kl.nodeName), kl.maxPods, kl.podsPerCore, kl.GetCachedMachineInfo, kl.containerManager.GetCapacity,
			kl.containerManager.GetDevicePluginResourceCapacity, kl.containerManager.GetNodeAllocatableReservation, kl.recordEvent, kl.supportLocalStorageCapacityIsolation()),
		nodestatus.VersionInfo(kl.cadvisor.VersionInfo, kl.containerRuntime.Type, kl.containerRuntime.Version),
		nodestatus.DaemonEndpoints(kl.daemonEndpoints),
		nodestatus.Images(kl.nodeStatusMaxImages, kl.imageManager.GetImageList),
		nodestatus.GoRuntime(),
		nodestatus.RuntimeHandlers(kl.runtimeState.runtimeHandlers),
	)
	// Volume limits
	setters = append(setters, nodestatus.VolumeLimits(kl.volumePluginMgr.ListVolumePluginWithLimits))

	setters = append(setters,
		nodestatus.MemoryPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderMemoryPressure, kl.recordNodeStatusEvent),
		nodestatus.DiskPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderDiskPressure, kl.recordNodeStatusEvent),
		nodestatus.PIDPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderPIDPressure, kl.recordNodeStatusEvent),
		nodestatus.ReadyCondition(kl.clock.Now, kl.runtimeState.runtimeErrors, kl.runtimeState.networkErrors, kl.runtimeState.storageErrors,
			kl.containerManager.Status, kl.shutdownManager.ShutdownStatus, kl.recordNodeStatusEvent, kl.supportLocalStorageCapacityIsolation()),
		nodestatus.VolumesInUse(kl.volumeManager.ReconcilerStatesHasBeenSynced, kl.volumeManager.GetVolumesInUse),
		// TODO(mtaufen): I decided not to move this setter for now, since all it does is send an event
		// and record state back to the Kubelet runtime object. In the future, I'd like to isolate
		// these side-effects by decoupling the decisions to send events and partial status recording
		// from the Node setters.
		kl.recordNodeSchedulableEvent,
	)
	return setters
}

//This Kubelet method builds the default set of setNodeStatus functions used to fill in the node status.
//Based on the Kubelet's configuration and runtime state, the setters populate the node's addresses,
//machine info, version info, daemon endpoints, images, Go runtime info, runtime handlers, volume
//limits, the memory/disk/PID pressure conditions, the Ready condition, and the volumes in use, and
//finally record schedulability events.
//It returns a slice containing all of these setter functions.
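
//All of these setters share the signature func(ctx, *v1.Node) error, and setNodeStatus simply runs
//them in order; the following standalone sketch shows that composition with two toy setters that
//stand in for nodestatus.MachineInfo and nodestatus.ReadyCondition.

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// setter mirrors the shape of the nodestatus setters returned by
// defaultNodeStatusFuncs: each one fills in part of the node's status.
type setter func(ctx context.Context, node *v1.Node) error

// machineInfo is a toy stand-in for nodestatus.MachineInfo.
func machineInfo(_ context.Context, node *v1.Node) error {
	node.Status.NodeInfo.OperatingSystem = "linux"
	return nil
}

// readyCondition is a toy stand-in for nodestatus.ReadyCondition.
func readyCondition(_ context.Context, node *v1.Node) error {
	node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{
		Type: v1.NodeReady, Status: v1.ConditionTrue, Reason: "KubeletReady",
	})
	return nil
}

func main() {
	setters := []setter{machineInfo, readyCondition}

	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-a"}}
	for _, f := range setters { // same loop shape as Kubelet.setNodeStatus
		if err := f(context.TODO(), node); err != nil {
			fmt.Println("setter failed:", err)
		}
	}
	fmt.Printf("os=%s conditions=%d\n", node.Status.NodeInfo.OperatingSystem, len(node.Status.Conditions))
}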

// Validate given node IP belongs to the current host
func validateNodeIP(nodeIP net.IP) error {
	// Honor IP limitations set in setNodeStatus()
	if nodeIP.To4() == nil && nodeIP.To16() == nil {
		return fmt.Errorf("nodeIP must be a valid IP address")
	}
	if nodeIP.IsLoopback() {
		return fmt.Errorf("nodeIP can't be loopback address")
	}
	if nodeIP.IsMulticast() {
		return fmt.Errorf("nodeIP can't be a multicast address")
	}
	if nodeIP.IsLinkLocalUnicast() {
		return fmt.Errorf("nodeIP can't be a link-local unicast address")
	}
	if nodeIP.IsUnspecified() {
		return fmt.Errorf("nodeIP can't be an all zeros address")
	}

	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return err
	}
	for _, addr := range addrs {
		var ip net.IP
		switch v := addr.(type) {
		case *net.IPNet:
			ip = v.IP
		case *net.IPAddr:
			ip = v.IP
		}
		if ip != nil && ip.Equal(nodeIP) {
			return nil
		}
	}
	return fmt.Errorf("node IP: %q not found in the host's network interfaces", nodeIP.String())
}

//This function validates that the given node IP belongs to the current host.
//It first checks that the IP is a valid address and rejects loopback, multicast, link-local unicast,
//and unspecified (all-zeros) addresses.
//It then walks the host's network interface addresses and returns nil if one of them matches the node
//IP, otherwise an error.
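
//A standalone sketch of the same validation applied to arbitrary IP strings; belongsToHost mirrors
//the interface scan in validateNodeIP, and 192.0.2.10 is a documentation address that will normally
//not belong to the host.

package main

import (
	"fmt"
	"net"
)

// belongsToHost mirrors the interface scan in validateNodeIP: it reports
// whether the given IP is assigned to one of the host's interfaces.
func belongsToHost(nodeIP net.IP) (bool, error) {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return false, err
	}
	for _, addr := range addrs {
		var ip net.IP
		switch v := addr.(type) {
		case *net.IPNet:
			ip = v.IP
		case *net.IPAddr:
			ip = v.IP
		}
		if ip != nil && ip.Equal(nodeIP) {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	for _, s := range []string{"127.0.0.1", "192.0.2.10"} {
		ip := net.ParseIP(s)
		switch {
		case ip == nil:
			fmt.Println(s, "-> not a valid IP")
		case ip.IsLoopback():
			fmt.Println(s, "-> rejected: loopback, just as validateNodeIP would")
		default:
			ok, err := belongsToHost(ip)
			fmt.Println(s, "-> belongs to this host:", ok, "err:", err)
		}
	}
}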

// nodeStatusHasChanged compares the original node and current node's status and
// returns true if any change happens. The heartbeat timestamp is ignored.
func nodeStatusHasChanged(originalStatus *v1.NodeStatus, status *v1.NodeStatus) bool {
	if originalStatus == nil && status == nil {
		return false
	}
	if originalStatus == nil || status == nil {
		return true
	}

	// Compare node conditions here because we need to ignore the heartbeat timestamp.
	if nodeConditionsHaveChanged(originalStatus.Conditions, status.Conditions) {
		return true
	}

	// Compare other fields of NodeStatus.
	originalStatusCopy := originalStatus.DeepCopy()
	statusCopy := status.DeepCopy()
	originalStatusCopy.Conditions = nil
	statusCopy.Conditions = nil
	return !apiequality.Semantic.DeepEqual(originalStatusCopy, statusCopy)
}

//This function compares the node's original status with its current status and returns true if
//anything changed; the heartbeat timestamp is ignored.
//If both statuses are nil it returns false; if exactly one of them is nil it returns true.
//Node conditions are compared separately so that the heartbeat timestamp can be ignored; any condition
//change returns true.
//Finally, both statuses are deep-copied, their Conditions fields are cleared, and the remaining fields
//are compared with a semantic deep-equal: a difference returns true, otherwise false.

// nodeConditionsHaveChanged compares the original node and current node's
// conditions and returns true if any change happens. The heartbeat timestamp is
// ignored.
func nodeConditionsHaveChanged(originalConditions []v1.NodeCondition, conditions []v1.NodeCondition) bool {
	if len(originalConditions) != len(conditions) {
		return true
	}

	originalConditionsCopy := make([]v1.NodeCondition, 0, len(originalConditions))
	originalConditionsCopy = append(originalConditionsCopy, originalConditions...)
	conditionsCopy := make([]v1.NodeCondition, 0, len(conditions))
	conditionsCopy = append(conditionsCopy, conditions...)

	sort.SliceStable(originalConditionsCopy, func(i, j int) bool { return originalConditionsCopy[i].Type < originalConditionsCopy[j].Type })
	sort.SliceStable(conditionsCopy, func(i, j int) bool { return conditionsCopy[i].Type < conditionsCopy[j].Type })

	replacedheartbeatTime := metav1.Time{}
	for i := range conditionsCopy {
		originalConditionsCopy[i].LastHeartbeatTime = replacedheartbeatTime
		conditionsCopy[i].LastHeartbeatTime = replacedheartbeatTime
		if !apiequality.Semantic.DeepEqual(&originalConditionsCopy[i], &conditionsCopy[i]) {
			return true
		}
	}
	return false
}

//This function compares the node's original conditions with its current conditions and returns true
//if anything changed; the heartbeat time is ignored.
//It first compares the lengths of the two slices; if they differ, it returns true.
//Otherwise it copies both slices and sorts the copies by condition type.
//It then sets every condition's LastHeartbeatTime to a fixed metav1.Time value and compares the copies
//element by element; any mismatch returns true.
//If nothing differs, it returns false.
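
//A standalone sketch of why the heartbeat is ignored: two NodeReady conditions that differ only in
//LastHeartbeatTime compare as different under a raw semantic DeepEqual, but once the heartbeat is
//blanked (as nodeConditionsHaveChanged does) they compare as equal, so a heartbeat-only refresh does
//not trigger a status patch.

package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	now := metav1.Now()
	later := metav1.NewTime(now.Add(10 * time.Second))

	old := v1.NodeCondition{Type: v1.NodeReady, Status: v1.ConditionTrue, LastHeartbeatTime: now}
	cur := v1.NodeCondition{Type: v1.NodeReady, Status: v1.ConditionTrue, LastHeartbeatTime: later}

	// Raw comparison sees the heartbeat difference...
	fmt.Println("raw equal:", apiequality.Semantic.DeepEqual(old, cur))

	// ...but nodeConditionsHaveChanged blanks LastHeartbeatTime first,
	// so a heartbeat-only refresh does not count as a change.
	old.LastHeartbeatTime = metav1.Time{}
	cur.LastHeartbeatTime = metav1.Time{}
	fmt.Println("equal ignoring heartbeat:", apiequality.Semantic.DeepEqual(old, cur))
}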
