1. Preface

kube-scheduler achieves high availability through leader election: several instances can run at the same time, but only the one holding the leader lease actually schedules pods, while the others block until the lease changes hands. This article first demonstrates that behavior with a small experiment, then walks through how the scheduler's source code sets it up.
2. Example

2.1 Initial state

The cluster starts with a single kube-scheduler running on the master node. The leader lock is an Endpoints object in the kube-system namespace; its control-plane.alpha.kubernetes.io/leader annotation records who currently holds the lease:
[root@master kubectl]# ./kubectl get endpoints -n kube-system
NAME                      ENDPOINTS   AGE
kube-controller-manager   <none>      42h
kube-scheduler            <none>      42h
[root@master kubectl]#
[root@master kubectl]# ./kubectl get endpoints kube-scheduler -o yaml -n kube-system
apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"master_74cc3de4-f0be-11e9-9232-525400d54f7e","leaseDurationSeconds":15,"acquireTime":"2019-10-17T09:14:19Z","renewTime":"2019-10-17T09:41:41Z","leaderTransitions":5}'
  creationTimestamp: "2019-10-15T14:56:55Z"
  name: kube-scheduler
  namespace: kube-system
  resourceVersion: "59633"
  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-scheduler
  uid: 0786d7b7-ef5c-11e9-af01-525400d54f7e
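The annotation value is a JSON-serialized LeaderElectionRecord from k8s.io/client-go/tools/leaderelection/resourcelock, stored under the key resourcelock.LeaderElectionRecordAnnotationKey. As a quick illustration (a stand-alone snippet, not part of the article's demo), the value above can be decoded with that same type:

package main

import (
    "encoding/json"
    "fmt"

    "k8s.io/client-go/tools/leaderelection/resourcelock"
)

func main() {
    // Annotation value copied verbatim from the Endpoints object above.
    raw := `{"holderIdentity":"master_74cc3de4-f0be-11e9-9232-525400d54f7e","leaseDurationSeconds":15,"acquireTime":"2019-10-17T09:14:19Z","renewTime":"2019-10-17T09:41:41Z","leaderTransitions":5}`

    var rec resourcelock.LeaderElectionRecord
    if err := json.Unmarshal([]byte(raw), &rec); err != nil {
        panic(err)
    }
    // Prints: leader=master_74cc3de4-f0be-11e9-9232-525400d54f7e transitions=5
    fmt.Printf("leader=%s transitions=%d\n", rec.HolderIdentity, rec.LeaderTransitions)
}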
2.2 Killing the leader

Before this step, a standby kube-scheduler was started on the worker node (its command line and logs are shown after the yaml). Once the leader process on the master node is stopped and its lease expires, the standby takes over: holderIdentity now names the worker instance, and leaderTransitions has incremented from 5 to 6.
[root@master kubectl]# ./kubectl get endpoints kube-scheduler -o yaml -n kube-system
apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"worker_f6134651-f0bf-11e9-a387-5254009b5271","leaseDurationSeconds":15,"acquireTime":"2019-10-17T09:42:11Z","renewTime":"2019-10-17T09:42:13Z","leaderTransitions":6}'
  creationTimestamp: "2019-10-15T14:56:55Z"
  name: kube-scheduler
  namespace: kube-system
  resourceVersion: "59667"
  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-scheduler
  uid: 0786d7b7-ef5c-11e9-af01-525400d54f7e
[root@master kubectl]#
[root@worker scheduler]# cat config.txt
./kube-scheduler --master=http://172.21.0.16:8080
[root@worker scheduler]# ./kube-scheduler --master=http://172.21.0.16:8080
...
I1017 17:24:47.941202 32277 leaderelection.go:205] attempting to acquire leader lease kube-system/kube-scheduler...
I1017 17:42:11.815383 32277 leaderelection.go:214] successfully acquired lease kube-system/kube-scheduler
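The two log lines bracket the elector's acquire loop: every RetryPeriod (2s by default) the candidate tries to take or renew the lock, succeeding only once the previous holder has stopped renewing. A self-contained sketch of that retry pattern (a simplified illustration; the real logic lives in client-go's leaderelection.go):

package main

import (
    "context"
    "fmt"
    "time"
)

// acquire retries tryAcquire every retryPeriod until it succeeds or ctx ends.
func acquire(ctx context.Context, retryPeriod time.Duration, tryAcquire func() bool) bool {
    ticker := time.NewTicker(retryPeriod)
    defer ticker.Stop()
    for {
        // "attempting to acquire leader lease ..."
        if tryAcquire() {
            return true // "successfully acquired lease ..."
        }
        select {
        case <-ctx.Done():
            return false
        case <-ticker.C:
        }
    }
}

func main() {
    // Simulate a holder that releases the lock after three failed attempts.
    attempts := 0
    won := acquire(context.Background(), 2*time.Second, func() bool {
        attempts++
        return attempts > 3
    })
    fmt.Println("acquired:", won)
}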
2.3 Starting a custom scheduler

Next a second scheduler, my-scheduler, is started. Because it elects a leader under its own lock object, a new Endpoints object named my-scheduler appears in kube-system, holding a lease of its own rather than competing for the kube-scheduler one (a possible launch command is sketched below).
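The article does not show the exact launch command for my-scheduler. Assuming the (since-deprecated) flags kube-scheduler offered at the time, it would have looked something like this, giving the second instance its own scheduler name and lock object:

./kube-scheduler --master=http://172.21.0.16:8080 \
    --scheduler-name=my-scheduler \
    --lock-object-name=my-scheduler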
[root@master kubectl]# ./kubectl get endpoints -n kube-system
NAME                      ENDPOINTS   AGE
kube-controller-manager   <none>      42h
kube-scheduler            <none>      42h
my-scheduler              <none>      7s
[root@master kubectl]# ./kubectl get endpoints my-scheduler -o yaml -n kube-system
apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"master_1dd3cdbe-f0c3-11e9-985f-525400d54f7e","leaseDurationSeconds":15,"acquireTime":"2019-10-17T09:47:23Z","renewTime":"2019-10-17T09:47:45Z","leaderTransitions":0}'
  creationTimestamp: "2019-10-17T09:47:23Z"
  name: my-scheduler
  namespace: kube-system
  resourceVersion: "60119"
  selfLink: /api/v1/namespaces/kube-system/endpoints/my-scheduler
  uid: 1e6d5569-f0c3-11e9-b23b-525400d54f7e
[root@master kubectl]#
[root@master kubectl]# cat pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: test
spec:
  containers:
  - name: podtest
    image: nginx
    ports:
    - containerPort: 80
[root@master kubectl]# cat pod-scheduler.yaml
apiVersion: v1
kind: Pod
metadata:
  name: test-schduler
spec:
  schedulerName: my-scheduler
  containers:
  - name: podtest-scheduler
    image: nginx
    ports:
    - containerPort: 80
[root@master kubectl]# ./kubectl get pods
No resources found.
[root@master kubectl]# ./kubectl apply -f pod.yaml
pod/test created
[root@master kubectl]# ./kubectl apply -f pod-scheduler.yaml
pod/test-schduler created
[root@master kubectl]# ./kubectl get pods
NAME            READY   STATUS    RESTARTS   AGE
test            1/1     Running   0          3m3s
test-schduler   1/1     Running   0          2m55s
[root@master kubectl]# ./kubectl get pod test-schduler -o yaml | grep schedulerName
{"apiVersion":"v1","kind":"Pod","metadata":{"annotations":{},"name":"test-schduler","namespace":"default"},"spec":{"containers":[{"image":"nginx","name":"podtest-scheduler","ports":[{"containerPort":80}]}],"schedulerName":"my-scheduler"}}
schedulerName: my-scheduler
[root@master kubectl]# ./kubectl get pod test -o yaml | grep schedulerName
schedulerName: default-scheduler
[root@master kubectl]#
3. Source code analysis

3.1 Structs and default values

The leader-election settings live in KubeSchedulerConfiguration, which embeds the generic apiserver LeaderElectionConfiguration and adds the lock object's namespace and name:
// pkg/scheduler/apis/config/types.go
type KubeSchedulerConfiguration struct {
    ...
    LeaderElection KubeSchedulerLeaderElectionConfiguration
    ...
}

type KubeSchedulerLeaderElectionConfiguration struct {
    apiserverconfig.LeaderElectionConfiguration
    // LockObjectNamespace defines the namespace of the lock object
    LockObjectNamespace string
    // LockObjectName defines the lock object name
    LockObjectName string
}

// k8s.io/apiserver/pkg/apis/config/types.go
type LeaderElectionConfiguration struct {
    // LeaderElect enables the leader election client, gating the main loop on winning the lease
    LeaderElect bool
    // LeaseDuration is how long a lease is valid before non-leader candidates may try to take it over
    LeaseDuration metav1.Duration
    // RenewDeadline is how long the acting leader keeps retrying lease renewal before giving up
    RenewDeadline metav1.Duration
    // RetryPeriod is the interval between lock acquire/renew attempts
    RetryPeriod metav1.Duration
    // ResourceLock is the kind of API object used as the lock (endpoints here)
    ResourceLock string
}
// pkg/scheduler/apis/config/v1alpha1/defaults.go
func SetDefaults_KubeSchedulerConfiguration(obj *kubescedulerconfigv1alpha1.KubeSchedulerConfiguration) {
    ...
    if len(obj.LeaderElection.LockObjectNamespace) == 0 {
        // obj.LeaderElection.LockObjectNamespace = "kube-system"
        obj.LeaderElection.LockObjectNamespace = kubescedulerconfigv1alpha1.SchedulerDefaultLockObjectNamespace
    }
    if len(obj.LeaderElection.LockObjectName) == 0 {
        // obj.LeaderElection.LockObjectName = "kube-scheduler"
        obj.LeaderElection.LockObjectName = kubescedulerconfigv1alpha1.SchedulerDefaultLockObjectName
    }
    ...
}
// k8s.io/apiserver/pkg/apis/config/v1alpha1/defaults.go
func RecommendedDefaultLeaderElectionConfiguration(obj *LeaderElectionConfiguration) {
    zero := metav1.Duration{}
    if obj.LeaseDuration == zero {
        obj.LeaseDuration = metav1.Duration{Duration: 15 * time.Second}
    }
    if obj.RenewDeadline == zero {
        obj.RenewDeadline = metav1.Duration{Duration: 10 * time.Second}
    }
    if obj.RetryPeriod == zero {
        obj.RetryPeriod = metav1.Duration{Duration: 2 * time.Second}
    }
    if obj.ResourceLock == "" {
        obj.ResourceLock = EndpointsResourceLock
    }
    if obj.LeaderElect == nil {
        obj.LeaderElect = utilpointer.BoolPtr(true)
    }
}
After defaulting, the effective values are:

LockObjectNamespace = "kube-system"
LockObjectName      = "kube-scheduler"
ResourceLock        = "endpoints"
LeaderElect         = true
LeaseDuration       = 15s
RenewDeadline       = 10s
RetryPeriod         = 2s
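These can be overridden by starting kube-scheduler with --config pointing at a KubeSchedulerConfiguration file. A minimal sketch for the v1alpha1 API of this era (field names follow the structs above; the values here simply restate the defaults):

apiVersion: kubescheduler.config.k8s.io/v1alpha1
kind: KubeSchedulerConfiguration
leaderElection:
  leaderElect: true
  leaseDuration: 15s
  renewDeadline: 10s
  retryPeriod: 2s
  resourceLock: endpoints
  lockObjectNamespace: kube-system
  lockObjectName: kube-scheduler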
3.2 Flow

Options.Config() builds a LeaderElectionConfig when leader election is enabled, and Run() then wires the scheduler's main loop into the elector's callbacks:
// cmd/kube-scheduler/app/options/options.go
func (o *Options) Config() (*schedulerappconfig.Config, error) {
    ...
    // Set up leader election if enabled.
    var leaderElectionConfig *leaderelection.LeaderElectionConfig
    // LeaderElect defaults to true, so this branch runs unless the user
    // explicitly disables it; kube-scheduler supports HA out of the box.
    if c.ComponentConfig.LeaderElection.LeaderElect {
        leaderElectionConfig, err = makeLeaderElectionConfig(c.ComponentConfig.LeaderElection, leaderElectionClient, recorder)
        if err != nil {
            return nil, err
        }
    }
    ...
    c.LeaderElection = leaderElectionConfig
    ...
}
func makeLeaderElectionConfig(config kubeschedulerconfig.KubeSchedulerLeaderElectionConfiguration, client clientset.Interface, recorder record.EventRecorder) (*leaderelection.LeaderElectionConfig, error) {
    hostname, err := os.Hostname()
    if err != nil {
        return nil, fmt.Errorf("unable to get hostname: %v", err)
    }
    // add a uniquifier so that two processes on the same host don't accidentally both become active
    // (this id is exactly the holderIdentity seen in the annotations earlier, e.g. master_74cc3de4-...)
    id := hostname + "_" + string(uuid.NewUUID())
    rl, err := resourcelock.New(config.ResourceLock,
        config.LockObjectNamespace,
        config.LockObjectName,
        client.CoreV1(),
        resourcelock.ResourceLockConfig{
            Identity:      id,
            EventRecorder: recorder,
        })
    if err != nil {
        return nil, fmt.Errorf("couldn't create resource lock: %v", err)
    }
    return &leaderelection.LeaderElectionConfig{
        Lock:          rl,
        LeaseDuration: config.LeaseDuration.Duration,
        RenewDeadline: config.RenewDeadline.Duration,
        RetryPeriod:   config.RetryPeriod.Duration,
        WatchDog:      leaderelection.NewLeaderHealthzAdaptor(time.Second * 20),
        Name:          "kube-scheduler",
    }, nil
}
// cmd/kube-scheduler/app/server.go
func Run(cc schedulerserverconfig.CompletedConfig, stopCh <-chan struct{}) error {
    ...
    // Prepare a reusable runCommand function.
    run := func(ctx context.Context) {
        sched.Run()
        <-ctx.Done()
    }

    ctx, cancel := context.WithCancel(context.TODO()) // TODO once Run() accepts a context, it should be used here
    defer cancel()

    go func() {
        select {
        case <-stopCh:
            cancel()
        case <-ctx.Done():
        }
    }()

    // If leader election is enabled, runCommand via LeaderElector until done and exit.
    if cc.LeaderElection != nil {
        cc.LeaderElection.Callbacks = leaderelection.LeaderCallbacks{
            // The scheduler's main loop only starts once this instance wins the election.
            OnStartedLeading: run,
            OnStoppedLeading: func() {
                utilruntime.HandleError(fmt.Errorf("lost master"))
            },
        }
        leaderElector, err := leaderelection.NewLeaderElector(*cc.LeaderElection)
        if err != nil {
            return fmt.Errorf("couldn't create leader elector: %v", err)
        }
        // Run blocks: it campaigns for the lease, invokes OnStartedLeading on
        // success, and returns once leadership is lost or ctx is cancelled.
        leaderElector.Run(ctx)
        return fmt.Errorf("lost lease")
    }

    // Leader election is disabled, so runCommand inline until done.
    run(ctx)
    return fmt.Errorf("finished without leader elect")
}
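The same machinery is usable outside the scheduler via client-go. A minimal stand-alone sketch of the pattern (lock name, namespace, and kubeconfig handling are illustrative; it uses the endpoints-style lock of this era, which newer client-go versions have replaced with coordination.k8s.io Lease objects and resourcelock.LeaseLock):

package main

import (
    "context"
    "log"
    "os"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/uuid"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/client-go/tools/leaderelection"
    "k8s.io/client-go/tools/leaderelection/resourcelock"
)

func main() {
    cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
    if err != nil {
        log.Fatal(err)
    }
    client := kubernetes.NewForConfigOrDie(cfg)

    // Same idea as makeLeaderElectionConfig: hostname plus a uniquifier.
    hostname, _ := os.Hostname()
    id := hostname + "_" + string(uuid.NewUUID())

    lock := &resourcelock.EndpointsLock{
        EndpointsMeta: metav1.ObjectMeta{Namespace: "kube-system", Name: "demo-scheduler"},
        Client:        client.CoreV1(),
        LockConfig:    resourcelock.ResourceLockConfig{Identity: id},
    }

    // RunOrDie blocks: it campaigns for the lease using the same
    // LeaseDuration/RenewDeadline/RetryPeriod defaults seen above.
    leaderelection.RunOrDie(context.Background(), leaderelection.LeaderElectionConfig{
        Lock:          lock,
        LeaseDuration: 15 * time.Second,
        RenewDeadline: 10 * time.Second,
        RetryPeriod:   2 * time.Second,
        Callbacks: leaderelection.LeaderCallbacks{
            OnStartedLeading: func(ctx context.Context) {
                log.Println("became leader; real work would start here")
                <-ctx.Done()
            },
            OnStoppedLeading: func() {
                log.Println("lost the lease")
            },
        },
    })
}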