I have implemented leader election using the kubernetes/client-go leaderelection package. I am running 2 replicas. The first time around both pods get elected as leader, but after that the same pod is never elected as leader again, and after a while leader election stops altogether. If I delete one pod, the newly created pod is elected as leader; but again, once that pod stops leading, no pod acts as leader. I am using a ConfigMap as the resource lock. Please help me resolve this.
func NewElectorWithCallbacks(namespace, configMapName, identity string, ttl time.Duration, client cli.CoreV1Interface, callbacks *leaderelection.LeaderCallbacks) (*leaderelection.LeaderElector, error) {
	hostname, err := os.Hostname()
	if err != nil {
		return nil, err
	}

	// Record lock transitions as Kubernetes events.
	broadcaster := record.NewBroadcaster()
	broadcaster.StartLogging(log.Printf)
	broadcaster.StartRecordingToSink(&cli.EventSinkImpl{Interface: client.Events(namespace)})
	recorder := broadcaster.NewRecorder(scheme.Scheme, api.EventSource{Component: identity, Host: hostname})

	// ConfigMap-backed resource lock.
	cmLock := &resourcelock.ConfigMapLock{
		Client: client,
		ConfigMapMeta: meta.ObjectMeta{
			Namespace: namespace,
			Name:      configMapName,
		},
		LockConfig: resourcelock.ResourceLockConfig{
			Identity:      identity,
			EventRecorder: recorder,
		},
	}

	if callbacks == nil {
		callbacks = NewDefaultCallbacks()
	}

	config := leaderelection.LeaderElectionConfig{
		Lock:          cmLock,
		LeaseDuration: ttl,
		RenewDeadline: ttl / 2,
		RetryPeriod:   ttl / 4,
		Callbacks:     *callbacks,
	}

	return leaderelection.NewLeaderElector(config)
}
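NewDefaultCallbacks is a small helper that is not shown above; roughly, it just returns log-only callbacks, along the lines of this sketch (the exact body is not important to the problem):

func NewDefaultCallbacks() *leaderelection.LeaderCallbacks {
	return &leaderelection.LeaderCallbacks{
		OnStartedLeading: func(ctx context.Context) {
			log.Println("started leading")
		},
		OnStoppedLeading: func() {
			log.Println("stopped leading")
		},
		OnNewLeader: func(identity string) {
			log.Println("current leader:", identity)
		},
	}
}

The elector is then created and started like this: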
config, err := rest.InClusterConfig()
if err != nil {
	log.Fatalf("failed to load in-cluster config: %v", err)
}
v1Client, err := v1.NewForConfig(config)
if err != nil {
	log.Fatalf("failed to create CoreV1 client: %v", err)
}

callbacks := &leaderelection.LeaderCallbacks{
	OnStartedLeading: func(context.Context) {
		// do the work
		fmt.Println("selected as leader")
		// Wait forever
		select {}
	},
	OnStoppedLeading: func() {
		fmt.Println("Pod stopped leading")
	},
}

elector, err := election.NewElectorWithCallbacks(namespace, electionName, hostname, ttl, v1Client, callbacks)
if err != nil {
	log.Fatalf("failed to create leader elector: %v", err)
}
elector.Run(context.TODO())
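The variables used above (namespace, electionName, hostname, ttl) are set earlier in main; the exact values should not matter, but for illustration they are something like:

hostname, _ := os.Hostname()
namespace := "default"        // namespace the pods run in (illustrative value)
electionName := "my-election" // name of the ConfigMap used as the lock (illustrative value)
ttl := 10 * time.Second       // lease duration passed to NewElectorWithCallbacks (illustrative value)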