I set up my Kubernetes cluster with kubeadm. At the last step I need to join my node to the master, so I copied the join command to the node and ran it, resolving the errors it hit along the way. Then I went back to the master and ran "kubectl get nodes" — there is only the master node, no worker node. So I checked the kubelet log on the worker node, and it shows this error:
Feb 16 15:51:44 localhost.localdomain kubelet[938]: E0216 15:51:44.608258 938 controller.go:177] failed to update node lease, error: Operation cannot be fulfilled on leases.coordination.k8s.io "localhost.localdomain": the object has been modified; please apply your changes to the latest version and try again
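From what I understand, the kubelet heartbeats by updating a Lease object in the kube-node-lease namespace, named after the node; a conflict like this would mean something else keeps modifying the same Lease, for example two kubelets registered under the same node name. A sketch of how this can be inspected (standard commands, nothing cluster-specific assumed):

# on the master: list node Leases; each node should own exactly one, named after it
kubectl -n kube-node-lease get leases
# on each machine: show the hostname, which kubeadm uses as the default node name
hostnamectl status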
I have no idea how to solve it. By the way, I actually have two problems and this is one of them; the other, about the kube-apiserver ("kubernetes v1.16.2: break out"), I haven't solved yet either. Here are the main steps I used to set up the cluster:
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
systemctl stop firewalld && systemctl disable firewalld
iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat && iptables -P FORWARD ACCEPT
swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
modprobe br_netfilter
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
sysctl -p
cat << EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward = 1
EOF
sysctl -p /etc/sysctl.d/k8s.conf
yum install -y epel-release conntrack ipvsadm ipset jq sysstat curl iptables libseccomp unzip lrzsz
yum install -y yum-utils device-mapper-persistent-data lvm2 ntp ntpdate
ntpdate ntp1.aliyun.com
yum install docker-ce -y
yum -y install kubelet-1.17.0 kubeadm-1.17.0 kubectl-1.17.0 kubernetes-cni
systemctl enable docker
systemctl enable kubelet.service
systemctl start docker
systemctl start kubelet
kubeadm config images list
kubeadm config print init-defaults > kubeadm.conf
kubeadm init --kubernetes-version=v1.17.0 --pod-network-cidr=192.168.0.0/16 --apiserver-advertise-address=172.16.5.150
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Then I copied this to the node and executed it:

kubeadm join 172.16.5.150:6443 --token 2yj4eu.lhdrdks0dykao9in \
    --discovery-token-ca-cert-hash sha256:8563b0c50c48e563cdd4ac4380206133535df8a020bc2a949ff66b5aa7722ea7
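One thing I notice is that both machines seem to use the default hostname localhost.localdomain (the Lease in the error and the NODE column in the output below all carry that name), so the worker may be colliding with the master over the same Node/Lease object. I'm not sure this is the cause, but here is a sketch of what I might try on the worker before rejoining; k8s-worker-1 is just a placeholder name I made up:

# on the worker: undo the previous join attempt
kubeadm reset -f
# give the machine a unique hostname (placeholder)
hostnamectl set-hostname k8s-worker-1
# rejoin; --node-name sets the name registered with the API server
kubeadm join 172.16.5.150:6443 --token 2yj4eu.lhdrdks0dykao9in \
    --discovery-token-ca-cert-hash sha256:8563b0c50c48e563cdd4ac4380206133535df8a020bc2a949ff66b5aa7722ea7 \
    --node-name k8s-worker-1

For reference, here is what kubectl currently shows on the master: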
[root@localhost calico]# kubectl get pods --all-namespaces -o wide
NAMESPACE     NAME                                       READY   STATUS             RESTARTS   AGE   IP                NODE                    NOMINATED NODE   READINESS GATES
kube-system   calico-kube-controllers-5b644bc49c-8ht2m   1/1     Running            0          82m   192.168.102.134   localhost.localdomain   <none>           <none>
kube-system   calico-node-4887h                          1/1     Running            0          82m   172.16.5.152      localhost.localdomain   <none>           <none>
kube-system   coredns-6955765f44-9pjbp                   1/1     Running            0          9h    192.168.102.133   localhost.localdomain   <none>           <none>
kube-system   coredns-6955765f44-mlrdt                   0/1     CrashLoopBackOff   17         9h    192.168.102.132   localhost.localdomain   <none>           <none>
kube-system   kube-proxy-6fqwk                           1/1     Running            0          9h    172.16.5.151      localhost.localdomain   <none>           <none>
[root@localhost calico]# kubectl get pods --all-namespaces -o wide
NAMESPACE     NAME                                       READY   STATUS             RESTARTS   AGE   IP                NODE                    NOMINATED NODE   READINESS GATES
kube-system   calico-kube-controllers-5b644bc49c-8ht2m   0/1     Running            1          82m   192.168.102.137   localhost.localdomain   <none>           <none>
kube-system   calico-node-4887h                          1/1     Running            0          82m   172.16.5.159      localhost.localdomain   <none>           <none>
kube-system   coredns-6955765f44-9pjbp                   1/1     Running            16         9h    192.168.102.136   localhost.localdomain   <none>           <none>
kube-system   coredns-6955765f44-mlrdt                   0/1     CrashLoopBackOff   15         9h    192.168.102.135   localhost.localdomain   <none>           <none>
kube-system   kube-proxy-6fqwk                           1/1     Running            0          9h    172.16.5.151      localhost.localdomain   <none>           <none>
[root@localhost calico]# kubectl get nodes
NAME                    STATUS   ROLES    AGE   VERSION
localhost.localdomain   Ready    master   9h    v1.17.0
[root@localhost calico]#
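If more detail is needed, these are standard commands I can run to gather it:

# on the master: full details of the one node the API server knows about
kubectl describe node localhost.localdomain
# on the worker: follow the kubelet log while it tries to register
journalctl -u kubelet -f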