
Environment: CentOS 7.5

Why does everything here stay Pending? Is it because the etcd containers above are unhealthy? And why won't the pods below start?

It seems that to start Kubernetes' own processes, a CNI plugin also has to be installed first.

[root@K8S-M1 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-2               Healthy   {"health":"true"}
etcd-1               Healthy   {"health":"true"}
etcd-0               Healthy   {"health":"true"}



[root@K8S-M1 ~]# kubectl get csr
NAME        AGE       REQUESTOR            CONDITION
csr-2bvfj   10h       system:node:K8S-M2   Pending
csr-48szz   10h       system:node:K8S-M1   Pending
csr-4lrzl   10h       system:node:K8S-M2   Pending
csr-4m9q9   10h       system:node:K8S-M3   Pending
csr-htwbq   10h       system:node:K8S-M3   Pending
csr-vb7hv   10h       system:node:K8S-M1   Pending

Resources should be sufficient:

[root@K8S-M1 ~]# df -Th
Filesystem              Type      Size  Used Avail Use% Mounted on
/dev/mapper/centos-root xfs        50G  3.0G   47G   6% /
devtmpfs                devtmpfs  7.8G     0  7.8G   0% /dev
tmpfs                   tmpfs     7.8G     0  7.8G   0% /dev/shm
tmpfs                   tmpfs     7.8G   50M  7.8G   1% /run
tmpfs                   tmpfs     7.8G     0  7.8G   0% /sys/fs/cgroup
/dev/sda1               xfs       797M  142M  656M  18% /boot
overlay                 overlay    50G  3.0G   47G   6% /var/lib/docker/overlay2/2161c3bd3843566ad2188f02b6f21ea90fbda01606a465fdfa8c2a81a551a46c/merged
shm                     tmpfs      64M     0   64M   0% /var/lib/docker/containers/b83ba180df8e29d815e140fb7b9974489a8b278af807ebe3c98a076f2291ee87/mounts/shm
overlay                 overlay    50G  3.0G   47G   6% /var/lib/docker/overlay2/8400d260da55ae3db0235b2c1419fad5b8b190378f457dbd26119bf62e76b757/merged
overlay                 overlay    50G  3.0G   47G   6% /var/lib/docker/overlay2/cf013d8436ee1bba4f6a6caeefbdf03a59f4af516e362889a6f913ff7360ec02/merged
overlay                 overlay    50G  3.0G   47G   6% /var/lib/docker/overlay2/e78e7ad517f52ef0c64887f110a128ec8d5afc0dca4a8b88b8ec56bec856b551/merged
overlay                 overlay    50G  3.0G   47G   6% /var/lib/docker/overlay2/6fda164d6535ee45623db59c447ad36590ec8b62a1bd1af8a9bd14f38d541d15/merged
overlay                 overlay    50G  3.0G   47G   6% /var/lib/docker/overlay2/68f7325ae6bedddbac0838026e0984d1ce41c824a524fba54fdd2593858049dd/merged
shm                     tmpfs      64M     0   64M   0% /var/lib/docker/containers/1b8fc403c9b2b123ceeb1d73da7787adacc05b58b45b6f522860c8fb81ff40aa/mounts/shm
overlay                 overlay    50G  3.0G   47G   6% /var/lib/docker/overlay2/c2cdcbb4f36c8d9f55ba21b9c083b99eaae79cd34e4d22057d8f8da6475ab1ca/merged
shm                     tmpfs      64M     0   64M   0% /var/lib/docker/containers/476fdf7fc04ba0cdb7aade7e6c0c9a5ebad41b95a9097717e848eb09b9d376ee/mounts/shm
shm                     tmpfs      64M     0   64M   0% /var/lib/docker/containers/223ef9569f1cbd9005354d8b26991db506cf87c485ea170aae3dc97b3dde3861/mounts/shm
shm                     tmpfs      64M     0   64M   0% /var/lib/docker/containers/2a09a1f72e8a7d67ac98e7699be06ef6dc8631b1c5db1994fe699394551855ec/mounts/shm
shm                     tmpfs      64M     0   64M   0% /var/lib/docker/containers/6219f8e9eacc22c5f5cce5b07c8d301b885df98c3174e3580ccd314a75e40e0e/mounts/shm
overlay                 overlay    50G  3.0G   47G   6% /var/lib/docker/overlay2/fe280abbe1f2d2fa0adcab938fa452af329511414b5a140cf28c78ea420b1e6d/merged
overlay                 overlay    50G  3.0G   47G   6% /var/lib/docker/overlay2/0ee630697c9fbe93bccef86dfbac2e90644c2e9e4f28b7397fe474bbe2431951/merged
overlay                 overlay    50G  3.0G   47G   6% /var/lib/docker/overlay2/0b20400678fa4ccb05f2c27ee13161303b606871b0659a1b5774d271411ab552/merged
overlay                 overlay    50G  3.0G   47G   6% /var/lib/docker/overlay2/acc4525beb65ae8a6f5793b2f0e65568f562b02c175ffbb19b4bd0b9dc385901/merged
overlay                 overlay    50G  3.0G   47G   6% /var/lib/docker/overlay2/876f125113a1b0d08ca3f7a80516d663b2e24a42afddfe69c6ba6cbb0754e29e/merged
tmpfs                   tmpfs     1.6G     0  1.6G   0% /run/user/0


[root@K8S-M1 ~]# free -m
              total        used        free      shared  buff/cache   available
Mem:          15883         690       14197          49         995       14648
Swap:             0           0           0
[root@K8S-M1 ~]# kubectl get events
LAST SEEN   FIRST SEEN   COUNT     NAME                      KIND      SUBOBJECT   TYPE      REASON                    SOURCE            MESSAGE
45m         1h           346       k8s-m1.1560d88dcc941af5   Node                  Normal    NodeHasSufficientMemory   kubelet, k8s-m1   Node k8s-m1 status is now: NodeHasSufficientMemory
13m         9h           7813      k8s-m3.1560bbb6ccbe5e13   Node                  Normal    NodeHasSufficientMemory   kubelet, k8s-m3   Node k8s-m3 status is now: NodeHasSufficientMemory
8m          9h           7883      k8s-m2.1560bbb6c3dc7cdc   Node                  Normal    NodeHasSufficientMemory   kubelet, k8s-m2   Node k8s-m2 status is now: NodeHasSufficientMemory
5m          1h           878       k8s-m1.1560d88dcc93e478   Node                  Normal    NodeHasSufficientDisk     kubelet, k8s-m1   Node k8s-m1 status is now: NodeHasSufficientDisk
3m          9h           7951      k8s-m2.1560bbb6c3dc2450   Node                  Normal    NodeHasSufficientDisk     kubelet, k8s-m2   Node k8s-m2 status is now: NodeHasSufficientDisk
3m          9h           7947      k8s-m3.1560bbb6ccbde61c   Node                  Normal    NodeHasSufficientDisk     kubelet, k8s-m3   Node k8s-m3 status is now: NodeHasSufficientDisk

Is it really necessary to configure CNI before the system pods will start? I don't think I should need it here.

[root@K8S-M1 ~]# tail -f /var/log/messages
Oct 26 09:18:29 K8S-M1 kubelet: E1026 09:18:29.644297    1743 kubelet.go:2112] Container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized
Oct 26 09:18:32 K8S-M1 kubelet: I1026 09:18:32.511136    1743 kubelet_node_status.go:269] Setting node annotation to enable volume controller attach/detach
Oct 26 09:18:33 K8S-M1 kubelet: E1026 09:18:33.861609    1743 eviction_manager.go:243] eviction manager: failed to get get summary stats: failed to get node info: node "k8s-m1" not found
Oct 26 09:18:34 K8S-M1 kubelet: W1026 09:18:34.645467    1743 cni.go:172] Unable to update cni config: No networks found in /etc/cni/net.d
Oct 26 09:18:34 K8S-M1 kubelet: E1026 09:18:34.645613    1743 kubelet.go:2112] Container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized
Oct 26 09:18:35 K8S-M1 kubelet: I1026 09:18:35.102542    1743 kubelet_node_status.go:269] Setting node annotation to enable volume controller attach/detach
Oct 26 09:18:35 K8S-M1 kubelet: I1026 09:18:35.104947    1743 kubelet_node_status.go:79] Attempting to register node k8s-m1
Oct 26 09:18:35 K8S-M1 kubelet: E1026 09:18:35.107179    1743 kubelet_node_status.go:103] Unable to register node "k8s-m1" with API server: nodes "k8s-m1" is forbidden: node "K8S-M1" cannot modify node "k8s-m1"
Oct 26 09:18:39 K8S-M1 kubelet: W1026 09:18:39.646779    1743 cni.go:172] Unable to update cni config: No networks found in /etc/cni/net.d
Oct 26 09:18:39 K8S-M1 kubelet: E1026 09:18:39.646937    1743 kubelet.go:2112] Container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized

[root@K8S-M1 ~]# cat  /etc/systemd/system/kubelet.service.d/10-kubelet.conf
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yml"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node-role.kubernetes.io/master=''"
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS

1 Answer


I suggest you start the investigation by checking the core Kubernetes services located in the kube-system namespace:

kubectl get all -n kube-system
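
If the list is long, one way to narrow it down to pods that are stuck (assuming your kubectl version is recent enough to support field selectors) is:

kubectl get pods -n kube-system --field-selector=status.phase!=Running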

You can also retrieve the health status across all available namespaces:

kubectl get all --all-namespaces

You can also take a dump of every component in the cluster to investigate the cause of the problem:

kubectl cluster-info dump --output-directory=<Output_directory>

By default it collects data from all namespaces in the cluster, and the output directory contains separate log files for each K8s component.
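
For example, once the dump completes you could search the per-component log files for errors, reusing the placeholder directory from the command above:

grep -ri "error" <Output_directory>/kube-system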

It may also be useful to go through the general Troubleshooting Clusters guide and the Kubernetes cluster concepts in the official documentation.
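
One note grounded in the logs above: the kubelet is started with --network-plugin=cni and keeps reporting "cni config uninitialized" because /etc/cni/net.d is empty, so the node stays NotReady and the system pods will not run until a CNI plugin is installed. A minimal sketch, assuming flannel is an acceptable choice (the manifest URL reflects the coreos/flannel repository layout from that time and may since have moved):

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml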

Answered on 2018-10-26T09:20:27.507