I have deployed Kubernetes on a virt-manager VM following this guide:
https://kubernetes.io/docs/setup/independent/install-kubeadm/
When I join my other VM to the cluster, I find that the kube-dns pod is stuck in the Pending state.
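For context, the worker VM was joined with the standard kubeadm join command from that guide; the actual token, hash, and master IP are omitted here, but it was of the form:

root@ubuntu2:~# kubeadm join <master-ip>:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>

The join itself reported success.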
root@ubuntu1:~# kubectl get pods --all-namespaces
NAMESPACE     NAME                              READY     STATUS    RESTARTS   AGE
kube-system   etcd-ubuntu1                      1/1       Running   0          7m
kube-system   kube-apiserver-ubuntu1            1/1       Running   0          8m
kube-system   kube-controller-manager-ubuntu1   1/1       Running   0          8m
kube-system   kube-dns-86f4d74b45-br6ck         0/3       Pending   0          8m
kube-system   kube-proxy-sh9lg                  1/1       Running   0          8m
kube-system   kube-proxy-zwdt5                  1/1       Running   0          7m
kube-system   kube-scheduler-ubuntu1            1/1       Running   0          8m
root@ubuntu1:~# kubectl --namespace=kube-system describe pod kube-dns-86f4d74b45-br6ck
Name:           kube-dns-86f4d74b45-br6ck
Namespace:      kube-system
Node:           <none>
Labels:         k8s-app=kube-dns
                pod-template-hash=4290830601
Annotations:    <none>
Status:         Pending
IP:
Controlled By:  ReplicaSet/kube-dns-86f4d74b45
Containers:
  kubedns:
    Image:       k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
    Ports:       10053/UDP, 10053/TCP, 10055/TCP
    Host Ports:  0/UDP, 0/TCP, 0/TCP
    Args:
      --domain=cluster.local.
      --dns-port=10053
      --config-dir=/kube-dns-config
      --v=2
    Limits:
      memory:  170Mi
    Requests:
      cpu:     100m
      memory:  70Mi
    Liveness:   http-get http://:10054/healthcheck/kubedns delay=60s timeout=5s period=10s #success=1 #failure=5
    Readiness:  http-get http://:8081/readiness delay=3s timeout=5s period=10s #success=1 #failure=3
    Environment:
      PROMETHEUS_PORT:  10055
    Mounts:
      /kube-dns-config from kube-dns-config (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-dns-token-4fjt4 (ro)
  dnsmasq:
    Image:       k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
    Ports:       53/UDP, 53/TCP
    Host Ports:  0/UDP, 0/TCP
    Args:
      -v=2
      -logtostderr
      -configDir=/etc/k8s/dns/dnsmasq-nanny
      -restartDnsmasq=true
      --
      -k
      --cache-size=1000
      --no-negcache
      --log-facility=-
      --server=/cluster.local/127.0.0.1#10053
      --server=/in-addr.arpa/127.0.0.1#10053
      --server=/ip6.arpa/127.0.0.1#10053
    Requests:
      cpu:     150m
      memory:  20Mi
    Liveness:     http-get http://:10054/healthcheck/dnsmasq delay=60s timeout=5s period=10s #success=1 #failure=5
    Environment:  <none>
    Mounts:
      /etc/k8s/dns/dnsmasq-nanny from kube-dns-config (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-dns-token-4fjt4 (ro)
  sidecar:
    Image:      k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8
    Port:       10054/TCP
    Host Port:  0/TCP
    Args:
      --v=2
      --logtostderr
      --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV
      --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV
    Requests:
      cpu:     10m
      memory:  20Mi
    Liveness:     http-get http://:10054/metrics delay=60s timeout=5s period=10s #success=1 #failure=5
    Environment:  <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-dns-token-4fjt4 (ro)
Conditions:
  Type           Status
  PodScheduled   False
Volumes:
  kube-dns-config:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      kube-dns
    Optional:  true
  kube-dns-token-4fjt4:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  kube-dns-token-4fjt4
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  <none>
Tolerations:     CriticalAddonsOnly
                 node-role.kubernetes.io/master:NoSchedule
                 node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason            Age               From               Message
  ----     ------            ---               ----               -------
  Warning  FailedScheduling  6m (x7 over 7m)   default-scheduler  0/1 nodes are available: 1 node(s) were not ready.
  Warning  FailedScheduling  3s (x19 over 6m)  default-scheduler  0/2 nodes are available: 2 node(s) were not ready.
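From the events, the scheduler says "0/2 nodes are available: 2 node(s) were not ready", so I assume the next step is to inspect the nodes themselves, e.g.:

root@ubuntu1:~# kubectl get nodes
root@ubuntu1:~# kubectl describe node ubuntu1

but I am not sure what to look for in the node conditions from there.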
Can anyone help me deconstruct this output and find the actual issue?
Any help would be of great use.
Thanks in advance.