
I have a problem deploying some pods on my k8s node. The error is the following:

Failed create pod sandbox: rpc error: code = Unknown desc = failed to set up sandbox container "7da8bce09dd6820a65754073b1b4e52e640291dcb82f1da87ae99570c6964d1b" network for pod "webservices-8675d4667d-7mdf9": networkPlugin cni failed to set up pod "webservices-8675d4667d-7mdf9_default" network: Get https://[10.233.0.1]:443/api/v1/namespaces/default: dial tcp 10.233.0.1:443: i/o timeout
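
The dial timeout to 10.233.0.1 suggests the worker node cannot reach the in-cluster kubernetes API service at all. A minimal connectivity check from the affected node, using the service IP taken from the error above, could look like this (the API server node address/port is a placeholder, adjust to your setup):

# From the worker node: try to reach the kubernetes service ClusterIP seen in the error
curl -k --connect-timeout 5 https://10.233.0.1:443/version
# For comparison, the API server should answer on its node address (port may differ per setup)
curl -k --connect-timeout 5 https://<api-server-node-ip>:6443/version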

However, some pods do get deployed, for example kubernetes-dashboard.


Update:

NAME                   STATUS   ROLES    AGE     VERSION   LABELS
k8s-master.mariyo.eu   Ready    master   3d15h   v1.16.6   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-master.mariyo.eu,kubernetes.io/os=linux,node-role.kubernetes.io/master=
k8s-node-1.mariyo.eu   Ready    <none>   3d15h   v1.16.6   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-node-1.mariyo.eu,kubernetes.io/os=linux

Deployment of coredns:

kind: Deployment
apiVersion: apps/v1
metadata:
  name: coredns
  namespace: kube-system
  selfLink: /apis/apps/v1/namespaces/kube-system/deployments/coredns
  uid: bd5451ec-2a33-443d-8519-ffcec935ac0c
  resourceVersion: '397508'
  generation: 2
  creationTimestamp: '2020-01-24T16:14:37Z'
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: kube-dns
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: coredns
  annotations:
    deployment.kubernetes.io/revision: '1'
    kubectl.kubernetes.io/last-applied-configuration: >
      {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"addonmanager.kubernetes.io/mode":"Reconcile","k8s-app":"kube-dns","kubernetes.io/cluster-service":"true","kubernetes.io/name":"coredns"},"name":"coredns","namespace":"kube-system"},"spec":{"selector":{"matchLabels":{"k8s-app":"kube-dns"}},"strategy":{"rollingUpdate":{"maxSurge":"10%","maxUnavailable":0},"type":"RollingUpdate"},"template":{"metadata":{"annotations":{"seccomp.security.alpha.kubernetes.io/pod":"docker/default"},"labels":{"k8s-app":"kube-dns"}},"spec":{"affinity":{"nodeAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"preference":{"matchExpressions":[{"key":"node-role.kubernetes.io/master","operator":"In","values":[""]}]},"weight":100}]},"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchLabels":{"k8s-app":"kube-dns"}},"topologyKey":"kubernetes.io/hostname"}]}},"containers":[{"args":["-conf","/etc/coredns/Corefile"],"image":"docker.io/coredns/coredns:1.6.0","imagePullPolicy":"IfNotPresent","livenessProbe":{"failureThreshold":10,"httpGet":{"path":"/health","port":8080,"scheme":"HTTP"},"successThreshold":1,"timeoutSeconds":5},"name":"coredns","ports":[{"containerPort":53,"name":"dns","protocol":"UDP"},{"containerPort":53,"name":"dns-tcp","protocol":"TCP"},{"containerPort":9153,"name":"metrics","protocol":"TCP"}],"readinessProbe":{"failureThreshold":10,"httpGet":{"path":"/ready","port":8181,"scheme":"HTTP"},"successThreshold":1,"timeoutSeconds":5},"resources":{"limits":{"memory":"170Mi"},"requests":{"cpu":"100m","memory":"70Mi"}},"securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"add":["NET_BIND_SERVICE"],"drop":["all"]},"readOnlyRootFilesystem":true},"volumeMounts":[{"mountPath":"/etc/coredns","name":"config-volume"}]}],"dnsPolicy":"Default","nodeSelector":{"beta.kubernetes.io/os":"linux"},"priorityClassName":"system-cluster-critical","serviceAccountName":"coredns","tolerations":[{"effect":"NoSchedule","key":"node-role.kubernetes.io/master"},{"key":"CriticalAddonsOnly","operator":"Exists"}],"volumes":[{"configMap":{"items":[{"key":"Corefile","path":"Corefile"}],"name":"coredns"},"name":"config-volume"}]}}}}
spec:
  replicas: 2
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      creationTimestamp: null
      labels:
        k8s-app: kube-dns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: docker/default
    spec:
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
              - key: Corefile
                path: Corefile
            defaultMode: 420
      containers:
        - name: coredns
          image: 'docker.io/coredns/coredns:1.6.0'
          args:
            - '-conf'
            - /etc/coredns/Corefile
          ports:
            - name: dns
              containerPort: 53
              protocol: UDP
            - name: dns-tcp
              containerPort: 53
              protocol: TCP
            - name: metrics
              containerPort: 9153
              protocol: TCP
          resources:
            limits:
              memory: 170Mi
            requests:
              cpu: 100m
              memory: 70Mi
          volumeMounts:
            - name: config-volume
              mountPath: /etc/coredns
          livenessProbe:
            httpGet:
              path: /health
              port: 8080
              scheme: HTTP
            timeoutSeconds: 5
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 10
          readinessProbe:
            httpGet:
              path: /ready
              port: 8181
              scheme: HTTP
            timeoutSeconds: 5
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 10
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
          securityContext:
            capabilities:
              add:
                - NET_BIND_SERVICE
              drop:
                - all
            readOnlyRootFilesystem: true
            allowPrivilegeEscalation: false
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: Default
      nodeSelector:
        beta.kubernetes.io/os: linux
      serviceAccountName: coredns
      serviceAccount: coredns
      securityContext: {}
      affinity:
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              preference:
                matchExpressions:
                  - key: node-role.kubernetes.io/master
                    operator: In
                    values:
                      - ''
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchLabels:
                  k8s-app: kube-dns
              topologyKey: kubernetes.io/hostname
      schedulerName: default-scheduler
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: CriticalAddonsOnly
          operator: Exists
      priorityClassName: system-cluster-critical
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 0
      maxSurge: 10%
  revisionHistoryLimit: 10
  progressDeadlineSeconds: 600
status:
  observedGeneration: 2
  replicas: 2
  updatedReplicas: 2
  readyReplicas: 1
  availableReplicas: 1
  unavailableReplicas: 1
  conditions:
    - type: Progressing
      status: 'True'
      lastUpdateTime: '2020-01-24T16:14:42Z'
      lastTransitionTime: '2020-01-24T16:14:37Z'
      reason: NewReplicaSetAvailable
      message: ReplicaSet "coredns-58687784f9" has successfully progressed.
    - type: Available
      status: 'False'
      lastUpdateTime: '2020-01-27T17:42:57Z'
      lastTransitionTime: '2020-01-27T17:42:57Z'
      reason: MinimumReplicasUnavailable
      message: Deployment does not have minimum availability.

Deployment of webservices:

kind: Deployment
apiVersion: apps/v1
metadata:
  name: webservices
  namespace: default
  selfLink: /apis/apps/v1/namespaces/default/deployments/webservices
  uid: da75d3d8-92f4-4d06-86d6-e2fb325806a5
  resourceVersion: '398529'
  generation: 1
  creationTimestamp: '2020-01-27T08:05:16Z'
  labels:
    run: webservices
  annotations:
    deployment.kubernetes.io/revision: '1'
spec:
  replicas: 5
  selector:
    matchLabels:
      run: webservices
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: webservices
    spec:
      containers:
        - name: webservices
          image: nginx
          ports:
            - containerPort: 80
              protocol: TCP
          resources: {}
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: Always
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}
      schedulerName: default-scheduler
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 25%
      maxSurge: 25%
  revisionHistoryLimit: 10
  progressDeadlineSeconds: 600
status:
  observedGeneration: 1
  replicas: 5
  updatedReplicas: 5
  unavailableReplicas: 5
  conditions:
    - type: Available
      status: 'False'
      lastUpdateTime: '2020-01-27T08:05:16Z'
      lastTransitionTime: '2020-01-27T08:05:16Z'
      reason: MinimumReplicasUnavailable
      message: Deployment does not have minimum availability.
    - type: Progressing
      status: 'False'
      lastUpdateTime: '2020-01-27T17:52:58Z'
      lastTransitionTime: '2020-01-27T17:52:58Z'
      reason: ProgressDeadlineExceeded
      message: ReplicaSet "webservices-8675d4667d" has timed out progressing.

3 Answers


In the end, I decided to reinstall the nodes with Ubuntu 18.04 instead of Debian 10, and everything works fine.

Thanks for your time.

Answered 2020-01-29T19:51:26.990

The problem is that kube-proxy is not working correctly: I believe 10.233.0.1 is the kubernetes API service address, which kube-proxy is responsible for configuring/setting up. You should check the kube-proxy logs, verify that it is healthy, and confirm that it creates the iptables rules for the kubernetes service.

Have a look here: calico-timeout-pod
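
As a rough sketch of that check (standard kubectl and iptables commands; the kube-proxy label and pod name are placeholders that may vary with your installer):

# List kube-proxy pods and find the one running on the affected worker node
kubectl -n kube-system get pods -l k8s-app=kube-proxy -o wide
# Inspect its logs for errors
kubectl -n kube-system logs <kube-proxy-pod-on-k8s-node-1>
# On the worker node itself: the kubernetes service ClusterIP should appear in the NAT rules
sudo iptables-save -t nat | grep 10.233.0.1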

Answered 2020-01-28T10:02:03.733

Before joining the worker node, I also had to set the following on it to make things work: sudo sysctl net.bridge.bridge-nf-call-iptables=1
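
For reference, one way to make that setting persistent across reboots (a sketch; the file names under /etc/modules-load.d and /etc/sysctl.d are just a convention):

# Ensure the br_netfilter module is loaded now and at boot
echo 'br_netfilter' | sudo tee /etc/modules-load.d/k8s.conf
sudo modprobe br_netfilter
# Persist the sysctl and apply all sysctl configuration
echo 'net.bridge.bridge-nf-call-iptables = 1' | sudo tee /etc/sysctl.d/99-kubernetes.conf
sudo sysctl --system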

Answered 2021-03-09T21:38:25.090