I am trying to deploy an EFK stack on a production Kubernetes cluster (installed with Kubespray). We have 3 nodes, 1 master + 2 workers, and I need to run Elasticsearch as a StatefulSet that stores the logs in a local folder on the master node (local persistent storage). My configuration is:
kind: Namespace
apiVersion: v1
metadata:
  name: kube-logging
---
kind: Service
apiVersion: v1
metadata:
  name: elasticsearch
  namespace: kube-logging
  labels:
    app: elasticsearch
spec:
  selector:
    app: elasticsearch
  clusterIP: None
  ports:
    - port: 9200
      name: rest
    - port: 9300
      name: inter-node
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: local-storage
  namespace: kube-logging
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: my-pv
  namespace: kube-logging
spec:
  storageClassName: local-storage
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: /tmp/elastic
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es-cluster
  namespace: kube-logging
spec:
  serviceName: elasticsearch
  replicas: 2
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels:
        app: elasticsearch
    spec:
      containers:
        - name: elasticsearch
          image: docker.elastic.co/elasticsearch/elasticsearch:7.2.0
          resources:
            limits:
              cpu: 1000m
              memory: 2Gi
          ports:
            - containerPort: 9200
              name: rest
              protocol: TCP
            - containerPort: 9300
              name: inter-node
              protocol: TCP
          volumeMounts:
            - name: data
              mountPath: /usr/share/elasticsearch/data
          env:
            - name: cluster.name
              value: k8s-logs
            - name: node.name
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: discovery.seed_hosts
              value: "es-cluster-0.elasticsearch,es-cluster-1.elasticsearch,es-cluster-2.elasticsearch"
            - name: cluster.initial_master_nodes
              value: "es-cluster-0,es-cluster-1,es-cluster-2"
            - name: ES_JAVA_OPTS
              value: "-Xms512m -Xmx512m"
      initContainers:
        - name: fix-permissions
          image: busybox
          command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"]
          securityContext:
            privileged: true
          volumeMounts:
            - name: data
              mountPath: /usr/share/elasticsearch/data
        - name: increase-vm-max-map
          image: busybox
          command: ["sysctl", "-w", "vm.max_map_count=262144"]
          securityContext:
            privileged: true
        - name: increase-fd-ulimit
          image: busybox
          command: ["sh", "-c", "ulimit -n 65536"]
          securityContext:
            privileged: true
  volumeClaimTemplates:
    - metadata:
        name: data
        labels:
          app: elasticsearch
      spec:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName: local-storage
        resources:
          requests:
            storage: 5Gi
---
So this is my configuration, but when it is applied, one of the two Elasticsearch pods stays in Pending state. When I run kubectl describe on that pod, this is the error I get: "1 node(s) didn't find available persistent volumes to bind".
Is my configuration correct? Do I have to use a PV + StorageClass + volumeClaimTemplates? Thanks in advance.
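(For context, my current understanding, which may well be wrong: with volumeBindingMode: WaitForFirstConsumer, every PVC stamped out by volumeClaimTemplates has to bind to its own pre-created PV, so two replicas would need two PVs, and a node-local PV is normally declared with the local volume type plus nodeAffinity rather than hostPath. This is only a sketch of what I think a second PV would look like; my-pv-1, /tmp/elastic-1 and the hostname node1 are placeholders, not taken from my cluster:

apiVersion: v1
kind: PersistentVolume
metadata:
  name: my-pv-1                # placeholder name for a second PV
spec:
  storageClassName: local-storage
  capacity:
    storage: 5Gi               # matches the 5Gi requested by the claim template
  accessModes:
    - ReadWriteOnce
  local:
    path: /tmp/elastic-1       # placeholder; the directory must already exist on the node
  nodeAffinity:                # required for "local" volumes: pins the PV to one node
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - node1        # placeholder node name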
Here are my outputs:
[root@node1 nex]# kubectl get pv
NAME    CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                            STORAGECLASS    REASON   AGE
my-pv   5Gi        RWO            Retain           Bound    kube-logging/data-es-cluster-0   local-storage            24m
[root@node1 nex]# kubectl get pvc
NAME                STATUS    VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS    AGE
data-es-cluster-0   Bound     my-pv    5Gi        RWO            local-storage   24m
data-es-cluster-1   Pending                                      local-storage   23m
[root@node1 nex]# kubectl describe pvc data-es-cluster-0
Name:          data-es-cluster-0
Namespace:     kube-logging
StorageClass:  local-storage
Status:        Bound
Volume:        my-pv
Labels:        app=elasticsearch
Annotations:   pv.kubernetes.io/bind-completed: yes
               pv.kubernetes.io/bound-by-controller: yes
Finalizers:    [kubernetes.io/pvc-protection]
Capacity:      5Gi
Access Modes:  RWO
VolumeMode:    Filesystem
Mounted By:    es-cluster-0
Events:
  Type    Reason                Age   From                         Message
  ----    ------                ----  ----                         -------
  Normal  WaitForFirstConsumer  24m   persistentvolume-controller  waiting for first consumer to be created before binding
[root@node1 nex]# kubectl describe pvc data-es-cluster-1
Name:          data-es-cluster-1
Namespace:     kube-logging
StorageClass:  local-storage
Status:        Pending
Volume:
Labels:        app=elasticsearch
Annotations:   <none>
Finalizers:    [kubernetes.io/pvc-protection]
Capacity:
Access Modes:
VolumeMode:    Filesystem
Mounted By:    es-cluster-1
Events:
  Type    Reason                Age                   From                         Message
  ----    ------                ----                  ----                         -------
  Normal  WaitForFirstConsumer  4m12s (x82 over 24m)  persistentvolume-controller  waiting for first consumer to be created before binding
[root@node1 nex]#