1. На сервере должен быть поднят NFS-сервер, настроенный на определённый каталог, и нашему кластеру k8s нужно дать доступ к NFS (/etc/exports).
2. В k8s должен быть создан StorageClass, использующий наш NFS; я использовал helm chart nfs-subdir-external-provisioner-4.0.16.
3. StorageClass — кластерный (не namespaced) ресурс, поэтому он доступен из любого namespace.
---
# PVC for the Elasticsearch pod (claimed by the "elasticsearch" Deployment),
# dynamically provisioned on the NFS share via the "nfs" StorageClass.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: elk-pvc-1
  namespace: elk
spec:
  storageClassName: nfs
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
---
# PVC for the Kibana pod (claimed by the "kibana" Deployment),
# dynamically provisioned on the NFS share via the "nfs" StorageClass.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: elk-pvc-2
  namespace: elk
spec:
  storageClassName: nfs
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi
---
# PVC for the Logstash pod (claimed by the "logstash" Deployment),
# dynamically provisioned on the NFS share via the "nfs" StorageClass.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: elk-pvc-3
  namespace: elk
spec:
  storageClassName: nfs
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi
---
kind: Service
apiVersion: v1
metadata:
name: kibana-svc
namespace: elk
spec:
type: NodePort
selector:
app: kibana
ports:
- protocol: TCP
nodePort: 30003
port: 5601
---
kind: Service
apiVersion: v1
metadata:
name: logstash-svc
namespace: elk
spec:
type: NodePort
selector:
app: logstash
ports:
- protocol: TCP
nodePort: 30004
port: 5044
---
kind: Service
apiVersion: v1
metadata:
name: elk-svc-nodeport
namespace: elk
spec:
type: NodePort
selector:
app: elasticsearch
ports:
- protocol: TCP
nodePort: 30006
port: 9200
---
apiVersion: v1
kind: Service
metadata:
name: elk-svc
namespace: elk
spec:
ports:
- port: 9200
name: rest
- port: 9300
name: inter-node
clusterIP: None
selector:
app: elasticsearch
sessionAffinity: None
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: elasticsearch
namespace: elk
labels:
app: elasticsearch
spec:
selector:
matchLabels:
app: elasticsearch
replicas: 1
template:
metadata:
labels:
app: elasticsearch
spec:
initContainers:
- name: fix-permissions
image: busybox
command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch"]
securityContext:
privileged: true
volumeMounts:
- name: config-elk
mountPath: /usr/share/elasticsearch
- name: increase-vm-max-map
image: busybox
command: ["sysctl", "-w", "vm.max_map_count=262144"]
securityContext:
privileged: true
- name: increase-fd-ulimit
image: busybox
command: ["sh", "-c", "ulimit -n 65536"]
securityContext:
privileged: true
containers:
- name: elasticsearch
image: "elasticsearch:7.17.4"
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: 2000m
requests:
cpu: 200m
ports:
- containerPort: 9200
name: rest
protocol: TCP
- containerPort: 9300
name: inter-node
protocol: TCP
env:
- name: ES_JAVA_OPTS
value: "-Xms512m -Xmx512m"
- name: discovery.seed_hosts
value: "127.0.0.1"
volumeMounts:
- mountPath: /usr/share/elasticsearch
name: config-elk
volumes:
- name: config-elk
persistentVolumeClaim:
claimName: elk-pvc-1
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kibana
namespace: elk
labels:
app: kibana
spec:
selector:
matchLabels:
app: kibana
replicas: 1
template:
metadata:
labels:
app: kibana
spec:
containers:
- name: kibana
image: "kibana:7.17.4"
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: 2000m
requests:
cpu: 200m
env:
- name: ELASTICSEARCH_URL
value: http://elk-svc:9200
- name: ES_JAVA_OPTS
value: "-Xms512m -Xmx512m"
ports:
- containerPort: 5601
volumeMounts:
- name: config-kibana
mountPath: /usr/share/kibana
volumes:
- name: config-kibana
persistentVolumeClaim:
claimName: elk-pvc-2
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: logstash
namespace: elk
labels:
app: logstash
spec:
selector:
matchLabels:
app: logstash
replicas: 1
template:
metadata:
labels:
app: logstash
spec:
initContainers:
containers:
- name: logstash
image: "logstash:7.17.4"
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: 2000m
requests:
cpu: 200m
env:
- name: ES_JAVA_OPTS
value: "-Xms512m -Xmx512m"
ports:
- containerPort: 5044
volumeMounts:
- name: config-logstash
mountPath: /usr/share/logstash
volumes:
- name: config-logstash
persistentVolumeClaim:
claimName: elk-pvc-3
К Kibana можно обратиться по порту 30003 (NodePort); осталось настроить конфиги, которые лежат на NFS.