linux怎么查看本机内存大小
519
2022-11-21
K8S部署ES集群 - 运维笔记
一、使用NFS配置持久化存储
1)在NFS服务器端(172.16.60.238)通过nfs创建es、filebeat共享目录
[root@k8s-harbor01 k8s]# mkdir -p /data/storage/k8s/es
2)创建NFS的rbac
[root@k8s-master01 k8s_project]# cd
[root@k8s-master01 ~]# cd /opt/k8s/k8s_project/
[root@k8s-master01 k8s_project]# mkdir elk
[root@k8s-master01 k8s_project]# cd elk/
[root@k8s-master01 elk]# vim nfs-rbac.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-provisioner
namespace: wiseco
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-provisioner-runner
namespace: wiseco
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["services", "endpoints"]
verbs: ["get","create","list", "watch","update"]
- apiGroups: ["extensions"]
resources: ["podsecuritypolicies"]
resourceNames: ["nfs-provisioner"]
verbs: ["use"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-provisioner
subjects:
- kind: ServiceAccount
name: nfs-provisioner
namespace: wiseco
roleRef:
kind: ClusterRole
name: nfs-provisioner-runner
apiGroup: rbac.authorization.k8s.io
创建和查看
[root@k8s-master01 elk]# kubectl apply -f nfs-rbac.yaml
serviceaccount/nfs-provisioner created
clusterrole.rbac.authorization.k8s.io/nfs-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-provisioner created
[root@k8s-master01 elk]# kubectl get sa -n wiseco|grep nfs
nfs-provisioner 1 4s
[root@k8s-master01 elk]# kubectl get clusterrole -n wiseco|grep nfs
nfs-provisioner-runner 2021-02-19T08:39:05Z
[root@k8s-master01 elk]# kubectl get clusterrolebinding -n wiseco|grep nfs
run-nfs-provisioner ClusterRole/nfs-provisioner-runner
二、ES集群部署
ES7.0+新版废弃了原先discovery.zen.ping.unicast.hosts及discovery.zen.minimum_master_nodes的探测方式,而是改为了discovery.seed_hosts及cluster.initial_master_nodes。
1)创建es集群的storage
[root@k8s-master01 elk]# pwd
/opt/k8s/k8s_project/elk
[root@k8s-master01 elk]# mkdir es
[root@k8s-master01 elk]# cd es/
[root@k8s-master01 es]# vim es-nfs-class.yaml
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
name: es-nfs-storage
namespace: wiseco
provisioner: es/nfs
reclaimPolicy: Retain
创建和查看
[root@k8s-master01 es]# kubectl apply -f es-nfs-class.yaml
storageclass.storage.k8s.io/es-nfs-storage created
[root@k8s-master01 es]# kubectl get sc -n wiseco
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
es-nfs-storage es/nfs Retain Immediate false 10s
2)创建es集群的nfs-client-provisioner
[root@k8s-master01 es]# vim es-nfs.yml
apiVersion: apps/v1
kind: Deployment
metadata:
name: es-nfs-client-provisioner
namespace: wiseco
spec:
replicas: 1
selector:
matchLabels:
app: es-nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: es-nfs-client-provisioner
spec:
serviceAccountName: nfs-provisioner
containers:
- name: es-nfs-client-provisioner
image: registry.cn-hangzhou.aliyuncs.com/open-ali/nfs-client-provisioner
imagePullPolicy: IfNotPresent
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: es/nfs
- name: NFS_SERVER
value: 172.16.60.238
- name: NFS_PATH
value: /data/storage/k8s/es
volumes:
- name: nfs-client-root
nfs:
server: 172.16.60.238
path: /data/storage/k8s/es
创建并查看
[root@k8s-master01 es]# kubectl apply -f es-nfs.yml
deployment.apps/es-nfs-client-provisioner created
[root@k8s-master01 es]# kubectl get pods -n wiseco|grep nfs
es-nfs-client-provisioner-5c989d9b5-nkpdb 1/1 Running 0 4s
3)制作ES集群的镜像(jdk镜像、es镜像)
需要注意:
ES 7.6.2启动要求jdk要在java11以上版本,否则es启动会报错:
future versions of Elasticsearch will require Java 11; your Java version from [/usr/java/jdk1.8.0_192/jre] does not meet this requirement
接着制作es集群的镜像
下载elasticsearch-7.6.2-linux-x86_64.tar.gz安装包、准备elasticsearch.yml配置文件,这两个文件一起放到image镜像里。
[root@k8s-master01 images]# pwd
/opt/k8s/k8s_project/elk/es/images
[root@k8s-master01 images]# ll
total 0
drwxr-xr-x 2 root root 63 Feb 20 16:11 jdk_images
[root@k8s-master01 images]# mkdir es_images/
[root@k8s-master01 images]# ll
total 0
drwxr-xr-x 2 root root 96 Feb 20 15:49 es_images
drwxr-xr-x 2 root root 63 Feb 20 16:11 jdk_images
[root@k8s-master01 images]# cd es_images/
[root@k8s-master01 es_images]#
[root@k8s-master01 es_images]# wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.6.2-linux-x86_64.tar.gz
[root@k8s-master01 es_images]# ll
total 289540
-rw-r--r-- 1 root root 718 Feb 20 17:34 Dockerfile
-rw-r--r-- 1 root root 296477546 Mar 31 2020 elasticsearch-7.6.2-linux-x86_64.tar.gz
-rw-r--r-- 1 root root 448 Feb 20 17:49 elasticsearch.yml
这里千万要注意:node节点主机名要使用正确解析到的完整域名:pod名称.service名称.namespace名称.svc.cluster.local
[root@k8s-master01 es_images]# cat elasticsearch.yml
cluster.name: es-cluster
node.name: ${MY_POD_NAME}.es-svc.wiseco.svc.cluster.local
path.data: /opt/elasticsearch-7.6.2/data
path.logs: /opt/elasticsearch-7.6.2/logs
network.host: 0.0.0.0
http.port: 9200
http.cors.enabled: true
http.cors.allow-origin: "*"
node.master: true
node.data: true
discovery.seed_hosts: ["es-0.es-svc.wiseco.svc.cluster.local","es-1.es-svc.wiseco.svc.cluster.local","es-2.es-svc.wiseco.svc.cluster.local"]
cluster.initial_master_nodes: ["es-0.es-svc.wiseco.svc.cluster.local","es-1.es-svc.wiseco.svc.cluster.local","es-2.es-svc.wiseco.svc.cluster.local"]
镜像文件内容:
[root@k8s-master01 es_images]# cat Dockerfile
FROM 172.16.60.238/wiseco/jdk13.0.2
RUN rm -f /etc/localtime \
&& ln -sv /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \
&& echo "Asia/Shanghai" > /etc/timezone
ENV LANG en_US.UTF-8
ADD elasticsearch-7.6.2-linux-x86_64.tar.gz /opt
RUN mkdir -p /opt/elasticsearch-7.6.2/data \
&& mkdir -p /opt/elasticsearch-7.6.2/logs \
&& useradd elasticsearch \
&& chown -R elasticsearch:elasticsearch /opt \
&& chmod -R 777 /opt \
&& setfacl -R -m u:elasticsearch:rwx /opt \
&& rm -f /opt/elasticsearch-7.6.2/config/elasticsearch.yml
COPY elasticsearch.yml /opt/elasticsearch-7.6.2/config/
USER elasticsearch
EXPOSE 9200 9300
CMD ["/opt/elasticsearch-7.6.2/bin/elasticsearch"]
制作镜像并上传到Harbor仓库
[root@k8s-master01 es_images]# docker build -t 172.16.60.238/wiseco/elasticsearch-7.6.2 .
[root@k8s-master01 es_images]# docker push 172.16.60.238/wiseco/elasticsearch-7.6.2
4)部署ES集群容器
注意:这里使用初始化容器来修改系统参数。
[root@k8s-master01 es]# pwd
/opt/k8s/k8s_project/elk/es
[root@k8s-master01 es]# mkdir deploy/
[root@k8s-master01 es]# cd deploy/
[root@k8s-master01 deploy]#
[root@k8s-master01 deploy]# cat es_cluster.yaml
apiVersion: v1
kind: Service
metadata:
name: es-svc
namespace: wiseco
labels:
app: es
spec:
ports:
- port: 9200
targetPort: 9200
name: outer
- port: 9300
targetPort: 9300
name: inner
clusterIP: None
selector:
app: es
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: es
namespace: wiseco
spec:
serviceName: "es-svc"
replicas: 3
selector:
matchLabels:
app: es
template:
metadata:
labels:
app: es
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: "app"
operator: In
values:
- es
topologyKey: "kubernetes.io/hostname"
initContainers:
- name: increase-vm-max-map
image: busybox
command: ["sysctl", "-w", "vm.max_map_count=262144"]
securityContext:
privileged: true
- name: increase-fd-ulimit
image: busybox
command: ["sh", "-c", "ulimit -n 65536"]
securityContext:
privileged: true
terminationGracePeriodSeconds: 60
containers:
- name: es
image: 172.16.60.238/wiseco/elasticsearch-7.6.2
imagePullPolicy: Always
ports:
- containerPort: 9200
name: outer
- containerPort: 9300
name: inner
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
resources:
requests:
memory: 1024Mi
cpu: 500m
limits:
memory: 2048Mi
cpu: 1500m
lifecycle:
postStart:
exec:
command: ["/bin/sh","-c","touch /tmp/health"]
livenessProbe:
exec:
command: ["test","-e","/tmp/health"]
initialDelaySeconds: 5
timeoutSeconds: 5
periodSeconds: 10
readinessProbe:
tcpSocket:
port: outer
initialDelaySeconds: 15
timeoutSeconds: 5
periodSeconds: 20
volumeMounts:
- name: es-date
mountPath: /opt/elasticsearch-7.6.2/data
- name: es-log
mountPath: /opt/elasticsearch-7.6.2/logs
readOnly: false
volumes:
- name: es-log
hostPath:
path: /var/log/k8s-log/es
volumeClaimTemplates:
- metadata:
name: es-date
annotations:
volume.beta.kubernetes.io/storage-class: "es-nfs-storage"
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi
创建和查看
[root@k8s-master01 deploy]# kubectl apply -f es_cluster.yaml
service/es-svc created
statefulset.apps/es created
[root@k8s-master01 deploy]# kubectl get pods -n wiseco -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
es-0 1/1 Running 0 9m36s 172.30.85.230 k8s-node01
*************** 当你发现自己的才华撑不起野心时,就请安静下来学习吧!***************
版权声明:本文内容由网络用户投稿,版权归原作者所有,本站不拥有其著作权,亦不承担相应法律责任。如果您发现本站中有涉嫌抄袭或描述失实的内容,请联系我们jiasou666@gmail.com 处理,核实后本网站将在24小时内删除侵权内容。
发表评论
暂时没有评论,来抢沙发吧~