Deploying Kubernetes with Ansible
1. Host planning
Type | Host IP | Domain | VIP |
k8s-master | 192.168.47.47 | k8s-master.example.com | 192.168.47.49 |
k8s-harbor/haproxy | 192.168.47.48 | k8s-harbor.example.com | |
k8s-etcd1 | 192.168.47.50 | k8s-etcd1.example.com | |
k8s-etcd2 | 192.168.47.51 | k8s-etcd2.example.com | |
k8s-etcd3 | 192.168.47.52 | k8s-etcd3.example.com | |
k8s-node1 | 192.168.47.53 | k8s-node1.example.com | |
k8s-node2 | 192.168.47.54 | k8s-node2.example.com | |
Environment:
root@ubuntu:~# cat /etc/issue
Ubuntu 18.04.3 LTS \n \l
2. Base environment preparation
2.1. Rename the network interface to eth0
vim /etc/default/grub
------------------------------------------------------
GRUB_CMDLINE_LINUX="net.ifnames=0 biosdevname=0"
------------------------------------------------------
update-grub   # takes effect after the next reboot
2.2. Change the system IP address
vim /etc/netplan/01-netcfg.yaml
------------------------------------------------------
network:
  version: 2
  renderer: networkd
  ethernets:
    eth0:
      dhcp4: no
      addresses: [192.168.47.47/24]
      gateway4: 192.168.47.2
      nameservers:
        addresses: [192.168.47.2]
------------------------------------------------------
netplan apply
2.3. Change the hostname
cat /etc/hostname
------------------------
k8s-master.example.com
------------------------
2.4. Switch the apt sources to the Aliyun mirror
After replacing /etc/apt/sources.list, refresh the package index and upgrade:
apt-get update && apt-get upgrade -y
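The article does not reproduce its own source list; for reference, a typical Aliyun /etc/apt/sources.list for Ubuntu 18.04 (bionic) looks like the sketch below. Verify against the mirror's own documentation:
------------------------------------------------------
deb http://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse
------------------------------------------------------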
2.5. Install common utilities
apt-get install -y iproute2 ntpdate tcpdump telnet traceroute \
  nfs-kernel-server nfs-common lrzsz tree openssl libssl-dev \
  libpcre3 libpcre3-dev zlib1g-dev gcc openssh-server iotop unzip zip
2.6. Other configuration
grep "^[a-Z]" /etc/sysctl.conf
---------------------------------------------
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
net.ipv4.ip_forward = 1
---------------------------------------------
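The net.bridge.* keys only exist while the bridge netfilter module is loaded; a small sketch of applying the settings (standard tooling only):
modprobe br_netfilter   # provides the net.bridge.* sysctl keys
sysctl -p               # apply the settings above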
2.7. Install Docker
The page stripped the URLs from this section; the standard Docker ones are restored below as an assumption, and the registry-mirror address must be replaced with your own accelerator URL.
# Option 1: convenience script
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
# Option 2: apt repository
apt-get update
apt-get -y install apt-transport-https ca-certificates curl software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt-get -y update && apt-get -y install docker-ce
docker info
# Registry mirror:
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://<your-id>.mirror.aliyuncs.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
2.8. Disable swap, SELinux (not present on Ubuntu), and iptables
swapoff -a   # turn swap off temporarily
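To keep swap off across reboots, also comment out the swap entry in /etc/fstab (a sketch, assuming the entry contains " swap "):
sed -i '/ swap / s/^/#/' /etc/fstab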
2.9. Install Python on the master/node/etcd hosts
apt-get install python2.7 -y && ln -s /usr/bin/python2.7 /usr/bin/python
2.10. Reboot and take a snapshot
3. Keepalived
apt-get install keepalived -y
cp /usr/share/doc/keepalived/samples/keepalived.conf.vrrp /etc/keepalived/keepalived.conf
vim /etc/keepalived/keepalived.conf
-----------------------------------------------------------------
virtual_ipaddress {
    192.168.47.49/24 dev eth0 label eth0:1   # the VIP must be reachable by the master
}
-----------------------------------------------------------------
systemctl restart keepalived && systemctl enable keepalived
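The sample file only needs the VIP block adjusted. For orientation, a minimal single-instance vrrp_instance sketch; state, router id, and password here are assumptions, not from the article:
-----------------------------------------------------------------
vrrp_instance VI_1 {
    state MASTER              # assumed: this host is the primary
    interface eth0
    virtual_router_id 51      # assumed: any id unique on the LAN
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111        # assumed shared secret
    }
    virtual_ipaddress {
        192.168.47.49/24 dev eth0 label eth0:1
    }
}
-----------------------------------------------------------------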
4. HAProxy
apt-get install haproxy -y
vim /etc/haproxy/haproxy.cfg
-----------------------------------------------------------------
listen k8s_api_nodes_6443
    bind 192.168.47.49:6443
    mode tcp
    server 192.168.47.47 192.168.47.47:6443 check inter 2000 fall 3 rise 5
-----------------------------------------------------------------
systemctl restart haproxy && systemctl enable haproxy
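A quick sanity check that the proxy is listening on the VIP (standard tooling only):
ss -tnl | grep 6443    # expect 192.168.47.49:6443
systemctl status haproxy --no-pager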
5. Harbor
scp /lib/systemd/system/docker.service 192.168.47.47:/lib/systemd/system/docker.service
# on the master: create the certificate directory
mkdir /etc/docker/certs.d/harbor.gesila.com -p
scp /etc/docker/certs.d/harbor.gesila.com/harbor.gesila.com.crt 192.168.47.47:/etc/docker/certs.d/harbor.gesila.com
systemctl daemon-reload && systemctl restart docker
Note: for other hosts to log in to Harbor, the IP in /lib/systemd/system/docker.service must be changed to the Harbor domain name (a domain name is used here).
6. Passwordless SSH login from the master
apt-get install sshpass   # used to push the ssh public key
root@k8s-master1:~# ssh-keygen
Script to sync the certificate and keys:
-----------------------------------------------------------------------------------
#!/bin/bash
# Target host list
IP="192.168.47.50
192.168.47.51
192.168.47.52
192.168.47.53
192.168.47.54"
for node in ${IP};do
  sshpass -p tao123 ssh-copy-id ${node} -o StrictHostKeyChecking=no
  if [ $? -eq 0 ];then
    echo "${node} key copied"
    echo "${node} key copied, starting environment initialization....."
    ssh ${node} "mkdir /etc/docker/certs.d/harbor.gesila.com -p"
    echo "Harbor certificate directory created!"
    scp /etc/docker/certs.d/harbor.gesila.com/harbor.gesila.com.crt ${node}:/etc/docker/certs.d/harbor.gesila.com/harbor.gesila.com.crt > /dev/null
    echo "Harbor certificate copied!"
    scp -r /root/.docker ${node}:/root/ > /dev/null
    echo "Harbor auth file copied!"
    scp /etc/hosts ${node}:/etc/hosts > /dev/null
    echo "hosts file copied"
    scp /lib/systemd/system/docker.service ${node}:/lib/systemd/system/docker.service > /dev/null
    ssh ${node} "systemctl daemon-reload && systemctl restart docker"
    echo "docker.service copied and docker restarted"
  else
    echo "${node} key copy failed"
  fi
done
-----------------------------------------------------------------------------------
tao123 is the login user's password.
# Test
sshpass -p "tao123" ssh-copy-id 192.168.47.47 -o StrictHostKeyChecking=no
Then test logging in to the other hosts.
7. Ansible deployment
7.1. Install Ansible
apt-get install git ansible -y
# the kubeasz repository URL was stripped from the page; the upstream project is assumed
git clone -b 0.6.0 https://github.com/easzlab/kubeasz.git
mkdir /data/ansible
root@k8s-master1:~/kubeasz# mv /etc/ansible/* /data/ansible/
root@k8s-master1:~/kubeasz# pwd
/root/kubeasz
root@k8s-master1:~/kubeasz# cp -r ./* /etc/ansible/
root@k8s-master1:~/kubeasz# cd /etc/ansible/
root@k8s-master1:/etc/ansible# cp example/hosts.m-masters.example ./hosts
root@k8s-master1:/etc/ansible# ll
-------------------------------------------------------------------
total 132
drwxr-xr-x 10 root root  4096 Dec 27 15:33 ./
drwxr-xr-x 96 root root  4096 Dec 27 15:28 ../
-rw-r--r--  1 root root   499 Dec 27 15:31 01.prepare.yml
-rw-r--r--  1 root root    58 Dec 27 15:31 02.etcd.yml
-rw-r--r--  1 root root   115 Dec 27 15:31 03.docker.yml
-rw-r--r--  1 root root   532 Dec 27 15:31 04.kube-master.yml
-rw-r--r--  1 root root    72 Dec 27 15:31 05.kube-node.yml
-rw-r--r--  1 root root   346 Dec 27 15:31 06.network.yml
-rw-r--r--  1 root root    77 Dec 27 15:31 07.cluster-addon.yml
-rw-r--r--  1 root root  1549 Dec 27 15:31 11.harbor.yml
-rw-r--r--  1 root root  1667 Dec 27 15:31 19.addetcd.yml
-rw-r--r--  1 root root  1110 Dec 27 15:31 20.addnode.yml
-rw-r--r--  1 root root  1666 Dec 27 15:31 21.addmaster.yml
-rw-r--r--  1 root root   467 Dec 27 15:31 22.upgrade.yml
-rw-r--r--  1 root root  1394 Dec 27 15:31 23.backup.yml
-rw-r--r--  1 root root  1447 Dec 27 15:31 24.restore.yml
-rw-r--r--  1 root root  1723 Dec 27 15:31 90.setup.yml
-rw-r--r--  1 root root  5496 Dec 27 15:31 99.clean.yml
-rw-r--r--  1 root root 10283 Dec 27 15:31 ansible.cfg
drwxr-xr-x  2 root root  4096 Dec 27 15:31 bin/
drwxr-xr-x  8 root root  4096 Dec 27 15:31 docs/
drwxr-xr-x  2 root root  4096 Dec 27 15:31 down/
drwxr-xr-x  2 root root  4096 Dec 27 15:31 example/
-rw-r--r--  1 root root  2667 Dec 27 15:33 hosts
drwxr-xr-x 14 root root  4096 Dec 27 15:31 manifests/
drwxr-xr-x  2 root root  4096 Dec 27 15:31 pics/
-rw-r--r--  1 root root  4963 Dec 27 15:31 README.md
drwxr-xr-x 23 root root  4096 Dec 27 15:31 roles/
drwxr-xr-x  2 root root  4096 Dec 27 15:31 tools/
-------------------------------------------------------------------
7.2. Edit the hosts file
root@k8s-master:/etc/ansible# grep -Ev '^($|#)' hosts
[deploy]
192.168.47.47 NTP_ENABLED=no
[etcd]
192.168.47.50 NODE_NAME=etcd1
192.168.47.51 NODE_NAME=etcd2
192.168.47.52 NODE_NAME=etcd3
[new-etcd] # reserved group for adding etcd nodes later
[kube-master]
192.168.47.47
[new-master] # reserved group for adding master nodes later
[kube-node]
192.168.47.53
192.168.47.54
[new-node] # reserved group for adding nodes later
[harbor]
[lb]
[ex-lb]
[all:vars]
DEPLOY_MODE=multi-master
K8S_VER="v1.13"
MASTER_IP="192.168.47.49"
KUBE_APISERVER="https://{{ MASTER_IP }}:6443"
CLUSTER_NETWORK="calico"
SERVICE_CIDR="10.20.0.0/16"
CLUSTER_CIDR="172.31.0.0/16"
NODE_PORT_RANGE="20000-40000"
CLUSTER_KUBERNETES_SVC_IP="10.20.0.1"
CLUSTER_DNS_SVC_IP="10.20.254.254"
CLUSTER_DNS_DOMAIN="linux36.local."
BASIC_AUTH_USER="admin"
BASIC_AUTH_PASS="test1234"
bin_dir="/usr/bin"
ca_dir="/etc/kubernetes/ssl"
base_dir="/etc/ansible"
7.3. Binaries
root@k8s-master1:~# tar xf k8s.1-13-5.tar.gz
root@k8s-master1:~# ls
bin  k8s.1-13-5.tar.gz
root@k8s-master1:~# cd bin/
root@k8s-master1:~/bin# mv * /etc/ansible/bin/
root@k8s-master1:~/bin# cd /etc/ansible/bin/
root@k8s-master1:/etc/ansible/bin# ll
total 763036
drwxr-xr-x  2 root root      4096 Dec 27 16:52 ./
drwxr-xr-x 10 root root      4096 Dec 27 15:39 ../
-rwxr-xr-x  1 root root   4028260 Mar 16  2019 bridge*
-rwxr-xr-x  1 root root  30863968 Mar 10  2019 calicoctl*
-rwxr-xr-x  1 root root  10376657 Jun 22  2018 cfssl*
-rwxr-xr-x  1 root root   6595195 Jun 22  2018 cfssl-certinfo*
-rwxr-xr-x  1 root root   2277873 Jun 22  2018 cfssljson*
-rwxr-xr-x  1 root root  27941976 Feb 10  2019 containerd*
-rwxr-xr-x  1 root root   4964704 Feb 10  2019 containerd-shim*
-rwxr-xr-x  1 root root  15678392 Feb 10  2019 ctr*
-rwxr-xr-x  1 root root  50683148 Feb 10  2019 docker*
-rwxr-xr-x  1 root root  10858808 Jul  6  2018 docker-compose*
-rwxr-xr-x  1 root root  54320560 Feb 10  2019 dockerd*
-rwxr-xr-x  1 root root    764144 Feb 10  2019 docker-init*
-rwxr-xr-x  1 root root   2837280 Feb 10  2019 docker-proxy*
-rwxr-xr-x  1 root root  19237536 Oct 11  2018 etcd*
-rwxr-xr-x  1 root root  15817472 Oct 11  2018 etcdctl*
-rwxr-xr-x  1 root root   2856252 Mar 16  2019 flannel*
-rwxr-xr-x  1 root root  36844864 Jan 23  2019 helm*
-rwxr-xr-x  1 root root   3036768 Mar 16  2019 host-local*
-rwxr-xr-x  1 root root 138710240 Mar 26  2019 kube-apiserver*
-rwxr-xr-x  1 root root 103982976 Mar 26  2019 kube-controller-manager*
-rwxr-xr-x  1 root root  39239104 Mar 26  2019 kubectl*
-rwxr-xr-x  1 root root 113104888 Mar 26  2019 kubelet*
-rwxr-xr-x  1 root root  34820416 Mar 26  2019 kube-proxy*
-rwxr-xr-x  1 root root  37279968 Mar 26  2019 kube-scheduler*
-rwxr-xr-x  1 root root   3084347 Mar 16  2019 loopback*
-rwxr-xr-x  1 root root   3551125 Mar 16  2019 portmap*
-rwxr-xr-x  1 root root       171 Mar 11  2019 readme.md*
-rwxr-xr-x  1 root root   7522464 Feb 10  2019 runc*
7.4. Start the deployment
7.4.1. Environment initialization
ansible-playbook 01.prepare.yml
7.4.2. Deploy the etcd cluster
ansible-playbook 02.etcd.yml
Verify on any etcd node (the endpoint URL was stripped from the page; https://${ip}:2379, etcd's standard client port, is assumed):
root@k8s-etcd1:~# export NODE_IPS="192.168.47.50 192.168.47.51 192.168.47.52"
root@k8s-etcd1:~# for ip in ${NODE_IPS}; do ETCDCTL_API=3 /usr/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem endpoint health; done
https://192.168.47.50:2379 is healthy: successfully committed proposal: took = 1.002603ms
https://192.168.47.51:2379 is healthy: successfully committed proposal: took = 1.888051ms
https://192.168.47.52:2379 is healthy: successfully committed proposal: took = 1.666994ms
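Cluster membership can be double-checked with the same certificates (a sketch using one endpoint):
ETCDCTL_API=3 /usr/bin/etcdctl --endpoints=https://192.168.47.50:2379 \
  --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem \
  --key=/etc/etcd/ssl/etcd-key.pem member list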
7.4.3. Deploy Docker
Docker was already installed earlier, so this step (03.docker.yml) can be skipped.
7.4.4. Deploy the master
7.4.4.1. Images required by v1.13.5
kubeadm config images list --kubernetes-version v1.13.5
--------------------------------------------------
k8s.gcr.io/kube-apiserver:v1.13.5
k8s.gcr.io/kube-controller-manager:v1.13.5
k8s.gcr.io/kube-scheduler:v1.13.5
k8s.gcr.io/kube-proxy:v1.13.5
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.2.24
k8s.gcr.io/coredns:1.2.6
--------------------------------------------------
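Since k8s.gcr.io is not reachable from every network, these images can be pulled through the same Aliyun mirror used later for the dashboard and retagged (a sketch; it assumes the mirror namespace carries all of these tags):
for img in kube-apiserver:v1.13.5 kube-controller-manager:v1.13.5 kube-scheduler:v1.13.5 \
           kube-proxy:v1.13.5 pause:3.1 etcd:3.2.24 coredns:1.2.6; do
  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/${img}
  docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/${img} k8s.gcr.io/${img}
done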
7.4.4.2. The pause image
To speed up image pulls, push the downloaded images to Harbor.
The pause image must be present on every node.
# Push the image to the local harbor
docker tag da86e6ba6ca1 harbor.gesila.com/k8s/pause:3.1
docker images
docker push harbor.gesila.com/k8s/pause:3.1
# Change the image source
vim /etc/ansible/roles/kube-node/defaults/main.yml
----------------------------------------------------------------
# base container image
SANDBOX_IMAGE: "harbor.gesila.com/k8s/pause:3.1"
----------------------------------------------------------------
Note: remember to set the Harbor project to public, otherwise the images cannot be pulled.
7.4.4.3. Test that the master can reach the VIP
root@k8s-master1:/etc/ansible/images# telnet 192.168.47.49 6443
Trying 192.168.47.49...
Connected to 192.168.47.49.
Escape character is '^]'.
Connection closed by foreign host.
7.4.4.4. Run the playbook
root@k8s-master1:/etc/ansible# ansible-playbook 04.kube-master.yml
root@k8s-master:/etc/ansible/manifests/dashboard# kubectl get nodes
NAME            STATUS                     ROLES    AGE    VERSION
192.168.47.47   Ready,SchedulingDisabled   master   115m   v1.13.5
192.168.47.53   Ready                      node     114m   v1.13.5
192.168.47.54   Ready                      node     114m   v1.13.5
7.4.5. Deploy the nodes
root@k8s-master1:/etc/ansible# ansible-playbook 05.kube-node.yml
7.4.6. Deploy the Calico network
7.4.6.1. Download the package
# the download URL was stripped from the page; the calico v3.3.6 release archive is assumed
tar xf calico-release-v3.3.6.tgz
root@k8s-master1:/etc/ansible/images/release-v3.3.6# tree
.
├── bin
│   ├── calicoctl
│   ├── calicoctl-darwin-amd64
│   └── calicoctl-windows-amd64.exe
├── images
│   ├── calico-cni.tar
│   ├── calico-kube-controllers.tar
│   ├── calico-node.tar
│   └── calico-typha.tar
├── k8s-manifests
│   ├── calico-kube-controllers.yaml
│   ├── hosted
│   │   ├── calicoctl.yaml
│   │   ├── calico.yaml
│   │   ├── canal
│   │   │   ├── canal-etcd.yaml
│   │   │   ├── canal.yaml
│   │   │   ├── rbac-etcd.yaml
│   │   │   └── rbac.yaml
│   │   ├── etcd.yaml
│   │   ├── kubernetes-datastore
│   │   │   ├── calicoctl.yaml
│   │   │   ├── calico-networking
│   │   │   │   └── 1.7
│   │   │   │       └── calico.yaml
│   │   │   └── policy-only
│   │   │       └── 1.7
│   │   │           └── calico.yaml
│   │   └── rbac-kdd.yaml
│   └── rbac.yaml
└── README
7.4.6.2. Push the images to the local Harbor
root@k8s-master1:/etc/ansible/images/release-v3.3.6/images# tree
.
├── calico-cni.tar
├── calico-kube-controllers.tar
├── calico-node.tar
└── calico-typha.tar
# Load the images
docker load -i calico-cni.tar
docker load -i calico-kube-controllers.tar
docker load -i calico-node.tar
docker images
# Tag them
docker tag ce902e610f51 harbor.gesila.com/k8s/node:v3.3.6 && \
docker tag b8eeeae14aa4 harbor.gesila.com/k8s/cni:v3.3.6 && \
docker tag 2fd138c9cb06 harbor.gesila.com/k8s/kube-controllers:v3.3.6
# Push to harbor
docker push harbor.gesila.com/k8s/node:v3.3.6 && \
docker push harbor.gesila.com/k8s/cni:v3.3.6 && \
docker push harbor.gesila.com/k8s/kube-controllers:v3.3.6
7.4.6.3. Check the version
root@k8s-master1:/etc/ansible/images/release-v3.3.6/bin# ./calicoctl version
Client Version: v3.3.6
7.4.6.4. Set the version
vim /etc/ansible/roles/calico/defaults/main.yml
----------------------------------------------------------
# supported calico versions: [v3.2.x] [v3.3.x] [v3.4.x]
calico_ver: "v3.3.6"
----------------------------------------------------------
7.4.6.5. Change the image sources in the template
vim /etc/ansible/roles/calico/templates/calico-v3.3.yaml.j2
----------------------------------------------------------
- name: calico-node
  image: harbor.gesila.com/k8s/node:v3.3.6
- name: install-cni
  image: harbor.gesila.com/k8s/cni:v3.3.6
- name: calico-kube-controllers
  image: harbor.gesila.com/k8s/kube-controllers:v3.3.6
----------------------------------------------------------
7.4.6.6. Run the playbook
ansible-playbook 06.network.yml   # fails if the nodes cannot pull the images; a missing pause image also blocks pulling the other three
calicoctl node status
kubectl get nodes
8. Kubernetes web UI: dashboard
8.1. Pull the image and push it to the local registry
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.10.1
docker tag f9aed6605b81 harbor.gesila.com/k8s/kubernetes-dashboard-amd64:v1.10.1
docker push harbor.gesila.com/k8s/kubernetes-dashboard-amd64:v1.10.1
8.2. Change the image source
root@k8s-master:/etc/ansible/manifests/dashboard# ll
-rw-r--r-- 1 root root  357 Dec 27 21:24 admin-user-sa-rbac.yaml
-rw-r--r-- 1 root root 4766 Dec 27 21:24 kubernetes-dashboard.yaml
-rw-r--r-- 1 root root 2223 Dec 27 21:24 read-user-sa-rbac.yaml
-rw-r--r-- 1 root root  458 Dec 27 21:24 ui-admin-rbac.yaml
-rw-r--r-- 1 root root  477 Dec 27 21:24 ui-read-rbac.yaml
vim kubernetes-dashboard.yaml
---------------------------------------------------------------------------
- name: kubernetes-dashboard
  image: harbor.gesila.com/k8s/kubernetes-dashboard-amd64:v1.10.1
---------------------------------------------------------------------------
8.3. Run
root@k8s-master1:/etc/ansible/manifests/dashboard/1.10.1# kubectl create -f .
---------------------------------------------------------------------------
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
secret/kubernetes-dashboard-certs created
serviceaccount/kubernetes-dashboard created
role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
deployment.apps/kubernetes-dashboard created
service/kubernetes-dashboard created
serviceaccount/dashboard-read-user created
clusterrolebinding.rbac.authorization.k8s.io/dashboard-read-binding created
clusterrole.rbac.authorization.k8s.io/dashboard-read-clusterrole created
clusterrole.rbac.authorization.k8s.io/ui-admin created
rolebinding.rbac.authorization.k8s.io/ui-admin-binding created
clusterrole.rbac.authorization.k8s.io/ui-read created
rolebinding.rbac.authorization.k8s.io/ui-read-binding created
---------------------------------------------------------------------------
kubectl get pods -n kube-system      # confirm kubernetes-dashboard is running
kubectl get service -n kube-system
kubectl cluster-info                 # get the access URL
8.4. Login password
vim /etc/ansible/hosts
---------------------------------------------------------------------------
# username and password for the cluster's basic auth
BASIC_AUTH_USER="admin"
BASIC_AUTH_PASS="test1234"
---------------------------------------------------------------------------
8.5. Get a token to log in to the dashboard
root@k8s-master:/etc/ansible/manifests/dashboard# kubectl -n kube-system get secret | grep admin-user
admin-user-token-mlfkq   kubernetes.io/service-account-token   3   5m14s
kubectl -n kube-system describe secret admin-user-token-mlfkq
8.6. Log in with a kubeconfig
cp /root/.kube/config /opt/kubeconfig
Add the token to the kubeconfig file, then choose the kubeconfig login method and select that file.
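For example, the token from section 8.5 can be attached to the user entry of the copied file (a sketch; the user name depends on your existing config):
users:
- name: admin
  user:
    token: <admin-user token from section 8.5>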
9. Other settings
9.1. Switch kube-proxy from iptables to IPVS and set the scheduling algorithm
vim /etc/systemd/system/kube-proxy.service
----------------------------------------------
--proxy-mode=ipvs \
--ipvs-scheduler=sh
----------------------------------------------
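After editing the unit, reload and restart kube-proxy, then confirm IPVS took over (ipvsadm is used here only for inspection):
systemctl daemon-reload && systemctl restart kube-proxy
apt-get install ipvsadm -y
ipvsadm -Ln    # the virtual servers should list the sh scheduler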
9.2. Set the token session TTL
vim /etc/ansible/manifests/dashboard/1.6.3/kubernetes-dashboard.yaml
----------------------------------------------------------------------------
image: 192.168.200.110/baseimages/kubernetes-dashboard-amd64:v1.10.1
ports:
- containerPort: 8443
  protocol: TCP
args:
  - --auto-generate-certificates
  - --token-ttl=43200
----------------------------------------------------------------------------
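Reapply the manifest so the new args take effect (a sketch; deleting and recreating as in section 8.3 also works):
kubectl apply -f kubernetes-dashboard.yaml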
9.3. Session affinity
sessionAffinity: ClientIP
sessionAffinityConfig:
  clientIP:
    timeoutSeconds: 10800
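These lines belong in a Service spec. In context, a minimal sketch of the dashboard Service; the type, selector, and ports here are assumptions:
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  type: NodePort                      # assumed
  selector:
    k8s-app: kubernetes-dashboard     # assumed
  ports:
  - port: 443
    targetPort: 8443
  sessionAffinity: ClientIP
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 10800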
9.4. Inspect the Docker login credential file
root@node1:~# cat /root/.docker/config.json
{
    "auths": {
        "harbor.gesila.com": {
            "auth": "YWRtaW46MTIzNDU="
        }
    }
}
root@node1:~# cat /root/.docker/config.json | base64
ewoJImF1dGhzIjogewoJCSJoYXJib3IuZ2VzaWxhLmNvbSI6IHsKCQkJImF1dGgiOiAiWVdSdGFXNDZNVEl6TkRVPSIKCQl9Cgl9Cn0=
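The auth value is just base64 of user:password, so the file should be kept private; decoding it reveals the credentials in the clear:
echo "YWRtaW46MTIzNDU=" | base64 -d
admin:12345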