This article walks through building a Kubernetes v1.16.2 high-availability cluster.
Part 1: Preparing the base environment
1. Installation plan
Pod IP range: 10.244.0.0/16
ClusterIP range: 10.99.0.0/16 (note: the kubeadm config used later keeps the default serviceSubnet of 10.96.0.0/12)
CoreDNS address: 10.99.110.110
VIP: 192.168.204.199
Role assignment
| k8s-master01 | 192.168.204.158 | Master | kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kube-proxy, docker, calico
| k8s-master02 | 192.168.204.159 | Master | kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kube-proxy, docker, calico
| k8s-master03 | 192.168.204.160 | Master | kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kube-proxy, docker, calico
| k8s-worker01 | 192.168.204.161 | Node | kubelet, kube-proxy, docker, calico
| k8s-worker02 | 192.168.204.162 | Node | kubelet, kube-proxy, docker, calico
2. Time synchronization
Install the time synchronization software:
yum install chrony -y
Edit /etc/chrony.conf and add the following line:
server time.aliyun.com iburst
Start chronyd:
systemctl start chronyd
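Optionally, enable chronyd at boot and confirm the Aliyun time source is reachable (chronyc ships with the chrony package):
systemctl enable chronyd
chronyc sources -v    # the time.aliyun.com source should show a '^*' marker once synchronized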
3. Install required packages
yum install wget vim gcc git lrzsz net-tools tcpdump telnet rsync -y
yum -y install vim-enhanced wget curl net-tools conntrack-tools bind-utils socat ipvsadm ipset
4. Tune kernel parameters
cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.ip_forward = 1
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1
net.netfilter.nf_conntrack_max = 2310720
fs.inotify.max_user_watches=89100
fs.may_detach_mounts = 1
fs.file-max = 52706963
fs.nr_open = 52706963
EOF
Load the br_netfilter module first (required for the net.bridge.* keys), then apply the settings:
modprobe br_netfilter
sysctl --system
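br_netfilter is not loaded automatically after a reboot. To make it persistent, one option (a minimal sketch using the systemd modules-load.d convention) is:
cat > /etc/modules-load.d/k8s.conf << EOF
br_netfilter
EOF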
5. Load the IPVS modules
Run the following script on all Kubernetes nodes (on kernels 4.19 and later, replace nf_conntrack_ipv4 with nf_conntrack; a version-aware variant is sketched after the load command below):
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
# Run the script and verify the modules are loaded
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
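If the nodes end up on mixed kernel versions after the upgrade in the next step, a small check can pick the right conntrack module automatically; a hedged sketch:
KERNEL_MAJOR=$(uname -r | cut -d. -f1)
KERNEL_MINOR=$(uname -r | cut -d. -f2)
if [ "$KERNEL_MAJOR" -gt 4 ] || { [ "$KERNEL_MAJOR" -eq 4 ] && [ "$KERNEL_MINOR" -ge 19 ]; }; then
    modprobe -- nf_conntrack        # kernels >= 4.19
else
    modprobe -- nf_conntrack_ipv4   # older kernels
fi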
5,升级系统内核
[root@K8S-PROD-MASTER-A1~]# rpm -import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
安装elrepo的yum源
[root@K8S-PROD-MASTER-A1 ~]# rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
Retrieving http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
Retrieving http://elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
Preparing... #################################[100%]
Updating / installing...
1:elrepo-release-7.0-3.el7.elrepo #################################[100%]
List the kernel packages available from the elrepo-kernel repository:
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available
Install the long-term (kernel-lt, 4.4.x) kernel:
yum --enablerepo=elrepo-kernel install kernel-lt kernel-lt-devel -y
Set the new kernel as the default boot entry and reboot:
grub2-set-default 0
reboot
Verify that the kernel was upgraded:
[root@K8S-PROD-LB-A1 ~]# uname -rp
4.4.176-1.el7.elrepo.x86_64 x86_64
7. Disable the firewall
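On CentOS 7 this typically means stopping and disabling firewalld:
systemctl stop firewalld
systemctl disable firewalld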
8. Disable SELinux and swap, and set up host mappings
Disable SELinux:
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
Disable swap:
swapoff -a        # temporary
vim /etc/fstab    # permanent: comment out the swap line
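Instead of editing /etc/fstab by hand, the swap entry can be commented out non-interactively; a minimal sketch:
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab    # comment out any line containing 'swap'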
Raise the open-file and process limits:
cat >>/etc/security/limits.conf <<EOF
* soft memlock unlimited
* hard memlock unlimited
* soft nofile 65535
* hard nofile 65535
* soft nproc 65535
* hard nproc 65535
EOF
Add the hostname-to-IP mappings (remember to set each node's hostname first; see the hostnamectl example after the hosts block) so that /etc/hosts contains:
cat /etc/hosts
192.168.204.158 k8s-master01
192.168.204.159 k8s-master02
192.168.204.160 k8s-master03
192.168.204.161 k8s-worker01
192.168.204.162 k8s-worker02
# Append the entries to /etc/hosts
cat >> /etc/hosts << EOF
192.168.204.158 k8s-master01
192.168.204.159 k8s-master02
192.168.204.160 k8s-master03
192.168.204.161 k8s-worker01
192.168.204.162 k8s-worker02
EOF
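Setting the hostname itself can be done with hostnamectl, for example (run the matching name on each node):
hostnamectl set-hostname k8s-master01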
9. Install Docker
yum install -y yum-utils device-mapper-persistent-data lvm2
#wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/dockerce.repo
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum list docker-ce --showduplicates | sort -r
yum -y install docker-ce-18.09.0 docker-ce-cli-18.09.0
docker --version
Configure the Docker daemon parameters in /etc/docker/daemon.json:
# mkdir /etc/docker
# vi /etc/docker/daemon.json
{
"log-level": "warn",
"selinux-enabled": false,
"insecure-registries": ["192.168.204.188:10500"],
"registry-mirrors": ["https://3laho3y3.mirror.aliyuncs.com"],
"default-shm-size": "128M",
"data-root": "/data/docker",
"max-concurrent-downloads": 10,
"max-concurrent-uploads": 5,
"oom-score-adjust": -1000,
"debug": false,
"live-restore": true,
"exec-opts": ["native.cgroupdriver=systemd"]
}
192.168.204.188:10500 is the private Harbor registry used in this setup; replace it with your own registry address. (JSON does not allow inline comments, so keep such notes outside daemon.json.)
Parameter reference
| Parameter | Description
| log-level | Log level: [error|warn|info|debug]
| insecure-registries | Private registries to trust; separate multiple addresses with commas
| registry-mirrors | Default registry mirror(s) used when pulling images
| max-concurrent-downloads | Maximum concurrent image downloads
| max-concurrent-uploads | Maximum concurrent image uploads
| live-restore | Keep containers running while the Docker daemon is stopped: [true|false]
| native.cgroupdriver | Cgroup driver used by Docker (systemd here, matching the kubelet)
| data-root | Docker storage path; defaults to /var/lib/docker
See the official documentation for the full list of parameters:
https://docs.docker.com/engine/reference/commandline/dockerd/#/linux-configuration-file
/etc/sysconfig/docker (in testing, this file is not needed once /etc/docker/daemon.json is set; daemon.json is the file that matters)
[root@k8s-harbor harbor]# cat /etc/sysconfig/docker
OPTIONS='--selinux-enabled --log-driver=journald --signature-verification=false --registry-mirror=https://fzhifedh.mirror.aliyuncs.com --insecure-registry=192.168.204.188:10500'
if [ -z "${DOCKER_CERT_PATH}" ]; then
DOCKER_CERT_PATH=/etc/docker
fi
/usr/lib/systemd/system/docker.service (the Docker systemd unit file)
# cat /usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=-/etc/sysconfig/docker
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/bin/dockerd $other_args
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
#TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
# restart the docker process if it exits prematurely
Restart=on-failure
MountFlags=slave
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
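After writing daemon.json (and, optionally, the files above), reload systemd and restart Docker so the settings take effect; since data-root points at /data/docker, make sure that filesystem has enough space:
systemctl daemon-reload
systemctl enable docker
systemctl restart docker
docker info | grep -E 'Cgroup Driver|Docker Root Dir'   # should show 'systemd' and '/data/docker'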
10. Install the load balancer
The Kubernetes master nodes run the following components:
kube-apiserver
kube-scheduler
kube-controller-manager
kube-scheduler and kube-controller-manager run in an active-standby fashion: leader election picks one working process while the others block.
kube-apiserver can run as multiple instances, but the other components need a single, highly available address to reach it. This deployment uses keepalived + haproxy to provide a VIP and load balancing for kube-apiserver.
keepalived owns the VIP that exposes kube-apiserver; haproxy listens on the VIP, backends to all kube-apiserver instances, and provides health checking and load balancing.
The nodes running keepalived and haproxy are called LB nodes. Because keepalived works in a one-master, multiple-backup mode, at least two LB nodes are required.
This deployment reuses the three master machines and runs haproxy and keepalived on all three for better availability. The haproxy listen port (6444) must differ from kube-apiserver's 6443 to avoid a conflict.
keepalived periodically checks the local haproxy process; if it detects a failure, a new master is elected and the VIP floats to that node, keeping the VIP highly available.
All components (kubectl, apiserver, controller-manager, scheduler, and so on) reach kube-apiserver through the VIP on haproxy's port 6444.
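For reference, the haproxy configuration inside the container boils down to a TCP frontend on 6444 that balances across the three apiservers on 6443. A rough sketch of that section follows (the actual file shipped in the wise2c image may differ in detail):
frontend kube-apiserver
    bind *:6444
    mode tcp
    default_backend kube-apiserver-nodes
backend kube-apiserver-nodes
    mode tcp
    balance roundrobin
    server k8s-master01 192.168.204.158:6443 check
    server k8s-master02 192.168.204.159:6443 check
    server k8s-master03 192.168.204.160:6443 check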
Run the HA containers
The container images come from Wise2C's open-source Breeze project; see:
https://github.com/wise2c-devops
Alternatives: the official haproxy image on Docker Hub can be used instead; keepalived has no official image, so you can build your own or use a community image. This deployment uses the Breeze images throughout.
Deploy haproxy as a container on the three master nodes; the container exposes port 6444 and load-balances to the three apiservers on port 6443. The haproxy configuration is identical on all three nodes.
Run the following on master01.
Create the haproxy start script
Edit start-haproxy.sh and set the Kubernetes master IP addresses to your actual values (MasterPort stays at the default 6443):
mkdir -p /data/lb
cat > /data/lb/start-haproxy.sh << "EOF"
#!/bin/bash
MasterIP1=192.168.204.158
MasterIP2=192.168.204.159
MasterIP3=192.168.204.160
MasterPort=6443
docker run -d --restart=always --name HAProxy-K8S -p 6444:6444 \
-e MasterIP1=$MasterIP1 \
-e MasterIP2=$MasterIP2 \
-e MasterIP3=$MasterIP3 \
-e MasterPort=$MasterPort \
wise2c/haproxy-k8s
EOF
Create the keepalived start script
Edit start-keepalived.sh and set the virtual IP (VIRTUAL_IP), network interface (INTERFACE), subnet mask bits (NETMASK_BIT), router ID (RID) and virtual router ID (VRID) to the values used by your cluster. (CHECK_PORT, 6444, normally stays unchanged: it is the port exposed by HAProxy, which forwards to the masters' 6443.)
cat > /data/lb/start-keepalived.sh << "EOF"
#!/bin/bash
VIRTUAL_IP=192.168.204.199
INTERFACE=ens33
NETMASK_BIT=24
CHECK_PORT=6444
RID=10
VRID=160
MCAST_GROUP=224.0.0.18
docker run -itd --restart=always --name=Keepalived-K8S \
--net=host --cap-add=NET_ADMIN \
-e VIRTUAL_IP=$VIRTUAL_IP \
-e INTERFACE=$INTERFACE \
-e CHECK_PORT=$CHECK_PORT \
-e RID=$RID \
-e VRID=$VRID \
-e NETMASK_BIT=$NETMASK_BIT \
-e MCAST_GROUP=$MCAST_GROUP \
wise2c/keepalived-k8s
EOF
Copy the start scripts to the other two master nodes:
[root@k8s-master02 ~]# mkdir -p /data/lb
[root@k8s-master03 ~]# mkdir -p /data/lb
[root@k8s-master01 ~]# scp /data/lb/start-haproxy.sh /data/lb/start-keepalived.sh 192.168.204.159:/data/lb/
[root@k8s-master01 ~]# scp /data/lb/start-haproxy.sh /data/lb/start-keepalived.sh 192.168.204.160:/data/lb/
On each of the three master nodes, run the scripts to start the haproxy and keepalived containers:
sh /data/lb/start-haproxy.sh && sh /data/lb/start-keepalived.sh
Verify the HA setup
Check that the containers are running:
[root@k8s-master01 ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
c1d1901a7201 wise2c/haproxy-k8s "/docker-entrypoint.…" 5 days ago Up 3 hours 0.0.0.0:6444->6444/tcp HAProxy-K8S
2f02a9fde0be wise2c/keepalived-k8s "/usr/bin/keepalived…" 5 days ago Up 3 hours Keepalived-K8S
Check that the VIP 192.168.204.199 is bound to the interface:
[root@k8s-master01 ~]# ip a | grep ens33
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
inet 192.168.204.158/24 brd 192.168.204.255 scope global noprefixroute ens33
inet 192.168.204.199/24 scope global secondary ens33
Check that port 6444 is listening:
[root@k8s-master01 ~]# netstat -tnlp | grep 6444
tcp6 0 0 :::6444 :::* LISTEN 11695/docker-proxy
The keepalived configuration defines a vrrp_script that probes haproxy's port 6444 with nc; if the probe fails, the local haproxy is considered unhealthy and the VIP fails over to another node.
In other words, a failure of either the local keepalived container or the haproxy container moves the VIP, so you can test failover by stopping either container on the node that currently holds the VIP.
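That health check corresponds to a vrrp_script block roughly like the following (a sketch of the idea only; the exact file inside the keepalived container may differ):
vrrp_script check_haproxy {
    script "nc -v -w 2 -z 127.0.0.1 6444"
    interval 2
    fall 3
    weight -20
}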
[root@k8s-master01 ~]# docker stop HAProxy-K8S
HAProxy-K8S
# The VIP has floated to k8s-master02
[root@k8s-master02 ~]# ip a | grep ens33
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
inet 192.168.204.159/24 brd 192.168.204.255 scope global noprefixroute ens33
inet 192.168.204.199/24 scope global secondary ens33
You can also run the nc probe locally to see the result:
[root@k8s-master02 ~]# yum install -y nc
[root@k8s-master02 ~]# nc -v -w 2 -z 127.0.0.1 6444 2>&1 | grep 'Connected to' | grep 6444
Ncat: Connected to 127.0.0.1:6444.
The haproxy and keepalived configuration files can be found via the Dockerfiles in the GitHub project, or inspected inside the running containers with docker exec -it <container> sh; the paths inside the containers are:
/etc/keepalived/keepalived.conf
/usr/local/etc/haproxy/haproxy.cfg
With the load balancer in place, the Kubernetes cluster itself can be deployed.
11. Install kubeadm
Add the Alibaba Cloud Kubernetes yum repository:
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
Install kubeadm, kubelet and kubectl:
yum install -y kubelet-1.16.2 kubeadm-1.16.2 kubectl-1.16.2
systemctl enable kubelet
12. Log in to the Alibaba Cloud image registry (optional)
docker login --username=QDSH registry.cn-hangzhou.aliyuncs.com
Enter the password when prompted.
After a successful login, check the stored credentials with:
cat ~/.docker/config.json
Part 2: Cluster configuration
1. Deploy the Kubernetes masters
The command below only builds a single-master cluster; a highly available control plane must be initialized from a config file (covered in the next section).
Command-line initialization (single-master only; not the approach used in this guide):
kubeadm init \
--apiserver-advertise-address=192.168.204.199 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.16.2 \
--service-cidr=10.1.0.0/16 \
--pod-network-cidr=10.244.0.0/16
--apiserver-advertise-address is generally pointed at the haproxy+keepalived VIP.
--service-cidr (default 10.96.0.0/12) sets the service CIDR.
--pod-network-cidr sets the pod network range; once set, the control plane automatically assigns a CIDR (Classless Inter-Domain Routing block) to each node.
2. Multi-master deployment (the focus of this guide)
Config-file initialization (for multiple masters). Generate a default configuration:
kubeadm config print init-defaults > kubeadm-config.yaml
2.1 Adjust the generated configuration
controlPlaneEndpoint: the VIP address plus the haproxy port 6444.
imageRepository: k8s.gcr.io is not reachable from mainland China, so point this at the Alibaba Cloud mirror (registry.aliyuncs.com/google_containers or, as used below, registry.cn-hangzhou.aliyuncs.com/google_containers).
podSubnet: must match the network plugin deployed later; both the Calico and flannel manifests below use 10.244.0.0/16.
mode: ipvs: the KubeProxyConfiguration appended at the end enables IPVS mode.
---
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.204.158
bindPort: 6443
nodeRegistration:
criSocket: /var/run/dockershim.sock
name: k8s-master01
taints:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiServer:
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "192.168.204.199:6444"
controllerManager: {}
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.16.2
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
SupportIPVSProxyMode: true
mode: ipvs
2.2 Run kubeadm init
mv kubeadm-config.yaml kubeadm-init-config.yaml
kubeadm init --config=kubeadm-init-config.yaml --upload-certs | tee kubeadm-init.log
kubectl -n kube-system get cm kubeadm-config -oyaml
The tee command saves the initialization log to kubeadm-init.log for later reference (optional).
The --upload-certs flag uploads the certificates so that control-plane nodes joining later fetch them automatically (no manual copying of certificate files).
-----------------------------------------------------------------------------------------------------
2.3 The initialization output looks like this:
[root@k8s-master01 ~]# kubeadm init --config=kubeadm-init-config.yaml --upload-certs | tee kubeadm-init.log
[init] Using Kubernetes version: v1.16.2
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.204.158 192.168.204.199]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master01 localhost] and IPs [192.168.204.158 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master01 localhost] and IPs [192.168.204.158 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 20.932766 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.16" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
35381fccc67697ada7ebb11bae27b46605da547d32033be7a050eac948477945
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
kubeadm join 192.168.204.199:6444 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:788b32f99ee3c615255747b7e5b4165605cca38d712ac47b0157c61bd6bd2e9c \
--control-plane --certificate-key 35381fccc67697ada7ebb11bae27b46605da547d32033be7a050eac948477945
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.204.199:6444 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:788b32f99ee3c615255747b7e5b4165605cca38d712ac47b0157c61bd6bd2e9c
------------------
kubeadm init performs the following steps:
[init]: initialize with the specified version
[preflight]: run pre-flight checks and pull the required container images
[kubelet-start]: generate the kubelet configuration file /var/lib/kubelet/config.yaml; the kubelet cannot start without it, which is why it fails before initialization
[certificates]: generate the certificates used by Kubernetes, stored in /etc/kubernetes/pki
[kubeconfig]: generate the kubeconfig files in /etc/kubernetes, used for communication between components
[control-plane]: install the master components from the YAML manifests in /etc/kubernetes/manifests
[etcd]: install etcd from /etc/kubernetes/manifests/etcd.yaml
[wait-control-plane]: wait for the kubelet to start the control-plane components as static Pods
[apiclient]: check the health of the control-plane components
[upload-config]: store the configuration used in a ConfigMap
[kubelet]: configure the kubelets in the cluster via a ConfigMap
[patchnode]: record CNI information on the Node object through annotations
[mark-control-plane]: label the node with the master role and add the NoSchedule taint so that ordinary Pods are not scheduled on it by default
[bootstrap-token]: generate the token that kubeadm join uses later when adding nodes; note it down
[addons]: install the CoreDNS and kube-proxy add-ons
Note: whether initialization failed or the cluster is already fully built, you can always run kubeadm reset to clean a node and then re-run kubeadm init or kubeadm join.
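kubeadm reset does not clean up iptables/IPVS rules or leftover CNI state by itself; a commonly used cleanup sketch (adjust as needed) is:
kubeadm reset -f
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
ipvsadm --clear
rm -rf /etc/cni/net.d $HOME/.kube/config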
2.4 Configure kubectl
On any node, master or worker, kubectl only works after one of the following configurations:
As root, run:
cat << EOF >> ~/.bashrc
export KUBECONFIG=/etc/kubernetes/admin.conf
EOF
source ~/.bashrc
As a regular (non-root) user, run the following (from the init output):
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Once the cluster is fully set up, this can be done on every master and worker node so that kubectl works everywhere. For a worker node, copy /etc/kubernetes/admin.conf from any master to the node first (see the sketch below).
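For a worker node, that copy step might look like the following (run from any master; assumes root SSH access and uses k8s-worker01's address from the plan above):
# On k8s-master01: copy the admin kubeconfig to the worker node
scp /etc/kubernetes/admin.conf root@192.168.204.161:/etc/kubernetes/admin.conf
# On the worker node:
echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >> ~/.bashrc && source ~/.bashrc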
2.5 Check the current state
[root@k8s-master01 ~]# source ~/.bashrc
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 NotReady master 10m v1.16.2
[root@k8s-master01 ~]# kubectl -n kube-system get pod
NAME READY STATUS RESTARTS AGE
coredns-67c766df46-cs9ld 0/1 Pending 0 25s
coredns-67c766df46-ptlp7 0/1 Pending 0 26s
etcd-k8s-master01 1/1 Running 0 9m35s
kube-apiserver-k8s-master01 1/1 Running 0 9m45s
kube-controller-manager-k8s-master01 1/1 Running 3 9m35s
kube-proxy-95g6v 0/1 Pending 0 26s
kube-scheduler-k8s-master01 0/1 Running 0 9m45s
[root@k8s-master01 ~]# kubectl get cs
NAME AGE
scheduler <unknown>
controller-manager <unknown>
etcd-0 <unknown>
Because no network plugin is installed yet, the coredns pods stay Pending and the node is NotReady. (The <unknown> AGE shown by kubectl get cs is a known display quirk of kubectl 1.16, not an error in itself.)
2.6 Install a network plugin
Kubernetes supports several networking options; the commonly used flannel and Calico are both covered briefly below, and you only need to deploy one of them. (Calico is the more common choice in production and is what this guide focuses on.)
Run the following on master01.
Install the Calico plugin (note: BGP mode cannot be used on Alibaba Cloud):
wget https://docs.projectcalico.org/v3.8/manifests/calico.yaml
Because this is Kubernetes v1.16.2, some API versions have changed; adjust the apiVersion and selector fields in calico.yaml accordingly, and set the pod CIDR:
POD_CIDR="10.244.0.0/16"
sed -i -e "s?192.168.0.0/16?$POD_CIDR?g" calico.yaml
1) Turn IPIP mode off and set typha_service_name
- name: CALICO_IPV4POOL_IPIP
value: "off"
typha_service_name: "calico-typha"
Calico defaults to IPIP mode: each node gets a tunl0 interface and all inter-node container traffic goes through the tunnel (the docs recommend this when nodes sit in different IP subnets, for example different AWS regions).
Switching to BGP mode instead runs a bird (BGP client) on every node via the DaemonSet; each node advertises its assigned pod IP range to the other hosts, and traffic is forwarded directly through the node's own NIC (eth0 or ens33).
2) Adjust replicas
replicas: 1
revisionHistoryLimit: 2
3) Set the pod CIDR (CALICO_IPV4POOL_CIDR)
- name: CALICO_IPV4POOL_CIDR
value: "10.244.0.0/16"
4) If you pull images manually, check the image versions referenced in calico.yaml; otherwise they are downloaded automatically on apply.
5) Deploy Calico
kubectl apply -f calico.yaml
6) Check the pods
kubectl get po --all-namespaces
Some pods may still show Pending at this point because the worker nodes have not joined yet and lack the required components.
7) Verify that BGP mode is in use
# ip route show
---------------------
Check the node and pod status again; everything should now be Running:
[root@k8s-master01 calico]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready master 16h v1.16.2
[root@k8s-master01 calico]# kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-847886cb6-7xp9p 1/1 Running 0 105s
kube-system calico-node-7kmnv 1/1 Running 0 6m7s
kube-system coredns-67c766df46-cs9ld 1/1 Running 0 16h
kube-system coredns-67c766df46-ptlp7 1/1 Running 0 16h
kube-system etcd-k8s-master01 1/1 Running 2 16h
kube-system kube-apiserver-k8s-master01 1/1 Running 3 16h
kube-system kube-controller-manager-k8s-master01 1/1 Running 8 16h
kube-system kube-proxy-95g6v 1/1 Running 1 16h
kube-system kube-scheduler-k8s-master01 1/1 Running 8 16h
-------------------------------------------
3. Install the flannel network plugin (optional alternative):
The image referenced in kube-flannel.yml is pulled from the coreos (quay.io) registry and may fail to download; an equivalent image from Docker Hub can be substituted. Note that the manifest's network range is 10.244.0.0/16.
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
cat kube-flannel.yml | grep image
cat kube-flannel.yml | grep 10.244
sed -i 's#quay.io/coreos/flannel:v0.11.0-amd64#willdockerhub/flannel:v0.11.0-amd64#g' kube-flannel.yml
kubectl apply -f kube-flannel.yml
--------------------------------------------------------------
4. Join the other masters to the cluster
Take the control-plane join command from the init output or from kubeadm-init.log:
kubeadm join 192.168.204.199:6444 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:cc610e6abe4e6c525375d9027bbd79a36971debbe7a50e103b6e8f2fd8d88274 \
--control-plane --certificate-key 4f5b7ed09dde9ed6c7cdb0497286c5e1d1073523e1f894b16bb9408c076926bc
Join k8s-master02 and k8s-master03 in turn; example:
[root@k8s-master02 ~]# kubeadm join 192.168.204.199:6444 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:788b32f99ee3c615255747b7e5b4165605cca38d712ac47b0157c61bd6bd2e9c \
> --control-plane --certificate-key 35381fccc67697ada7ebb11bae27b46605da547d32033be7a050eac948477945
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[preflight] Running pre-flight checks before initializing the new control plane instance
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[download-certs] Downloading the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master02 localhost] and IPs [192.168.204.159 127.0.0.1 ::1]
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master02 localhost] and IPs [192.168.204.159 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master02 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.204.159 192.168.204.199]
[certs] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[certs] Using the existing "sa" key
[kubeconfig] Generating kubeconfig files
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[check-etcd] Checking that the etcd cluster is healthy
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.16" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[etcd] Announced new etcd member joining to the existing etcd cluster
[etcd] Creating static Pod manifest for "etcd"
[etcd] Waiting for the new etcd member to join the cluster. This can take up to 40s
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[mark-control-plane] Marking the node k8s-master02 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-master02 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
This node has joined the cluster and a new control plane instance was created:
* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane (master) label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
* A new etcd member was added to the local/stacked etcd cluster.
To start administering your cluster from this node, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Run 'kubectl get nodes' to see this node join the cluster.
All master nodes have been added:
[root@k8s-master01 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready master 59m v1.16.2
k8s-master02 Ready master 55m v1.16.2
k8s-master03 Ready master 53m v1.16.2
---------------------------------------------------------------------------------------------------------
5. Join the worker nodes
Take the worker join command from kubeadm-init.log:
kubeadm join 192.168.204.199:6444 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:377783ce9906407b52e7c6eb252d5f620322c8c9f5fa1082c4a38ee74eacca6a
The output looks like this:
[root@k8s-work01 ~]# kubeadm join 192.168.204.199:6444 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:788b32f99ee3c615255747b7e5b4165605cca38d712ac47b0157c61bd6bd2e9c
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.16" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
With the configuration complete, check the cluster state:
[root@k8s-master01 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready master 27m v1.16.2
k8s-master02 Ready master 23m v1.16.2
k8s-master03 Ready master 21m v1.16.2
k8s-work01 Ready <none> 3m37s v1.16.2
[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
calico-kube-controllers-847886cb6-m6tq9 1/1 Running 0 25m 10.244.32.129 k8s-master01 <none> <none>
calico-node-mpmvw 1/1 Running 0 21m 192.168.204.160 k8s-master03 <none> <none>
calico-node-nqjsf 1/1 Running 0 25m 192.168.204.158 k8s-master01 <none> <none>
calico-node-p2z68 1/1 Running 0 23m 192.168.204.159 k8s-master02 <none> <none>
calico-node-x7w4w 1/1 Running 3 3m42s 192.168.204.161 k8s-work01 <none> <none>
coredns-67c766df46-b4zqd 1/1 Running 0 26m 10.244.32.131 k8s-master01 <none> <none>
coredns-67c766df46-wf75h 1/1 Running 0 26m 10.244.32.130 k8s-master01 <none> <none>
etcd-k8s-master01 1/1 Running 0 26m 192.168.204.158 k8s-master01 <none> <none>
etcd-k8s-master02 1/1 Running 0 22m 192.168.204.159 k8s-master02 <none> <none>
etcd-k8s-master03 1/1 Running 0 21m 192.168.204.160 k8s-master03 <none> <none>
kube-apiserver-k8s-master01 1/1 Running 1 25m 192.168.204.158 k8s-master01 <none> <none>
kube-apiserver-k8s-master02 1/1 Running 0 22m 192.168.204.159 k8s-master02 <none> <none>
kube-apiserver-k8s-master03 1/1 Running 1 21m 192.168.204.160 k8s-master03 <none> <none>
kube-controller-manager-k8s-master01 1/1 Running 3 26m 192.168.204.158 k8s-master01 <none> <none>
kube-controller-manager-k8s-master02 1/1 Running 1 21m 192.168.204.159 k8s-master02 <none> <none>
kube-controller-manager-k8s-master03 1/1 Running 0 21m 192.168.204.160 k8s-master03 <none> <none>
kube-proxy-2xv6z 1/1 Running 0 21m 192.168.204.160 k8s-master03 <none> <none>
kube-proxy-7mgvt 1/1 Running 0 3m42s 192.168.204.161 k8s-work01 <none> <none>
kube-proxy-cgj7w 1/1 Running 0 26m 192.168.204.158 k8s-master01 <none> <none>
kube-proxy-g6kg6 1/1 Running 0 23m 192.168.204.159 k8s-master02 <none> <none>
kube-scheduler-k8s-master01 1/1 Running 1 25m 192.168.204.158 k8s-master01 <none> <none>
kube-scheduler-k8s-master02 1/1 Running 1 22m 192.168.204.159 k8s-master02 <none> <none>
kube-scheduler-k8s-master03 1/1 Running 1 21m 192.168.204.160 k8s-master03 <none> <none>
[root@k8s-master01 ~]# kubectl get cs
NAME AGE
scheduler <unknown>
controller-manager <unknown>
etcd-0 <unknown>
At this point the Kubernetes cluster is fully configured.
----------------------------------------
6. Set node labels
Label the cluster nodes from any master node.
Masters first need their existing role label removed before it can be set to a new value:
kubectl label nodes k8s-master01 node-role.kubernetes.io/master- --overwrite
kubectl label nodes k8s-master01 node-role.kubernetes.io/master=MASTER-A1
kubectl label nodes k8s-master02 node-role.kubernetes.io/master- --overwrite
kubectl label nodes k8s-master02 node-role.kubernetes.io/master=MASTER-A2
Label the worker nodes (k8s-worker01 at 192.168.204.161 and k8s-worker02 at 192.168.204.162) as node; they can be labeled directly:
kubectl label node k8s-worker01 node-role.kubernetes.io/node=NODE-A1
kubectl label node k8s-worker02 node-role.kubernetes.io/node=NODE-A2
Label the Ingress edge node (harbor):
kubectl label nodes k8s-harbor node-role.kubernetes.io/LB=LB-A1
Taint the Ingress edge node so that ordinary pods are not scheduled on it:
kubectl taint node k8s-harbor node-role.kubernetes.io/LB=LB-A1:NoSchedule
Ensure the masters normally do not accept workloads:
kubectl taint nodes k8s-master01 node-role.kubernetes.io/master=MASTER-A1:NoSchedule --overwrite
kubectl taint nodes k8s-master02 node-role.kubernetes.io/master=MASTER-A2:NoSchedule --overwrite
How to remove a label? Use the trailing-dash form (with --overwrite):
kubectl label nodes k8s-master01 node-role.kubernetes.io/master- --overwrite
kubectl label nodes k8s-master02 node-role.kubernetes.io/master- --overwrite
kubectl label nodes k8s-worker01 node-role.kubernetes.io/node- --overwrite
kubectl label nodes k8s-worker02 node-role.kubernetes.io/node- --overwrite
kubectl label nodes k8s-harbor node-role.kubernetes.io/LB- --overwrite
kubectl get nodes --show-labels
7. The etcd cluster
Run the following to check the health of the etcd cluster:
kubectl -n kube-system exec etcd-k8s-master01 -- etcdctl \
--endpoints=https://192.168.204.158:2379 \
--ca-file=/etc/kubernetes/pki/etcd/ca.crt \
--cert-file=/etc/kubernetes/pki/etcd/server.crt \
--key-file=/etc/kubernetes/pki/etcd/server.key cluster-health
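The flags above use the etcdctl v2 syntax. If the etcdctl in the pod defaults to the v3 API, an equivalent check would be roughly the following (a hedged variant; it assumes the etcd image provides a shell):
kubectl -n kube-system exec etcd-k8s-master01 -- sh -c \
  "ETCDCTL_API=3 etcdctl --endpoints=https://192.168.204.158:2379 \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/server.crt \
  --key=/etc/kubernetes/pki/etcd/server.key endpoint health"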
8. Verify HA
Shut down master03. (Testing shows the cluster tolerates losing one of the three masters; losing two takes the etcd cluster down and the cluster stops working properly.)
[root@k8s-master03 ~]# shutdown -h now
[root@k8s-master01 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready master 130m v1.16.2
k8s-master02 Ready master 126m v1.16.2
k8s-master03 NotReady master 124m v1.16.2
k8s-work01 Ready node 106m v1.16.2
[root@k8s-master01 ~]# ip a |grep ens33
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
inet 192.168.204.158/16 brd 192.168.255.255 scope global ens33
inet 192.168.204.199/24 scope global ens33
Part 3: Other operations
Setting a taint on the master01 node (and, as an aside, removing taints, which you may also need)
Syntax:
kubectl taint node [node] key=value:[effect]
where [effect] is one of: NoSchedule | PreferNoSchedule | NoExecute
NoSchedule: pods will never be scheduled onto the node
PreferNoSchedule: the scheduler tries to avoid the node
NoExecute: pods are not scheduled onto the node and existing pods are evicted
Add a taint:
kubectl taint nodes k8s-master01 node-role.kubernetes.io/master=:NoSchedule
View taints:
kubectl describe nodes
kubectl describe node k8s-master01
Remove a taint:
kubectl taint node node1 key1:NoSchedule-   # the value does not need to be specified
kubectl taint node node1 key1:NoExecute-
kubectl taint node node1 key1-              # removes every effect for the key
kubectl taint node node1 key2:NoSchedule-
To untaint the master nodes:
kubectl taint nodes --all node-role.kubernetes.io/master-
This concludes the guide to building a Kubernetes v1.16.2 high-availability cluster.