From fb40ad3199ee21861f65485cf28bdb89531e92c8 Mon Sep 17 00:00:00 2001 From: cby Date: Sat, 15 Apr 2023 22:17:04 +0800 Subject: [PATCH] Create v1.27.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md --- ...6-IPv4-Three-Masters-Two-Slaves-Offline.md | 4235 +++++++++++++++++ 1 file changed, 4235 insertions(+) create mode 100644 doc/v1.27.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md diff --git a/doc/v1.27.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md b/doc/v1.27.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md new file mode 100644 index 0000000..d129b22 --- /dev/null +++ b/doc/v1.27.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md @@ -0,0 +1,4235 @@ +# 二进制安装Kubernetes(k8s) v1.27.1 IPv4/IPv6双栈 可脱离互联网 + + + +[https://github.com/cby-chen/Kubernetes](https://github.com/cby-chen/Kubernetes) 开源不易,帮忙点个star,谢谢了 + +# 介绍 + +kubernetes(k8s)二进制高可用安装部署,支持IPv4+IPv6双栈。 + +我使用IPV6的目的是在公网进行访问,所以我配置了IPV6静态地址。 + +若您没有IPV6环境,或者不想使用IPv6,不对主机进行配置IPv6地址即可。 + +不配置IPV6,不影响后续,不过集群依旧是支持IPv6的。为后期留有扩展可能性。 + +若不要IPv6 ,不给网卡配置IPv6即可,不要对IPv6相关配置删除或操作,否则会出问题。 + +# 强烈建议在Github上查看文档 !!! + +## Github出问题会更新文档,并且后续尽可能第一时间更新新版本文档 !!! + +## 手动项目地址:https://github.com/cby-chen/Kubernetes + + +# 1.环境 + +| 主机名称 | IP地址 | 说明 | 软件 | +| -------- | --------- | ---------- | ------------------------------------------------------------ | +| | 192.168.1.60 | 外网节点 | 下载各种所需安装包 | +| Master01 | 192.168.0.31 | master节点 | kube-apiserver、kube-controller-manager、kube-scheduler、etcd、
kubelet、kube-proxy、nfs-client、haproxy、keepalived、nginx | +| Master02 | 192.168.0.32 | master节点 | kube-apiserver、kube-controller-manager、kube-scheduler、etcd、
kubelet、kube-proxy、nfs-client、haproxy、keepalived、nginx | +| Master03 | 192.168.0.33 | master节点 | kube-apiserver、kube-controller-manager、kube-scheduler、etcd、
kubelet、kube-proxy、nfs-client、haproxy、keepalived、nginx | +| Node01 | 192.168.0.34 | node节点 | kubelet、kube-proxy、nfs-client、nginx | +| Node02 | 192.168.0.35 | node节点 | kubelet、kube-proxy、nfs-client、nginx | +| | 192.168.0.36 | VIP | | + +| 软件 | 版本 | +| :----------------------------------------------------------- | :-------------- | +| kernel | 5.4.240 | +| CentOS 8 | v8、 v7、Ubuntu | +| kube-apiserver、kube-controller-manager、kube-scheduler、kubelet、kube-proxy | v1.27.1 | +| etcd | v3.5.8 | +| containerd | v1.6.20 | +| docker | v23.0.3 | +| cfssl | v1.6.4 | +| cni | v1.2.0 | +| crictl | v1.26.1 | +| haproxy | v1.8.27 | +| keepalived | v2.1.5 | + + + +网段 + +物理主机:192.168.0.0/24 + +service:10.96.0.0/12 + +pod:172.16.0.0/12 + +安装包已经整理好:https://github.com/cby-chen/Kubernetes/releases/download/v1.27.1/kubernetes-v1.27.1.tar + + + +## 1.1.k8s基础系统环境配置 + +### 1.2.配置IP + +```shell +ssh root@192.168.0.345 "nmcli con mod eth0 ipv4.addresses 192.168.0.31/24; nmcli con mod eth0 ipv4.gateway 3.7.191.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0" +ssh root@192.168.0.347 "nmcli con mod eth0 ipv4.addresses 192.168.0.32/24; nmcli con mod eth0 ipv4.gateway 3.7.191.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0" +ssh root@192.168.0.344 "nmcli con mod eth0 ipv4.addresses 192.168.0.33/24; nmcli con mod eth0 ipv4.gateway 3.7.191.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0" +ssh root@192.168.0.341 "nmcli con mod eth0 ipv4.addresses 192.168.0.34/24; nmcli con mod eth0 ipv4.gateway 3.7.191.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0" +ssh root@192.168.0.346 "nmcli con mod eth0 ipv4.addresses 192.168.0.35/24; nmcli con mod eth0 ipv4.gateway 3.7.191.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0" + +# 没有IPv6选择不配置即可 +ssh root@192.168.0.31 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::10; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0" +ssh root@192.168.0.32 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::20; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0" +ssh root@192.168.0.33 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::30; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0" +ssh root@192.168.0.34 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::40; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0" +ssh root@192.168.0.35 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::50; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0" + +# 查看网卡配置 +[root@localhost ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0 +TYPE=Ethernet +PROXY_METHOD=none +BROWSER_ONLY=no +BOOTPROTO=none +DEFROUTE=yes +IPV4_FAILURE_FATAL=no +IPV6INIT=yes +IPV6_AUTOCONF=no +IPV6_DEFROUTE=yes +IPV6_FAILURE_FATAL=no +IPV6_ADDR_GEN_MODE=stable-privacy +NAME=eth0 +UUID=424fd260-c480-4899-97e6-6fc9722031e8 +DEVICE=eth0 +ONBOOT=yes +IPADDR=192.168.0.31 
+PREFIX=24 +GATEWAY=192.168.8.1 +DNS1=8.8.8.8 +IPV6ADDR=fc00:43f4:1eea:1::10/128 +IPV6_DEFAULTGW=fc00:43f4:1eea:1::1 +DNS2=2400:3200::1 +[root@localhost ~]# + +``` + +### 1.3.设置主机名 + +```shell +hostnamectl set-hostname k8s-master01 +hostnamectl set-hostname k8s-master02 +hostnamectl set-hostname k8s-master03 +hostnamectl set-hostname k8s-node01 +hostnamectl set-hostname k8s-node02 +``` + + +### 1.4.配置yum源 + +```shell +# 对于 Ubuntu +sed -i 's/cn.archive.ubuntu.com/mirrors.ustc.edu.cn/g' /etc/apt/sources.list + +# 对于 CentOS 7 +sudo sed -e 's|^mirrorlist=|#mirrorlist=|g' \ + -e 's|^#baseurl=http://mirror.centos.org|baseurl=https://mirrors.tuna.tsinghua.edu.cn|g' \ + -i.bak \ + /etc/yum.repos.d/CentOS-*.repo + +# 对于 CentOS 8 +sudo sed -e 's|^mirrorlist=|#mirrorlist=|g' \ + -e 's|^#baseurl=http://mirror.centos.org/$contentdir|baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos|g' \ + -i.bak \ + /etc/yum.repos.d/CentOS-*.repo + +# 对于私有仓库 +sed -e 's|^mirrorlist=|#mirrorlist=|g' -e 's|^#baseurl=http://mirror.centos.org/\$contentdir|baseurl=http://192.168.1.123/centos|g' -i.bak /etc/yum.repos.d/CentOS-*.repo +``` + +### 1.5.安装一些必备工具 + +```shell +# 对于 Ubuntu +apt update && apt upgrade -y && apt install -y wget psmisc vim net-tools nfs-kernel-server telnet lvm2 git tar curl + +# 对于 CentOS 7 +yum update -y && yum -y install wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git tar curl + +# 对于 CentOS 8 +yum update -y && yum -y install wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git network-scripts tar curl +``` + +#### 1.5.1 下载离线所需文件(可选) + + 在互联网服务器上安装一个一模一样的系统进行下载所需包 + +##### CentOS7 +```shell +# 下载必要工具 +yum -y install createrepo yum-utils wget epel* + +# 下载全量依赖包 +repotrack createrepo wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git tar curl gcc keepalived haproxy bash-completion chrony sshpass ipvsadm ipset sysstat conntrack libseccomp + +# 删除libseccomp +rm -rf libseccomp-*.rpm + +# 下载libseccomp +wget http://rpmfind.net/linux/centos/8-stream/BaseOS/x86_64/os/Packages/libseccomp-2.5.1-1.el8.x86_64.rpm + +# 创建yum源信息 +createrepo -u -d /data/centos7/ + +# 拷贝包到内网机器上 +scp -r /data/centos7/ root@192.168.0.31: +scp -r /data/centos7/ root@192.168.0.32: +scp -r /data/centos7/ root@192.168.0.33: +scp -r /data/centos7/ root@192.168.0.34: +scp -r /data/centos7/ root@192.168.0.35: + +# 在内网机器上创建repo配置文件 +rm -rf /etc/yum.repos.d/* +cat > /etc/yum.repos.d/123.repo << EOF +[cby] +name=CentOS-$releasever - Media +baseurl=file:///root/centos7/ +gpgcheck=0 +enabled=1 +EOF + +# 安装下载好的包 +yum clean all +yum makecache +yum install /root/centos7/* --skip-broken -y + +#### 备注 ##### +# 安装完成后,可能还会出现yum无法使用那么再次执行 +rm -rf /etc/yum.repos.d/* +cat > /etc/yum.repos.d/123.repo << EOF +[cby] +name=CentOS-$releasever - Media +baseurl=file:///root/centos7/ +gpgcheck=0 +enabled=1 +EOF +yum clean all +yum makecache +yum install /root/centos7/* --skip-broken -y + +#### 备注 ##### +# 安装 chrony 和 libseccomp +# yum install /root/centos7/libseccomp-2.5.1*.rpm -y +# yum install /root/centos7/chrony-*.rpm -y +``` +##### CentOS8 +```shell + +# 下载必要工具 +yum -y install createrepo yum-utils wget epel* + +# 下载全量依赖包 +repotrack wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git network-scripts tar curl gcc keepalived haproxy bash-completion chrony sshpass ipvsadm ipset sysstat conntrack libseccomp + +# 创建yum源信息 +createrepo -u -d /data/centos8/ + +# 拷贝包到内网机器上 +scp -r centos8/ root@192.168.0.31: 
+scp -r centos8/ root@192.168.0.32: +scp -r centos8/ root@192.168.0.33: +scp -r centos8/ root@192.168.0.34: +scp -r centos8/ root@192.168.0.35: + +# 在内网机器上创建repo配置文件 +rm -rf /etc/yum.repos.d/* +cat > /etc/yum.repos.d/123.repo << EOF +[cby] +name=CentOS-$releasever - Media +baseurl=file:///root/centos8/ +gpgcheck=0 +enabled=1 +EOF + + +# 安装下载好的包 +yum clean all +yum makecache +yum install /root/centos8/* --skip-broken -y + +#### 备注 ##### +# 安装完成后,可能还会出现yum无法使用那么再次执行 +rm -rf /etc/yum.repos.d/* +cat > /etc/yum.repos.d/123.repo << EOF +[cby] +name=CentOS-$releasever - Media +baseurl=file:///root/centos8/ +gpgcheck=0 +enabled=1 +EOF +yum clean all +yum makecache +yum install /root/centos8/* --skip-broken -y +``` + +##### Ubuntu 下载包和依赖 +```shell + +#!/bin/bash + +logfile=123.log +ret="" +function getDepends() +{ + echo "fileName is" $1>>$logfile + # use tr to del < > + ret=`apt-cache depends $1|grep Depends |cut -d: -f2 |tr -d "<>"` + echo $ret|tee -a $logfile +} +# 需要获取其所依赖包的包 +libs="wget psmisc vim net-tools nfs-kernel-server telnet lvm2 git tar curl gcc keepalived haproxy bash-completion chrony sshpass ipvsadm ipset sysstat conntrack libseccomp" + +# download libs dependen. deep in 3 +i=0 +while [ $i -lt 3 ] ; +do + let i++ + echo $i + # download libs + newlist=" " + for j in $libs + do + added="$(getDepends $j)" + newlist="$newlist $added" + apt install $added --reinstall -d -y + done + + libs=$newlist +done + +# 创建源信息 +apt install dpkg-dev +sudo cp /var/cache/apt/archives/*.deb /data/ubuntu/ -r +dpkg-scanpackages . /dev/null |gzip > /data/ubuntu/Packages.gz -r + +# 拷贝包到内网机器上 +scp -r ubuntu/ root@192.168.0.31: +scp -r ubuntu/ root@192.168.0.32: +scp -r ubuntu/ root@192.168.0.33: +scp -r ubuntu/ root@192.168.0.34: +scp -r ubuntu/ root@192.168.0.35: + +# 在内网机器上配置apt源 +vim /etc/apt/sources.list +cat /etc/apt/sources.list +deb file:////root/ ubuntu/ + +# 安装deb包 +apt install ./*.deb + +``` + + +### 1.6.选择性下载需要工具 + +```shell +#!/bin/bash + +# 查看版本地址: +# +# https://github.com/containernetworking/plugins/releases/ +# https://github.com/containerd/containerd/releases/ +# https://github.com/kubernetes-sigs/cri-tools/releases/ +# https://github.com/Mirantis/cri-dockerd/releases/ +# https://github.com/etcd-io/etcd/releases/ +# https://github.com/cloudflare/cfssl/releases/ +# https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG +# https://download.docker.com/linux/static/stable/x86_64/ +# https://github.com/opencontainers/runc/releases/ +# https://mirrors.tuna.tsinghua.edu.cn/elrepo/kernel/el7/x86_64/RPMS/ + +# Version numbers +kernel_version='5.4.240' +runc_version='1.1.6' +docker_version='23.0.3' +cni_plugins_version='v1.2.0' +cri_containerd_cni_version='1.6.20' +crictl_version='v1.26.1' +cri_dockerd_version='0.3.1' +etcd_version='v3.5.8' +cfssl_version='1.6.4' +helm_version='canary' +kubernetes_server_version='v1.27.1' +nginx_version='1.24.0' + +# URLs +base_url='https://ghproxy.com/https://github.com' +kernel_url="http://mirrors.tuna.tsinghua.edu.cn/elrepo/kernel/el7/x86_64/RPMS/kernel-lt-${kernel_version}-1.el7.elrepo.x86_64.rpm" +runc_url="${base_url}/opencontainers/runc/releases/download/v${runc_version}/runc.amd64" +docker_url="https://download.docker.com/linux/static/stable/x86_64/docker-${docker_version}.tgz" +cni_plugins_url="${base_url}/containernetworking/plugins/releases/download/${cni_plugins_version}/cni-plugins-linux-amd64-${cni_plugins_version}.tgz" 
+cri_containerd_cni_url="${base_url}/containerd/containerd/releases/download/v${cri_containerd_cni_version}/cri-containerd-cni-${cri_containerd_cni_version}-linux-amd64.tar.gz" +crictl_url="${base_url}/kubernetes-sigs/cri-tools/releases/download/${crictl_version}/crictl-${crictl_version}-linux-amd64.tar.gz" +cri_dockerd_url="${base_url}/Mirantis/cri-dockerd/releases/download/v${cri_dockerd_version}/cri-dockerd-${cri_dockerd_version}.amd64.tgz" +etcd_url="${base_url}/etcd-io/etcd/releases/download/${etcd_version}/etcd-${etcd_version}-linux-amd64.tar.gz" +cfssl_url="${base_url}/cloudflare/cfssl/releases/download/v${cfssl_version}/cfssl_${cfssl_version}_linux_amd64" +cfssljson_url="${base_url}/cloudflare/cfssl/releases/download/v${cfssl_version}/cfssljson_${cfssl_version}_linux_amd64" +helm_url="https://get.helm.sh/helm-${helm_version}-linux-amd64.tar.gz" +kubernetes_server_url="https://dl.k8s.io/${kubernetes_server_version}/kubernetes-server-linux-amd64.tar.gz" +nginx_url="http://nginx.org/download/nginx-${nginx_version}.tar.gz" + +# Download packages +packages=( + $kernel_url + $runc_url + $docker_url + $cni_plugins_url + $cri_containerd_cni_url + $crictl_url + $cri_dockerd_url + $etcd_url + $cfssl_url + $cfssljson_url + $helm_url + $kubernetes_server_url + $nginx_url +) + +for package_url in "${packages[@]}"; do + filename=$(basename "$package_url") + if wget -cq --progress=bar:force:noscroll -nc "$package_url"; then + echo "Downloaded $filename" + else + echo "Failed to download $filename" + exit 1 + fi +done + +``` + +### 1.7.关闭防火墙 + +```shell +# Ubuntu忽略,CentOS执行 +systemctl disable --now firewalld +``` + +### 1.8.关闭SELinux + +```shell +# Ubuntu忽略,CentOS执行 +setenforce 0 +sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config +``` + +### 1.9.关闭交换分区 + +```shell +sed -ri 's/.*swap.*/#&/' /etc/fstab +swapoff -a && sysctl -w vm.swappiness=0 + +cat /etc/fstab +# /dev/mapper/centos-swap swap swap defaults 0 0 +``` + +### 1.10.网络配置(俩种方式二选一) + +```shell +# Ubuntu忽略,CentOS执行 + +# 方式一 +# systemctl disable --now NetworkManager +# systemctl start network && systemctl enable network + +# 方式二 +cat > /etc/NetworkManager/conf.d/calico.conf << EOF +[keyfile] +unmanaged-devices=interface-name:cali*;interface-name:tunl* +EOF +systemctl restart NetworkManager +``` + +### 1.11.进行时间同步 + +```shell +# 服务端 +# apt install chrony -y +yum install chrony -y +cat > /etc/chrony.conf << EOF +pool ntp.aliyun.com iburst +driftfile /var/lib/chrony/drift +makestep 1.0 3 +rtcsync +allow 192.168.0.0/24 +local stratum 10 +keyfile /etc/chrony.keys +leapsectz right/UTC +logdir /var/log/chrony +EOF + +systemctl restart chronyd ; systemctl enable chronyd + +# 客户端 +# apt install chrony -y +yum install chrony -y +cat > /etc/chrony.conf << EOF +pool 192.168.0.31 iburst +driftfile /var/lib/chrony/drift +makestep 1.0 3 +rtcsync +keyfile /etc/chrony.keys +leapsectz right/UTC +logdir /var/log/chrony +EOF + +systemctl restart chronyd ; systemctl enable chronyd + +#使用客户端进行验证 +chronyc sources -v +``` + +### 1.12.配置ulimit + +```shell +ulimit -SHn 65535 +cat >> /etc/security/limits.conf <> /etc/modules-load.d/ipvs.conf < /etc/sysctl.d/k8s.conf +net.ipv4.ip_forward = 1 +net.bridge.bridge-nf-call-iptables = 1 +fs.may_detach_mounts = 1 +vm.overcommit_memory=1 +vm.panic_on_oom=0 +fs.inotify.max_user_watches=89100 +fs.file-max=52706963 +fs.nr_open=52706963 +net.netfilter.nf_conntrack_max=2310720 + +net.ipv4.tcp_keepalive_time = 600 +net.ipv4.tcp_keepalive_probes = 3 +net.ipv4.tcp_keepalive_intvl =15 +net.ipv4.tcp_max_tw_buckets = 
36000 +net.ipv4.tcp_tw_reuse = 1 +net.ipv4.tcp_max_orphans = 327680 +net.ipv4.tcp_orphan_retries = 3 +net.ipv4.tcp_syncookies = 1 +net.ipv4.tcp_max_syn_backlog = 16384 +net.ipv4.ip_conntrack_max = 65536 +net.ipv4.tcp_max_syn_backlog = 16384 +net.ipv4.tcp_timestamps = 0 +net.core.somaxconn = 16384 + +net.ipv6.conf.all.disable_ipv6 = 0 +net.ipv6.conf.default.disable_ipv6 = 0 +net.ipv6.conf.lo.disable_ipv6 = 0 +net.ipv6.conf.all.forwarding = 1 +EOF + +sysctl --system +``` + +### 1.18.所有节点配置hosts本地解析 + +```shell +cat > /etc/hosts < /etc/systemd/system/containerd.service < /etc/containerd/certs.d/docker.io/hosts.toml << EOF +server = "https://docker.io" +[host."https://hub-mirror.c.163.com"] + capabilities = ["pull", "resolve"] +EOF +``` + +### 2.1.5启动并设置为开机启动 + +```shell +systemctl daemon-reload +systemctl enable --now containerd +systemctl restart containerd +``` + +### 2.1.6配置crictl客户端连接的运行时位置 + +```shell +# wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.24.2/crictl-v1.24.2-linux-amd64.tar.gz + +#解压 +tar xf crictl-v*-linux-amd64.tar.gz -C /usr/bin/ +#生成配置文件 +cat > /etc/crictl.yaml </etc/systemd/system/containerd.service < /etc/systemd/system/docker.service < /etc/systemd/system/docker.socket </etc/docker/daemon.json < /usr/lib/systemd/system/cri-docker.service < /usr/lib/systemd/system/cri-docker.socket < admin-csr.json << EOF +{ + "CN": "admin", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "CN", + "ST": "Beijing", + "L": "Beijing", + "O": "system:masters", + "OU": "Kubernetes-manual" + } + ] +} +EOF + +cat > ca-config.json << EOF +{ + "signing": { + "default": { + "expiry": "876000h" + }, + "profiles": { + "kubernetes": { + "usages": [ + "signing", + "key encipherment", + "server auth", + "client auth" + ], + "expiry": "876000h" + } + } + } +} +EOF + +cat > etcd-ca-csr.json << EOF +{ + "CN": "etcd", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "CN", + "ST": "Beijing", + "L": "Beijing", + "O": "etcd", + "OU": "Etcd Security" + } + ], + "ca": { + "expiry": "876000h" + } +} +EOF + +cat > front-proxy-ca-csr.json << EOF +{ + "CN": "kubernetes", + "key": { + "algo": "rsa", + "size": 2048 + }, + "ca": { + "expiry": "876000h" + } +} +EOF + +cat > kubelet-csr.json << EOF +{ + "CN": "system:node:\$NODE", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "CN", + "L": "Beijing", + "ST": "Beijing", + "O": "system:nodes", + "OU": "Kubernetes-manual" + } + ] +} +EOF + +cat > manager-csr.json << EOF +{ + "CN": "system:kube-controller-manager", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "CN", + "ST": "Beijing", + "L": "Beijing", + "O": "system:kube-controller-manager", + "OU": "Kubernetes-manual" + } + ] +} +EOF + +cat > apiserver-csr.json << EOF +{ + "CN": "kube-apiserver", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "CN", + "ST": "Beijing", + "L": "Beijing", + "O": "Kubernetes", + "OU": "Kubernetes-manual" + } + ] +} +EOF + + +cat > ca-csr.json << EOF +{ + "CN": "kubernetes", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "CN", + "ST": "Beijing", + "L": "Beijing", + "O": "Kubernetes", + "OU": "Kubernetes-manual" + } + ], + "ca": { + "expiry": "876000h" + } +} +EOF + +cat > etcd-csr.json << EOF +{ + "CN": "etcd", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "CN", + "ST": "Beijing", + "L": "Beijing", + "O": "etcd", + "OU": "Etcd Security" + } + ] +} +EOF + + +cat > front-proxy-client-csr.json << EOF +{ 
+ "CN": "front-proxy-client", + "key": { + "algo": "rsa", + "size": 2048 + } +} +EOF + + +cat > kube-proxy-csr.json << EOF +{ + "CN": "system:kube-proxy", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "CN", + "ST": "Beijing", + "L": "Beijing", + "O": "system:kube-proxy", + "OU": "Kubernetes-manual" + } + ] +} +EOF + + +cat > scheduler-csr.json << EOF +{ + "CN": "system:kube-scheduler", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "CN", + "ST": "Beijing", + "L": "Beijing", + "O": "system:kube-scheduler", + "OU": "Kubernetes-manual" + } + ] +} +EOF + +cd .. +mkdir bootstrap +cd bootstrap +cat > bootstrap.secret.yaml << EOF +apiVersion: v1 +kind: Secret +metadata: + name: bootstrap-token-c8ad9c + namespace: kube-system +type: bootstrap.kubernetes.io/token +stringData: + description: "The default bootstrap token generated by 'kubelet '." + token-id: c8ad9c + token-secret: 2e4d610cf3e7426e + usage-bootstrap-authentication: "true" + usage-bootstrap-signing: "true" + auth-extra-groups: system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubelet-bootstrap +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-bootstrapper +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:bootstrappers:default-node-token +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: node-autoapprove-bootstrap +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:certificates.k8s.io:certificatesigningrequests:nodeclient +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:bootstrappers:default-node-token +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: node-autoapprove-certificate-rotation +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:nodes +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:kube-apiserver-to-kubelet +rules: + - apiGroups: + - "" + resources: + - nodes/proxy + - nodes/stats + - nodes/log + - nodes/spec + - nodes/metrics + verbs: + - "*" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:kube-apiserver + namespace: "" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kube-apiserver-to-kubelet +subjects: + - apiGroup: rbac.authorization.k8s.io + kind: User + name: kube-apiserver +EOF + + +cd .. 
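+# (Optional, illustrative sketch) The bootstrap token defined in bootstrap.secret.yaml above is
+# "c8ad9c.2e4d610cf3e7426e", i.e. <token-id>.<token-secret>: a 6-character id and a 16-character
+# secret, both limited to [a-z0-9]. If you prefer to use your own token, the commands below are
+# one way to generate a valid pair; if you change it, update token-id/token-secret in
+# bootstrap.secret.yaml AND the --token value used later in section 7.1 so they stay consistent.
+# TOKEN_ID=$(head -c 16 /dev/urandom | od -An -tx1 | tr -d ' \n' | cut -c1-6)
+# TOKEN_SECRET=$(head -c 16 /dev/urandom | od -An -tx1 | tr -d ' \n' | cut -c1-16)
+# echo "example bootstrap token: ${TOKEN_ID}.${TOKEN_SECRET}"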
+mkdir coredns +cd coredns +cat > coredns.yaml << EOF +apiVersion: v1 +kind: ServiceAccount +metadata: + name: coredns + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +rules: + - apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: +- kind: ServiceAccount + name: coredns + namespace: kube-system +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns + namespace: kube-system +data: + Corefile: | + .:53 { + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local in-addr.arpa ip6.arpa { + fallthrough in-addr.arpa ip6.arpa + } + prometheus :9153 + forward . /etc/resolv.conf { + max_concurrent 1000 + } + cache 30 + loop + reload + loadbalance + } +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coredns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/name: "CoreDNS" +spec: + # replicas: not specified here: + # 1. Default is 1. + # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on. + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: kube-dns + template: + metadata: + labels: + k8s-app: kube-dns + spec: + priorityClassName: system-cluster-critical + serviceAccountName: coredns + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + nodeSelector: + kubernetes.io/os: linux + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: k8s-app + operator: In + values: ["kube-dns"] + topologyKey: kubernetes.io/hostname + containers: + - name: coredns + image: registry.cn-hangzhou.aliyuncs.com/chenby/coredns:v1.10.0 + imagePullPolicy: IfNotPresent + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + args: [ "-conf", "/etc/coredns/Corefile" ] + volumeMounts: + - name: config-volume + mountPath: /etc/coredns + readOnly: true + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + livenessProbe: + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + dnsPolicy: Default + volumes: + - name: config-volume + configMap: + name: coredns + items: + - key: Corefile + path: Corefile +--- +apiVersion: v1 +kind: Service +metadata: + name: kube-dns + namespace: kube-system + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" +spec: + selector: + k8s-app: kube-dns + clusterIP: 10.96.0.10 + ports: + 
- name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP + - name: metrics + port: 9153 + protocol: TCP +EOF + + +cd .. +mkdir metrics-server +cd metrics-server +cat > metrics-server.yaml << EOF +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + k8s-app: metrics-server + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: system:aggregated-metrics-reader +rules: +- apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server +rules: +- apiGroups: + - "" + resources: + - pods + - nodes + - nodes/stats + - namespaces + - configmaps + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: metrics-server + name: metrics-server-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + k8s-app: metrics-server + name: metrics-server:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:metrics-server +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +spec: + ports: + - name: https + port: 443 + protocol: TCP + targetPort: https + selector: + k8s-app: metrics-server +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: metrics-server + strategy: + rollingUpdate: + maxUnavailable: 0 + template: + metadata: + labels: + k8s-app: metrics-server + spec: + containers: + - args: + - --cert-dir=/tmp + - --secure-port=4443 + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s + - --kubelet-insecure-tls + - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem # change to front-proxy-ca.crt for kubeadm + - --requestheader-username-headers=X-Remote-User + - --requestheader-group-headers=X-Remote-Group + - --requestheader-extra-headers-prefix=X-Remote-Extra- + image: registry.cn-hangzhou.aliyuncs.com/chenby/metrics-server:v0.5.2 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /livez + port: https + scheme: HTTPS + periodSeconds: 10 + name: metrics-server + ports: + - containerPort: 4443 + name: https + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /readyz + 
port: https + scheme: HTTPS + initialDelaySeconds: 20 + periodSeconds: 10 + resources: + requests: + cpu: 100m + memory: 200Mi + securityContext: + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + volumeMounts: + - mountPath: /tmp + name: tmp-dir + - name: ca-ssl + mountPath: /etc/kubernetes/pki + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: metrics-server + volumes: + - emptyDir: {} + name: tmp-dir + - name: ca-ssl + hostPath: + path: /etc/kubernetes/pki + +--- +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + labels: + k8s-app: metrics-server + name: v1beta1.metrics.k8s.io +spec: + group: metrics.k8s.io + groupPriorityMinimum: 100 + insecureSkipTLSVerify: true + service: + name: metrics-server + namespace: kube-system + version: v1beta1 + versionPriority: 100 +EOF +``` + +# 3.相关证书生成 + +```shell +# master01节点下载证书生成工具 +# wget "https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.2_linux_amd64" -O /usr/local/bin/cfssl +# wget "https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.2_linux_amd64" -O /usr/local/bin/cfssljson + +# 软件包内有 +cp cfssl /usr/local/bin/cfssl +cp cfssljson /usr/local/bin/cfssljson + +chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson +``` + +## 3.1.生成etcd证书 + +特别说明除外,以下操作在所有master节点操作 + +### 3.1.1所有master节点创建证书存放目录 + +```shell +mkdir /etc/etcd/ssl -p +``` + +### 3.1.2master01节点生成etcd证书 + +```shell +cd pki +# 生成etcd证书和etcd证书的key(如果你觉得以后可能会扩容,可以在ip那多写几个预留出来) +# 若没有IPv6 可删除可保留 +cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/etcd/ssl/etcd-ca +cfssl gencert \ + -ca=/etc/etcd/ssl/etcd-ca.pem \ + -ca-key=/etc/etcd/ssl/etcd-ca-key.pem \ + -config=ca-config.json \ + -hostname=127.0.0.1,k8s-master01,k8s-master02,k8s-master03,192.168.0.31,192.168.0.32,192.168.0.33,fc00:43f4:1eea:1::10,fc00:43f4:1eea:1::20,fc00:43f4:1eea:1::30,::1 \ + -profile=kubernetes \ + etcd-csr.json | cfssljson -bare /etc/etcd/ssl/etcd +``` + +### 3.1.3将证书复制到其他节点 + +```shell +Master='k8s-master02 k8s-master03' +for NODE in $Master; do ssh $NODE "mkdir -p /etc/etcd/ssl"; for FILE in etcd-ca-key.pem etcd-ca.pem etcd-key.pem etcd.pem; do scp /etc/etcd/ssl/${FILE} $NODE:/etc/etcd/ssl/${FILE}; done; done +``` + +## 3.2.生成k8s相关证书 + +特别说明除外,以下操作在所有master节点操作 + +### 3.2.1所有k8s节点创建证书存放目录 + +```shell +mkdir -p /etc/kubernetes/pki +``` + +### 3.2.2master01节点生成k8s证书 + +```shell +cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca + +# 生成一个根证书 ,多写了一些IP作为预留IP,为将来添加node做准备 +# 10.96.0.1是service网段的第一个地址,需要计算,192.168.0.36为高可用vip地址 +# 若没有IPv6 可删除可保留 + +cfssl gencert \ +-ca=/etc/kubernetes/pki/ca.pem \ +-ca-key=/etc/kubernetes/pki/ca-key.pem \ +-config=ca-config.json \ +-hostname=10.96.0.1,192.168.0.36,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,x.oiox.cn,k.oiox.cn,l.oiox.cn,o.oiox.cn,192.168.0.31,192.168.0.32,192.168.0.33,192.168.0.34,192.168.0.35,192.168.0.36,192.168.0.37,192.168.0.38,192.168.0.39,192.168.1.70,fc00:43f4:1eea:1::10,fc00:43f4:1eea:1::20,fc00:43f4:1eea:1::30,fc00:43f4:1eea:1::40,fc00:43f4:1eea:1::50,fc00:43f4:1eea:1::60,fc00:43f4:1eea:1::70,fc00:43f4:1eea:1::80,fc00:43f4:1eea:1::90,fc00:43f4:1eea:1::100,::1 \ +-profile=kubernetes apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver +``` + +### 3.2.3生成apiserver聚合证书 + +```shell +cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca + +# 
有一个警告,可以忽略 + +cfssl gencert \ +-ca=/etc/kubernetes/pki/front-proxy-ca.pem \ +-ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem \ +-config=ca-config.json \ +-profile=kubernetes front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client +``` + +### 3.2.4生成controller-manage的证书 + +在《5.高可用配置》选择使用那种高可用方案 +若使用 haproxy、keepalived 那么为 `--server=https://192.168.0.36:8443` +若使用 nginx方案,那么为 `--server=https://127.0.0.1:8443` + +```shell +cfssl gencert \ + -ca=/etc/kubernetes/pki/ca.pem \ + -ca-key=/etc/kubernetes/pki/ca-key.pem \ + -config=ca-config.json \ + -profile=kubernetes \ + manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager + +# 设置一个集群项 + +# 在《5.高可用配置》选择使用那种高可用方案 +# 若使用 haproxy、keepalived 那么为 `--server=https://192.168.0.36:8443` +# 若使用 nginx方案,那么为 `--server=https://127.0.0.1:8443` + +kubectl config set-cluster kubernetes \ + --certificate-authority=/etc/kubernetes/pki/ca.pem \ + --embed-certs=true \ + --server=https://127.0.0.1:8443 \ + --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig + +# 设置一个环境项,一个上下文 + +kubectl config set-context system:kube-controller-manager@kubernetes \ + --cluster=kubernetes \ + --user=system:kube-controller-manager \ + --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig + +# 设置一个用户项 + +kubectl config set-credentials system:kube-controller-manager \ + --client-certificate=/etc/kubernetes/pki/controller-manager.pem \ + --client-key=/etc/kubernetes/pki/controller-manager-key.pem \ + --embed-certs=true \ + --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig + +# 设置默认环境 + +kubectl config use-context system:kube-controller-manager@kubernetes \ + --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig + +cfssl gencert \ + -ca=/etc/kubernetes/pki/ca.pem \ + -ca-key=/etc/kubernetes/pki/ca-key.pem \ + -config=ca-config.json \ + -profile=kubernetes \ + scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler + +# 在《5.高可用配置》选择使用那种高可用方案 +# 若使用 haproxy、keepalived 那么为 `--server=https://192.168.0.36:8443` +# 若使用 nginx方案,那么为 `--server=https://127.0.0.1:8443` + +kubectl config set-cluster kubernetes \ + --certificate-authority=/etc/kubernetes/pki/ca.pem \ + --embed-certs=true \ + --server=https://127.0.0.1:8443 \ + --kubeconfig=/etc/kubernetes/scheduler.kubeconfig + +kubectl config set-credentials system:kube-scheduler \ + --client-certificate=/etc/kubernetes/pki/scheduler.pem \ + --client-key=/etc/kubernetes/pki/scheduler-key.pem \ + --embed-certs=true \ + --kubeconfig=/etc/kubernetes/scheduler.kubeconfig + +kubectl config set-context system:kube-scheduler@kubernetes \ + --cluster=kubernetes \ + --user=system:kube-scheduler \ + --kubeconfig=/etc/kubernetes/scheduler.kubeconfig + +kubectl config use-context system:kube-scheduler@kubernetes \ + --kubeconfig=/etc/kubernetes/scheduler.kubeconfig + +cfssl gencert \ + -ca=/etc/kubernetes/pki/ca.pem \ + -ca-key=/etc/kubernetes/pki/ca-key.pem \ + -config=ca-config.json \ + -profile=kubernetes \ + admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin + +# 在《5.高可用配置》选择使用那种高可用方案 +# 若使用 haproxy、keepalived 那么为 `--server=https://192.168.0.36:8443` +# 若使用 nginx方案,那么为 `--server=https://127.0.0.1:8443` + +kubectl config set-cluster kubernetes \ + --certificate-authority=/etc/kubernetes/pki/ca.pem \ + --embed-certs=true \ + --server=https://127.0.0.1:8443 \ + --kubeconfig=/etc/kubernetes/admin.kubeconfig + +kubectl config set-credentials kubernetes-admin \ + --client-certificate=/etc/kubernetes/pki/admin.pem \ + --client-key=/etc/kubernetes/pki/admin-key.pem \ + 
--embed-certs=true \ + --kubeconfig=/etc/kubernetes/admin.kubeconfig + +kubectl config set-context kubernetes-admin@kubernetes \ + --cluster=kubernetes \ + --user=kubernetes-admin \ + --kubeconfig=/etc/kubernetes/admin.kubeconfig + +kubectl config use-context kubernetes-admin@kubernetes --kubeconfig=/etc/kubernetes/admin.kubeconfig +``` + +### 3.2.5创建kube-proxy证书 + +在《5.高可用配置》选择使用那种高可用方案 +若使用 haproxy、keepalived 那么为 `--server=https://192.168.0.36:8443` +若使用 nginx方案,那么为 `--server=https://127.0.0.1:8443` + +```shell +cfssl gencert \ + -ca=/etc/kubernetes/pki/ca.pem \ + -ca-key=/etc/kubernetes/pki/ca-key.pem \ + -config=ca-config.json \ + -profile=kubernetes \ + kube-proxy-csr.json | cfssljson -bare /etc/kubernetes/pki/kube-proxy + +# 在《5.高可用配置》选择使用那种高可用方案 +# 若使用 haproxy、keepalived 那么为 `--server=https://192.168.0.36:8443` +# 若使用 nginx方案,那么为 `--server=https://127.0.0.1:8443` + +kubectl config set-cluster kubernetes \ + --certificate-authority=/etc/kubernetes/pki/ca.pem \ + --embed-certs=true \ + --server=https://127.0.0.1:8443 \ + --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig + +kubectl config set-credentials kube-proxy \ + --client-certificate=/etc/kubernetes/pki/kube-proxy.pem \ + --client-key=/etc/kubernetes/pki/kube-proxy-key.pem \ + --embed-certs=true \ + --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig + +kubectl config set-context kube-proxy@kubernetes \ + --cluster=kubernetes \ + --user=kube-proxy \ + --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig + +kubectl config use-context kube-proxy@kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig +``` + + + +### 3.2.5创建ServiceAccount Key ——secret + +```shell +openssl genrsa -out /etc/kubernetes/pki/sa.key 2048 +openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub +``` + +### 3.2.6将证书发送到其他master节点 + +```shell +#其他节点创建目录 +# mkdir /etc/kubernetes/pki/ -p + +for NODE in k8s-master02 k8s-master03; do for FILE in $(ls /etc/kubernetes/pki | grep -v etcd); do scp /etc/kubernetes/pki/${FILE} $NODE:/etc/kubernetes/pki/${FILE}; done; for FILE in admin.kubeconfig controller-manager.kubeconfig scheduler.kubeconfig; do scp /etc/kubernetes/${FILE} $NODE:/etc/kubernetes/${FILE}; done; done +``` + +### 3.2.7查看证书 + +```shell +ls /etc/kubernetes/pki/ +admin.csr controller-manager.csr kube-proxy.csr +admin-key.pem controller-manager-key.pem kube-proxy-key.pem +admin.pem controller-manager.pem kube-proxy.pem +apiserver.csr front-proxy-ca.csr sa.key +apiserver-key.pem front-proxy-ca-key.pem sa.pub +apiserver.pem front-proxy-ca.pem scheduler.csr +ca.csr front-proxy-client.csr scheduler-key.pem +ca-key.pem front-proxy-client-key.pem scheduler.pem +ca.pem front-proxy-client.pem + +# 一共26个就对了 +ls /etc/kubernetes/pki/ |wc -l +26 +``` + +# 4.k8s系统组件配置 + +## 4.1.etcd配置 + +### 4.1.1master01配置 + +```shell +# 如果要用IPv6那么把IPv4地址修改为IPv6即可 +cat > /etc/etcd/etcd.config.yml << EOF +name: 'k8s-master01' +data-dir: /var/lib/etcd +wal-dir: /var/lib/etcd/wal +snapshot-count: 5000 +heartbeat-interval: 100 +election-timeout: 1000 +quota-backend-bytes: 0 +listen-peer-urls: 'https://192.168.0.31:2380' +listen-client-urls: 'https://192.168.0.31:2379,http://127.0.0.1:2379' +max-snapshots: 3 +max-wals: 5 +cors: +initial-advertise-peer-urls: 'https://192.168.0.31:2380' +advertise-client-urls: 'https://192.168.0.31:2379' +discovery: +discovery-fallback: 'proxy' +discovery-proxy: +discovery-srv: +initial-cluster: 'k8s-master01=https://192.168.0.31:2380,k8s-master02=https://192.168.0.32:2380,k8s-master03=https://192.168.0.33:2380' 
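+# initial-cluster must list all three etcd members and be identical on every master node;
+# the 'name' set above has to match this node's entry in that list (here: k8s-master01).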
+initial-cluster-token: 'etcd-k8s-cluster' +initial-cluster-state: 'new' +strict-reconfig-check: false +enable-v2: true +enable-pprof: true +proxy: 'off' +proxy-failure-wait: 5000 +proxy-refresh-interval: 30000 +proxy-dial-timeout: 1000 +proxy-write-timeout: 5000 +proxy-read-timeout: 0 +client-transport-security: + cert-file: '/etc/kubernetes/pki/etcd/etcd.pem' + key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem' + client-cert-auth: true + trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem' + auto-tls: true +peer-transport-security: + cert-file: '/etc/kubernetes/pki/etcd/etcd.pem' + key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem' + peer-client-cert-auth: true + trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem' + auto-tls: true +debug: false +log-package-levels: +log-outputs: [default] +force-new-cluster: false +EOF +``` + +### 4.1.2master02配置 + +```shell +# 如果要用IPv6那么把IPv4地址修改为IPv6即可 +cat > /etc/etcd/etcd.config.yml << EOF +name: 'k8s-master02' +data-dir: /var/lib/etcd +wal-dir: /var/lib/etcd/wal +snapshot-count: 5000 +heartbeat-interval: 100 +election-timeout: 1000 +quota-backend-bytes: 0 +listen-peer-urls: 'https://192.168.0.32:2380' +listen-client-urls: 'https://192.168.0.32:2379,http://127.0.0.1:2379' +max-snapshots: 3 +max-wals: 5 +cors: +initial-advertise-peer-urls: 'https://192.168.0.32:2380' +advertise-client-urls: 'https://192.168.0.32:2379' +discovery: +discovery-fallback: 'proxy' +discovery-proxy: +discovery-srv: +initial-cluster: 'k8s-master01=https://192.168.0.31:2380,k8s-master02=https://192.168.0.32:2380,k8s-master03=https://192.168.0.33:2380' +initial-cluster-token: 'etcd-k8s-cluster' +initial-cluster-state: 'new' +strict-reconfig-check: false +enable-v2: true +enable-pprof: true +proxy: 'off' +proxy-failure-wait: 5000 +proxy-refresh-interval: 30000 +proxy-dial-timeout: 1000 +proxy-write-timeout: 5000 +proxy-read-timeout: 0 +client-transport-security: + cert-file: '/etc/kubernetes/pki/etcd/etcd.pem' + key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem' + client-cert-auth: true + trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem' + auto-tls: true +peer-transport-security: + cert-file: '/etc/kubernetes/pki/etcd/etcd.pem' + key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem' + peer-client-cert-auth: true + trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem' + auto-tls: true +debug: false +log-package-levels: +log-outputs: [default] +force-new-cluster: false +EOF +``` + +### 4.1.3master03配置 + +```shell +# 如果要用IPv6那么把IPv4地址修改为IPv6即可 +cat > /etc/etcd/etcd.config.yml << EOF +name: 'k8s-master03' +data-dir: /var/lib/etcd +wal-dir: /var/lib/etcd/wal +snapshot-count: 5000 +heartbeat-interval: 100 +election-timeout: 1000 +quota-backend-bytes: 0 +listen-peer-urls: 'https://192.168.0.33:2380' +listen-client-urls: 'https://192.168.0.33:2379,http://127.0.0.1:2379' +max-snapshots: 3 +max-wals: 5 +cors: +initial-advertise-peer-urls: 'https://192.168.0.33:2380' +advertise-client-urls: 'https://192.168.0.33:2379' +discovery: +discovery-fallback: 'proxy' +discovery-proxy: +discovery-srv: +initial-cluster: 'k8s-master01=https://192.168.0.31:2380,k8s-master02=https://192.168.0.32:2380,k8s-master03=https://192.168.0.33:2380' +initial-cluster-token: 'etcd-k8s-cluster' +initial-cluster-state: 'new' +strict-reconfig-check: false +enable-v2: true +enable-pprof: true +proxy: 'off' +proxy-failure-wait: 5000 +proxy-refresh-interval: 30000 +proxy-dial-timeout: 1000 +proxy-write-timeout: 5000 +proxy-read-timeout: 0 +client-transport-security: + cert-file: 
'/etc/kubernetes/pki/etcd/etcd.pem' + key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem' + client-cert-auth: true + trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem' + auto-tls: true +peer-transport-security: + cert-file: '/etc/kubernetes/pki/etcd/etcd.pem' + key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem' + peer-client-cert-auth: true + trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem' + auto-tls: true +debug: false +log-package-levels: +log-outputs: [default] +force-new-cluster: false +EOF +``` + +## 4.2.创建service(所有master节点操作) + +### 4.2.1创建etcd.service并启动 + +```shell +cat > /usr/lib/systemd/system/etcd.service << EOF + +[Unit] +Description=Etcd Service +Documentation=https://coreos.com/etcd/docs/latest/ +After=network.target + +[Service] +Type=notify +ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml +Restart=on-failure +RestartSec=10 +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target +Alias=etcd3.service + +EOF +``` + +### 4.2.2创建etcd证书目录 + +```shell +mkdir /etc/kubernetes/pki/etcd +ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/ +systemctl daemon-reload +systemctl enable --now etcd +``` + +### 4.2.3查看etcd状态 + +```shell +# 如果要用IPv6那么把IPv4地址修改为IPv6即可 +export ETCDCTL_API=3 +etcdctl --endpoints="192.168.0.33:2379,192.168.0.32:2379,192.168.0.31:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint status --write-out=table ++-----------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+ +| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS | ++-----------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+ +| 192.168.0.33:2379 | d58e37898979ae63 | 3.5.7 | 20 kB | false | false | 2 | 8 | 8 | | +| 192.168.0.32:2379 | ec6b15415e24cb42 | 3.5.7 | 20 kB | false | false | 2 | 8 | 8 | | +| 192.168.0.31:2379 | 5e5cf1ca5cb2d291 | 3.5.7 | 20 kB | true | false | 2 | 8 | 8 | | ++-----------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+ +[root@k8s-master01 pki]# +``` + +# 5.高可用配置(在Master服务器上操作) + +**注意* 5.1.1 和5.1.2 二选一即可** + +选择使用那种高可用方案 + +在《3.2.生成k8s相关证书》 + +若使用 nginx方案,那么为 `--server=https://127.0.0.1:8443` +若使用 haproxy、keepalived 那么为 `--server=https://192.168.0.36:8443` + +## 5.1 NGINX高可用方案 (推荐) + +### 5.1.1自己手动编译(推荐) +在所有节点执行 +```shell +# 安装编译环境 +yum install gcc -y + +# 下载解压nginx二进制文件 +# wget http://nginx.org/download/nginx-1.22.1.tar.gz +tar xvf nginx-*.tar.gz +cd nginx-* + +# 进行编译 +./configure --with-stream --without-http --without-http_uwsgi_module --without-http_scgi_module --without-http_fastcgi_module +make && make install + +# 拷贝编译好的nginx +node='k8s-master02 k8s-master03 k8s-node01 k8s-node02' +for NODE in $node; do scp -r /usr/local/nginx/ $NODE:/usr/local/nginx/; done + +``` + + +### 5.1.2使用我编译好的 +```shell +# 使用我编译好的 + +cd kubernetes-v1.26.0/cby +# 拷贝我编译好的nginx +node='k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02' +for NODE in $node; do scp nginx.tar $NODE:/usr/local/; done + +# 其他节点上执行 +cd /usr/local/ +tar xvf nginx.tar +``` + +### 5.1.3写入启动配置 +在所有主机上执行 +```shell +# 写入nginx配置文件 +cat > /usr/local/nginx/conf/kube-nginx.conf < /etc/systemd/system/kube-nginx.service </etc/haproxy/haproxy.cfg<<"EOF" +global + maxconn 2000 + ulimit-n 16384 + log 127.0.0.1 local0 
err + stats timeout 30s + +defaults + log global + mode http + option httplog + timeout connect 5000 + timeout client 50000 + timeout server 50000 + timeout http-request 15s + timeout http-keep-alive 15s + + +frontend monitor-in + bind *:33305 + mode http + option httplog + monitor-uri /monitor + +frontend k8s-master + bind 0.0.0.0:8443 + bind 127.0.0.1:8443 + mode tcp + option tcplog + tcp-request inspect-delay 5s + default_backend k8s-master + + +backend k8s-master + mode tcp + option tcplog + option tcp-check + balance roundrobin + default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100 + server k8s-master01 192.168.0.31:6443 check + server k8s-master02 192.168.0.32:6443 check + server k8s-master03 192.168.0.33:6443 check +EOF +``` + +### 5.2.3Master01配置keepalived master节点 + +```shell +#cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak + +cat > /etc/keepalived/keepalived.conf << EOF +! Configuration File for keepalived + +global_defs { + router_id LVS_DEVEL +} +vrrp_script chk_apiserver { + script "/etc/keepalived/check_apiserver.sh" + interval 5 + weight -5 + fall 2 + rise 1 +} +vrrp_instance VI_1 { + state MASTER + # 注意网卡名 + interface eth0 + mcast_src_ip 192.168.0.31 + virtual_router_id 51 + priority 100 + nopreempt + advert_int 2 + authentication { + auth_type PASS + auth_pass K8SHA_KA_AUTH + } + virtual_ipaddress { + 192.168.0.36 + } + track_script { + chk_apiserver +} } + +EOF +``` + +### 5.2.4Master02配置keepalived backup节点 + +```shell +# cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak + +cat > /etc/keepalived/keepalived.conf << EOF +! Configuration File for keepalived + +global_defs { + router_id LVS_DEVEL +} +vrrp_script chk_apiserver { + script "/etc/keepalived/check_apiserver.sh" + interval 5 + weight -5 + fall 2 + rise 1 + +} +vrrp_instance VI_1 { + state BACKUP + # 注意网卡名 + interface eth0 + mcast_src_ip 192.168.0.32 + virtual_router_id 51 + priority 80 + nopreempt + advert_int 2 + authentication { + auth_type PASS + auth_pass K8SHA_KA_AUTH + } + virtual_ipaddress { + 192.168.0.36 + } + track_script { + chk_apiserver +} } + +EOF +``` + +### 5.2.5Master03配置keepalived backup节点 + +```shell +# cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak + +cat > /etc/keepalived/keepalived.conf << EOF +! 
Configuration File for keepalived + +global_defs { + router_id LVS_DEVEL +} +vrrp_script chk_apiserver { + script "/etc/keepalived/check_apiserver.sh" + interval 5 + weight -5 + fall 2 + rise 1 + +} +vrrp_instance VI_1 { + state BACKUP + # 注意网卡名 + interface eth0 + mcast_src_ip 192.168.0.33 + virtual_router_id 51 + priority 50 + nopreempt + advert_int 2 + authentication { + auth_type PASS + auth_pass K8SHA_KA_AUTH + } + virtual_ipaddress { + 192.168.0.36 + } + track_script { + chk_apiserver +} } + +EOF +``` + + + +### 5.2.6健康检查脚本配置(两台lb主机) + +```shell +cat > /etc/keepalived/check_apiserver.sh << EOF +#!/bin/bash + +err=0 +for k in \$(seq 1 3) +do + check_code=\$(pgrep haproxy) + if [[ \$check_code == "" ]]; then + err=\$(expr \$err + 1) + sleep 1 + continue + else + err=0 + break + fi +done + +if [[ \$err != "0" ]]; then + echo "systemctl stop keepalived" + /usr/bin/systemctl stop keepalived + exit 1 +else + exit 0 +fi +EOF + +# 给脚本授权 + +chmod +x /etc/keepalived/check_apiserver.sh +``` + +### 5.2.7启动服务 + +```shell +systemctl daemon-reload +systemctl enable --now haproxy +systemctl enable --now keepalived +``` + +### 5.2.8测试高可用 + +```shell +# 能ping同 + +[root@k8s-node02 ~]# ping 192.168.0.36 + +# 能telnet访问 + +[root@k8s-node02 ~]# telnet 192.168.0.36 8443 + +# 关闭主节点,看vip是否漂移到备节点 +``` + +# 6.k8s组件配置(区别于第4点) + +所有k8s节点创建以下目录 + +```shell +mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes +``` + +## 6.1.创建apiserver(所有master节点) + +### 6.1.1master01节点配置 + +```shell +cat > /usr/lib/systemd/system/kube-apiserver.service << EOF + +[Unit] +Description=Kubernetes API Server +Documentation=https://github.com/kubernetes/kubernetes +After=network.target + +[Service] +ExecStart=/usr/local/bin/kube-apiserver \\ + --v=2 \\ + --allow-privileged=true \\ + --bind-address=0.0.0.0 \\ + --secure-port=6443 \\ + --advertise-address=192.168.0.31 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ + --service-node-port-range=30000-32767 \\ + --etcd-servers=https://192.168.0.31:2379,https://192.168.0.32:2379,https://192.168.0.33:2379 \\ + --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\ + --etcd-certfile=/etc/etcd/ssl/etcd.pem \\ + --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\ + --client-ca-file=/etc/kubernetes/pki/ca.pem \\ + --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \\ + --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \\ + --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \\ + --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \\ + --service-account-key-file=/etc/kubernetes/pki/sa.pub \\ + --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \\ + --service-account-issuer=https://kubernetes.default.svc.cluster.local \\ + --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\ + --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \ + --authorization-mode=Node,RBAC \\ + --enable-bootstrap-token-auth=true \\ + --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \\ + --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \\ + --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \\ + --requestheader-allowed-names=aggregator \\ + --requestheader-group-headers=X-Remote-Group \\ + --requestheader-extra-headers-prefix=X-Remote-Extra- \\ + --requestheader-username-headers=X-Remote-User \\ + --enable-aggregator-routing=true + # --feature-gates=IPv6DualStack=true 
+ # --token-auth-file=/etc/kubernetes/token.csv + +Restart=on-failure +RestartSec=10s +LimitNOFILE=65535 + +[Install] +WantedBy=multi-user.target + +EOF +``` + +### 6.1.2master02节点配置 + +```shell +cat > /usr/lib/systemd/system/kube-apiserver.service << EOF +[Unit] +Description=Kubernetes API Server +Documentation=https://github.com/kubernetes/kubernetes +After=network.target + +[Service] +ExecStart=/usr/local/bin/kube-apiserver \\ + --v=2 \\ + --allow-privileged=true \\ + --bind-address=0.0.0.0 \\ + --secure-port=6443 \\ + --advertise-address=192.168.0.32 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ + --service-node-port-range=30000-32767 \\ + --etcd-servers=https://192.168.0.31:2379,https://192.168.0.32:2379,https://192.168.0.33:2379 \\ + --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\ + --etcd-certfile=/etc/etcd/ssl/etcd.pem \\ + --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\ + --client-ca-file=/etc/kubernetes/pki/ca.pem \\ + --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \\ + --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \\ + --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \\ + --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \\ + --service-account-key-file=/etc/kubernetes/pki/sa.pub \\ + --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \\ + --service-account-issuer=https://kubernetes.default.svc.cluster.local \\ + --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\ + --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \\ + --authorization-mode=Node,RBAC \\ + --enable-bootstrap-token-auth=true \\ + --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \\ + --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \\ + --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \\ + --requestheader-allowed-names=aggregator \\ + --requestheader-group-headers=X-Remote-Group \\ + --requestheader-extra-headers-prefix=X-Remote-Extra- \\ + --requestheader-username-headers=X-Remote-User \\ + --enable-aggregator-routing=true + # --feature-gates=IPv6DualStack=true + # --token-auth-file=/etc/kubernetes/token.csv + +Restart=on-failure +RestartSec=10s +LimitNOFILE=65535 + +[Install] +WantedBy=multi-user.target + +EOF +``` + +### 6.1.3master03节点配置 + +```shell +cat > /usr/lib/systemd/system/kube-apiserver.service << EOF + +[Unit] +Description=Kubernetes API Server +Documentation=https://github.com/kubernetes/kubernetes +After=network.target + +[Service] +ExecStart=/usr/local/bin/kube-apiserver \\ + --v=2 \\ + --allow-privileged=true \\ + --bind-address=0.0.0.0 \\ + --secure-port=6443 \\ + --advertise-address=192.168.0.33 \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ + --service-node-port-range=30000-32767 \\ + --etcd-servers=https://192.168.0.31:2379,https://192.168.0.32:2379,https://192.168.0.33:2379 \\ + --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\ + --etcd-certfile=/etc/etcd/ssl/etcd.pem \\ + --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\ + --client-ca-file=/etc/kubernetes/pki/ca.pem \\ + --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \\ + --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \\ + --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \\ + --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \\ + --service-account-key-file=/etc/kubernetes/pki/sa.pub \\ + --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \\ + 
--service-account-issuer=https://kubernetes.default.svc.cluster.local \\ + --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\ + --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \\ + --authorization-mode=Node,RBAC \\ + --enable-bootstrap-token-auth=true \\ + --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \\ + --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \\ + --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \\ + --requestheader-allowed-names=aggregator \\ + --requestheader-group-headers=X-Remote-Group \\ + --requestheader-extra-headers-prefix=X-Remote-Extra- \\ + --requestheader-username-headers=X-Remote-User \\ + --enable-aggregator-routing=true + # --feature-gates=IPv6DualStack=true + # --token-auth-file=/etc/kubernetes/token.csv + +Restart=on-failure +RestartSec=10s +LimitNOFILE=65535 + +[Install] +WantedBy=multi-user.target + +EOF +``` + +### 6.1.4启动apiserver(所有master节点) + +```shell +systemctl daemon-reload && systemctl enable --now kube-apiserver + +# 注意查看状态是否启动正常 +# systemctl status kube-apiserver +``` + +## 6.2.配置kube-controller-manager service + +```shell +# 所有master节点配置,且配置相同 +# 172.16.0.0/12为pod网段,按需求设置你自己的网段 + +cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF + +[Unit] +Description=Kubernetes Controller Manager +Documentation=https://github.com/kubernetes/kubernetes +After=network.target + +[Service] +ExecStart=/usr/local/bin/kube-controller-manager \\ + --v=2 \\ + --bind-address=0.0.0.0 \\ + --root-ca-file=/etc/kubernetes/pki/ca.pem \\ + --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \\ + --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \\ + --service-account-private-key-file=/etc/kubernetes/pki/sa.key \\ + --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \\ + --leader-elect=true \\ + --use-service-account-credentials=true \\ + --node-monitor-grace-period=40s \\ + --node-monitor-period=5s \\ + --controllers=*,bootstrapsigner,tokencleaner \\ + --allocate-node-cidrs=true \\ + --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\ + --cluster-cidr=172.16.0.0/12,fc00:2222::/112 \\ + --node-cidr-mask-size-ipv4=24 \\ + --node-cidr-mask-size-ipv6=120 \\ + --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem + # --feature-gates=IPv6DualStack=true + +Restart=always +RestartSec=10s + +[Install] +WantedBy=multi-user.target + +EOF +``` + +### 6.2.1启动kube-controller-manager,并查看状态 + +```shell +systemctl daemon-reload +systemctl enable --now kube-controller-manager +# systemctl status kube-controller-manager +``` + +## 6.3.配置kube-scheduler service + +### 6.3.1所有master节点配置,且配置相同 + +```shell +cat > /usr/lib/systemd/system/kube-scheduler.service << EOF + +[Unit] +Description=Kubernetes Scheduler +Documentation=https://github.com/kubernetes/kubernetes +After=network.target + +[Service] +ExecStart=/usr/local/bin/kube-scheduler \\ + --v=2 \\ + --bind-address=0.0.0.0 \\ + --leader-elect=true \\ + --kubeconfig=/etc/kubernetes/scheduler.kubeconfig + +Restart=always +RestartSec=10s + +[Install] +WantedBy=multi-user.target + +EOF +``` + +### 6.3.2启动并查看服务状态 + +```shell +systemctl daemon-reload +systemctl enable --now kube-scheduler +# systemctl status kube-scheduler +``` + +# 7.TLS Bootstrapping配置 + +## 7.1在master01上配置 + +```shell +# 在《5.高可用配置》选择使用那种高可用方案 +# 若使用 haproxy、keepalived 那么为 `--server=https://192.168.0.36:8443` +# 若使用 nginx方案,那么为 
`--server=https://127.0.0.1:8443`
+
+cd bootstrap
+
+kubectl config set-cluster kubernetes \
+--certificate-authority=/etc/kubernetes/pki/ca.pem \
+--embed-certs=true --server=https://127.0.0.1:8443 \
+--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
+
+kubectl config set-credentials tls-bootstrap-token-user \
+--token=c8ad9c.2e4d610cf3e7426e \
+--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
+
+kubectl config set-context tls-bootstrap-token-user@kubernetes \
+--cluster=kubernetes \
+--user=tls-bootstrap-token-user \
+--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
+
+kubectl config use-context tls-bootstrap-token-user@kubernetes \
+--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
+
+# token的位置在bootstrap.secret.yaml,如果修改的话到这个文件修改
+mkdir -p /root/.kube ; cp /etc/kubernetes/admin.kubeconfig /root/.kube/config
+```
+
+## 7.2查看集群状态,没问题的话继续后续操作
+
+```shell
+kubectl get cs
+Warning: v1 ComponentStatus is deprecated in v1.19+
+NAME                 STATUS    MESSAGE                         ERROR
+scheduler            Healthy   ok
+controller-manager   Healthy   ok
+etcd-0               Healthy   {"health":"true","reason":""}
+etcd-2               Healthy   {"health":"true","reason":""}
+etcd-1               Healthy   {"health":"true","reason":""}
+
+# 切记执行,别忘记!!!
+kubectl create -f bootstrap.secret.yaml
+```
+
+# 8.node节点配置
+
+## 8.1.在master01上将证书复制到node节点
+
+```shell
+cd /etc/kubernetes/
+
+for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02; do ssh $NODE mkdir -p /etc/kubernetes/pki; for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem bootstrap-kubelet.kubeconfig kube-proxy.kubeconfig; do scp /etc/kubernetes/$FILE $NODE:/etc/kubernetes/${FILE}; done; done
+```
+
+## 8.2.kubelet配置
+
+**注意 : 8.2.1 和 8.2.2 需要和 上方 2.1 和 2.2 对应起来**
+
+### 8.2.1当使用docker作为Runtime
+
+```shell
+cat > /usr/lib/systemd/system/kubelet.service << EOF
+
+[Unit]
+Description=Kubernetes Kubelet
+Documentation=https://github.com/kubernetes/kubernetes
+
+[Service]
+ExecStart=/usr/local/bin/kubelet \\
+    --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \\
+    --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
+    --config=/etc/kubernetes/kubelet-conf.yml \\
+    --container-runtime-endpoint=unix:///run/cri-dockerd.sock \\
+    --node-labels=node.kubernetes.io/node=
+
+[Install]
+WantedBy=multi-user.target
+EOF
+```
+
+### 8.2.2当使用Containerd作为Runtime (推荐)
+
+```shell
+mkdir -p /var/lib/kubelet /var/log/kubernetes /etc/systemd/system/kubelet.service.d /etc/kubernetes/manifests/
+
+# 所有k8s节点配置kubelet service
+cat > /usr/lib/systemd/system/kubelet.service << EOF
+
+[Unit]
+Description=Kubernetes Kubelet
+Documentation=https://github.com/kubernetes/kubernetes
+After=containerd.service
+Requires=containerd.service
+
+[Service]
+ExecStart=/usr/local/bin/kubelet \\
+    --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \\
+    --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
+    --config=/etc/kubernetes/kubelet-conf.yml \\
+    --container-runtime-endpoint=unix:///run/containerd/containerd.sock \\
+    --node-labels=node.kubernetes.io/node=
+    # --feature-gates=IPv6DualStack=true
+    # --container-runtime=remote
+    # --runtime-request-timeout=15m
+    # --cgroup-driver=systemd
+
+[Install]
+WantedBy=multi-user.target
+EOF
+```
+
+
+### 8.2.3所有k8s节点创建kubelet的配置文件
+
+```shell
+cat > /etc/kubernetes/kubelet-conf.yml << EOF
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+address: 0.0.0.0
+port: 10250
+readOnlyPort: 10255
+authentication:
+  anonymous:
+    enabled: false
+  webhook:
+    cacheTTL: 2m0s
+    enabled: true
+  x509:
+    clientCAFile: /etc/kubernetes/pki/ca.pem
+authorization:
+  mode: Webhook
+  webhook:
+    cacheAuthorizedTTL: 5m0s
+    cacheUnauthorizedTTL: 30s
+cgroupDriver: systemd
+cgroupsPerQOS: true
+clusterDNS:
+- 10.96.0.10
+clusterDomain: cluster.local
+containerLogMaxFiles: 5
+containerLogMaxSize: 10Mi
+contentType: application/vnd.kubernetes.protobuf
+cpuCFSQuota: true
+cpuManagerPolicy: none
+cpuManagerReconcilePeriod: 10s
+enableControllerAttachDetach: true
+enableDebuggingHandlers: true
+enforceNodeAllocatable:
+- pods
+eventBurst: 10
+eventRecordQPS: 5
+evictionHard:
+  imagefs.available: 15%
+  memory.available: 100Mi
+  nodefs.available: 10%
+  nodefs.inodesFree: 5%
+evictionPressureTransitionPeriod: 5m0s
+failSwapOn: true
+fileCheckFrequency: 20s
+hairpinMode: promiscuous-bridge
+healthzBindAddress: 127.0.0.1
+healthzPort: 10248
+httpCheckFrequency: 20s
+imageGCHighThresholdPercent: 85
+imageGCLowThresholdPercent: 80
+imageMinimumGCAge: 2m0s
+iptablesDropBit: 15
+iptablesMasqueradeBit: 14
+kubeAPIBurst: 10
+kubeAPIQPS: 5
+makeIPTablesUtilChains: true
+maxOpenFiles: 1000000
+maxPods: 110
+nodeStatusUpdateFrequency: 10s
+oomScoreAdj: -999
+podPidsLimit: -1
+registryBurst: 10
+registryPullQPS: 5
+resolvConf: /etc/resolv.conf
+rotateCertificates: true
+runtimeRequestTimeout: 2m0s
+serializeImagePulls: true
+staticPodPath: /etc/kubernetes/manifests
+streamingConnectionIdleTimeout: 4h0m0s
+syncFrequency: 1m0s
+volumeStatsAggPeriod: 1m0s
+EOF
+```
+
+### 8.2.4启动kubelet
+
+```shell
+systemctl daemon-reload
+systemctl restart kubelet
+systemctl enable --now kubelet
+```
+
+### 8.2.5查看集群
+
+```shell
+[root@k8s-master01 ~]# kubectl get node
+NAME           STATUS   ROLES    AGE   VERSION
+k8s-master01   Ready    <none>   18s   v1.27.1
+k8s-master02   Ready    <none>   16s   v1.27.1
+k8s-master03   Ready    <none>   16s   v1.27.1
+k8s-node01     Ready    <none>   14s   v1.27.1
+k8s-node02     Ready    <none>   14s   v1.27.1
+[root@k8s-master01 ~]#
+```
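+
+可选:kubelet 是通过 TLS Bootstrapping 自动申请证书的,下面是一个简单的检查示例(仅作参考,假设 master01 已配置到各节点的免密 ssh,节点名与本文一致),用于确认 CSR 已被自动批准、各节点 kubelet 均在运行:
+
+```shell
+# 查看 CSR 是否已被自动批准并签发(CONDITION 应为 Approved,Issued)
+kubectl get csr
+
+# 查看 kubelet 自动生成的证书文件
+ls /var/lib/kubelet/pki/
+
+# 批量检查各节点 kubelet 运行状态
+for NODE in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02; do
+  echo "== $NODE =="
+  ssh $NODE "systemctl is-active kubelet"
+done
+```
+
+### 8.2.6查看容器运行时
+
+```shell
+[root@k8s-master01 ~]# kubectl describe node | grep Runtime
+  Container 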
Runtime Version: containerd://1.6.20 + Container Runtime Version: containerd://1.6.20 + Container Runtime Version: containerd://1.6.20 + Container Runtime Version: containerd://1.6.20 + Container Runtime Version: containerd://1.6.20 +[root@k8s-master01 ~]# kubectl describe node | grep Runtime + Container Runtime Version: docker://23.0.3 + Container Runtime Version: docker://23.0.3 + Container Runtime Version: docker://23.0.3 + Container Runtime Version: docker://23.0.3 + Container Runtime Version: docker://23.0.3 + +``` + + +## 8.3.kube-proxy配置 + +### 8.3.1将kubeconfig发送至其他节点 + +```shell +for NODE in k8s-master02 k8s-master03; do scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig; done + +for NODE in k8s-node01 k8s-node02; do scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig; done +``` + +### 8.3.2所有k8s节点添加kube-proxy的service文件 + +```shell +cat > /usr/lib/systemd/system/kube-proxy.service << EOF +[Unit] +Description=Kubernetes Kube Proxy +Documentation=https://github.com/kubernetes/kubernetes +After=network.target + +[Service] +ExecStart=/usr/local/bin/kube-proxy \\ + --config=/etc/kubernetes/kube-proxy.yaml \\ + --v=2 + +Restart=always +RestartSec=10s + +[Install] +WantedBy=multi-user.target + +EOF +``` + +### 8.3.3所有k8s节点添加kube-proxy的配置 + +```shell +cat > /etc/kubernetes/kube-proxy.yaml << EOF +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +bindAddress: 0.0.0.0 +clientConnection: + acceptContentTypes: "" + burst: 10 + contentType: application/vnd.kubernetes.protobuf + kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig + qps: 5 +clusterCIDR: 172.16.0.0/12,fc00:2222::/112 +configSyncPeriod: 15m0s +conntrack: + max: null + maxPerCore: 32768 + min: 131072 + tcpCloseWaitTimeout: 1h0m0s + tcpEstablishedTimeout: 24h0m0s +enableProfiling: false +healthzBindAddress: 0.0.0.0:10256 +hostnameOverride: "" +iptables: + masqueradeAll: false + masqueradeBit: 14 + minSyncPeriod: 0s + syncPeriod: 30s +ipvs: + masqueradeAll: true + minSyncPeriod: 5s + scheduler: "rr" + syncPeriod: 30s +kind: KubeProxyConfiguration +metricsBindAddress: 127.0.0.1:10249 +mode: "ipvs" +nodePortAddresses: null +oomScoreAdj: -999 +portRange: "" +udpIdleTimeout: 250ms + +EOF +``` + +### 8.3.4启动kube-proxy + +```shell + systemctl daemon-reload + systemctl restart kube-proxy + systemctl enable --now kube-proxy +``` + +# 9.安装网络插件 + +**注意 9.1 和 9.2 二选其一即可,建议在此处创建好快照后在进行操作,后续出问题可以回滚** + +** centos7 要升级libseccomp 不然 无法安装网络插件** + +```shell +# https://github.com/opencontainers/runc/releases +# 升级runc +# wget https://ghproxy.com/https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64 + +install -m 755 runc.amd64 /usr/local/sbin/runc +cp -p /usr/local/sbin/runc /usr/local/bin/runc +cp -p /usr/local/sbin/runc /usr/bin/runc + +#下载高于2.4以上的包 +yum -y install http://rpmfind.net/linux/centos/8-stream/BaseOS/x86_64/os/Packages/libseccomp-2.5.1-1.el8.x86_64.rpm + + +#查看当前版本 +[root@k8s-master-1 ~]# rpm -qa | grep libseccomp +libseccomp-2.5.1-1.el8.x86_64 + + +``` + +## 9.1安装Calico + +### 9.1.1更改calico网段 + +```shell +wget https://mirrors.chenby.cn/https://github.com/projectcalico/calico/blob/master/manifests/calico-typha.yaml + +cp calico-typha.yaml calico.yaml +cp calico-typha.yaml calico-ipv6.yaml + +vim calico.yaml +# calico-config ConfigMap处 + "ipam": { + "type": "calico-ipam", + }, + - name: IP + value: "autodetect" + + - name: CALICO_IPV4POOL_CIDR + value: "172.16.0.0/16" + +# vim calico-ipv6.yaml +# calico-config ConfigMap处 + "ipam": { + "type": "calico-ipam", 
+ "assign_ipv4": "true", + "assign_ipv6": "true" + }, + - name: IP + value: "autodetect" + + - name: IP6 + value: "autodetect" + + - name: CALICO_IPV4POOL_CIDR + value: "172.16.0.0/16" + + - name: CALICO_IPV6POOL_CIDR + value: "fc00::/48" + + - name: FELIX_IPV6SUPPORT + value: "true" + + +# 若docker镜像拉不下来,可以使用我的仓库 +# sed -i "s#docker.io/calico/#registry.cn-hangzhou.aliyuncs.com/chenby/#g" calico.yaml +# sed -i "s#docker.io/calico/#registry.cn-hangzhou.aliyuncs.com/chenby/#g" calico-ipv6.yaml + + +# 本地没有公网 IPv6 使用 calico.yaml +kubectl apply -f calico.yaml + +# 本地有公网 IPv6 使用 calico-ipv6.yaml +# kubectl apply -f calico-ipv6.yaml + +``` + +### 9.1.2查看容器状态 + +```shell +# calico 初始化会很慢 需要耐心等待一下,大约十分钟左右 +[root@k8s-master01 ~]# kubectl get pod -A +NAMESPACE NAME READY STATUS RESTARTS AGE +kube-system calico-kube-controllers-6747f75cdc-fbvvc 1/1 Running 0 61s +kube-system calico-node-fs7hl 1/1 Running 0 61s +kube-system calico-node-jqz58 1/1 Running 0 61s +kube-system calico-node-khjlg 1/1 Running 0 61s +kube-system calico-node-wmf8q 1/1 Running 0 61s +kube-system calico-node-xc6gn 1/1 Running 0 61s +kube-system calico-typha-6cdc4b4fbc-57snb 1/1 Running 0 61s +``` + +## 9.2 安装cilium + +### 9.2.1 安装helm + +```shell +# [root@k8s-master01 ~]# curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 +# [root@k8s-master01 ~]# chmod 700 get_helm.sh +# [root@k8s-master01 ~]# ./get_helm.sh + +wget https://get.helm.sh/helm-canary-linux-amd64.tar.gz +tar xvf helm-canary-linux-amd64.tar.gz +cp linux-amd64/helm /usr/local/bin/ +``` + +### 9.2.2 安装cilium + +```shell +# 添加源 +helm repo add cilium https://helm.cilium.io + +# 修改为国内源 +helm pull cilium/cilium +tar xvf cilium-*.tgz +cd cilium/ +sed -i "s#quay.io/cilium#registry.cn-hangzhou.aliyuncs.com/chenby#g" values.yaml +sed -i "s#quay.io/coreos#registry.cn-hangzhou.aliyuncs.com/chenby#g" values.yaml + +# 默认参数安装 +helm install harbor ./cilium/ -n kube-system + +# 启用ipv6 +# helm install cilium cilium/cilium --namespace kube-system --set ipv6.enabled=true + +# 启用路由信息和监控插件 +# helm install cilium cilium/cilium --namespace kube-system --set hubble.relay.enabled=true --set hubble.ui.enabled=true --set prometheus.enabled=true --set operator.prometheus.enabled=true --set hubble.enabled=true --set hubble.metrics.enabled="{dns,drop,tcp,flow,port-distribution,icmp,http}" + +``` + +### 9.2.3 查看 + +```shell +[root@k8s-master01 ~]# kubectl get pod -A | grep cil +kube-system cilium-gmr6c 1/1 Running 0 5m3s +kube-system cilium-kzgdj 1/1 Running 0 5m3s +kube-system cilium-operator-69b677f97c-6pw4k 1/1 Running 0 5m3s +kube-system cilium-operator-69b677f97c-xzzdk 1/1 Running 0 5m3s +kube-system cilium-q2rnr 1/1 Running 0 5m3s +kube-system cilium-smx5v 1/1 Running 0 5m3s +kube-system cilium-tdjq4 1/1 Running 0 5m3s +[root@k8s-master01 ~]# +``` + +### 9.2.4 下载专属监控面板 + +安装时候没有创建 监控可以忽略 + +```shell +[root@k8s-master01 yaml]# wget https://raw.githubusercontent.com/cilium/cilium/1.12.1/examples/kubernetes/addons/prometheus/monitoring-example.yaml +[root@k8s-master01 yaml]# +[root@k8s-master01 yaml]# kubectl apply -f monitoring-example.yaml +namespace/cilium-monitoring created +serviceaccount/prometheus-k8s created +configmap/grafana-config created +configmap/grafana-cilium-dashboard created +configmap/grafana-cilium-operator-dashboard created +configmap/grafana-hubble-dashboard created +configmap/prometheus created +clusterrole.rbac.authorization.k8s.io/prometheus created +clusterrolebinding.rbac.authorization.k8s.io/prometheus created +service/grafana 
created +service/prometheus created +deployment.apps/grafana created +deployment.apps/prometheus created +[root@k8s-master01 yaml]# +``` + +### 9.2.5 下载部署测试用例 + +说明 测试用例 需要在 安装CoreDNS 之后即可完成 + +```shell +[root@k8s-master01 yaml]# wget https://raw.githubusercontent.com/cilium/cilium/master/examples/kubernetes/connectivity-check/connectivity-check.yaml + +[root@k8s-master01 yaml]# sed -i "s#google.com#oiox.cn#g" connectivity-check.yaml + +[root@k8s-master01 yaml]# kubectl apply -f connectivity-check.yaml +deployment.apps/echo-a created +deployment.apps/echo-b created +deployment.apps/echo-b-host created +deployment.apps/pod-to-a created +deployment.apps/pod-to-external-1111 created +deployment.apps/pod-to-a-denied-cnp created +deployment.apps/pod-to-a-allowed-cnp created +deployment.apps/pod-to-external-fqdn-allow-google-cnp created +deployment.apps/pod-to-b-multi-node-clusterip created +deployment.apps/pod-to-b-multi-node-headless created +deployment.apps/host-to-b-multi-node-clusterip created +deployment.apps/host-to-b-multi-node-headless created +deployment.apps/pod-to-b-multi-node-nodeport created +deployment.apps/pod-to-b-intra-node-nodeport created +service/echo-a created +service/echo-b created +service/echo-b-headless created +service/echo-b-host-headless created +ciliumnetworkpolicy.cilium.io/pod-to-a-denied-cnp created +ciliumnetworkpolicy.cilium.io/pod-to-a-allowed-cnp created +ciliumnetworkpolicy.cilium.io/pod-to-external-fqdn-allow-google-cnp created +[root@k8s-master01 yaml]# +``` + +### 9.2.6 查看pod + +```shell +[root@k8s-master01 yaml]# kubectl get pod -A +NAMESPACE NAME READY STATUS RESTARTS AGE +cilium-monitoring grafana-59957b9549-6zzqh 1/1 Running 0 10m +cilium-monitoring prometheus-7c8c9684bb-4v9cl 1/1 Running 0 10m +default chenby-75b5d7fbfb-7zjsr 1/1 Running 0 27h +default chenby-75b5d7fbfb-hbvr8 1/1 Running 0 27h +default chenby-75b5d7fbfb-ppbzg 1/1 Running 0 27h +default echo-a-6799dff547-pnx6w 1/1 Running 0 10m +default echo-b-fc47b659c-4bdg9 1/1 Running 0 10m +default echo-b-host-67fcfd59b7-28r9s 1/1 Running 0 10m +default host-to-b-multi-node-clusterip-69c57975d6-z4j2z 1/1 Running 0 10m +default host-to-b-multi-node-headless-865899f7bb-frrmc 1/1 Running 0 10m +default pod-to-a-allowed-cnp-5f9d7d4b9d-hcd8x 1/1 Running 0 10m +default pod-to-a-denied-cnp-65cc5ff97b-2rzb8 1/1 Running 0 10m +default pod-to-a-dfc64f564-p7xcn 1/1 Running 0 10m +default pod-to-b-intra-node-nodeport-677868746b-trk2l 1/1 Running 0 10m +default pod-to-b-multi-node-clusterip-76bbbc677b-knfq2 1/1 Running 0 10m +default pod-to-b-multi-node-headless-698c6579fd-mmvd7 1/1 Running 0 10m +default pod-to-b-multi-node-nodeport-5dc4b8cfd6-8dxmz 1/1 Running 0 10m +default pod-to-external-1111-8459965778-pjt9b 1/1 Running 0 10m +default pod-to-external-fqdn-allow-google-cnp-64df9fb89b-l9l4q 1/1 Running 0 10m +kube-system cilium-7rfj6 1/1 Running 0 56s +kube-system cilium-d4cch 1/1 Running 0 56s +kube-system cilium-h5x8r 1/1 Running 0 56s +kube-system cilium-operator-5dbddb6dbf-flpl5 1/1 Running 0 56s +kube-system cilium-operator-5dbddb6dbf-gcznc 1/1 Running 0 56s +kube-system cilium-t2xlz 1/1 Running 0 56s +kube-system cilium-z65z7 1/1 Running 0 56s +kube-system coredns-665475b9f8-jkqn8 1/1 Running 1 (36h ago) 36h +kube-system hubble-relay-59d8575-9pl9z 1/1 Running 0 56s +kube-system hubble-ui-64d4995d57-nsv9j 2/2 Running 0 56s +kube-system metrics-server-776f58c94b-c6zgs 1/1 Running 1 (36h ago) 37h +[root@k8s-master01 yaml]# +``` + +### 9.2.7 修改为NodePort + +安装时候没有创建 监控可以忽略 + +```shell +[root@k8s-master01 
yaml]# kubectl edit svc -n kube-system hubble-ui +service/hubble-ui edited +[root@k8s-master01 yaml]# +[root@k8s-master01 yaml]# kubectl edit svc -n cilium-monitoring grafana +service/grafana edited +[root@k8s-master01 yaml]# +[root@k8s-master01 yaml]# kubectl edit svc -n cilium-monitoring prometheus +service/prometheus edited +[root@k8s-master01 yaml]# + +type: NodePort +``` + +### 9.2.8 查看端口 + +安装时候没有创建 监控可以忽略 + +```shell +[root@k8s-master01 yaml]# kubectl get svc -A | grep monit +cilium-monitoring grafana NodePort 10.100.250.17 3000:30707/TCP 15m +cilium-monitoring prometheus NodePort 10.100.131.243 9090:31155/TCP 15m +[root@k8s-master01 yaml]# +[root@k8s-master01 yaml]# kubectl get svc -A | grep hubble +kube-system hubble-metrics ClusterIP None 9965/TCP 5m12s +kube-system hubble-peer ClusterIP 10.100.150.29 443/TCP 5m12s +kube-system hubble-relay ClusterIP 10.109.251.34 80/TCP 5m12s +kube-system hubble-ui NodePort 10.102.253.59 80:31219/TCP 5m12s +[root@k8s-master01 yaml]# +``` + +### 9.2.9 访问 + +安装时候没有创建 监控可以忽略 + +```shell +http://192.168.0.31:30707 +http://192.168.0.31:31155 +http://192.168.0.31:31219 +``` + +# 10.安装CoreDNS + +## 10.1以下步骤只在master01操作 + +### 10.1.1修改文件 + +```shell +# 下载tgz包 +helm repo add coredns https://coredns.github.io/helm +helm pull coredns/coredns +tar xvf coredns-*.tgz +cd coredns/ + +# 修改IP地址 +vim values.yaml +cat values.yaml | grep clusterIP: +clusterIP: "10.96.0.10" + +# 示例 +--- +service: +# clusterIP: "" +# clusterIPs: [] +# loadBalancerIP: "" +# externalIPs: [] +# externalTrafficPolicy: "" +# ipFamilyPolicy: "" + # The name of the Service + # If not set, a name is generated using the fullname template + clusterIP: "10.96.0.10" + name: "" + annotations: {} +--- + +# 修改为国内源 docker源可选 +sed -i "s#coredns/#registry.cn-hangzhou.aliyuncs.com/chenby/#g" values.yaml +sed -i "s#registry.k8s.io/cpa/#registry.cn-hangzhou.aliyuncs.com/chenby/#g" values.yaml + +# 默认参数安装 +helm install coredns ./coredns/ -n kube-system +``` + +# 11.安装Metrics Server + +## 11.1以下步骤只在master01操作 + +### 11.1.1安装Metrics-server + +在新版的Kubernetes中系统资源的采集均使用Metrics-server,可以通过Metrics采集节点和Pod的内存、磁盘、CPU和网络的使用率 + +```shell +# 单机版 +wget https://mirrors.chenby.cn/https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml +# 高可用版本 +wget https://mirrors.chenby.cn/https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/high-availability.yaml + + +# 修改配置 +vim components.yaml +vim high-availability.yaml + +--- +# 1 +defaultArgs: + - --cert-dir=/tmp + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s + - --kubelet-insecure-tls + - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem + - --requestheader-username-headers=X-Remote-User + - --requestheader-group-headers=X-Remote-Group + - --requestheader-extra-headers-prefix=X-Remote-Extra- + +# 2 + volumeMounts: + - mountPath: /tmp + name: tmp-dir + - name: ca-ssl + mountPath: /etc/kubernetes/pki + +# 3 + volumes: + - emptyDir: {} + name: tmp-dir + - name: ca-ssl + hostPath: + path: /etc/kubernetes/pki +--- + + +# 修改为国内源 docker源可选 +sed -i "s#registry.k8s.io/metrics-server/#registry.cn-hangzhou.aliyuncs.com/chenby/#g" *.yaml + +# 二选一 +kubectl apply -f high-availability.yaml +# kubectl apply -f components.yaml + +``` + +### 11.1.2稍等片刻查看状态 + +```shell +kubectl top node +NAME CPU(cores) CPU% MEMORY(bytes) MEMORY% +k8s-master01 154m 1% 1715Mi 21% +k8s-master02 151m 1% 1274Mi 16% +k8s-master03 523m 6% 1345Mi 17% 
+k8s-node01     84m    1%    671Mi   8%
+k8s-node02     73m    0%    727Mi   9%
+k8s-node03     96m    1%    769Mi   9%
+k8s-node04     68m    0%    673Mi   8%
+k8s-node05     82m    1%    679Mi   8%
+```
+
+# 12.集群验证
+
+## 12.1部署pod资源
+
+```shell
+cat<<EOF | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: busybox
+  namespace: default
+spec:
+  containers:
+  - name: busybox
+    image: docker.io/library/busybox:1.28
+    command:
+      - sleep
+      - "3600"
+    imagePullPolicy: IfNotPresent
+  restartPolicy: Always
+EOF
+```
+
+## 12.2用pod解析默认命名空间中的kubernetes
+
+```shell
+kubectl get svc
+NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
+kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   17h
+
+kubectl exec busybox -n default -- nslookup kubernetes
+Server:    10.96.0.10
+Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
+
+Name:      kubernetes
+Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
+```
+
+## 12.3测试跨命名空间是否可以解析
+
+```shell
+kubectl exec busybox -n default -- nslookup kube-dns.kube-system
+Server:    10.96.0.10
+Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
+
+Name:      kube-dns.kube-system
+Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
+```
+
+## 12.4每个节点都必须要能访问Kubernetes的kubernetes svc 443和kube-dns的service 53
+
+```shell
+telnet 10.96.0.1 443
+Trying 10.96.0.1...
+Connected to 10.96.0.1.
+Escape character is '^]'.
+
+ telnet 10.96.0.10 53
+Trying 10.96.0.10...
+Connected to 10.96.0.10.
+Escape character is '^]'.
+
+curl 10.96.0.10:53
+curl: (52) Empty reply from server
+```
+
+## 12.5Pod和Pod之间要能通
+
+```shell
+kubectl get po -owide
+NAME      READY   STATUS    RESTARTS   AGE   IP              NODE         NOMINATED NODE   READINESS GATES
+busybox   1/1     Running   0          17m   172.27.14.193   k8s-node02   <none>           <none>
+
+ kubectl get po -n kube-system -owide
+NAME                                       READY   STATUS    RESTARTS      AGE   IP               NODE           NOMINATED NODE   READINESS GATES
+calico-kube-controllers-5dffd5886b-4blh6   1/1     Running   0             77m   172.25.244.193   k8s-master01   <none>           <none>
+calico-node-fvbdq                          1/1     Running   1 (75m ago)   77m   192.168.0.31     k8s-master01   <none>           <none>
+calico-node-g8nqd                          1/1     Running   0             77m   192.168.0.34     k8s-node01     <none>           <none>
+calico-node-mdps8                          1/1     Running   0             77m   192.168.0.35     k8s-node02     <none>           <none>
+calico-node-nf4nt                          1/1     Running   0             77m   192.168.0.33     k8s-master03   <none>           <none>
+calico-node-sq2ml                          1/1     Running   0             77m   192.168.0.32     k8s-master02   <none>           <none>
+calico-typha-8445487f56-mg6p8              1/1     Running   0             77m   192.168.0.35     k8s-node02     <none>           <none>
+calico-typha-8445487f56-pxbpj              1/1     Running   0             77m   192.168.0.31     k8s-master01   <none>           <none>
+calico-typha-8445487f56-tnssl              1/1     Running   0             77m   192.168.0.34     k8s-node01     <none>           <none>
+coredns-5db5696c7-67h79                    1/1     Running   0             63m   172.25.92.65     k8s-master02   <none>           <none>
+metrics-server-6bf7dcd649-5fhrw            1/1     Running   0             61m   172.18.195.1     k8s-master03   <none>           <none>
+
+# 进入busybox ping其他节点上的pod
+
+kubectl exec -ti busybox -- sh
+/ # ping 192.168.0.34
+PING 192.168.0.34 (192.168.0.34): 56 data bytes
+64 bytes from 192.168.0.34: seq=0 ttl=63 time=0.358 ms
+64 bytes from 192.168.0.34: seq=1 ttl=63 time=0.668 ms
+64 bytes from 192.168.0.34: seq=2 ttl=63 time=0.637 ms
+64 bytes from 192.168.0.34: seq=3 ttl=63 time=0.624 ms
+64 bytes from 192.168.0.34: seq=4 ttl=63 time=0.907 ms
+
+# 可以连通证明这个pod是可以跨命名空间和跨主机通信的
+```
+
+## 12.6创建三个副本,可以看到3个副本分布在不同的节点上(用完可以删了)
+
+```shell
+cat > deployments.yaml << EOF
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nginx-deployment
+  labels:
+    app: nginx
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: nginx
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - name: nginx
+        image: nginx
+        ports:
+        - containerPort: 80
+
+EOF
+
+kubectl apply -f deployments.yaml
+deployment.apps/nginx-deployment created
+
+kubectl get pod
+NAME                               READY   STATUS    RESTARTS   AGE
+busybox                            1/1     Running   0          6m25s
+nginx-deployment-9456bbbf9-4bmvk   1/1     Running   0          8s
+nginx-deployment-9456bbbf9-9rcdk   1/1     Running   0          8s
+nginx-deployment-9456bbbf9-dqv8s   1/1     Running   0          8s
+
+# 删除nginx
+
+[root@k8s-master01 ~]# kubectl delete -f deployments.yaml
+```
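+
+上面 12.1~12.5 的手工验证也可以整理成一个简单脚本,便于以后每次变更后快速回归(示例脚本,文件名 check-cluster.sh 为自行约定;假设 busybox Pod 已按 12.1 创建,Service 网段与本文一致):
+
+```shell
+cat > check-cluster.sh << 'SCRIPT'
+#!/bin/bash
+echo "== DNS 解析 =="
+kubectl exec busybox -n default -- nslookup kubernetes
+kubectl exec busybox -n default -- nslookup kube-dns.kube-system
+echo "== Service 连通性 =="
+timeout 3 bash -c 'cat < /dev/null > /dev/tcp/10.96.0.1/443' && echo "10.96.0.1:443 OK"
+timeout 3 bash -c 'cat < /dev/null > /dev/tcp/10.96.0.10/53' && echo "10.96.0.10:53 OK"
+echo "== 节点与 Pod 状态 =="
+kubectl get node -owide
+kubectl get pod -A -owide
+SCRIPT
+
+bash check-cluster.sh
+```
+
+# 13.安装dashboard
+
+```shell
+helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
+helm install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --namespace 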
kube-system +``` + +## 13.1更改dashboard的svc为NodePort,如果已是请忽略 + +```shell +kubectl edit svc kubernetes-dashboard -n kube-system + + type: NodePort +``` + +## 13.2查看端口号 + +```shell +kubectl get svc kubernetes-dashboard -n kube-system +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +kubernetes-dashboard NodePort 10.108.120.110 443:30034/TCP 34s +``` + +## 13.3创建token + +```shell +cat > dashboard-user.yaml << EOF +apiVersion: v1 +kind: ServiceAccount +metadata: + name: admin-user + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: admin-user +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: admin-user + namespace: kube-system +EOF + +kubectl apply -f dashboard-user.yaml + +# 创建token +kubectl -n kube-system create token admin-user +eyJhbGciOiJSUzI1NiIsImtpZCI6IlhWQmNELWlxWWRXeDZwTm5iTU9jbTNfMWMxaUhoZkdTYlFaaFg0SVVGT0EifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNjgxNTQ4NjQzLCJpYXQiOjE2ODE1NDUwNDMsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiNzY1ZTU2MjMtZjIyYy00ZGM3LTliODEtYjU4MGY5ZDllZWQ2In19LCJuYmYiOjE2ODE1NDUwNDMsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTphZG1pbi11c2VyIn0.Pqzj9L0rO6FXCpy6yRBx700CjlOpfmXXKG-LIds1lGnz0m1bYwLZpq5RDPWrv_in2NuVu-O3Ej9q1-LExFNhoWWOKdjntRrm4fCRS1JZATcXZdk-ND_dNOVcoyOgon54krXrnmU8kDgcpMJrn7TL03rSn1loVMI0ZL53aRM7JrGFkAnCytkr0vdqZRBnUwvPMDFD9D5wetBGXoaI3fI257w4jp0mRj5nICl5pliMBX2MLM2RfhGVoLkfZdHN83kr597lOkR2UExEBH3qq-vS4tjX7YWrSHgBkKwfaQTv8RpMQYH4StJcsYALlatnG6-ZUBE7mI8d1fl2qSvT7qbjrQ +``` + +## 13.3登录dashboard + +https://192.168.0.31:30034/ + +# 14.ingress安装 + +## 14.1执行部署 + +```shell +wget https://mirrors.chenby.cn/https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/cloud/deploy.yaml + + +vim deploy.yaml + +cat deploy.yaml | grep image: + image: registry.cn-hangzhou.aliyuncs.com/chenby/controller:v1.7.0 + image: registry.cn-hangzhou.aliyuncs.com/chenby/kube-webhook-certgen:v1.3.0 + image: registry.cn-hangzhou.aliyuncs.com/chenby/kube-webhook-certgen:v1.3.0 + + +cat > backend.yaml << EOF +apiVersion: apps/v1 +kind: Deployment +metadata: + name: default-http-backend + labels: + app.kubernetes.io/name: default-http-backend + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: default-http-backend + template: + metadata: + labels: + app.kubernetes.io/name: default-http-backend + spec: + terminationGracePeriodSeconds: 60 + containers: + - name: default-http-backend + image: registry.cn-hangzhou.aliyuncs.com/chenby/defaultbackend-amd64:1.5 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 5 + ports: + - containerPort: 8080 + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi +--- +apiVersion: v1 +kind: Service +metadata: + name: default-http-backend + namespace: kube-system + labels: + app.kubernetes.io/name: default-http-backend +spec: + ports: + - port: 80 + targetPort: 8080 + selector: + app.kubernetes.io/name: default-http-backend +EOF + +kubectl apply -f deploy.yaml +kubectl apply -f backend.yaml + + +cat > backend.yaml << EOF +apiVersion: apps/v1 +kind: Deployment +metadata: + name: hello-server +spec: + replicas: 2 + selector: + matchLabels: + app: 
hello-server
+  template:
+    metadata:
+      labels:
+        app: hello-server
+    spec:
+      containers:
+      - name: hello-server
+        image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/hello-server
+        ports:
+        - containerPort: 9000
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: nginx-demo
+  name: nginx-demo
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: nginx-demo
+  template:
+    metadata:
+      labels:
+        app: nginx-demo
+    spec:
+      containers:
+      - image: nginx
+        name: nginx
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: nginx-demo
+  name: nginx-demo
+spec:
+  selector:
+    app: nginx-demo
+  ports:
+  - port: 8000
+    protocol: TCP
+    targetPort: 80
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: hello-server
+  name: hello-server
+spec:
+  selector:
+    app: hello-server
+  ports:
+  - port: 8000
+    protocol: TCP
+    targetPort: 9000
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: ingress-host-bar
+spec:
+  ingressClassName: nginx
+  rules:
+  - host: "hello.chenby.cn"
+    http:
+      paths:
+      - pathType: Prefix
+        path: "/"
+        backend:
+          service:
+            name: hello-server
+            port:
+              number: 8000
+  - host: "demo.chenby.cn"
+    http:
+      paths:
+      - pathType: Prefix
+        path: "/nginx"
+        backend:
+          service:
+            name: nginx-demo
+            port:
+              number: 8000
+EOF
+
+# 等创建完成后再执行:
+kubectl apply -f backend.yaml
+
+kubectl get ingress
+NAME               CLASS   HOSTS                            ADDRESS        PORTS   AGE
+ingress-host-bar   nginx   hello.chenby.cn,demo.chenby.cn   192.168.0.32   80      7s
+
+```
+
+## 14.2过滤查看ingress端口
+
+```shell
+[root@hello ~/yaml]# kubectl get svc -A | grep ingress
+ingress-nginx   ingress-nginx-controller             NodePort    10.104.231.36   <none>   80:32636/TCP,443:30579/TCP   104s
+ingress-nginx   ingress-nginx-controller-admission   ClusterIP   10.101.85.88    <none>   443/TCP                      105s
+[root@hello ~/yaml]#
+```
+
+# 15.IPv6测试
+
+
+
+```shell
+#部署应用
+
+cat<<EOF | kubectl apply -f -
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: chenby
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: chenby
+  template:
+    metadata:
+      labels:
+        app: chenby
+    spec:
+      hostNetwork: true
+      containers:
+      - name: chenby
+        image: nginx
+        resources:
+          limits:
+            memory: "128Mi"
+            cpu: "500m"
+        ports:
+        - containerPort: 80
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: chenby
+spec:
+  ipFamilyPolicy: PreferDualStack
+  ipFamilies:
+  - IPv6
+  - IPv4
+  type: NodePort
+  selector:
+    app: chenby
+  ports:
+  - port: 80
+    targetPort: 80
+EOF
+
+#查看端口
+[root@k8s-master01 ~]# kubectl get svc
+NAME         TYPE       CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
+chenby       NodePort   fd00::a29c   <none>        80:30779/TCP   5s
+[root@k8s-master01 ~]#
+
+#使用内网访问
+[root@localhost yaml]# curl -I http://[fd00::a29c]
+HTTP/1.1 200 OK
+Server: nginx/1.21.6
+Date: Thu, 05 May 2022 10:20:35 GMT
+Content-Type: text/html
+Content-Length: 615
+Last-Modified: Tue, 25 Jan 2022 15:03:52 GMT
+Connection: keep-alive
+ETag: "61f01158-267"
+Accept-Ranges: bytes
+
+[root@localhost yaml]# curl -I http://192.168.0.31:30779
+HTTP/1.1 200 OK
+Server: nginx/1.21.6
+Date: Thu, 05 May 2022 10:20:59 GMT
+Content-Type: text/html
+Content-Length: 615
+Last-Modified: Tue, 25 Jan 2022 15:03:52 GMT
+Connection: keep-alive
+ETag: "61f01158-267"
+Accept-Ranges: bytes
+
+[root@localhost yaml]#
+
+#使用公网访问
+[root@localhost yaml]# curl -I http://[2409:8a10:9e18:9020::10]:30779
+HTTP/1.1 200 OK
+Server: nginx/1.21.6
+Date: Thu, 05 May 2022 10:20:54 GMT
+Content-Type: text/html
+Content-Length: 615
+Last-Modified: Tue, 25 Jan 2022 15:03:52 GMT
+Connection: keep-alive
+ETag: "61f01158-267"
+Accept-Ranges: bytes
+```
+
+# 16.安装命令行自动补全功能
+
+```shell
+yum install bash-completion -y
+source /usr/share/bash-completion/bash_completion
+source <(kubectl completion bash)
+echo "source <(kubectl completion bash)" >> ~/.bashrc
+```
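+
+除了 kubectl 之外,helm 与 crictl 同样支持生成 bash 补全脚本,可以顺手一起配置(示例,假设两者已按前文安装,且系统已存在 /etc/bash_completion.d/ 目录):
+
+```shell
+# 生成 helm、crictl 的补全脚本并使其在当前会话生效
+helm completion bash > /etc/bash_completion.d/helm
+crictl completion bash > /etc/bash_completion.d/crictl
+source /etc/bash_completion.d/helm
+source /etc/bash_completion.d/crictl
+```
+
+# 附录
+```shell
+镜像版本要自行查看,因为镜像版本是随时更新的,文档无法做到实时更新
+
+# docker pull 镜像
+
+docker pull registry.cn-hangzhou.aliyuncs.com/chenby/cni:master 
+docker pull registry.cn-hangzhou.aliyuncs.com/chenby/node:master 
+docker pull registry.cn-hangzhou.aliyuncs.com/chenby/kube-controllers:master 
+docker pull registry.cn-hangzhou.aliyuncs.com/chenby/typha:master 
+docker pull registry.cn-hangzhou.aliyuncs.com/chenby/coredns:v1.10.0 
+docker pull registry.cn-hangzhou.aliyuncs.com/chenby/pause:3.6 
+docker pull 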
registry.cn-hangzhou.aliyuncs.com/chenby/metrics-server:v0.5.2 +docker pull kubernetesui/dashboard:v2.7.0 +docker pull kubernetesui/metrics-scraper:v1.0.8 +docker pull quay.io/cilium/cilium:v1.12.6 +docker pull quay.io/cilium/certgen:v0.1.8 +docker pull quay.io/cilium/hubble-relay:v1.12.6 +docker pull quay.io/cilium/hubble-ui-backend:v0.9.2 +docker pull quay.io/cilium/hubble-ui:v0.9.2 +docker pull quay.io/cilium/cilium-etcd-operator:v2.0.7 +docker pull quay.io/cilium/operator:v1.12.6 +docker pull quay.io/cilium/clustermesh-apiserver:v1.12.6 +docker pull quay.io/coreos/etcd:v3.5.4 +docker pull quay.io/cilium/startup-script:d69851597ea019af980891a4628fb36b7880ec26 + +# docker 保存镜像 +docker save registry.cn-hangzhou.aliyuncs.com/chenby/cni:master -o cni.tar +docker save registry.cn-hangzhou.aliyuncs.com/chenby/node:master -o node.tar +docker save registry.cn-hangzhou.aliyuncs.com/chenby/typha:master -o typha.tar +docker save registry.cn-hangzhou.aliyuncs.com/chenby/kube-controllers:master -o kube-controllers.tar +docker save registry.cn-hangzhou.aliyuncs.com/chenby/coredns:v1.10.0 -o coredns.tar +docker save registry.cn-hangzhou.aliyuncs.com/chenby/pause:3.6 -o pause.tar +docker save registry.cn-hangzhou.aliyuncs.com/chenby/metrics-server:v0.5.2 -o metrics-server.tar +docker save kubernetesui/dashboard:v2.7.0 -o dashboard.tar +docker save kubernetesui/metrics-scraper:v1.0.8 -o metrics-scraper.tar +docker save quay.io/cilium/cilium:v1.12.6 -o cilium.tar +docker save quay.io/cilium/certgen:v0.1.8 -o certgen.tar +docker save quay.io/cilium/hubble-relay:v1.12.6 -o hubble-relay.tar +docker save quay.io/cilium/hubble-ui-backend:v0.9.2 -o hubble-ui-backend.tar +docker save quay.io/cilium/hubble-ui:v0.9.2 -o hubble-ui.tar +docker save quay.io/cilium/cilium-etcd-operator:v2.0.7 -o cilium-etcd-operator.tar +docker save quay.io/cilium/operator:v1.12.6 -o operator.tar +docker save quay.io/cilium/clustermesh-apiserver:v1.12.6 -o clustermesh-apiserver.tar +docker save quay.io/coreos/etcd:v3.5.4 -o etcd.tar +docker save quay.io/cilium/startup-script:d69851597ea019af980891a4628fb36b7880ec26 -o startup-script.tar + +# 传输到各个节点 +for NODE in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02; do scp -r images/ $NODE:/root/ ; done + +# 创建命名空间 +ctr ns create k8s.io + +# 导入镜像 +ctr --namespace k8s.io image import images/cni.tar +ctr --namespace k8s.io image import images/node.tar +ctr --namespace k8s.io image import images/typha.tar +ctr --namespace k8s.io image import images/kube-controllers.tar +ctr --namespace k8s.io image import images/coredns.tar +ctr --namespace k8s.io image import images/pause.tar +ctr --namespace k8s.io image import images/metrics-server.tar +ctr --namespace k8s.io image import images/dashboard.tar +ctr --namespace k8s.io image import images/metrics-scraper.tar +ctr --namespace k8s.io image import images/dashboard.tar +ctr --namespace k8s.io image import images/metrics-scraper.tar +ctr --namespace k8s.io image import images/cilium.tar +ctr --namespace k8s.io image import images/certgen.tar +ctr --namespace k8s.io image import images/hubble-relay.tar +ctr --namespace k8s.io image import images/hubble-ui-backend.tar +ctr --namespace k8s.io image import images/hubble-ui.tar +ctr --namespace k8s.io image import images/cilium-etcd-operator.tar +ctr --namespace k8s.io image import images/operator.tar +ctr --namespace k8s.io image import images/clustermesh-apiserver.tar +ctr --namespace k8s.io image import images/etcd.tar +ctr --namespace k8s.io image import images/startup-script.tar + +# 
pull tar包 解压后 +helm pull cilium/cilium + +# 查看镜像版本 +root@hello:~/cilium# cat values.yaml| grep tag: -C1 + repository: "quay.io/cilium/cilium" + tag: "v1.12.6" + pullPolicy: "IfNotPresent" +-- + repository: "quay.io/cilium/certgen" + tag: "v0.1.8@sha256:4a456552a5f192992a6edcec2febb1c54870d665173a33dc7d876129b199ddbd" + pullPolicy: "IfNotPresent" +-- + repository: "quay.io/cilium/hubble-relay" + tag: "v1.12.6" + # hubble-relay-digest +-- + repository: "quay.io/cilium/hubble-ui-backend" + tag: "v0.9.2@sha256:a3ac4d5b87889c9f7cc6323e86d3126b0d382933bd64f44382a92778b0cde5d7" + pullPolicy: "IfNotPresent" +-- + repository: "quay.io/cilium/hubble-ui" + tag: "v0.9.2@sha256:d3596efc94a41c6b772b9afe6fe47c17417658956e04c3e2a28d293f2670663e" + pullPolicy: "IfNotPresent" +-- + repository: "quay.io/cilium/cilium-etcd-operator" + tag: "v2.0.7@sha256:04b8327f7f992693c2cb483b999041ed8f92efc8e14f2a5f3ab95574a65ea2dc" + pullPolicy: "IfNotPresent" +-- + repository: "quay.io/cilium/operator" + tag: "v1.12.6" + # operator-generic-digest +-- + repository: "quay.io/cilium/startup-script" + tag: "d69851597ea019af980891a4628fb36b7880ec26" + pullPolicy: "IfNotPresent" +-- + repository: "quay.io/cilium/cilium" + tag: "v1.12.6" + # cilium-digest +-- + repository: "quay.io/cilium/clustermesh-apiserver" + tag: "v1.12.6" + # clustermesh-apiserver-digest +-- + repository: "quay.io/coreos/etcd" + tag: "v3.5.4@sha256:795d8660c48c439a7c3764c2330ed9222ab5db5bb524d8d0607cac76f7ba82a3" + pullPolicy: "IfNotPresent" + +``` + +> **关于** +> +> https://www.oiox.cn/ +> +> https://www.oiox.cn/index.php/start-page.html +> +> **CSDN、GitHub、知乎、开源中国、思否、掘金、简书、华为云、阿里云、腾讯云、哔哩哔哩、今日头条、新浪微博、个人博客** +> +> **全网可搜《小陈运维》** +> +> **文章主要发布于微信公众号:《Linux运维交流社区》**