From f39756222031785ba48f1ace7a9de08404558c5a Mon Sep 17 00:00:00 2001
From: cby <46200206+cby-chen@users.noreply.github.com>
Date: Tue, 8 Mar 2022 14:24:29 +0800
Subject: [PATCH] Update v1.23.4-binary-install.md
---
v1.23.4-binary-install.md | 4230 +++++++++++++++++++------------------
1 file changed, 2116 insertions(+), 2114 deletions(-)
diff --git a/v1.23.4-binary-install.md b/v1.23.4-binary-install.md
index 5aa7504..78c2c46 100644
--- a/v1.23.4-binary-install.md
+++ b/v1.23.4-binary-install.md
@@ -1,1584 +1,1586 @@
-# 1.环境
-
-| 主机名称 | IP地址 | 说明 | 软件 |
-| -------- | ------------ | -------- | ---------------------------------------------------------------------------------------------- |
-| Master01 | 192.168.1.30 | master节点 | kube-apiserver、kube-controller-manager、kube-scheduler、etcd、kubelet、kube-proxy、nfs-client |
-| Master02 | 192.168.1.31 | master节点 | kube-apiserver、kube-controller-manager、kube-scheduler、etcd、kubelet、kube-proxy、nfs-client |
-| Master03 | 192.168.1.32 | master节点 | kube-apiserver、kube-controller-manager、kube-scheduler、etcd、kubelet、kube-proxy、nfs-client |
-| Node01 | 192.168.1.33 | node节点 | kubelet、kube-proxy、nfs-client |
-| Node02 | 192.168.1.34 | node节点 | kubelet、kube-proxy、nfs-client |
-| Node03 | 192.168.1.35 | node节点 | kubelet、kube-proxy、nfs-client |
-| Node04 | 192.168.1.36 | node节点 | kubelet、kube-proxy、nfs-client |
-| Node05 | 192.168.1.37 | node节点 | kubelet、kube-proxy、nfs-client |
-| Lb01 | 192.168.1.38 | Lb01节点 | haproxy、keepalived |
-| Lb02 | 192.168.1.39 | Lb02节点 | haproxy、keepalived |
-| | 192.168.1.88 | VIP | |
-| | | | |
-
-| 软件 | 版本 |
-|:------------------------------------------------------------------------ |:-------------------------- |
-| 内核 | 5.16.7-1.el8.elrepo.x86_64 |
-| CentOS 8 | v8 |
-| kube-apiserver、kube-controller-manager、kube-scheduler、kubelet、kube-proxy | v1.23.4 |
-| etcd | v3.5.2 |
-| docker-ce | v20.10.9 |
-| containerd | v1.6.0 |
-| cfssl | v1.6.1 |
-| cni | v1.6.0 |
-| crictl | v1.23.0 |
-| haproxy | v1.8.27 |
-| keepalived | v2.1.5 |
-
-网段
-
-物理主机:192.168.1.0/24
-
-service:10.96.0.0/12
-
-pod:172.16.0.0/12
-
-如果有条件建议k8s集群与etcd集群分开安装
-
-## 1.1.k8s基础系统环境配置
-
-### 1.2.配置IP
-
-```shell
-ssh root@192.168.1.76 "nmcli con mod ens18 ipv4.addresses 192.168.1.30/24; nmcli con mod ens18 ipv4.gateway 192.168.1.99; nmcli con mod ens18 ipv4.method manual; nmcli con mod ens18 ipv4.dns "8.8.8.8"; nmcli con up ens18"
-ssh root@192.168.1.77 "nmcli con mod ens18 ipv4.addresses 192.168.1.31/24; nmcli con mod ens18 ipv4.gateway 192.168.1.99; nmcli con mod ens18 ipv4.method manual; nmcli con mod ens18 ipv4.dns "8.8.8.8"; nmcli con up ens18"
-ssh root@192.168.1.78 "nmcli con mod ens18 ipv4.addresses 192.168.1.32/24; nmcli con mod ens18 ipv4.gateway 192.168.1.99; nmcli con mod ens18 ipv4.method manual; nmcli con mod ens18 ipv4.dns "8.8.8.8"; nmcli con up ens18"
-ssh root@192.168.1.79 "nmcli con mod ens18 ipv4.addresses 192.168.1.33/24; nmcli con mod ens18 ipv4.gateway 192.168.1.99; nmcli con mod ens18 ipv4.method manual; nmcli con mod ens18 ipv4.dns "8.8.8.8"; nmcli con up ens18"
-ssh root@192.168.1.80 "nmcli con mod ens18 ipv4.addresses 192.168.1.34/24; nmcli con mod ens18 ipv4.gateway 192.168.1.99; nmcli con mod ens18 ipv4.method manual; nmcli con mod ens18 ipv4.dns "8.8.8.8"; nmcli con up ens18"
-ssh root@192.168.1.86 "nmcli con mod ens18 ipv4.addresses 192.168.1.35/24; nmcli con mod ens18 ipv4.gateway 192.168.1.99; nmcli con mod ens18 ipv4.method manual; nmcli con mod ens18 ipv4.dns "8.8.8.8"; nmcli con up ens18"
-ssh root@192.168.1.87 "nmcli con mod ens18 ipv4.addresses 192.168.1.36/24; nmcli con mod ens18 ipv4.gateway 192.168.1.99; nmcli con mod ens18 ipv4.method manual; nmcli con mod ens18 ipv4.dns "8.8.8.8"; nmcli con up ens18"
-ssh root@192.168.1.166 "nmcli con mod ens18 ipv4.addresses 192.168.1.37/24; nmcli con mod ens18 ipv4.gateway 192.168.1.99; nmcli con mod ens18 ipv4.method manual; nmcli con mod ens18 ipv4.dns "8.8.8.8"; nmcli con up ens18"
-ssh root@192.168.1.100 "nmcli con mod ens18 ipv4.addresses 192.168.1.38/24; nmcli con mod ens18 ipv4.gateway 192.168.1.99; nmcli con mod ens18 ipv4.method manual; nmcli con mod ens18 ipv4.dns "8.8.8.8"; nmcli con up ens18"
-ssh root@192.168.1.191 "nmcli con mod ens18 ipv4.addresses 192.168.1.39/24; nmcli con mod ens18 ipv4.gateway 192.168.1.99; nmcli con mod ens18 ipv4.method manual; nmcli con mod ens18 ipv4.dns "8.8.8.8"; nmcli con up ens18"
-```
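-
-A quick reachability sweep right after re-addressing catches typos in the new plan early. This is a minimal sketch, assuming the 192.168.1.30-39 layout and the ens18 interface used above.
-
-```shell
-# Ping every planned address once; adjust the range to your own plan.
-for ip in 192.168.1.{30..39}; do
-    ping -c 1 -W 1 "$ip" >/dev/null 2>&1 && echo "$ip up" || echo "$ip DOWN"
-done
-
-# On a single host, confirm the address actually applied to ens18
-ip -4 addr show ens18
-```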
-
-### 1.3.设置主机名
-
-```shell
-hostnamectl set-hostname k8s-master01
-hostnamectl set-hostname k8s-master02
-hostnamectl set-hostname k8s-master03
-hostnamectl set-hostname k8s-node01
-hostnamectl set-hostname k8s-node02
-hostnamectl set-hostname k8s-node03
-hostnamectl set-hostname k8s-node04
-hostnamectl set-hostname k8s-node05
-hostnamectl set-hostname lb01
-hostnamectl set-hostname lb02
-```
-
-### 1.4.配置yum源
-
-```shell
-sudo sed -e 's|^mirrorlist=|#mirrorlist=|g' \
- -e 's|^#baseurl=http://mirror.centos.org/$contentdir|baseurl=http://192.168.1.123/centos|g' \
- -i.bak \
- /etc/yum.repos.d/CentOS-*.repo
-
-
-sed -e 's|^mirrorlist=|#mirrorlist=|g' -e 's|^#baseurl=http://mirror.centos.org/\$contentdir|baseurl=http://192.168.1.123/centos|g' -i.bak /etc/yum.repos.d/CentOS-*.repo
-```
-
-### 1.5.安装一些必备工具
-
-```shell
-yum -y install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git network-scripts tar curl
-```
-
-### 1.6.安装docker工具
-
-```shell
-curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
-```
-
-### 1.7.关闭防火墙
-
-```shell
-systemctl disable --now firewalld
-```
-
-### 1.8.关闭SELinux
-
-```shell
-setenforce 0
-sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
-```
-
-### 1.9.关闭交换分区
-
-```shell
-sed -ri 's/.*swap.*/#&/' /etc/fstab
-swapoff -a && sysctl -w vm.swappiness=0
-cat /etc/fstab
-
-# /dev/mapper/centos-swap swap swap defaults 0 0
-```
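-
-A short verification sketch, assuming the commands above have already run on the node being checked:
-
-```shell
-# swapon prints nothing once every swap device is off
-swapon --show
-# the Swap line of free should read all zeros
-free -m | grep -i swap
-# swappiness should report 0
-cat /proc/sys/vm/swappiness
-```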
-
-### 1.10.关闭NetworkManager 并启用 network
-
-```shell
-systemctl disable --now NetworkManager
-systemctl start network && systemctl enable network
-```
-
-### 1.11.进行时间同步
-
-```shell
-# Server side (192.168.1.30 in this guide)
-
-yum install chrony -y
-cat > /etc/chrony.conf << EOF
-pool ntp.aliyun.com iburst
-driftfile /var/lib/chrony/drift
-makestep 1.0 3
-rtcsync
-allow 192.168.1.0/24
-local stratum 10
-keyfile /etc/chrony.keys
-leapsectz right/UTC
-logdir /var/log/chrony
-EOF
-
-systemctl restart chronyd
-systemctl enable chronyd
-
-# Client side (all other nodes)
-
-yum install chrony -y
-vim /etc/chrony.conf
-cat /etc/chrony.conf | grep -v "^#" | grep -v "^$"
-pool 192.168.1.30 iburst
-driftfile /var/lib/chrony/drift
-makestep 1.0 3
-rtcsync
-keyfile /etc/chrony.keys
-leapsectz right/UTC
-logdir /var/log/chrony
-
-systemctl restart chronyd ; systemctl enable chronyd
-
-
-yum install chrony -y ; sed -i "s#2.centos.pool.ntp.org#192.168.1.30#g" /etc/chrony.conf ; systemctl restart chronyd ; systemctl enable chronyd
-
-
-# Verify from a client
-
-chronyc sources -v
-```
-
-### 1.12.配置ulimit
-
-```shell
-ulimit -SHn 65535
-cat >> /etc/security/limits.conf << EOF
-* soft nofile 655360
-* hard nofile 655360
-* soft nproc 655350
-* hard nproc 655350
-* soft memlock unlimited
-* hard memlock unlimited
-EOF
-
-cat >> /etc/modules-load.d/ipvs.conf << EOF
-ip_vs
-ip_vs_rr
-ip_vs_wrr
-ip_vs_sh
-nf_conntrack
-EOF
-
-cat << EOF > /etc/sysctl.d/k8s.conf
-net.ipv4.ip_forward = 1
-net.bridge.bridge-nf-call-iptables = 1
-fs.may_detach_mounts = 1
-vm.overcommit_memory=1
-vm.panic_on_oom=0
-fs.inotify.max_user_watches=89100
-fs.file-max=52706963
-fs.nr_open=52706963
-net.netfilter.nf_conntrack_max=2310720
-
-
-net.ipv4.tcp_keepalive_time = 600
-net.ipv4.tcp_keepalive_probes = 3
-net.ipv4.tcp_keepalive_intvl =15
-net.ipv4.tcp_max_tw_buckets = 36000
-net.ipv4.tcp_tw_reuse = 1
-net.ipv4.tcp_max_orphans = 327680
-net.ipv4.tcp_orphan_retries = 3
-net.ipv4.tcp_syncookies = 1
-net.ipv4.tcp_max_syn_backlog = 16384
-net.ipv4.ip_conntrack_max = 65536
-net.ipv4.tcp_max_syn_backlog = 16384
-net.ipv4.tcp_timestamps = 0
-net.core.somaxconn = 16384
-EOF
-
-sysctl --system
-```
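-
-A spot check that the key values are live, as a sketch; the bridge key only resolves once the br_netfilter module is loaded.
-
-```shell
-# Verify a few of the values after sysctl --system
-sysctl net.ipv4.ip_forward net.core.somaxconn net.netfilter.nf_conntrack_max
-# Requires the br_netfilter module to be loaded first
-sysctl net.bridge.bridge-nf-call-iptables
-```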
-
-### 1.18.所有节点配置hosts本地解析
-
-```shell
-cat > /etc/hosts << EOF
-192.168.1.30 k8s-master01
-192.168.1.31 k8s-master02
-192.168.1.32 k8s-master03
-192.168.1.33 k8s-node01
-192.168.1.34 k8s-node02
-192.168.1.35 k8s-node03
-192.168.1.36 k8s-node04
-192.168.1.37 k8s-node05
-192.168.1.38 lb01
-192.168.1.39 lb02
-EOF
-
-cat > /etc/crictl.yaml << EOF
-runtime-endpoint: unix:///run/containerd/containerd.sock
-image-endpoint: unix:///run/containerd/containerd.sock
-timeout: 10
-debug: false
-EOF
-```
-
-### 4.1.1master01配置
-
-```shell
-cat > /etc/etcd/etcd.config.yml << EOF
-name: 'k8s-master01'
-data-dir: /var/lib/etcd
-wal-dir: /var/lib/etcd/wal
-snapshot-count: 5000
-heartbeat-interval: 100
-election-timeout: 1000
-quota-backend-bytes: 0
-listen-peer-urls: 'https://192.168.1.30:2380'
-listen-client-urls: 'https://192.168.1.30:2379,http://127.0.0.1:2379'
-max-snapshots: 3
-max-wals: 5
-cors:
-initial-advertise-peer-urls: 'https://192.168.1.30:2380'
-advertise-client-urls: 'https://192.168.1.30:2379'
-discovery:
-discovery-fallback: 'proxy'
-discovery-proxy:
-discovery-srv:
-initial-cluster: 'k8s-master01=https://192.168.1.30:2380,k8s-master02=https://192.168.1.31:2380,k8s-master03=https://192.168.1.32:2380'
-initial-cluster-token: 'etcd-k8s-cluster'
-initial-cluster-state: 'new'
-strict-reconfig-check: false
-enable-v2: true
-enable-pprof: true
-proxy: 'off'
-proxy-failure-wait: 5000
-proxy-refresh-interval: 30000
-proxy-dial-timeout: 1000
-proxy-write-timeout: 5000
-proxy-read-timeout: 0
-client-transport-security:
- cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
- key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
- client-cert-auth: true
- trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
- auto-tls: true
-peer-transport-security:
- cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
- key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
- peer-client-cert-auth: true
- trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
- auto-tls: true
-debug: false
-log-package-levels:
-log-outputs: [default]
-force-new-cluster: false
-EOF
-```
-
-### 4.1.2master02配置
-
-```shell
-cat > /etc/etcd/etcd.config.yml << EOF
-name: 'k8s-master02'
-data-dir: /var/lib/etcd
-wal-dir: /var/lib/etcd/wal
-snapshot-count: 5000
-heartbeat-interval: 100
-election-timeout: 1000
-quota-backend-bytes: 0
-listen-peer-urls: 'https://192.168.1.31:2380'
-listen-client-urls: 'https://192.168.1.31:2379,http://127.0.0.1:2379'
-max-snapshots: 3
-max-wals: 5
-cors:
-initial-advertise-peer-urls: 'https://192.168.1.31:2380'
-advertise-client-urls: 'https://192.168.1.31:2379'
-discovery:
-discovery-fallback: 'proxy'
-discovery-proxy:
-discovery-srv:
-initial-cluster: 'k8s-master01=https://192.168.1.30:2380,k8s-master02=https://192.168.1.31:2380,k8s-master03=https://192.168.1.32:2380'
-initial-cluster-token: 'etcd-k8s-cluster'
-initial-cluster-state: 'new'
-strict-reconfig-check: false
-enable-v2: true
-enable-pprof: true
-proxy: 'off'
-proxy-failure-wait: 5000
-proxy-refresh-interval: 30000
-proxy-dial-timeout: 1000
-proxy-write-timeout: 5000
-proxy-read-timeout: 0
-client-transport-security:
- cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
- key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
- client-cert-auth: true
- trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
- auto-tls: true
-peer-transport-security:
- cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
- key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
- peer-client-cert-auth: true
- trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
- auto-tls: true
-debug: false
-log-package-levels:
-log-outputs: [default]
-force-new-cluster: false
-EOF
-```
-
-### 4.1.3master03配置
-
-```shell
-cat > /etc/etcd/etcd.config.yml << EOF
-name: 'k8s-master03'
-data-dir: /var/lib/etcd
-wal-dir: /var/lib/etcd/wal
-snapshot-count: 5000
-heartbeat-interval: 100
-election-timeout: 1000
-quota-backend-bytes: 0
-listen-peer-urls: 'https://192.168.1.32:2380'
-listen-client-urls: 'https://192.168.1.32:2379,http://127.0.0.1:2379'
-max-snapshots: 3
-max-wals: 5
-cors:
-initial-advertise-peer-urls: 'https://192.168.1.32:2380'
-advertise-client-urls: 'https://192.168.1.32:2379'
-discovery:
-discovery-fallback: 'proxy'
-discovery-proxy:
-discovery-srv:
-initial-cluster: 'k8s-master01=https://192.168.1.30:2380,k8s-master02=https://192.168.1.31:2380,k8s-master03=https://192.168.1.32:2380'
-initial-cluster-token: 'etcd-k8s-cluster'
-initial-cluster-state: 'new'
-strict-reconfig-check: false
-enable-v2: true
-enable-pprof: true
-proxy: 'off'
-proxy-failure-wait: 5000
-proxy-refresh-interval: 30000
-proxy-dial-timeout: 1000
-proxy-write-timeout: 5000
-proxy-read-timeout: 0
-client-transport-security:
- cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
- key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
- client-cert-auth: true
- trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
- auto-tls: true
-peer-transport-security:
- cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
- key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
- peer-client-cert-auth: true
- trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
- auto-tls: true
-debug: false
-log-package-levels:
-log-outputs: [default]
-force-new-cluster: false
-EOF
-```
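-
-The three etcd configs should differ only in `name` and the per-node listen/advertise URLs, so a quick diff across the masters catches copy-paste mistakes. A sketch run from master01, assuming the same SSH access used elsewhere in this guide:
-
-```shell
-# Compare the local config against the other masters; only the node name
-# and the per-node IPs should differ.
-for NODE in k8s-master02 k8s-master03; do
-    echo "==== ${NODE} ===="
-    ssh ${NODE} cat /etc/etcd/etcd.config.yml | diff /etc/etcd/etcd.config.yml - || true
-done
-```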
-
-## 4.2.创建service(所有master节点操作)
-
-### 4.2.1创建etcd.service并启动
-
-```shell
-cat > /usr/lib/systemd/system/etcd.service << EOF
-
-[Unit]
-Description=Etcd Service
-Documentation=https://coreos.com/etcd/docs/latest/
-After=network.target
-
-[Service]
-Type=notify
-ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
-Restart=on-failure
-RestartSec=10
-LimitNOFILE=65536
-
-[Install]
-WantedBy=multi-user.target
-Alias=etcd3.service
-
-EOF
-```
-
-### 4.2.2创建etcd证书目录
-
-```shell
-mkdir /etc/kubernetes/pki/etcd
-ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
-systemctl daemon-reload
-systemctl enable --now etcd
-```
-
-### 4.2.3查看etcd状态
-
-```shell
-export ETCDCTL_API=3
-etcdctl --endpoints="192.168.1.32:2379,192.168.1.31:2379,192.168.1.30:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint status --write-out=table
-+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
-| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
-+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
-| 192.168.1.32:2379 | 56875ab4a12c94e8 | 3.5.1 | 25 kB | false | false | 2 | 8 | 8 | |
-| 192.168.1.31:2379 | 33df6a8fe708d3fd | 3.5.1 | 25 kB | true | false | 2 | 8 | 8 | |
-| 192.168.1.30:2379 | 58fbe5ec9743048f | 3.5.1 | 20 kB | false | false | 2 | 8 | 8 | |
-+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
-```
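-
-`endpoint status` shows the leader and DB size; `endpoint health` is a quicker pass/fail per member. A sketch using the same certificates:
-
-```shell
-etcdctl --endpoints="192.168.1.30:2379,192.168.1.31:2379,192.168.1.32:2379" \
-  --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem \
-  --cert=/etc/kubernetes/pki/etcd/etcd.pem \
-  --key=/etc/kubernetes/pki/etcd/etcd-key.pem \
-  endpoint health --write-out=table
-```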
-
-# 5.高可用配置
-
-## 5.1在lb01和lb02两台服务器上操作
-
-### 5.1.1安装keepalived和haproxy服务
-
-```shell
-
-systemctl disable --now firewalld
-
-setenforce 0
-sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
-
-
-yum -y install keepalived haproxy
-```
-
-### 5.1.2修改haproxy配置文件(两台配置文件一样)
-
-```shell
-# cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak
-
-cat >/etc/haproxy/haproxy.cfg<<"EOF"
-global
- maxconn 2000
- ulimit-n 16384
- log 127.0.0.1 local0 err
- stats timeout 30s
-
-defaults
- log global
- mode http
- option httplog
- timeout connect 5000
- timeout client 50000
- timeout server 50000
- timeout http-request 15s
- timeout http-keep-alive 15s
-
-
-frontend monitor-in
- bind *:33305
- mode http
- option httplog
- monitor-uri /monitor
-
-frontend k8s-master
- bind 0.0.0.0:8443
- bind 127.0.0.1:8443
- mode tcp
- option tcplog
- tcp-request inspect-delay 5s
- default_backend k8s-master
-
-
-backend k8s-master
- mode tcp
- option tcplog
- option tcp-check
- balance roundrobin
- default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
- server k8s-master01 192.168.1.30:6443 check
- server k8s-master02 192.168.1.31:6443 check
- server k8s-master03 192.168.1.32:6443 check
-EOF
-```
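-
-The `monitor-in` frontend gives haproxy a simple liveness URL on port 33305, and `k8s-master` forwards 8443 to the three apiservers. Once the services are started in 5.1.6, a sketch of a local check on each lb node:
-
-```shell
-# Expect HTTP 200 from the monitor URI and listeners on 8443/33305
-curl -s -o /dev/null -w '%{http_code}\n' http://127.0.0.1:33305/monitor
-ss -lntp | grep -E ':(8443|33305)'
-```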
-
-### 5.1.3lb01配置keepalived master节点
-
-```shell
-#cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
-
-cat > /etc/keepalived/keepalived.conf << EOF
-! Configuration File for keepalived
-
-global_defs {
- router_id LVS_DEVEL
-}
-vrrp_script chk_apiserver {
- script "/etc/keepalived/check_apiserver.sh"
- interval 5
- weight -5
- fall 2
- rise 1
-}
-vrrp_instance VI_1 {
- state MASTER
- interface ens18
- mcast_src_ip 192.168.1.38
- virtual_router_id 51
- priority 100
- nopreempt
- advert_int 2
- authentication {
- auth_type PASS
- auth_pass K8SHA_KA_AUTH
- }
- virtual_ipaddress {
- 192.168.1.88
- }
- track_script {
- chk_apiserver
-} }
-
-EOF
-```
-
-### 5.1.4lb02配置keepalived backup节点
-
-```shell
-# cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
-
-cat > /etc/keepalived/keepalived.conf << EOF
-! Configuration File for keepalived
-
-global_defs {
- router_id LVS_DEVEL
-}
-vrrp_script chk_apiserver {
- script "/etc/keepalived/check_apiserver.sh"
- interval 5
- weight -5
- fall 2
- rise 1
-
-}
-vrrp_instance VI_1 {
- state BACKUP
- interface ens18
- mcast_src_ip 192.168.1.39
- virtual_router_id 51
- priority 50
- nopreempt
- advert_int 2
- authentication {
- auth_type PASS
- auth_pass K8SHA_KA_AUTH
- }
- virtual_ipaddress {
- 192.168.1.88
- }
- track_script {
- chk_apiserver
-} }
-
-EOF
-```
-
-### 5.1.5健康检查脚本配置(两台lb主机)
-
-```shell
-cat > /etc/keepalived/check_apiserver.sh << EOF
-#!/bin/bash
-
-err=0
-for k in \$(seq 1 3)
-do
- check_code=\$(pgrep haproxy)
- if [[ \$check_code == "" ]]; then
- err=\$(expr \$err + 1)
- sleep 1
- continue
- else
- err=0
- break
- fi
-done
-
-if [[ \$err != "0" ]]; then
- echo "systemctl stop keepalived"
- /usr/bin/systemctl stop keepalived
- exit 1
-else
- exit 0
-fi
-EOF
-
-# 给脚本授权
-
-chmod +x /etc/keepalived/check_apiserver.sh
-```
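-
-The script lets keepalived drop the VIP once haproxy has been missing for three consecutive checks. It can also be exercised by hand, as a sketch (run after the services from 5.1.6 are up):
-
-```shell
-# With haproxy running this exits 0; with haproxy stopped it stops keepalived and exits 1
-bash /etc/keepalived/check_apiserver.sh; echo "exit code: $?"
-```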
-
-### 5.1.6启动服务
-
-```shell
-systemctl daemon-reload
-systemctl enable --now haproxy
-systemctl enable --now keepalived
-```
-
-### 5.1.7测试高可用
-
-```shell
-# The VIP should answer ping
-
-[root@k8s-node02 ~]# ping 192.168.1.88
-
-# The VIP should accept connections on the haproxy port
-
-[root@k8s-node02 ~]# telnet 192.168.1.88 8443
-
-# Shut down the active lb node and check that the VIP fails over to the backup
-```
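-
-To see which lb node currently owns the VIP while testing failover, a small sketch assuming the ens18 interface from the keepalived configs:
-
-```shell
-# Run on lb01 and lb02: only the active node lists 192.168.1.88
-ip -4 addr show ens18 | grep -q 192.168.1.88 && echo "VIP is here" || echo "VIP not on this node"
-```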
-
-# 6.k8s组件配置(区别于第4点)
-
-所有k8s节点创建以下目录
-
-```shell
-mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
-```
-
-## 6.1.创建apiserver(所有master节点)
-
-### 6.1.1master01节点配置
-
-```shell
-cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
-
-[Unit]
-Description=Kubernetes API Server
-Documentation=https://github.com/kubernetes/kubernetes
-After=network.target
-
-[Service]
-ExecStart=/usr/local/bin/kube-apiserver \
- --v=2 \
- --logtostderr=true \
- --allow-privileged=true \
- --bind-address=0.0.0.0 \
- --secure-port=6443 \
- --insecure-port=0 \
- --advertise-address=192.168.1.30 \
- --service-cluster-ip-range=10.96.0.0/12 \
- --service-node-port-range=30000-32767 \
- --etcd-servers=https://192.168.1.30:2379,https://192.168.1.31:2379,https://192.168.1.32:2379 \
- --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
- --etcd-certfile=/etc/etcd/ssl/etcd.pem \
- --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
- --client-ca-file=/etc/kubernetes/pki/ca.pem \
- --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
- --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
- --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
- --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
- --service-account-key-file=/etc/kubernetes/pki/sa.pub \
- --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
- --service-account-issuer=https://kubernetes.default.svc.cluster.local \
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
- --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
- --authorization-mode=Node,RBAC \
- --enable-bootstrap-token-auth=true \
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
- --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
- --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
- --requestheader-allowed-names=aggregator \
- --requestheader-group-headers=X-Remote-Group \
- --requestheader-extra-headers-prefix=X-Remote-Extra- \
- --requestheader-username-headers=X-Remote-User
- # --token-auth-file=/etc/kubernetes/token.csv
-
-Restart=on-failure
-RestartSec=10s
-LimitNOFILE=65535
-
-[Install]
-WantedBy=multi-user.target
-
-EOF
-```
-
-### 6.1.2master02节点配置
-
-```shell
-cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
-[Unit]
-Description=Kubernetes API Server
-Documentation=https://github.com/kubernetes/kubernetes
-After=network.target
-
-[Service]
-ExecStart=/usr/local/bin/kube-apiserver \
- --v=2 \
- --logtostderr=true \
- --allow-privileged=true \
- --bind-address=0.0.0.0 \
- --secure-port=6443 \
- --insecure-port=0 \
- --advertise-address=192.168.1.31 \
- --service-cluster-ip-range=10.96.0.0/12 \
- --service-node-port-range=30000-32767 \
- --etcd-servers=https://192.168.1.30:2379,https://192.168.1.31:2379,https://192.168.1.32:2379 \
- --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
- --etcd-certfile=/etc/etcd/ssl/etcd.pem \
- --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
- --client-ca-file=/etc/kubernetes/pki/ca.pem \
- --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
- --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
- --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
- --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
- --service-account-key-file=/etc/kubernetes/pki/sa.pub \
- --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
- --service-account-issuer=https://kubernetes.default.svc.cluster.local \
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
- --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
- --authorization-mode=Node,RBAC \
- --enable-bootstrap-token-auth=true \
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
- --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
- --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
- --requestheader-allowed-names=aggregator \
- --requestheader-group-headers=X-Remote-Group \
- --requestheader-extra-headers-prefix=X-Remote-Extra- \
- --requestheader-username-headers=X-Remote-User
- # --token-auth-file=/etc/kubernetes/token.csv
-
-Restart=on-failure
-RestartSec=10s
-LimitNOFILE=65535
-
-[Install]
-WantedBy=multi-user.target
-
-EOF
-```
-
-### 6.1.3master03节点配置
-
-```shell
-cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
-
-[Unit]
-Description=Kubernetes API Server
-Documentation=https://github.com/kubernetes/kubernetes
-After=network.target
-
-[Service]
-ExecStart=/usr/local/bin/kube-apiserver \
- --v=2 \
- --logtostderr=true \
- --allow-privileged=true \
- --bind-address=0.0.0.0 \
- --secure-port=6443 \
- --insecure-port=0 \
- --advertise-address=192.168.1.32 \
- --service-cluster-ip-range=10.96.0.0/12 \
- --service-node-port-range=30000-32767 \
- --etcd-servers=https://192.168.1.30:2379,https://192.168.1.31:2379,https://192.168.1.32:2379 \
- --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
- --etcd-certfile=/etc/etcd/ssl/etcd.pem \
- --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
- --client-ca-file=/etc/kubernetes/pki/ca.pem \
- --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
- --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
- --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
- --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
- --service-account-key-file=/etc/kubernetes/pki/sa.pub \
- --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
- --service-account-issuer=https://kubernetes.default.svc.cluster.local \
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
- --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
- --authorization-mode=Node,RBAC \
- --enable-bootstrap-token-auth=true \
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
- --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
- --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
- --requestheader-allowed-names=aggregator \
- --requestheader-group-headers=X-Remote-Group \
- --requestheader-extra-headers-prefix=X-Remote-Extra- \
- --requestheader-username-headers=X-Remote-User
- # --token-auth-file=/etc/kubernetes/token.csv
-
-Restart=on-failure
-RestartSec=10s
-LimitNOFILE=65535
-
-[Install]
-WantedBy=multi-user.target
-
-EOF
-```
-
-### 6.1.4启动apiserver(所有master节点)
-
-```shell
-systemctl daemon-reload && systemctl enable --now kube-apiserver
-
-# 注意查看状态是否启动正常
-
-systemctl status kube-apiserver
-```
-
-## 6.2.配置kube-controller-manager service
-
-```shell
-# Configure this on all master nodes; the file is identical on each.
-# 172.16.0.0/12 is the pod CIDR; change it to match your own network plan.
-
-cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
-
-[Unit]
-Description=Kubernetes Controller Manager
-Documentation=https://github.com/kubernetes/kubernetes
-After=network.target
-
-[Service]
-ExecStart=/usr/local/bin/kube-controller-manager \
- --v=2 \
- --logtostderr=true \
- --address=127.0.0.1 \
- --root-ca-file=/etc/kubernetes/pki/ca.pem \
- --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
- --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
- --service-account-private-key-file=/etc/kubernetes/pki/sa.key \
- --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
- --leader-elect=true \
- --use-service-account-credentials=true \
- --node-monitor-grace-period=40s \
- --node-monitor-period=5s \
- --pod-eviction-timeout=2m0s \
- --controllers=*,bootstrapsigner,tokencleaner \
- --allocate-node-cidrs=true \
- --cluster-cidr=172.16.0.0/12 \
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
- --node-cidr-mask-size=24
-
-Restart=always
-RestartSec=10s
-
-[Install]
-WantedBy=multi-user.target
-
-EOF
-```
-
-### 6.2.1启动kube-controller-manager,并查看状态
-
-```shell
-systemctl daemon-reload
-systemctl enable --now kube-controller-manager
-systemctl status kube-controller-manager
-```
-
-## 6.3.配置kube-scheduler service
-
-### 6.3.1所有master节点配置,且配置相同
-
-```shell
-cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
-
-[Unit]
-Description=Kubernetes Scheduler
-Documentation=https://github.com/kubernetes/kubernetes
-After=network.target
-
-[Service]
-ExecStart=/usr/local/bin/kube-scheduler \
- --v=2 \
- --logtostderr=true \
- --address=127.0.0.1 \
- --leader-elect=true \
- --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
-
-Restart=always
-RestartSec=10s
-
-[Install]
-WantedBy=multi-user.target
-
-EOF
-```
-
-### 6.3.2启动并查看服务状态
-
-```shell
-systemctl daemon-reload
-systemctl enable --now kube-scheduler
-systemctl status kube-scheduler
-```
-
-# 7.TLS Bootstrapping配置
-
-## 7.1在master01上配置
-
-```shell
-cd /root/Kubernetes/bootstrap
-
-kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://192.168.1.88:8443 --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
-
-kubectl config set-credentials tls-bootstrap-token-user --token=c8ad9c.2e4d610cf3e7426e --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
-
-kubectl config set-context tls-bootstrap-token-user@kubernetes --cluster=kubernetes --user=tls-bootstrap-token-user --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
-
-kubectl config use-context tls-bootstrap-token-user@kubernetes --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
-
-# token的位置在bootstrap.secret.yaml,如果修改的话到这个文件修改
-
-mkdir -p /root/.kube ; cp /etc/kubernetes/admin.kubeconfig /root/.kube/config
-```
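-
-The token `c8ad9c.2e4d610cf3e7426e` is a `<token-id>.<token-secret>` pair and has to match the bootstrap token Secret defined in bootstrap.secret.yaml (created in 7.2). A hedged check, assuming that file uses the conventional `bootstrap-token-<token-id>` Secret name:
-
-```shell
-# After bootstrap.secret.yaml has been applied, these should echo back
-# c8ad9c and 2e4d610cf3e7426e respectively.
-kubectl -n kube-system get secret bootstrap-token-c8ad9c \
-  -o jsonpath='{.data.token-id}' | base64 -d; echo
-kubectl -n kube-system get secret bootstrap-token-c8ad9c \
-  -o jsonpath='{.data.token-secret}' | base64 -d; echo
-```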
-
-## 7.2查看集群状态,没问题的话继续后续操作
-
-```shell
-kubectl get cs
-
-Warning: v1 ComponentStatus is deprecated in v1.19+
-NAME STATUS MESSAGE ERROR
-controller-manager Healthy ok
-etcd-0 Healthy {"health":"true","reason":""}
-scheduler Healthy ok
-etcd-1 Healthy {"health":"true","reason":""}
-etcd-2 Healthy {"health":"true","reason":""}
-
-kubectl create -f bootstrap.secret.yaml
-```
-
-# 8.node节点配置
-
-## 8.1.在master01上将证书复制到node节点
-
-```shell
-cd /etc/kubernetes/
-
-for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03 k8s-node04 k8s-node05; do
- ssh $NODE mkdir -p /etc/kubernetes/pki
- for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem bootstrap-kubelet.kubeconfig; do
- scp /etc/kubernetes/$FILE $NODE:/etc/kubernetes/${FILE}
- done
- done
-```
-
-## 8.2.kubelet配置
-
-### 8.2.1所有k8s节点创建相关目录
-
-```shell
-mkdir -p /var/lib/kubelet /var/log/kubernetes /etc/systemd/system/kubelet.service.d /etc/kubernetes/manifests/
-
-
-
-# Configure the kubelet service on all k8s nodes
-cat > /usr/lib/systemd/system/kubelet.service << EOF
-
-[Unit]
-Description=Kubernetes Kubelet
-Documentation=https://github.com/kubernetes/kubernetes
-After=docker.service
-Requires=docker.service
-
-[Service]
-ExecStart=/usr/local/bin/kubelet
-
-Restart=always
-StartLimitInterval=0
-RestartSec=10
-
-[Install]
-WantedBy=multi-user.target
-EOF
-```
-
-### 8.2.2所有k8s节点配置kubelet service的配置文件
-
-```shell
-cat > /etc/systemd/system/kubelet.service.d/10-kubelet.conf << EOF
-[Service]
-Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
-Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin --container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock --cgroup-driver=systemd"
-Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
-Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' "
-ExecStart=
-ExecStart=/usr/local/bin/kubelet \$KUBELET_KUBECONFIG_ARGS \$KUBELET_CONFIG_ARGS \$KUBELET_SYSTEM_ARGS \$KUBELET_EXTRA_ARGS
-
-EOF
-```
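-
-Before starting kubelet it is worth confirming that systemd actually merges the drop-in; a small sketch:
-
-```shell
-systemctl daemon-reload
-# The output should show kubelet.service followed by 10-kubelet.conf with the
-# Environment= lines defined above.
-systemctl cat kubelet.service
-```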
-
-### 8.2.3所有k8s节点创建kubelet的配置文件
-
-```shell
-cat > /etc/kubernetes/kubelet-conf.yml << EOF
+# 1.环境
+
+| 主机名称 | IP地址 | 说明 | 软件 |
+| -------- | ------------ | -------- | ---------------------------------------------------------------------------------------------- |
+| Master01 | 192.168.1.30 | master节点 | kube-apiserver、kube-controller-manager、kube-scheduler、etcd、kubelet、kube-proxy、nfs-client |
+| Master02 | 192.168.1.31 | master节点 | kube-apiserver、kube-controller-manager、kube-scheduler、etcd、kubelet、kube-proxy、nfs-client |
+| Master03 | 192.168.1.32 | master节点 | kube-apiserver、kube-controller-manager、kube-scheduler、etcd、kubelet、kube-proxy、nfs-client |
+| Node01 | 192.168.1.33 | node节点 | kubelet、kube-proxy、nfs-client |
+| Node02 | 192.168.1.34 | node节点 | kubelet、kube-proxy、nfs-client |
+| Node03 | 192.168.1.35 | node节点 | kubelet、kube-proxy、nfs-client |
+| Node04 | 192.168.1.36 | node节点 | kubelet、kube-proxy、nfs-client |
+| Node05 | 192.168.1.37 | node节点 | kubelet、kube-proxy、nfs-client |
+| Lb01 | 192.168.1.38 | Lb01节点 | haproxy、keepalived |
+| Lb02 | 192.168.1.39 | Lb02节点 | haproxy、keepalived |
+| | 192.168.1.88 | VIP | |
+| | | | |
+
+| 软件 | 版本 |
+|:------------------------------------------------------------------------ |:-------------------------- |
+| 内核 | 5.16.7-1.el8.elrepo.x86_64 |
+| CentOS 8 | v8 |
+| kube-apiserver、kube-controller-manager、kube-scheduler、kubelet、kube-proxy | v1.23.4 |
+| etcd | v3.5.2 |
+| docker-ce | v20.10.9 |
+| containerd | v1.6.0 |
+| cfssl | v1.6.1 |
+| cni | v1.6.0 |
+| crictl | v1.23.0 |
+| haproxy | v1.8.27 |
+| keepalived | v2.1.5 |
+
+网段
+
+物理主机:192.168.1.0/24
+
+service:10.96.0.0/12
+
+pod:172.16.0.0/12
+
+如果有条件建议k8s集群与etcd集群分开安装
+
+## 1.1.k8s基础系统环境配置
+
+### 1.2.配置IP
+
+```shell
+ssh root@192.168.1.76 "nmcli con mod ens18 ipv4.addresses 192.168.1.30/24; nmcli con mod ens18 ipv4.gateway 192.168.1.99; nmcli con mod ens18 ipv4.method manual; nmcli con mod ens18 ipv4.dns "8.8.8.8"; nmcli con up ens18"
+ssh root@192.168.1.77 "nmcli con mod ens18 ipv4.addresses 192.168.1.31/24; nmcli con mod ens18 ipv4.gateway 192.168.1.99; nmcli con mod ens18 ipv4.method manual; nmcli con mod ens18 ipv4.dns "8.8.8.8"; nmcli con up ens18"
+ssh root@192.168.1.78 "nmcli con mod ens18 ipv4.addresses 192.168.1.32/24; nmcli con mod ens18 ipv4.gateway 192.168.1.99; nmcli con mod ens18 ipv4.method manual; nmcli con mod ens18 ipv4.dns "8.8.8.8"; nmcli con up ens18"
+ssh root@192.168.1.79 "nmcli con mod ens18 ipv4.addresses 192.168.1.33/24; nmcli con mod ens18 ipv4.gateway 192.168.1.99; nmcli con mod ens18 ipv4.method manual; nmcli con mod ens18 ipv4.dns "8.8.8.8"; nmcli con up ens18"
+ssh root@192.168.1.80 "nmcli con mod ens18 ipv4.addresses 192.168.1.34/24; nmcli con mod ens18 ipv4.gateway 192.168.1.99; nmcli con mod ens18 ipv4.method manual; nmcli con mod ens18 ipv4.dns "8.8.8.8"; nmcli con up ens18"
+ssh root@192.168.1.86 "nmcli con mod ens18 ipv4.addresses 192.168.1.35/24; nmcli con mod ens18 ipv4.gateway 192.168.1.99; nmcli con mod ens18 ipv4.method manual; nmcli con mod ens18 ipv4.dns "8.8.8.8"; nmcli con up ens18"
+ssh root@192.168.1.87 "nmcli con mod ens18 ipv4.addresses 192.168.1.36/24; nmcli con mod ens18 ipv4.gateway 192.168.1.99; nmcli con mod ens18 ipv4.method manual; nmcli con mod ens18 ipv4.dns "8.8.8.8"; nmcli con up ens18"
+ssh root@192.168.1.166 "nmcli con mod ens18 ipv4.addresses 192.168.1.37/24; nmcli con mod ens18 ipv4.gateway 192.168.1.99; nmcli con mod ens18 ipv4.method manual; nmcli con mod ens18 ipv4.dns "8.8.8.8"; nmcli con up ens18"
+ssh root@192.168.1.100 "nmcli con mod ens18 ipv4.addresses 192.168.1.38/24; nmcli con mod ens18 ipv4.gateway 192.168.1.99; nmcli con mod ens18 ipv4.method manual; nmcli con mod ens18 ipv4.dns "8.8.8.8"; nmcli con up ens18"
+ssh root@192.168.1.191 "nmcli con mod ens18 ipv4.addresses 192.168.1.39/24; nmcli con mod ens18 ipv4.gateway 192.168.1.99; nmcli con mod ens18 ipv4.method manual; nmcli con mod ens18 ipv4.dns "8.8.8.8"; nmcli con up ens18"
+```
+
+### 1.3.设置主机名
+
+```shell
+hostnamectl set-hostname k8s-master01
+hostnamectl set-hostname k8s-master02
+hostnamectl set-hostname k8s-master03
+hostnamectl set-hostname k8s-node01
+hostnamectl set-hostname k8s-node02
+hostnamectl set-hostname k8s-node03
+hostnamectl set-hostname k8s-node04
+hostnamectl set-hostname k8s-node05
+hostnamectl set-hostname lb01
+hostnamectl set-hostname lb02
+```
+
+### 1.4.配置yum源
+
+```shell
+sudo sed -e 's|^mirrorlist=|#mirrorlist=|g' \
+ -e 's|^#baseurl=http://mirror.centos.org/$contentdir|baseurl=http://192.168.1.123/centos|g' \
+ -i.bak \
+ /etc/yum.repos.d/CentOS-*.repo
+
+
+sed -e 's|^mirrorlist=|#mirrorlist=|g' -e 's|^#baseurl=http://mirror.centos.org/\$contentdir|baseurl=http://192.168.1.123/centos|g' -i.bak /etc/yum.repos.d/CentOS-*.repo
+```
+
+### 1.5.安装一些必备工具
+
+```shell
+yum -y install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git network-scripts tar curl -y
+```
+
+### 1.6.安装docker工具
+
+```shell
+curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
+```
+
+### 1.7.关闭防火墙
+
+```shell
+systemctl disable --now firewalld
+```
+
+### 1.8.关闭SELinux
+
+```shell
+setenforce 0
+sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
+```
+
+### 1.9.关闭交换分区
+
+```shell
+sed -ri 's/.*swap.*/#&/' /etc/fstab
+swapoff -a && sysctl -w vm.swappiness=0
+cat /etc/fstab
+
+# /dev/mapper/centos-swap swap swap defaults 0 0
+```
+
+### 1.10.关闭NetworkManager 并启用 network
+
+```shell
+systemctl disable --now NetworkManager
+systemctl start network && systemctl enable network
+```
+
+### 1.11.进行时间同步
+
+```shell
+服务端
+
+yum install chrony -y
+cat > /etc/chrony.conf << EOF
+pool ntp.aliyun.com iburst
+driftfile /var/lib/chrony/drift
+makestep 1.0 3
+rtcsync
+allow 192.168.1.0/24
+local stratum 10
+keyfile /etc/chrony.keys
+leapsectz right/UTC
+logdir /var/log/chrony
+EOF
+
+systemctl restart chronyd
+systemctl enable chronyd
+
+客户端
+
+yum install chrony -y
+vim /etc/chrony.conf
+cat /etc/chrony.conf | grep -v "^#" | grep -v "^$"
+pool 192.168.1.30 iburst
+driftfile /var/lib/chrony/drift
+makestep 1.0 3
+rtcsync
+keyfile /etc/chrony.keys
+leapsectz right/UTC
+logdir /var/log/chrony
+
+systemctl restart chronyd ; systemctl enable chronyd
+
+
+yum install chrony -y ; sed -i "s#2.centos.pool.ntp.org#192.168.1.30#g" /etc/chrony.conf ; systemctl restart chronyd ; systemctl enable chronyd
+
+
+使用客户端进行验证
+
+chronyc sources -v
+```
+
+### 1.12.配置ulimit
+
+```shell
+ulimit -SHn 65535
+cat >> /etc/security/limits.conf << EOF
+* soft nofile 655360
+* hard nofile 655360
+* soft nproc 655350
+* hard nproc 655350
+* soft memlock unlimited
+* hard memlock unlimited
+EOF
+
+cat >> /etc/modules-load.d/ipvs.conf << EOF
+ip_vs
+ip_vs_rr
+ip_vs_wrr
+ip_vs_sh
+nf_conntrack
+EOF
+
+cat << EOF > /etc/sysctl.d/k8s.conf
+net.ipv4.ip_forward = 1
+net.bridge.bridge-nf-call-iptables = 1
+fs.may_detach_mounts = 1
+vm.overcommit_memory=1
+vm.panic_on_oom=0
+fs.inotify.max_user_watches=89100
+fs.file-max=52706963
+fs.nr_open=52706963
+net.netfilter.nf_conntrack_max=2310720
+
+
+net.ipv4.tcp_keepalive_time = 600
+net.ipv4.tcp_keepalive_probes = 3
+net.ipv4.tcp_keepalive_intvl =15
+net.ipv4.tcp_max_tw_buckets = 36000
+net.ipv4.tcp_tw_reuse = 1
+net.ipv4.tcp_max_orphans = 327680
+net.ipv4.tcp_orphan_retries = 3
+net.ipv4.tcp_syncookies = 1
+net.ipv4.tcp_max_syn_backlog = 16384
+net.ipv4.ip_conntrack_max = 65536
+net.ipv4.tcp_max_syn_backlog = 16384
+net.ipv4.tcp_timestamps = 0
+net.core.somaxconn = 16384
+EOF
+
+sysctl --system
+```
+
+### 1.18.所有节点配置hosts本地解析
+
+```shell
+cat > /etc/hosts << EOF
+192.168.1.30 k8s-master01
+192.168.1.31 k8s-master02
+192.168.1.32 k8s-master03
+192.168.1.33 k8s-node01
+192.168.1.34 k8s-node02
+192.168.1.35 k8s-node03
+192.168.1.36 k8s-node04
+192.168.1.37 k8s-node05
+192.168.1.38 lb01
+192.168.1.39 lb02
+EOF
+
+cat > /etc/crictl.yaml << EOF
+runtime-endpoint: unix:///run/containerd/containerd.sock
+image-endpoint: unix:///run/containerd/containerd.sock
+timeout: 10
+debug: false
+EOF
+```
+
+### 4.1.1master01配置
+
+```shell
+cat > /etc/etcd/etcd.config.yml << EOF
+name: 'k8s-master01'
+data-dir: /var/lib/etcd
+wal-dir: /var/lib/etcd/wal
+snapshot-count: 5000
+heartbeat-interval: 100
+election-timeout: 1000
+quota-backend-bytes: 0
+listen-peer-urls: 'https://192.168.1.30:2380'
+listen-client-urls: 'https://192.168.1.30:2379,http://127.0.0.1:2379'
+max-snapshots: 3
+max-wals: 5
+cors:
+initial-advertise-peer-urls: 'https://192.168.1.30:2380'
+advertise-client-urls: 'https://192.168.1.30:2379'
+discovery:
+discovery-fallback: 'proxy'
+discovery-proxy:
+discovery-srv:
+initial-cluster: 'k8s-master01=https://192.168.1.30:2380,k8s-master02=https://192.168.1.31:2380,k8s-master03=https://192.168.1.32:2380'
+initial-cluster-token: 'etcd-k8s-cluster'
+initial-cluster-state: 'new'
+strict-reconfig-check: false
+enable-v2: true
+enable-pprof: true
+proxy: 'off'
+proxy-failure-wait: 5000
+proxy-refresh-interval: 30000
+proxy-dial-timeout: 1000
+proxy-write-timeout: 5000
+proxy-read-timeout: 0
+client-transport-security:
+ cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
+ key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
+ client-cert-auth: true
+ trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
+ auto-tls: true
+peer-transport-security:
+ cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
+ key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
+ peer-client-cert-auth: true
+ trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
+ auto-tls: true
+debug: false
+log-package-levels:
+log-outputs: [default]
+force-new-cluster: false
+EOF
+```
+
+### 4.1.2master02配置
+
+```shell
+cat > /etc/etcd/etcd.config.yml << EOF
+name: 'k8s-master02'
+data-dir: /var/lib/etcd
+wal-dir: /var/lib/etcd/wal
+snapshot-count: 5000
+heartbeat-interval: 100
+election-timeout: 1000
+quota-backend-bytes: 0
+listen-peer-urls: 'https://192.168.1.31:2380'
+listen-client-urls: 'https://192.168.1.31:2379,http://127.0.0.1:2379'
+max-snapshots: 3
+max-wals: 5
+cors:
+initial-advertise-peer-urls: 'https://192.168.1.31:2380'
+advertise-client-urls: 'https://192.168.1.31:2379'
+discovery:
+discovery-fallback: 'proxy'
+discovery-proxy:
+discovery-srv:
+initial-cluster: 'k8s-master01=https://192.168.1.30:2380,k8s-master02=https://192.168.1.31:2380,k8s-master03=https://192.168.1.32:2380'
+initial-cluster-token: 'etcd-k8s-cluster'
+initial-cluster-state: 'new'
+strict-reconfig-check: false
+enable-v2: true
+enable-pprof: true
+proxy: 'off'
+proxy-failure-wait: 5000
+proxy-refresh-interval: 30000
+proxy-dial-timeout: 1000
+proxy-write-timeout: 5000
+proxy-read-timeout: 0
+client-transport-security:
+ cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
+ key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
+ client-cert-auth: true
+ trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
+ auto-tls: true
+peer-transport-security:
+ cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
+ key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
+ peer-client-cert-auth: true
+ trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
+ auto-tls: true
+debug: false
+log-package-levels:
+log-outputs: [default]
+force-new-cluster: false
+EOF
+```
+
+### 4.1.3master03配置
+
+```shell
+cat > /etc/etcd/etcd.config.yml << EOF
+name: 'k8s-master03'
+data-dir: /var/lib/etcd
+wal-dir: /var/lib/etcd/wal
+snapshot-count: 5000
+heartbeat-interval: 100
+election-timeout: 1000
+quota-backend-bytes: 0
+listen-peer-urls: 'https://192.168.1.32:2380'
+listen-client-urls: 'https://192.168.1.32:2379,http://127.0.0.1:2379'
+max-snapshots: 3
+max-wals: 5
+cors:
+initial-advertise-peer-urls: 'https://192.168.1.32:2380'
+advertise-client-urls: 'https://192.168.1.32:2379'
+discovery:
+discovery-fallback: 'proxy'
+discovery-proxy:
+discovery-srv:
+initial-cluster: 'k8s-master01=https://192.168.1.30:2380,k8s-master02=https://192.168.1.31:2380,k8s-master03=https://192.168.1.32:2380'
+initial-cluster-token: 'etcd-k8s-cluster'
+initial-cluster-state: 'new'
+strict-reconfig-check: false
+enable-v2: true
+enable-pprof: true
+proxy: 'off'
+proxy-failure-wait: 5000
+proxy-refresh-interval: 30000
+proxy-dial-timeout: 1000
+proxy-write-timeout: 5000
+proxy-read-timeout: 0
+client-transport-security:
+ cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
+ key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
+ client-cert-auth: true
+ trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
+ auto-tls: true
+peer-transport-security:
+ cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
+ key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
+ peer-client-cert-auth: true
+ trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
+ auto-tls: true
+debug: false
+log-package-levels:
+log-outputs: [default]
+force-new-cluster: false
+EOF
+```
+
+## 4.2.创建service(所有master节点操作)
+
+### 4.2.1创建etcd.service并启动
+
+```shell
+cat > /usr/lib/systemd/system/etcd.service << EOF
+
+[Unit]
+Description=Etcd Service
+Documentation=https://coreos.com/etcd/docs/latest/
+After=network.target
+
+[Service]
+Type=notify
+ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
+Restart=on-failure
+RestartSec=10
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
+Alias=etcd3.service
+
+EOF
+```
+
+### 4.2.2创建etcd证书目录
+
+```shell
+mkdir /etc/kubernetes/pki/etcd
+ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
+systemctl daemon-reload
+systemctl enable --now etcd
+```
+
+### 4.2.3查看etcd状态
+
+```shell
+export ETCDCTL_API=3
+etcdctl --endpoints="192.168.1.32:2379,192.168.1.31:2379,192.168.1.30:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint status --write-out=table
++-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
+| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
++-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
+| 192.168.1.32:2379 | 56875ab4a12c94e8 | 3.5.1 | 25 kB | false | false | 2 | 8 | 8 | |
+| 192.168.1.31:2379 | 33df6a8fe708d3fd | 3.5.1 | 25 kB | true | false | 2 | 8 | 8 | |
+| 192.168.1.30:2379 | 58fbe5ec9743048f | 3.5.1 | 20 kB | false | false | 2 | 8 | 8 | |
++-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
+```
+
+# 5.高可用配置
+
+## 5.1在lb01和lb02两台服务器上操作
+
+### 5.1.1安装keepalived和haproxy服务
+
+```shell
+
+systemctl disable --now firewalld
+
+setenforce 0
+sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
+
+
+yum -y install keepalived haproxy
+```
+
+### 5.1.2修改haproxy配置文件(两台配置文件一样)
+
+```shell
+# cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak
+
+cat >/etc/haproxy/haproxy.cfg<<"EOF"
+global
+ maxconn 2000
+ ulimit-n 16384
+ log 127.0.0.1 local0 err
+ stats timeout 30s
+
+defaults
+ log global
+ mode http
+ option httplog
+ timeout connect 5000
+ timeout client 50000
+ timeout server 50000
+ timeout http-request 15s
+ timeout http-keep-alive 15s
+
+
+frontend monitor-in
+ bind *:33305
+ mode http
+ option httplog
+ monitor-uri /monitor
+
+frontend k8s-master
+ bind 0.0.0.0:8443
+ bind 127.0.0.1:8443
+ mode tcp
+ option tcplog
+ tcp-request inspect-delay 5s
+ default_backend k8s-master
+
+
+backend k8s-master
+ mode tcp
+ option tcplog
+ option tcp-check
+ balance roundrobin
+ default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
+ server k8s-master01 192.168.1.30:6443 check
+ server k8s-master02 192.168.1.31:6443 check
+ server k8s-master03 192.168.1.32:6443 check
+EOF
+```
+
+### 5.1.3lb01配置keepalived master节点
+
+```shell
+#cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
+
+cat > /etc/keepalived/keepalived.conf << EOF
+! Configuration File for keepalived
+
+global_defs {
+ router_id LVS_DEVEL
+}
+vrrp_script chk_apiserver {
+ script "/etc/keepalived/check_apiserver.sh"
+ interval 5
+ weight -5
+ fall 2
+ rise 1
+}
+vrrp_instance VI_1 {
+ state MASTER
+ interface ens18
+ mcast_src_ip 192.168.1.38
+ virtual_router_id 51
+ priority 100
+ nopreempt
+ advert_int 2
+ authentication {
+ auth_type PASS
+ auth_pass K8SHA_KA_AUTH
+ }
+ virtual_ipaddress {
+ 192.168.1.88
+ }
+ track_script {
+ chk_apiserver
+} }
+
+EOF
+```
+
+### 5.1.4lb02配置keepalived backup节点
+
+```shell
+# cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
+
+cat > /etc/keepalived/keepalived.conf << EOF
+! Configuration File for keepalived
+
+global_defs {
+ router_id LVS_DEVEL
+}
+vrrp_script chk_apiserver {
+ script "/etc/keepalived/check_apiserver.sh"
+ interval 5
+ weight -5
+ fall 2
+ rise 1
+
+}
+vrrp_instance VI_1 {
+ state BACKUP
+ interface ens18
+ mcast_src_ip 192.168.1.39
+ virtual_router_id 51
+ priority 50
+ nopreempt
+ advert_int 2
+ authentication {
+ auth_type PASS
+ auth_pass K8SHA_KA_AUTH
+ }
+ virtual_ipaddress {
+ 192.168.1.88
+ }
+ track_script {
+ chk_apiserver
+} }
+
+EOF
+```
+
+### 5.1.5健康检查脚本配置(两台lb主机)
+
+```shell
+cat > /etc/keepalived/check_apiserver.sh << EOF
+#!/bin/bash
+
+err=0
+for k in \$(seq 1 3)
+do
+ check_code=\$(pgrep haproxy)
+ if [[ \$check_code == "" ]]; then
+ err=\$(expr \$err + 1)
+ sleep 1
+ continue
+ else
+ err=0
+ break
+ fi
+done
+
+if [[ \$err != "0" ]]; then
+ echo "systemctl stop keepalived"
+ /usr/bin/systemctl stop keepalived
+ exit 1
+else
+ exit 0
+fi
+EOF
+
+# 给脚本授权
+
+chmod +x /etc/keepalived/check_apiserver.sh
+```
+
+### 5.1.6启动服务
+
+```shell
+systemctl daemon-reload
+systemctl enable --now haproxy
+systemctl enable --now keepalived
+```
+
+### 5.1.7测试高可用
+
+```shell
+# 能ping同
+
+[root@k8s-node02 ~]# ping 192.168.1.88
+
+# 能telnet访问
+
+[root@k8s-node02 ~]# telnet 192.168.1.88 8443
+
+# 关闭主节点,看vip是否漂移到备节点
+```
+
+# 6.k8s组件配置(区别于第4点)
+
+所有k8s节点创建以下目录
+
+```shell
+mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
+```
+
+## 6.1.创建apiserver(所有master节点)
+
+### 6.1.1master01节点配置
+
+```shell
+cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
+
+[Unit]
+Description=Kubernetes API Server
+Documentation=https://github.com/kubernetes/kubernetes
+After=network.target
+
+[Service]
+ExecStart=/usr/local/bin/kube-apiserver \
+ --v=2 \
+ --logtostderr=true \
+ --allow-privileged=true \
+ --bind-address=0.0.0.0 \
+ --secure-port=6443 \
+ --insecure-port=0 \
+ --advertise-address=192.168.1.30 \
+ --service-cluster-ip-range=10.96.0.0/12 \
+ --service-node-port-range=30000-32767 \
+ --etcd-servers=https://192.168.1.30:2379,https://192.168.1.31:2379,https://192.168.1.32:2379 \
+ --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
+ --etcd-certfile=/etc/etcd/ssl/etcd.pem \
+ --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
+ --client-ca-file=/etc/kubernetes/pki/ca.pem \
+ --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
+ --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
+ --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
+ --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
+ --service-account-key-file=/etc/kubernetes/pki/sa.pub \
+ --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
+ --service-account-issuer=https://kubernetes.default.svc.cluster.local \
+ --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
+ --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
+ --authorization-mode=Node,RBAC \
+ --enable-bootstrap-token-auth=true \
+ --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
+ --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
+ --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
+ --requestheader-allowed-names=aggregator \
+ --requestheader-group-headers=X-Remote-Group \
+ --requestheader-extra-headers-prefix=X-Remote-Extra- \
+ --requestheader-username-headers=X-Remote-User
+ # --token-auth-file=/etc/kubernetes/token.csv
+
+Restart=on-failure
+RestartSec=10s
+LimitNOFILE=65535
+
+[Install]
+WantedBy=multi-user.target
+
+EOF
+```
+
+### 6.1.2master02节点配置
+
+```shell
+cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
+[Unit]
+Description=Kubernetes API Server
+Documentation=https://github.com/kubernetes/kubernetes
+After=network.target
+
+[Service]
+ExecStart=/usr/local/bin/kube-apiserver \
+ --v=2 \
+ --logtostderr=true \
+ --allow-privileged=true \
+ --bind-address=0.0.0.0 \
+ --secure-port=6443 \
+ --insecure-port=0 \
+ --advertise-address=192.168.1.31 \
+ --service-cluster-ip-range=10.96.0.0/12 \
+ --service-node-port-range=30000-32767 \
+ --etcd-servers=https://192.168.1.30:2379,https://192.168.1.31:2379,https://192.168.1.32:2379 \
+ --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
+ --etcd-certfile=/etc/etcd/ssl/etcd.pem \
+ --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
+ --client-ca-file=/etc/kubernetes/pki/ca.pem \
+ --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
+ --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
+ --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
+ --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
+ --service-account-key-file=/etc/kubernetes/pki/sa.pub \
+ --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
+ --service-account-issuer=https://kubernetes.default.svc.cluster.local \
+ --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
+ --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
+ --authorization-mode=Node,RBAC \
+ --enable-bootstrap-token-auth=true \
+ --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
+ --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
+ --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
+ --requestheader-allowed-names=aggregator \
+ --requestheader-group-headers=X-Remote-Group \
+ --requestheader-extra-headers-prefix=X-Remote-Extra- \
+ --requestheader-username-headers=X-Remote-User
+ # --token-auth-file=/etc/kubernetes/token.csv
+
+Restart=on-failure
+RestartSec=10s
+LimitNOFILE=65535
+
+[Install]
+WantedBy=multi-user.target
+
+EOF
+```
+
+### 6.1.3master03节点配置
+
+```shell
+cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
+
+[Unit]
+Description=Kubernetes API Server
+Documentation=https://github.com/kubernetes/kubernetes
+After=network.target
+
+[Service]
+ExecStart=/usr/local/bin/kube-apiserver \
+ --v=2 \
+ --logtostderr=true \
+ --allow-privileged=true \
+ --bind-address=0.0.0.0 \
+ --secure-port=6443 \
+ --insecure-port=0 \
+ --advertise-address=192.168.1.32 \
+ --service-cluster-ip-range=10.96.0.0/12 \
+ --service-node-port-range=30000-32767 \
+ --etcd-servers=https://192.168.1.30:2379,https://192.168.1.31:2379,https://192.168.1.32:2379 \
+ --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
+ --etcd-certfile=/etc/etcd/ssl/etcd.pem \
+ --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
+ --client-ca-file=/etc/kubernetes/pki/ca.pem \
+ --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
+ --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
+ --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
+ --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
+ --service-account-key-file=/etc/kubernetes/pki/sa.pub \
+ --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
+ --service-account-issuer=https://kubernetes.default.svc.cluster.local \
+ --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
+ --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
+ --authorization-mode=Node,RBAC \
+ --enable-bootstrap-token-auth=true \
+ --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
+ --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
+ --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
+ --requestheader-allowed-names=aggregator \
+ --requestheader-group-headers=X-Remote-Group \
+ --requestheader-extra-headers-prefix=X-Remote-Extra- \
+ --requestheader-username-headers=X-Remote-User
+ # --token-auth-file=/etc/kubernetes/token.csv
+
+Restart=on-failure
+RestartSec=10s
+LimitNOFILE=65535
+
+[Install]
+WantedBy=multi-user.target
+
+EOF
+```
+
+### 6.1.4启动apiserver(所有master节点)
+
+```shell
+systemctl daemon-reload && systemctl enable --now kube-apiserver
+
+# 注意查看状态是否启动正常
+
+systemctl status kube-apiserver
+```
+
+## 6.2.配置kube-controller-manager service
+
+```shell
+所有master节点配置,且配置相同
+172.16.0.0/12为pod网段,按需求设置你自己的网段
+
+cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
+
+[Unit]
+Description=Kubernetes Controller Manager
+Documentation=https://github.com/kubernetes/kubernetes
+After=network.target
+
+[Service]
+ExecStart=/usr/local/bin/kube-controller-manager \
+ --v=2 \
+ --logtostderr=true \
+ --address=127.0.0.1 \
+ --root-ca-file=/etc/kubernetes/pki/ca.pem \
+ --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
+ --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
+ --service-account-private-key-file=/etc/kubernetes/pki/sa.key \
+ --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
+ --leader-elect=true \
+ --use-service-account-credentials=true \
+ --node-monitor-grace-period=40s \
+ --node-monitor-period=5s \
+ --pod-eviction-timeout=2m0s \
+ --controllers=*,bootstrapsigner,tokencleaner \
+ --allocate-node-cidrs=true \
+ --cluster-cidr=172.16.0.0/12 \
+ --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
+ --node-cidr-mask-size=24
+
+Restart=always
+RestartSec=10s
+
+[Install]
+WantedBy=multi-user.target
+
+EOF
+```
+
+### 6.2.1启动kube-controller-manager,并查看状态
+
+```shell
+systemctl daemon-reload
+systemctl enable --now kube-controller-manager
+systemctl status kube-controller-manager
+```
+
+## 6.3.配置kube-scheduler service
+
+### 6.3.1所有master节点配置,且配置相同
+
+```shell
+cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
+
+[Unit]
+Description=Kubernetes Scheduler
+Documentation=https://github.com/kubernetes/kubernetes
+After=network.target
+
+[Service]
+ExecStart=/usr/local/bin/kube-scheduler \
+ --v=2 \
+ --logtostderr=true \
+ --address=127.0.0.1 \
+ --leader-elect=true \
+ --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
+
+Restart=always
+RestartSec=10s
+
+[Install]
+WantedBy=multi-user.target
+
+EOF
+```
+
+### 6.3.2启动并查看服务状态
+
+```shell
+systemctl daemon-reload
+systemctl enable --now kube-scheduler
+systemctl status kube-scheduler
+```
+
+# 7.TLS Bootstrapping配置
+
+## 7.1在master01上配置
+
+```shell
+cd /root/Kubernetes/bootstrap
+
+kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://192.168.1.88:8443 --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
+
+kubectl config set-credentials tls-bootstrap-token-user --token=c8ad9c.2e4d610cf3e7426e --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
+
+kubectl config set-context tls-bootstrap-token-user@kubernetes --cluster=kubernetes --user=tls-bootstrap-token-user --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
+
+kubectl config use-context tls-bootstrap-token-user@kubernetes --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
+
+# token的位置在bootstrap.secret.yaml,如果修改的话到这个文件修改
+
+mkdir -p /root/.kube ; cp /etc/kubernetes/admin.kubeconfig /root/.kube/config
+```
+
+## 7.2查看集群状态,没问题的话继续后续操作
+
+```shell
+kubectl get cs
+
+Warning: v1 ComponentStatus is deprecated in v1.19+
+NAME STATUS MESSAGE ERROR
+controller-manager Healthy ok
+etcd-0 Healthy {"health":"true","reason":""}
+scheduler Healthy ok
+etcd-1 Healthy {"health":"true","reason":""}
+etcd-2 Healthy {"health":"true","reason":""}
+
+kubectl create -f bootstrap.secret.yaml
+```
+
+# 8.node节点配置
+
+## 8.1.在master01上将证书复制到node节点
+
+```shell
+cd /etc/kubernetes/
+
+for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03 k8s-node04 k8s-node05; do
+ ssh $NODE mkdir -p /etc/kubernetes/pki
+ for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem bootstrap-kubelet.kubeconfig; do
+ scp /etc/kubernetes/$FILE $NODE:/etc/kubernetes/${FILE}
+ done
+ done
+```
+
+## 8.2.kubelet配置
+
+### 8.2.1所有k8s节点创建相关目录
+
+```shell
+mkdir -p /var/lib/kubelet /var/log/kubernetes /etc/systemd/system/kubelet.service.d /etc/kubernetes/manifests/
+
+
+
+# 所有k8s节点配置kubelet service
+cat > /usr/lib/systemd/system/kubelet.service << EOF
+
+[Unit]
+Description=Kubernetes Kubelet
+Documentation=https://github.com/kubernetes/kubernetes
+After=docker.service
+Requires=docker.service
+
+[Service]
+ExecStart=/usr/local/bin/kubelet
+
+Restart=always
+StartLimitInterval=0
+RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
+EOF
+```
+
+### 8.2.2所有k8s节点配置kubelet service的配置文件
+
+```shell
+cat > /etc/systemd/system/kubelet.service.d/10-kubelet.conf << EOF
+[Service]
+Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
+Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin --container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock --cgroup-driver=systemd"
+Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
+Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' "
+ExecStart=
+ExecStart=/usr/local/bin/kubelet \$KUBELET_KUBECONFIG_ARGS \$KUBELET_CONFIG_ARGS \$KUBELET_SYSTEM_ARGS \$KUBELET_EXTRA_ARGS
+
+EOF
+```
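+
+systemd merges this drop-in with the kubelet.service unit created in 8.2.1; before starting the kubelet you can confirm the final command line it will run:
+
+```shell
+# Shows kubelet.service plus every drop-in, including 10-kubelet.conf
+systemctl cat kubelet
+
+# Reload after any change to the unit or drop-in
+systemctl daemon-reload
+```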
+
+### 8.2.3所有k8s节点创建kubelet的配置文件
+
+```shell
+cat > /etc/kubernetes/kubelet-conf.yml < 80s v1.23.4
k8s-master02 Ready 78s v1.23.4
@@ -1588,536 +1590,536 @@ k8s-node02 Ready 95s v1.23.4
k8s-node03 Ready 87s v1.23.4
k8s-node04 Ready 65s v1.23.4
k8s-node05 Ready 77s v1.23.4
-
-```
-
-## 8.3.kube-proxy配置
-
-### 8.3.1此配置只在master01操作
-
-```shell
-cd /root/Kubernetes/
-kubectl -n kube-system create serviceaccount kube-proxy
-
-kubectl create clusterrolebinding system:kube-proxy --clusterrole system:node-proxier --serviceaccount kube-system:kube-proxy
-
-SECRET=$(kubectl -n kube-system get sa/kube-proxy \
- --output=jsonpath='{.secrets[0].name}')
-
-JWT_TOKEN=$(kubectl -n kube-system get secret/$SECRET \
---output=jsonpath='{.data.token}' | base64 -d)
-
-PKI_DIR=/etc/kubernetes/pki
-K8S_DIR=/etc/kubernetes
-
-kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://192.168.1.88:8443 --kubeconfig=${K8S_DIR}/kube-proxy.kubeconfig
-
-kubectl config set-credentials kubernetes --token=${JWT_TOKEN} --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
-
-kubectl config set-context kubernetes --cluster=kubernetes --user=kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
-
-kubectl config use-context kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
-```
-
-### 8.3.2将kubeconfig发送至其他节点
-
-```shell
-for NODE in k8s-master02 k8s-master03; do
- scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig
- done
-
-for NODE in k8s-node01 k8s-node02 k8s-node03 k8s-node04 k8s-node05; do
- scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig
- done
-```
-
-### 8.3.3所有k8s节点添加kube-proxy的配置和service文件
-
-```shell
-cat > /usr/lib/systemd/system/kube-proxy.service << EOF
-[Unit]
-Description=Kubernetes Kube Proxy
-Documentation=https://github.com/kubernetes/kubernetes
-After=network.target
-
-[Service]
-ExecStart=/usr/local/bin/kube-proxy \
- --config=/etc/kubernetes/kube-proxy.yaml \
- --v=2
-
-Restart=always
-RestartSec=10s
-
-[Install]
-WantedBy=multi-user.target
-
-EOF
-```
-
-```shell
-cat > /etc/kubernetes/kube-proxy.yaml << EOF
-apiVersion: kubeproxy.config.k8s.io/v1alpha1
-bindAddress: 0.0.0.0
-clientConnection:
- acceptContentTypes: ""
- burst: 10
- contentType: application/vnd.kubernetes.protobuf
- kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
- qps: 5
-clusterCIDR: 172.16.0.0/12
-configSyncPeriod: 15m0s
-conntrack:
- max: null
- maxPerCore: 32768
- min: 131072
- tcpCloseWaitTimeout: 1h0m0s
- tcpEstablishedTimeout: 24h0m0s
-enableProfiling: false
-healthzBindAddress: 0.0.0.0:10256
-hostnameOverride: ""
-iptables:
- masqueradeAll: false
- masqueradeBit: 14
- minSyncPeriod: 0s
- syncPeriod: 30s
-ipvs:
- masqueradeAll: true
- minSyncPeriod: 5s
- scheduler: "rr"
- syncPeriod: 30s
-kind: KubeProxyConfiguration
-metricsBindAddress: 127.0.0.1:10249
-mode: "ipvs"
-nodePortAddresses: null
-oomScoreAdj: -999
-portRange: ""
-udpIdleTimeout: 250ms
-
-EOF
-```
-
-### 8.3.4启动kube-proxy
-
-```shell
- systemctl daemon-reload
- systemctl enable --now kube-proxy
-```
-
-# 9.安装Calico
-
-## 9.1以下步骤只在master01操作
-
-### 9.1.1更改calico网段
-
-```shell
-cd /root/Kubernetes/calico/
-sed -i "s#POD_CIDR#172.16.0.0/12#g" calico.yaml
-grep "IPV4POOL_CIDR" calico.yaml -A 1
- - name: CALICO_IPV4POOL_CIDR
- value: "172.16.0.0/12"
-
-# 创建
-
-kubectl apply -f calico.yaml
-```
-
-### 9.1.2查看容器状态
-
-```shell
-kubectl get pod -A
-NAMESPACE NAME READY STATUS RESTARTS AGE
-kube-system calico-kube-controllers-6f6595874c-cf2sf 1/1 Running 0 14m
-kube-system calico-node-2xd2q 1/1 Running 0 2m25s
-kube-system calico-node-jtzfh 1/1 Running 0 2m3s
-kube-system calico-node-k4jkc 1/1 Running 0 2m24s
-kube-system calico-node-msxwp 1/1 Running 0 2m15s
-kube-system calico-node-tv849 1/1 Running 0 2m12s
-kube-system calico-node-wdbzt 1/1 Running 0 2m18s
-kube-system calico-node-x9sjr 1/1 Running 0 2m33s
-kube-system calico-node-z2mz5 1/1 Running 0 2m16s
-kube-system calico-typha-6b6cf8cbdf-gshvt 1/1 Running 0 14m
-```
-
-# 10.安装CoreDNS
-
-## 10.1以下步骤只在master01操作
-
-### 10.1.1修改文件
-
-```shell
-cd /root/Kubernetes/CoreDNS/
-sed -i "s#KUBEDNS_SERVICE_IP#10.96.0.10#g" coredns.yaml
-
-cat coredns.yaml | grep clusterIP:
- clusterIP: 10.96.0.10
-```
-
-### 10.1.2安装
-
-```shell
-kubectl create -f coredns.yaml
-serviceaccount/coredns created
-clusterrole.rbac.authorization.k8s.io/system:coredns created
-clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
-configmap/coredns created
-deployment.apps/coredns created
-service/kube-dns created
-```
-
-# 11.安装Metrics Server
-
-## 11.1以下步骤只在master01操作
-
-### 11.1.1安装Metrics-server
-
-在新版的Kubernetes中系统资源的采集均使用Metrics-server,可以通过Metrics采集节点和Pod的内存、磁盘、CPU和网络的使用率
-
-```shell
-安装metrics server
-cd /root/Kubernetes/metrics-server/
-
-kubectl create -f .
-
-serviceaccount/metrics-server created
-clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
-clusterrole.rbac.authorization.k8s.io/system:metrics-server created
-rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
-clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
-clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
-service/metrics-server created
-deployment.apps/metrics-server created
-apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
-```
-
-### 11.1.2稍等片刻查看状态
-
-```shell
-kubectl top node
-NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
-k8s-master01 172m 2% 1307Mi 16%
-k8s-master02 157m 1% 1189Mi 15%
-k8s-master03 155m 1% 1105Mi 14%
-k8s-node01 99m 1% 710Mi 9%
-k8s-node02 79m 0% 585Mi 7%
-```
-
-# 12.集群验证
-
-## 12.1部署pod资源
-
-```shell
-cat< 443/TCP 17h
-
-
-kubectl exec busybox -n default -- nslookup kubernetes
-3Server: 10.96.0.10
-Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
-
-Name: kubernetes
-Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
-```
-
-## 12.3测试跨命名空间是否可以解析
-
-```shell
-kubectl exec busybox -n default -- nslookup kube-dns.kube-system
-Server: 10.96.0.10
-Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
-
-Name: kube-dns.kube-system
-Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
-```
-
-## 12.4每个节点都必须要能访问Kubernetes的kubernetes svc 443和kube-dns的service 53
-
-```shell
-telnet 10.96.0.1 443
-Trying 10.96.0.1...
-Connected to 10.96.0.1.
-Escape character is '^]'.
-
- telnet 10.96.0.10 53
-Trying 10.96.0.10...
-Connected to 10.96.0.10.
-Escape character is '^]'.
-
-curl 10.96.0.10:53
-curl: (52) Empty reply from server
-```
-
-## 12.5Pod和Pod之前要能通
-
-```shell
-kubectl get po -owide
-NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
-busybox 1/1 Running 0 17m 172.27.14.193 k8s-node02
-
- kubectl get po -n kube-system -owide
-NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
-calico-kube-controllers-5dffd5886b-4blh6 1/1 Running 0 77m 172.25.244.193 k8s-master01
-calico-node-fvbdq 1/1 Running 1 (75m ago) 77m 192.168.1.30 k8s-master01
-calico-node-g8nqd 1/1 Running 0 77m 192.168.1.33 k8s-node01
-calico-node-mdps8 1/1 Running 0 77m 192.168.1.34 k8s-node02
-calico-node-nf4nt 1/1 Running 0 77m 192.168.1.32 k8s-master03
-calico-node-sq2ml 1/1 Running 0 77m 192.168.1.31 k8s-master02
-calico-typha-8445487f56-mg6p8 1/1 Running 0 77m 192.168.1.34 k8s-node02
-calico-typha-8445487f56-pxbpj 1/1 Running 0 77m 192.168.1.30 k8s-master01
-calico-typha-8445487f56-tnssl 1/1 Running 0 77m 192.168.1.33 k8s-node01
-coredns-5db5696c7-67h79 1/1 Running 0 63m 172.25.92.65 k8s-master02
-metrics-server-6bf7dcd649-5fhrw 1/1 Running 0 61m 172.18.195.1 k8s-master03
-
-# 进入busybox ping其他节点上的pod
-
-kubectl exec -ti busybox -- sh
-/ # ping 192.168.1.33
-PING 192.168.1.33 (192.168.1.33): 56 data bytes
-64 bytes from 192.168.1.33: seq=0 ttl=63 time=0.358 ms
-64 bytes from 192.168.1.33: seq=1 ttl=63 time=0.668 ms
-64 bytes from 192.168.1.33: seq=2 ttl=63 time=0.637 ms
-64 bytes from 192.168.1.33: seq=3 ttl=63 time=0.624 ms
-64 bytes from 192.168.1.33: seq=4 ttl=63 time=0.907 ms
-
-# 可以连通证明这个pod是可以跨命名空间和跨主机通信的
-```
-
-## 12.6创建三个副本,可以看到3个副本分布在不同的节点上(用完可以删了)
-
-```shell
-cat > deployments.yaml << EOF
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: nginx-deployment
- labels:
- app: nginx
-spec:
- replicas: 3
- selector:
- matchLabels:
- app: nginx
- template:
- metadata:
- labels:
- app: nginx
- spec:
- containers:
- - name: nginx
- image: nginx:1.14.2
- ports:
- - containerPort: 80
-
-EOF
-
-
-kubectl apply -f deployments.yaml
-deployment.apps/nginx-deployment created
-
-kubectl get pod
-NAME READY STATUS RESTARTS AGE
-busybox 1/1 Running 0 6m25s
-nginx-deployment-9456bbbf9-4bmvk 1/1 Running 0 8s
-nginx-deployment-9456bbbf9-9rcdk 1/1 Running 0 8s
-nginx-deployment-9456bbbf9-dqv8s 1/1 Running 0 8s
-
-# 删除nginx
-
-[root@k8s-master01 ~]# kubectl delete -f deployments.yaml
-```
-
-# 13.安装dashboard
-
-```shell
-cd /root/Kubernetes/dashboard/
-
-kubectl create -f .
-serviceaccount/admin-user created
-clusterrolebinding.rbac.authorization.k8s.io/admin-user created
-namespace/kubernetes-dashboard created
-serviceaccount/kubernetes-dashboard created
-service/kubernetes-dashboard created
-secret/kubernetes-dashboard-certs created
-secret/kubernetes-dashboard-csrf created
-secret/kubernetes-dashboard-key-holder created
-configmap/kubernetes-dashboard-settings created
-role.rbac.authorization.k8s.io/kubernetes-dashboard created
-clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
-rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
-clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
-deployment.apps/kubernetes-dashboard created
-service/dashboard-metrics-scraper created
-deployment.apps/dashboard-metrics-scraper created
-```
-
-## 13.1创建管理员用户
-
-```shell
-cat > admin.yaml << EOF
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: admin-user
-
- namespace: kube-system
----
-
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: admin-user
- annotations:
- rbac.authorization.kubernetes.io/autoupdate: "true"
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: cluster-admin
-subjects:
-
-- kind: ServiceAccount
- name: admin-user
- namespace: kube-system
-
-EOF
-```
-
-## 13.2执行yaml文件
-
-```shell
-kubectl apply -f admin.yaml -n kube-system
-
-serviceaccount/admin-user created
-clusterrolebinding.rbac.authorization.k8s.io/admin-user created
-```
-
-## 13.3更改dashboard的svc为NodePort,如果已是请忽略
-
-```shell
-kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
- type: NodePort
-```
-
-## 13.4查看端口号
-
-```shell
-kubectl get svc kubernetes-dashboard -n kubernetes-dashboard
-NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
-kubernetes-dashboard NodePort 10.98.201.22 443:31245/TCP 10m
-```
-
-## 13.5查看token
-
-```shell
-kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
-Name: admin-user-token-k545k
-Namespace: kube-system
-Labels:
-Annotations: kubernetes.io/service-account.name: admin-user
- kubernetes.io/service-account.uid: c308071c-4cf5-4583-83a2-eaf7812512b4
-
-Type: kubernetes.io/service-account-token
-
-Data
-====
-token: eyJhbGciOiJSUzI1NiIsImtpZCI6InYzV2dzNnQzV3hHb2FQWnYzdnlOSmpudmtpVmNjQW5VM3daRi12SFM4dEEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWs1NDVrIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJjMzA4MDcxYy00Y2Y1LTQ1ODMtODNhMi1lYWY3ODEyNTEyYjQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.pshvZPi9ZJkXUWuWilcYs1wawTpzV-nMKesgF3d_l7qyTPaK2N5ofzIThd0SjzU7BFNb4_rOm1dw1Be5kLeHjY_YW5lDnM5TAxVPXmZQ0HJ2pAQ0pjQqCHFnPD0bZFIYkeyz8pZx0Hmwcd3ZdC1yztr0ADpTAmMgI9NC2ZFIeoFFo4Ue9ZM_ulhqJQjmgoAlI_qbyjuKCNsWeEQBwM6HHHAsH1gOQIdVxqQ83OQZUuynDQRpqlHHFIndbK2zVRYFA3GgUnTu2-VRQ-DXBFRjvZR5qArnC1f383jmIjGT6VO7l04QJteG_LFetRbXa-T4mcnbsd8XutSgO0INqwKpjw
-ca.crt: 1363 bytes
-namespace: 11 bytes
-```
-
-## 13.6登录dashboard
-
-https://192.168.1.30:31245/
-
-eyJhbGciOiJSUzI1NiIsImtpZCI6InYzV2dzNnQzV3hHb2FQWnYzdnlOSmpudmtpVmNjQW5VM3daRi12SFM4dEEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWs1NDVrIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJjMzA4MDcxYy00Y2Y1LTQ1ODMtODNhMi1lYWY3ODEyNTEyYjQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.pshvZPi9ZJkXUWuWilcYs1wawTpzV-nMKesgF3d_l7qyTPaK2N5ofzIThd0SjzU7BFNb4_rOm1dw1Be5kLeHjY_YW5lDnM5TAxVPXmZQ0HJ2pAQ0pjQqCHFnPD0bZFIYkeyz8pZx0Hmwcd3ZdC1yztr0ADpTAmMgI9NC2ZFIeoFFo4Ue9ZM_ulhqJQjmgoAlI_qbyjuKCNsWeEQBwM6HHHAsH1gOQIdVxqQ83OQZUuynDQRpqlHHFIndbK2zVRYFA3GgUnTu2-VRQ-DXBFRjvZR5qArnC1f383jmIjGT6VO7l04QJteG_LFetRbXa-T4mcnbsd8XutSgO0INqwKpjw
-
-# 14.安装命令行自动补全功能
-
-```shell
-yum install bash-completion -y
-source /usr/share/bash-completion/bash_completion
-source <(kubectl completion bash)
-echo "source <(kubectl completion bash)" >> ~/.bashrc
-```
-
-# 附录:
-
-配置kube-controller-manager有效期100年(能不能生效的先配上再说)
-
-```shell
-vim /usr/lib/systemd/system/kube-controller-manager.service
-
-# [Service]下找个地方加上
-
---cluster-signing-duration=876000h0m0s \
-
-
-# 重启
-
-systemctl daemon-reload
-systemctl restart kube-controller-manager
-```
-
-防止漏洞扫描
-
-```shell
-vim /etc/systemd/system/kubelet.service.d/10-kubelet.conf
-
-[Service]
-Environment="KUBELET_KUBECONFIG_ARGS=--kubeconfig=/etc/kubernetes/kubelet.kubeconfig --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig"
-Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
-Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6"
-Environment="KUBELET_EXTRA_ARGS=--tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 --image-pull-progress-deadline=30m"
-ExecStart=
-ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS
-```
-
-预留空间,按需分配
-
-```shell
-vim /etc/kubernetes/kubelet-conf.yml
-
-rotateServerCertificates: true
-allowedUnsafeSysctls:
-
- - "net.core*"
- - "net.ipv4.*"
- kubeReserved:
- cpu: "1"
- memory: 1Gi
- ephemeral-storage: 10Gi
- systemReserved:
- cpu: "1"
- memory: 1Gi
- ephemeral-storage: 10Gi
-```
-
-数据盘要与系统盘分开;etcd使用ssd磁盘
+
+```
+
+## 8.3.kube-proxy配置
+
+### 8.3.1此配置只在master01操作
+
+```shell
+cd /root/Kubernetes/
+kubectl -n kube-system create serviceaccount kube-proxy
+
+kubectl create clusterrolebinding system:kube-proxy --clusterrole system:node-proxier --serviceaccount kube-system:kube-proxy
+
+SECRET=$(kubectl -n kube-system get sa/kube-proxy \
+ --output=jsonpath='{.secrets[0].name}')
+
+JWT_TOKEN=$(kubectl -n kube-system get secret/$SECRET \
+--output=jsonpath='{.data.token}' | base64 -d)
+
+PKI_DIR=/etc/kubernetes/pki
+K8S_DIR=/etc/kubernetes
+
+kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://192.168.1.88:8443 --kubeconfig=${K8S_DIR}/kube-proxy.kubeconfig
+
+kubectl config set-credentials kubernetes --token=${JWT_TOKEN} --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
+
+kubectl config set-context kubernetes --cluster=kubernetes --user=kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
+
+kubectl config use-context kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
+```
+
+### 8.3.2将kubeconfig发送至其他节点
+
+```shell
+for NODE in k8s-master02 k8s-master03; do
+ scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig
+ done
+
+for NODE in k8s-node01 k8s-node02 k8s-node03 k8s-node04 k8s-node05; do
+ scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig
+ done
+```
+
+### 8.3.3所有k8s节点添加kube-proxy的配置和service文件
+
+```shell
+cat > /usr/lib/systemd/system/kube-proxy.service << EOF
+[Unit]
+Description=Kubernetes Kube Proxy
+Documentation=https://github.com/kubernetes/kubernetes
+After=network.target
+
+[Service]
+ExecStart=/usr/local/bin/kube-proxy \
+ --config=/etc/kubernetes/kube-proxy.yaml \
+ --v=2
+
+Restart=always
+RestartSec=10s
+
+[Install]
+WantedBy=multi-user.target
+
+EOF
+```
+
+```shell
+cat > /etc/kubernetes/kube-proxy.yaml << EOF
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+bindAddress: 0.0.0.0
+clientConnection:
+ acceptContentTypes: ""
+ burst: 10
+ contentType: application/vnd.kubernetes.protobuf
+ kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
+ qps: 5
+clusterCIDR: 172.16.0.0/12
+configSyncPeriod: 15m0s
+conntrack:
+ max: null
+ maxPerCore: 32768
+ min: 131072
+ tcpCloseWaitTimeout: 1h0m0s
+ tcpEstablishedTimeout: 24h0m0s
+enableProfiling: false
+healthzBindAddress: 0.0.0.0:10256
+hostnameOverride: ""
+iptables:
+ masqueradeAll: false
+ masqueradeBit: 14
+ minSyncPeriod: 0s
+ syncPeriod: 30s
+ipvs:
+ masqueradeAll: true
+ minSyncPeriod: 5s
+ scheduler: "rr"
+ syncPeriod: 30s
+kind: KubeProxyConfiguration
+metricsBindAddress: 127.0.0.1:10249
+mode: "ipvs"
+nodePortAddresses: null
+oomScoreAdj: -999
+portRange: ""
+udpIdleTimeout: 250ms
+
+EOF
+```
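+
+`mode: "ipvs"` only takes effect if the ip_vs kernel modules are available on the node; a quick check before starting kube-proxy:
+
+```shell
+# ip_vs, ip_vs_rr and nf_conntrack should all appear in the output
+lsmod | grep -e ip_vs -e nf_conntrack
+```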
+
+### 8.3.4启动kube-proxy
+
+```shell
+ systemctl daemon-reload
+ systemctl enable --now kube-proxy
+```
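+
+Once the service is up, kube-proxy should report ipvs as its active mode, and ipvsadm (if installed) should list virtual servers for the cluster Services:
+
+```shell
+# The metrics endpoint configured above (127.0.0.1:10249) also exposes the active proxy mode
+curl 127.0.0.1:10249/proxyMode
+
+# Requires the ipvsadm package; shows the IPVS rules kube-proxy created
+ipvsadm -Ln | head
+```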
+
+# 9.安装Calico
+
+## 9.1以下步骤只在master01操作
+
+### 9.1.1更改calico网段
+
+```shell
+cd /root/Kubernetes/calico/
+sed -i "s#POD_CIDR#172.16.0.0/12#g" calico.yaml
+grep "IPV4POOL_CIDR" calico.yaml -A 1
+ - name: CALICO_IPV4POOL_CIDR
+ value: "172.16.0.0/12"
+
+# 创建
+
+kubectl apply -f calico.yaml
+```
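+
+While the DaemonSet rolls out you can watch one calico-node pod come up per host (assuming the manifest keeps the upstream k8s-app labels):
+
+```shell
+kubectl get pod -n kube-system -l k8s-app=calico-node -o wide -w
+```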
+
+### 9.1.2查看容器状态
+
+```shell
+kubectl get pod -A
+NAMESPACE NAME READY STATUS RESTARTS AGE
+kube-system calico-kube-controllers-6f6595874c-cf2sf 1/1 Running 0 14m
+kube-system calico-node-2xd2q 1/1 Running 0 2m25s
+kube-system calico-node-jtzfh 1/1 Running 0 2m3s
+kube-system calico-node-k4jkc 1/1 Running 0 2m24s
+kube-system calico-node-msxwp 1/1 Running 0 2m15s
+kube-system calico-node-tv849 1/1 Running 0 2m12s
+kube-system calico-node-wdbzt 1/1 Running 0 2m18s
+kube-system calico-node-x9sjr 1/1 Running 0 2m33s
+kube-system calico-node-z2mz5 1/1 Running 0 2m16s
+kube-system calico-typha-6b6cf8cbdf-gshvt 1/1 Running 0 14m
+```
+
+# 10.安装CoreDNS
+
+## 10.1以下步骤只在master01操作
+
+### 10.1.1修改文件
+
+```shell
+cd /root/Kubernetes/CoreDNS/
+sed -i "s#KUBEDNS_SERVICE_IP#10.96.0.10#g" coredns.yaml
+
+cat coredns.yaml | grep clusterIP:
+ clusterIP: 10.96.0.10
+```
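+
+10.96.0.10 is not arbitrary: it must sit inside the 10.96.0.0/12 service network and match the clusterDNS address the kubelets were configured with, otherwise pods are handed the wrong resolver. A quick consistency check (assuming kubelet-conf.yml sets clusterDNS as in 8.2.3):
+
+```shell
+# Both commands should show the same address, 10.96.0.10
+grep -A1 clusterDNS /etc/kubernetes/kubelet-conf.yml
+grep "clusterIP:" coredns.yaml
+```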
+
+### 10.1.2安装
+
+```shell
+kubectl create -f coredns.yaml
+serviceaccount/coredns created
+clusterrole.rbac.authorization.k8s.io/system:coredns created
+clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
+configmap/coredns created
+deployment.apps/coredns created
+service/kube-dns created
+```
+
+# 11.安装Metrics Server
+
+## 11.1以下步骤只在master01操作
+
+### 11.1.1安装Metrics-server
+
+在新版的Kubernetes中系统资源的采集均使用Metrics-server,可以通过Metrics采集节点和Pod的内存、磁盘、CPU和网络的使用率
+
+```shell
+# 安装metrics server
+cd /root/Kubernetes/metrics-server/
+
+kubectl create -f .
+
+serviceaccount/metrics-server created
+clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
+clusterrole.rbac.authorization.k8s.io/system:metrics-server created
+rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
+clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
+clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
+service/metrics-server created
+deployment.apps/metrics-server created
+apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
+```
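+
+Before running kubectl top, confirm that the aggregated API registered above actually becomes available; Available=False usually means the metrics-server pod cannot reach or verify the kubelets:
+
+```shell
+# AVAILABLE should turn True once the pod is up
+kubectl get apiservice v1beta1.metrics.k8s.io
+
+# Pod status (label assumed from the upstream metrics-server manifest)
+kubectl get pod -n kube-system -l k8s-app=metrics-server
+```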
+
+### 11.1.2稍等片刻查看状态
+
+```shell
+kubectl top node
+NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
+k8s-master01 172m 2% 1307Mi 16%
+k8s-master02 157m 1% 1189Mi 15%
+k8s-master03 155m 1% 1105Mi 14%
+k8s-node01 99m 1% 710Mi 9%
+k8s-node02 79m 0% 585Mi 7%
+```
+
+# 12.集群验证
+
+## 12.1部署pod资源
+
+```shell
+cat<<EOF | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: busybox
+  namespace: default
+spec:
+  containers:
+  - name: busybox
+    image: busybox:1.28
+    command:
+      - sleep
+      - "3600"
+    imagePullPolicy: IfNotPresent
+  restartPolicy: Always
+EOF
+```
+
+## 12.2用pod解析默认命名空间中的kubernetes
+
+```shell
+kubectl get svc
+NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
+kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   17h
+
+
+kubectl exec busybox -n default -- nslookup kubernetes
+Server:    10.96.0.10
+Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
+
+Name: kubernetes
+Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
+```
+
+## 12.3测试跨命名空间是否可以解析
+
+```shell
+kubectl exec busybox -n default -- nslookup kube-dns.kube-system
+Server: 10.96.0.10
+Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
+
+Name: kube-dns.kube-system
+Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
+```
+
+## 12.4每个节点都必须要能访问Kubernetes的kubernetes svc 443和kube-dns的service 53
+
+```shell
+telnet 10.96.0.1 443
+Trying 10.96.0.1...
+Connected to 10.96.0.1.
+Escape character is '^]'.
+
+ telnet 10.96.0.10 53
+Trying 10.96.0.10...
+Connected to 10.96.0.10.
+Escape character is '^]'.
+
+curl 10.96.0.10:53
+curl: (52) Empty reply from server
+```
+
+## 12.5Pod和Pod之间要能通
+
+```shell
+kubectl get po -owide
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+busybox 1/1 Running 0 17m 172.27.14.193 k8s-node02
+
+ kubectl get po -n kube-system -owide
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+calico-kube-controllers-5dffd5886b-4blh6 1/1 Running 0 77m 172.25.244.193 k8s-master01
+calico-node-fvbdq 1/1 Running 1 (75m ago) 77m 192.168.1.30 k8s-master01
+calico-node-g8nqd 1/1 Running 0 77m 192.168.1.33 k8s-node01
+calico-node-mdps8 1/1 Running 0 77m 192.168.1.34 k8s-node02
+calico-node-nf4nt 1/1 Running 0 77m 192.168.1.32 k8s-master03
+calico-node-sq2ml 1/1 Running 0 77m 192.168.1.31 k8s-master02
+calico-typha-8445487f56-mg6p8 1/1 Running 0 77m 192.168.1.34 k8s-node02
+calico-typha-8445487f56-pxbpj 1/1 Running 0 77m 192.168.1.30 k8s-master01
+calico-typha-8445487f56-tnssl 1/1 Running 0 77m 192.168.1.33 k8s-node01
+coredns-5db5696c7-67h79 1/1 Running 0 63m 172.25.92.65 k8s-master02
+metrics-server-6bf7dcd649-5fhrw 1/1 Running 0 61m 172.18.195.1 k8s-master03
+
+# 进入busybox ping其他节点上的pod
+
+kubectl exec -ti busybox -- sh
+/ # ping 192.168.1.33
+PING 192.168.1.33 (192.168.1.33): 56 data bytes
+64 bytes from 192.168.1.33: seq=0 ttl=63 time=0.358 ms
+64 bytes from 192.168.1.33: seq=1 ttl=63 time=0.668 ms
+64 bytes from 192.168.1.33: seq=2 ttl=63 time=0.637 ms
+64 bytes from 192.168.1.33: seq=3 ttl=63 time=0.624 ms
+64 bytes from 192.168.1.33: seq=4 ttl=63 time=0.907 ms
+
+# 可以连通证明这个pod是可以跨命名空间和跨主机通信的
+```
+
+## 12.6创建三个副本,可以看到3个副本分布在不同的节点上(用完可以删了)
+
+```shell
+cat > deployments.yaml << EOF
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nginx-deployment
+ labels:
+ app: nginx
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:1.14.2
+ ports:
+ - containerPort: 80
+
+EOF
+
+
+kubectl apply -f deployments.yaml
+deployment.apps/nginx-deployment created
+
+kubectl get pod
+NAME READY STATUS RESTARTS AGE
+busybox 1/1 Running 0 6m25s
+nginx-deployment-9456bbbf9-4bmvk 1/1 Running 0 8s
+nginx-deployment-9456bbbf9-9rcdk 1/1 Running 0 8s
+nginx-deployment-9456bbbf9-dqv8s 1/1 Running 0 8s
+
+# 删除nginx
+
+[root@k8s-master01 ~]# kubectl delete -f deployments.yaml
+```
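+
+Before deleting the Deployment, you can check which node each replica landed on by asking for wide output filtered by the `app: nginx` label used above:
+
+```shell
+# The NODE column should show three different hosts
+kubectl get pod -l app=nginx -o wide
+```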
+
+# 13.安装dashboard
+
+```shell
+cd /root/Kubernetes/dashboard/
+
+kubectl create -f .
+serviceaccount/admin-user created
+clusterrolebinding.rbac.authorization.k8s.io/admin-user created
+namespace/kubernetes-dashboard created
+serviceaccount/kubernetes-dashboard created
+service/kubernetes-dashboard created
+secret/kubernetes-dashboard-certs created
+secret/kubernetes-dashboard-csrf created
+secret/kubernetes-dashboard-key-holder created
+configmap/kubernetes-dashboard-settings created
+role.rbac.authorization.k8s.io/kubernetes-dashboard created
+clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
+rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
+clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
+deployment.apps/kubernetes-dashboard created
+service/dashboard-metrics-scraper created
+deployment.apps/dashboard-metrics-scraper created
+```
+
+## 13.1创建管理员用户
+
+```shell
+cat > admin.yaml << EOF
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: admin-user
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: admin-user
+  annotations:
+    rbac.authorization.kubernetes.io/autoupdate: "true"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin
+subjects:
+- kind: ServiceAccount
+  name: admin-user
+  namespace: kube-system
+EOF
+```
+
+## 13.2执行yaml文件
+
+```shell
+kubectl apply -f admin.yaml -n kube-system
+
+serviceaccount/admin-user created
+clusterrolebinding.rbac.authorization.k8s.io/admin-user created
+```
+
+## 13.3更改dashboard的svc为NodePort,如果已是请忽略
+
+```shell
+kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
+ type: NodePort
+```
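+
+If you prefer not to open an editor, the same change can be made non-interactively:
+
+```shell
+kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'
+```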
+
+## 13.4查看端口号
+
+```shell
+kubectl get svc kubernetes-dashboard -n kubernetes-dashboard
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+kubernetes-dashboard   NodePort   10.98.201.22   <none>        443:31245/TCP   10m
+```
+
+## 13.5查看token
+
+```shell
+kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
+Name: admin-user-token-k545k
+Namespace: kube-system
+Labels:       <none>
+Annotations: kubernetes.io/service-account.name: admin-user
+ kubernetes.io/service-account.uid: c308071c-4cf5-4583-83a2-eaf7812512b4
+
+Type: kubernetes.io/service-account-token
+
+Data
+====
+token: eyJhbGciOiJSUzI1NiIsImtpZCI6InYzV2dzNnQzV3hHb2FQWnYzdnlOSmpudmtpVmNjQW5VM3daRi12SFM4dEEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWs1NDVrIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJjMzA4MDcxYy00Y2Y1LTQ1ODMtODNhMi1lYWY3ODEyNTEyYjQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.pshvZPi9ZJkXUWuWilcYs1wawTpzV-nMKesgF3d_l7qyTPaK2N5ofzIThd0SjzU7BFNb4_rOm1dw1Be5kLeHjY_YW5lDnM5TAxVPXmZQ0HJ2pAQ0pjQqCHFnPD0bZFIYkeyz8pZx0Hmwcd3ZdC1yztr0ADpTAmMgI9NC2ZFIeoFFo4Ue9ZM_ulhqJQjmgoAlI_qbyjuKCNsWeEQBwM6HHHAsH1gOQIdVxqQ83OQZUuynDQRpqlHHFIndbK2zVRYFA3GgUnTu2-VRQ-DXBFRjvZR5qArnC1f383jmIjGT6VO7l04QJteG_LFetRbXa-T4mcnbsd8XutSgO0INqwKpjw
+ca.crt: 1363 bytes
+namespace: 11 bytes
+```
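+
+The same token can be printed in one line by reading the secret bound to the admin-user ServiceAccount (on v1.23 the ServiceAccount object still references its token secret):
+
+```shell
+kubectl -n kube-system get secret \
+  $(kubectl -n kube-system get sa admin-user -o jsonpath='{.secrets[0].name}') \
+  -o jsonpath='{.data.token}' | base64 -d ; echo
+```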
+
+## 13.6登录dashboard
+
+https://192.168.1.30:31245/
+
+eyJhbGciOiJSUzI1NiIsImtpZCI6InYzV2dzNnQzV3hHb2FQWnYzdnlOSmpudmtpVmNjQW5VM3daRi12SFM4dEEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWs1NDVrIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJjMzA4MDcxYy00Y2Y1LTQ1ODMtODNhMi1lYWY3ODEyNTEyYjQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.pshvZPi9ZJkXUWuWilcYs1wawTpzV-nMKesgF3d_l7qyTPaK2N5ofzIThd0SjzU7BFNb4_rOm1dw1Be5kLeHjY_YW5lDnM5TAxVPXmZQ0HJ2pAQ0pjQqCHFnPD0bZFIYkeyz8pZx0Hmwcd3ZdC1yztr0ADpTAmMgI9NC2ZFIeoFFo4Ue9ZM_ulhqJQjmgoAlI_qbyjuKCNsWeEQBwM6HHHAsH1gOQIdVxqQ83OQZUuynDQRpqlHHFIndbK2zVRYFA3GgUnTu2-VRQ-DXBFRjvZR5qArnC1f383jmIjGT6VO7l04QJteG_LFetRbXa-T4mcnbsd8XutSgO0INqwKpjw
+
+# 14.安装命令行自动补全功能
+
+```shell
+yum install bash-completion -y
+source /usr/share/bash-completion/bash_completion
+source <(kubectl completion bash)
+echo "source <(kubectl completion bash)" >> ~/.bashrc
+```
+
+# 附录:
+
+配置kube-controller-manager有效期100年(能不能生效的先配上再说)
+
+```shell
+vim /usr/lib/systemd/system/kube-controller-manager.service
+
+# [Service]下找个地方加上
+
+--cluster-signing-duration=876000h0m0s \
+
+
+# 重启
+
+systemctl daemon-reload
+systemctl restart kube-controller-manager
+```
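+
+Only certificates signed after the restart pick up the new duration (and it is still capped by the CA's own expiry); an easy place to verify is the rotated kubelet client certificate on any node:
+
+```shell
+# notAfter should move far into the future for certificates issued after the change
+openssl x509 -noout -dates -in /var/lib/kubelet/pki/kubelet-client-current.pem
+```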
+
+防止漏洞扫描
+
+```shell
+vim /etc/systemd/system/kubelet.service.d/10-kubelet.conf
+
+[Service]
+Environment="KUBELET_KUBECONFIG_ARGS=--kubeconfig=/etc/kubernetes/kubelet.kubeconfig --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig"
+Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
+Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6"
+Environment="KUBELET_EXTRA_ARGS=--tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 --image-pull-progress-deadline=30m"
+ExecStart=
+ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS
+```
+
+预留空间,按需分配
+
+```shell
+vim /etc/kubernetes/kubelet-conf.yml
+
+rotateServerCertificates: true
+allowedUnsafeSysctls:
+ - "net.core*"
+ - "net.ipv4.*"
+kubeReserved:
+  cpu: "1"
+  memory: 1Gi
+  ephemeral-storage: 10Gi
+systemReserved:
+  cpu: "1"
+  memory: 1Gi
+  ephemeral-storage: 10Gi
+```
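+
+The effect of kubeReserved/systemReserved shows up on the node object: after the kubelet is restarted with the new config, Allocatable drops below Capacity by the reserved amounts plus the eviction thresholds:
+
+```shell
+systemctl restart kubelet
+
+# Compare the Capacity and Allocatable sections
+kubectl describe node k8s-node01 | grep -A 6 -e Capacity -e Allocatable
+```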
+
+数据盘要与系统盘分开;etcd使用ssd磁盘