手把手教你玩转Kubernetes

Published on
154 50~65 min

Docker离线安装

docker-20.10.17下载地址

上传docker-20.10.17.tgz到服务器 /export 文件夹下

#解压
tar -zxvf docker-20.10.17.tgz
#解压之后的文件复制到 /usr/bin/ 目录下
cp docker/* /usr/bin/

#在/etc/systemd/system/目录下新增docker.service文件
vi /etc/systemd/system/docker.service

粘贴下面内容

[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
  
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/bin/dockerd --selinux-enabled=false
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
#TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
# restart the docker process if it exits prematurely
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
  
[Install]
WantedBy=multi-user.target

启动Docker

# Grant execute permission to the docker.service unit file
chmod +x /etc/systemd/system/docker.service
# Reload systemd config (required after every change to docker.service)
systemctl daemon-reload
# Start (or restart) docker
systemctl restart docker

# Enable docker to start on boot
systemctl enable docker.service
# Check docker service status
systemctl status docker
#docker配置
#配置本地harbor镜像地址 10.25.140.19:8090
vi /etc/docker/daemon.json
{
  "registry-mirrors": ["https://mirror.ccs.tencentyun.com"],
  "insecure-registries":["10.25.140.19:8090"], 
  "live-restore": true
}

卸载docker

#docker卸载
rm -rf /etc/docker
rm -rf /run/docker
rm -rf /var/lib/dockershim
rm -rf /var/lib/docker

ps -ef|grep docker
kill -9 pid

yum list installed | grep docker

yum remove  containerd.io.x86_64

安装Harbor镜像仓库

下载 docker-compose 并上传至服务器

curl -L https://github.com/docker/compose/releases/download/v2.9.0/docker-compose-linux-x86_64 -o docker-compose

修改 docker-compose 执行权限

 mv docker-compose /usr/local/bin/
 chmod +x /usr/local/bin/docker-compose
 docker-compose version

下载 harbor 离线安装包并上传至服务器

wget https://github.com/goharbor/harbor/releases/download/v2.4.3/harbor-offline-installer-v2.4.3.tgz

解压安装包

tar -xzvf harbor-offline-installer-v2.4.3.tgz -C /export/servers/
cd /export/servers/harbor

修改配置文件

mv harbor.yml.tmpl harbor.yml
vim harbor.yml

设置内容

hostname: 10.132.10.100
http.port: 8090
data_volume: /export/data/harbor
log.location: /export/logs/harbor

导入Harbor镜像到Docker内

$ docker load -i harbor.v2.4.3.tar.gz 
# 等待导入harbor依赖镜像文件
$ docker images
REPOSITORY                      TAG       IMAGE ID       CREATED       SIZE
goharbor/harbor-exporter        v2.4.3    776ac6ee91f4   4 weeks ago   81.5MB
goharbor/chartmuseum-photon     v2.4.3    f39a9694988d   4 weeks ago   172MB
goharbor/redis-photon           v2.4.3    b168e9750dc8   4 weeks ago   154MB
goharbor/trivy-adapter-photon   v2.4.3    a406a715461c   4 weeks ago   251MB
goharbor/notary-server-photon   v2.4.3    da89404c7cf9   4 weeks ago   109MB
goharbor/notary-signer-photon   v2.4.3    38468ac13836   4 weeks ago   107MB
goharbor/harbor-registryctl     v2.4.3    61243a84642b   4 weeks ago   135MB
goharbor/registry-photon        v2.4.3    9855479dd6fa   4 weeks ago   77.9MB
goharbor/nginx-photon           v2.4.3    0165c71ef734   4 weeks ago   44.4MB
goharbor/harbor-log             v2.4.3    57ceb170dac4   4 weeks ago   161MB
goharbor/harbor-jobservice      v2.4.3    7fea87c4b884   4 weeks ago   219MB
goharbor/harbor-core            v2.4.3    d864774a3b8f   4 weeks ago   197MB
goharbor/harbor-portal          v2.4.3    85f00db66862   4 weeks ago   53.4MB
goharbor/harbor-db              v2.4.3    7693d44a2ad6   4 weeks ago   225MB
goharbor/prepare                v2.4.3    c882d74725ee   4 weeks ago   268MB

启动Harbor

./prepare  # 如果有二次修改harbor.yml文件,请执行使配置文件生效
./install.sh --help # 查看启动参数
./install.sh --with-chartmuseum

#重启harbor
cd /export/servers/harbor
docker-compose stop
#启动
docker-compose up -d

kubernetes 安装

系统基础设置

mkdir -p /export/servers
mkdir -p /export/logs
mkdir -p /export/data
mkdir -p /export/upload

内核及网络参数优化

vim /etc/sysctl.conf

# 设置以下内容
fs.file-max = 1048576
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_fin_timeout = 5
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.all.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2 
vm.max_map_count = 262144

# 及时生效
sysctl -w vm.max_map_count=262144




ulimit 优化

vim /etc/security/limits.conf 
# 设置以下内容
* soft memlock unlimited
* hard memlock unlimited
* soft nproc 102400
* hard nproc 102400
* soft nofile 1048576
* hard nofile 1048576

设置Host

# Append cluster host entries; the quoted heredoc delimiter prevents any shell expansion
cat >> /etc/hosts << 'EOF'
10.192.193.17    master1
10.192.193.18    master2
10.192.193.19    worker1
10.192.193.20    worker2
EOF

有网环境安装

# 添加阿里云YUM的软件源:
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# 执行命令:
yum install -y kubelet-1.22.4 kubeadm-1.22.4 kubectl-1.22.4

无网环境安装

下载离线安装包

Kubernetes安装包

Kubernetes镜像

# 创建rpm软件存储目录:
mkdir -p /export/download/kubeadm-rpm

# 执行命令:
yum install -y kubelet-1.22.4 kubeadm-1.22.4 kubectl-1.22.4 --downloadonly --downloaddir /export/download/kubeadm-rpm

上传 kubeadm 及依赖 rpm 包

ls /export/upload/
kubeadm-rpm.tgz 

#解压安装
tar xzvf /export/upload/kubeadm-rpm.tgz -C /export/upload/ && yum -y install /export/upload/kubeadm-rpm/*

#设置开机自启并启动
systemctl enable kubelet && systemctl start kubelet

#注:此时kubelet启动失败,会进入不断重启,这个是正常现象,执行init或join后问题会自动解决,对此官网有如下描述,也就是此时不用理会kubelet.service,可执行如下命令查看kubelet状态。

分发依赖镜像至集群节点

# 可以在有公网环境提前下载镜像
$ docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.4
$ docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.0-0
$ docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.22.4
$ docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.22.4
$ docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.22.4
$ docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.22.4
$ docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.5
$ docker pull rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
$ docker pull rancher/mirrored-flannelcni-flannel:v0.19.1
# 导出镜像文件,上传部署节点并导入镜像库
$ ls /export/upload

$ docker load -i google_containers-coredns-v1.8.4.tar
$ docker load -i google_containers-etcd:3.5.0-0.tar
$ docker load -i google_containers-kube-apiserver:v1.22.4.tar
$ docker load -i google_containers-kube-controller-manager-v1.22.4.tar
$ docker load -i google_containers-kube-proxy-v1.22.4.tar
$ docker load -i google_containers-kube-scheduler-v1.22.4.tar
$ docker load -i google_containers-pause-3.5.tar
$ docker load -i rancher-mirrored-flannelcni-flannel-cni-plugin-v1.1.0.tar
$ docker load -i rancher-mirrored-flannelcni-flannel-v0.19.1.tar

# 镜像打harbor镜像库tag
$ docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.4 10.132.10.100:8090/community/coredns:v1.8.4
$ docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.0-0 10.132.10.100:8090/community/etcd:3.5.0-0
$ docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.22.4 10.132.10.100:8090/community/kube-apiserver:v1.22.4
$ docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.22.4 10.132.10.100:8090/community/kube-controller-manager:v1.22.4
$ docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.22.4 10.132.10.100:8090/community/kube-proxy:v1.22.4
$ docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.22.4 10.132.10.100:8090/community/kube-scheduler:v1.22.4
$ docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.5 10.132.10.100:8090/community/pause:3.5
$ docker tag rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0 10.132.10.100:8090/community/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
$ docker tag rancher/mirrored-flannelcni-flannel:v0.19.1 10.132.10.100:8090/community/mirrored-flannelcni-flannel:v0.19.1

# 推送至harbor镜像库
# Push to the harbor registry — the registry host:port must match the tag step above
# (images were tagged 10.132.10.100:8090/community/*, so push the same names)
$ docker push 10.132.10.100:8090/community/coredns:v1.8.4
$ docker push 10.132.10.100:8090/community/etcd:3.5.0-0
$ docker push 10.132.10.100:8090/community/kube-apiserver:v1.22.4
$ docker push 10.132.10.100:8090/community/kube-controller-manager:v1.22.4
$ docker push 10.132.10.100:8090/community/kube-proxy:v1.22.4
$ docker push 10.132.10.100:8090/community/kube-scheduler:v1.22.4
$ docker push 10.132.10.100:8090/community/pause:3.5
$ docker push 10.132.10.100:8090/community/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
$ docker push 10.132.10.100:8090/community/mirrored-flannelcni-flannel:v0.19.1

部署首个 master

# --image-repository points at the local Harbor registry
# NOTE(review): endpoint 10.25.140.19 vs registry 10.192.193.22 — confirm both match your environment
kubeadm init \
  --control-plane-endpoint "10.25.140.19:6443" \
  --image-repository 10.192.193.22:8080/community \
  --kubernetes-version v1.22.4 \
  --service-cidr=172.16.0.0/16 \
  --pod-network-cidr=10.244.0.0/16 \
  --ignore-preflight-errors=all \
  --v=6

生成 kubelet 环境配置文件

# 执行命令
$ mkdir -p $HOME/.kube
$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
$ sudo chown $(id -u):$(id -g) $HOME/.kube/config

配置网络插件 flannel

# 创建flannel.yml文件
$ touch /export/servers/kubernetes/flannel.yml
$ vim /export/servers/kubernetes/flannel.yml
# 设置以下内容,需要关注有网无网时对应的地址切换

安装网络插件 flannel

# Apply the flannel manifest created in the previous step (path must match the file created above)
$ kubectl apply -f /export/servers/kubernetes/flannel.yml

# 查看pods状态
$ kubectl get pods -A
NAMESPACE      NAME                               READY   STATUS    RESTARTS   AGE
kube-flannel   kube-flannel-ds-kjmt4              1/1     Running   0          148m
kube-system    coredns-7f84d7b4b5-7qr8g           1/1     Running   0          4h18m
kube-system    coredns-7f84d7b4b5-fljws           1/1     Running   0          4h18m
kube-system    etcd-master01                      1/1     Running   0          4h19m
kube-system    kube-apiserver-master01            1/1     Running   0          4h19m
kube-system    kube-controller-manager-master01   1/1     Running   0          4h19m
kube-system    kube-proxy-wzq2t                   1/1     Running   0          4h18m
kube-system    kube-scheduler-master01            1/1     Running   0          4h19m

加入其他 master 节点

# 在master01执行如下操作
# 查看token列表
$ kubeadm token list

# master01执行init操作后生成加入命令如下
$ kubeadm join 10.132.10.91:6443 \
--token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:4884a98b0773bc89c36dc5fa51569293103ff093e9124431c4c8c2d5801a96a2 \
--control-plane --certificate-key 9151ea7bb260a42297f2edc486d5792f67d9868169310b82ef1eb18f6e4c0f13

# 在其他master节点执行如下操作
# 分别执行上一步的加入命令,加入master节点至集群
$ kubeadm join 10.132.10.91:6443 \
--token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:4884a98b0773bc89c36dc5fa51569293103ff093e9124431c4c8c2d5801a96a2 \
--control-plane --certificate-key 9151ea7bb260a42297f2edc486d5792f67d9868169310b82ef1eb18f6e4c0f13

# 此处如果报错,一般是certificate-key过期,可以在master01执行如下命令更新
$ kubeadm init phase upload-certs --upload-certs
3b647155b06311d39faf70cb094d9a5e102afd1398323e820cfb3cfd868ae58f

# 将上面生成的值替换certificate-key值再次在其他master节点执行如下命令
$ kubeadm join 10.132.10.91:6443 \
--token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:4884a98b0773bc89c36dc5fa51569293103ff093e9124431c4c8c2d5801a96a2     
--control-plane 
--certificate-key 3b647155b06311d39faf70cb094d9a5e102afd1398323e820cfb3cfd868ae58f

# 生成kubelet环境配置文件
$ mkdir -p $HOME/.kube
$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
$ sudo chown $(id -u):$(id -g) $HOME/.kube/config

# 在任意master节点执行查看节点状态命令
$ kubectl get nodes
NAME       STATUS   ROLES                  AGE     VERSION
master01   Ready    control-plane,master   5h58m   v1.22.4
master02   Ready    control-plane,master   45m     v1.22.4
master03   Ready    control-plane,master   44m     v1.22.4

加入 worker 节点

# 在其他worker节点执行master01执行init操作后生成的加入命令如下
# 分别执行上一步的加入命令,加入master节点至集群
$ kubeadm join 10.132.10.91:6443 \
--token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:4884a98b0773bc89c36dc5fa51569293103ff093e9124431c4c8c2d5801a96a2

# 此处如果报错,一般是token过期,可以在master01执行如下命令重新生成加入命令
$ kubeadm token create --print-join-command
kubeadm join 10.132.10.91:6443 \
--token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:cf30ddd3df1c6215b886df1ea378a68ad5a9faad7933d53ca9891ebbdf9a1c3f

# 将上面生成的加入命令再次在其他worker节点执行
# 查看集群节点状态
$ kubectl get nodes
NAME       STATUS   ROLES                  AGE     VERSION
master01   Ready    control-plane,master   6h12m   v1.22.4
master02   Ready    control-plane,master   58m     v1.22.4
master03   Ready    control-plane,master   57m     v1.22.4
worker01   Ready    <none>                 5m12s   v1.22.4
worker02   Ready    <none>                 4m10s   v1.22.4
worker03   Ready    <none>                 3m42s   v1.22.4

配置官方dashboard(可选)

部署Kuboard 3.0

#docker一键部署kuboard:v3
sudo docker run -d \
  --restart=unless-stopped \
  --name=kuboard \
  -p 8848:80/tcp \
  -p 10081:10081/tcp \
  -e KUBOARD_ENDPOINT="http://10.25.140.19:8848" \
  -e KUBOARD_AGENT_SERVER_TCP_PORT="10081" \
  -v /root/kuboard-data:/data \
  eipwork/kuboard:v3

kubectl 安装(可选)

解压之前上传的 kubeadm-rpm 包

$ tar xzvf kubeadm-rpm.tgz 

$ rpm -ivh bc7a9f8e7c6844cfeab2066a84b8fecf8cf608581e56f6f96f80211250f9a5e7-kubectl-1.22.4-0.x86_64.rpm

# 生成kubelet环境配置文件
$ mkdir -p $HOME/.kube
$ sudo touch $HOME/.kube/config
$ sudo chown $(id -u):$(id -g) $HOME/.kube/config
# 从任意master节点复制内容至上面的配置文件


$ kubectl version
Client Version: version.Info{Major:"1", Minor:"22", GitVersion:"v1.22.4", GitCommit:"b695d79d4f967c403a96986f1750a35eb75e75f1", GitTreeState:"clean", BuildDate:"2021-11-17T15:48:33Z", GoVersion:"go1.16.10", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"22", GitVersion:"v1.22.4", GitCommit:"b695d79d4f967c403a96986f1750a35eb75e75f1", GitTreeState:"clean", BuildDate:"2021-11-17T15:42:41Z", GoVersion:"go1.16.10", Compiler:"gc", Platform:"linux/amd64"}

常见问题

修复网卡问题

cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.ipv4.tcp_tw_recycle = 0
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

sysctl --system


#重载网卡
ifconfig cni0 down    
ip link delete cni0

Docker运行MySQL8

docker run \
--name mysql8 \
-v /usr/local/docker/mysql/conf/my.cnf:/etc/my.cnf \
-v /usr/local/docker/mysql/data:/var/lib/mysql \
-v /usr/local/docker/mysql/log:/var/log \
-v /usr/local/docker/mysql/mysql-files:/var/lib/mysql-files \
-p 3306:3306 \
-e MYSQL_ROOT_PASSWORD='123456' \
-d mysql:8.0.33

创建访问外部服务Service pod

apiVersion: v1
kind: Service
metadata:
	#service名称
  name: mysql-svc
  namespace: smart-city
spec:
  clusterIP: None
  ports:
  - name: mysql
    port: 3306
    protocol: TCP
    targetPort: 3306
  type: ClusterIP

---
apiVersion: v1
kind: Endpoints
metadata:
  name: mysql-svc
  namespace: smart-city
subsets:
- addresses:
	# 外部网络IP
  - ip: 10.25.140.19
  ports:
  - name: mysql
    port: 3306
    protocol: TCP

离线导入k8s所需镜像包

#拉取k8s所需镜像
docker pull registry.aliyuncs.com/google_containers/coredns:v1.8.4
docker pull registry.aliyuncs.com/google_containers/etcd:3.5.0-0
docker pull registry.aliyuncs.com/google_containers/kube-apiserver:v1.22.4
docker pull registry.aliyuncs.com/google_containers/kube-controller-manager:v1.22.4
docker pull registry.aliyuncs.com/google_containers/kube-proxy:v1.22.4
docker pull registry.aliyuncs.com/google_containers/kube-scheduler:v1.22.4
docker pull registry.aliyuncs.com/google_containers/pause:3.5
docker pull rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
docker pull rancher/mirrored-flannelcni-flannel:v0.19.1

#导出
docker save -o google_containers-coredns-v1.8.4.tar registry.aliyuncs.com/google_containers/coredns:v1.8.4
docker save -o google_containers-etcd:3.5.0-0.tar registry.aliyuncs.com/google_containers/etcd:3.5.0-0
docker save -o google_containers-kube-apiserver:v1.22.4.tar registry.aliyuncs.com/google_containers/kube-apiserver:v1.22.4
docker save -o google_containers-kube-controller-manager-v1.22.4.tar registry.aliyuncs.com/google_containers/kube-controller-manager:v1.22.4
docker save -o google_containers-kube-proxy-v1.22.4.tar registry.aliyuncs.com/google_containers/kube-proxy:v1.22.4
docker save -o google_containers-kube-scheduler-v1.22.4.tar registry.aliyuncs.com/google_containers/kube-scheduler:v1.22.4
docker save -o google_containers-pause-3.5.tar registry.aliyuncs.com/google_containers/pause:3.5
docker save -o rancher-mirrored-flannelcni-flannel-cni-plugin-v1.1.0.tar rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
docker save -o rancher-mirrored-flannelcni-flannel-v0.19.1.tar rancher/mirrored-flannelcni-flannel:v0.19.1

# Offline load on the air-gapped host — file names must match the save step above exactly
docker load -i google_containers-coredns-v1.8.4.tar
docker load -i google_containers-etcd:3.5.0-0.tar
docker load -i google_containers-kube-apiserver:v1.22.4.tar
docker load -i google_containers-kube-controller-manager-v1.22.4.tar
docker load -i google_containers-kube-proxy-v1.22.4.tar
docker load -i google_containers-kube-scheduler-v1.22.4.tar
docker load -i google_containers-pause-3.5.tar
docker load -i rancher-mirrored-flannelcni-flannel-cni-plugin-v1.1.0.tar
docker load -i rancher-mirrored-flannelcni-flannel-v0.19.1.tar

#给镜像打Harbor仓库tag
docker tag registry.aliyuncs.com/google_containers/coredns:v1.8.4 10.192.193.22:8080/smart/coredns:v1.8.4
docker tag registry.aliyuncs.com/google_containers/etcd:3.5.0-0 10.192.193.22:8080/smart/etcd:3.5.0-0
docker tag registry.aliyuncs.com/google_containers/kube-apiserver:v1.22.4 10.192.193.22:8080/smart/kube-apiserver:v1.22.4
docker tag registry.aliyuncs.com/google_containers/kube-controller-manager:v1.22.4 10.192.193.22:8080/smart/kube-controller-manager:v1.22.4
docker tag registry.aliyuncs.com/google_containers/kube-proxy:v1.22.4 10.192.193.22:8080/smart/kube-proxy:v1.22.4
docker tag registry.aliyuncs.com/google_containers/kube-scheduler:v1.22.4 10.192.193.22:8080/smart/kube-scheduler:v1.22.4
docker tag registry.aliyuncs.com/google_containers/pause:3.5 10.192.193.22:8080/smart/pause:3.5
docker tag rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0 10.192.193.22:8080/smart/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
docker tag rancher/mirrored-flannelcni-flannel:v0.19.1 10.192.193.22:8080/smart/mirrored-flannelcni-flannel:v0.19.1

#推送到harbor
docker push 10.192.193.22:8080/smart/coredns:v1.8.4
docker push 10.192.193.22:8080/smart/etcd:3.5.0-0
docker push 10.192.193.22:8080/smart/kube-apiserver:v1.22.4
docker push 10.192.193.22:8080/smart/kube-controller-manager:v1.22.4
docker push 10.192.193.22:8080/smart/kube-proxy:v1.22.4
docker push 10.192.193.22:8080/smart/kube-scheduler:v1.22.4
docker push 10.192.193.22:8080/smart/pause:3.5
docker push 10.192.193.22:8080/smart/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
docker push 10.192.193.22:8080/smart/mirrored-flannelcni-flannel:v0.19.1

# Remove the locally tagged copies — names must match the tag step (<registry:port>/smart/<image>),
# otherwise docker rmi fails with "No such image"
docker rmi 10.192.193.22:8080/smart/mirrored-flannelcni-flannel:v0.19.1
docker rmi 10.192.193.22:8080/smart/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
docker rmi 10.192.193.22:8080/smart/kube-apiserver:v1.22.4
docker rmi 10.192.193.22:8080/smart/kube-controller-manager:v1.22.4
docker rmi 10.192.193.22:8080/smart/kube-scheduler:v1.22.4
docker rmi 10.192.193.22:8080/smart/kube-proxy:v1.22.4
docker rmi 10.192.193.22:8080/smart/etcd:3.5.0-0
docker rmi 10.192.193.22:8080/smart/coredns:v1.8.4
docker rmi 10.192.193.22:8080/smart/pause:3.5

kubeadm init 后一直卡在健康检查

关闭swap和selinux