kubeadm

Setting Up a Kubernetes Cluster with kubeadm

Version: 1.26

kubeadm is the official Kubernetes tool for quickly installing and deploying a Kubernetes cluster. It is updated in step with every Kubernetes release, and over time kubeadm adjusts some of its cluster-configuration practices, so experimenting with kubeadm is a good way to learn the latest upstream best practices for cluster configuration.

1. Hostnames and name resolution

When a node joins the cluster, the node name defaults to the node's hostname, and that name is used for access inside the cluster. If you do not want to configure hostnames, you can set the node name to the node's IP address when it joins the cluster.

[root@k8s-m1 ~]# cat <<EOF >> /etc/hosts
192.168.8.173 m1
192.168.8.174 m2
192.168.8.175 m3
192.168.8.176 n1
EOF
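
If you prefer to keep hostnames as node names, set each node's hostname explicitly to match the entries above. A minimal sketch (the names m1/m2/m3/n1 are the ones assumed in the hosts file above):

# run on each node with its own name, e.g. on the first master
[root@k8s-m1 ~]# hostnamectl set-hostname m1
# verify the static hostname
[root@k8s-m1 ~]# hostnamectl status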

2. Disable SELinux and the firewall

[root@k8s-m1 ~]# systemctl stop firewalld
[root@k8s-m1 ~]# systemctl disable firewalld

[root@k8s-m1 ~]# sed -i "s/SELINUX=enforcing/SELINUX=disabled/" /etc/selinux/config
[root@k8s-m1 ~]# setenforce 0

If the hosts keep their firewall enabled, you must open the ports required by the various Kubernetes components; see Ports and Protocols in the official documentation, then either open the relevant ports or disable the host firewall.

3. Time synchronization

[root@k8s-m1 ~]# yum -y install chrony
[root@k8s-m1 ~]# systemctl restart chronyd
[root@k8s-m1 ~]# systemctl enable chronyd
[root@k8s-m1 ~]# timedatectl set-timezone Asia/Shanghai
[root@k8s-m1 ~]# timedatectl set-local-rtc 0
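
To confirm the clock is actually synchronized before installing Kubernetes, a quick check with chrony and timedatectl (purely a verification step):

[root@k8s-m1 ~]# chronyc sources -v
[root@k8s-m1 ~]# timedatectl status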

4. Upgrade the kernel and tune kernel parameters

CentOS 7 requires a kernel upgrade:

[root@k8s-m1 ~]# rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
[root@k8s-m1 ~]# rpm -Uvh  https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
[root@k8s-m1 ~]# yum --enablerepo=elrepo-kernel install kernel-lt -y
# List the kernels available on the system
[root@k8s-m1 ~]# awk -F\' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg
[root@k8s-m1 ~]# grub2-set-default 0

[root@k8s-m1 ~]# reboot
Write the kernel parameters required by Kubernetes:

[root@k8s-m1 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
# https://github.com/moby/moby/issues/31208
# ipvsadm -l --timeout
# Fix long-lived connection timeouts in IPVS mode; any value below 900 works
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2

net.ipv4.ip_forward = 1
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
# Let iptables see bridged traffic (required by kube-proxy and most CNI plugins)
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1
net.netfilter.nf_conntrack_max = 2310720
fs.inotify.max_user_watches=89100
fs.may_detach_mounts = 1
fs.file-max = 52706963
fs.nr_open = 52706963
vm.swappiness = 0
vm.overcommit_memory=1
vm.panic_on_oom=0
EOF

Load the br_netfilter module first (the net.bridge.* keys are only available once it is loaded), then apply the parameters:

[root@k8s-m1 ~]# modprobe br_netfilter
[root@k8s-m1 ~]# sysctl --system

Upgrade packages

[root@k8s-m1 ~]# yum -y upgrade

5. IPVS configuration

[root@k8s-m1 ~]# yum -y install  ipvsadm ipset

Reference: https://github.com/kubernetes/kubernetes/blob/master/pkg/proxy/ipvs/README.md

Notes: use nf_conntrack instead of nf_conntrack_ipv4 for Linux kernel 4.19 and later

[root@k8s-m1 ~]# cat >/etc/modules-load.d/ipvs.conf<<EOF
ip_vs
# IPVS scheduler: least connections
ip_vs_lc
# IPVS scheduler: weighted least connections
ip_vs_wlc
# IPVS scheduler: round robin
ip_vs_rr
# IPVS scheduler: weighted round robin
ip_vs_wrr
# IPVS scheduler: source hashing
ip_vs_sh
nf_conntrack
br_netfilter
# containerd
overlay
EOF

[root@k8s-m1 ~]# systemctl restart systemd-modules-load.service

Check that the modules are loaded

[root@k8s-m1 ~]# lsmod | grep -e ip_vs -e nf_conntrack -e br_netfilter

6. Configure yum repositories

epel-release

[root@k8s-m1 ~]# yum -y install epel-release
[root@k8s-m1 ~]# sed -e 's|^metalink=|#metalink=|g' \
         -e 's|^#baseurl=https\?://download.fedoraproject.org/pub/epel/|baseurl=https://mirrors.ustc.edu.cn/epel/|g' \
         -e 's|^#baseurl=https\?://download.example/pub/epel/|baseurl=https://mirrors.ustc.edu.cn/epel/|g' \
         -i.bak \
         /etc/yum.repos.d/epel.repo

kubernetes

The directory layout of the newer upstream package repositories has changed.

# amd64
[root@k8s-m1 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF


# arm64
[root@k8s-m1 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-aarch64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

7. Installation

containerd can be installed in two ways: from the Docker yum repository, or from the upstream binary release.

Install containerd from the Docker repository

[root@k8s-m1 ~]# curl -sL https://mirrors.ustc.edu.cn/docker-ce/linux/centos/docker-ce.repo -o /etc/yum.repos.d/docker-ce.repo
[root@k8s-m1 ~]# sed -i 's+download.docker.com+mirrors.ustc.edu.cn/docker-ce+' /etc/yum.repos.d/docker-ce.repo
[root@k8s-m1 ~]# yum -y install containerd

Configure registry mirrors

https://github.com/containerd/containerd/blob/main/docs/cri/registry.md

https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration

https://www.cnblogs.com/liy36/p/16590745.html

cp /etc/containerd/config.toml{,.bak}
containerd config default > /etc/containerd/config.toml

Install containerd from the binary release

The tarball is already laid out according to the directory structure recommended for the official binary deployment; it contains the systemd unit file, containerd itself, and the CNI deployment files. Extract it into the system root directory /:

[root@k8s-m1 ~]# curl -sL https://github.com/containerd/containerd/releases/download/v1.6.21/cri-containerd-cni-1.6.21-linux-amd64.tar.gz -o cri-containerd-cni-1.6.21-linux-amd64.tar.gz

[root@k8s-m1 ~]# tar -xf cri-containerd-cni-1.6.21-linux-amd64.tar.gz -C /

etc/
etc/cni/
etc/cni/net.d/
etc/cni/net.d/10-containerd-net.conflist
etc/systemd/
etc/systemd/system/
etc/systemd/system/containerd.service
etc/crictl.yaml
usr/
usr/local/
usr/local/sbin/
usr/local/sbin/runc
usr/local/bin/
usr/local/bin/containerd-stress
usr/local/bin/containerd-shim
usr/local/bin/containerd-shim-runc-v1
usr/local/bin/crictl
usr/local/bin/critest
usr/local/bin/containerd-shim-runc-v2
usr/local/bin/ctd-decoder
usr/local/bin/containerd
usr/local/bin/ctr
opt/
opt/cni/
opt/cni/bin/
opt/cni/bin/ptp
opt/cni/bin/bandwidth
opt/cni/bin/static
opt/cni/bin/dhcp
...
opt/containerd/
opt/containerd/cluster/
...

Generate the containerd configuration file

mkdir -p /etc/containerd
containerd config default > /etc/containerd/config.toml

According to the Container runtimes documentation, on Linux distributions that use systemd as the init system, using systemd as the container cgroup driver keeps nodes more stable under resource pressure, so containerd on every node is configured here with the systemd cgroup driver.

Edit the configuration file generated above, /etc/containerd/config.toml.

Configure registry mirrors

  • The SJTU mirror is no longer available; substitute a working mirror in the hosts.toml below if needed.

# use the systemd cgroup driver
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml

# set the registry configuration directory
sed -i 's+config_path = ""+config_path = "/etc/containerd/certs.d"+' /etc/containerd/config.toml
mkdir /etc/containerd/certs.d/docker.io -pv

# mirror the registry.k8s.io images (the sandbox/pause image)
sed -i 's+registry.k8s.io+registry.cn-hangzhou.aliyuncs.com/serialt+' /etc/containerd/config.toml
 
cat > /etc/containerd/certs.d/docker.io/hosts.toml << EOF
server = "https://docker.io"
[host."https://docker.mirrors.sjtug.sjtu.edu.cn"]
  capabilities = ["pull", "resolve", "push"]
  # skip_verify = true
  # ca = "/path/to/ca.crt"
EOF

systemctl restart containerd
systemctl enable containerd
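
To confirm the edits above took effect, you can dump the merged runtime configuration and check the service state (a verification sketch; the grep patterns simply match the two values changed above):

containerd config dump | grep SystemdCgroup
containerd config dump | grep config_path
systemctl status containerd --no-pager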

Manage containerd containers with crictl

tee /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF
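
crictl should now be able to talk to containerd through the configured socket; a short sanity check (busybox is just an arbitrary test image):

crictl version
crictl pull docker.io/library/busybox:latest
crictl images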

Disable swap

# temporary (until reboot)
[root@k8s-m1 ~]# swapoff -a
[root@k8s-m1 ~]# free -h
              total        used        free      shared  buff/cache   available
Mem:           972M        344M         67M        8.1M        559M        456M
Swap:            0B          0B          0B

# permanent (comment swap out of /etc/fstab)
[root@k8s-m1 ~]# sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab

Install the Kubernetes packages

[root@k8s-m1 ~]# yum -y install kubeadm-1.26.5 kubectl-1.26.5 kubelet-1.26.5

kubectl does not need to be installed on worker nodes.
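
A quick check that the installed tools match the intended release (versions here follow the 1.26.5 packages installed above):

kubeadm version -o short
kubelet --version
kubectl version --client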

8. Pull the Kubernetes images

[root@k8s-m1 ~]# kubeadm config images list --kubernetes-version=v1.26.5

Since Kubernetes is developed by Google, pull the images directly if you have unrestricted internet access; otherwise a domestic registry mirror is needed.

Note: Kubernetes is strict about image versions, so the following steps are best done with a script.

[root@k8s9 ~]# cat pull-k8s.sh 
#!/bin/bash

version=v1.26.5
K8S_URL=registry.k8s.io
# MIRROR_URL=docker.io/serialt
MIRROR_URL=registry.cn-hangzhou.aliyuncs.com/serialt
images=`kubeadm config images list --kubernetes-version=${version} |awk -F '/' '{print $NF}'`
for imageName in ${images[@]}
do
    ctr images pull ${MIRROR_URL}/${imageName}

    echo ${imageName} | grep coredns
    if [[ $? == 0 ]];then 
        ctr images tag ${MIRROR_URL}/${imageName} ${K8S_URL}/coredns/${imageName}
    else
        ctr images tag ${MIRROR_URL}/${imageName} ${K8S_URL}/${imageName}
    fi
    ctr images rm  ${MIRROR_URL}/$imageName
done

The script above pulls into containerd's default namespace. The variant below pulls into the k8s.io namespace, which is the namespace the kubelet/CRI actually reads images from:

[root@k8s9 ~]# cat pull-k8s.sh 
#!/bin/bash

version=v1.26.4
K8S_URL=registry.k8s.io
# MIRROR_URL=docker.io/serialt
MIRROR_URL=registry.cn-hangzhou.aliyuncs.com/serialt
images=`kubeadm config images list --kubernetes-version=${version} |awk -F '/' '{print $NF}'`
for imageName in ${images[@]}
do
    ctr -n k8s.io images pull ${MIRROR_URL}/${imageName}

    echo ${imageName} | grep coredns
    if [[ $? == 0 ]];then 
        ctr -n k8s.io  images tag ${MIRROR_URL}/${imageName} ${K8S_URL}/coredns/${imageName}
    else
        ctr -n k8s.io  images tag ${MIRROR_URL}/${imageName} ${K8S_URL}/${imageName}
    fi
    ctr -n k8s.io  images rm  ${MIRROR_URL}/$imageName
done
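
Once the script finishes, the images should be visible in the k8s.io namespace used by the CRI (a verification sketch):

ctr -n k8s.io images ls | grep registry.k8s.io
crictl images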

9. Initialize the cluster

Method 1: single master (control-plane) node

Enable kubelet at boot

[root@k8s-m1 ~]# systemctl enable kubelet

Initialize the node

[root@k8s-m1 ~]# kubeadm init --kubernetes-version=v1.26.4 --pod-network-cidr=10.244.0.0/16 --ignore-preflight-errors=SystemVerification 

To ignore specific preflight errors, add --ignore-preflight-errors=<name of the error>, for example --ignore-preflight-errors=SystemVerification.

During initialization kubeadm generates the various certificates, kubeconfig files, bootstrap tokens, and so on. The end of the output contains the kubeadm join command used later to add nodes to the cluster, followed by the commands below, which configure kubectl access to the cluster:

[root@k8s-m1 ~]# mkdir -p $HOME/.kube
[root@k8s-m1 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-m1 ~]# chown $(id -u):$(id -g) $HOME/.kube/config
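
kubectl should now reach the API server; the node will report NotReady until a network plugin is installed in step 10:

[root@k8s-m1 ~]# kubectl cluster-info
[root@k8s-m1 ~]# kubectl get nodes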

Initialize the node with a configuration file

Official documentation: https://kubernetes.io/zh/docs/reference/setup-tools/kubeadm/kubeadm-config/

Method 2: initialize with a configuration file

Generate the file

[root@k8s-m1 ~]# kubeadm config print init-defaults --component-configs KubeProxyConfiguration,KubeletConfiguration > kubeadm-init.yml
Customize the file (kubelet parameters below), then use it to initialize the cluster; the init command appears later in this section.

Custom kubelet parameters:

imageMinimumGCAge: 20m  # how long an unused image may exist before it is garbage collected
imageGCHighThresholdPercent: 80
imageGCLowThresholdPercent: 70
maxPods: 200

Example file

apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.16.1.136    # change to this host's IP
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: r1
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "172.16.1.200:6443" # control-plane IP
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.k8s.io
kind: ClusterConfiguration
kubernetesVersion: 1.26.0
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"  # pod network CIDR
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
bindAddressHardFail: false
clientConnection:
  acceptContentTypes: ""
  burst: 0
  contentType: ""
  kubeconfig: /var/lib/kube-proxy/kubeconfig.conf
  qps: 0
clusterCIDR: ""
configSyncPeriod: 0s
conntrack:
  maxPerCore: null
  min: null
  tcpCloseWaitTimeout: null
  tcpEstablishedTimeout: null
detectLocal:
  bridgeInterface: ""
  interfaceNamePrefix: ""
detectLocalMode: ""
enableProfiling: false
healthzBindAddress: ""
hostnameOverride: ""
iptables:
  localhostNodePorts: null
  masqueradeAll: false
  masqueradeBit: null
  minSyncPeriod: 0s
  syncPeriod: 0s
ipvs:
  excludeCIDRs: null
  minSyncPeriod: 0s
  scheduler: "wrr" # 设置轮询算法
  strictARP: true # 方便使用metallb
  syncPeriod: 0s
  tcpFinTimeout: 0s
  tcpTimeout: 0s
  udpTimeout: 0s
kind: KubeProxyConfiguration
metricsBindAddress: ""
mode: "ipvs"    # 使用ipvs
nodePortAddresses: null
oomScoreAdj: null
portRange: ""
showHiddenMetricsForVersion: ""
winkernel:
  enableDSR: false
  forwardHealthCheckVip: false
  networkName: ""
  rootHnsEndpointName: ""
  sourceVip: ""
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
cgroupDriver: systemd
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 20m   # minimum age before an unused image may be garbage collected
imageGCHighThresholdPercent: 80   # disk-usage percentage that triggers image GC
imageGCLowThresholdPercent: 70
maxPods: 200 # maximum number of pods per node
kind: KubeletConfiguration
logging:
  flushFrequency: 0
  options:
    json:
      infoBufferSize: "0"
  verbosity: 0
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s

100-year certificates (optional)

To issue long-lived (100-year) certificates, replace the kubeadm binary with a patched build before running kubeadm init:

cp /usr/bin/kubeadm{,_bak}

# amd64
curl -sL https://mirrors.imau.cc/app/kubeadm/v1.26.4/kubeadm-linux-amd64-100 -o /usr/bin/kubeadm && chmod +x /usr/bin/kubeadm

# curl -sL https://ghproxy.com/https://github.com/serialt/kubeadm/releases/download/v1.26.4/kubeadm-linux-amd64-100 -o /usr/bin/kubeadm && chmod +x /usr/bin/kubeadm

# arm64
curl -sL https://mirrors.imau.cc/app/kubeadm/v1.26.4/kubeadm-linux-arm64-100 -o /usr/bin/kubeadm && chmod +x /usr/bin/kubeadm

# arm64 (GitHub proxy alternative)
curl -sL https://ghproxy.com/https://github.com/serialt/kubeadm/releases/download/v1.26.5/kubeadm-linux-arm64-100 -o /usr/bin/kubeadm && chmod +x /usr/bin/kubeadm
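
To confirm the replacement binary is in place, and, after kubeadm init has run, to inspect the lifetime of the issued certificates (verification only):

# before init: check the binary that will be used
kubeadm version -o short
# after init: show certificate expiration dates
kubeadm certs check-expiration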

kube-vip configuration

export VIP=172.16.1.200
export INTERFACE=ens160
KVVERSION=v0.5.12

alias kube-vip="ctr image pull ghcr.nju.edu.cn/kube-vip/kube-vip:$KVVERSION; ctr  run --rm --net-host ghcr.nju.edu.cn/kube-vip/kube-vip:$KVVERSION vip /kube-vip"

kube-vip manifest pod \
    --interface $INTERFACE \
    --address $VIP \
    --controlplane \
    --services \
    --arp \
    --leaderElection | tee /etc/kubernetes/manifests/kube-vip.yaml
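
This writes a static Pod manifest that the kubelet will start once kubeadm init brings up the control plane. A quick check afterwards, using the VIP and INTERFACE variables exported above:

ls -l /etc/kubernetes/manifests/kube-vip.yaml
ip addr show dev $INTERFACE | grep $VIP
ping -c 2 $VIP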

Configure the node name to be the node's IP (nodes use their hostname by default)

LOCALHOST_IP=$(ip a | grep inet |grep -v 127.0.0.1|grep -v inet6|awk '{print $2}'|tr -d "addr:" | awk -F '/' '{print $1}')
echo ${LOCALHOST_IP}

[root@k8s1 ~]# kubeadm init --config=kubeadm-init.yml --upload-certs --node-name ${LOCALHOST_IP} | tee kubeadm-init.log

Additional control-plane nodes join with their own IP as the node name:

LOCALHOST_IP=$(ip a | grep inet |grep -v 127.0.0.1|grep -v inet6|awk '{print $2}'|tr -d "addr:" | awk -F '/' '{print $1}')
echo ${LOCALHOST_IP}
kubeadm join 192.168.8.55:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:a7ecd3a298c0bddfa90465a250c3429016b1a104fff7f71a4ab462ffdaaa5edb \
        --control-plane --certificate-key 6d2e68b6759e1dbbbdf613d9c4edeacba1a52f595e044a5149f5271f55462695 \
        --node-name ${LOCALHOST_IP}

Worker nodes join the same way, without --control-plane:

LOCALHOST_IP=$(ip a | grep inet |grep -v 127.0.0.1|grep -v inet6|awk '{print $2}'|tr -d "addr:" | awk -F '/' '{print $1}')
echo ${LOCALHOST_IP}
kubeadm join 192.168.8.55:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:a7ecd3a298c0bddfa90465a250c3429016b1a104fff7f71a4ab462ffdaaa5edb \
        --node-name ${LOCALHOST_IP}

Alternatively, initialize with the default node name (the hostname); full example output follows:

[root@k8s1 ~]# kubeadm init --config=kubeadm-init.yml --upload-certs | tee kubeadm-init.log
[init] Using Kubernetes version: v1.21.3
[preflight] Running pre-flight checks
        [WARNING Hostname]: hostname "node" could not be reached
        [WARNING Hostname]: hostname "node": lookup node on 114.114.114.114:53: no such host
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local node] and IPs [10.96.0.1 192.168.100.198 192.168.100.200]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost node] and IPs [192.168.100.198 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost node] and IPs [192.168.100.198 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 17.005388 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.21" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
c7cdf884c5cf124d3ff234eb21573e99a8e3e6028d02d3d8fde1650b8f24a10e
[mark-control-plane] Marking the node node as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node node as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.100.200:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:22aecdff461a3c709b5a4070702ffd3fb3702f9240c6dc499707b94dc75c860c \
        --control-plane --certificate-key c7cdf884c5cf124d3ff234eb21573e99a8e3e6028d02d3d8fde1650b8f24a10e

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.100.200:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:22aecdff461a3c709b5a4070702ffd3fb3702f9240c6dc499707b94dc75c860c 

10. Install the network plugin (CNI)

Install flannel

# Needs manual creation of namespace to avoid helm error
kubectl create ns kube-flannel
kubectl label --overwrite ns kube-flannel pod-security.kubernetes.io/enforce=privileged

helm repo add flannel https://flannel-io.github.io/flannel/
helm install flannel --set podCidr="10.244.0.0/16" --namespace kube-flannel flannel/flannel
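
Check that the flannel DaemonSet pods come up and that the nodes turn Ready (namespace as created above):

kubectl -n kube-flannel get pods -o wide
kubectl get nodes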

Pull the flannel images (via a mirror)

#!/bin/bash

SRC_URL=docker.io/rancher
#MIRROR_URL=docker.io/serialt
MIRROR_URL=registry.cn-hangzhou.aliyuncs.com/serialt

imageListFlannel=(
${SRC_URL}/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
${SRC_URL}/mirrored-flannelcni-flannel:v0.19.2
)


for imageNameFlannel in ${imageListFlannel[@]}
do
    imageNameFlannel=`echo ${imageNameFlannel} |awk -F '/' '{print $NF}'`
    docker pull ${MIRROR_URL}/${imageNameFlannel} 
    docker tag  ${MIRROR_URL}/${imageNameFlannel}  ${SRC_URL}/${imageNameFlannel}
    docker rmi ${MIRROR_URL}/${imageNameFlannel}
done

Clean up residual network interfaces on a node

# delete cni0
ifconfig cni0 down
ip link delete cni0
rm -rf /var/lib/cni/

# delete the flannel interface
ifconfig flannel.1 down
ip link delete flannel.1
rm -f /etc/cni/net.d/*

# restart kubelet
systemctl restart kubelet

Install calico

[root@k8s-m1 ~]# helm repo add projectcalico https://projectcalico.docs.tigera.io/charts
[root@k8s-m1 ~]# helm repo update 
[root@k8s-m1 ~]# helm install calico projectcalico/tigera-operator --version v3.25.1 --namespace tigera-operator --create-namespace

Pull the calico images through a mirror:

#!/bin/bash

SRC_URL=docker.io/calico
#MIRROR_URL=docker.io/serialt
MIRROR_URL=registry.cn-hangzhou.aliyuncs.com/serialt



imageListCalico=(
${MIRROR_URL}/cni:v3.22.5
${MIRROR_URL}/pod2daemon-flexvol:v3.22.5
${MIRROR_URL}/node:v3.22.5
${MIRROR_URL}/kube-controllers:v3.22.5
)

for imageNameCalico  in ${imageListCalico[@]}
do
    imageNameCalico=`echo ${imageNameCalico} |awk -F '/' '{print $NF}'`
    docker pull ${MIRROR_URL}/${imageNameCalico} 
    docker tag  ${MIRROR_URL}/${imageNameCalico}  ${SRC_URL}/${imageNameCalico}
    docker rmi ${MIRROR_URL}/${imageNameCalico}
done
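
With the operator-based install, the calico pods are created in the tigera-operator and calico-system namespaces; a quick check once the images are available:

kubectl get pods -n tigera-operator
kubectl get pods -n calico-system
kubectl get nodes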

11. Join nodes

Join a worker node

[root@k8s-n1 ~]# kubeadm join 192.168.122.100:6443 --token abcdef.0123456789abcdef     --discovery-token-ca-cert-hash sha256:174be9ec793b09d1b5a17da472bdb33c25463c65b0a42c8c09c4248783103b8b

Check from the master node

[root@k8s-m1 ~]# kubectl get node
NAME         STATUS   ROLES    AGE    VERSION
k8s-m1.com   Ready    master   60m    v1.18.15
k8s-n1.com   Ready    <none>   113s   v1.18.15

If a node stays NotReady for a long time, check whether the flannel image is present, or inspect the logs in /var/log/messages.

Check the cluster status

[root@k8s-m1 ~]# kubectl get pod --all-namespaces
NAMESPACE     NAME                                 READY   STATUS    RESTARTS   AGE
kube-system   coredns-66bff467f8-fp948             1/1     Running   0          109s
kube-system   coredns-66bff467f8-vmhwq             1/1     Running   0          109s
kube-system   etcd-k8s-m1.com                      1/1     Running   0          2m4s
kube-system   kube-apiserver-k8s-m1.com            1/1     Running   0          2m4s
kube-system   kube-controller-manager-k8s-m1.com   1/1     Running   0          2m4s
kube-system   kube-flannel-ds-8wpjv                1/1     Running   0          22s
kube-system   kube-flannel-ds-dhwtr                1/1     Running   0          38s
kube-system   kube-proxy-dd8l9                     1/1     Running   0          22s
kube-system   kube-proxy-wh9bc                     1/1     Running   0          109s
kube-system   kube-scheduler-k8s-m1.com            1/1     Running   0          2m4s
[root@k8s-m1 ~]# kubectl get node
NAME         STATUS   ROLES    AGE     VERSION
k8s-m1.com   Ready    master   2m16s   v1.18.15
k8s-n1.com   Ready    <none>   30s     v1.18.15

Creating and using tokens

Tokens are used when machines join the Kubernetes cluster. The default token expires after 24 hours; any machine joining later needs a newly generated token.

[root@k8s-m1 ~]# kubeadm token list
TOKEN                     TTL       EXPIRES                     USAGES                   DESCRIPTION                                                EXTRA GROUPS
f2b012.1416e09e3d1fff4d   23h       2018-06-24T21:26:17+08:00   authentication,signing   The default bootstrap token generated by 'kubeadm init'.   system:bootstrappers:kubeadm:default-node-token

After 24 hours a new token must be created.

Join a worker node

# valid for 24 hours
[root@k8s-m1 ~]# kubeadm token create --print-join-command
kubeadm join --token 48a5ec.6297ad9983652bc6 192.168.191.175:6443 --discovery-token-ca-cert-hash sha256:25e52920789d850d1b04b032e0da4a814fa0efd9ee85542500769b86f3f9b6dc

# --ttl 0 creates a token that never expires
[root@k8s-m1 ~]# kubeadm token create --ttl 0 --print-join-command

# run the printed join command on the worker node

Join a master (control-plane) node

# newer kubeadm versions
[root@k8s-m1 ~]# kubeadm init phase upload-certs --upload-certs
[upload-certs] Storing the certificates in ConfigMap "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
f8d1c027c01baef6985ddf24266641b7c64f9fd922b15a32fce40b6b4b21e47d

# join as a control-plane node
[root@k8s-m1 ~]# kubeadm join --token 48a5ec.6297ad9983652bc6 192.168.191.175:6443 --discovery-token-ca-cert-hash sha256:25e52920789d850d1b04b032e0da4a814fa0efd9ee85542500769b86f3f9b6dc \
--control-plane --certificate-key f8d1c027c01baef6985ddf24266641b7c64f9fd922b15a32fce40b6b4b21e47d

# then, for a regular (non-root) user:
[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# chown $(id -u):$(id -g) $HOME/.kube/config

12. Open Kubernetes ports in firewalld

https://www.cnblogs.com/Dev0ps/p/11401530.html

The following ports need to be open on Kubernetes master nodes

firewall-cmd --permanent --add-port=6443/tcp \
--add-port=2379-2380/tcp \
--add-port=10250/tcp \
--add-port=10251/tcp \
--add-port=10252/tcp \
--add-port=10255/tcp \
--add-port=8472/udp \
--add-port=443/udp \
--add-port=53/udp \
--add-port=53/tcp \
--add-port=9153/tcp 

firewall-cmd --add-masquerade --permanent
firewall-cmd  --reload

# only if you want NodePorts exposed on control plane IP as well
firewall-cmd --permanent --add-port=30000-32767/tcp
systemctl restart firewalld

The following ports need to be open on Kubernetes worker nodes

firewall-cmd --permanent --add-port=10250/tcp \
--add-port=10255/tcp \
--add-port=8472/udp \
--add-port=443/udp \
--add-port=30000-32767/tcp \
--add-port=53/udp \
--add-port=53/tcp \
--add-port=9153/tcp
firewall-cmd --add-masquerade --permanent
firewall-cmd  --reload

Pay particular attention to the following:

  • 8472/udp is the flannel (VXLAN) communication port

  • 443/tcp is the Kubernetes server port

Note: be sure to run the following command to enable NAT (masquerading); it is disabled by default.

firewall-cmd --add-masquerade --permanent
# check whether NAT forwarding is enabled
firewall-cmd --query-masquerade
# disable NAT forwarding
firewall-cmd --remove-masquerade

If you use Istio, also add the istio-pilot ports to the firewall:

firewall-cmd --permanent --add-port=15010-15014/tcp

Otherwise you will see the following error:

Envoy proxy is NOT ready