Rocky Linux 9.5 安装部署k8s集群(文内有快速配置安装脚本)

安装k8s集群

[root@localhost ~]# uname -a
Linux localhost.localdomain 5.14.0-503.22.1.el9_5.x86_64 #1 SMP PREEMPT_DYNAMIC Wed Jan 22 13:59:07 UTC 2025 x86_64 x86_64 x86_64 GNU/Linux
[root@localhost ~]#
[root@localhost ~]# more /etc/redhat-release
Rocky Linux release 9.5 (Blue Onyx)

1.环境准备

|主机名称| 物理IP| 说明| |k8s_master |192.168.100.207| Master节点| |k8s_worker1 |192.168.100.208| Node1节点| |k8s_worker2 |192.168.100.209| Node2节点|

2.关闭SELINUX和防火墙

sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
setenforce 0
systemctl disable firewalld && systemctl stop firewalld

3.安装依赖包(所有节点执行)

[root@master ~]# yum install -y conntrack ntpdate ntp ipvsadm ipset jq curl sysstat libseccomp wget vim net-tools git socat

4.配置Host(所有节点执行)

# 注意:kubeadm 默认以主机名作为节点名,而 Kubernetes 节点名须符合 RFC 1123(仅小写字母、数字和“-”),
# 含下划线的主机名会导致 kubeadm 预检失败;建议改用 k8s-master 这类带连字符的名称(下文沿用原命名演示)
cat >> /etc/hosts <<EOF
192.168.100.207 k8s_master
192.168.100.208 k8s_worker1
192.168.100.209 k8s_worker2
EOF

5.免密登录(master节点执行)

[root@k8s_master ~]# ssh-keygen 
[root@k8s_master ~]# ssh-copy-id root@k8s_master
[root@k8s_master ~]# ssh-copy-id root@k8s_worker1
[root@k8s_master ~]# ssh-copy-id root@k8s_worker2

6.环境准备

# 禁用交换分区
[root@k8s_master ~]# swapoff -a
[root@k8s_master ~]# sed -i 's/.*swap.*/#&/' /etc/fstab

# 设置时区
[root@k8s_master ~]# timedatectl set-timezone Asia/Shanghai

# 同步时间
[root@k8s_master ~]# ntpdate time.windows.com

7.配置内核转发

[root@k8s_master ~]# cat >> /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF

# 配置生效
[root@k8s_master ~]# sysctl --system

# 加载系统模块
[root@k8s_master ~]# modprobe br_netfilter
[root@k8s_master ~]# lsmod | grep br_netfilter
br_netfilter           22256  0 
bridge                155432  1 br_netfilter

8.安装containerd

# 下载源
[root@k8s_master ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@k8s_master ~]# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

# 替换成阿里云的镜像
[root@k8s_master ~]# sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo

# 安装 containerd
[root@k8s_master ~]# yum install -y containerd

# 重建containerd配置文件
[root@k8s_master ~]# cp /etc/containerd/config.toml{,.bak}
[root@k8s_master ~]# containerd config default > /etc/containerd/config.toml

# 配置 systemd cgroup 驱动
[root@k8s_master ~]# vim /etc/containerd/config.toml
[root@k8s_master ~]# sed -n "63p;127p" /etc/containerd/config.toml
    # 替换成阿里源
    sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.10"
    # 开启Cgroup
    SystemdCgroup = true

# 启动containerd并设置开机启动
[root@k8s_master ~]# systemctl enable --now containerd

9.安装Kubernetes

# 添加Kubernetes源
[root@k8s_master ~]# cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.31/rpm/
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.31/rpm/repodata/repomd.xml.key
EOF

# 安装kubeadm、kubelet和kubectl
[root@k8s_master ~]# yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes

# 启动kubelet并设置开机启动
[root@k8s_master ~]# systemctl enable --now kubelet
9.1初始化Kubernetes集群(Master节点执行)
# 使用kubeadm初始化集群(请根据您的实际情况替换<your-control-plane-endpoint>)
[root@k8s_master ~]# kubeadm init \
  --apiserver-advertise-address=192.168.100.207 \
  --image-repository=registry.cn-hangzhou.aliyuncs.com/google_containers \
  --kubernetes-version=v1.31.0 \
  --service-cidr=10.96.0.0/12 \
  --pod-network-cidr=10.244.0.0/16

# 配置kubectl以访问集群
[root@k8s_master ~]# mkdir -p $HOME/.kube
[root@k8s_master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s_master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
9.2加入Kubernetes集群(Worker节点执行)
在Master节点上执行kubeadm token create --print-join-command获取加入集群的命令,然后在每个Worker节点上执行该命令。

[root@k8s_worker1 ~]# kubeadm join 192.168.100.207:6443 --token cddh3x.lc2xhd19l1d134le \
	--discovery-token-ca-cert-hash sha256:fdf02257ce01071cd1e61bde518e417fae2e86e3a7650d81eabbb75e74d1ff51

10.安装网络插件(Master节点执行)

选择 Calico 还是 Flannel 主要取决于你的具体需求:

如果运行一个大型集群,需要精细的网络策略控制,并且愿意接受更复杂的配置过程,那么 Calico 可能是一个更好的选择。 如果集群规模较小,对网络性能要求不是特别高,而且希望有一个易于管理和配置的网络解决方案,那么 Flannel 可能更适合你。

# 应用YAML文件以部署Flannel(网络模式二选一)
[root@k8s_master ~]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

# 应用YAML文件以部署Calico(网络模式二选一)
# 安装 Operator
[root@k8s_master ~]# kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/tigera-operator.yaml

# 下载配置 Calico 所需的自定义资源
[root@k8s_master ~]# curl https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/custom-resources.yaml -O

# 安装 Calico
[root@k8s_master ~]# kubectl create -f custom-resources.yaml

11.集群验证

# 验证集群状态
[root@k8s_master ~]# kubectl get nodes
NAME     STATUS   ROLES           AGE     VERSION
master   Ready    control-plane   5m11s   v1.31.1
worker   Ready    <none>          3m7s    v1.31.1

# 查看集群信息
[root@k8s_master ~]# kubectl cluster-info
Kubernetes control plane is running at https://192.168.100.207:6443
CoreDNS is running at https://192.168.100.207:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

# 检查集群的组件状态
[root@k8s_master ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE   ERROR
controller-manager   Healthy   ok        
scheduler            Healthy   ok        
etcd-0               Healthy   ok