\[root@node-01 ~\]# yum update -y
Repository AppStream is listed more than once in the configuration
Repository extras is listed more than once in the configuration
Repository PowerTools is listed more than once in the configuration
Repository centosplus is listed more than once in the configuration
Last metadata expiration check: 0:19:42 ago on Sat 28 Nov 2020 04:25:04 PM CST.
Dependencies resolved.
Nothing to do.
Complete!
\[root@node-01 ~\]# yum install -y conntrack ipvsadm ipset jq sysstat curl iptables libseccomp bind-utils
...
\[root@master-01 ~\]# systemctl stop firewalld && systemctl disable firewalld
Removed /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
\[root@master-01 ~\]# swapoff -a
\[root@master-01 ~\]# iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat && iptables -P FORWARD ACCEPT
\[root@master-01 ~\]# sed -i '/swap/s/^\(.*\)$/#\1/g' /etc/fstab
\[root@master-01 ~\]# cat /etc/fstab
#
# /etc/fstab
# Created by anaconda on Mon Nov 23 08:19:33 2020
#
# Accessible filesystems, by reference, are maintained under '/dev/disk/'.
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info.
#
# After editing this file, run 'systemctl daemon-reload' to update systemd
# units generated from this file.
#
/dev/mapper/cl-root / xfs defaults 0 0
UUID=46ea6159-eda5-4931-ae11-73095cf284c1 /boot ext4 defaults 1 2
#/dev/mapper/cl-swap swap swap defaults 0 0
\[root@master-01 ~\]# setenforce 0
\[root@master-01 ~\]# vim /etc/sysconfig/selinux
\[root@master-01 ~\]# cat /etc/sysconfig/selinux
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
SELINUX=disabled
# SELINUXTYPE= can take one of these three values:
# targeted - Targeted processes are protected,
# minimum - Modification of targeted policy. Only selected processes are protected.
# mls - Multi Level Security protection.
SELINUXTYPE=targeted
\[root@master-02 ~\]# wget https://download.docker.com/linux/centos/8/x86_64/stable/Packages/containerd.io-1.3.7-3.1.el8.x86_64.rpm
--2020-11-28 17:47:12-- https://download.docker.com/linux/centos/8/x86_64/stable/Packages/containerd.io-1.3.7-3.1.el8.x86_64.rpm
Resolving download.docker.com (download.docker.com)... 99.84.206.7, 99.84.206.109, 99.84.206.25, ...
Connecting to download.docker.com (download.docker.com)|99.84.206.7|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 30388860 (29M) \[binary/octet-stream\]
Saving to: ‘containerd.io-1.3.7-3.1.el8.x86_64.rpm’
containerd.io-1.3.7-3.1 100%\[===============================>\] 28.98M 188KB/s in 3m 15s
2020-11-28 17:50:27 (153 KB/s) - ‘containerd.io-1.3.7-3.1.el8.x86_64.rpm’ saved \[30388860/30388860\]
\[root@node-02 ~\]# yum install ./containerd.io-1.3.7-3.1.el8.x86_64.rpm
Repository AppStream is listed more than once in the configuration
Repository extras is listed more than once in the configuration
...
\[root@node-01 ~\]# sudo yum -y install docker-ce
Repository AppStream is listed more than once in the configuration
Repository extras is listed more than once in the configuration
Repository PowerTools is listed more than once in the configuration
...
\[root@master-01 ~\]# systemctl start docker && systemctl enable docker
\[root@master-01 ~\]# cd /opt/kubernetes/
\[root@master-01 kubernetes\]#
\[root@master-01 kubernetes\]# cat kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
# k8s的版本号,必须跟安装的Kubeadm版本等保持一致,否则启动报错
kubernetesVersion: v1.19.4
# docker镜像仓库地址,k8s.gcr.io需要翻墙才可以下载镜像,这里使用镜像服务器下载http://mirror.azure.cn/help/gcr-proxy-cache.html
# imageRepository: k8s.gcr.io/google_containers
# 集群名称
clusterName: kubernetes
# apiServer的集群访问地址,填写vip地址即可
controlPlaneEndpoint: "10.0.0.99:6443"
networking:
# pod的网段
podSubnet: 10.10.0.0/16
serviceSubnet: 10.96.0.0/12
dnsDomain: cluster.local
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
# kube-proxy模式指定为ipvs,需要提前在节点上安装ipvs的依赖并开启相关模块
mode: ipvs
# 拉取镜像
\[root@master-01 kubernetes\]# kubeadm config images pull
W1128 20:33:21.822265 4536 configset.go:348\] WARNING: kubeadm cannot validate component configs for API groups \[kubelet.config.k8s.io kubeproxy.config.k8s.io\]
\[config/images\] Pulled k8s.gcr.io/kube-apiserver:v1.19.4
\[config/images\] Pulled k8s.gcr.io/kube-controller-manager:v1.19.4
\[config/images\] Pulled k8s.gcr.io/kube-scheduler:v1.19.4
\[config/images\] Pulled k8s.gcr.io/kube-proxy:v1.19.4
\[config/images\] Pulled k8s.gcr.io/pause:3.2
\[config/images\] Pulled k8s.gcr.io/etcd:3.4.13-0
# 记得:
\[root@master-01 kubernetes\]# swapoff -a && kubeadm reset && systemctl daemon-reload && systemctl restart kubelet && iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
# 初始化
\[root@master-01 kubernetes\]# kubeadm init --config=kubeadm-config.yaml --upload-certs
...
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f \[podnetwork\].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
kubeadm join 10.0.0.99:6443 --token dtkoyq.8ciqez70nj1ysdix \
--discovery-token-ca-cert-hash sha256:f65ee972a9e9d0b8784f7db583a9cdf9865253459aa96a9b3529be2517570155 \
--control-plane --certificate-key 0dc20030f8dfdede8cbb3b0906eda1a3a140e91f7e6ebb6eac1ad02ac65389d3
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 10.0.0.99:6443 --token dtkoyq.8ciqez70nj1ysdix \
--discovery-token-ca-cert-hash sha256:f65ee972a9e9d0b8784f7db583a9cdf9865253459aa96a9b3529be2517570155
# 安装网络组件
\[root@master-01 kubernetes\]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
\[root@master-01 kubernetes\]#
\[root@master-01 kubernetes\]#
\[root@master-01 kubernetes\]#
\[root@master-01 kubernetes\]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-f9fd979d6-2hs76 0/1 Pending 0 5m18s
kube-system coredns-f9fd979d6-5j4w8 0/1 Pending 0 5m18s
kube-system etcd-master-01 1/1 Running 0 5m29s
kube-system kube-apiserver-master-01 1/1 Running 0 5m30s
kube-system kube-controller-manager-master-01 1/1 Running 0 5m30s
kube-system kube-flannel-ds-grhh6 0/1 Init:0/1 0 5s
kube-system kube-proxy-pl74w 1/1 Running 0 5m18s
kube-system kube-scheduler-master-01 1/1 Running 0 5m30s
\[root@master-01 ~\]# wget -P /etc/kubernetes/addons https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc1/aio/deploy/recommended.yaml && cd /etc/kubernetes/addons
\[root@master-01 addons\]# kubectl apply -f recommended.yaml
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created