Installing Kubernetes
1. Configure hostnames (run on each of the three machines)
# Set the hostname according to the plan [run on the master node]
hostnamectl set-hostname k8s-master
# Set the hostname according to the plan [run on node01]
hostnamectl set-hostname k8s-node-1
# Set the hostname according to the plan [run on node02]
hostnamectl set-hostname k8s-node-2
2. Add entries to the hosts file (all three machines)
vi /etc/hosts
192.168.50.201 k8s-master
192.168.50.202 k8s-node-1
192.168.50.203 k8s-node-2
Replace these IP addresses with those of your own servers.
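To confirm that name resolution works, you can ping each hostname once from every machine (a quick check using the names defined above):
ping -c 1 k8s-master
ping -c 1 k8s-node-1
ping -c 1 k8s-node-2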
3. Disable firewalld, SELinux, and NetworkManager (all three machines)
systemctl status firewalld
systemctl stop firewalld
systemctl disable firewalld
systemctl status NetworkManager
systemctl stop NetworkManager
systemctl disable NetworkManager
## Check whether SELinux is disabled
getenforce
vim /etc/sysconfig/selinux
## Change SELINUX=enforcing to SELINUX=disabled
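Editing the file only takes effect after a reboot; to also switch SELinux off for the current session, a common approach looks like this (setenforce 0 puts it into Permissive mode; Disabled applies after the next reboot):
setenforce 0
# apply the file change non-interactively instead of editing by hand
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/sysconfig/selinux
getenforce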
4. Configure time synchronization (all three machines)
yum install chrony -y
systemctl start chronyd && systemctl enable chronyd && chronyc sources
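To verify that the clock is actually being synchronized, you can check chrony's tracking status and the system clock (a quick check):
chronyc tracking
timedatectl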
5. Configure kernel IP forwarding and bridge filtering (all three machines)
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
sudo modprobe overlay
sudo modprobe br_netfilter
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward=1
EOF
## Apply the configuration
sysctl --system
# Check that the br_netfilter and overlay modules are loaded
lsmod |grep br_netfilter
lsmod | grep overlay
Confirm that the net.bridge.bridge-nf-call-iptables, net.bridge.bridge-nf-call-ip6tables, and net.ipv4.ip_forward sysctl variables are set to 1 in your sysctl configuration by running the following command:
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward
6. Configure IPVS forwarding (all three machines)
yum -y install ipset ipvsadm
# Configure how the IPVS kernel modules get loaded
# Add the modules that need to be loaded
mkdir -p /etc/sysconfig/ipvsadm
cat > /etc/sysconfig/ipvsadm/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
chmod +x /etc/sysconfig/ipvsadm/ipvs.modules
bash /etc/sysconfig/ipvsadm/ipvs.modules
lsmod |grep -e ip_vs -e nf_conntrack
7. Disable the swap partition (all three machines)
sed -ri 's/.*swap.*/#&/' /etc/fstab
swapoff -a
grep swap /etc/fstab
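A quick way to confirm swap is fully off is to check free; the Swap line should read 0:
free -m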
8. Install Docker (all three machines)
curl -fsSL https://get.docker.com -o get-docker.sh
sudo sh ./get-docker.sh
# Configure the cgroup driver and registry mirrors:
vi /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": [
    "https://rsbud4vc.mirror.aliyuncs.com",
    "https://registry.docker-cn.com",
    "https://docker.mirrors.ustc.edu.cn",
    "https://dockerhub.azk8s.cn",
    "http://hub-mirror.c.163.com"
  ]
}
systemctl enable docker
systemctl start docker
systemctl status docker
docker info|grep systemd
9. Install cri-dockerd (all three machines)
# Download and install the latest cri-dockerd release (v0.3.9 at the time of writing)
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.9/cri-dockerd-0.3.9-3.el7.x86_64.rpm
rpm -ivh cri-dockerd-0.3.9-3.el7.x86_64.rpm
rm -rf cri-dockerd-0.3.9-3.el7.x86_64.rpm
systemctl daemon-reload
systemctl enable cri-docker
systemctl start cri-docker
systemctl status cri-docker
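If the service is healthy, cri-dockerd should have created the Unix socket that kubelet and kubeadm will talk to later; the path assumed throughout this guide is /run/cri-dockerd.sock (a quick check):
ls -l /run/cri-dockerd.sock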
10. Install the Kubernetes components (all three machines)
# Configure the Kubernetes yum repository
cat << EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Refresh the yum metadata
yum check-update
## Install kubelet, kubeadm, and kubectl
yum install -y kubelet kubeadm kubectl
# Note: do not start kubelet yet; only enable it to start on boot
systemctl enable kubelet
# Confirm the version of kubeadm and the other installed binaries
kubeadm version
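The repository installs the newest packages it carries, which may not match the v1.28.2 used by kubeadm init below; if you want the versions to line up exactly, one option is to pin them at install time (a sketch, assuming 1.28.2 packages are available in the mirror):
yum install -y kubelet-1.28.2 kubeadm-1.28.2 kubectl-1.28.2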
11. Integrate kubelet with cri-dockerd (all three machines)
vim /usr/lib/systemd/system/cri-docker.service
#ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd://
ExecStart=/usr/bin/cri-dockerd --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9 --container-runtime-endpoint fd:// --network-plugin=cni --cni-bin-dir=/opt/cni/bin --cni-cache-dir=/var/lib/cni/cache --cni-conf-dir=/etc/cni/net.d
# Notes:
# Parameters to add (their values must match the actual paths of the CNI plugin deployed on the system):
# --pod-infra-container-image: important; use a (preferably domestic) registry for the pause image, otherwise image pulls may fail or time out
# --network-plugin: the type of network plugin specification to use; CNI here
# --cni-bin-dir: the directory searched for CNI plugin binaries
# --cni-cache-dir: the cache directory used by the CNI plugins
# --cni-conf-dir: the directory from which CNI plugin configuration files are loaded
# After editing, reload systemd and restart cri-docker.service.
systemctl daemon-reload && systemctl restart cri-docker.service
systemctl status cri-docker
# Configure kubelet
# Run on all nodes:
# Point kubelet at the Unix socket that cri-dockerd opens locally; by default this is /run/cri-dockerd.sock
vim /etc/sysconfig/kubelet
KUBELET_KUBEADM_ARGS="--cgroup-driver=systemd --container-runtime-endpoint=/run/cri-dockerd.sock"
cat /etc/sysconfig/kubelet
KUBELET_KUBEADM_ARGS="--cgroup-driver=systemd --container-runtime-endpoint=/run/cri-dockerd.sock"
# Note: this configuration can be skipped; instead, pass the "--cri-socket unix:///run/cri-dockerd.sock" option to the kubeadm commands used below
12. Initialize the cluster (master node only)
kubeadm init \
--apiserver-advertise-address=192.168.50.201 \
--kubernetes-version=v1.28.2 \
--image-repository=registry.aliyuncs.com/google_containers \
--pod-network-cidr=10.244.0.0/16 \
--service-cidr=10.96.0.0/12 \
--cri-socket=unix:///run/cri-dockerd.sock
apiserver-advertise-address: IP address of the master node
kubernetes-version: the Kubernetes version
image-repository: registry.aliyuncs.com/google_containers, the image registry (domestic mirror)
pod-network-cidr: the Pod network CIDR
Important: after control-plane initialization finishes, follow the instructions printed to the console (the standard kubectl setup commands are sketched below) and record the command for joining nodes to the cluster.
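The kubectl setup that kubeadm prints for the admin user looks like this (run on the master):
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# or, when working as root:
export KUBECONFIG=/etc/kubernetes/admin.conf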
13. Join the other nodes to the cluster (worker nodes)
Console output printed after the master node was initialized:
kubeadm join 192.168.50.201:6443 --token kx0q6a.uhg9et3ooqkaxwpd \
--discovery-token-ca-cert-hash sha256:f6d1f54f4f887f42446684fd139b96b7021d6020cc79fef334535377107e232a
Append --cri-socket=unix:///run/cri-dockerd.sock:
kubeadm join 192.168.50.201:6443 --token kx0q6a.uhg9et3ooqkaxwpd \
--discovery-token-ca-cert-hash sha256:f6d1f54f4f887f42446684fd139b96b7021d6020cc79fef334535377107e232a \
--cri-socket=unix:///run/cri-dockerd.sock
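The bootstrap token in the join command expires after 24 hours by default; if it has expired or the output was lost, a fresh join command can be generated on the master (append the --cri-socket option again before running it on the nodes):
kubeadm token create --print-join-command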
14. Deploy the container network (master node)
##wget https://github.com/projectcalico/calico/blob/v3.27.0/manifests/calico.yaml
wget https://docs.projectcalico.org/manifests/calico.yaml
After downloading, edit the Pod network CIDR defined in the manifest (CALICO_IPV4POOL_CIDR) so that it matches the --pod-network-cidr value passed to kubeadm init above; note that in recent manifests this variable ships commented out and may need to be uncommented as well.
sed -i -e 's#192.168.0.0/16#10.244.0.0/16#g' calico.yaml
kubectl apply -f calico.yaml
## After a short while
kubectl get pods -A -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
calico-kube-controllers-658d97c59c-nng82 1/1 Running 0 17m 10.244.140.66 k8s-node-2 <none> <none>
calico-node-55tlg 1/1 Running 0 17m 192.168.50.202 k8s-node-1 <none> <none>
calico-node-tb8qs 1/1 Running 0 17m 192.168.50.203 k8s-node-2 <none> <none>
calico-node-xm2fh 1/1 Running 0 9m24s 192.168.50.201 k8s-master <none> <none>
coredns-66f779496c-7sjs4 1/1 Running 0 13h 10.244.235.193 k8s-master <none> <none>
coredns-66f779496c-9d67s 1/1 Running 0 13h 10.244.235.194 k8s-master <none> <none>
etcd-k8s-master 1/1 Running 0 13h 192.168.50.201 k8s-master <none> <none>
kube-apiserver-k8s-master 1/1 Running 0 13h 192.168.50.201 k8s-master <none> <none>
kube-controller-manager-k8s-master 1/1 Running 0 13h 192.168.50.201 k8s-master <none> <none>
kube-proxy-2zqk5 1/1 Running 0 13h 192.168.50.201 k8s-master <none> <none>
kube-proxy-4mgls 1/1 Running 0 94m 192.168.50.202 k8s-node-1 <none> <none>
kube-proxy-c2l4q 1/1 Running 0 96m 192.168.50.203 k8s-node-2 <none> <none>
kube-scheduler-k8s-master 1/1 Running 0 13h 192.168.50.201 k8s-master <none> <none>
kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-master Ready control-plane 14h v1.28.2 192.168.50.201 <none> CentOS Linux 7 (Core) 3.10.0-1160.71.1.el7.x86_64 docker://25.0.3
k8s-node-1 Ready <none> 113m v1.28.2 192.168.50.202 <none> CentOS Linux 7 (Core) 3.10.0-1160.71.1.el7.x86_64 docker://25.0.3
k8s-node-2 Ready <none> 115m v1.28.2 192.168.50.203 <none> CentOS Linux 7 (Core) 3.10.0-1160.71.1.el7.x86_64 docker://25.0.3
15. Install the dashboard
15.1 Install the dashboard with Helm
Install Helm
1. Script install
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
chmod 700 get_helm.sh
./get_helm.sh
2. Binary install
# Download
wget https://get.helm.sh/helm-v3.14.2-linux-amd64.tar.gz
# Extract
tar -zxvf helm-v3.14.2-linux-amd64.tar.gz
# Find the helm binary in the unpacked directory and move it to the desired destination
mv linux-amd64/helm /usr/local/bin/helm
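Whichever method you used, verify the installation afterwards (a quick check):
helm version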
# Add kubernetes-dashboard repository
helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
# Deploy a Helm Release named "kubernetes-dashboard" using the kubernetes-dashboard chart
helm upgrade --install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --create-namespace --namespace kubernetes-dashboard
To uninstall/delete the kubernetes-dashboard deployment:
helm delete kubernetes-dashboard --namespace kubernetes-dashboard
15.2 Install the dashboard from YAML (recommended)
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
vim recommended.yaml
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30000
  selector:
    k8s-app: kubernetes-dashboard
## Modifications: add type: NodePort and nodePort: 30000 so that the dashboard can be reached at <host-ip>:30000
kubectl apply -f recommended.yaml
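After applying the manifest, check that the dashboard Pods come up and that the Service exposes the expected NodePort (a quick check):
kubectl get pods -n kubernetes-dashboard
kubectl get svc -n kubernetes-dashboard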
15.3 Create a dashboard user
Creating a sample user
In this guide, we will find out how to create a new user using the Service Account mechanism of Kubernetes, grant this user admin permissions, and log in to the Dashboard using a bearer token tied to this user.
For each of the following snippets for ServiceAccount and ClusterRoleBinding, you should copy them to new manifest files like dashboard-adminuser.yaml and use kubectl apply -f dashboard-adminuser.yaml to create them.
Creating a Service Account
First, we create a ServiceAccount named admin-user in the kubernetes-dashboard namespace.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
Creating a ClusterRoleBinding
In most cases after provisioning the cluster using kops, kubeadm or any other popular tool, the ClusterRole cluster-admin already exists in the cluster. We can use it and create only a ClusterRoleBinding for our ServiceAccount. If it does not exist then you need to create this role first and grant required privileges manually.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
Getting a Bearer Token for ServiceAccount
Now we need to find the token we can use to log in. Execute the following command:
kubectl -n kubernetes-dashboard create token admin-user
Check Kubernetes docs for more information about API tokens for a ServiceAccount.
Getting a long-lived Bearer Token for ServiceAccount
We can also create a long-lived token by creating a Secret bound to the ServiceAccount; the token will be saved in that Secret:
apiVersion: v1
kind: Secret
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: "admin-user"
type: kubernetes.io/service-account-token
After the Secret is created, we can execute the following command to get the token saved in the Secret:
kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath={".data.token"} | base64 -d
Check Kubernetes docs for more information about long-lived API tokens for a ServiceAccount.
Combined manifest: dashboard-adminuser.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: "admin-user"
type: kubernetes.io/service-account-token
kubectl apply -f dashboard-adminuser.yaml
## Get the admin-user login token
kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath={".data.token"} | base64 -d
eyJhbGciOiJSUzI1NiIsImtpZCI6InhkblNiMEk1aTQzNXRVZjNWRFRBR0ZWUE1YODFwMC1OYjNqd2FWVzI1MW8ifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJmYWY1NGM1NC1hZTc5LTRkODgtYTk1ZC1jN2NlOGJhNzA4ZTIiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4tdXNlciJ9.bYaz4ZUedkhWzEuBB__E_lKOLQZTFNWtc-8tU5NEDMUXRi6kpiS3eFfktF0eCyLrjCq45bHlpc0M-FG2J82WWD-DNZLkSDIXYAjTrCp2fT35ZS3ekPyktszNiA-xGyLTXs6haZiBlis6WVfrbmGMemlpm5sX3_PhpjKfC1KFC1OBo8UtwpGAcCUJEJUBPh361gfOypYN7AOiszv1LeI-aZzw9mlP4cGxt6M6nUDxDAeIunWm4IKdDmqxvgXd6sPxfk2N4JluM-eTlgEtVc_BAjbknsp1jCxzb8w707ijRd2odBisqLG6x6rMFvq4zman0PX3wngIdDrxHZf41pwvvg