dpkg-reconfigure tzdata
# Select Asia
# Select Shanghai
# Install ntpdate
apt-get install ntpdate
# Sync the system clock with a public NTP server located in China (cn.pool.ntp.org)
ntpdate cn.pool.ntp.org
# Write the system time to the hardware clock
hwclock --systohc
# Verify with date
date
Sun Feb 23 12:05:17 CST 2020
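On systemd-based Ubuntu this can also be done non-interactively; a small hedged alternative that sets the timezone and keeps the clock synced via systemd-timesyncd instead of a one-off ntpdate run:
# Non-interactive alternative to the dpkg-reconfigure dialog above
timedatectl set-timezone Asia/Shanghai
# Enable continuous NTP sync
timedatectl set-ntp true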
Modify cloud.cfg
This prevents the hostname from being reset after a reboot.
vi /etc/cloud/cloud.cfg
# This setting defaults to false; change it to true
preserve_hostname: true
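If you prefer a one-liner over editing the file by hand, something like this should work, assuming the default preserve_hostname: false line is present:
sed -i 's/^preserve_hostname: false$/preserve_hostname: true/' /etc/cloud/cloud.cfg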
Per-node configuration
Configure the IP address
Edit the configuration file with vi /etc/netplan/50-cloud-init.yaml and change it as follows:
network:
    ethernets:
        ens33:
            addresses: [192.168.23.110/24]
            gateway4: 192.168.23.2
            nameservers:
                addresses: [192.168.23.2]
    version: 2
Run netplan apply to make the configuration take effect.
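After netplan apply, you can confirm the settings took effect (assuming the ens33 interface from the config above):
ip addr show ens33        # should show 192.168.23.110/24
ip route | grep default   # should show the 192.168.23.2 gateway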
Configure the hostname
hostnamectl set-hostname kubernetes-master
cat >> /etc/hosts << EOF
192.168.23.110 kubernetes-master
EOF
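On a multi-node setup it is convenient to add the worker entries as well; the IPs and names below match the nodes that show up later in this walkthrough:
cat >> /etc/hosts << EOF
192.168.23.112 kubernetes-node-02
192.168.23.113 kubernetes-node-03
EOF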
The actual installation
Create and modify the configuration
I originally ran kubeadm config print init-defaults --kubeconfig ClusterConfiguration > kubeadm.yml, but it failed with kubeadm cannot validate component configs for API groups .... Dropping the --kubeconfig ClusterConfiguration flag works; the defaults are exported (the file is referenced as kubeadm.yml in the commands below) with:
kubeadm config print init-defaults > kubeadm.yml
Issue: during installation I changed the kubernetesVersion value, which caused Docker to report mismatched image tags; I used kubeadm reset and then re-ran with the updated configuration. The resulting kubeadm.yml:
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.23.110
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: kubernetes-master
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.18.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
scheduler: {}
List the required images
kubeadm config images list --config kubeadm.yml
registry.aliyuncs.com/google_containers/kube-apiserver:v1.17.3
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.17.3
registry.aliyuncs.com/google_containers/kube-scheduler:v1.17.3
registry.aliyuncs.com/google_containers/kube-proxy:v1.17.3
registry.aliyuncs.com/google_containers/pause:3.1
registry.aliyuncs.com/google_containers/etcd:3.4.3-0
registry.aliyuncs.com/google_containers/coredns:1.6.5
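To avoid the image-tag mismatch mentioned earlier, it helps to set kubernetesVersion in kubeadm.yml to the version of the installed kubeadm binary, which you can check with:
kubeadm version -o short
# e.g. v1.18.5 — use this value for kubernetesVersion in kubeadm.yml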
Pull the required images
kubeadm config images pull --config kubeadm.yml
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-apiserver:v1.17.3
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-controller-manager:v1.17.3
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-scheduler:v1.17.3
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-proxy:v1.17.3
[config/images] Pulled registry.aliyuncs.com/google_containers/pause:3.1
[config/images] Pulled registry.aliyuncs.com/google_containers/etcd:3.4.3-0
[config/images] Pulled registry.aliyuncs.com/google_containers/coredns:1.6.5
Install the master node
kubeadm init --config=kubeadm.yml --upload-certs | tee kubeadm-init.log
# Output:
W0701 09:12:04.080540 79411 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[init] Using Kubernetes version: v1.18.0
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.23.110]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [kubernetes-master localhost] and IPs [192.168.23.110 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [kubernetes-master localhost] and IPs [192.168.23.110 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
W0701 09:12:11.796349 79411 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
W0701 09:12:11.797269 79411 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 20.025352 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.18" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
c647153d348929eb96a38f77441e2d1295550fe8769b67a768a227f2c6c41e3f
[mark-control-plane] Marking the node kubernetes-master as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node kubernetes-master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.23.110:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:2f2cb21ef2c5fb742ca8383187946ed8fe4c1e4789d7c460e881e717d0b057a4
Configure kubectl
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
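As an optional sanity check that kubectl can reach the API server:
kubectl cluster-info
# Should report the control-plane endpoint (https://192.168.23.110:6443) and the CoreDNS service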
Verify that it worked
kubectl get node
# Output:
NAME STATUS ROLES AGE VERSION
kubernetes-master NotReady master 26m v1.18.5
Install the worker nodes
# Use the join command printed at the end of your own kubeadm init output
kubeadm join 192.168.23.110:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:2f2cb21ef2c5fb742ca8383187946ed8fe4c1e4789d7c460e881e717d0b057a4
# On success the output looks like:
W0701 09:47:23.519724 88017 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
Note: the join command must be run on the worker node itself, i.e. on a different machine from the master; I initially got an error because I had not switched hosts.
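If the token from the init log has already expired (ttl is 24h0m0s in the config above), a fresh join command can be generated on the master at any time:
kubeadm token create --print-join-command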
Verify that the nodes joined
kubectl get nodes
NAME STATUS ROLES AGE VERSION
kubernetes-master NotReady master 36m v1.18.5
kubernetes-node-02 NotReady <none> 4m49s v1.18.5
kubernetes-node-03 NotReady <none> 81s v1.18.5
Check the Pod status
coredns is not running yet; we still need to install a network plugin.
watch kubectl get pods -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-7ff77c879f-7krmz 0/1 Pending 0 102m <none> <none> <none> <none>
coredns-7ff77c879f-mcct4 0/1 Pending 0 102m <none> <none> <none> <none>
etcd-kubernetes-master 1/1 Running 0 102m 192.168.23.110 kubernetes-master <none> <none>
kube-apiserver-kubernetes-master 1/1 Running 0 102m 192.168.23.110 kubernetes-master <none> <none>
kube-controller-manager-kubernetes-master 1/1 Running 0 102m 192.168.23.110 kubernetes-master <none> <none>
kube-proxy-2j45n 1/1 Running 0 70m 192.168.23.112 kubernetes-node-02 <none> <none>
kube-proxy-dd7js 1/1 Running 0 102m 192.168.23.110 kubernetes-master <none> <none>
kube-proxy-kksk8 1/1 Running 0 67m 192.168.23.113 kubernetes-node-03 <none> <none>
kube-scheduler-kubernetes-master 1/1 Running 0 102m 192.168.23.110 kubernetes-master <none> <none>
What kubeadm init does
init: initializes the cluster for the specified version
preflight: runs pre-flight checks and downloads the required Docker images
kubelet-start: generates the kubelet configuration file /var/lib/kubelet/config.yaml; without this file the kubelet cannot start, so the kubelet will not actually come up before initialization
certificates: generates the certificates Kubernetes uses and stores them in /etc/kubernetes/pki
kubeconfig: generates the kubeconfig files in /etc/kubernetes, which the components use to talk to each other
control-plane: installs the master components from the YAML manifests in /etc/kubernetes/manifests
etcd: installs the etcd service from /etc/kubernetes/manifests/etcd.yaml
wait-control-plane: waits for the master components deployed by control-plane to start
apiclient: checks the health of the master components
uploadconfig: uploads the configuration that was used
kubelet: configures the kubelet via a ConfigMap
patchnode: records CNI information on the Node via annotations
mark-control-plane: labels the current node with the master role and taints it as unschedulable, so Pods will not run on the master by default
bootstrap-token: generates the token; keep it, as it is needed later when adding nodes with kubeadm join
addons: installs the CoreDNS and kube-proxy add-ons
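Each of these phases can also be run on its own with kubeadm init phase, which is handy when you only need to redo a single step; for example:
# Re-run only the pre-flight checks or only certificate generation (sketch, using the config file from above)
kubeadm init phase preflight --config kubeadm.yml
kubeadm init phase certs all --config kubeadm.yml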
Kubernetes network plugins
Container networking is the mechanism by which containers connect to other containers, to the host, and to external networks. Container runtimes offer several network modes, and each gives a different experience. For example, Docker can configure the following networks for a container by default:
none: adds the container to a container-only network stack with no outside connectivity.
host: adds the container to the host's network stack, with no isolation.
default bridge: the default network mode; containers can reach one another by IP address.
custom bridge: a user-defined bridge with more flexibility, isolation, and other conveniences.
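These built-in modes are visible as networks on any Docker host:
docker network ls
# Lists the default bridge, host and none networks, plus any user-defined bridges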
The kubelet has a single default network plugin and a default network common to the entire cluster.
CNI (Container Network Interface) is a standard, generic interface. Container platforms such as Docker, Kubernetes, and Mesos, and container network solutions such as flannel, calico, and weave, only need one standard interface: any platform that implements it gets networking, and CNI is exactly that standard interface protocol.
CNI plugins in Kubernetes
CNI was conceived as a framework for dynamically configuring the appropriate network configuration and resources when containers are created or destroyed. The plugin configures and manages IP addresses for interfaces and typically provides IP management, per-container IP allocation, and multi-host connectivity. The container runtime calls the network plugin to allocate an IP address and configure the network when a container starts, and calls it again to clean up those resources when the container is deleted.
The runtime or orchestrator decides which network a container should join and which plugin to call. The plugin then adds an interface into the container's network namespace as one side of a veth pair, makes changes on the host such as attaching the other end of the veth to a bridge, and finally allocates an IP address and sets up routes by calling a separate IPAM (IP Address Management) plugin.
In Kubernetes, the kubelet invokes the plugin it finds at the appropriate time to configure networking automatically for the pods it starts.
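On a kubeadm-provisioned node this means the kubelet uses the CNI plugin it finds in the standard CNI locations (the common default paths are shown below; they can vary by distribution):
ls /etc/cni/net.d/   # network config (.conflist) files installed by the CNI plugin, e.g. Calico
ls /opt/cni/bin/     # the CNI plugin binaries themselves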
The optional CNI plugins for Kubernetes include:
Flannel
Calico
Canal
Weave
What is Calico
Calico provides a secure networking solution for containers and virtual machines that has been proven in production at scale (in public clouds and across clusters of thousands of nodes), and integrates with Kubernetes, OpenShift, Docker, Mesos, DC/OS, and OpenStack.
Calico also enforces network security rules dynamically. With Calico's simple policy language you get fine-grained control over communication between containers, virtual machine workloads, and bare-metal host endpoints.
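As a small illustration of the kind of fine-grained rule this enables, here is a plain Kubernetes NetworkPolicy that Calico would enforce; the namespace and labels are made up for the example:
kubectl apply -f - << EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-from-frontend      # illustrative name
  namespace: default
spec:
  podSelector:
    matchLabels:
      app: backend               # applies to pods labeled app=backend
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          app: frontend          # only pods labeled app=frontend may connect
EOF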
Download the Calico manifest and modify it
wget https:
Change 192.168.0.0/16 to 10.244.0.0/16.
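In the downloaded manifest this value normally lives in the CALICO_IPV4POOL_CIDR environment variable of the calico-node DaemonSet (it may be commented out); a quick way to switch it is:
# Replace the default pool CIDR with 10.244.0.0/16 wherever it appears in the manifest
sed -i 's#192.168.0.0/16#10.244.0.0/16#g' calico.yaml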
Install the Calico network plugin
kubectl apply -f calico.yaml
I hit an error here: error: error parsing calico.yaml: error converting YAML to JSON: yaml: line 167: did not find expected '-' indicator. This is a YAML formatting problem; re-downloading the file fixed it.
configmap/calico-config unchanged
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org configured
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers unchanged
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers unchanged
clusterrole.rbac.authorization.k8s.io/calico-node unchanged
clusterrolebinding.rbac.authorization.k8s.io/calico-node unchanged
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
Verify that the installation succeeded
watch kubectl get pods --all-namespaces
# Output:
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-58b656d69f-x6dgl 0/1 ContainerCreating 0 2m7s
kube-system calico-node-7nrl5 0/1 Init:1/3 0 2m7s
kube-system calico-node-btdwc 0/1 Init:2/3 0 2m7s
kube-system calico-node-d4p7g 0/1 Init:1/3 0 2m7s
kube-system coredns-7ff77c879f-7krmz 0/1 ContainerCreating 0 3h46m
kube-system coredns-7ff77c879f-mcct4 0/1 ContainerCreating 0 3h46m
kube-system etcd-kubernetes-master 1/1 Running 0 3h46m
kube-system kube-apiserver-kubernetes-master 1/1 Running 0 3h46m
kube-system kube-controller-manager-kubernetes-master 0/1 Error 0 3h46m
kube-system kube-proxy-2j45n 1/1 Running 0 3h15m
kube-system kube-proxy-dd7js 1/1 Running 0 3h46m
kube-system kube-proxy-kksk8 1/1 Running 0 3h11m
kube-system kube-scheduler-kubernetes-master 1/1 Running 0 3h46m
When the nodes all show a Ready status, the installation has succeeded.
kubectl get nodes
# Output:
NAME STATUS ROLES AGE VERSION
kubernetes-master Ready master 3h52m v1.18.5
kubernetes-node-02 Ready <none> 3h20m v1.18.5
kubernetes-node-03 Ready <none> 3h17m v1.18.5
Only at the very end did I discover that kubeadm join reported success on my worker node kubernetes-node-01, yet kubectl get nodes on the master showed no entry for node-01. After repeated checking I found that I had set the node01 machine's hostname to the node2 name.
Fix (run on node01):
Step 1: correct the hostname
Step 2: reboot
Step 3: kubeadm reset
Step 4: kubeadm join 192.168.23.110:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:2f2cb21ef2c5fb742ca8383187946ed8fe4c1e4789d7c460e881e717d0b057a4
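In command form (assuming the intended hostname is kubernetes-node-01; the join command is the one printed by kubeadm init above):
hostnamectl set-hostname kubernetes-node-01
reboot
# After the reboot:
kubeadm reset -f
kubeadm join 192.168.23.110:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:2f2cb21ef2c5fb742ca8383187946ed8fe4c1e4789d7c460e881e717d0b057a4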
One final check:
kubectl get nodes
# Output
kubernetes-master Ready master 4h24m v1.18.5