4. Pods in Detail

Table of contents

4.1 Pod overview
  4.1.1 Pod structure
  4.1.2 Pod definition
  4.1.3 Top-level attributes shared by almost all resources
  4.1.4 Common sub-attributes of spec
4.2 Pod configuration
  4.2.1 Basic configuration
  4.2.2 Image pulling
  4.2.3 Adding labels
  4.2.4 Image pull policy (imagePullPolicy)
  4.2.5 Startup command
  4.2.6 Running commands - command
  4.2.7 Environment variables
  4.2.8 Port settings
  4.2.9 Resource quotas
4.3 Pod lifecycle
  4.3.1 Creation and termination
  4.3.2 Init containers
  4.3.3 Hook functions
  4.3.4 Container probes
  4.4.5 Sub-attributes of livenessProbe
  4.3.5 Restart policy
4.4 Pod scheduling
  4.4.1 Direct scheduling
  4.4.2 Affinity scheduling
  4.4.3 Taints and tolerations

4.1 Pod overview
4.1.1 Pod structure

Every Pod can contain one or more containers, which fall into two categories:

- Containers running the user's programs; there can be any number of them.
- The Pause container, a root container present in every Pod. It serves two purposes:
  - it can be used as the basis for assessing the health of the whole Pod;
  - an IP address (the Pod IP) can be set on this root container and shared by the other containers, enabling network communication inside the Pod.

This covers communication inside a Pod; communication between Pods is implemented with a virtual layer-2 network. In our current environment that is Flannel.

4.1.2 Pod definition
Below is a Pod resource manifest:
apiVersion: v1      # Required. API version, e.g. v1
kind: Pod           # Required. Resource type, e.g. Pod
metadata:           # Required. Metadata
  name: string      # Required. Pod name
  namespace: string # Namespace the Pod belongs to; defaults to default
  labels:           # Custom label list
    - name: string
spec:               # Required. Detailed definition of the containers in the Pod
  containers:       # Required. List of containers in the Pod
  - name: string    # Required. Container name
    image: string   # Required. Container image name
    imagePullPolicy: [ Always|Never|IfNotPresent ]  # Image pull policy
    command: [string]   # Container startup command list; if not specified, the startup command baked into the image is used
    args: [string]      # Argument list for the startup command
    workingDir: string  # Container working directory
    volumeMounts:       # Storage volumes mounted inside the container
    - name: string      # Name of a shared volume defined on the pod; must be a volume name defined in the volumes[] section
      mountPath: string # Absolute mount path inside the container; should be shorter than 512 characters
      readOnly: boolean # Whether the mount is read-only
    ports:              # List of ports to expose
    - name: string      # Port name
      containerPort: int # Port the container listens on
      hostPort: int     # Port the host listens on; defaults to the same as containerPort
      protocol: string  # Port protocol, TCP or UDP; defaults to TCP
    env:                # Environment variables set before the container runs
    - name: string      # Environment variable name
      value: string     # Environment variable value
    resources:          # Resource limits and requests
      limits:           # Resource limits
        cpu: string     # CPU limit in cores; maps to docker run --cpu-shares
        memory: string  # Memory limit, e.g. Mi/Gi; maps to docker run --memory
      requests:         # Resource requests
        cpu: string     # CPU requested; the initial amount available at container start
        memory: string  # Memory requested; the initial amount available at container start
    lifecycle:          # Lifecycle hooks
      postStart:        # Run right after the container starts; if it fails, the container is restarted according to the restart policy
      preStop:          # Run before the container terminates; the container terminates regardless of the result
    livenessProbe:      # Health check for containers in the Pod; after several unanswered probes the container is restarted automatically
      exec:             # Probe via an exec command
        command: [string]  # Command or script to run for the exec probe
      httpGet:          # Probe via an HTTP GET; requires path and port
        path: string
        port: number
        host: string
        scheme: string
        HttpHeaders:
        - name: string
          value: string
      tcpSocket:        # Probe via a TCP socket
        port: number
      initialDelaySeconds: 0  # Seconds to wait after container start before the first probe
      timeoutSeconds: 0       # Probe timeout in seconds; defaults to 1
      periodSeconds: 0        # Probe interval in seconds; defaults to 10
      successThreshold: 0
      failureThreshold: 0
    securityContext:
      privileged: false
  restartPolicy: [Always | Never | OnFailure]  # Pod restart policy
  nodeName: string      # Schedule the Pod onto the node with this name
  nodeSelector: object  # Schedule the Pod onto nodes carrying these labels
  imagePullSecrets:     # Secrets used when pulling images, specified in key: secretkey format
  - name: string
  hostNetwork: false    # Whether to use the host network; defaults to false, true means use the host's network
  volumes:              # Shared storage volumes defined on this pod; many volume types exist
  - name: string        # Shared volume name
    emptyDir: {}        # emptyDir volume: a temporary directory with the same lifetime as the Pod; empty value
    hostPath:           # hostPath volume: mounts a directory from the Pod's host
      path: string      # Host directory that will be mounted into the container
    secret:             # secret volume: mounts a predefined secret object into the container
      secretName: string
      items:
      - key: string
        path: string
    configMap:          # configMap volume: mounts a predefined configMap object into the container
      name: string
      items:
      - key: string
        path: string

# Tip
# You can inspect the configurable fields of every resource type with a single command:
#   kubectl explain <resource type>              # list the top-level attributes of a resource
#   kubectl explain <resource type>.<attribute>  # list the sub-attributes of an attribute
[root@k8s-master01 ~]# kubectl explain pod
KIND: Pod
VERSION: v1
FIELDS:
   apiVersion   <string>
   kind         <string>
   metadata     <Object>
   spec         <Object>
   status       <Object>

[root@k8s-master01 ~]# kubectl explain pod.metadata
KIND: Pod
VERSION: v1
RESOURCE: metadata Object
FIELDS:
   annotations                  <map[string]string>
   clusterName                  <string>
   creationTimestamp            <string>
   deletionGracePeriodSeconds   <integer>
   deletionTimestamp            <string>
   finalizers                   <[]string>
   generateName                 <string>
   generation                   <integer>
   labels                       <map[string]string>
   managedFields                <[]Object>
   name                         <string>
   namespace                    <string>
   ownerReferences              <[]Object>
   resourceVersion              <string>
   selfLink                     <string>
   uid                          <string>

4.1.3 In kubernetes, the top-level attributes of almost all resources are the same and consist of 5 parts:
- apiVersion <string>: version, defined internally by kubernetes; the value must be one returned by kubectl api-versions
- kind <string>: type, defined internally by kubernetes; the value must be one returned by kubectl api-resources
- metadata <Object>: metadata, mainly identification and description of the resource; commonly name, namespace, labels, etc.
- spec <Object>: description, the most important part of the configuration, containing the detailed description of the resource
- status <Object>: status information; its contents do not need to be defined and are generated automatically by kubernetes
4.1.4 Among the attributes above, spec is the focus of what follows. Its common sub-attributes:
- containers <[]Object>: container list, defining the details of each container
- nodeName <string>: schedule the pod onto the Node with this name
- nodeSelector <map[]>: schedule the Pod onto Nodes carrying the labels defined in this selector
- hostNetwork <boolean>: whether to use the host network; defaults to false, true means use the host's network
- volumes <[]Object>: storage volumes mounted on the Pod
- restartPolicy <string>: restart policy, i.e. how the Pod reacts to a failure (restart, don't restart, or delete and recreate)

A minimal skeleton combining these attributes is sketched below.
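As a minimal hedged sketch tying the five top-level attributes and the common spec sub-attributes together (the pod name pod-skeleton and the demo label are illustrative, not from the original):

apiVersion: v1
kind: Pod
metadata:
  name: pod-skeleton        # illustrative name
  namespace: dev
  labels:
    app: demo
spec:
  restartPolicy: Always     # restart policy for all containers in the Pod
  containers:
  - name: nginx
    image: nginx:1.17.1     # any image available in your environment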
4.2 Pod configuration
The pod.spec.containers attribute is a <[]Object>, i.e. a list. Image pull policies:

Always, Never, IfNotPresent
(always pull; never pull; pull only when the image is not present locally)

[root@k8s-master01 ~]# kubectl explain pod.spec.containers
KIND: Pod
VERSION: v1
RESOURCE: containers <[]Object>   # an array: a Pod may have multiple containers
FIELDS:
   name            <string>    # container name
   image           <string>    # image address the container needs
   imagePullPolicy <string>    # image pull policy
   command         <[]string>  # container startup command list; if not specified, the image's packaged startup command is used
   args            <[]string>  # argument list for the startup command
   env             <[]Object>  # container environment variables
   ports           <[]Object>  # ports the container needs to expose
   resources       <Object>    # resource limits and requests (how much the container may use)

4.2.1 Basic configuration
[root@k8s-master inventory]# cat nginx.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: dev
---
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: dev
  labels:
    dev: test
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  - name: busybox
    image: busybox:1.30
This defines a fairly simple Pod configuration with two containers:

- nginx: created from the nginx:1.17.1 image; nginx is a lightweight web server
- busybox: created from the busybox:1.30 image; busybox is a small collection of linux commands

# Create the Pod
[rootk8s-master inventory]# kubectl apply -f nginx.yaml
namespace/dev created
pod/nginx created

# Check the Pod status
# READY 1/2  : the Pod has 2 containers, 1 ready and 1 not ready
# RESTARTS   : restart count; one container keeps failing, so the Pod keeps restarting it trying to recover
[rootk8s-master inventory]# kubectl get -f nginx.yaml
NAME STATUS AGE
namespace/dev   Active   75s

NAME        READY   STATUS             RESTARTS      AGE
pod/nginx 1/2 CrashLoopBackOff 3 (36s ago) 75s
# You can inspect the details with kubectl describe
# A basic Pod is now running, although it has a problem for the moment: busybox has no long-running
# command, so it exits as soon as it starts. Fix the manifest as shown below.
[rootk8s-master inventory]# kubectl delete -f nginx.yaml
namespace dev deleted
pod "nginx" deleted

[root@k8s-master inventory]# cat nginx.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: dev
---
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: dev
  labels:
    dev: test
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  - name: busybox
    image: busybox:1.30
    command: ["/bin/sleep","6000"]
[rootk8s-master inventory]# kubectl get -f nginx.yaml
NAME STATUS AGE
namespace/dev   Active   30s

NAME        READY   STATUS    RESTARTS   AGE
pod/nginx   2/2     Running   0          30s

4.2.2 Image pulling
[root@k8s-master inventory]# cat pod-httpd.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: dev
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-httpd
  namespace: dev
  labels:
    dev: test
spec:
  containers:
  - name: httpd
    image: httpd:latest
    imagePullPolicy: IfNotPresent
  - name: busybox
    image: busybox:latest
    command: ["/bin/sleep","6000"]
[root@k8s-master inventory]# kubectl apply -f pod-httpd.yaml
namespace/dev unchanged
pod/pod-httpd created
[rootk8s-master inventory]# kubectl get -f pod-httpd.yaml
NAME STATUS AGE
namespace/dev   Active   25m

NAME            READY   STATUS    RESTARTS   AGE
pod/pod-httpd   2/2     Running   0          16s

# Check the Pod details
# In the events below you can see the pull behaviour: httpd:latest was already present on the node
# (imagePullPolicy: IfNotPresent), while busybox:latest went through a "Pulling image" step
[rootk8s-master inventory]# kubectl describe -f pod-httpd.yaml
.........
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  70s   default-scheduler  Successfully assigned dev/pod-httpd to k8s-node2
  Normal  Pulled     70s   kubelet            Container image "httpd:latest" already present on machine
  Normal  Created    69s   kubelet            Created container httpd
  Normal  Started    69s   kubelet            Started container httpd
  Normal  Pulling    69s   kubelet            Pulling image "busybox:latest"
  Normal  Pulled     67s   kubelet            Successfully pulled image "busybox:latest" in 2.613062239s (2.613072529s including waiting)
  Normal  Created    67s   kubelet            Created container busybox
  Normal  Started    66s   kubelet            Started container busybox

4.2.3 Adding labels
[root@k8s-master inventory]# cat pod-httpd.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: dev
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-httpd
  namespace: dev
  labels:
    dev: test
spec:
  containers:
  - name: httpd
    image: httpd:latest
    imagePullPolicy: IfNotPresent
  - name: busybox
    image: busybox:latest
    command: ["/bin/sleep","6000"]
[rootk8s-master inventory]# kubectl apply -f pod-httpd.yaml
namespace/dev unchanged
pod/pod-httpd created

# Check the labels
[rootk8s-master inventory]# kubectl get -f pod-httpd.yaml --show-labels
NAME STATUS AGE LABELS
namespace/dev   Active   20h   kubernetes.io/metadata.name=dev

NAME            READY   STATUS    RESTARTS       AGE   LABELS
pod/pod-httpd   2/2     Running   12 (10m ago)   20h   dev=test
4.2.4 Image pull policy (imagePullPolicy)

imagePullPolicy sets the image pull policy. kubernetes supports three values:

- Always: always pull the image from the remote registry
- IfNotPresent: use the local image if it exists, otherwise pull from the remote registry
- Never: only use the local image, never pull from the remote registry (error if the image is missing locally)

Default value:

- if the image tag is a specific version, the default policy is IfNotPresent
- if the image tag is latest, the default policy is Always

A hedged snippet pinning the policy explicitly is shown below.
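As a minimal sketch (the pod name pod-pullpolicy is illustrative, not from the original), pinning the policy explicitly avoids surprises when the default changes with the tag:

apiVersion: v1
kind: Pod
metadata:
  name: pod-pullpolicy             # illustrative name
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1            # specific tag -> default policy would be IfNotPresent
    imagePullPolicy: IfNotPresent
  - name: httpd
    image: httpd:latest            # latest tag -> default policy would be Always
    imagePullPolicy: IfNotPresent  # override so an existing local image is reused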
4.2.5 Startup command

busybox is not a single program but rather a collection of tools; once kubernetes starts and manages it, it shuts down automatically. The solution is to keep it running, and that is what the command configuration is for.
[root@k8s-master inventory]# cat pod-command.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: dev
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-pullimage
  namespace: dev
  labels:
    app: httpdlab
spec:
  containers:
  - name: httpd
    image: httpd:latest
    imagePullPolicy: IfNotPresent
  - name: busybox
    image: busybox:latest
    command: ["/bin/sh","-c","while true; do /bin/echo $(date +%T) >> /tmp/hello.txt; sleep 3; done;"]

# Check the Pod status
# Now both containers are running normally
[rootk8s-master inventory]# kubectl apply -f pod-command.yaml
namespace/dev created
pod/pod-pullimage created
[rootk8s-master inventory]# kubectl get -f pod-command.yaml
NAME STATUS AGE
namespace/dev   Active   6s

NAME                READY   STATUS    RESTARTS   AGE
pod/pod-pullimage 2/2 Running 0 6s
[rootk8s-master inventory]# kubectl get -f pod-command.yaml --show-labels
NAME STATUS AGE LABELS
namespace/dev   Active   9s    kubernetes.io/metadata.name=dev

NAME                READY   STATUS    RESTARTS   AGE   LABELS
pod/pod-pullimage   2/2     Running   0          9s    app=httpdlab

# Enter the busybox container in the pod and look at the file
# Extra command: kubectl exec <pod name> -n <namespace> -it -c <container name> /bin/sh   # run a command inside the container
# With this command you can get inside a container and work there,
# for example to look at the contents of the txt file
[rootk8s-master inventory]# kubectl exec pod-pullimage -itn dev -c busybox -- /bin/sh
/ #
/ # tail -f /tmp/hello.txt
09:31:08
^C
/ # tail -f /tmp/hello.txt
09:31:17
[root@k8s-master inventory]# kubectl exec pod-pullimage -itn dev -c httpd -- /bin/bash
root@pod-pullimage:/usr/local/apache2#
root@pod-pullimage:/usr/local/apache2# cat /etc/os-release
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
NAME="Debian GNU/Linux"
VERSION_ID="12"
VERSION="12 (bookworm)"
VERSION_CODENAME=bookworm
ID=debian
HOME_URL="https://www.debian.org/"
SUPPORT_URL="https://www.debian.org/support"
BUG_REPORT_URL="https://bugs.debian.org/"

4.2.6 Running commands - command
command is used to run a command after the container in the pod has finished initializing.

- "/bin/sh","-c" : run the command with sh
- while true; do /bin/echo $(date +%T) >> /tmp/hello.txt; sleep 3; done; : append the current time to /tmp/hello.txt every 3 seconds

A special note: command already covers starting a command and passing arguments, so why does kubernetes also provide an args option? This is related to docker: command and args in kubernetes exist to override the ENTRYPOINT and CMD configured in the Dockerfile.

1. If neither command nor args is set, the Dockerfile configuration is used.
2. If command is set but args is not, the Dockerfile defaults are ignored and the given command is executed.
3. If command is not set but args is, the ENTRYPOINT configured in the Dockerfile is executed with the given args as its arguments.
4. If both command and args are set, the Dockerfile configuration is ignored; command is executed with args appended.

So command replaces the ENTRYPOINT function and args replaces the CMD function. The difference between ENTRYPOINT and CMD in docker:

1. CMD runs an executable with arguments, and can also supply arguments to ENTRYPOINT.
2. ENTRYPOINT can itself contain arguments; those fixed arguments are not overridden (a sketch of rule 4 follows below).
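A minimal hedged sketch of rule 4, command plus args together; the pod name pod-command-args is illustrative:

apiVersion: v1
kind: Pod
metadata:
  name: pod-command-args              # illustrative name
  namespace: dev
spec:
  containers:
  - name: busybox
    image: busybox:latest
    command: ["/bin/sh", "-c"]          # replaces the image's ENTRYPOINT
    args: ["echo started; sleep 3600"]  # replaces the image's CMD, appended after command

This effectively runs /bin/sh -c "echo started; sleep 3600", regardless of what the busybox image itself defines.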
4.2.7 Environment variables

[root@k8s-master inventory]# vi pod-env.yaml
[root@k8s-master inventory]# cat pod-env.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: dev
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-env
  namespace: dev
  labels:
    app: httpdlab
spec:
  containers:
  - name: busybox
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    command: ["/bin/sh", "-c", "while true; do /bin/echo $(date +%T) >> /tmp/hello.txt; sleep 3; done;"]
    env:
    - name: username
      value: mushuang
    - name: password
      value: run123123
    - name: age
      value: "20"
[rootk8s-master inventory]# kubectl apply -f pod-env.yaml
namespace/dev unchanged
pod/pod-env created

env  # list of environment variables
The env option sets environment variables for the containers in the pod.
[rootk8s-master inventory]# kubectl get -f pod-env.yaml
NAME STATUS AGE
namespace/dev   Active   21h

NAME          READY   STATUS    RESTARTS   AGE
pod/pod-env   1/1     Running   0          62s

# Enter the container and print the environment variables
[rootk8s-master inventory]# kubectl exec pod-env -itn dev -c busybox -- /bin/sh
/ #
/ # echo $username
mushuang
/ # echo $age
20
/ # echo $password
run123123
/ #

4.2.8 Port settings

The ports option of containers; the sub-options supported by ports:
[rootk8s-master inventory]# kubectl explain pod.spec.containers.ports
KIND: Pod
VERSION: v1
RESOURCE: ports []Object
FIELDS:
   name          <string>   # port name; if specified, it must be unique within the pod
   containerPort <integer>  # port the container listens on (0 < x < 65536)
   hostPort      <integer>  # port to expose on the host; if set, only one replica of the container can run per host (usually omitted)
   hostIP        <string>   # host IP to bind the external port to (usually omitted)
   protocol      <string>   # port protocol; must be UDP, TCP or SCTP; defaults to "TCP"

[root@k8s-master inventory]# cat pod-ports.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: dev
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-pullimage
  namespace: dev
  labels:
    app: httpdlab
spec:
  containers:
  - name: httpd
    image: httpd:latest
    imagePullPolicy: IfNotPresent
    ports:               # list of ports the container exposes
    - name: httpd-port
      containerPort: 80
      protocol: TCP
[rootk8s-master inventory]# kubectl apply -f pod-ports.yaml
namespace/dev created
pod/pod-pullimage created

[root@k8s-master inventory]# kubectl get -f pod-ports.yaml
NAME STATUS AGE
namespace/dev   Active   23m

NAME                READY   STATUS    RESTARTS   AGE
pod/pod-pullimage   1/1     Running   0          23m

# The configuration is clearly visible below
[root@k8s-master inventory]# kubectl get -f pod-ports.yaml -o yaml
......
spec:
  containers:
  - image: httpd:latest
    imagePullPolicy: IfNotPresent
    name: httpd
    ports:
    - containerPort: 80
      name: httpd-port
      protocol: TCP
    resources: {}
......
To access the program in the container, use PodIP:containerPort (see the sketch below).
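A small hedged sketch of that access path, assuming the pod-pullimage Pod above in the dev namespace and a node that can reach the Pod network:

# read the Pod IP from the status field
[root@k8s-master inventory]# POD_IP=$(kubectl get pod pod-pullimage -n dev -o jsonpath='{.status.podIP}')
# hit the containerPort declared above
[root@k8s-master inventory]# curl http://$POD_IP:80/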
4.2.9 Resource quotas

A program running in a container obviously consumes resources such as CPU and memory. If a container's resources are not limited, it may eat up large amounts of resources and starve the other containers. For this, kubernetes provides a quota mechanism for memory and CPU through the resources option, which has two sub-options:

- limits: the maximum resources the container may use at runtime; when the container exceeds its limits it is terminated and restarted
- requests: the minimum resources the container needs; if the environment cannot provide them, the container will not start

Together these two options set the lower and upper bounds of the resources.
[root@k8s-master inventory]# cat pod-resources.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: dev
---
apiVersion: v1
kind: Pod
metadata:
  name: httpd-resources
  namespace: dev
spec:
  containers:
  - name: apache
    image: httpd:latest
    resources:           # resource quota
      limits:            # upper bound
        cpu: 2           # CPU limit, in cores
        memory: 10Gi     # memory limit
      requests:          # lower bound
        cpu: 1           # CPU request, in cores
        memory: 10Mi     # memory request
[rootk8s-master inventory]# kubectl apply -f pod-resources.yaml
namespace/dev unchanged
pod/httpd-resources created

Units for cpu and memory:
- cpu: number of cores, integer or decimal
- memory: memory size, in forms such as Gi, Mi, G, M
[rootk8s-master inventory]# kubectl get -f pod-resources.yaml
NAME STATUS AGE
namespace/dev Active 11mNAME READY STATUS RESTARTS AGE
pod/httpd-resources 1/1 Running 0 9m58s
[rootk8s-master inventory]# kubectl delete -f pod-resources.yaml
namespace dev deleted
pod httpd-resources deleted
[rootk8s-master inventory]# vi pod-resources.yaml
[rootk8s-master inventory]# kubectl apply -f pod-resources.yaml
namespace/dev created
pod/httpd-resources created
[rootk8s-master inventory]# kubectl get -f pod-resources.yaml
NAME STATUS AGE
namespace/dev Active 9sNAME READY STATUS RESTARTS AGE
pod/httpd-resources 0/1 Pending 0 9s
[rootk8s-master inventory]# kubectl describe -f pod-resources.yaml
........
  Warning  FailedScheduling  33s   default-scheduler  0/3 nodes are available: 1 node(s) had untolerated taint {node-role.kubernetes.io/control-plane: }, 2 Insufficient memory. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod.

# "Insufficient memory": two nodes cannot satisfy the memory request and the control-plane node has an
# untolerated taint, so the Pod stays Pending.

4.3 Pod lifecycle
The period from the creation of a pod object to its termination is usually called the pod lifecycle. It mainly includes:

- pod creation
- running the init containers
- running the main containers
  - post-start hook and pre-stop hook
  - liveness probes and readiness probes
- pod termination

During the whole lifecycle a Pod goes through 5 states (phases):

- Pending: the apiserver has created the pod object, but it has not been scheduled yet or its images are still being pulled
- Running: the pod has been scheduled to a node and all containers have been created by the kubelet
- Succeeded: all containers in the pod have terminated successfully and will not be restarted
- Failed: all containers have terminated and at least one of them failed, i.e. returned a non-zero exit status
- Unknown: the apiserver cannot obtain the pod's state, usually because of a network problem
4.3.1 Creation and termination
Pod creation process:
1. The user submits the pod definition to the apiServer via kubectl or another api client
2. The apiServer generates the pod object, stores it in etcd and returns a confirmation to the client
3. The apiServer reflects the changes of the pod object in etcd; other components use the watch mechanism to track changes on the apiServer
4. The scheduler notices a new pod to be created, assigns a node to it and updates the result on the apiServer
5. The kubelet on the target node notices the pod scheduled to it, asks the container runtime (e.g. docker) to start the containers and reports the result back to the apiServer
6. The apiServer stores the received pod status in etcd

Pod termination process:
1. The user sends a command to the apiServer to delete the pod object
2. The pod object information in the apiServer is updated over time; within the grace period (30s by default) the pod is considered dead
3. The pod is marked as terminating
4. As soon as the kubelet sees the pod turn to terminating, it starts the pod shutdown process
5. When the endpoint controller sees the pod shutting down, it removes the pod from the endpoint lists of all matching service resources
6. If the pod defines a preStop hook, it is executed synchronously once the pod is marked terminating
7. The container processes in the pod receive the stop signal
8. When the grace period ends, any processes still running in the pod receive an immediate kill signal
9. The kubelet asks the apiServer to set the grace period of this pod resource to 0, which completes the deletion; at this point the pod is no longer visible to the user

4.3.2 Init containers

Init containers run before the main containers of a pod start; they do preparatory work for the main containers and have two key characteristics:

- Init containers must run to completion; if an init container fails, kubernetes restarts it until it succeeds
- Init containers run in the defined order; each one starts only after the previous one has succeeded

Init containers have many use cases; the most common are:

- providing tools or custom code that the main container image does not contain
- since init containers start serially before the application containers, they can be used to delay the start of the application containers until their dependencies are satisfied
Next, a case that simulates the following requirement:

the main container runs nginx, but before nginx starts, the servers hosting mysql and redis must be reachable.

To simplify the test, the mysql (192.168.207.13) and redis (192.168.207.14) server addresses are fixed in advance.
[root@k8s-master inventory]# vi pod-initcon.yaml
[root@k8s-master inventory]# cat pod-initcon.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: dev
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-initcon
  namespace: dev
spec:
  containers:
  - name: main-con
    image: nginx:latest
    ports:
    - name: nginx-port
      containerPort: 80
  initContainers:
  - name: test-mysql
    image: busybox:latest
    command: ["sh", "-c", "until ping 192.168.207.13 -c 1; do echo waiting for mysql...; sleep 2; done;"]
  - name: test-redis
    image: busybox:latest
    command: ["sh", "-c", "until ping 192.168.207.14 -c 1; do echo waiting for redis...; sleep 2; done;"]
    # While the condition is not met (ping fails), keep waiting and retrying.
    # Once the ping succeeds, the condition is met and the init container exits.

# Create the pod
[rootk8s-master inventory]# kubectl apply -f pod-initcon.yaml
namespace/dev created
pod/pod-initcon created

# Check the pod status
[rootk8s-master inventory]# kubectl get -f pod-initcon.yaml
NAME STATUS AGE
namespace/dev   Active   21s

NAME              READY   STATUS     RESTARTS   AGE
pod/pod-initcon   0/1     Init:0/2   0          21s

# The pod is stuck starting the first init container; the later containers will not run
[rootk8s-master inventory]# kubectl describe pods pod-initcon -n dev
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  31s   default-scheduler  Successfully assigned dev/pod-initcon to k8s-node2
  Normal  Pulling    31s   kubelet            Pulling image "busybox:latest"
  Normal  Pulled     26s   kubelet            Successfully pulled image "busybox:latest" in 4.252818682s (4.252828991s including waiting)
  Normal  Created    26s   kubelet            Created container test-mysql
  Normal  Started    26s   kubelet            Started container test-mysql

# Next, open a new shell, add two extra IPs to the current server, and watch the pod change
[root@k8s-master ~]# ip addr add 192.168.207.13/24 dev ens160
[root@k8s-master ~]# ip addr add 192.168.207.14/24 dev ens160

# Watch the pod dynamically
[rootk8s-master inventory]# kubectl get pods pod-initcon -n dev -w
NAME READY STATUS RESTARTS AGE
pod-initcon 0/1 Init:0/2 0 2s
pod-initcon 0/1 Init:0/2 0 8s
pod-initcon 0/1 Init:1/2 0 20s
pod-initcon 0/1 PodInitializing 0 24s
pod-initcon 1/1 Running 0 27s
^C
[root@k8s-master inventory]#

# Check the pod status
[root@k8s-master inventory]# kubectl get -f pod-initcon.yaml
NAME STATUS AGE
namespace/dev   Active   83s

NAME              READY   STATUS    RESTARTS   AGE
pod/pod-initcon 1/1 Running 0 83s
4.3.3 Hook functions

Hook functions let containers react to events in their own lifecycle by running user-specified code when the corresponding moment arrives.

kubernetes provides two hooks, after the main container starts and before it stops:

- post start: runs right after the container is created; if it fails, the container is restarted
- pre stop: runs before the container terminates; the container terminates only after the hook completes, and until then the deletion of the container is blocked

Hook handlers support three ways of defining the action:

Exec: run a command inside the container
……
  lifecycle:
    postStart:
      exec:
        command:
        - cat
        - /tmp/healthy
……

TCPSocket: try to connect to the given socket from the current container

……
  lifecycle:
    postStart:
      tcpSocket:
        port: 8080
……

HTTPGet: send an http request to a URL from the current container

……
  lifecycle:
    postStart:
      httpGet:
        path: /              # URI
        port: 80             # port
        host: 192.168.5.3    # host address
        scheme: HTTP         # protocol, HTTP or HTTPS
……

Next, the exec method is used to demonstrate hook functions. Create a pod-exec.yaml file with the following contents:
[root@k8s-master inventory]# vi pod-exec.yaml
[root@k8s-master inventory]# cat pod-exec.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-exec
  namespace: dev
spec:
  containers:
  - name: main-con
    image: nginx:latest
    ports:
    - name: nginx-port
      containerPort: 80
    lifecycle:
      postStart:
        exec:    # when the container starts, run a command that replaces nginx's default index page
          command: ["/bin/sh", "-c", "echo hello world > /usr/share/nginx/html/index.html"]
      preStop:
        exec:    # before the container stops, shut down the nginx service
          command: ["/usr/sbin/nginx", "-s", "quit"]
[rootk8s-master inventory]# kubectl get -f pod-exec.yaml
NAME READY STATUS RESTARTS AGE
pod-exec   1/1     Running   0          26m

[root@k8s-master inventory]# kubectl get -f pod-exec.yaml -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-exec 1/1 Running 0 26m 10.244.2.25 k8s-node2 none none
[rootk8s-master inventory]# curl 10.244.2.25
hello world

[root@k8s-master inventory]# kubectl exec pod-exec -itn dev -c main-con -- /bin/bash
root@pod-exec:/# cd /usr/share/nginx/html/
root@pod-exec:/usr/share/nginx/html# ls
50x.html  index.html
root@pod-exec:/usr/share/nginx/html# cat index.html
hello world
root@pod-exec:/usr/share/nginx/html#

4.3.4 Container probes
Container probes check whether the application instances in a container are working properly; they are a traditional mechanism for guaranteeing service availability. If a probe shows an instance is not in the expected state, kubernetes takes that instance out of service so it no longer receives traffic. kubernetes offers two kinds of probes:

- liveness probes: check whether the application instance is currently running normally; if not, k8s restarts the container
- readiness probes: check whether the application instance can currently accept requests; if not, k8s does not forward traffic to it

So livenessProbe decides whether to restart the container, while readinessProbe decides whether to forward requests to it. (The demonstrations below use livenessProbe; a readinessProbe sketch is added at the end of section 4.4.5.)

Both probe types support three probing methods:

Exec: run a command inside the container; if the exit code is 0 the program is considered healthy, otherwise unhealthy
  livenessProbe:
    exec:
      command:
      - cat
      - /tmp/healthy
……

TCPSocket: try to open a connection to a port of the container; if the connection can be established the program is considered healthy, otherwise unhealthy

……
  livenessProbe:
    tcpSocket:
      port: 8080
……

HTTPGet: call a URL of the web application inside the container; if the returned status code is between 200 and 399 the program is considered healthy, otherwise unhealthy

……
  livenessProbe:
    httpGet:
      path: /            # URI
      port: 80           # port
      host: 127.0.0.1    # host address
      scheme: HTTP       # protocol, HTTP or HTTPS
……

Below are a few demonstrations using liveness probes.

Exec
[root@k8s-master inventory]# cat pod-liveness-exec.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-liveness-exec
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:latest
    ports:
    - name: nginx-port
      containerPort: 80
    livenessProbe:
      exec:
        command: ["/bin/cat", "/tmp/hello.txt"]   # run a command that reads a file
[rootk8s-master inventory]# kubectl apply -f pod-liveness-exec.yaml
pod/pod-liveness-exec created
[rootk8s-master inventory]# kubectl get -f pod-liveness-exec.yaml
NAME READY STATUS RESTARTS AGE
pod-liveness-exec   1/1     Running   3 (35s ago)   2m5s

[root@k8s-master inventory]# kubectl describe pods pod-liveness-exec -n dev
Events:
  Type     Reason     Age                From               Message
  ----     ------     ----               ----               -------
  Normal   Scheduled  99s                default-scheduler  Successfully assigned dev/pod-liveness-exec to k8s-node1
  Normal   Pulled     96s                kubelet            Successfully pulled image "nginx:latest" in 2.822924352s (2.822938262s including waiting)
  Normal   Pulled     65s                kubelet            Successfully pulled image "nginx:latest" in 3.407035458s (3.407042031s including waiting)
  Normal   Killing    39s (x2 over 69s)  kubelet            Container nginx failed liveness probe, will be restarted
  Normal   Pulling    38s (x3 over 98s)  kubelet            Pulling image "nginx:latest"
  Normal   Created    35s (x3 over 96s)  kubelet            Created container nginx
  Normal   Started    35s (x3 over 95s)  kubelet            Started container nginx
  Normal   Pulled     35s                kubelet            Successfully pulled image "nginx:latest" in 3.140911903s (3.14092833s including waiting)
  Warning  Unhealthy  29s (x7 over 89s)  kubelet            Liveness probe failed: /bin/cat: /tmp/hello.txt: No such file or directory

# Looking at the information above, the nginx container is health-checked right after it starts
# After the check fails, the container is killed and then restarted
# Wait a while and look at the pod again: RESTARTS is no longer 0 but keeps growing
[rootk8s-master inventory]# kubectl get -f pod-liveness-exec.yaml
NAME READY STATUS RESTARTS AGE
pod-liveness-exec   0/1     CrashLoopBackOff   4 (6s ago)   2m36s

# Of course, you can then make the probed file exist (here /tmp/hello.txt is created inside the container) and try again; the result is then normal......
[root@k8s-master inventory]# kubectl exec pod-liveness-exec -itn dev -c nginx -- /bin/bash
root@pod-liveness-exec:/# echo 123456 > /tmp/hello.txt
rootpod-liveness-exec:/# cat /tmp/hello.txt
123456
rootpod-liveness-exec:/# exit
exit
[rootk8s-master inventory]# kubectl get -f pod-liveness-exec.yaml
NAME READY STATUS RESTARTS AGE
pod-liveness-exec 1/1 Running 2 (2m10s ago) 3m11s
TCPSocket
[root@k8s-master inventory]# vi pod-liveness-tcpsocket.yaml
[root@k8s-master inventory]# cat pod-liveness-tcpsocket.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-liveness-tcpsocket
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:latest
    ports:
    - name: nginx-port
      containerPort: 80
    livenessProbe:
      tcpSocket:
        port: 8080      # try to connect to port 8080
[rootk8s-master inventory]# kubectl apply -f pod-liveness-tcpsocket.yaml
pod/pod-liveness-tcpsocket created
[rootk8s-master inventory]# kubectl describe -f pod-liveness-tcpsocket.yaml
Events:
  Type     Reason     Age   From               Message
  ----     ------     ----  ----               -------
  Normal   Scheduled  13s   default-scheduler  Successfully assigned dev/pod-liveness-tcpsocket to k8s-node1
  Normal   Pulling    12s   kubelet            Pulling image "nginx:latest"
  Normal   Pulled     8s    kubelet            Successfully pulled image "nginx:latest" in 3.669903843s (3.669924445s including waiting)
  Normal   Created    8s    kubelet            Created container nginx
  Normal   Started    8s    kubelet            Started container nginx
  Warning  Unhealthy  2s    kubelet            Liveness probe failed: dial tcp 10.244.1.26:8080: connect: connection refused

# The information above shows the probe tries to connect to port 8080 and fails
# Wait a while and look at the pod again: RESTARTS is no longer 0 but keeps growing
[rootk8s-master inventory]# kubectl get -f pod-liveness-tcpsocket.yaml
NAME READY STATUS RESTARTS AGE
pod-liveness-tcpsocket   0/1     CrashLoopBackOff   4 (36s ago)   3m7s

# Of course, you can then change it to a reachable port, e.g. 80, and try again; the result is then normal......
[root@k8s-master inventory]# cat pod-liveness-tcpsocket.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-liveness-tcpsocket
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:latest
    ports:
    - name: nginx-port
      containerPort: 80
    livenessProbe:
      tcpSocket:
        port: 80
[rootk8s-master inventory]# kubectl get -f pod-liveness-tcpsocket.yaml
NAME READY STATUS RESTARTS AGE
pod-liveness-tcpsocket 1/1 Running 0 44s
HTTPGet

[root@k8s-master inventory]# vi pod-httpget.yaml
[root@k8s-master inventory]# cat pod-httpget.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-liveness-httpget
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:latest
    ports:
    - name: nginx-port
      containerPort: 80
    livenessProbe:
      httpGet:          # effectively requests http://127.0.0.1:80/hello
        scheme: HTTP    # protocol, HTTP or HTTPS
        port: 80        # port
        path: /hello    # URI

[root@k8s-master inventory]# kubectl apply -f pod-httpget.yaml
pod/pod-liveness-httpget created
[rootk8s-master inventory]# kubectl describe -f pod-httpget.yaml
Events:
  Type     Reason     Age   From               Message
  ----     ------     ----  ----               -------
  Normal   Scheduled  14s   default-scheduler  Successfully assigned dev/pod-liveness-httpget to k8s-node2
  Normal   Pulling    14s   kubelet            Pulling image "nginx:latest"
  Normal   Pulled     10s   kubelet            Successfully pulled image "nginx:latest" in 3.608349739s (3.608355865s including waiting)
  Normal   Created    10s   kubelet            Created container nginx
  Normal   Started    10s   kubelet            Started container nginx
  Warning  Unhealthy  4s    kubelet            Liveness probe failed: HTTP probe failed with statuscode: 404

# The information above shows the probed path was not found and returned a 404
# Wait a while and look at the pod again: RESTARTS is no longer 0 but keeps growing
[rootk8s-master inventory]# kubectl get -f pod-httpget.yaml
NAME READY STATUS RESTARTS AGE
pod-liveness-httpget   0/1     CrashLoopBackOff   4 (32s ago)   3m2s

# Of course, you can then change path to one that exists, e.g. /, and try again; the result is then normal......
[root@k8s-master inventory]# vi pod-httpget.yaml
[root@k8s-master inventory]# cat pod-httpget.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-liveness-httpget
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:latest
    ports:
    - name: nginx-port
      containerPort: 80
    livenessProbe:
      httpGet:
        scheme: HTTP
        port: 80
        path: /
[rootk8s-master inventory]# kubectl get -f pod-httpget.yaml
NAME READY STATUS RESTARTS AGE
pod-liveness-httpget 1/1 Running 0 56s
[rootk8s-master inventory]# kubectl describe -f pod-httpget.yaml
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  78s   default-scheduler  Successfully assigned dev/pod-liveness-httpget to k8s-node2
  Normal  Pulling    78s   kubelet            Pulling image "nginx:latest"
  Normal  Pulled     75s   kubelet            Successfully pulled image "nginx:latest" in 3.21366977s (3.213685373s including waiting)
  Normal  Created    75s   kubelet            Created container nginx
  Normal  Started    74s   kubelet            Started container nginx

4.4.5 Sub-attributes of livenessProbe
[root@k8s-master inventory]# kubectl explain pod.spec.containers.livenessProbe
FIELDS:
   exec                 <Object>
   tcpSocket            <Object>
   httpGet              <Object>
   initialDelaySeconds  <integer>  # seconds to wait after the container starts before the first probe
   timeoutSeconds       <integer>  # probe timeout; default 1 second, minimum 1 second
   periodSeconds        <integer>  # how often to probe; default 10 seconds, minimum 1 second
   failureThreshold     <integer>  # how many consecutive failures count as failed; default 3, minimum 1
   successThreshold     <integer>  # how many consecutive successes count as successful; default 1

[root@k8s-master inventory]# vi pod-liveness-httpget.yaml
[root@k8s-master inventory]# cat pod-liveness-httpget.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-liveness-httpget
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:latest
    ports:
    - name: nginx-port
      containerPort: 80
    livenessProbe:
      httpGet:
        scheme: HTTP
        port: 80
        path: /
      initialDelaySeconds: 30   # start probing 30s after the container starts
      timeoutSeconds: 5         # probe timeout of 5s
[rootk8s-master inventory]# kubectl apply -f pod-liveness-httpget.yaml
pod/pod-liveness-httpget created
[rootk8s-master inventory]# kubectl get -f pod-liveness-httpget.yaml
NAME READY STATUS RESTARTS AGE
pod-liveness-httpget 1/1 Running 0 19s
[rootk8s-master inventory]# kubectl describe -f pod-liveness-httpget.yaml
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  32s   default-scheduler  Successfully assigned dev/pod-liveness-httpget to k8s-node2
  Normal  Pulling    32s   kubelet            Pulling image "nginx:latest"
  Normal  Pulled     29s   kubelet            Successfully pulled image "nginx:latest" in 2.661612093s (2.661632391s including waiting)
  Normal  Created    29s   kubelet            Created container nginx
  Normal  Started    29s   kubelet            Started container nginx
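The examples above only use livenessProbe; readinessProbe accepts the same sub-attributes but controls traffic instead of restarts. A hedged sketch (the pod name pod-readiness-httpget is illustrative, not from the original):

apiVersion: v1
kind: Pod
metadata:
  name: pod-readiness-httpget   # illustrative name
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:latest
    ports:
    - name: nginx-port
      containerPort: 80
    readinessProbe:             # failing this probe keeps the Pod out of Service endpoints; it is not restarted
      httpGet:
        scheme: HTTP
        port: 80
        path: /
      initialDelaySeconds: 5
      periodSeconds: 10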
4.3.5 Restart policy

In the previous section, once a probe detects a problem, kubernetes restarts the container in the Pod; this is governed by the pod's restart policy. There are 3 restart policies:

- Always: restart the container automatically when it fails; this is the default
- OnFailure: restart only when the container terminates with a non-zero exit code
- Never: never restart the container, regardless of its state

The restart policy applies to all containers in the pod object. The first restart happens immediately when needed; subsequent restarts are delayed by the kubelet for increasing intervals of 10s, 20s, 40s, 80s, 160s and 300s, where 300s is the maximum delay.

Create pod-restartpolicy.yaml:
[root@k8s-master inventory]# vi pod-restartPolicy.yaml
[root@k8s-master inventory]# cat pod-restartPolicy.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-restartpolicy
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:latest
    ports:
    - name: nginx-port
      containerPort: 80
    livenessProbe:
      httpGet:
        scheme: HTTP
        port: 80
        path: /hello        # this path does not exist
  restartPolicy: Never      # restart policy Never: do not restart
# Create the Pod
[rootk8s-master inventory]# kubectl apply -f pod-restartPolicy.yaml
pod/pod-restartpolicy created

# Inspect the Pod details; the nginx container has failed
[rootk8s-master inventory]# kubectl describe -f pod-restartPolicy.yaml
Events:
  Type     Reason     Age                From               Message
  ----     ------     ----               ----               -------
  Normal   Scheduled  53s                default-scheduler  Successfully assigned dev/pod-restartpolicy to k8s-node2
  Normal   Pulling    53s                kubelet            Pulling image "nginx:latest"
  Normal   Pulled     50s                kubelet            Successfully pulled image "nginx:latest" in 2.539945326s (2.539955192s including waiting)
  Normal   Created    50s                kubelet            Created container nginx
  Normal   Started    50s                kubelet            Started container nginx
  Warning  Unhealthy  23s (x3 over 43s)  kubelet            Liveness probe failed: HTTP probe failed with statuscode: 404
  Normal   Killing    23s                kubelet            Stopping container nginx

# The liveness probe failed with HTTP status code 404
# Wait a while longer and check the restart count: it stays at 0, the pod is never restarted
[rootk8s-master inventory]# kubectl get -f pod-restartPolicy.yaml
NAME READY STATUS RESTARTS AGE
pod-restartpolicy 0/1 Completed 0 66s
[rootk8s-master inventory]# kubectl get -f pod-restartPolicy.yaml
NAME READY STATUS RESTARTS AGE
pod-restartpolicy 0/1 Completed 0 70s
[rootk8s-master inventory]# kubectl get -f pod-restartPolicy.yaml
NAME READY STATUS RESTARTS AGE
pod-restartpolicy 0/1 Completed 0 72s
[rootk8s-master inventory]# kubectl get -f pod-restartPolicy.yaml
NAME READY STATUS RESTARTS AGE
pod-restartpolicy 0/1 Completed 0 73s
4.4 Pod scheduling

By default, the node a Pod runs on is computed by the Scheduler component with its own algorithms, and the process is not under manual control. In practice this is often not enough: in many situations we want to steer certain Pods to certain nodes. To do that you need to understand kubernetes' scheduling rules. kubernetes offers four broad kinds of scheduling:

- automatic scheduling: the node is chosen entirely by the Scheduler's algorithms
- direct scheduling: NodeName, NodeSelector
- affinity scheduling: NodeAffinity, PodAffinity, PodAntiAffinity
- taint and toleration scheduling: Taints, Tolerations

[root@k8s-master inventory]# kubectl describe node k8s-master
Taints:             node-role.kubernetes.io/control-plane:NoSchedule

4.4.1 Direct scheduling
Direct scheduling means declaring nodeName or nodeSelector on a pod to schedule it onto the desired node. Note that this scheduling is mandatory: even if the target Node does not exist, the Pod is still assigned to it, it just fails to run.

NodeName

NodeName forces the Pod onto the Node with the given name. This bypasses the Scheduler's logic entirely and binds the Pod directly to the named node.
[root@k8s-master inventory]# vi pod-NodeName.yaml
[root@k8s-master inventory]# cat pod-NodeName.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-nodename
  namespace: dev
  labels:
    app: httpdlab
spec:
  nodeName: k8s-node1
  containers:
  - name: httpd
    image: httpd:latest
    imagePullPolicy: IfNotPresent
  - name: busybox
    image: busybox:latest
    command: ["/bin/sleep","6000"]
[rootk8s-master inventory]# kubectl apply -f pod-NodeName.yaml
pod/pod-nodename created
[rootk8s-master inventory]# kubectl get -f pod-NodeName.yaml
NAME READY STATUS RESTARTS AGE
pod-nodename 2/2 Running 0 9s
[rootk8s-master inventory]# kubectl get -f pod-NodeName.yaml -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-nodename 2/2 Running 0 18s 10.244.1.29 k8s-node1 none none
[rootk8s-master inventory]# kubectl delete -f pod-NodeName.yaml
pod pod-nodename deleted
[rootk8s-master inventory]# clear
[root@k8s-master inventory]# vi pod-NodeName.yaml
[root@k8s-master inventory]# cat pod-NodeName.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-nodename
  namespace: dev
  labels:
    app: httpdlab
spec:
  nodeName: k8s-node2        # change nodeName to node2
  containers:
  - name: httpd
    image: httpd:latest
    imagePullPolicy: IfNotPresent
  - name: busybox
    image: busybox:latest
    command: ["/bin/sleep","6000"]
[rootk8s-master inventory]# kubectl apply -f pod-NodeName.yaml
pod/pod-nodename created
[rootk8s-master inventory]# kubectl get -f pod-NodeName.yaml
NAME READY STATUS RESTARTS AGE
pod-nodename   2/2     Running   0          35s

# Checking again, the pod has been scheduled onto node2
[rootk8s-master inventory]# kubectl get -f pod-NodeName.yaml -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-nodename 2/2 Running 0 44s 10.244.2.35 k8s-node2 none none
[rootk8s-master inventory]# kubectl delete -f pod-NodeName.yaml
pod "pod-nodename" deleted

# Next, delete the pod and change nodeName to k8s-node3 (there is no node3 in the cluster)
[root@k8s-master inventory]# vi pod-NodeName.yaml
[root@k8s-master inventory]# cat pod-NodeName.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-nodename
  namespace: dev
  labels:
    app: httpdlab
spec:
  nodeName: k8s-node3
  containers:
  - name: httpd
    image: httpd:latest
    imagePullPolicy: IfNotPresent
  - name: busybox
    image: busybox:latest
    command: ["/bin/sleep","6000"]
[rootk8s-master inventory]# kubectl apply -f pod-NodeName.yaml
pod/pod-nodename created

# Checking again, the pod has been assigned to node3, but because node3 does not exist the pod cannot run
[rootk8s-master inventory]# kubectl get -f pod-NodeName.yaml
NAME READY STATUS RESTARTS AGE
pod-nodename 0/2 Pending 0 8s
[rootk8s-master inventory]# kubectl get -f pod-NodeName.yaml -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-nodename 0/2 Pending 0 14s none k8s-node3 none none
NodeSelector

NodeSelector schedules the pod onto nodes carrying the specified labels. It is implemented through kubernetes' label-selector mechanism: before the pod is created, the scheduler uses the MatchNodeSelector policy to match labels, finds the target node and schedules the pod onto it. The match is a hard constraint.

# First add labels to the worker nodes
[rootk8s-master inventory]# kubectl get nodes --show-labels
NAME STATUS ROLES AGE VERSION LABELS
k8s-master Ready control-plane 98d v1.27.0 beta.kubernetes.io/archamd64,beta.kubernetes.io/oslinux,kubernetes.io/archamd64,kubernetes.io/hostnamek8s-master,kubernetes.io/oslinux,node-role.kubernetes.io/control-plane,node.kubernetes.io/exclude-from-external-load-balancers
k8s-node1 Ready none 98d v1.27.0 beta.kubernetes.io/archamd64,beta.kubernetes.io/oslinux,kubernetes.io/archamd64,kubernetes.io/hostnamek8s-node1,kubernetes.io/oslinux
k8s-node2 Ready none 98d v1.27.0 beta.kubernetes.io/archamd64,beta.kubernetes.io/oslinux,kubernetes.io/archamd64,kubernetes.io/hostnamek8s-node2,kubernetes.io/oslinux
[root@k8s-master inventory]# kubectl label nodes k8s-node1 env=test
node/k8s-node1 labeled
[root@k8s-master inventory]# kubectl label nodes k8s-node2 env=prod
node/k8s-node2 labeled
[rootk8s-master inventory]# kubectl get nodes --show-labels
NAME STATUS ROLES AGE VERSION LABELS
k8s-master Ready control-plane 98d v1.27.0 beta.kubernetes.io/archamd64,beta.kubernetes.io/oslinux,kubernetes.io/archamd64,kubernetes.io/hostnamek8s-master,kubernetes.io/oslinux,node-role.kubernetes.io/control-plane,node.kubernetes.io/exclude-from-external-load-balancers
k8s-node1    Ready    <none>          98d   v1.27.0   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,env=test,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-node1,kubernetes.io/os=linux
k8s-node2    Ready    <none>          98d   v1.27.0   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,env=prod,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-node2,kubernetes.io/os=linux

Create a pod-nodeselector.yaml file and use it to create a Pod:
[root@k8s-master inventory]# vi pod-nodeselector.yaml
[root@k8s-master inventory]# cat pod-nodeselector.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-nodename
  namespace: dev
  labels:
    app: httpdlab
spec:
  nodeSelector:
    env: prod          # schedule onto nodes labelled env=prod
  containers:
  - name: httpd
    image: httpd:latest
    imagePullPolicy: IfNotPresent

[root@k8s-master inventory]# kubectl apply -f pod-nodeselector.yaml
pod/pod-nodename created
[rootk8s-master inventory]# kubectl get -f pod-nodeselector.yaml
NAME READY STATUS RESTARTS AGE
pod-nodename   1/1     Running   0          10s

# Check the NODE column: the Pod was indeed scheduled onto node2
[rootk8s-master inventory]# kubectl get -f pod-nodeselector.yaml -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-nodename   1/1     Running   0          14s   10.244.2.36   k8s-node2   <none>   <none>

# Next, delete the pod and change nodeSelector to env: mushuang (no node carries this label)
[root@k8s-master inventory]# vi pod-nodeselector.yaml
[root@k8s-master inventory]# cat pod-nodeselector.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-nodename
  namespace: dev
  labels:
    app: httpdlab
spec:
  nodeSelector:
    env: mushuang
  containers:
  - name: httpd
    image: httpd:latest
    imagePullPolicy: IfNotPresent

# Checking again, the pod cannot run and its NODE value is <none>
[rootk8s-master inventory]# kubectl apply -f pod-nodeselector.yaml
pod/pod-nodename created

# Look at the details; there is a message saying the node selector failed to match
[rootk8s-master inventory]# kubectl get -f pod-nodeselector.yaml -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-nodename   0/1     Pending   0          17s   <none>   <none>   <none>   <none>

4.4.2 Affinity scheduling

The previous section covered two direct scheduling methods. They are convenient, but they have a problem: if no Node satisfies the condition, the Pod will not run, even if there are usable Nodes in the cluster. That limits their usefulness.

For this, kubernetes also provides affinity scheduling (Affinity). It extends NodeSelector: through configuration, Nodes that satisfy the conditions are preferred, but if none do, the Pod can still be scheduled onto a Node that does not satisfy them, making scheduling more flexible.

Affinity comes in three flavours:

- nodeAffinity (node affinity): targets nodes; decides which nodes a pod may be scheduled onto
- podAffinity (pod affinity): targets pods; decides which existing pods a new pod may be co-located with in the same topology domain
- podAntiAffinity (pod anti-affinity): targets pods; decides which existing pods a new pod must not share a topology domain with

When to use affinity and anti-affinity:

- Affinity: if two applications interact frequently, it pays to use affinity to place them as close together as possible and reduce the performance cost of network communication.
- Anti-affinity: when an application is deployed with multiple replicas, anti-affinity spreads the instances across nodes, which improves availability.

NodeAffinity
First, the configurable options of NodeAffinity:

pod.spec.affinity.nodeAffinity
  requiredDuringSchedulingIgnoredDuringExecution    # the Node must satisfy all rules; a hard constraint
    nodeSelectorTerms       # node selection list
      matchFields           # selector requirements by node field
      matchExpressions      # selector requirements by node label (recommended)
        key                 # key
        values              # values
        operator            # relational operator: Exists, DoesNotExist, In, NotIn, Gt, Lt
  preferredDuringSchedulingIgnoredDuringExecution   # prefer Nodes that satisfy the rules; a soft constraint (preference)
    preference              # a node selector term associated with a weight
      matchFields           # selector requirements by node field
      matchExpressions      # selector requirements by node label (recommended)
        key                 # key
        values              # values
        operator            # relational operator: In, NotIn, Exists, DoesNotExist, Gt, Lt
    weight                  # preference weight, in the range 1-100

# Hard constraint: requiredDuringSchedulingIgnoredDuringExecution -- the pod must be scheduled onto a matching node
# Soft constraint: preferredDuringSchedulingIgnoredDuringExecution -- prefer a matching node, but another node is acceptable

# Operators: Exists, DoesNotExist, In, NotIn, Gt, Lt
# (exists, does not exist, in, not in, greater than, less than)

Notes on the operators:

  - matchExpressions:
    - key: nodeenv              # match nodes that have a label with key nodeenv
      operator: Exists
    - key: nodeenv              # match nodes whose nodeenv label value is xxx or yyy
      operator: In
      values: ["xxx","yyy"]
    - key: nodeenv              # match nodes whose nodeenv label value is greater than xxx
      operator: Gt
      values: ["xxx"]

First, a demonstration of requiredDuringSchedulingIgnoredDuringExecution:
[root@k8s-master inventory]# vi pod-test.yaml
[root@k8s-master inventory]# cat pod-test.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-nodeaffinity-required
  namespace: dev
spec:
  containers:
  - name: httpd
    image: httpd:latest
  affinity:              # affinity settings
    nodeAffinity:        # node affinity
      requiredDuringSchedulingIgnoredDuringExecution:   # hard constraint
        nodeSelectorTerms:
        - matchExpressions:
          - key: env
            operator: In
            values: ["abc","123"]   # match nodes whose env label value is in [abc,123]

[root@k8s-master inventory]# kubectl apply -f pod-test.yaml
pod/pod-nodeaffinity-required created

# Check the pod status: it fails to run
[rootk8s-master inventory]# kubectl get -f pod-test.yaml -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-nodeaffinity-required   0/1     Pending   0          45s   <none>   <none>   <none>   <none>

# Look at the Pod details
# Scheduling failed; the message says the node selection failed
[rootk8s-master inventory]# kubectl describe -f pod-test.yaml
Events:
  Type     Reason            Age   From               Message
  ----     ------            ----  ----               -------
  Warning  FailedScheduling  55s   default-scheduler  0/3 nodes are available: 1 node(s) had untolerated taint {node-role.kubernetes.io/control-plane: }, 2 node(s) didn't match Pod's node affinity/selector. preemption: 0/3 nodes are available: 3 Preemption is not helpful for scheduling.
[root@k8s-master inventory]# kubectl delete -f pod-test.yaml
pod "pod-nodeaffinity-required" deleted

# Edit the file and change values: ["abc","123"] to ["prod","123"]
[root@k8s-master inventory]# vi pod-test.yaml
[root@k8s-master inventory]# cat pod-test.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-nodeaffinity-required
  namespace: dev
spec:
  containers:
  - name: httpd
    image: httpd:latest
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: env
            operator: In
            values: ["prod","123"]

[root@k8s-master inventory]# kubectl apply -f pod-test.yaml
pod/pod-nodeaffinity-required created

# Checking now, scheduling succeeded and the pod was placed on node2
[rootk8s-master inventory]# kubectl get -f pod-test.yaml -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-nodeaffinity-required 1/1 Running 0 65s 10.244.2.37 k8s-node2 none none
Next, a demonstration of preferredDuringSchedulingIgnoredDuringExecution.

Create pod-nodeaffinity-preferred.yaml:
[root@k8s-master inventory]# vi pod-nodeaffinity-preferred.yaml
[root@k8s-master inventory]# cat pod-nodeaffinity-preferred.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-nodeaffinity-preferred
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  affinity:              # affinity settings
    nodeAffinity:        # node affinity
      preferredDuringSchedulingIgnoredDuringExecution:   # soft constraint
      - weight: 1
        preference:
          matchExpressions:
          - key: env
            operator: In
            values: ["xxx","yyy"]   # match nodes whose env label value is in [xxx,yyy] (none exist in the current environment)
[rootk8s-master inventory]# kubectl apply -f pod-nodeaffinity-preferred.yaml
pod/pod-nodeaffinity-preferred created
[rootk8s-master inventory]# kubectl get -f pod-nodeaffinity-preferred.yaml -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-nodeaffinity-preferred 1/1 Running 0 58s 10.244.1.30 k8s-node1 none none
Notes on NodeAffinity rules:
1. If both nodeSelector and nodeAffinity are defined, both conditions must be satisfied for the Pod to run on the Node
2. If nodeAffinity specifies multiple nodeSelectorTerms, matching any one of them is enough
3. If a nodeSelectorTerms entry contains multiple matchExpressions, a node must satisfy all of them to match
4. If the labels of the Node a Pod is running on change during the Pod's lifetime and no longer satisfy the Pod's node affinity, the change is ignored

A hedged sketch of notes 2 and 3 follows below.
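A minimal hedged sketch of notes 2 and 3 (the label keys env and disk are illustrative): the two nodeSelectorTerms are ORed, while the two matchExpressions inside the first term are ANDed.

spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:          # term 1: a node must satisfy BOTH expressions
          - key: env
            operator: In
            values: ["prod"]
          - key: disk
            operator: Exists
        - matchExpressions:          # term 2: ORed with term 1, matching this term alone is also enough
          - key: env
            operator: In
            values: ["test"]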
PodAffinity

PodAffinity uses running Pods as the reference and places a newly created Pod in the same topology domain as the reference pod.

First, the configurable options of PodAffinity:

pod.spec.affinity.podAffinity
  requiredDuringSchedulingIgnoredDuringExecution    # hard constraint
    namespaces          # namespace of the reference pods
    topologyKey         # scheduling scope
    labelSelector       # label selector
      matchExpressions  # selector requirements by label (recommended)
        key             # key
        values          # values
        operator        # relational operator: In, NotIn, Exists, DoesNotExist
      matchLabels       # content mapped by multiple matchExpressions
  preferredDuringSchedulingIgnoredDuringExecution   # soft constraint
    podAffinityTerm     # term
      namespaces
      topologyKey
      labelSelector
        matchExpressions
          key
          values
          operator
        matchLabels
    weight              # preference weight, in the range 1-100

topologyKey specifies the scheduling scope. For example, kubernetes.io/hostname distinguishes by Node, while beta.kubernetes.io/os distinguishes by the Node's operating system type.

Next, a demonstration of requiredDuringSchedulingIgnoredDuringExecution.
[root@k8s-master inventory]# vi pod-podaffinity-target.yaml
[root@k8s-master inventory]# cat pod-podaffinity-target.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-podaffinity-target
  namespace: dev
  labels:
    env: prod
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  nodeName: k8s-node1     # pin the target pod explicitly onto node1
[rootk8s-master inventory]# kubectl get -f pod-podaffinity-target.yaml -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-podaffinity-target   1/1     Running   0          14s   10.244.1.31   k8s-node1   <none>   <none>

Create pod-podaffinity-required.yaml. The new Pod must be on the same Node as a pod labelled env=xxx or env=yyy; obviously no such pod exists yet. Run it and see:
[root@k8s-master inventory]# vi pod-podaffinity-required.yaml
[root@k8s-master inventory]# cat pod-podaffinity-required.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-podaffinity-required
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  affinity:              # affinity settings
    podAffinity:         # pod affinity
      requiredDuringSchedulingIgnoredDuringExecution:   # hard constraint
      - labelSelector:
          matchExpressions:        # match pods whose env label value is in [xxx,yyy]
          - key: env
            operator: In
            values: ["xxx","yyy"]
        topologyKey: kubernetes.io/hostname

[root@k8s-master inventory]# kubectl apply -f pod-podaffinity-required.yaml
pod/pod-podaffinity-required created
[rootk8s-master inventory]# kubectl get -f pod-podaffinity-required.yaml
NAME READY STATUS RESTARTS AGE
pod-podaffinity-required 0/1 Pending 0 11s
[rootk8s-master inventory]# kubectl describe -f pod-podaffinity-required.yaml
Events:
  Type     Reason            Age   From               Message
  ----     ------            ----  ----               -------
  Warning  FailedScheduling  14s   default-scheduler  0/3 nodes are available: 1 node(s) had untolerated taint {node-role.kubernetes.io/control-plane: }, 2 node(s) didn't match pod affinity rules. preemption: 0/3 nodes are available: 3 Preemption is not helpful for scheduling.
[rootk8s-master inventory]# kubectl delete -f pod-podaffinity-required.yaml
pod pod-podaffinity-required deleted
[root@k8s-master inventory]# vi pod-podaffinity-required.yaml
[root@k8s-master inventory]# cat pod-podaffinity-required.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-podaffinity-required
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  affinity:
    podAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
          - key: env
            operator: In
            values: ["prod","yyy"]
        topologyKey: kubernetes.io/hostname
[rootk8s-master inventory]# kubectl apply -f pod-podaffinity-required.yaml
pod/pod-podaffinity-required created
[rootk8s-master inventory]# kubectl get -f pod-podaffinity-required.yaml
NAME READY STATUS RESTARTS AGE
pod-podaffinity-required 1/1 Running 0 12s
[rootk8s-master inventory]# kubectl get -f pod-podaffinity-required.yaml -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-podaffinity-required 1/1 Running 0 18s 10.244.1.33 k8s-node1 none none
PodAffinity with preferredDuringSchedulingIgnoredDuringExecution:
[root@k8s-master inventory]# cat pod-podaffinity-target2.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-podaffinity-target2
  namespace: dev
  labels:
    env: test
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  nodeName: k8s-node2
[rootk8s-master inventory]# kubectl apply -f pod-podaffinity-target2.yaml
pod/pod-podaffinity-target2 created
[rootk8s-master inventory]# kubectl get -f pod-podaffinity-target2.yaml -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-podaffinity-target2 1/1 Running 0 105s 10.244.2.38 k8s-node2 none none
[rootk8s-master inventory]# kubectl get -f pod-podaffinity-target.yaml -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-podaffinity-target 1/1 Running 0 3d18h 10.244.1.32 k8s-node1 none none
[root@k8s-master inventory]# vi pod-podaffinity-required.yaml

# Use weighted pod affinity to choose which existing pods the new pod should be co-scheduled with
[root@k8s-master inventory]# cat pod-podaffinity-required.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-podaffinity-required
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  affinity:
    podAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 60
        podAffinityTerm:
          labelSelector:
            matchExpressions:
            - key: env
              operator: In
              values: ["prod"]
          topologyKey: kubernetes.io/hostname
      - weight: 30
        podAffinityTerm:
          labelSelector:
            matchExpressions:
            - key: env
              operator: In
              values: ["test"]
          topologyKey: kubernetes.io/hostname
[rootk8s-master inventory]# kubectl apply -f pod-podaffinity-required.yaml
pod/pod-podaffinity-required created
[rootk8s-master inventory]# kubectl get -f pod-podaffinity-required.yaml -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-podaffinity-required 1/1 Running 0 12s 10.244.1.35 k8s-node1 none none
[rootk8s-master inventory]# kubectl get pod -n dev -o wide --show-labels
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES LABELS
pod-podaffinity-required   1/1     Running   0          9m3s    10.244.1.35   k8s-node1   <none>   <none>   <none>
pod-podaffinity-target     1/1     Running   0          3d18h   10.244.1.32   k8s-node1   <none>   <none>   env=prod
pod-podaffinity-target2    1/1     Running   0          12m     10.244.2.38   k8s-node2   <none>   <none>   env=test

PodAntiAffinity
PodAntiAffinity uses running Pods as the reference and keeps a newly created Pod out of the topology domain of the reference pod.

Its configuration options are the same as PodAffinity's, so they are not explained again; here is a test case.

1. Continue using the target pod from the previous case:
[rootk8s-master inventory]# kubectl get -f pod-podaffinity-target.yaml -o wide --show-labels
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES LABELS
pod-podaffinity-target   1/1     Running   0          42s   10.244.1.36   k8s-node1   <none>   <none>   env=prod

2. Create pod-podantiaffinity-required.yaml with the following contents:
[root@k8s-master inventory]# cat pod-podantiaffinity-required.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-podantiaffinity-required
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  affinity:              # affinity settings
    podAntiAffinity:     # pod anti-affinity
      requiredDuringSchedulingIgnoredDuringExecution:   # hard constraint
      - labelSelector:
          matchExpressions:        # match pods whose env label value is in [prod]
          - key: env
            operator: In
            values: ["prod"]
        topologyKey: kubernetes.io/hostname
The configuration above means the new Pod must not be on the same Node as a pod labelled env=prod. Run it and test:
[rootk8s-master inventory]# kubectl get -f pod-podantiaffinity-required.yaml -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-podantiaffinity-required 1/1 Running 0 17s 10.244.2.39 k8s-node2 none none
4.4.3 Taints and tolerations

Taints

The scheduling methods so far all take the Pod's point of view, adding attributes to the Pod to decide whether it should be scheduled onto a given Node. We can also take the Node's point of view and add taint attributes to a Node to decide whether Pods are allowed to be scheduled onto it.

Once a Node is tainted, there is a repelling relationship between it and Pods: Pods are refused scheduling onto it, and existing Pods can even be evicted.

A taint has the format key=value:effect. key and value are the taint's label; effect describes what the taint does and supports three options:

- PreferNoSchedule: kubernetes will try to avoid scheduling Pods onto a Node with this taint, unless no other node is available
- NoSchedule: kubernetes will not schedule Pods onto a Node with this taint, but Pods already on the Node are not affected
- NoExecute: kubernetes will not schedule Pods onto a Node with this taint, and will also evict the Pods already running on it

# First start a few pods
[rootk8s-master inventory]# kubectl run pod1 --image httpd
pod/pod1 created
[rootk8s-master inventory]# kubectl run pod2 --image httpd
pod/pod2 created
[rootk8s-master inventory]# kubectl run pod3 --image httpd
pod/pod3 created
[rootk8s-master inventory]# kubectl run pod4 --image httpd
pod/pod4 created
[rootk8s-master inventory]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod1 1/1 Running 0 30s 10.244.2.40 k8s-node2 none none
pod2 1/1 Running 0 26s 10.244.1.37 k8s-node1 none none
pod3 1/1 Running 0 23s 10.244.2.41 k8s-node2 none none
pod4   1/1     Running   0          20s   10.244.1.38   k8s-node1   <none>   <none>

Example kubectl commands for setting and removing taints:

# set a taint
kubectl taint nodes node1 key=value:effect

# remove a taint
kubectl taint nodes node1 key:effect-

# remove all taints for a key
kubectl taint nodes node1 key-

Next, a demonstration of the taint effects.
# Taint k8s-node1 with PreferNoSchedule (avoid scheduling here if possible)
[root@k8s-master inventory]# kubectl taint nodes k8s-node1 tag=zlbb:PreferNoSchedule
node/k8s-node1 tainted
[rootk8s-master inventory]# kubectl describe node k8s-node1|grep -i taint
Taints:             tag=zlbb:PreferNoSchedule
[rootk8s-master inventory]# kubectl run pod5 --image httpd
pod/pod5 created
[rootk8s-master inventory]# kubectl run pod6 --image httpd
pod/pod6 created
[rootk8s-master inventory]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod1 1/1 Running 0 25m 10.244.2.40 k8s-node2 none none
pod2 1/1 Running 0 25m 10.244.1.37 k8s-node1 none none
pod3 1/1 Running 0 25m 10.244.2.41 k8s-node2 none none
pod4 1/1 Running 0 25m 10.244.1.38 k8s-node1 none none
pod5 1/1 Running 0 9s 10.244.2.42 k8s-node2 none none
pod6 0/1 ContainerCreating 0 5s none k8s-node2 none none
[rootk8s-master inventory]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod1 1/1 Running 0 25m 10.244.2.40 k8s-node2 none none
pod2 1/1 Running 0 25m 10.244.1.37 k8s-node1 none none
pod3 1/1 Running 0 25m 10.244.2.41 k8s-node2 none none
pod4 1/1 Running 0 25m 10.244.1.38 k8s-node1 none none
pod5 1/1 Running 0 12s 10.244.2.42 k8s-node2 none none
pod6   1/1     Running   0          8s      10.244.2.43   k8s-node2   <none>   <none>

# Re-taint node1: remove PreferNoSchedule, set NoSchedule
[root@k8s-master inventory]# kubectl taint nodes k8s-node1 tag=zlbb:PreferNoSchedule-
node/k8s-node1 untainted
[root@k8s-master inventory]# kubectl taint nodes k8s-node1 tag=zlbb:NoSchedule
node/k8s-node1 tainted
[root@k8s-master inventory]# kubectl describe node k8s-node1|grep -i taint
Taints:             tag=zlbb:NoSchedule
[rootk8s-master inventory]# kubectl run pod7 --image httpd
pod/pod7 created
[rootk8s-master inventory]# kubectl run pod8 --image httpd
pod/pod8 created
[rootk8s-master inventory]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod1 1/1 Running 0 29m 10.244.2.40 k8s-node2 none none
pod2 1/1 Running 0 29m 10.244.1.37 k8s-node1 none none
pod3 1/1 Running 0 29m 10.244.2.41 k8s-node2 none none
pod4 1/1 Running 0 28m 10.244.1.38 k8s-node1 none none
pod5 1/1 Running 0 3m41s 10.244.2.42 k8s-node2 none none
pod6 1/1 Running 0 3m37s 10.244.2.43 k8s-node2 none none
pod7 1/1 Running 0 36s 10.244.2.45 k8s-node2 none none
pod8   1/1     Running   0          62s     10.244.2.44   k8s-node2   <none>   <none>

# Remove NoSchedule from node1
[root@k8s-master inventory]# kubectl taint nodes k8s-node1 tag=zlbb:NoSchedule-
node/k8s-node1 untainted
[root@k8s-master inventory]# kubectl describe node k8s-node1|grep -i taint
Taints:             <none>

# Taint node2 with NoExecute
[root@k8s-master inventory]# kubectl taint nodes k8s-node2 tag=zlbb:NoExecute
node/k8s-node2 tainted
[root@k8s-master inventory]# kubectl describe node k8s-node2|grep -i taint
Taints:             tag=zlbb:NoExecute

# The pods on node2 are automatically evicted
[rootk8s-master inventory]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod2 1/1 Running 0 31m 10.244.1.37 k8s-node1 none none
pod4   1/1     Running   0          31m   10.244.1.38   k8s-node1   <none>   <none>

Tip: clusters built with kubeadm add a taint to the master node by default, which is why pods are not scheduled onto the master.

Toleration

Taints were introduced above: we can add taints to a node to refuse pods being scheduled onto it. But what if we do want to schedule a pod onto a node that has a taint? That is what tolerations are for.

A taint is a refusal; a toleration is an exemption. A Node refuses pods through taints, and a Pod ignores the refusal through a toleration.

Let's look at the effect through a case. In the previous step the NoExecute taint was placed on node2, so pods cannot be scheduled there; now we add a toleration to a pod and schedule it onto node2 anyway.

Create pod-toleration.yaml with the following contents:
[root@k8s-master inventory]# kubectl describe node k8s-node1|grep -i taint
Taints:             <none>
[root@k8s-master inventory]# kubectl describe node k8s-node2|grep -i taint
Taints:             tag=zlbb:NoExecute
[root@k8s-master inventory]# vi pod-toleration.yaml
[root@k8s-master inventory]# cat pod-toleration.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-toleration
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  tolerations:            # add a toleration
  - key: tag              # key of the taint to tolerate
    operator: Equal       # operator
    value: zlbb           # value of the taint to tolerate
    effect: NoExecute     # the tolerated effect; must match the effect of the taint on the node
[rootk8s-master inventory]# kubectl apply -f pod-toleration.yaml
pod/pod-toleration created

# With the toleration added, the pod can run on node2
[rootk8s-master inventory]# kubectl get -f pod-toleration.yaml -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-toleration   1/1     Running   0          19s   10.244.2.46   k8s-node2   <none>   <none>

Below is the detailed toleration configuration:
[rootk8s-master01 ~]# kubectl explain pod.spec.tolerations
......
FIELDS:
   key                # key of the taint to tolerate; empty means match all keys
   value              # value of the taint to tolerate
   operator           # key-value operator; supports Equal (the default) and Exists
   effect             # the taint effect to match; empty means match all effects
   tolerationSeconds  # toleration time; only meaningful when effect is NoExecute, it is how long the pod may stay on the Node
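As a final hedged sketch (the pod name pod-toleration-exists is illustrative, not from the original), a toleration using Exists, which matches any value of the key, together with tolerationSeconds, which lets the pod stay on a NoExecute-tainted node only for a limited time:

apiVersion: v1
kind: Pod
metadata:
  name: pod-toleration-exists    # illustrative name
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  tolerations:
  - key: tag                     # tolerate any taint whose key is tag, regardless of its value
    operator: Exists             # with Exists, no value is given
    effect: NoExecute
    tolerationSeconds: 3600      # after the node is tainted, the pod may stay at most 3600s before eviction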