
k8s Node Management and Maintenance


kubectl drain takes a node out of service so it can be maintained.

The word drain literally means to drain away: the pods on the problem node are evicted and rescheduled onto other nodes.

# Evict all pods from the node so it can be maintained (DaemonSet pods are skipped)
# Note: --delete-local-data was renamed --delete-emptydir-data in newer kubectl releases
kubectl drain nodename --delete-local-data --ignore-daemonsets --force
# Mark the node SchedulingDisabled (unschedulable)
kubectl cordon nodename
# Re-enable scheduling on the node
kubectl uncordon nodename

Once a node is marked SchedulingDisabled, the scheduler no longer considers it for newly created pods; pods already running on it are unaffected and continue to serve traffic normally.
(Edge case: if a pod is pinned to a specific node, e.g. via nodeName or a nodeSelector, its next rollout cannot place it back on the original host while that host is unschedulable, so cordoning may not be appropriate in that situation. This is decided in the scheduler's predicate/filtering stage.)
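Before taking the node down, it helps to confirm the cordon took effect and see which pods are bound to it. A minimal sketch, assuming a node named app02 (a placeholder, not from the original article):

# Mark the node unschedulable, then inspect it
kubectl cordon app02
kubectl get node app02   # STATUS column shows Ready,SchedulingDisabled
# List the pods currently running on that node
kubectl get pods --all-namespaces --field-selector spec.nodeName=app02 -o wide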

A pod stuck in Terminating can be removed with kubectl's force-delete command:

# Force-delete the pod (add -n <namespace> if it is not in the current namespace)
kubectl delete pod PODNAME --force --grace-period=0
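A typical sequence, assuming a pod named mypod stuck in the default namespace (both names are examples):

# Find pods stuck in Terminating, then force-delete one
kubectl get pods --all-namespaces | grep Terminating
kubectl delete pod mypod -n default --force --grace-period=0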

Letting the Master Also Run Workloads as a Node

# Allow the master to schedule ordinary pods (remove the NoSchedule taint)
[root@app01 home]# kubectl taint node app01 node-role.kubernetes.io/master-
node/app01 untainted
[root@app01 home]#
# Restore the master to master-only (re-add the taint)
kubectl taint node nodename node-role.kubernetes.io/master="":NoSchedule
[root@app01 rabbitmq]# kubectl taint node app01 node-role.kubernetes.io/master="":NoSchedule
node/app01 tainted
[root@app01 rabbitmq]#
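Either way, the current taint state can be checked with a quick describe (a sketch, using the same node name as above):

# Show the taints currently set on the node
kubectl describe node app01 | grep Taints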

Changing the NodePort Range (the default nodePort range is 30000-32767)

# Edit the kube-apiserver static pod manifest
vim /etc/kubernetes/manifests/kube-apiserver.yaml
# Find the --service-cluster-ip-range line and add the following line directly below it
    - --service-node-port-range=30000-50000
# Restart the kubelet so the static pod is recreated with the new flag
systemctl daemon-reload
systemctl restart kubelet

For reference, the resulting manifest:

apiVersion: v1
kind: Pod
metadata:
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ""
  creationTimestamp: null
  labels:
    component: kube-apiserver
    tier: control-plane
  name: kube-apiserver
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-apiserver
    - --authorization-mode=Node,RBAC
    - --advertise-address=192.168.180.37
    - --allow-privileged=true
    - --client-ca-file=/etc/kubernetes/pki/ca.crt
    - --disable-admission-plugins=PersistentVolumeLabel
    - --enable-admission-plugins=NodeRestriction
    - --enable-bootstrap-token-auth=true
    - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
    - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
    - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
    - --etcd-servers=https://127.0.0.1:2379
    - --insecure-port=0
    - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
    - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
    - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
    - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
    - --requestheader-allowed-names=front-proxy-client
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
    - --requestheader-extra-headers-prefix=X-Remote-Extra-
    - --requestheader-group-headers=X-Remote-Group
    - --requestheader-username-headers=X-Remote-User
    - --secure-port=6443
    - --service-account-key-file=/etc/kubernetes/pki/sa.pub
    - --service-cluster-ip-range=10.96.0.0/12
    - --service-node-port-range=30000-50000
    - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
    - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
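To verify the new range is live and exercise it, something like the following should work; the service name web and port 40080 are examples, not from the original article:

# Confirm the running apiserver carries the new flag
kubectl -n kube-system get pod -l component=kube-apiserver -o yaml | grep service-node-port-range
# Create a NodePort service on a port that is only valid in the extended range
kubectl create service nodeport web --tcp=80:80 --node-port=40080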

Modifying the Cluster DNS (CoreDNS)

View the coredns configuration:

[root@dev15 ~]# kubectl get  configmap coredns  -n kube-system -o yaml
apiVersion: v1
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local in-addr.arpa ip6.arpa {
           pods insecure
           upstream
           fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
        reload
    }
kind: ConfigMap
metadata:
  creationTimestamp: "2019-10-21T05:57:32Z"
  name: coredns
  namespace: kube-system
  resourceVersion: "77720660"
  selfLink: /api/v1/namespaces/kube-system/configmaps/coredns
  uid: ac49f58c-f3c7-11e9-a4a8-005056974fea
[root@dev15 ~]#

Add entries to the DNS configuration by inserting a hosts block into the Corefile:

hosts {
    192.168.168.168 sft-ap.pfizer.com sft-am.pfizer.com
    fallthrough
}

Edit the ConfigMap in place; the updated Corefile looks like this:
[root@dev15 ~]# kubectl edit configmap coredns -n kube-system
apiVersion: v1
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local in-addr.arpa ip6.arpa {
           pods insecure
           upstream
           fallthrough in-addr.arpa ip6.arpa
        }
        hosts {
          192.168.168.168 sft-ap.pfizer.com sft-am.pfizer.com
          fallthrough
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
        reload
    }
kind: ConfigMap
metadata:
  creationTimestamp: "2019-10-21T05:57:32Z"
  name: coredns
  namespace: kube-system
  resourceVersion: "77720660"
  selfLink: /api/v1/namespaces/kube-system/configmaps/coredns
  uid: ac49f58c-f3c7-11e9-a4a8-005056974fea
[root@dev15 ~]#
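Since the reload plugin is enabled in the Corefile above, CoreDNS picks up the change on its own; the new entries can then be tested from a throwaway pod (pod name and image here are examples):

# Resolve one of the new hostnames from inside the cluster
kubectl run dns-test -it --rm --image=busybox --restart=Never -- nslookup sft-ap.pfizer.com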

Original article: https://blog.csdn.net/lihongbao80/article/details/108075051
