Collection of Common K8S Operation Commands

A collection of common K8S operation commands, continuously updated~~~

Pod operations

Batch delete Terminating pods

# batch delete terminating pods
NAMESPACE=kubemark
kubectl get po -n $NAMESPACE |grep Terminating |awk '{print $1}' |xargs kubectl delete pod  -n $NAMESPACE --force --grace-period=0
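
The grep on the STATUS column works, but status strings can vary between kubectl versions; a more robust variant selects pods by their deletionTimestamp directly. A sketch, assuming jq is installed (it is also used in the namespace section below):

# Sketch: pick terminating pods by deletionTimestamp instead of parsing the STATUS column
NAMESPACE=kubemark
kubectl get po -n $NAMESPACE -o json \
  | jq -r '.items[] | select(.metadata.deletionTimestamp != null) | .metadata.name' \
  | xargs -r kubectl delete pod -n $NAMESPACE --force --grace-period=0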

[Use with caution] Delete all resources under a namespace

# kubectl delete ns $NAMESPACE
NAMESPACE=kubemark
kubectl delete ns $NAMESPACE --force --grace-period=0
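
Before force-deleting, it is worth listing what the namespace still contains, since "kubectl get all" misses many resource types. A sketch:

# Sketch: enumerate every namespaced resource type and list what remains in $NAMESPACE
NAMESPACE=kubemark
kubectl api-resources --verbs=list --namespaced -o name \
  | xargs -n 1 kubectl get -n $NAMESPACE --ignore-not-found --show-kind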

Batch delete pods in Init:0/1 state under a namespace

NAMESPACE=kube-system
kubectl get po -n $NAMESPACE |grep "Init:0/1" |awk '{print $1}' |xargs kubectl delete pod  -n $NAMESPACE --force --grace-period=0

Batch delete pods in ContainerCreating state under a namespace

NAMESPACE=kube-system
kubectl get po -n $NAMESPACE |grep "ContainerCreating" |awk '{print $1}' |xargs kubectl delete pod  -n $NAMESPACE --force --grace-period=0

Batch delete pods in ContainerCreating or Init:0/1 state across several of the cluster's namespaces

NAMESPACE=kube-system
kubectl get po -n $NAMESPACE |grep -E "ContainerCreating|Init:0/1" |awk '{print $1}' |xargs kubectl delete pod  -n $NAMESPACE --force --grace-period=0

NAMESPACE=aistation
kubectl get po -n $NAMESPACE |grep -E "ContainerCreating|Init:0/1" |awk '{print $1}' |xargs kubectl delete pod  -n $NAMESPACE --force --grace-period=0
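
The same pipeline repeated per namespace can be folded into a loop; a sketch:

# Sketch: loop over the target namespaces instead of repeating the pipeline
for NAMESPACE in kube-system aistation; do
    kubectl get po -n $NAMESPACE |grep -E "ContainerCreating|Init:0/1" |awk '{print $1}' \
        |xargs -r kubectl delete pod -n $NAMESPACE --force --grace-period=0
done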

Example: cleaning up kubemark resources

# delete hollow-node-sts
# kubectl delete -f hollow-node-sts.yaml

NAMESPACE=kubemark
kubectl get po -n $NAMESPACE |grep -E "Terminating|CrashLoopBackOff|Error" |awk '{print $1}' |xargs kubectl delete pod  -n $NAMESPACE --force --grace-period=0

NAMESPACE=kube-system
kubectl get po -n $NAMESPACE |grep -E "ContainerCreating|Init:0/1" |awk '{print $1}' |xargs kubectl delete pod  -n $NAMESPACE --force --grace-period=0

NAMESPACE=aistation
kubectl get po -n $NAMESPACE |grep -E "ContainerCreating|Init:0/1" |awk '{print $1}' |xargs kubectl delete pod  -n $NAMESPACE --force --grace-period=0

#NAMESPACE=kubemark
#kubectl delete ns $NAMESPACE --force --grace-period=0
# clear nodes
kubectl get no |grep "hollow-node" |awk '{print $1}' |xargs kubectl delete no --force --grace-period=0
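
A quick sanity check after the cleanup; a sketch:

# Sketch: verify no hollow nodes or kubemark pods remain (both counts should be 0)
kubectl get no |grep -c "hollow-node" || true    # grep -c exits non-zero when the count is 0
kubectl get po -n kubemark --no-headers 2>/dev/null |wc -l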

Node operations

Batch delete specified nodes

TAGCLASS="hollow-node-"
kubectl get no |grep  $TAGCLASS |awk '{print $1}' |xargs kubectl delete no
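
For real (non-hollow) nodes, drain before deleting so workloads are evicted cleanly. A sketch; the node name is a placeholder, and the exact drain flags vary with kubectl version (newer releases spell --delete-local-data as --delete-emptydir-data):

NODE=node-to-remove    # hypothetical node name
kubectl cordon $NODE                                # stop new pods from scheduling here
kubectl drain $NODE --ignore-daemonsets --force     # evict the remaining pods
kubectl delete no $NODE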

Label specified nodes

# Add a node label
# kubectl label nodes kube-node-name label_name=label_value
kubectl label nodes node61 perf-test=true

# Remove a node label
# kubectl label nodes kube-node-name label_name-
kubectl label nodes node61 perf-test-

# Update a node label
# kubectl label nodes kube-node-name label_name=label_value --overwrite
kubectl label nodes node61 perf-test=true --overwrite
kubectl label nodes node53 perf-test=true --overwrite

# Show node labels
kubectl get no --show-labels
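
Once a label is set, it can be used to filter or display nodes; a sketch:

# List only the nodes carrying the label
kubectl get no -l perf-test=true
# Show the label value as an extra column for all nodes
kubectl get no -L perf-test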

Namespace operations

kubectl get namespace tsung -o json |jq '.spec = {"finalizers":[]}' >temp.json

# If jq is not available, dump the full JSON and edit it by hand

kubectl get namespace tsung -o json >temp.json

# Edit the spec and status fields in temp.json

curl -k -H "Content-Type: application/json" -X PUT --data-binary @temp.json 10.151.11.61:8080/api/v1/namespaces/tsung/finalize
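
If the apiserver's insecure 8080 port is not exposed, the same PUT can be sent through kubectl itself; a sketch, assuming a kubectl recent enough to support replace --raw:

# Sketch: clear the finalizers and PUT to the finalize subresource via kubectl
kubectl get namespace tsung -o json \
  | jq '.spec = {"finalizers":[]}' \
  | kubectl replace --raw "/api/v1/namespaces/tsung/finalize" -f -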

Force delete namespaces stuck in Terminating state whose names start with test-

kubectl get ns |grep test- |awk '{print $1}' |xargs kubectl delete ns --force --grace-period=0

# Specify ns_test directly
# ns_test=test-c6gg2n-1
# or pick one matching ns_test that exists in the system
ns_test=$(kubectl get ns |grep test- |awk '{print $1}' |head -n 1)
kubectl get namespace ${ns_test} -o json >temp.json

# vim temp.json
# to delete the spec and status field contents
sed -i '12,19d' temp.json
sed -i 's/},/}/g' temp.json

# kube-apiserver URL (already includes the port)
api_url=192.168.2.101:8080
curl -k -H "Content-Type: application/json" -X PUT --data-binary @temp.json ${api_url}/api/v1/namespaces/${ns_test}/finalize
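
The sed line numbers above match the temp.json shown in the example below and will break on differently shaped output; with jq the same edit is position-independent. A sketch:

# Sketch: clear finalizers and drop status with jq instead of line-based sed
kubectl get namespace ${ns_test} -o json \
  | jq '.spec = {"finalizers":[]} | del(.status)' >temp.json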

Example session

[root@node61 perf-test]# kubectl get ns
NAME                                   STATUS        AGE
0eaea880-7d1c-4d07-8fa5-dc71eb10aa8a   Active        5d22h
aistation                              Active        6d
default                                Active        6d13h
kube-node-lease                        Active        6d13h
kube-public                            Active        6d13h
kube-system                            Active        6d13h
kubeflow                               Active        6d
kubemark                               Active        5d23h
monitoring                             Active        4d23h
test-pyb3an-1                          Terminating   15m

[root@node61 perf-test]# cat temp.json
{
    "apiVersion": "v1",
    "kind": "Namespace",
    "metadata": {
        "creationTimestamp": "2021-03-04T08:06:07Z",
        "deletionTimestamp": "2021-03-05T07:20:42Z",
        "name": "tsung",
        "resourceVersion": "2875375",
        "selfLink": "/api/v1/namespaces/tsung",
        "uid": "790d2ec0-7cc0-11eb-a786-6c92bf8b7fa6"
    },
    "spec": {
        "finalizers": [
            "kubernetes"
        ]
    },
    "status": {
        "phase": "Terminating"
    }
}
[root@node61 perf-test]# vi temp.json
[root@node61 perf-test]#
[root@node61 perf-test]#
[root@node61 perf-test]#
[root@node61 perf-test]# curl -k -H "Content-Type: application/json" -X PUT --data-binary @temp.json 10.151.11.61:8080/api/v1/namespaces/tsung/finalize
{
  "kind": "Namespace",
  "apiVersion": "v1",
  "metadata": {
    "name": "tsung",
    "selfLink": "/api/v1/namespaces/tsung/finalize",
    "uid": "790d2ec0-7cc0-11eb-a786-6c92bf8b7fa6",
    "resourceVersion": "6375674",
    "creationTimestamp": "2021-03-04T08:06:07Z",
    "deletionTimestamp": "2021-03-05T07:20:42Z"
  },
  "spec": {

  },
  "status": {
    "phase": "Terminating"
  }
}[root@node61 perf-test]#
[root@node61 perf-test]#
[root@node61 perf-test]# kubectl get ns
NAME                                   STATUS        AGE
0eaea880-7d1c-4d07-8fa5-dc71eb10aa8a   Active        5d22h
aistation                              Active        6d
default                                Active        6d13h
kube-node-lease                        Active        6d13h
kube-public                            Active        6d13h
kube-system                            Active        6d13h
kubeflow                               Active        6d
kubemark                               Active        5d23h
monitoring                             Active        4d23h


Test pod startup YAML

test-pod.yaml

apiVersion: v1
kind: Pod
metadata:
  name: testpod
  labels:
    app: myapp
    version: v1
spec:
  # schedulerName: kube-batch
  containers:
  - name: app
    image: busybox:1.31.0
    imagePullPolicy: IfNotPresent
    command: ["sleep", "3600"]
    securityContext:
      privileged: true
    resources:
      limits:
        cpu: "0.5"
        # memory: "100Mi"
      requests:
        cpu: "0.5"
        # memory: "100Mi"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:  # hard requirement
        nodeSelectorTerms:
        # - matchExpressions:
        #   - key: node-role.kubernetes.io/master
        #     operator: In
        #     values:
        #     - "true"
        - matchExpressions:
          - key: kubernetes.io/hostname
            operator: In
            values:
            - "test-node"


Appendix

Miscellaneous commands

# count non-Running pods whose names contain "latency-" across all namespaces
kubectl get po -A |grep latency- |grep -v Running |wc -l
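
A companion one-liner to break those pods down by status; with kubectl get po -A the STATUS column is field 4. A sketch:

# Sketch: count non-Running latency- pods grouped by status
kubectl get po -A |grep latency- |grep -v Running |awk '{print $4}' |sort |uniq -c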