Installing Kubernetes 1.34.0 on Rocky Linux 10 with kubeadm

For the initial host preparation (system parameters, cri-dockerd, and so on), follow the binary-installation guide.

Reference: https://blog.nn3n.com/archives/1757247174550

Install kubeadm, kubelet, and kubectl (required on every node)

## Add the official Kubernetes repository (adjust the Kubernetes version in baseurl/gpgkey as needed)
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.34/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.34/rpm/repodata/repomd.xml.key
EOF
## Import the GPG key
sudo rpm --import https://pkgs.k8s.io/core:/stable:/v1.34/rpm/repodata/repomd.xml.key
## List all available Kubernetes package versions
sudo dnf list --showduplicates kubelet kubeadm kubectl

## Install the pinned versions
sudo dnf update
# Install the specified version of each component
sudo dnf install -y kubeadm-1.34.0 kubelet-1.34.0 kubectl-1.34.0

## Lock the versions to prevent accidental upgrades:

sudo dnf install -y dnf-plugin-versionlock
sudo dnf versionlock add kubelet-1.34.0 kubeadm-1.34.0 kubectl-1.34.0
sudo dnf versionlock list
# To upgrade Kubernetes later, remove the lock first
sudo dnf versionlock delete kubelet-1.34.0 kubeadm-1.34.0 kubectl-1.34.0

## Configure kubelet

# vi /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
## Enable kubelet (do not start it yet)
sudo systemctl enable kubelet

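# Optional sanity check (my addition, not in the original guide): kubelet is pointed at the
# systemd cgroup driver above, so Docker should report the same driver; a mismatch would
# make kubelet fail to start later.
docker info --format '{{.CgroupDriver}}'   # expected output: systemd
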
## Initialize the master node (run on the master only)
## List and pull the required images
root@k8s-master1:~# kubeadm config images list --kubernetes-version=v1.34.0 --image-repository registry.aliyuncs.com/google_containers
registry.aliyuncs.com/google_containers/kube-apiserver:v1.34.0
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.34.0
registry.aliyuncs.com/google_containers/kube-scheduler:v1.34.0
registry.aliyuncs.com/google_containers/kube-proxy:v1.34.0
registry.aliyuncs.com/google_containers/coredns:v1.12.1
registry.aliyuncs.com/google_containers/pause:3.10.1
registry.aliyuncs.com/google_containers/etcd:3.6.4-0

root@k8s-master1:~# kubeadm config images pull --kubernetes-version=v1.34.0 --image-repository registry.aliyuncs.com/google_containers --cri-socket unix:///var/run/cri-dockerd.sock
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-apiserver:v1.34.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-controller-manager:v1.34.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-scheduler:v1.34.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-proxy:v1.34.0
[config/images] Pulled registry.aliyuncs.com/google_containers/coredns:v1.12.1
[config/images] Pulled registry.aliyuncs.com/google_containers/pause:3.10.1
[config/images] Pulled registry.aliyuncs.com/google_containers/etcd:3.6.4-0
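Since cri-dockerd is backed by the Docker Engine, the images pulled above should also be visible in the local Docker image store; an optional check (my addition, not from the original steps):

docker images | grep 'registry.aliyuncs.com/google_containers'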
Cluster initialization and component installation

 

## Write the kubeadm configuration file
## kubeadm config print init-defaults > kubeadm-config-v1.34.0.yaml
[root@k8s-master data]# cat kubeadm-config-v1.34.0.yaml
apiVersion: kubeadm.k8s.io/v1beta4
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.123.122
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
  imagePullPolicy: IfNotPresent
  imagePullSerial: true
  name: k8s-master
  taints: null
timeouts:
  controlPlaneComponentHealthCheck: 4m0s
  discovery: 5m0s
  etcdAPICall: 2m0s
  kubeletHealthCheck: 4m0s
  kubernetesAPICall: 1m0s
  tlsBootstrap: 5m0s
  upgradeManifests: 5m0s
---
apiServer: {}
apiVersion: kubeadm.k8s.io/v1beta4
caCertificateValidityPeriod: 876000h0m0s
certificateValidityPeriod: 876000h0m0s
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
encryptionAlgorithm: RSA-2048
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.34.0
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.112.0.0/16
proxy: {}
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
ipvs:
  strictARP: true
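Before running init, the file can be sanity-checked for typos; recent kubeadm releases ship a validate subcommand for this (an optional step that is not in the original write-up):

kubeadm config validate --config kubeadm-config-v1.34.0.yaml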
## Pull the images (from the Aliyun mirror registry)
## Alternatively, the images can be pre-pulled this way
sudo kubeadm config images pull --config kubeadm-config-v1.34.0.yaml --image-repository registry.aliyuncs.com/google_containers

## Initialize the cluster
kubeadm init --config kubeadm-config-v1.34.0.yaml --upload-certs --v=9
…………

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Verify:
[root@k8s-master data]# kubectl get nodes
NAME          STATUS     ROLES           AGE     VERSION
k8s-master   NotReady   control-plane   6m29s   v1.34.0

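# Optional check (my addition, not in the original guide): confirm kube-proxy picked up
# ipvs mode and strictARP from the KubeProxyConfiguration in the kubeadm config.
kubectl -n kube-system get configmap kube-proxy -o yaml | grep -E 'mode|strictARP'
# If the ipvsadm tool is installed on the host, it should also list the virtual servers:
# ipvsadm -Ln
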
## Install the CNI plugin (Calico)
wget https://raw.githubusercontent.com/projectcalico/calico/v3.30.1/manifests/tigera-operator.yaml
kubectl create -f tigera-operator.yaml
wget https://raw.githubusercontent.com/projectcalico/calico/v3.30.1/manifests/custom-resources.yaml
[root@k8s-master data]# cat custom-resources.yaml
# This section includes base Calico installation configuration.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    ipPools:
    - name: default-ipv4-ippool
      blockSize: 26
      cidr: 10.244.0.0/16
      encapsulation: None
      natOutgoing: Enabled
      nodeSelector: all()
    nodeAddressAutodetectionV4:
      interface: "eth.*|en.*"
  calicoNodeDaemonSet:
    spec:
      template:
        spec:
          # Affinity configuration moved here
          affinity:
            nodeAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                nodeSelectorTerms:
                - matchExpressions:
                  - key: node-role.kubernetes.io/edge
                    operator: DoesNotExist
  csiNodeDriverDaemonSet:
    spec:
      template:
        spec:
          # Affinity configuration moved here
          affinity:
            nodeAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                nodeSelectorTerms:
                - matchExpressions:
                  - key: node-role.kubernetes.io/edge
                    operator: DoesNotExist

### kubectl explain Installation.spec  ## use this command to see which fields of the operator's Installation object can be tuned
## Adjust the configuration as needed, then apply it
kubectl create -f custom-resources.yaml
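# Optional (my addition): tigera-operator installs a TigeraStatus resource that shows when
# the Calico components (apiserver, calico, ippools, ...) have finished rolling out.
kubectl get tigerastatus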
## Verify cluster status
[root@k8s-master data]# kubectl get nodes
NAME         STATUS   ROLES           AGE   VERSION
k8s-master   Ready    control-plane   86m   v1.34.0
# Remove the NoSchedule taint from the control-plane node
# (Kubernetes 1.24+ uses the node-role.kubernetes.io/control-plane taint, not .../master)
kubectl taint nodes --all node-role.kubernetes.io/control-plane:NoSchedule-
[root@k8s-master ~]# kubeadm certs check-expiration  # check certificate expiration dates
[check-expiration] Reading configuration from the "kubeadm-config" ConfigMap in namespace "kube-system"...
[check-expiration] Use 'kubeadm init phase upload-config kubeadm --config your-config-file' to re-upload it.

CERTIFICATE                EXPIRES                  RESIDUAL TIME   CERTIFICATE AUTHORITY   EXTERNALLY MANAGED
admin.conf                 Sep 19, 2125 07:12 UTC   99y             ca                      no
apiserver                  Sep 19, 2125 07:12 UTC   99y             ca                      no
apiserver-etcd-client      Sep 19, 2125 07:12 UTC   99y             etcd-ca                 no
apiserver-kubelet-client   Sep 19, 2125 07:12 UTC   99y             ca                      no
controller-manager.conf    Sep 19, 2125 07:12 UTC   99y             ca                      no
etcd-healthcheck-client    Sep 19, 2125 07:12 UTC   99y             etcd-ca                 no
etcd-peer                  Sep 19, 2125 07:12 UTC   99y             etcd-ca                 no
etcd-server                Sep 19, 2125 07:12 UTC   99y             etcd-ca                 no
front-proxy-client         Sep 19, 2125 07:12 UTC   99y             front-proxy-ca          no
scheduler.conf             Sep 19, 2125 07:12 UTC   99y             ca                      no
super-admin.conf           Sep 19, 2125 07:12 UTC   99y             ca                      no

CERTIFICATE AUTHORITY   EXPIRES                  RESIDUAL TIME   EXTERNALLY MANAGED
ca                      Sep 19, 2125 07:12 UTC   99y             no
etcd-ca                 Sep 19, 2125 07:12 UTC   99y             no
front-proxy-ca          Sep 19, 2125 07:12 UTC   99y             no

## Increase the per-node Pod limit:
Add maxPods: 9999 to /var/lib/kubelet/config.yaml on each node, then restart kubelet
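A minimal sketch of that change (my commands, not the original author's; the value 9999 and the file path come from the note above, and <node-name> is a placeholder):

# On each node: append maxPods if it is not already set, then restart kubelet
grep -q '^maxPods:' /var/lib/kubelet/config.yaml || \
  echo 'maxPods: 9999' | sudo tee -a /var/lib/kubelet/config.yaml
sudo systemctl restart kubelet
# Confirm the new Pod capacity reported by the node
kubectl get node <node-name> -o jsonpath='{.status.capacity.pods}'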
Expose the Calico Whisker web UI via NodePort (temporary approach)
cat << EOF | kubectl apply -f - 
# whisker-nodeport.yaml
apiVersion: v1
kind: Service
metadata:
  name: whisker-external
  namespace: calico-system
  labels:
    app: whisker-external
spec:
  type: NodePort
  selector:
    # Use the same label selector as the whisker Pods
    k8s-app: whisker
  ports:
  - name: http
    port: 8081        # port the Service listens on
    targetPort: 8081  # container port on the Pod
    nodePort: 30081   # externally reachable port (30000-32767)
    protocol: TCP
EOF
## Direct external access to the NodePort may be restricted; an SSH tunnel can be used to reach it instead
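A hedged example of such a tunnel; the user name and the node IP 192.168.123.122 are taken from this environment, adjust to yours:

# Forward local port 8081 through SSH to the NodePort on the master node
ssh -N -L 8081:192.168.123.122:30081 root@192.168.123.122
# Then open http://localhost:8081 in a local browser to reach the Whisker UI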

Install MetalLB and the ingress controller (see the binary-installation guide)
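For reference, a minimal MetalLB layer-2 configuration sketch (the address range below is hypothetical and must be replaced with a free range on the node network; strictARP, which L2 mode requires, was already enabled in the kube-proxy configuration above):

cat << EOF | kubectl apply -f -
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: default-pool
  namespace: metallb-system
spec:
  addresses:
  - 192.168.123.200-192.168.123.220
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default-l2
  namespace: metallb-system
spec:
  ipAddressPools:
  - default-pool
EOF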

 

Join the worker nodes to the cluster
### Print the join command (with a token) on the master node
kubeadm token create --ttl 24h --print-join-command
### Run the join command on each worker node
kubeadm join 192.168.123.122:6443 --token mk3px6.zwrbuzs0rreevmgq --discovery-token-ca-cert-hash sha256:7a2fce1fe7513be9aaaf0b808503730c82784074b03db9d32e2cc70a836ba763 --cri-socket unix:///var/run/cri-dockerd.sock
[root@k8s-master ~]# kubectl get pods -A -o wide
NAMESPACE          NAME                                           READY   STATUS    RESTARTS         AGE     IP                NODE         NOMINATED NODE   READINESS GATES
calico-apiserver   calico-apiserver-7bd6c7f798-4dml7              1/1     Running   11 (3h34m ago)   44h     10.244.235.212    k8s-master   <none>           <none>
calico-apiserver   calico-apiserver-7bd6c7f798-68rsh              1/1     Running   11 (3h34m ago)   44h     10.244.235.240    k8s-master   <none>           <none>
calico-system      calico-kube-controllers-66c799f9f4-gtq9h       1/1     Running   12 (3h34m ago)   44h     10.244.235.238    k8s-master   <none>           <none>
calico-system      calico-node-97xqh                              1/1     Running   11 (3h34m ago)   44h     192.168.123.122   k8s-master   <none>           <none>
calico-system      calico-node-hlg7c                              1/1     Running   0                14m     192.168.123.102   node2        <none>           <none>
calico-system      calico-node-jxnlm                              1/1     Running   0                14m     192.168.123.101   node1        <none>           <none>
calico-system      calico-node-jzqhv                              1/1     Running   0                14m     192.168.123.104   node4        <none>           <none>
calico-system      calico-node-kzgjq                              1/1     Running   0                14m     192.168.123.103   node3        <none>           <none>
calico-system      calico-typha-66bb7d45cf-5wfp4                  1/1     Running   11 (3h34m ago)   44h     192.168.123.122   k8s-master   <none>           <none>
calico-system      calico-typha-66bb7d45cf-czttx                  1/1     Running   0                13m     192.168.123.102   node2        <none>           <none>
calico-system      calico-typha-66bb7d45cf-zr86j                  1/1     Running   0                13m     192.168.123.104   node4        <none>           <none>
calico-system      csi-node-driver-4k6tt                          2/2     Running   22 (3h34m ago)   44h     10.244.235.225    k8s-master   <none>           <none>
calico-system      csi-node-driver-576z5                          2/2     Running   0                14m     10.244.166.129    node1        <none>           <none>
calico-system      csi-node-driver-kdg6b                          2/2     Running   0                14m     10.244.3.64       node4        <none>           <none>
calico-system      csi-node-driver-lffnr                          2/2     Running   0                14m     10.244.135.0      node3        <none>           <none>
calico-system      csi-node-driver-vtrj4                          2/2     Running   0                14m     10.244.104.0      node2        <none>           <none>
calico-system      goldmane-58849b4d85-z868r                      1/1     Running   12 (3h34m ago)   44h     10.244.235.210    k8s-master   <none>           <none>
calico-system      whisker-99b797ffb-cvtpr                        2/2     Running   22 (3h34m ago)   44h     10.244.235.218    k8s-master   <none>           <none>
default            csi-cephfsplugin-6g5n4                         3/3     Running   0                11m     192.168.123.104   node4        <none>           <none>
default            csi-cephfsplugin-f5mcl                         3/3     Running   0                10m     192.168.123.103   node3        <none>           <none>
default            csi-cephfsplugin-njxbb                         3/3     Running   3 (3h34m ago)    13h     192.168.123.122   k8s-master   <none>           <none>
default            csi-cephfsplugin-provisioner-795d6c866-48mnt   6/6     Running   0                13h     10.244.3.65       node4        <none>           <none>
default            csi-cephfsplugin-provisioner-795d6c866-jpv4n   6/6     Running   6 (3h34m ago)    13h     10.244.235.214    k8s-master   <none>           <none>
default            csi-cephfsplugin-provisioner-795d6c866-qs6ts   6/6     Running   0                13h     10.244.166.128    node1        <none>           <none>
default            csi-cephfsplugin-qdnxb                         3/3     Running   0                10m     192.168.123.101   node1        <none>           <none>
default            csi-cephfsplugin-w7l89                         3/3     Running   0                9m30s   192.168.123.102   node2        <none>           <none>
ingress            ingress-nginx-controller-7b585d567-2sbq2       1/1     Running   8 (3h33m ago)    19h     10.244.235.220    k8s-master   <none>           <none>
kube-system        coredns-7cc97dffdd-2npzc                       1/1     Running   11 (3h34m ago)   45h     10.244.235.231    k8s-master   <none>           <none>
kube-system        coredns-7cc97dffdd-wghnk                       1/1     Running   11 (3h34m ago)   45h     10.244.235.221    k8s-master   <none>           <none>
kube-system        etcd-k8s-master                                1/1     Running   12 (3h34m ago)   45h     192.168.123.122   k8s-master   <none>           <none>
kube-system        kube-apiserver-k8s-master                      1/1     Running   12 (3h33m ago)   45h     192.168.123.122   k8s-master   <none>           <none>
kube-system        kube-controller-manager-k8s-master             1/1     Running   20 (3h34m ago)   45h     192.168.123.122   k8s-master   <none>           <none>
kube-system        kube-proxy-fgprk                               1/1     Running   0                14m     192.168.123.101   node1        <none>           <none>
kube-system        kube-proxy-jq8v9                               1/1     Running   0                14m     192.168.123.104   node4        <none>           <none>
kube-system        kube-proxy-lrz7j                               1/1     Running   11 (3h34m ago)   45h     192.168.123.122   k8s-master   <none>           <none>
kube-system        kube-proxy-v2hxg                               1/1     Running   0                14m     192.168.123.102   node2        <none>           <none>
kube-system        kube-proxy-wmjdt                               1/1     Running   0                14m     192.168.123.103   node3        <none>           <none>
kube-system        kube-scheduler-k8s-master                      1/1     Running   21 (3h34m ago)   45h     192.168.123.122   k8s-master   <none>           <none>
kube-system        metrics-server-594dd9f88b-9wv22                1/1     Running   13 (3h34m ago)   43h     10.244.235.243    k8s-master   <none>           <none>
metallb-system     controller-c88f8f96d-2vkq7                     1/1     Running   9 (3h34m ago)    43h     10.244.235.234    k8s-master   <none>           <none>
metallb-system     speaker-6fr5k                                  1/1     Running   0                10m     192.168.123.101   node1        <none>           <none>
metallb-system     speaker-8n6vc                                  1/1     Running   0                10m     192.168.123.103   node3        <none>           <none>
metallb-system     speaker-d7hpz                                  1/1     Running   0                9m30s   192.168.123.102   node2        <none>           <none>
metallb-system     speaker-fs58q                                  1/1     Running   0                11m     192.168.123.104   node4        <none>           <none>
metallb-system     speaker-kbzct                                  1/1     Running   14 (3h34m ago)   43h     192.168.123.122   k8s-master   <none>           <none>
tigera-operator    tigera-operator-697957d976-dn58h               1/1     Running   23 (3h34m ago)   44h     192.168.123.122   k8s-master   <none>           <none>

[root@k8s-master ~]# kubectl get nodes  # one control-plane node, four workers
NAME         STATUS   ROLES           AGE   VERSION
k8s-master   Ready    control-plane   45h   v1.34.0
node1        Ready    <none>          14m   v1.34.0
node2        Ready    <none>          14m   v1.34.0
node3        Ready    <none>          14m   v1.34.0
node4        Ready    <none>          14m   v1.34.0
[root@k8s-master ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE   ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   ok

[root@k8s-master dataKubeadm]# calicoctl node status
Calico process is running.

IPv4 BGP status
+-----------------+-------------------+-------+----------+-------------+
|  PEER ADDRESS   |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+-----------------+-------------------+-------+----------+-------------+
| 192.168.123.102 | node-to-node mesh | up    | 04:35:29 | Established |
| 192.168.123.104 | node-to-node mesh | up    | 04:35:31 | Established |
| 192.168.123.101 | node-to-node mesh | up    | 04:35:33 | Established |
| 192.168.123.103 | node-to-node mesh | up    | 04:36:26 | Established |
+-----------------+-------------------+-------+----------+-------------+

IPv6 BGP status
No IPv6 peers found.

[root@k8s-master dataKubeadm]# ./calicoctl get ipPool
NAME                  CIDR            SELECTOR
default-ipv4-ippool   10.244.0.0/16   all()
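The worker nodes show ROLES as <none>; purely for readability they can be given a worker role label (optional, my addition, not part of the original steps):

for n in node1 node2 node3 node4; do
  kubectl label node "$n" node-role.kubernetes.io/worker=""
done
kubectl get nodes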
 
Removing a node
Safely removing a node from a Kubernetes cluster should follow a standard procedure, so that the workloads running on it migrate smoothly to other nodes without impacting the business. The key steps are summarized below, and a concrete example follows the list:
1. Confirm the node status
   Command: kubectl get nodes -o wide
   Get the node name and confirm its current state (e.g. Ready).
2. Mark the node as unschedulable
   Command: kubectl cordon <node-name>
   Prevents new Pods from being scheduled onto the node; its status changes to Ready,SchedulingDisabled.
3. Drain (evict) the node
   Command: kubectl drain <node-name> --ignore-daemonsets --delete-emptydir-data
   --ignore-daemonsets: skip Pods managed by a DaemonSet (such as the network plugin or log collectors).
   --delete-emptydir-data: delete Pods that use emptyDir volumes, together with their data.
4. Delete the node
   Command: kubectl delete node <node-name>
   Removes the node object from the Kubernetes API, completing the removal.
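A concrete run of these steps, using node4 from this cluster purely as an example:

kubectl get nodes -o wide
kubectl cordon node4
kubectl drain node4 --ignore-daemonsets --delete-emptydir-data
kubectl delete node node4
# Optionally, on the removed node itself, kubeadm reset cleans up the local state
# (pass the cri-dockerd socket, as during join):
# kubeadm reset --cri-socket unix:///var/run/cri-dockerd.sock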
Below is a script that checks the related services; it can be run as a scheduled job (a crontab example follows the script).
#!/bin/bash

# List of services to check
SERVICES=("containerd.service" "docker.service" "cri-docker.service" "kubelet.service")

echo "=================================================="
echo "    Kubernetes-related service status check and start script"
echo "=================================================="
echo "Check time: $(date)"
echo ""

# Iterate over all services
for SERVICE in "${SERVICES[@]}"; do
    echo "-----------------------------------------------"
    echo "Checking service: $SERVICE"

    # Check whether the service unit exists
    if ! systemctl list-unit-files | grep -q "$SERVICE"; then
        echo "  Warning: service $SERVICE is not installed on this system"
        continue
    fi

    # Check the current state of the service
    if systemctl is-active --quiet "$SERVICE"; then
        echo "  Status: ✅ running"
        # Show detailed runtime information
        SYSTEMCTL_STATUS=$(systemctl status "$SERVICE" | grep -oP "Active: \K.*since.*" | head -1)
        if [ -n "$SYSTEMCTL_STATUS" ]; then
            echo "  Info: $SYSTEMCTL_STATUS"
        fi
    else
        echo "  Status: ❌ not running"
        echo "  Action: trying to start the service..."

        # Start the service
        if systemctl start "$SERVICE"; then
            echo "  Result: ✅ service started successfully"

            # Wait a moment and check the state again
            sleep 2
            if systemctl is-active --quiet "$SERVICE"; then
                echo "  Confirmed: ✅ service is now running"
            else
                echo "  Warning: ⚠️ service started but did not stay running"
            fi
        else
            echo "  Error: ❌ failed to start the service"
            echo "  Hint: run 'systemctl status $SERVICE' for detailed error information"
            echo "  Hint: check the logs: 'journalctl -u $SERVICE -n 50'"
        fi
    fi

    # Show whether the service is enabled at boot
    if systemctl is-enabled "$SERVICE" >/dev/null 2>&1; then
        ENABLED_STATUS=$(systemctl is-enabled "$SERVICE")
        echo "  Enabled at boot: $ENABLED_STATUS"
    else
        echo "  Enabled at boot: unknown"
    fi

    # Brief pause before the next service
    sleep 0.5
done

echo ""
echo "-----------------------------------------------"
echo "Final status summary:"
for SERVICE in "${SERVICES[@]}"; do
    if systemctl is-active --quiet "$SERVICE"; then
        echo "  $SERVICE: ✅ running"
    else
        echo "  $SERVICE: ❌ not running"
    fi
done

echo ""
echo "=================================================="
echo "Check finished at: $(date)"
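One way to schedule it as root (the script path /usr/local/bin/check-k8s-services.sh is an assumption; adjust it to wherever you save the script):

# Add via `crontab -e`: run the check every 5 minutes and append the output to a log file
*/5 * * * * /usr/local/bin/check-k8s-services.sh >> /var/log/k8s-service-check.log 2>&1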

 
