diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml index dd6bfbfd..e4c4593d 100644 --- a/ansible/group_vars/all.yml +++ b/ansible/group_vars/all.yml @@ -1,16 +1,16 @@ -app_version: 3.1.1 +app_version: 3.2.1 gcr_mirror: kubesphere etcd_version: 3.4.13 cni_version: 0.8.6 -k8s_version: 1.20.6 +k8s_version: 1.21.5 flannel_version: 0.12.0 coredns_version: 1.6.9 calico_version: 3.16.3 hostnic_version: 1.0.0-alpha.5 qingcloud_csi_version: 1.3.0 qingcloud_ccm_version: 1.4.8 -ks_version: 3.1.1 -ks_installer_image_tag: v3.1.1 +ks_version: 3.2.1 +ks_installer_image_tag: v3.2.1 helm_version: 3.2.1 helm_2to3_version: 0.8.0 helm_stable_repo: https://charts.kubesphere.io/mirror @@ -32,104 +32,6 @@ binaries: - /opt/k8s docker_images_k8s: -- coredns/coredns:1.6.9 -- csiplugin/csi-attacher:v2.1.1 -- csiplugin/csi-node-driver-registrar:v1.2.0 -- csiplugin/csi-provisioner:v1.5.0 -- csiplugin/csi-qingcloud:v1.2.0 -- csiplugin/csi-resizer:v0.4.0 -- csiplugin/csi-snapshotter:v2.0.1 -- csiplugin/snapshot-controller:v2.0.1 -- nvidia/k8s-device-plugin:1.0.0-beta4 -- kubesphere/flannel:v0.12.0 -- kubesphere/pause:3.1 -- kubesphere/kube-apiserver:v1.19.8 -- kubesphere/kube-scheduler:v1.19.8 -- kubesphere/kube-proxy:v1.19.8 -- kubesphere/kube-controller-manager:v1.19.8 -- kubesphere/pause:3.2 -- kubesphere/etcd:v3.4.13 -- calico/cni:v3.16.3 -- calico/kube-controllers:v3.16.3 -- calico/node:v3.16.3 -- calico/pod2daemon-flexvol:v3.16.3 -- kubesphere/k8s-dns-node-cache:1.15.12 -- kubesphere/nfs-client-provisioner:v3.1.0-k8s1.11 -- csiplugin/csi-qingcloud:v1.2.1 -- csiplugin/csi-neonsan:v1.2.1 -- csiplugin/csi-neonsan-ubuntu:v1.2.1 -- csiplugin/csi-neonsan-centos:v1.2.1 -- kubesphere/cloud-controller-manager:v1.4.7 -- kubesphere/hostnic:v1.0.0-alpha.5 -- kubesphere/metrics-server:v0.4.2 - -docker_images_ks: -- kubesphere/ks-apiserver:v3.1.0 -- kubesphere/ks-console:v3.1.0 -- kubesphere/ks-controller-manager:v3.1.0 -- kubesphere/ks-installer:v3.1.0 -- kubesphere/kubectl:v1.19.0 -- kubesphere/nginx-ingress-controller:v0.35.0 -- kubesphere/kubefed:v0.7.0 -- kubesphere/tower:v0.2.0 -- kubesphere/prometheus-config-reloader:v0.42.1 -- kubesphere/prometheus-operator:v0.42.1 -- kubesphere/kube-state-metrics:v1.9.7 -- prom/prometheus:v2.26.0 -- prom/node-exporter:v0.18.1 -- kubesphere/ks-alerting-migration:v3.1.0 -- kubesphere/notification-manager-operator:v1.0.0 -- kubesphere/notification-manager:v1.0.0 -- kubesphere/kube-rbac-proxy:v0.8.0 -- openebs/provisioner-localpv:2.3.0 -- thanosio/thanos:v0.18.0 -- grafana/grafana:7.4.3 -- kubesphere/fluentbit-operator:v0.5.0 -- kubesphere/fluent-bit:v1.6.9 -- elastic/filebeat:6.7.0 -- kubesphere/kube-events-ruler:v0.2.0 -- istio/pilot:1.6.10 -- istio/proxyv2:1.6.10 -- kubesphere/kiali:v1.26.1 -- kubesphere/kiali-operator:v1.26.1 -- kubesphere/ks-jenkins:2.249.1 -- kubesphere/s2ioperator:v3.1.0 -- kubesphere/openpitrix-jobs:v3.1.0 -- kubeedge/cloudcore:v1.6.1 -- kubesphere/edge-watcher:v0.1.0 -- kubesphere/kube-rbac-proxy:v0.5.0 -- busybox:1.31.1 -- joosthofman/wget:1.0 -- kubesphere/netshoot:v1.0 -- wordpress:4.8-apache -- mirrorgooglecontainers/hpa-example:latest -- java:openjdk-8-jre-alpine -- fluent/fluentd:v1.4.2-2.0 -- perl:latest -- osixia/openldap:1.3.0 -- redis:5.0.5-alpine -- alpine:3.10.4 -- haproxy:2.0.4 -- nginx:1.14-alpine -- minio/minio:RELEASE.2019-08-07T01-59-21Z -- minio/mc:RELEASE.2019-08-07T23-14-43Z -- kubesphere/elasticsearch-oss:6.7.0-1 -- kubesphere/s2irun:v2.1.1 -- kubesphere/builder-base:v3.1.0 -- kubesphere/builder-nodejs:v3.1.0 -- 
kubesphere/builder-maven:v3.1.0 -- kubesphere/builder-go:v3.1.0 -- kubesphere/s2i-binary:v2.1.0 -- kubesphere/tomcat85-java11-centos7:v2.1.0 -- kubesphere/tomcat85-java11-runtime:v2.1.0 -- kubesphere/tomcat85-java8-centos7:v2.1.0 -- kubesphere/tomcat85-java8-runtime:v2.1.0 -- kubesphere/java-11-centos7:v2.1.0 -- kubesphere/java-8-centos7:v2.1.0 -- kubesphere/java-8-runtime:v2.1.0 -- kubesphere/java-11-runtime:v2.1.0 - -docker_images_k8s_new: - kubesphere/kube-apiserver:v1.20.6 - kubesphere/kube-scheduler:v1.20.6 - kubesphere/kube-proxy:v1.20.6 @@ -151,7 +53,7 @@ docker_images_k8s_new: - csiplugin/csi-resizer:v1.2.0 - csiplugin/csi-snapshotter:v2.0.1 -docker_images_ks_new: +docker_images_ks: - kubesphere/ks-apiserver:v3.1.1 - kubesphere/ks-console:v3.1.1 - kubesphere/ks-controller-manager:v3.1.1 @@ -233,3 +135,103 @@ docker_images_ks_new: - kubesphere/netshoot:v1.0 - fluent/fluentd:v1.4.2-2.0 - centos:centos8 + +docker_images_k8s_new: +- kubesphere/kube-apiserver:v1.21.5 +- kubesphere/kube-scheduler:v1.21.5 +- kubesphere/kube-proxy:v1.21.5 +- kubesphere/kube-controller-manager:v1.21.5 +- kubesphere/pause:3.2 +- calico/cni:v3.16.3 +- calico/kube-controllers:v3.16.3 +- calico/node:v3.16.3 +- calico/pod2daemon-flexvol:v3.16.3 +- calico/typha:v3.16.3 +- kubesphere/flannel:v0.12.0 +- coredns/coredns:1.6.9 +- kubesphere/k8s-dns-node-cache:1.15.12 +- qingcloud/cloud-controller-manager:v1.4.8 +- csiplugin/csi-node-driver-registrar:v2.2.0 +- csiplugin/csi-qingcloud:v1.3.0 +- csiplugin/csi-provisioner:v2.2.2 +- csiplugin/csi-attacher:v3.2.1 +- csiplugin/csi-resizer:v1.2.0 +- csiplugin/csi-snapshotter:v2.0.1 +- kubesphere/pause:3.5 +- kubesphere/pause:3.4.1 + +docker_images_ks_new: + +- kubesphere/ks-installer:v3.2.1 +- kubesphere/ks-apiserver:v3.2.1 +- kubesphere/ks-console:v3.2.1 +- kubesphere/ks-controller-manager:v3.2.1 +- kubesphere/kubectl:v1.20.0 +- kubesphere/kubefed:v0.8.1 +- kubesphere/tower:v0.2.0 +- minio/minio:RELEASE.2019-08-07T01-59-21Z +- minio/mc:RELEASE.2019-08-07T23-14-43Z +- csiplugin/snapshot-controller:v4.0.0 +- kubesphere/nginx-ingress-controller:v0.48.1 +- mirrorgooglecontainers/defaultbackend-amd64:1.4 +- kubesphere/metrics-server:v0.4.2 +- redis:5.0.14-alpine +- haproxy:2.0.25-alpine +- alpine:3.14 +- osixia/openldap:1.3.0 +- kubesphere/netshoot:v1.0 +- kubeedge/cloudcore:v1.7.2 +- kubesphere/edge-watcher:v0.1.1 +- kubesphere/edge-watcher-agent:v0.1.0 +- openpolicyagent/gatekeeper:v3.5.2 +- jimmidyson/configmap-reload:v0.3.0 +- prom/prometheus:v2.26.0 +- kubesphere/prometheus-config-reloader:v0.43.2 +- kubesphere/prometheus-operator:v0.43.2 +- kubesphere/kube-rbac-proxy:v0.8.0 +- kubesphere/kube-state-metrics:v1.9.7 +- prom/node-exporter:v0.18.1 +- kubesphere/k8s-prometheus-adapter-amd64:v0.6.0 +- prom/alertmanager:v0.21.0 +- thanosio/thanos:v0.18.0 +- grafana/grafana:7.4.3 +- kubesphere/kube-rbac-proxy:v0.8.0 +- kubesphere/notification-manager-operator:v1.4.0 +- kubesphere/notification-manager:v1.4.0 +- kubesphere/notification-tenant-sidecar:v3.2.0 +- kubesphere/elasticsearch-curator:v5.7.6 +- kubesphere/elasticsearch-oss:6.7.0-1 +- kubesphere/fluentbit-operator:v0.11.0 +- docker:19.03 +- kubesphere/fluent-bit:v1.8.3 +- kubesphere/log-sidecar-injector:1.1 +- elastic/filebeat:6.7.0 +- kubesphere/kube-events-operator:v0.3.0 +- kubesphere/kube-events-exporter:v0.3.0 +- kubesphere/kube-events-ruler:v0.3.0 +- kubesphere/kube-auditing-operator:v0.2.0 +- kubesphere/kube-auditing-webhook:v0.2.0 +- istio/pilot:1.11.1 +- istio/proxyv2:1.11.1 +- jaegertracing/jaeger-operator:1.27 +- 
jaegertracing/jaeger-agent:1.27 +- jaegertracing/jaeger-collector:1.27 +- jaegertracing/jaeger-query:1.27 +- jaegertracing/jaeger-es-index-cleaner:1.27 +- kubesphere/kiali-operator:v1.38.1 +- kubesphere/kiali:v1.38 +- busybox:1.31.1 +- nginx:1.14-alpine +- joosthofman/wget:1.0 +- nginxdemos/hello:plain-text +- wordpress:4.8-apache +- mirrorgooglecontainers/hpa-example:latest +- java:openjdk-8-jre-alpine +- fluent/fluentd:v1.4.2-2.0 +- perl:latest +- kubesphere/examples-bookinfo-productpage-v1:1.16.2 +- kubesphere/examples-bookinfo-reviews-v1:1.16.2 +- kubesphere/examples-bookinfo-reviews-v2:1.16.2 +- kubesphere/examples-bookinfo-details-v1:1.16.2 +- kubesphere/examples-bookinfo-ratings-v1:1.16.3 +- weaveworks/scope:1.13.0 diff --git a/ansible/requirements.yml b/ansible/requirements.yml index 0553952c..b2781b43 100644 --- a/ansible/requirements.yml +++ b/ansible/requirements.yml @@ -3,7 +3,7 @@ - src: https://qingcloudappcenter.github.io/ansible-roles/arping-1.0.5.tar.gz - src: https://qingcloudappcenter.github.io/ansible-roles/confd-files-1.1.0.tar.gz - src: https://qingcloudappcenter.github.io/ansible-roles/create-service-user-1.0.0.tar.gz -- src: https://qingcloudappcenter.github.io/ansible-roles/docker-1.0.10.tar.gz +- src: https://qingcloudappcenter.github.io/ansible-roles/docker-1.0.11.tar.gz - src: https://qingcloudappcenter.github.io/ansible-roles/golang-1.0.3.tar.gz - src: https://qingcloudappcenter.github.io/ansible-roles/etcd-1.1.0.tar.gz - src: https://qingcloudappcenter.github.io/ansible-roles/install-1.0.6.tar.gz diff --git a/ansible/roles/app-role-k8s/files/opt/app/current/bin/node/k8s-ctl.sh b/ansible/roles/app-role-k8s/files/opt/app/current/bin/node/k8s-ctl.sh index 49559754..5ed2b67f 100644 --- a/ansible/roles/app-role-k8s/files/opt/app/current/bin/node/k8s-ctl.sh +++ b/ansible/roles/app-role-k8s/files/opt/app/current/bin/node/k8s-ctl.sh @@ -176,7 +176,6 @@ upgrade() { fi applyKubeProxyLogLevel # restart metrics-server to avoid https://github.com/kubernetes/kubernetes/pull/96371 - runKubectl -n kube-system rollout restart deploy metrics-server setUpNetwork setUpCloudControllerMgr execute setUpStorage @@ -192,6 +191,7 @@ upgrade() { if $IS_UPGRADING_FROM_V3; then resetAuditingModule fi + runKubectl -n kube-system rollout restart deploy metrics-server launchKs fi _initCluster @@ -962,13 +962,13 @@ checkCertDaysBeyond() { } getCertValidDays() { - local earliestExpireDate; earliestExpireDate="$(runKubeadm alpha certs check-expiration | awk '$1!~/^$|^CERTIFICATE/ {print "date -d\"",$2,$3,$4,$5,"\" +%s" | "/bin/bash"}' | sort -n | head -1)" + local earliestExpireDate; earliestExpireDate="$(runKubeadm certs check-expiration | awk '$1!~/^$|^CERTIFICATE/ {print "date -d\"",$2,$3,$4,$5,"\" +%s" | "/bin/bash"}' | sort -n | head -1)" local today; today="$(date +%s)" echo -n $(( ($earliestExpireDate - $today) / (24 * 60 * 60) )) } renewCerts() { - local crt; for crt in ${@:-admin.conf apiserver apiserver-kubelet-client controller-manager.conf front-proxy-client scheduler.conf}; do kubeadm alpha certs renew $crt; done + local crt; for crt in ${@:-admin.conf apiserver apiserver-kubelet-client controller-manager.conf front-proxy-client scheduler.conf}; do kubeadm certs renew $crt; done reloadKubeMasterProcs if isFirstMaster; then distributeKubeConfig; fi } diff --git a/ansible/roles/k8s-node/files/opt/app/current/conf/systemd/kube-certs.service b/ansible/roles/k8s-node/files/opt/app/current/conf/systemd/kube-certs.service index 18bd3605..729302ee 100644 --- 
a/ansible/roles/k8s-node/files/opt/app/current/conf/systemd/kube-certs.service +++ b/ansible/roles/k8s-node/files/opt/app/current/conf/systemd/kube-certs.service @@ -3,7 +3,7 @@ Description=check and rotate kube certs if neccessary [Service] Type=oneshot -ExecStart=/usr/bin/kubeadm alpha certs renew all +ExecStart=/usr/bin/kubeadm certs renew all [Install] WantedBy=multi-user.target diff --git a/app/OWNERS b/app/OWNERS deleted file mode 100644 index 69d5ee40..00000000 --- a/app/OWNERS +++ /dev/null @@ -1,2 +0,0 @@ -labels: -- area/config \ No newline at end of file diff --git a/app/cluster.json.mustache b/app/cluster.json.mustache index 6b5de88b..848d0e6e 100644 --- a/app/cluster.json.mustache +++ b/app/cluster.json.mustache @@ -10,17 +10,15 @@ "need_intranet_api_server": true, "exclude_node_columns": ["instance_class"], "advanced_actions": ["scale_horizontal"], - "custom_tags": ["del_node_force_level_ignore_error"], "unsupported_actions": ["rollback"], "upgrade_policy": [ - "appv-doh2u06i", - "appv-egh21tjg", - "appv-0exsrgnh" + "appv-p07y3ld8", + "appv-doh2u06i" ], "upgrading_policy": "in-place-parallel", "in-place-upgrade-nodes": [{ "container":{ - "snapshot": "ss-yoyvs52o", + "snapshot": "ss-ial72b2d", "zone": "pek3" }, "copy":[{ @@ -46,7 +44,7 @@ }] }, { "container":{ - "snapshot": "ss-j6lj3m67", + "snapshot": "ss-fvzpy0t8", "zone": "pek3" }, "copy":[{ @@ -60,7 +58,7 @@ "role": "master", "container": { "type": "kvm", - "image": "img-jjd1rrri", + "image": "img-9cdypymv", "zone": "pek3" }, "instance_class": {{cluster.master.instance_class}}, @@ -147,7 +145,7 @@ "role": "node_perf", "container": { "type": "kvm", - "image": "img-jjd1rrri", + "image": "img-9cdypymv", "zone": "pek3" }, "instance_class": {{cluster.node_perf.instance_class}}, @@ -181,9 +179,8 @@ }, "destroy": { "post_stop_service": true, - "allow_force": true, - "cmd": "true", - "timeout": 1200 + "allow_force": false, + "cmd": "true" }, "restart": { "cmd": "appctl restart" @@ -210,7 +207,7 @@ "role": "node_super_perf", "container": { "type": "kvm", - "image": "img-jjd1rrri", + "image": "img-9cdypymv", "zone": "pek3" }, "instance_class": {{cluster.node_super_perf.instance_class}}, @@ -244,9 +241,8 @@ }, "destroy": { "post_stop_service": true, - "allow_force": true, - "cmd": "true", - "timeout": 1200 + "allow_force": false, + "cmd": "true" }, "restart": { "cmd": "appctl restart" @@ -273,7 +269,7 @@ "role": "node_gpu", "container": { "type": "kvm", - "image": "img-b1mwsrtk", + "image": "img-lv1ts5gs", "zone": "pek3" }, "instance_class": {{cluster.node_gpu.instance_class}}, @@ -309,9 +305,8 @@ }, "destroy": { "post_stop_service": true, - "allow_force": true, - "cmd": "true", - "timeout": 1200 + "allow_force": false, + "cmd": "true" }, "restart": { "cmd": "appctl restart" @@ -335,7 +330,7 @@ "role": "client", "container": { "type": "kvm", - "image": "img-6u9kpu76", + "image": "img-u7twid3l", "zone": "pek3" }, "instance_class": {{cluster.client.instance_class}}, @@ -382,7 +377,7 @@ "action_timeout_sec": 30, "healthy_threshold": 3, "unhealthy_threshold": 3, - "check_cmd": "appctl check", + "check_cmd": "appctl check && appctl reloadKsEip", "action_cmd": "appctl revive" }, "monitor": { @@ -472,3 +467,4 @@ } } } + diff --git a/app/locale/en.json b/app/locale/en.json index 9ce44890..b1f33329 100644 --- a/app/locale/en.json +++ b/app/locale/en.json @@ -1,8 +1,8 @@ { - "Get KubeSphere Console": "KubeSphere Console URL (default username:admin@kubesphere.io password:P@88w0rd); please change the password after the first login.", - "KS is 
not installed.": "KubeSphere is not installed.\nTo install KubeSphere, you can refer to [docs](https://docs.qingcloud.com/product/container/qke/), or [submit a ticket](https://console.qingcloud.com/tickets/) for support.", - "Using master node IP. Please try again later when external IP is ready.": "The LB with EIP is being created, please refresh this tab later; currently the IP of the first control plane is displayed; if this message appears for a long time, please check the ks-console service under kubesphere-system namespace", - "API access key id": "QingCloud IaaS [API Access Key](https://console.qingcloud.com/access_keys/), which will be used to create QingCloud resources, such as load balancers, volumes, etc.", - "Whether to install kubesphere": "Whether to install KubeSphere, KubeSphere and monitoring components will be installed by default;To install KubeSphere, you can refer to [docs](https://docs.qingcloud.com/product/container/qke/), or [submit a ticket](https://console.qingcloud.com/tickets/) for support.", - "Resource Group Description": "The resource configuration of the service. Single master cluster cannot upgrade to HA mster cluster." + "Get KubeSphere Console": "KubeSphere Console URL (default username:admin@kubesphere.io password:P@88w0rd); please change the password after the first login.", + "KS is not installed.": "KubeSphere is not installed.\nTo install KubeSphere, you can refer to [docs](https://docs.qingcloud.com/product/container/qke/), or [submit a ticket](https://console.qingcloud.com/tickets/) for support.", + "Using master node IP. Please try again later when external IP is ready.": "The LB with EIP is being created, please refresh this tab later; currently the IP of the first control plane is displayed; if this message appears for a long time, please check the ks-console service under kubesphere-system namespace", + "API access key id": "QingCloud IaaS [API Access Key](https://console.qingcloud.com/access_keys/), which will be used to create QingCloud resources, such as load balancers, volumes, etc.", + "Whether to install kubesphere": "Whether to install KubeSphere, KubeSphere and monitoring components will be installed by default; To install KubeSphere, you can refer to [docs](https://docs.qingcloud.com/product/container/qke/), or [submit a ticket](https://console.qingcloud.com/tickets/) for support.", + "Resource Group Description": "The resource configuration of the service. Single master cluster cannot upgrade to HA master cluster."
} diff --git a/app/locale/zh-cn.json b/app/locale/zh-cn.json index 0ec1f5e3..1a9891c8 100644 --- a/app/locale/zh-cn.json +++ b/app/locale/zh-cn.json @@ -1,175 +1,176 @@ { - "QKE properties": "QKE 属性", - "The name of the QKE cluster": "QKE 集群名称", - "name": "名称", - "description": "描述", - "Description of QKE cluster": "QKE 集群描述", - "VxNet": "私有网络", - "master": "主节点", - "master properties": "主节点属性", - "Memory": "内存", - "memory of each node (in MB)": "每个节点的内存", - "count": "数量", - "Number of master for the cluster to create": "主节点数量", - "resource type": "资源类型", - "volume class": "持久存储卷类型", - "The volume class": "持久存储卷类型", - "volume size": "硬盘大小", - "The volume size for each instance": "每个机器的硬盘大小", - "load balancer": "负载均衡器", - "Choose an loadbalancer service to use": "选择一个负载均衡器", - "node properties": "节点的属性", - "standard node": "基础型节点", - "high-performance node": "企业型节点", - "standard node properties": "基础型节点属性", - "high-performance node properties": "企业型节点属性", - "application configuration properties": "应用配置属性", - "The vxnet for pod network": "给 Pod 预留网段", - "CPUs of each node": "每个节点 CPU", - "ssd node properties": "企业型节点属性", - "Number of nodes for the cluster to create": "要创建的工作节点数量", - "Number of log nodes for the cluster to create, set it to zero if you want to use external ELK service": "要创建的日志节点数量,**集群创建后不能增删日志节点**,如使用外部 ELK 服务,可将其设为0", - "Number of client nodes for the cluster to create": "要创建的客户端节点数量", - "Choose a vxnet to join": "选择集群要加入的私网", - "DNS Domain": "集群内 DNS 域名", - "the dns domain used by k8s services": "集群内的 DNS 域名,用于 Kubernetes Services", - "gpu node": "GPU 节点", - "client": "客户端节点", - "CPU model of each node": "CPU 体系架构", - "GPUs of each node": "每个节点 GPU 数量", - "GPU class": "GPU 类型,比如 0(NVIDIA Tesla P100)", - "Get KubeSphere Console": "KubeSphere 控制台链接 (初始登录账号:admin@kubesphere.io 密码:P@88w0rd),登录后请及时修改密码。", - "KS is not installed.": "此集群没有安装 KubeSphere。\n若您需安装 KubeSphere,可以[参考文档](https://docs.qingcloud.com/product/container/qke/)自行安装,或者[提交工单](https://console.qingcloud.com/tickets/)联系我们协助您安装", - "Failed to retrieve ks-console info. Please try again later.": "暂时无法获取 ks-console 服务的信息,请重试。", - "Using master node IP. Please try again later when external IP is ready.": "绑定 EIP 的负载均衡器正在创建中,请在几分钟后重试;当前使用主节点的 IP 地址供临时使用;如果长时间显示此提示,可通过 kubectl 或者 KubeSphere 界面查看 kubesphere-system 项目下 ks-console 服务的相关信息分析原因", - "Something went wrong, but you should ensure ks-console service in kubesphere-system is of type 'LoadBalancer' or 'NodePort'.": "请调整 kubesphere-system 项目下 ks-console 服务的状态为 NodePort 或 LoadBalancer 后再重试。", - "ELK service": "ELK 服务", - "Choose an external ELK cluster to store QKE logging data, leave empty if you choose to use internal ES service in QKE cluster. For resizing ES, It is recommended that using external ELK service": "请添加依赖的 ELK 5.5.1 以上集群用于存储 QKE 集群日志数据,否则将使用集群内自带 ES 服务。为了便于扩容,推荐添加依赖的 ELK 服务", - "etcd service": "etcd 服务", - "Choose an etcd cluster to store QKE cluster data, leave empty if you choose to use internal etcd service in QKE cluster. For better performance, It is recommended that using external etcd service": "请添加依赖的 etcd 3.2.24 集群用于存储 QKE 集群数据,否则将使用集群内自带 etcd 服务. 
为了更强性能,推荐添加依赖的 etcd 服务", - "The vxnets for Kubernetes pod, use a blank to split multi vxnet": "Kubernetes 的 pod 所在的私有网络,使用空格分割多个私有网络 ID。创建后只能增加,请勿删除,否则会导致网络问题", - "max-pods": "最大 pod 数量", - "Number of Pods that can run on each working node/kubelet": "每个节点上可运行的最大 pod 数量,默认为 120", - "update": "更新", - "API access key id": "QingCloud IaaS [API 密钥](https://console.qingcloud.com/access_keys/),此密钥将被用来创建云平台的资源,比如负载均衡器、PV 挂盘等", - "Private registry": "私有镜像服务器", - "The URL of private Docker registry": "私有镜像服务器地址", - "Username of Docker registry": "镜像服务器用户名", - "Password of Docker registry": "镜像服务器密码", - "The username of Docker registry, could be dockerhub.qingcloud.com or your private registry": "用于 dockerhub.qingcloud.com 或私有镜像服务器的用户名", - "The password of Docker registry": "用于 dockerhub.qingcloud.com 或私有镜像服务器的的密码", - "The insecure Docker registries, use a blank to split multi registry": "需要通过非安全的方式(http)访问的 Docker 仓库,多个地址通过空格切分", - "IP address and netmask for Docker bridge network": "Docker 网桥地址", - "IP address and netmask for Docker bridge network, using standard CIDR notation. For example: 192.168.1.5/24": "Docker 网桥的 IP 地址和子网掩码,请按照标准的 CIDR 格式填写。默认为 172.30.0.1/16;注意:修改此项需要通过集群菜单依次重启所有 k8s 节点(包括主节点、基础型节点、企业型节点和 GPU 节点),请在业务低谷时操作", - "Keep log days": "日志保留天数", - "docker_prune": "清理 Docker", - "Docker prune days": "Docker 清理天数", - "only remove containers, images, and networks created before given days": "只清理指定天数之前创建的 Docker 容器、镜像、网络等闲置资源,默认为 3 表示只清理创建时间超过 3 天(72 小时)的闲置资源", - "Legible K8s Node Names": "重命名 k8s 节点", - "Using legible node names, e.g. master1, worker-p001, worker-s002, instead of the default random generated ones": "默认为 true 表示使用易读的 k8s 节点名,比如 master1(一号主节点),worker-p001(一号基础型工作节点),worker-s002(二号企业型工作节点),false 表示使用系统自动生成的随机节点名", - "renew_certs": "更新证书", - "Keep log on log node for x days, 0 indicates that the log is not cleared. 
This is a KubeSphere parameter": "KubeSphere 使用 ElasticSearch 存储日志,可通过配置此参数自动清理指定天数之前的日志,0 表示不自动清理", - "Defined the maximum number of days to retain old audit log files": "自动清理指定天数之前产生的审计日志文件,默认为 7 表示自动清理创建时间超过 7 天的审计日志文件;注意:在开启审计日志的情况下修改此项会重启所有主节点的 kube-apiserver 从而导致 kube-apiserver 服务短暂中断,建议在业务低谷时间操作", - "the maximum size in megabytes of the audit log file before it gets rotated": "自动轮转达到指定文件大小的审计日志文件,以 MB 为单位,默认为 1 表示当审计日志文件达到 1 MB 以后触发自动轮转;注意:在开启审计日志的情况下修改此项会重启所有主节点的 kube-apiserver 从而导致 kube-apiserver 服务短暂中断,建议在业务低谷时间操作", - "the maximum number of audit log files to retain": "最多保留指定数量的审计日志文件,默认为 100 表示最多保留最近的 100 个审计日志文件;注意:在开启审计日志的情况下修改此项会重启所有主节点的 kube-apiserver 从而导致 kube-apiserver 服务短暂中断,建议在业务低谷时间操作", - "Keep Docker for x days, 0 indicates that the log is not cleared": "每天凌晨 2 点 35 分自动清理指定天数之前创建的 Docker 容器、镜像、网络等闲置资源,默认为 3 表示只清理创建时间超过 3 天(72 小时)的闲置资源;0 表示不自动清理", - "Kubernetes log level": "Kubernetes 日志级别", - "The log level for Kubernetes system": "Kubernetes 的日志级别,数字越大记录越详细,也会占用更多日志空间。遇到问题可以调整日志级别进行 debug;注意:修改此项会重启所有 k8s 组件(kube-apiserver、kube-controller-manager、kube-scheduler、kube-proxy)从而导致服务短暂中断,建议在业务低谷时间操作", - "kube-proxy iptables parameters": "kube-proxy iptables 参数", - "parameters for kube-proxy iptables": "kube-proxy iptables 参数,自定义配置,支持多项配置,需严格遵循每行配置一项且保持 `key: value` 的格式,配置示例:`masqueradeAll: true`, 其他配置项请参考 [kube-proxy iptables configurations](https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyIPTablesConfiguration)", - "kube-controller-manager parameters": "kube-controller-manager 参数", - "parameters for kube controller manager": "kube-controller-manager 参数,自定义配置,支持多项配置,需严格遵循每行配置一项且保持 `key: value` 的格式,配置示例:`add-dir-header: \"true\"`,其他配置项请参考官网文档 [kube-controller-manager configurations](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/),使用时请去掉 `--` 符号", - "kube-apiserver parameters": "kube-apiserver 参数", - "parameters for kube apiserver": "kube-apiserver 参数,自定义配置,支持多项配置,需严格遵循每行配置一项且保持 `key: value` 的格式,配置示例:`add-dir-header: \"true\"`,其他配置项请参考官网文档 [kube-apiserver configurations](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/),使用时请去掉 `--` 符号", - "kube-scheduler parameters": "kube-scheduler 参数", - "parameters for kube scheduler": "kube-scheduler 参数,自定义配置,支持多项配置,需严格遵循每行配置一项且保持 `key: value` 的格式,配置示例:`add-dir-header: \"true\"`,其他配置项请参考官网文档 [kube-scheduler configurations](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/),使用时请去掉 `--` 符号", - "Kubelet Parameters": "kubelet 参数", - "parameters for kubelet": "kubelet 参数,自定义配置,支持多项配置,需严格遵循每行配置一项且保持 `key=value` 的格式,配置示例:`--add-dir-header=true`,默认值 `--eviction-hard=memory.available<5%` 表示当节点剩余内存不足 5% 时 kubelet 会立即关掉选中的容器组来释放内存,`--eviction-soft=memory.available<10%` 与 `--eviction-soft-grace-period=memory.available=2m` 表示当可用内存连续 2 分钟不足 10% 时,会平滑关闭(graceful shutdown)选中的容器组;注意 `--eviction-soft` 与 `--eviction-soft-grace-period` 必须同时指定,否则 kubelet 将无法启动;其他配置项请参考官方文档 [kubelet configurations](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/),使用时请保留 `--` 符号", - "kubelet_running_container_count": "正在运行的容器数量", - "kubelet_running_pod_count": "正在运行的 Pod 数量", - "kubelet_running_containers": "正在运行的容器数量", - "kubelet_running_pods": "正在运行的 Pod 数量", - "Fluent forward server": "Fluent 日志转发服务", - "The fluent log server address to forward server, format host:port": "Fluent 日志服务器,用于将 Kubernetes 收集到的日志转发到用户自定义的日志服务,格式 host:port", - "The Docker hub registry mirrors, use a blank to split multi registry mirrors": "完整的 Docker 
镜像服务地址,比如 https://mirror.harbor.local;多个地址之间用空格隔开", - "Install KubeSphere": "安装 KubeSphere", - "Whether to install kubesphere": "选择是否安装 KubeSphere,默认为 true 表示安装 KubeSphere 以及监控组件;若您需安装 KubeSphere,可以[参考文档](https://docs.qingcloud.com/product/container/qke/)自行安装,或者[提交工单](https://console.qingcloud.com/tickets/)联系我们协助您安装", - "Elastic Search server": "Elastic Search 服务器", - "User SSH Key": "用户 SSH 公钥", - "User's SSH key to connect to client node": "在此填入 SSH 公钥来通过 SSH 连接到集群节点", - "The Elastic Search server address, format host:port": "Elastic Search 服务器地址,格式为 host:port", - "notice_when_upgrade": "升级会消耗较多的磁盘 IO 和 CPU,请在业务低谷期进行升级;自行 SSH 到集群节点上做的改动会在重启后抹除,如果有此类数据或文件请先备份后再升级;请确保客户端节点处于开机运行状态;请确保所有 K8s 节点数据盘至少有 50G 可用空间;原有的 K8s audit policy 配置将被重置,请在升级后通过集群参数重新配置;升级一般需要 1〜2 小时,请耐心等待", - "err_code128": "无法获取 ks-installer 容器组信息,请重新创建集群,如需协助请工单联系", - "err_code129": "无法获取 ks-installer 容器组日志,请重新创建集群,如需协助请工单联系", - "err_code130": "KubeSphere 安装失败,可通过 kubectl -n kubesphere-system logs -f $(kubectl -n kubesphere-system get po -l app=ks-install -oname) 查看日志获取详细原因,如需协助请工单联系", - "err_code131": "KubeSphere 安装超时,可通过 kubectl -n kubesphere-system logs -f $(kubectl -n kubesphere-system get po -l app=ks-install -oname) 查看日志获取详细信息,如需协助请工单联系", - "err_code132": "KubeSphere 安装完成,但某些组件不正常,可通过 kubectl -n kubesphere-system logs -f $(kubectl -n kubesphere-system get po -l app=ks-install -oname) 查看日志获取详细信息,如需协助请工单联系", - "err_code133": "无法为 HA 集群配置负载均衡器,如果重新创建集群后仍然失败,请通过工单联系", - "err_code134": "迁移待删除节点上的工作负载时出错,为了保证业务数据安全,请每次只删除一个节点并确保集群的剩余节点有足够资源承载现有业务,也可以先手动执行 kubectl drain nodes xxx 命令迁移所有工作负载以后再删除节点,如持续失败请通过工单联系", - "err_code135": "为了确保集群可以正常服务,请至少保留两个工作节点", - "err_code136": "迁移待删除节点上的工作负载时出错,已尝试回退此节点的状态但执行 kubectl uncordon 时出错,请检查主节点的 kube-apiserver 服务是否正常,可以在服务稳定后重新尝试删除节点,如需协助请工单联系", - "err_code137": "请勿删除主节点", - "err_code138": "无法迁移 Docker 镜像文件到数据盘,如需协助请工单联系", - "err_code139": "检测到集群参数配置里包含无效的私有网络 ID,请修改后重试,如需协助请工单联系", - "err_code140": "检测到集群的最大可绑网卡数超过了选择的私有网络承载数,请通过集群参数配置更多私有网络或者减少节点,确保满足条件:hostnic 最大网卡数 * K8s 节点数 <= 私有网络数 * 252(单个网络最大网卡数),如需协助请工单联系", - "err_code141": "暂时无法获取私有网络信息,请重试,如需协助请工单联系", - "err_code142": "检测到集群所在的私有网络与选择的 hostnic 私有网络不处于同一个 VPC,请通过集群参数修改为同一个 VPC 下的私有网络,并确保满足条件:hostnic 最大网卡数 * K8s 节点数 <= 私有网络数 * 252(单个网络最大网卡数),如需协助请工单联系", - "err_code143": "检测到某些节点的 DNS 解析异常,请重试,如需协助请工单联系", - "err_code150": "检测到某些节点没有足够的可用磁盘空间,请确保所有 k8s 节点至少有 50G 空闲磁盘空间再尝试升级", - "err_code151": "检测到某些节点为非就绪(Ready)状态,请确保所有节点均包含就绪(Ready)状态后再升级;节点状态可通过 kubectl get nodes 查看", - "network plugin": "网卡插件", - "Choose a network plugin": "选择网卡插件", - "hostnic maxNic": "hostnic 最大网卡数", - "hostnic max nics per host": "定义 hostnic 插件在一台主机中最多创建多少个网卡,默认为 60,更多说明可参考 [hostnic 官网](https://github.com/yunify/hostnic-cni)", - "hostnic vxnets": "hostnic 私有网络", - "hostnic vxnets, one vxnet per line": "一个 vxnet 最多能够容纳 252 张网卡,hostnic 根据最大网卡数将 vxnet 均分给 K8s 节点;此处可填写多个私有网络 ID(以 `vxnet-` 开头),每行一个;请在此处填写与此 QKE 集群处于同一个 VPC 下的私有网络以避免网络不通;建议准备未被使用的私有网络专门供此 QKE 集群使用,并填写至少两个私有网络以避免容器组无法分配到足够的网卡而无法启动,具体计算方法为:hostnic 最大网卡数 * K8s 节点数 <= 私有网络数 * 252(单个网络最大网卡数);更多说明可参考 [hostnic 官网](https://github.com/yunify/hostnic-cni)", - "setting hairpin to true to forward pod traffic on the same virtual machine to physical machine; default to false, indicating pod traffic of the same virtual machine will not be forwarded to physical machines": "默认情况下,同节点上的 pod 流量只在虚拟机内部转发, 如果希望同节点 pod 流量经过宿主机转发,那么设置为 true", - "SSD node label": "企业型节点标记", - "HDD node label": "基础型节点标记", - "Port range of each node": "NodePort 范围", - "A port range to reserve for services with NodePort visibility. 
Example: '30000-32767'. Inclusive at both ends of the range, it is important to include 30880 port when `Install KubeSphere` is `true`.": "每个节点可分配的 NodePort 范围,例如 ‘30000-32767’,由于 kubesphere 的对外端口号是 30880,在 `安装 KubeSphere` 为 `true` 的情况下,请保证 30880 在该范围内。", - "Resource Configuration": "快速配置", - "standard dev": "基础型开发环境", - "high-performance test": "企业型测试环境", - "standard prod": "基础型生产环境", - "high-performance prod": "企业型生产环境", - "Resource Group Description": "请选择合适的预制资源配置类型,快速定义集群配置。也可根据自身需求自定义节点配置。非高可用集群不可以升级到高可用集群", - "host aliases": "主机 hosts 记录", - "Set host aliases": "自定义添加到 /etc/hosts 文件的记录,比如 '192.168.2.2 host1,192.168.2.3 host2',多条记录用逗号分割", - "The insecure Docker registry, use a blank to split multi registry": "需要通过非安全的 HTTP 或不受信任的 HTTPS 访问的 Docker 仓库,比如 mirror.harbor.local,多个地址通过空格切分", - "KubeSphere Dashboard EIP ID": "KubeSphere 控制台 EIP", - "IP id for exposing KubeSphere Dashboard. If not installed KubeSphere, Shall not set this field.": "如果希望通过公网 LoadBalancer 方式访问 KubeSphere 控制台,可在此选择可用的 EIP,将为此 EIP 自动创建一个负载均衡器并绑定;请保证集群至少有一个工作节点,否则无法通过此方法访问 KubeSphere 控制台;如果没安装 KubeSphere,无需设置此参数", - "Enable Web Console": "开启文件查看器", - "logging files can be viewed from the web console": "用户可通过文件查看器查看、上传或下载审计规则文件和日志文件,在浏览器里通过 http://[主节点 IP] 访问", - "Web Console Username": "文件查看器用户名", - "The username of the admin web console, can contain uppercase & lowercase letters, digits with 4-12 characters in total": "可以由大小写字母或数字组成,长度为 4 到 12 个字符", - "Web Console Password": "文件查看器密码", - "The password of the admin web console, can contain uppercase & lowercase letters, digits, and the following 10 special characters !@#$%^&*() with 4-32 characters in total": "可以由大小写字母、数字或特殊字符 !@#$%^&*() 组成,长度为 4 到 12 个字符,默认密码为 admin,请尽快修改", - "kubesphere_console": "KubeSphere 控制台链接", - "ks_console_notes": "备注", - "Extra Modules": "选装组件", - "extra modules to install with KubeSphere": "选装希望安装的组件,注意:此选项只有在选项 [安装 KubeSphere] 为 true 时才会生效,而且此选项只负责安装组件,清空此选项并不会执行卸载操作,如需卸载请手动操作;安装这些组件可能需要较长时间,请耐心等待", - "metrics-server": "Metrics Server", - "networkpolicy": "网络策略", - "ks-auditing": "审计", - "ks-events": "事件", - "ks-logging": "日志", - "ks-openpitrix": "OpenPitrix 应用商店", - "ks-devops": "DevOps", - "ks-servicemesh": "服务治理", - "ks-alerting": "告警", - "Access key id": "API 密钥", - "Pod Subnet": "Pod 网段", - "Service Subnet": "Service 网段", - "Choose a proxy mode": "选择一种 Proxy Mode", - "EIP ID for exposing kube-apiserver; if set, we'll create an LB and associate this EIP to it": "如果希望通过公网访问 K8s apiserver,请在此处填写可用的 Kubernetes EIP ID,系统将会自动创建一个 LB 并绑定此 EIP", - "Kubernetes EIP Address": "Kubernetes EIP 地址", - "Kubernetes EIP Port": "Kubernetes EIP 端口", - "Get Kubeconfig": "如果用户希望本地使用 kubectl 通过 EIP 访问 Kubernetes Apiserver ,请在 QKE 配置参数中填写 Kubernetes EIP 地址,需要用户自己确保 EIP 和 Kubernetes Apiserver 的连通性,并将 kubeconfig 中 server 的 IP 地址修改为 EIP 地址,端口修改为适当的端口;注意:此处的 kubeconfig 拥有集群管理的最高权限,请谨慎使用,请勿执行类似 kubectl delete nodes 等高危操作!", - "EIP address for accessing remote Kubernetes cluster, using Dotted Decimal Notation. For example: 139.198.123.23": "Kubernetes 的外网访问地址,请按照 IPv4 格式填写。例如:139.198.123.23;注意:修改此项会重启所有主节点的 kube-apiserver 从而导致 kube-apiserver 服务短暂中断,建议在业务低谷时间操作", - "Pod Subnet, using standard CIDR notation. For example: 10.10.0.0/16": "Pod 网段,请按照标准的 CIDR 格式填写。例如:10.10.0.0/16", - "Service Subnet, using standard CIDR notation. 
For example: 10.96.0.0/16": "Service 网段,请按照标准的 CIDR 格式填写。例如:10.96.0.0/16", - "K8s audit policy in YAML format; if it is empty, no events are logged": "K8s audit policy,以 YAML 格式表示,留空此项表示禁用 K8s 审计功能,详细配置请参考文档 [Audit policy](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#audit-policy)", - "K8s audit webhook configurations in YAML format; if it is empty, events will be logged into files under /etc/kubernetes/audit/logs/": "K8s audit webhook,以 YAML 格式表示,留空此项表示把日志存储在主节点本地目录 /etc/kubernetes/audit/logs/ 下,详细配置请参考文档 [Webhook backend](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#webhook-backend)", - "External port for accessing remote Kubernetes cluster, default value 6443": "Kubernetes 的外网访问端口,默认是 6443" + "QKE properties": "QKE 属性", + "The name of the QKE cluster": "QKE 集群名称", + "name": "名称", + "description": "描述", + "Description of QKE cluster": "QKE 集群描述", + "VxNet": "私有网络", + "master": "主节点", + "master properties": "主节点属性", + "Memory": "内存", + "memory of each node (in MB)": "每个节点的内存", + "count": "数量", + "Number of master for the cluster to create": "主节点数量", + "resource type": "资源类型", + "volume class": "持久存储卷类型", + "The volume class": "持久存储卷类型", + "volume size": "硬盘大小", + "The volume size for each instance": "每个机器的硬盘大小", + "load balancer": "负载均衡器", + "Choose an loadbalancer service to use": "选择一个负载均衡器", + "node properties": "节点的属性", + "standard node": "基础型节点", + "high-performance node": "企业型节点", + "standard node properties": "基础型节点属性", + "high-performance node properties": "企业型节点属性", + "application configuration properties": "应用配置属性", + "The vxnet for pod network": "给 Pod 预留网段", + "CPUs of each node": "每个节点 CPU", + "ssd node properties": "企业型节点属性", + "Number of nodes for the cluster to create": "要创建的工作节点数量", + "Number of log nodes for the cluster to create, set it to zero if you want to use external ELK service": "要创建的日志节点数量,**集群创建后不能增删日志节点**,如使用外部 ELK 服务,可将其设为0", + "Number of client nodes for the cluster to create": "要创建的客户端节点数量", + "Choose a vxnet to join": "选择集群要加入的私网", + "DNS Domain": "集群内 DNS 域名", + "the dns domain used by k8s services": "集群内的 DNS 域名,用于 Kubernetes Services", + "gpu node": "GPU 节点", + "client": "客户端节点", + "CPU model of each node": "CPU 体系架构", + "GPUs of each node": "每个节点 GPU 数量", + "GPU class": "GPU 类型,比如 0(NVIDIA Tesla P100)", + "Get KubeSphere Console": "KubeSphere 控制台链接 (初始登录账号:admin@kubesphere.io 密码:P@88w0rd),登录后请及时修改密码。", + "KS is not installed.": "此集群没有安装 KubeSphere。\n若您需安装 KubeSphere,可以[参考文档](https://docs.qingcloud.com/product/container/qke/)自行安装,或者[提交工单](https://console.qingcloud.com/tickets/)联系我们协助您安装", + "Failed to retrieve ks-console info. Please try again later.": "暂时无法获取 ks-console 服务的信息,请重试。", + "Using master node IP. Please try again later when external IP is ready.": "绑定 EIP 的负载均衡器正在创建中,请在几分钟后重试;当前使用主节点的 IP 地址供临时使用;如果长时间显示此提示,可通过 kubectl 或者 KubeSphere 界面查看 kubesphere-system 项目下 ks-console 服务的相关信息分析原因", + "Something went wrong, but you should ensure ks-console service in kubesphere-system is of type 'LoadBalancer' or 'NodePort'.": "请调整 kubesphere-system 项目下 ks-console 服务的状态为 NodePort 或 LoadBalancer 后再重试。", + "ELK service": "ELK 服务", + "Choose an external ELK cluster to store QKE logging data, leave empty if you choose to use internal ES service in QKE cluster. 
For resizing ES, It is recommended that using external ELK service": "请添加依赖的 ELK 5.5.1 以上集群用于存储 QKE 集群日志数据,否则将使用集群内自带 ES 服务。为了便于扩容,推荐添加依赖的 ELK 服务", + "etcd service": "etcd 服务", + "Choose an etcd cluster to store QKE cluster data, leave empty if you choose to use internal etcd service in QKE cluster. For better performance, It is recommended that using external etcd service": "请添加依赖的 etcd 3.2.24 集群用于存储 QKE 集群数据,否则将使用集群内自带 etcd 服务. 为了更强性能,推荐添加依赖的 etcd 服务", + "The vxnets for Kubernetes pod, use a blank to split multi vxnet": "Kubernetes 的 pod 所在的私有网络,使用空格分割多个私有网络 ID。创建后只能增加,请勿删除,否则会导致网络问题", + "max-pods": "最大 pod 数量", + "Number of Pods that can run on each working node/kubelet": "每个节点上可运行的最大 pod 数量,默认为 120", + "update": "更新", + "API access key id": "QingCloud IaaS [API 密钥](https://console.qingcloud.com/access_keys/),此密钥将被用来创建云平台的资源,比如负载均衡器、PV 挂盘等", + "Private registry": "私有镜像服务器", + "The URL of private Docker registry": "私有镜像服务器地址", + "Username of Docker registry": "镜像服务器用户名", + "Password of Docker registry": "镜像服务器密码", + "The username of Docker registry, could be dockerhub.qingcloud.com or your private registry": "用于 dockerhub.qingcloud.com 或私有镜像服务器的用户名", + "The password of Docker registry": "用于 dockerhub.qingcloud.com 或私有镜像服务器的的密码", + "The insecure Docker registries, use a blank to split multi registry": "需要通过非安全的方式(http)访问的 Docker 仓库,多个地址通过空格切分", + "IP address and netmask for Docker bridge network": "Docker 网桥地址", + "IP address and netmask for Docker bridge network, using standard CIDR notation. For example: 192.168.1.5/24": "Docker 网桥的 IP 地址和子网掩码,请按照标准的 CIDR 格式填写。默认为 172.30.0.1/16;注意:修改此项需要通过集群菜单依次重启所有 k8s 节点(包括主节点、基础型节点、企业型节点和 GPU 节点),请在业务低谷时操作", + "Keep log days": "日志保留天数", + "docker_prune": "清理 Docker", + "Docker prune days": "Docker 清理天数", + "only remove containers, images, and networks created before given days": "只清理指定天数之前创建的 Docker 容器、镜像、网络等闲置资源,默认为 3 表示只清理创建时间超过 3 天(72 小时)的闲置资源", + "Legible K8s Node Names": "重命名 k8s 节点", + "Using legible node names, e.g. master1, worker-p001, worker-s002, instead of the default random generated ones": "默认为 true 表示使用易读的 k8s 节点名,比如 master1(一号主节点),worker-p001(一号基础型工作节点),worker-s002(二号企业型工作节点),false 表示使用系统自动生成的随机节点名", + "renew_certs": "更新证书", + "Keep log on log node for x days, 0 indicates automatically clean up. 
This is a KubeSphere parameter": "KubeSphere 使用 ElasticSearch 存储日志,可通过配置此参数自动清理指定天数之前的日志,0 表示每日9点自动清理", + "Defined the maximum number of days to retain old audit log files": "自动清理指定天数之前产生的审计日志文件,默认为 7 表示自动清理创建时间超过 7 天的审计日志文件;注意:在开启审计日志的情况下修改此项会重启所有主节点的 kube-apiserver 从而导致 kube-apiserver 服务短暂中断,建议在业务低谷时间操作", + "the maximum size in megabytes of the audit log file before it gets rotated": "自动轮转达到指定文件大小的审计日志文件,以 MB 为单位,默认为 1 表示当审计日志文件达到 1 MB 以后触发自动轮转;注意:在开启审计日志的情况下修改此项会重启所有主节点的 kube-apiserver 从而导致 kube-apiserver 服务短暂中断,建议在业务低谷时间操作", + "the maximum number of audit log files to retain": "最多保留指定数量的审计日志文件,默认为 100 表示最多保留最近的 100 个审计日志文件;注意:在开启审计日志的情况下修改此项会重启所有主节点的 kube-apiserver 从而导致 kube-apiserver 服务短暂中断,建议在业务低谷时间操作", + "Keep Docker for x days, 0 indicates that the log is not cleared": "每天凌晨 2 点 35 分自动清理指定天数之前创建的 Docker 容器、镜像、网络等闲置资源,默认为 3 表示只清理创建时间超过 3 天(72 小时)的闲置资源;0 表示不自动清理", + "Kubernetes log level": "Kubernetes 日志级别", + "The log level for Kubernetes system": "Kubernetes 的日志级别,数字越大记录越详细,也会占用更多日志空间。遇到问题可以调整日志级别进行 debug;注意:修改此项会重启所有 k8s 组件(kube-apiserver、kube-controller-manager、kube-scheduler、kube-proxy)从而导致服务短暂中断,建议在业务低谷时间操作", + "kube-proxy iptables parameters": "kube-proxy iptables 参数", + "parameters for kube-proxy iptables": "kube-proxy iptables 参数,自定义配置,支持多项配置,需严格遵循每行配置一项且保持 `key: value` 的格式,配置示例:`masqueradeAll: true`, 其他配置项请参考 [kube-proxy iptables configurations](https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyIPTablesConfiguration)", + "kube-controller-manager parameters": "kube-controller-manager 参数", + "parameters for kube controller manager": "kube-controller-manager 参数,自定义配置,支持多项配置,需严格遵循每行配置一项且保持 `key: value` 的格式,配置示例:`add-dir-header: \"true\"`,其他配置项请参考官网文档 [kube-controller-manager configurations](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/),使用时请去掉 `--` 符号", + "kube-apiserver parameters": "kube-apiserver 参数", + "parameters for kube apiserver":"kube-apiserver 参数,自定义配置,支持多项配置,需严格遵循每行配置一项且保持 `key: value` 的格式,配置示例:`add-dir-header: \"true\"`,其他配置项请参考官网文档 [kube-apiserver configurations](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/),使用时请去掉 `--` 符号", + "kube-scheduler parameters": "kube-scheduler 参数", + "parameters for kube scheduler": "kube-scheduler 参数,自定义配置,支持多项配置,需严格遵循每行配置一项且保持 `key: value` 的格式,配置示例:`add-dir-header: \"true\"`,其他配置项请参考官网文档 [kube-scheduler configurations](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/),使用时请去掉 `--` 符号", + "Kubelet Parameters": "kubelet 参数", + "parameters for kubelet": "kubelet 参数,自定义配置,支持多项配置,需严格遵循每行配置一项且保持 `key=value` 的格式,配置示例:`--add-dir-header=true`,默认值 `--eviction-hard=memory.available<5%` 表示当节点剩余内存不足 5% 时 kubelet 会立即关掉选中的容器组来释放内存,`--eviction-soft=memory.available<10%` 与 `--eviction-soft-grace-period=memory.available=2m` 表示当可用内存连续 2 分钟不足 10% 时,会平滑关闭(graceful shutdown)选中的容器组;注意 `--eviction-soft` 与 `--eviction-soft-grace-period` 必须同时指定,否则 kubelet 将无法启动;其他配置项请参考官方文档 [kubelet configurations](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/),使用时请保留 `--` 符号", + "kubelet_running_container_count": "正在运行的容器数量", + "kubelet_running_pod_count": "正在运行的 Pod 数量", + "kubelet_running_containers": "正在运行的容器数量", + "kubelet_running_pods": "正在运行的 Pod 数量", + "Fluent forward server": "Fluent 日志转发服务", + "The fluent log server address to forward server, format host:port": "Fluent 日志服务器,用于将 Kubernetes 收集到的日志转发到用户自定义的日志服务,格式 host:port", + "The Docker hub registry mirrors, use a blank to split multi registry mirrors": "完整的 Docker 
镜像服务地址,比如 https://mirror.harbor.local;多个地址之间用空格隔开", + "Install KubeSphere": "安装 KubeSphere", + "Whether to install kubesphere": "选择是否安装 KubeSphere,默认为 true 表示安装 KubeSphere 以及监控组件;若您需安装 KubeSphere,可以[参考文档](https://docs.qingcloud.com/product/container/qke/)自行安装,或者[提交工单](https://console.qingcloud.com/tickets/)联系我们协助您安装", + "Elastic Search server": "Elastic Search 服务器", + "User SSH Key": "用户 SSH 公钥", + "User's SSH key to connect to client node": "在此填入 SSH 公钥来通过 SSH 连接到集群节点", + "The Elastic Search server address, format host:port": "Elastic Search 服务器地址,格式为 host:port", + "notice_when_upgrade": "升级会消耗较多的磁盘 IO 和 CPU,请在业务低谷期进行升级;自行 SSH 到集群节点上做的改动会在重启后抹除,如果有此类数据或文件请先备份后再升级;请确保客户端节点处于开机运行状态;请确保所有 K8s 节点数据盘至少有 50G 可用空间;原有的 K8s audit policy 配置将被重置,请在升级后通过集群参数重新配置;升级一般需要 1〜2 小时,请耐心等待", + "err_code128": "无法获取 ks-installer 容器组信息,请重新创建集群,如需协助请工单联系", + "err_code129": "无法获取 ks-installer 容器组日志,请重新创建集群,如需协助请工单联系", + "err_code130": "KubeSphere 安装失败,可通过 kubectl -n kubesphere-system logs -f $(kubectl -n kubesphere-system get po -l app=ks-install -oname) 查看日志获取详细原因,如需协助请工单联系", + "err_code131": "KubeSphere 安装超时,可通过 kubectl -n kubesphere-system logs -f $(kubectl -n kubesphere-system get po -l app=ks-install -oname) 查看日志获取详细信息,如需协助请工单联系", + "err_code132": "KubeSphere 安装完成,但某些组件不正常,可通过 kubectl -n kubesphere-system logs -f $(kubectl -n kubesphere-system get po -l app=ks-install -oname) 查看日志获取详细信息,如需协助请工单联系", + "err_code133": "无法为 HA 集群配置负载均衡器,如果重新创建集群后仍然失败,请通过工单联系", + "err_code134": "迁移待删除节点上的工作负载时出错,为了保证业务数据安全,请每次只删除一个节点并确保集群的剩余节点有足够资源承载现有业务,也可以先手动执行 kubectl drain nodes xxx 命令迁移所有工作负载以后再删除节点,如持续失败请通过工单联系", + "err_code135": "为了确保集群可以正常服务,请至少保留两个工作节点", + "err_code136": "迁移待删除节点上的工作负载时出错,已尝试回退此节点的状态但执行 kubectl uncordon 时出错,请检查主节点的 kube-apiserver 服务是否正常,可以在服务稳定后重新尝试删除节点,如需协助请工单联系", + "err_code137": "请勿删除主节点", + "err_code138": "无法迁移 Docker 镜像文件到数据盘,如需协助请工单联系", + "err_code139": "检测到集群参数配置里包含无效的私有网络 ID,请修改后重试,如需协助请工单联系", + "err_code140": "检测到集群的最大可绑网卡数超过了选择的私有网络承载数,请通过集群参数配置更多私有网络或者减少节点,确保满足条件:hostnic 最大网卡数 * K8s 节点数 <= 私有网络数 * 252(单个网络最大网卡数),如需协助请工单联系", + "err_code141": "暂时无法获取私有网络信息,请重试,如需协助请工单联系", + "err_code142": "检测到集群所在的私有网络与选择的 hostnic 私有网络不处于同一个 VPC,请通过集群参数修改为同一个 VPC 下的私有网络,并确保满足条件:hostnic 最大网卡数 * K8s 节点数 <= 私有网络数 * 252(单个网络最大网卡数),如需协助请工单联系", + "err_code143": "检测到某些节点的 DNS 解析异常,请重试,如需协助请工单联系", + "err_code150": "检测到某些节点没有足够的可用磁盘空间,请确保所有 k8s 节点至少有 50G 空闲磁盘空间再尝试升级", + "err_code151": "检测到某些节点为非就绪(Ready)状态,请确保所有节点均包含就绪(Ready)状态后再升级;节点状态可通过 kubectl get nodes 查看", + "network plugin": "网卡插件", + "Choose a network plugin": "选择网卡插件", + "hostnic maxNic": "hostnic 最大网卡数", + "hostnic max nics per host": "定义 hostnic 插件在一台主机中最多创建多少个网卡,默认为 60,更多说明可参考 [hostnic 官网](https://github.com/yunify/hostnic-cni)", + "hostnic vxnets": "hostnic 私有网络", + "hostnic vxnets, one vxnet per line": "一个 vxnet 最多能够容纳 252 张网卡,hostnic 根据最大网卡数将 vxnet 均分给 K8s 节点;此处可填写多个私有网络 ID(以 `vxnet-` 开头),每行一个;请在此处填写与此 QKE 集群处于同一个 VPC 下的私有网络以避免网络不通;建议准备未被使用的私有网络专门供此 QKE 集群使用,并填写至少两个私有网络以避免容器组无法分配到足够的网卡而无法启动,具体计算方法为:hostnic 最大网卡数 * K8s 节点数 <= 私有网络数 * 252(单个网络最大网卡数);更多说明可参考 [hostnic 官网](https://github.com/yunify/hostnic-cni)", + "setting hairpin to true to forward pod traffic on the same virtual machine to physical machine; default to false, indicating pod traffic of the same virtual machine will not be forwarded to physical machines": "默认情况下,同节点上的 pod 流量只在虚拟机内部转发, 如果希望同节点 pod 流量经过宿主机转发,那么设置为 true", + "SSD node label": "企业型节点标记", + "HDD node label": "基础型节点标记", + "Port range of each node": "NodePort 范围", + "A port range to reserve for services with NodePort visibility. 
Example: '30000-32767'. Inclusive at both ends of the range, it is important to include 30880 port when `Install KubeSphere` is `true`." : "每个节点可分配的 NodePort 范围,例如 ‘30000-32767’,由于 kubesphere 的对外端口号是 30880,在 `安装 KubeSphere` 为 `true` 的情况下,请保证 30880 在该范围内。", + "Resource Configuration": "快速配置", + "standard dev": "基础型开发环境", + "high-performance test": "企业型测试环境", + "standard prod": "基础型生产环境", + "high-performance prod": "企业型生产环境", + "Resource Group Description": "请选择合适的预制资源配置类型,快速定义集群配置。也可根据自身需求自定义节点配置。非高可用集群不可以升级到高可用集群", + "host aliases": "主机 hosts 记录", + "Set host aliases": "自定义添加到 /etc/hosts 文件的记录,比如 '192.168.2.2 host1,192.168.2.3 host2',多条记录用逗号分割", + "The insecure Docker registry, use a blank to split multi registry": "需要通过非安全的 HTTP 或不受信任的 HTTPS 访问的 Docker 仓库,比如 mirror.harbor.local,多个地址通过空格切分", + "KubeSphere Dashboard EIP ID": "KubeSphere 控制台 EIP", + "IP id for exposing KubeSphere Dashboard. If not installed KubeSphere, Shall not set this field.": "如果希望通过公网 LoadBalancer 方式访问 KubeSphere 控制台,可在此选择可用的 EIP,将为此 EIP 自动创建一个负载均衡器并绑定;请保证集群至少有一个工作节点,否则无法通过此方法访问 KubeSphere 控制台;如果没安装 KubeSphere,无需设置此参数", + "Enable Web Console": "开启文件查看器", + "logging files can be viewed from the web console": "用户可通过文件查看器查看、上传或下载审计规则文件和日志文件,在浏览器里通过 http://[主节点 IP] 访问", + "Web Console Username": "文件查看器用户名", + "The username of the admin web console, can contain uppercase & lowercase letters, digits with 4-12 characters in total": "可以由大小写字母或数字组成,长度为 4 到 12 个字符", + "Web Console Password": "文件查看器密码", + "The password of the admin web console, can contain uppercase & lowercase letters, digits, and the following 10 special characters !@#$%^&*() with 4-32 characters in total": "可以由大小写字母、数字或特殊字符 !@#$%^&*() 组成,长度为 4 到 12 个字符,默认密码为 admin,请尽快修改", + "kubesphere_console": "KubeSphere 控制台链接", + "ks_console_notes": "备注", + "Extra Modules": "选装组件", + "extra modules to install with KubeSphere": "选装希望安装的组件,注意:此选项只有在选项 [安装 KubeSphere] 为 true 时才会生效,而且此选项只负责安装组件,清空此选项并不会执行卸载操作,如需卸载请手动操作;安装这些组件可能需要较长时间,请耐心等待", + "metrics-server": "Metrics Server", + "networkpolicy": "网络策略", + "ks-auditing": "审计", + "ks-events": "事件", + "ks-logging": "日志", + "ks-openpitrix": "OpenPitrix 应用商店", + "ks-devops": "DevOps", + "ks-servicemesh": "服务治理", + "ks-notification": "通知", + "ks-alerting": "告警", + "Access key id": "API 密钥", + "Pod Subnet": "Pod 网段", + "Service Subnet": "Service 网段", + "Choose a proxy mode": "选择一种 Proxy Mode", + "EIP ID for exposing kube-apiserver; if set, we'll create an LB and associate this EIP to it": "如果希望通过公网访问 K8s apiserver,请在此处填写可用的 Kubernetes EIP ID,系统将会自动创建一个 LB 并绑定此 EIP", + "Kubernetes EIP Address": "Kubernetes EIP 地址", + "Kubernetes EIP Port": "Kubernetes EIP 端口", + "Get Kubeconfig": "如果用户希望本地使用 kubectl 通过 EIP 访问 Kubernetes Apiserver ,请在 QKE 配置参数中填写 Kubernetes EIP 地址,需要用户自己确保 EIP 和 Kubernetes Apiserver 的连通性,并将 kubeconfig 中 server 的 IP 地址修改为 EIP 地址,端口修改为适当的端口;注意:此处的 kubeconfig 拥有集群管理的最高权限,请谨慎使用,请勿执行类似 kubectl delete nodes 等高危操作!", + "EIP address for accessing remote Kubernetes cluster, using Dotted Decimal Notation. For example: 139.198.123.23": "Kubernetes 的外网访问地址,请按照 IPv4 格式填写。例如:139.198.123.23;注意:修改此项会重启所有主节点的 kube-apiserver 从而导致 kube-apiserver 服务短暂中断,建议在业务低谷时间操作", + "Pod Subnet, using standard CIDR notation. For example: 10.10.0.0/16": "Pod 网段,请按照标准的 CIDR 格式填写。例如:10.10.0.0/16", + "Service Subnet, using standard CIDR notation. 
For example: 10.96.0.0/16": "Service 网段,请按照标准的 CIDR 格式填写。例如:10.96.0.0/16", + "K8s audit policy in YAML format; if it is empty, no events are logged": "K8s audit policy,以 YAML 格式表示,留空此项表示禁用 K8s 审计功能,详细配置请参考文档 [Audit policy](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#audit-policy)", + "K8s audit webhook configurations in YAML format; if it is empty, events will be logged into files under /etc/kubernetes/audit/logs/": "K8s audit webhook,以 YAML 格式表示,留空此项表示把日志存储在主节点本地目录 /etc/kubernetes/audit/logs/ 下,详细配置请参考文档 [Webhook backend](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#webhook-backend)", + "External port for accessing remote Kubernetes cluster, default value 6443": "Kubernetes 的外网访问端口,默认是 6443" }
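
For reference, a minimal shell sketch of the certificate-rotation pattern that k8s-ctl.sh and kube-certs.service now rely on: using the non-alpha "kubeadm certs" subcommand, compute how many days remain on the earliest-expiring certificate and renew everything once it drops below a threshold. The standalone-script framing and the THRESHOLD_DAYS parameter are illustrative assumptions, not code from this repository; the repository itself drives the same commands through getCertValidDays()/renewCerts() and the kube-certs.service unit.

#!/usr/bin/env bash
# Sketch only: mirrors the check/renew flow from k8s-ctl.sh, assuming a kubeadm
# release where "certs" is a top-level subcommand (as this change requires).
set -euo pipefail

THRESHOLD_DAYS=${1:-30}   # illustrative threshold, not a value from this repo

# Same parsing idea as getCertValidDays(): convert each EXPIRES column to a
# Unix timestamp and keep the earliest one.
earliest=$(kubeadm certs check-expiration \
  | awk '$1!~/^$|^CERTIFICATE/ {print "date -d\"",$2,$3,$4,$5,"\" +%s" | "/bin/bash"}' \
  | sort -n | head -1)
[ -n "$earliest" ] || { echo "could not parse check-expiration output" >&2; exit 1; }

days_left=$(( (earliest - $(date +%s)) / 86400 ))

if [ "$days_left" -lt "$THRESHOLD_DAYS" ]; then
  # Same command kube-certs.service runs; control-plane components only pick up
  # the renewed certs after a restart (reloadKubeMasterProcs in the repo).
  kubeadm certs renew all
fi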