Kubernetes Deployment
Part 1: Install a Working Network Plugin
1. On your local machine (your own computer, not a VM), create the file kube-flannel.yml
Original file on GitHub: https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel.yml
Contents:
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: registry.cn-hangzhou.aliyuncs.com/darker-x/flannel:v0.13.1-rc1
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: registry.cn-hangzhou.aliyuncs.com/darker-x/flannel:v0.13.1-rc1
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
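Before applying this file, note that the "Network" value in net-conf.json (10.244.0.0/16) must match the pod CIDR the cluster was initialized with. Assuming the cluster was created with kubeadm, the init command would have looked something like this sketch (the advertise address is taken from the join command later in this guide; adjust both flags to your environment):

# Hypothetical init command; the flags must match how the cluster was actually created
kubeadm init --apiserver-advertise-address=192.168.50.50 --pod-network-cidr=10.244.0.0/16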
2. Use FinalShell or XShell to upload kube-flannel.yml to the master virtual machine
3. Apply kube-flannel.yml
kubectl apply -f kube-flannel.yml
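To watch the flannel pods come up, the label and DaemonSet name from the manifest above can be used, for example:

# Watch the flannel pods (the app=flannel label comes from the manifest above)
kubectl get pods -n kube-system -l app=flannel -w
# Or check the DaemonSet directly
kubectl get daemonset kube-flannel-ds -n kube-system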
Part 2: Join the Nodes to the Cluster
1. Generate a join token on the master
kubeadm token create --print-join-command
[root@kubernetes-master-01 ~]# kubeadm token create --print-join-command
W1208 20:13:11.218906    4648 configset.go:348] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
kubeadm join 192.168.50.50:6443 --token y3brz1.vvudm1pumm37z40e --discovery-token-ca-cert-hash sha256:15f0fbd28811b841c8799af98188563c89b12e7151bc9e54df7ba421326db469
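Tokens created this way expire after 24 hours by default. If the token has expired when you come to join a node, existing tokens can be listed or a fresh one created; a sketch:

# List existing bootstrap tokens and their TTLs
kubeadm token list
# Create a token that never expires (convenient for a lab, weaker for security)
kubeadm token create --ttl 0 --print-join-command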
2. Paste the join command into each worker node
[root@kubernetes-node-01 ~]# kubeadm join 192.168.50.50:6443 --token y3brz1.vvudm1pumm37z40e --discovery-token-ca-cert-hash sha256:15f0fbd28811b841c8799af98188563c89b12e7151bc9e54df7ba421326db469
[preflight] Running pre-flight checks
	[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
	[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
# This message means the node joined successfully!

[root@kubernetes-node-02 ~]# kubeadm join 192.168.50.50:6443 --token y3brz1.vvudm1pumm37z40e --discovery-token-ca-cert-hash sha256:15f0fbd28811b841c8799af98188563c89b12e7151bc9e54df7ba421326db469
[preflight] Running pre-flight checks
	[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
	[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
# This message means the node joined successfully!
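The join output above contains two warnings worth acting on. At minimum, enabling the kubelet service on each node (exactly as the warning suggests) avoids surprises after a reboot:

# On each worker node, per the [WARNING Service-Kubelet] message above
systemctl enable kubelet.service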
3. Verify on the master
kubectl get nodes
At this point, all three virtual machines must show a STATUS of Ready before the cluster can be considered healthy.
[root@kubernetes-master-01 ~]# kubectl get nodes
NAME                   STATUS   ROLES    AGE    VERSION
kubernetes-master-01   Ready    master   176m   v1.19.4
kubernetes-node-01     Ready    <none>   171m   v1.19.4
kubernetes-node-02     Ready    <none>   171m   v1.19.4
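Optionally, the <none> under ROLES for the workers can be replaced with a friendlier value; this is purely cosmetic and not required. A sketch:

# Cosmetic only: gives the workers a ROLES value in `kubectl get nodes`
kubectl label node kubernetes-node-01 node-role.kubernetes.io/worker=
kubectl label node kubernetes-node-02 node-role.kubernetes.io/worker=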
4. Test that cluster DNS is working
kubectl run test -it --rm --image=busybox:1.28.3
# then, at the container prompt:
nslookup kubernetes
Example output:
[root@kubernetes-master-01 ~]# kubectl run test -it --rm --image=busybox:1.28.3
If you don't see a command prompt, try pressing enter.
/ # nslookup kubernetes
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
/ #
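The same check can also be run non-interactively by passing the command after `--`; a sketch:

# One-shot DNS check; the pod is removed automatically afterwards
kubectl run test -it --rm --image=busybox:1.28.3 -- nslookup kubernetes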
5. Check pod status
kubectl get pods -n kube-system

# With the -w flag appended, the command stays in watch mode:
kubectl get pods -n kube-system -w
[root@kubernetes-master-01 ~]# kubectl get pods -n kube-system
NAME                                           READY   STATUS    RESTARTS   AGE
coredns-7dcc599b9f-n4wh2                       1/1     Running   0          177m
coredns-7dcc599b9f-wgv4x                       1/1     Running   0          177m
etcd-kubernetes-master-01                      1/1     Running   0          178m
kube-apiserver-kubernetes-master-01            1/1     Running   0          178m
kube-controller-manager-kubernetes-master-01   1/1     Running   0          178m
kube-flannel-ds-9q8wm                          1/1     Running   0          2m33s
kube-flannel-ds-p7gvx                          1/1     Running   0          2m33s
kube-flannel-ds-w68hc                          1/1     Running   0          2m33s
kube-proxy-nvxh4                               1/1     Running   0          173m
kube-proxy-t6x5d                               1/1     Running   0          177m
kube-proxy-xt4t7                               1/1     Running   0          173m
kube-scheduler-kubernetes-master-01            1/1     Running   0          178m
While this window is in watch mode, open another window and delete one of the pods: the deletion shows up in real time in the watching window, and a replacement pod is started automatically.
kubectl delete pods -n kube-system kube-proxy-xt4t7
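The replacement appears because kube-proxy is managed by a DaemonSet, whose controller immediately schedules a new pod on the node. This can be confirmed with:

# The DaemonSet keeps DESIRED == CURRENT, so a deleted pod is recreated
kubectl get daemonset kube-proxy -n kube-system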
6. Pull the two images on the worker nodes
On kubernetes-node-01 and kubernetes-node-02, pull the following two images:
docker pull registry.cn-hangzhou.aliyuncs.com/darker-x/flannel:v0.13.1-rc1
docker pull registry.cn-hangzhou.aliyuncs.com/darker-x/kube-proxy:v1.19.4
If pulling is slow, you can instead save the two images on the master as tar archives, copy them to the two worker nodes with scp, and then load the archives there:
# Save the flannel image
docker save registry.cn-hangzhou.aliyuncs.com/darker-x/flannel:v0.13.1-rc1 > flannel.tar
# Save the kube-proxy image
docker save registry.cn-hangzhou.aliyuncs.com/darker-x/kube-proxy:v1.19.4 > kube-proxy.tar

# Use scp to copy the archives to the home directory of both worker nodes
scp flannel.tar kubernetes-node-01:~
scp flannel.tar kubernetes-node-02:~
scp kube-proxy.tar kubernetes-node-01:~
scp kube-proxy.tar kubernetes-node-02:~

# On both worker nodes, load the image archives
docker load < flannel.tar
docker load < kube-proxy.tar
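After loading, it may be worth confirming on each worker that both images are present; a quick check:

# On each worker node: both images should be listed
docker images | grep -E 'flannel|kube-proxy'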
Part 3: Deploy the Dashboard
1. On your local machine (your own computer, not a VM), create the file recommended.yaml
Original file on GitHub: https://github.com/kubernetes/dashboard/blob/master/aio/deploy/recommended.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
  # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: registry.cn-hangzhou.aliyuncs.com/k8sos/dashboard:v2.0.5
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
            # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper
  type: NodePort

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: registry.cn-hangzhou.aliyuncs.com/k8sos/metrics-scraper:v1.0.6
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
            - mountPath: /tmp
              name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}
2. Use FinalShell or XShell to upload recommended.yaml to the master virtual machine
3. Apply recommended.yaml
kubectl apply -f recommended.yaml
[root@kubernetes-master-01 ~]# kubectl apply -f recommended.yaml
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
4. Check the dashboard and metrics-scraper pods
kubectl get pods -n kubernetes-dashboard
[root@kubernetes-master-01 ~]# kubectl get pods -n kubernetes-dashboard
NAME                                         READY   STATUS         RESTARTS   AGE
dashboard-metrics-scraper-7896b957f8-xh6xd   0/1     ErrImagePull   0          66s
kubernetes-dashboard-66f7cc8bbf-w6wmm        0/1     ErrImagePull   0          66s

ErrImagePull at this stage just means the image pulls have not yet succeeded; once the images are pulled, the pods should transition to Running.
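If the pods stay stuck in ErrImagePull, the usual first step is to inspect the pod events (the pod name here is taken from the output above):

# The Events section at the bottom of the output shows why the image pull failed
kubectl describe pod dashboard-metrics-scraper-7896b957f8-xh6xd -n kubernetes-dashboard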
5. Check the dashboard and metrics-scraper services
kubectl get svc -n kubernetes-dashboard
NAME                        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
dashboard-metrics-scraper   ClusterIP   10.100.35.195   <none>        8000/TCP   2m53s
kubernetes-dashboard        ClusterIP   10.97.125.131   <none>        443/TCP    2m53s
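Both services are ClusterIP here, so the dashboard is not yet reachable from outside the cluster. Step 9 below shows kubernetes-dashboard as a NodePort service, so its type must be changed somewhere in between; the original does not show how, but one way is a patch like this sketch:

# Hypothetical step (not shown in the original): expose the dashboard as a NodePort
kubectl patch svc kubernetes-dashboard -n kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'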
6. Create token.yaml with vim
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
7. Apply token.yaml
kubectl apply -f token.yaml
8. Retrieve the access token
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
[root@kubernetes-master-01 ~]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
Name:         admin-user-token-bfhbh
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: f0dd42d8-7b99-4725-81cd-0f26bae97fa1

Type:  kubernetes.io/service-account-token

Data
====
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IjZvODlnWU05dlVfa2pXNmdXWnRxNEVaZ0dkZ3FRVm5SY0xUV0F5ZTBORncifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWJmaGJoIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJmMGRkNDJkOC03Yjk5LTQ3MjUtODFjZC0wZjI2YmFlOTdmYTEiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.srt35c2BnKKbzAX4lcKnq1s4qoYO9qxOiVXwtkW5G7kzyLpDnrRJRSn2S27RiIZA7vhwrGyKNciLXxhcWPDUhN7ohensQy7XK-jri92-Kgwv1B1a6gG1PwiQY60GTfLvHmDMe3On_bqlchTDmGelSWkJt2CtHZULb4V9R6ZEu6xc8qcWt8rZyoZaM51BFB8QSW9nISYDpKkUxETy5bqSfSjbRfhIaNme40m8FAbC1Oq8W8ggMEZCoB9a_D0jC3n-iDaMyaNiZ7QZFIfWig77ahSGsNckMXC8ZPUJoy6MGEFK4rxbnQmT3MSmTZLG-9uFAtOneoo-6xubxqnVckzVPw
ca.crt:     1066 bytes
namespace:  11 bytes
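If only the token itself is wanted (for pasting into the dashboard login screen), a shorter variant of the same lookup prints it directly; a sketch:

# Print just the decoded token for the admin-user service account
kubectl -n kube-system get secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') -o jsonpath='{.data.token}' | base64 -d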
9. Check the dashboard and metrics-scraper services again
kubectl get svc -n kubernetes-dashboard
[root@kubernetes-master-01 ~]# kubectl get svc -n kubernetes-dashboard
NAME                        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
dashboard-metrics-scraper   ClusterIP   10.100.35.195   <none>        8000/TCP        9m27s
kubernetes-dashboard        NodePort    10.97.125.131   <none>        443:32019/TCP   9m27s
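With the service exposed as a NodePort (443:32019 above), the dashboard should be reachable in a browser at https://<any-node-ip>:32019, where the token from step 8 logs you in. The certificate is auto-generated, so the browser will show a security warning. A quick reachability check from the host, using the master IP from this guide:

# -k skips certificate verification (the dashboard auto-generates its certificate)
curl -k https://192.168.50.50:32019/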