Alibaba Nacos deployment on Kubernetes
The first version of nacos-quick-start.yaml was identical to the corrected manifest shown in full below, except that NACOS_SERVERS listed every peer FQDN with a trailing dot:

- name: NACOS_SERVERS
  value: "nacos-0.nacos-headless.testhip.svc.cluster.local.:8848 nacos-1.nacos-headless.testhip.svc.cluster.local.:8848 nacos-2.nacos-headless.testhip.svc.cluster.local.:8848"

The pods were labeled and exposed as NodePort services exactly as shown below.
The most error-prone point: the Nacos pod hostname does not match the names in the Nacos configuration file.
# Note: cluster.conf carried nacos-0.nacos.testhip.svc.cluster.local. with a trailing dot, while the hostname the pod actually uses has no dot.
[root@nacos-0 conf]# cat cluster.conf
nacos-0.nacos.testhip.svc.cluster.local.:8848
nacos-1.nacos.testhip.svc.cluster.local.:8848
nacos-2.nacos.testhip.svc.cluster.local.:8848
The /etc/hosts of nacos-0, written by Kubernetes, carries no trailing dot (the full file is shown further below):
10.20.3.90 nacos-0.nacos.testhip.svc.cluster.local nacos-0
# Error log
[root@nacos-0 logs]# cat naming-raft.log
WARN [IS LEADER] no leader is available now!
# No leader is ever elected: the dotted member names in cluster.conf never match the pod hostname, so the Raft cluster cannot form.
###############################################################################################################################################
Remove the trailing dots. Corrected Alibaba Nacos Kubernetes deployment:
[root@master1 nacos]# cat nacos-quick-start.yaml
---
apiVersion: v1
kind: Service
metadata:
namespace: testhip
name: nacos-headless
labels:
app: nacos-headless
spec:
ports:
- port: 8848
name: server
targetPort: 8848
selector:
app: nacos
---
apiVersion: v1
kind: ConfigMap
metadata:
namespace: testhip
name: nacos-cm
data:
mysql.master.db.name: "nacos_devtest"
mysql.master.port: "3306"
mysql.slave.port: "3306"
mysql.master.user: "nacos"
mysql.master.password: "nacos"
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
namespace: testhip
name: nacos
spec:
serviceName: nacos-headless
replicas: 3
template:
metadata:
labels:
app: nacos
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: "app"
operator: In
values:
                      - nacos
topologyKey: "kubernetes.io/hostname"
containers:
- name: k8snacos
imagePullPolicy: Always
image: 192.168.200.10/nacos/nacos-server:latest
resources:
requests:
memory: "2Gi"
cpu: "500m"
ports:
- containerPort: 8848
name: client
env:
- name: NACOS_REPLICAS
value: "3"
- name: MYSQL_MASTER_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: nacos-cm
key: mysql.master.db.name
- name: MYSQL_MASTER_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: nacos-cm
key: mysql.master.port
- name: MYSQL_SLAVE_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: nacos-cm
key: mysql.slave.port
- name: MYSQL_MASTER_SERVICE_USER
valueFrom:
configMapKeyRef:
name: nacos-cm
key: mysql.master.user
- name: MYSQL_MASTER_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: nacos-cm
key: mysql.master.password
- name: NACOS_SERVER_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: NACOS_SERVERS
value: "nacos-0.nacos-headless.testhip.svc.cluster.local:8848 nacos-1.nacos-headless.testhip.svc.cluster.local:8848 nacos-2.nacos-headless.testhip.svc.cluster.local:8848"
volumeMounts:
- name: datadir
mountPath: /home/nacos/data
- name: logdir
mountPath: /home/nacos/logs
volumeClaimTemplates:
- metadata:
namespace: testhip
name: datadir
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: ceph-rbd-provisioner-testhip
resources:
requests:
storage: 30Gi
- metadata:
namespace: testhip
name: logdir
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: ceph-rbd-provisioner-testhip
resources:
requests:
storage: 30Gi
selector:
matchLabels:
app: nacos
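Before applying the manifest, the nacos_devtest database and the nacos user referenced in nacos-cm must already exist on the MySQL master; the table schema ships with the Nacos release package (nacos-mysql.sql in the 1.x line). A minimal rollout-and-verify sketch, assuming the file above is saved as nacos-quick-start.yaml and the testhip namespace already exists:

kubectl apply -f nacos-quick-start.yaml
# all three pods should reach Running, each with a bound datadir and logdir PVC
kubectl get pods -l app=nacos -n testhip -o wide
kubectl get pvc -n testhip
# follow the startup log of one instance
kubectl logs -f nacos-0 -n testhip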
kubectl label pod nacos-1 nacosInst=1 -n testhip
kubectl expose pod nacos-1 --port=8848 --target-port=8848 --name=nacos-1 --selector=nacosInst=1 --type=NodePort -n testhip
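Labeling one pod and exposing it through its own NodePort Service makes that specific instance reachable from outside the cluster. A quick check of the assigned port (console path and the default nacos/nacos account apply to the stock 1.x image unless they were changed):

kubectl get svc nacos-1 -n testhip
# PORT(S) shows something like 8848:3xxxx/TCP; the console of this instance is then at
# http://<node-ip>:<nodePort>/nacos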
The most error-prone point is the Nacos hostname not matching the names in the Nacos configuration file (cluster.conf).
# Note: nacos-0.nacos.testhip.svc.cluster.local. with a trailing dot is a different name from the undotted form used in the yaml; the trailing dot was the cause of the mismatch.
[root@nacos-0 conf]# cat cluster.conf
nacos-0.nacos.testhip.svc.cluster.local:8848
nacos-1.nacos.testhip.svc.cluster.local:8848
nacos-2.nacos.testhip.svc.cluster.local:8848
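The file Nacos actually loaded can be inspected without opening a shell in the pod. In the official nacos-server image the startup script writes the NACOS_SERVERS value into cluster.conf, so the lasting fix for the trailing dots is the env value in the StatefulSet, after which the pods are recreated (a sketch; the conf path matches the official image layout):

kubectl exec nacos-0 -n testhip -- cat /home/nacos/conf/cluster.conf
# recreate the pods after changing NACOS_SERVERS so the file is regenerated
kubectl delete pod nacos-0 nacos-1 nacos-2 -n testhip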
The /etc/hosts of nacos-0 carries no trailing dot:
[root@nacos-0 conf]# cat /etc/hosts
# Kubernetes-managed hosts file.
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
fe00::0 ip6-mcastprefix
fe00::1 ip6-allnodes
fe00::2 ip6-allrouters
10.20.3.90 nacos-0.nacos.testhip.svc.cluster.local nacos-0
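Since the name written into /etc/hosts must appear in cluster.conf verbatim, a small exact-match check run inside the pod catches the trailing-dot problem (assuming the default 8848 port as configured above):

grep -qxF "$(hostname -f):8848" /home/nacos/conf/cluster.conf \
  && echo "hostname matches cluster.conf" \
  || echo "MISMATCH: check for trailing dots in cluster.conf"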
# Error log
[root@nacos-0 logs]# cat naming-raft.log
WARN [IS LEADER] no leader is available now!
# No leader had been elected; the members in cluster.conf have to match the pod hostnames exactly, with no trailing dot, before the three replicas can form a cluster and elect a leader.
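Once the names match and the pods have been recreated, naming-raft.log should show a leader being elected; a quick check from outside the pod (Nacos 1.x log layout assumed):

kubectl exec nacos-0 -n testhip -- grep -i leader /home/nacos/logs/naming-raft.log | tail -n 5
# the member list is also visible in the console under Cluster Management once the nodes see each other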
kubectl label pod nacos-0 nacosInst=0 -n testhip
kubectl expose pod nacos-0 --port=8848 --target-port=8848 --name=nacos-0 --selector=nacosInst=0 --type=NodePort -n testhip
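nacos-2 can be exposed the same way if every instance needs its own external endpoint; note that the nacosInst label is set on the running pod rather than in the pod template, so it is lost and has to be reapplied whenever the StatefulSet recreates the pod:

kubectl label pod nacos-2 nacosInst=2 -n testhip
kubectl expose pod nacos-2 --port=8848 --target-port=8848 --name=nacos-2 --selector=nacosInst=2 --type=NodePort -n testhip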