java
export JAVA_HOME=/home/software/jdk1.8.0_202
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib:$CLASSPATH
export JAVA_PATH=${JAVA_HOME}/bin:${JRE_HOME}/bin
export PATH=$PATH:${JAVA_PATH}
etcd
docker run -d --name etcd-server --publish 2379:2379 --publish 2380:2380 --env ALLOW_NONE_AUTHENTICATION=yes --env ETCD_ADVERTISE_CLIENT_URLS=http://etcd-server:2379 bitnami/etcd:latest
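A quick health check once the container is up (assuming port 2379 is reachable on the host):
# version endpoint served by etcd itself
curl http://127.0.0.1:2379/version
# or ask etcdctl inside the container
docker exec etcd-server etcdctl endpoint health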
grafana
docker run -d -p 3000:3000 --name=grafana grafana/grafana
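Health check once the container is running (port 3000 mapped above):
curl http://127.0.0.1:3000/api/health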
h2 (version issue: the web console cannot connect)
wget https://h2database.com/h2-2019-10-14.zip --no-check-certificate
nohup java -cp "h2-1.4.199.jar:$H2DRIVERS:$CLASSPATH" org.h2.tools.Server -tcpAllowOthers -webAllowOthers -webPort 8082 "$@" &
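Once the server process is up, the web console should answer on the port given above:
curl -I http://127.0.0.1:8082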
zookeeper
docker run --privileged=true -d --name zookeeper --publish 2181:2181 zookeeper:latest
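A quick status check (assuming the official image keeps the ZooKeeper scripts on PATH):
docker exec zookeeper zkServer.sh status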
mysql
docker run -p 3306:3306 --name mysql -e MYSQL_ROOT_PASSWORD=123456 -d mysql:5.7
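A quick connectivity check, reusing the root password set above:
docker exec -it mysql mysql -uroot -p123456 -e "SELECT VERSION();"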
skywalking + es
docker pull elasticsearch:7.5.1
docker pull apache/skywalking-oap-server:6.6.0-es7
docker pull apache/skywalking-ui:6.6.0
docker run -d --name=es7 -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" elasticsearch:7.5.1
docker run -d --name oap --restart=always -e TZ=Asia/Shanghai -p 12800:12800 -p 11800:11800 --link es7:es7 -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=es7:9200 apache/skywalking-oap-server:6.6.0-es7
docker run -d --name skywalking-ui --restart=always -e TZ=Asia/Shanghai -p 8088:8080 --link oap:oap -e SW_OAP_ADDRESS=oap:12800 apache/skywalking-ui:6.6.0
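To confirm the stack came up (using the host ports mapped above):
# Elasticsearch cluster health
curl http://127.0.0.1:9200/_cluster/health?pretty
# SkyWalking UI, mapped to host port 8088
curl -I http://127.0.0.1:8088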
prometheus (-v mounts a local directory into the container; the flags that follow refer to the in-container path after mounting)
docker run --name prometheus -d -p 9099:9090 -v /home/conf/prometheus-data/:/prometheus-data 227ae20e1b04 --web.enable-lifecycle --config.file=/prometheus-data/prometheus.yml
----prometheus.yml
global:
  scrape_interval: 15s
  external_labels:
    monitor: 'codelab-monitor'
scrape_configs:
  - job_name: 'prometheus'
    scrape_interval: 5s
    static_configs:
      - targets: ['localhost:9090']
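Because --web.enable-lifecycle is passed to the container above, an edit to the mounted prometheus.yml can be picked up without recreating the container (host port 9099 as mapped above):
# ask Prometheus to re-read its configuration from the mounted directory
curl -X POST http://127.0.0.1:9099/-/reload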
kubectl
curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
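Sanity check of the installed binary:
kubectl version --client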
minikube
curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && chmod +x minikube
sudo cp minikube /usr/local/bin && rm minikube
Install and start k8s
minikube start --memory=4068 --cpus=4 --force --driver=docker --kubernetes-version=v1.22.0
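Verify the node after startup:
minikube status
kubectl get nodes -o wide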
Create an nginx pod and check it from the dashboard
minikube addons enable ingress
kubectl run nginx --image=nginx --port=80
# bring up the dashboard to take a look
minikube dashboard
# enable proxy access
kubectl proxy --port=33458 --address='0.0.0.0' --accept-hosts='^.*' &
http://139.224.65.218:33458/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/#/workloads?namespace=default
deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-nginx
spec:
  selector:
    matchLabels:
      app: my-nginx
  replicas: 2
  template:
    metadata:
      labels:
        app: my-nginx
    spec:
      containers:
        - name: my-nginx
          image: nginx
          ports:
            - containerPort: 80
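Assuming the manifest above is saved as my-nginx-deployment.yaml (name is illustrative), apply it and watch the replicas come up:
kubectl apply -f my-nginx-deployment.yaml
kubectl get pods -l app=my-nginx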
service
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
  labels:
    app: nginx-service
spec:
  type: NodePort
  selector:
    app: my-nginx
  ports:
    - port: 8000
      targetPort: 80
      nodePort: 32500
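Saved as nginx-service.yaml (illustrative name), apply it and hit the NodePort declared above (192.168.49.2 is the minikube IP used later in these notes):
kubectl apply -f nginx-service.yaml
kubectl get svc nginx-service
curl http://192.168.49.2:32500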
The ingress admission jobs don't succeed; use the manifests below instead
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: ingress-nginx-admission-create
  namespace: ingress-nginx
  annotations:
    helm.sh/hook: pre-install,pre-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.0.4
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.0.3
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
spec:
  template:
    metadata:
      name: ingress-nginx-admission-create
      labels:
        helm.sh/chart: ingress-nginx-4.0.4
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 1.0.3
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
    spec:
      containers:
        - name: create
          image: itworker365/kube-webhook-certgen:latest
          imagePullPolicy: IfNotPresent
          args:
            - create
            - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
            - --namespace=$(POD_NAMESPACE)
            - --secret-name=ingress-nginx-admission
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
      restartPolicy: OnFailure
      serviceAccountName: ingress-nginx-admission
      nodeSelector:
        kubernetes.io/os: linux
      securityContext:
        runAsNonRoot: true
        runAsUser: 2000
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: ingress-nginx-admission-patch
  namespace: ingress-nginx
  annotations:
    helm.sh/hook: post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.0.4
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.0.3
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
spec:
  template:
    metadata:
      name: ingress-nginx-admission-patch
      labels:
        helm.sh/chart: ingress-nginx-4.0.4
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 1.0.3
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
    spec:
      containers:
        - name: patch
          image: itworker365/kube-webhook-certgen:latest
          imagePullPolicy: IfNotPresent
          args:
            - patch
            - --webhook-name=ingress-nginx-admission
            - --namespace=$(POD_NAMESPACE)
            - --patch-mutating=false
            - --secret-name=ingress-nginx-admission
            - --patch-failure-policy=Fail
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
      restartPolicy: OnFailure
      serviceAccountName: ingress-nginx-admission
      nodeSelector:
        kubernetes.io/os: linux
      securityContext:
        runAsNonRoot: true
        runAsUser: 2000
The nginx-ingress-controller doesn't succeed either; use the deployment below
# Source: ingress-nginx/templates/controller-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.0.4
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.0.3
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/instance: ingress-nginx
      app.kubernetes.io/component: controller
  revisionHistoryLimit: 10
  minReadySeconds: 0
  template:
    metadata:
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/component: controller
    spec:
      dnsPolicy: ClusterFirst
      containers:
        - name: controller
          image: itworker365/controller:v1.0.3
          imagePullPolicy: IfNotPresent
          lifecycle:
            preStop:
              exec:
                command:
                  - /wait-shutdown
          args:
            - /nginx-ingress-controller
            - --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller
            - --election-id=ingress-controller-leader
            - --controller-class=k8s.io/ingress-nginx
            - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
            - --validating-webhook=:8443
            - --validating-webhook-certificate=/usr/local/certificates/cert
            - --validating-webhook-key=/usr/local/certificates/key
          securityContext:
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
            runAsUser: 101
            allowPrivilegeEscalation: true
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: LD_PRELOAD
              value: /usr/local/lib/libmimalloc.so
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
            - name: https
              containerPort: 443
              protocol: TCP
            - name: webhook
              containerPort: 8443
              protocol: TCP
          volumeMounts:
            - name: webhook-cert
              mountPath: /usr/local/certificates/
              readOnly: true
          resources:
            requests:
              cpu: 100m
              memory: 90Mi
      nodeSelector:
        kubernetes.io/os: linux
      serviceAccountName: ingress-nginx
      terminationGracePeriodSeconds: 300
      volumes:
        - name: webhook-cert
          secret:
            secretName: ingress-nginx-admission
Create an Ingress rule to try it
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: test-ingress
  namespace: default
spec:
  ingressClassName: nginx
  rules:
    - http:
        paths:
          - path: /testpath
            pathType: Prefix
            backend:
              service:
                name: nginx-service
                port:
                  number: 80
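Assuming the rule above is saved as test-ingress.yaml (illustrative name) and the controller is reachable on the node's port 80 (as with the minikube ingress addon):
kubectl apply -f test-ingress.yaml
kubectl get ingress test-ingress
# expect a response served by the backend nginx; a 404 for /testpath is fine since that page does not exist
curl -i http://192.168.49.2/testpath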
Loop-request test
kubectl exec -it **** /bin/bash
cat >> /usr/share/nginx/html/index.html << EOF
X
EOF
for i in `seq 1 10`; do curl http://192.168.49.2:32500 --silent -w "Status: %{http_code} "; done
helm
wget https://get.helm.sh/helm-v3.7.1-linux-amd64.tar.gz
tar -zxvf helm-v3.7.1-linux-amd64.tar.gz
mv linux-amd64/helm /usr/local/bin/helm
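Confirm the binary works:
helm version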
istio
wget https://github.com/istio/istio/releases/download/1.11.4/istio-1.11.4-linux-arm64.tar.gz
tar -zxvf istio-1.11.4-linux-arm64.tar.gz
cp istio-1.11.4/bin/istioctl /usr/local/bin/
istioctl install --set profile=demo -y
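Once the install finishes, the control-plane pods in istio-system should be Running:
istioctl version
kubectl get pods -n istio-system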
If the other related components (grafana, jaeger, kiali, prometheus) or the samples are needed:
kubectl apply -f samples/addons
kubectl apply -f samples/addons/extras
Istio injection management: check status and enable it
kubectl get ns -L istio-injection
kubectl label namespace default istio-injection=enabled
Deploy an Istio test example
kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml
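With injection enabled on default, each bookinfo pod should show 2/2 containers (the app plus the Envoy sidecar):
kubectl get pods
kubectl get svc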
Deploy the gateway
kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml
Check the gateway address
kubectl get svc istio-ingressgateway -n istio-system
kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}'
# from the earlier output, the minikube IP is 192.168.49.2
minikube service --url nginx-service
# combining the above, the page is reachable at:
curl http://192.168.49.2:30926/productpage
apisix
kubectl create namespace apisix
helm repo add apisix https://charts.apiseven.com
helm repo update
helm install apisix apisix/apisix --set admin.allow.ipList="{0.0.0.0/0}" --set etcd.enabled=false --set etcd.host={http://139.224.65.218:2379} --namespace apisix
helm install apisix-dashboard apisix/apisix-dashboard --set config.conf.etcd.endpoints={http://139.224.65.218:2379} --namespace apisix
helm install apisix-ingress-controller apisix/apisix-ingress-controller --namespace apisix --set config.apisix.baseURL=http://apisix-admin:9180/apisix/admin --set config.apisix.adminKey=edd1c9f034335f136f87ad84b625c8f1
------
kiali: every service that should be reachable externally needs to be switched to NodePort
apiVersion: v1
kind: Service
metadata:
  name: kiali
  namespace: istio-system
  labels:
    helm.sh/chart: kiali-server-1.42.0
    app: kiali
    app.kubernetes.io/name: kiali
    app.kubernetes.io/instance: kiali
    version: "v1.42.0"
    app.kubernetes.io/version: "v1.42.0"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: "kiali"
  annotations:
spec:
  type: NodePort
  selector:
    app.kubernetes.io/name: kiali
    app.kubernetes.io/instance: kiali
  ports:
    - name: http
      protocol: TCP
      port: 20001
      targetPort: 20001
      nodePort: 32501
    - name: http-metrics
      protocol: TCP
      port: 9090
      targetPort: 9090
      nodePort: 32502
Then forward through nginx
# /usr/local/conf/nginx.conf
upstream nginxsvctest {
    server 192.168.49.2:32500;
}
upstream test32501 {
    server 192.168.49.2:32501;
}
upstream test32502 {
    server 192.168.49.2:32502;
}
server {
    listen 32500;
    location / {
        proxy_pass http://nginxsvctest;
    }
}
server {
    listen 32501;
    location / {
        proxy_pass http://test32501;
    }
}
server {
    listen 32502;
    location / {
        proxy_pass http://test32502;
    }
}
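After editing, test the config and reload nginx in place:
nginx -t
nginx -s reload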
If it complains that grafana is not configured, add the following and restart
external_services:
  custom_dashboards:
    enabled: true
  grafana:
    url: "http://10.105.56.167:3000"
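With the addon install, the Kiali settings are assumed to live in the kiali ConfigMap in istio-system; edit it and restart the deployment to pick up the change:
kubectl -n istio-system edit configmap kiali
kubectl -n istio-system rollout restart deployment kiali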
apisix
helm repo add apisix https://charts.apiseven.com
helm repo update
// install apisix
helm install apisix apisix/apisix --set admin.allow.ipList="{0.0.0.0/0}" --set etcd.enabled=false --set etcd.host={http://139.xxx.xxx.218:2379} --namespace default
export NODE_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services apisix-gateway)
export NODE_IP=$(kubectl get nodes --namespace default -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
http://192.168.49.2:32535

// install apisix-dashboard
helm install apisix-dashboard apisix/apisix-dashboard --set config.conf.etcd.endpoints={http://139.xxx.xxx.218:2379} --namespace default
export POD_NAME=$(kubectl get pods --namespace default -l "app.kubernetes.io/name=apisix-dashboard,app.kubernetes.io/instance=apisix-dashboard" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace default $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace default port-forward $POD_NAME 8080:$CONTAINER_PORT

// install apisix-ingress-controller
helm install apisix-ingress-controller apisix/apisix-ingress-controller --namespace default --set config.apisix.baseURL=http://apisix-admin:9180/apisix/admin --set config.apisix.adminKey=edd1c9f034335f136f87ad84b625c8f1
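To exercise the gateway end to end, a test route can be registered through the Admin API with the admin key above; the port-forward target (svc/apisix-admin) and the httpbin upstream are illustrative:
kubectl -n default port-forward svc/apisix-admin 9180:9180 &
curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
  "uri": "/get",
  "upstream": {
    "type": "roundrobin",
    "nodes": { "httpbin.org:80": 1 }
  }
}'
# then request it through the gateway NodePort printed above (32535)
curl http://192.168.49.2:32535/get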