You can use kubectl logs with the --previous flag to retrieve logs from a previous
instantiation of a container, which is useful when the container has crashed.
# view the logs of a container that has already crashed
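A minimal sketch, assuming a pod named my-pod with a container named app (both placeholder names, not taken from the notes above):
kubectl logs my-pod -c app --previous   # logs of the crashed (previous) instance
kubectl logs my-pod -c app              # logs of the currently running instance, for comparison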
Sidecar container logging example: one pod ("counter") where the count container writes two log files and two sidecar containers tail them.
apiVersion: v1
kind: Pod
metadata:
  name: counter
spec:
  containers:
  - name: count
    image: busybox
    args:
    - /bin/sh
    - -c
    - >
      i=0;
      while true;
      do
        echo "$i: $(date)" >> /var/log/1.log;
        echo "$(date) INFO $i" >> /var/log/2.log;
        i=$((i+1));
        sleep 1;
      done
    volumeMounts:
    - name: varlog
      mountPath: /var/log
  - name: count-log-1
    image: busybox
    args: [/bin/sh, -c, 'tail -n+1 -f /var/log/1.log']
    volumeMounts:
    - name: varlog
      mountPath: /var/log
  - name: count-log-2
    image: busybox
    args: [/bin/sh, -c, 'tail -n+1 -f /var/log/2.log']
    volumeMounts:
    - name: varlog
      mountPath: /var/log
  volumes:
  - name: varlog
    emptyDir: {}
# count-log-1 — a "sidecar" (like the sidecar of a three-wheeled motorcycle), rides alongside count and streams /var/log/1.log
# count-log-2 — second sidecar, streams /var/log/2.log
kubectl logs counter count-log-1
kubectl logs counter count-log-2
These read the two different log streams written by the count container.
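A short sketch of two related invocations, using standard kubectl flags (-f to follow, --all-containers to aggregate) against the counter pod defined above:
kubectl logs -f counter count-log-1          # follow /var/log/1.log as new lines arrive
kubectl logs counter --all-containers=true   # logs from count, count-log-1 and count-log-2 together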
##################################################################################
Checking node status:
a. kubectl get nodes   # list nodes and their Ready status (a fuller sketch follows the example output below)
b. Check component status:
kubectl get cs   # short for "kubectl get componentstatuses"
This is a handy command:
[root@node1 ~]# kubectl get cs
NAME                 STATUS      MESSAGE                                                                                         ERROR
etcd-1               Unhealthy   Get https://172.16.5.92:2379/health: dial tcp 172.16.5.92:2379: getsockopt: connection refused
scheduler            Healthy     ok
controller-manager   Healthy     ok
etcd-2               Healthy     {"health": "true"}
etcd-0               Healthy     {"health": "true"}
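A short follow-up sketch for item a above, assuming node1 (the host in the prompt) is also a node name registered in the cluster:
kubectl get nodes -o wide     # adds internal/external IPs, OS image and kubelet version per node
kubectl describe node node1   # the Conditions section shows Ready, MemoryPressure, DiskPressure, etc.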
##################################################