Definition and Principles
- Application configuration in many production environments is fairly complex and may require a combination of multiple config files, command-line arguments, and environment variables. When deploying with containers, the configuration should be decoupled from the application image to keep the image portable. Secret already allows credentials and keys to be decoupled from the application, but before Kubernetes 1.2 there was no object for ordinary, non-secret configuration; Kubernetes 1.2 introduced ConfigMap to handle this kind of configuration data.
- A ConfigMap stores general-purpose configuration variables, much like a configuration file, and lets users manage the environment variables used by different modules of a distributed system in a single object. The difference from a plain configuration file is that a ConfigMap lives in the cluster's "environment" and supports all of the usual Kubernetes operations.
- From a data point of view, a ConfigMap is just a set of key-value pairs that is consumed by Pods or other resource objects (such as an RC). The design is very similar to Secret; the main difference is that a ConfigMap is normally not used for sensitive information and only stores plain text.
- A ConfigMap can hold environment-variable values as well as whole configuration files.
- When a Pod is created, it is bound to a ConfigMap, and the application inside the Pod can reference the ConfigMap's configuration directly. In effect, the ConfigMap packages the configuration for the application and its runtime environment.
- A Pod typically consumes a ConfigMap in three ways: setting environment variable values, setting command-line arguments, and creating configuration files. (The command-line case is sketched right after this list; the other two are demonstrated in the sections below.)
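The sketch below shows the command-line-argument pattern, assuming the special-config ConfigMap with the key special.how that is created later in this article; the Pod name cm-args-demo and the busybox image are only illustrative:
apiVersion: v1
kind: Pod
metadata:
  name: cm-args-demo
spec:
  restartPolicy: Never
  containers:
  - name: demo
    image: busybox
    # first expose the ConfigMap key as an environment variable
    env:
    - name: SPECIAL_HOW
      valueFrom:
        configMapKeyRef:
          name: special-config
          key: special.how
    # then reference the variable in the command line using $(VAR) expansion
    command: ["/bin/sh", "-c", "echo special.how is $(SPECIAL_HOW)"]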
Creating a ConfigMap
Creating a ConfigMap with the kubectl command
Create a ConfigMap from one or more configuration files
[root@192 123]# cat 1.conf
123
[root@192 123]# cat 2.conf
123
[root@192 123]# cat 3.conf
123
[root@192 123]# pwd
/root/configmap/123
[root@192 123]# kubectl create cm cm-11 --from-file=1.conf --from-file=2.conf
configmap "cm-11" created
[root@192 123]# kubectl get cm cm-11 -o yaml
apiVersion: v1
data:
  1.conf: |
    123
  2.conf: |
    123
kind: ConfigMap
metadata:
  creationTimestamp: 2017-11-09T15:09:39Z
  name: cm-11
  namespace: default
  resourceVersion: "1234349"
  selfLink: /api/v1/namespaces/default/configmaps/cm-11
  uid: 01ceea04-c560-11e7-8e40-000c29827a88
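By default the source file name becomes the key. kubectl also accepts the --from-file=key=path form when a different key name is wanted; a minimal sketch (cm-11b and my-key are made-up names):
kubectl create cm cm-11b --from-file=my-key=1.conf
The resulting ConfigMap would then contain the key my-key with the contents of 1.conf, instead of a key named 1.conf.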
Create a ConfigMap from a directory; all files under the directory are added to data
[root@192 123]# kubectl create cm cm-12 --from-file=/root/configmap/123
configmap "cm-12" created
[root@192 123]# kubectl get cm cm-12 -o yaml
apiVersion: v1
data:
  1.conf: |
    123
  2.conf: |
    123
  3.conf: |
    123
kind: ConfigMap
metadata:
  creationTimestamp: 2017-11-09T15:11:13Z
  name: cm-12
  namespace: default
  resourceVersion: "1234486"
  selfLink: /api/v1/namespaces/default/configmaps/cm-12
  uid: 39e25e81-c560-11e7-8e40-000c29827a88
Create a ConfigMap from literal key-value pairs (literal values)
[root@192 configmap]# kubectl create configmap cm-15 --from-literal=name=panjb --from-literal=age=9999
configmap "cm-15" created
[root@192 configmap]# kubectl get cm cm-15 -o yaml
apiVersion: v1
data:
  age: "9999"
  name: panjb
kind: ConfigMap
metadata:
  creationTimestamp: 2017-11-09T15:17:21Z
  name: cm-15
  namespace: default
  resourceVersion: "1235022"
  selfLink: /api/v1/namespaces/default/configmaps/cm-15
  uid: 15113138-c561-11e7-8e40-000c29827a88
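When there are many key-value pairs, kubectl can also read them from an env-style file with --from-env-file, where each KEY=VALUE line becomes one entry in data; a minimal sketch (the file user.env, its contents, and the name cm-16 are made up):
# user.env: one KEY=VALUE pair per line
name=panjb
age=9999
# every line of the file becomes a separate key in the ConfigMap
kubectl create configmap cm-16 --from-env-file=user.env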
Create a ConfigMap from a YAML manifest
[root@192 configmap]# cat cm-13.yaml
apiVersion: v1
data:
  special.how: very
  special.type: charm
kind: ConfigMap
metadata:
  name: special-config
[root@192 configmap]# kubectl apply -f cm-13.yaml
configmap "special-config" created
[root@192 configmap]# kubectl get cm special-config -o yaml
apiVersion: v1
data:
  special.how: very
  special.type: charm
kind: ConfigMap
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"v1","data":{"special.how":"very","special.type":"charm"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"special-config","namespace":"default"}}
  creationTimestamp: 2017-11-09T15:14:10Z
  name: special-config
  namespace: default
  resourceVersion: "1234745"
  selfLink: /api/v1/namespaces/default/configmaps/special-config
  uid: a37eae70-c560-11e7-8e40-000c29827a88
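The manifest does not have to be written by hand: any of the earlier create commands can emit it with --dry-run -o yaml, and the output can then be saved and applied. A sketch (cm-14.yaml is a made-up file name):
# generate the manifest without creating the object, then apply it
kubectl create configmap special-config \
  --from-literal=special.how=very \
  --from-literal=special.type=charm \
  --dry-run -o yaml > cm-14.yaml
kubectl apply -f cm-14.yaml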
Passing the value of a ConfigMap key into a container as an environment variable
[root@192 12345]# kubectl get cm cm-21 -o yaml
apiVersion: v1
data:
  age: "999"
  name: panjunbai
kind: ConfigMap
metadata:
  creationTimestamp: 2017-11-09T15:33:50Z
  name: cm-21
  namespace: default
  resourceVersion: "1236595"
  selfLink: /api/v1/namespaces/default/configmaps/cm-21
  uid: 628e94d3-c563-11e7-8e40-000c29827a88
[root@192 configmap]# cat rc.yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: my-web
spec:
  replicas: 1
  selector:
    app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: myngin
        image: 192.168.19.124/test/nginx:1.0
        ports:
        - containerPort: 80
        env:
        - name: panjbname
          valueFrom:
            configMapKeyRef:
              name: cm-21
              key: name
        - name: panjbage
          valueFrom:
            configMapKeyRef:
              name: cm-21
              key: age
[root@192 12345]# kubectl apply -f rc.yaml
replicationcontroller "my-web" created
[root@192 12345]# kubectl get pod
NAME READY STATUS RESTARTS AGE
my-web-rhq40 1/1 Running 0 3s
[root@192 12345]# kubectl exec -it my-web-rhq40 -- env
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
HOSTNAME=my-web-rhq40
panjbage=999
panjbname=panjunbai
MY_SERVICE_PORT_80_TCP_PORT=80
KUBERNETES_PORT_443_TCP_PROTO=tcp
KUBERNETES_SERVICE_PORT=443
KUBERNETES_SERVICE_PORT_HTTPS=443
KUBERNETES_PORT=tcp://10.254.0.1:443
MY_SERVICE_SERVICE_HOST=10.254.20.132
MY_SERVICE_PORT=tcp://10.254.20.132:80
MY_SERVICE_PORT_80_TCP_ADDR=10.254.20.132
KUBERNETES_PORT_443_TCP=tcp://10.254.0.1:443
KUBERNETES_PORT_443_TCP_PORT=443
KUBERNETES_PORT_443_TCP_ADDR=10.254.0.1
MY_SERVICE_PORT_80_TCP_PROTO=tcp
KUBERNETES_SERVICE_HOST=10.254.0.1
MY_SERVICE_SERVICE_PORT=80
MY_SERVICE_PORT_80_TCP=tcp://10.254.20.132:80
NGINX_VERSION=1.13.6-1~stretch
NJS_VERSION=1.13.6.0.1.14-1~stretch
HOME=/root
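Since Kubernetes 1.6 all keys of a ConfigMap can also be imported at once with envFrom instead of listing each key individually; a minimal sketch of the container section, reusing cm-21 (the PANJB_ prefix is optional and only illustrative):
      containers:
      - name: myngin
        image: 192.168.19.124/test/nginx:1.0
        ports:
        - containerPort: 80
        # import every key in cm-21 as an environment variable;
        # the optional prefix is prepended to each variable name
        envFrom:
        - prefix: PANJB_
          configMapRef:
            name: cm-21
With this form the Pod would see PANJB_name=panjunbai and PANJB_age=999 instead of the hand-picked panjbname/panjbage variables above.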
Mounting ConfigMap values into a container as files through a volume
[root@192 configmap]# cat /etc/kubernetes/controller-manager
###
# The following values are used to configure the kubernetes controller-manager
# defaults from config and apiserver should be adequate
# Add your own!
KUBE_CONTROLLER_MANAGER_ARGS="--address=127.0.0.1 --service-cluster-ip-range=10.254.0.0/16 --cluster-cidr=10.250.0.0/16 --cluster-name=kubernetes --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem --root-ca-file=/etc/kubernetes/ssl/ca.pem --leader-elect=true"
[root@192 configmap]# kubectl create cm manager --from-file=/etc/kubernetes/controller-manager
configmap "manager" created
[root@192 configmap]# cat rc.yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: my-web
spec:
  replicas: 1
  selector:
    app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: myngin
        image: 192.168.19.124/test/nginx:1.0
        ports:
        - containerPort: 80
        volumeMounts:
        - name: cm
          mountPath: /root/pan
      volumes:
      - name: cm
        configMap:
          name: manager
          items:
          - key: controller-manager
            path: panjunba
[root@192 configmap]# kubectl apply -f rc.yaml
replicationcontroller "my-web" created
[root@192 configmap]# kubectl get pod
NAME READY STATUS RESTARTS AGE
my-web-dqsld 1/1 Running 0 3s
[root@192 configmap]# kubectl exec my-web-dqsld -- cat /root/pan/panjunba
###
# The following values are used to configure the kubernetes controller-manager
# defaults from config and apiserver should be adequate
# Add your own!
KUBE_CONTROLLER_MANAGER_ARGS="--address=127.0.0.1 --service-cluster-ip-range=10.254.0.0/16 --cluster-cidr=10.250.0.0/16 --cluster-name=kubernetes --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem --root-ca-file=/etc/kubernetes/ssl/ca.pem --leader-elect=true"
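The items list in the volume definition is optional. If it is omitted, every key in the ConfigMap shows up under the mount path as a file named after the key; a sketch of the shorter volume form, again using the manager ConfigMap:
      volumes:
      - name: cm
        configMap:
          # without items, each key (here controller-manager)
          # becomes a file of that name under the mount path
          name: manager
Unlike environment variables, which are only read when the container starts, files mounted from a ConfigMap volume are eventually refreshed inside the running Pod after the ConfigMap is updated.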