Ansible playbook configuration for installing etcd
1. Directory structure
roles/etcd
├── defaults
│   └── main.yml
├── files
│   ├── jwt_RS256
│   └── jwt_RS256.pub
├── README.md
├── tasks
│   ├── configure.yml
│   ├── install.yml
│   └── main.yml
└── templates
    ├── etcd.conf.j2
    └── etcd.service.j2
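For reference, a skeleton with this layout can be scaffolded with ansible-galaxy and then trimmed down; the generated role also contains handlers/, meta/, vars/ and tests/ directories that this role does not use:
```
# create roles/etcd with the standard role skeleton, then delete the unused directories
ansible-galaxy init --init-path roles etcd
```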
2. Main configuration (role defaults)
etcd]# more etcd/defaults/main.yml
---
software_files_path: "/usr/local"
software_install_path: "/usr/local/etcd"
etcd_version: "3.3.22"
etcdctl_api_version: "3"
etcd_file: "etcd-v{{ etcd_version }}-linux-amd64.tar.gz"
etcd_file_path: "{{ software_files_path }}/{{ etcd_file }}"
etcd_file_url: "https://github.com/coreos/etcd/releases/download/v{{ etcd_version }}/{{ etcd_file }}"
etcd_port: 2379
etcd_peer_port: 2380
etcd_home: "/data/etcd_data"
etcd_datadir: "{{ etcd_home }}/data"
etcd_confdir: "{{ etcd_home }}/conf"
etcd_wardir: "{{ etcd_home }}/war"
etcd_jwtkey: "{{ etcd_home }}/key"
etcd_user: "apache"
etcd_name: "etcd_cluster_0"
etcd_service_name: "etcd{% if etcd_port != 2379 %}{{ etcd_port }}{% endif %}"
etcd_initial_advertise_peer_urls: "http://{{ ansible_default_ipv4.address }}:{{ etcd_peer_port }}"
etcd_listen_peer_urls: "http://{{ ansible_default_ipv4.address }}:{{ etcd_peer_port }}"
etcd_listen_client_urls: "http://{{ ansible_default_ipv4.address }}:{{ etcd_port }},http://127.0.0.1:{{ etcd_port }}"
etcd_advertise_client_urls: "http://{{ ansible_default_ipv4.address }}:{{ etcd_port }}"
etcd_trusted_ca_file: false
etcd_cert_file: false
etcd_key_file: false
etcd_peer_trusted_ca_file: false
etcd_peer_cert_file: false
etcd_peer_key_file: false
etcd_auto_tls: false
etcd_discovery: false
etcd_initial_cluster: false
etcd_initial_cluster_token: "etcd_cluster"
etcd_proxy: false
etcd_force_new_cluster: false
etcd_debug: false
etcd_gomaxprocs: 1
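Everything above is a role default, so any value can be overridden per play (as in the entry playbook in section 3) or ad hoc with -e. A sketch, assuming the example inventory file from below; the version and path shown are illustrative, not part of the role:
```
# override the etcd version and data directory for a single run
ansible-playbook -i etcd_test_hosts etcd_cluster.yml \
  -e 'etcd_version=3.3.25' -e 'etcd_home=/data1/etcd_data'
```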
# Key files (JWT RS256 key pair)
etcd]# more etcd/files/jwt_RS256
-----BEGIN RSA PRIVATE KEY-----
MIIJKQIBAAKCAgEAqPkVQJ2ATlOk8p13rvExMWYH+i7b8keQea6CEGOdd+EPLQlq
SJFf5FxkQ3TVF5m3SUP5smMqWVGV9AizQfcV7UpQFkAzcDmP5NODZDqVHBSZku5n
+zw793eh6qxw1Swxjvn57b+BBvWmLzhFuZZWrD9dajt+0xztkDR/pbx8iQ3te3X4
Vq3iYr9IgqbkrnFb8FoL4sE3BE0PMtiMRln7S5WTel9gT3ruiuoT7E80MeDDTBhW
RwbaZXBeuDWyZJt9HjOEkwpDyCOrILR5dslhJICQoT6/LAu14MRg5uHlPglJUbDU
Zri/2ImG3xEqMz24RhTzlCEM2GY6m/hEunQSl4vIvpMDXHxji+AWeq5v78zT18cs
rxLHoVC7SCBk3hZZZS41grFi5nY6bIA++DWK0AN+M1a2rgkNVEQ5GK67vA+iar9A
uRH9MV6vMIkEqdEQ5RTmbzHHC9jwX05fdKfx338Y1gpWrS7ITEkBF1aG0mIbjAn4
xxx
-----END RSA PRIVATE KEY-----

etcd]# more etcd/files/jwt_RS256.pub
-----BEGIN PUBLIC KEY-----
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAqPkVQJ2ATlOk8p13rvEx
MWYH+i7b8keQea6CEGOdd+EPLQlqSJFf5FxkQ3TVF5m3SUP5smMqWVGV9AizQfcV
xxx
-----END PUBLIC KEY-----
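The key material above is truncated ("xxx"); the generation commands are at the end of this post. A quick way to confirm the two files really are a matching pair is to compare the RSA moduli:
```
# the two digests must be identical
openssl rsa -in jwt_RS256 -noout -modulus | openssl md5
openssl rsa -pubin -in jwt_RS256.pub -noout -modulus | openssl md5
```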
etcd]# more etcd/tasks/configure.yml
---
- name: configure | Create etcd directories.
  file: path={{ item }} state=directory owner={{ etcd_user }} group={{ etcd_user }}
  with_items:
    - "{{ etcd_datadir }}"
    - "{{ etcd_wardir }}"
    - "{{ etcd_confdir }}"
    - "{{ etcd_jwtkey }}"

- name: Copy jwt private key file
  copy:
    src: jwt_RS256
    dest: "{{ etcd_jwtkey }}/jwt_RS256"
    owner: "{{ etcd_user }}"
    group: "{{ etcd_user }}"
    mode: '0644'

- name: Copy jwt public key file
  copy:
    src: jwt_RS256.pub
    dest: "{{ etcd_jwtkey }}/jwt_RS256.pub"
    owner: "{{ etcd_user }}"
    group: "{{ etcd_user }}"
    mode: '0644'

- name: configure | Setup etcd.conf file.
  template:
    dest: "{{ etcd_confdir }}/etcd.conf"
    mode: 0644
    src: etcd.conf.j2
    owner: "{{ etcd_user }}"
    group: "{{ etcd_user }}"

- block:
    - name: configure | Setup systemd etcd service.
      template:
        dest: "/usr/lib/systemd/system/{{ etcd_service_name }}.service"
        mode: 0644
        src: etcd.service.j2

    - name: "configure | Ensure {{ etcd_service_name }} is running for centos7."
      systemd: "name={{ etcd_service_name }} state=started enabled=true"
      ignore_errors: true

    - name: "configure | Wait until port {{ etcd_port }} is open."
      wait_for: port={{ etcd_port }}
  when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"

- name: configure | Change the initial cluster state to existing.
  replace:
    path: "{{ etcd_confdir }}/etcd.conf"
    regexp: "initial-cluster-state: 'new'"
    replace: "initial-cluster-state: 'existing'"
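After configure.yml has run, a quick spot check on a target host (paths assume the defaults etcd_home=/data/etcd_data and etcd_user=apache from the role defaults above):
```
ls -ld /data/etcd_data/{data,war,conf,key}   # all owned by apache:apache
ls -l /data/etcd_data/key                    # jwt_RS256 and jwt_RS256.pub
more /data/etcd_data/conf/etcd.conf
```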
etcd]# more etcd/tasks/install.yml
---
- name: install | Check if etcd local file is already configured.
  stat: path={{ etcd_file_path }}
  connection: local
  register: etcd_file_result

- name: install | Create software directory.
  file: path={{ software_files_path }} state=directory
  connection: local
  when: not etcd_file_result.stat.exists

- name: install | Download etcd file.
  get_url: url={{ etcd_file_url }} dest={{ software_files_path }}
  connection: local
  when: not etcd_file_result.stat.exists

- name: install | Confirm the existence of the installation directory.
  file: path={{ software_install_path }} state=directory

- name: install | Copy etcd file to agent.
  unarchive:
    src: "{{ etcd_file_path }}"
    dest: "{{ software_install_path }}"
    creates: "{{ software_install_path }}/etcd-v{{ etcd_version }}-linux-amd64"

- name: install | Check if etcd remote soft link is already configured.
  stat: path="{{ software_install_path }}/etcd"
  register: etcd_soft_link_result

- name: install | Create etcd dir soft link.
  file: "src={{ software_install_path }}/etcd-v{{ etcd_version }}-linux-amd64 dest={{ software_install_path }}/etcd state=link"
  when: not etcd_soft_link_result.stat.exists

- name: install | Config environment variable.
  lineinfile: dest=/etc/profile line='export PATH={{ software_install_path }}/etcd:$PATH'

- name: install | Config etcdctl API version environment variable.
  lineinfile: dest=/etc/profile line='export ETCDCTL_API={{ etcdctl_api_version }}'

- name: install | Create etcd group.
  group: name={{ etcd_user }}

- name: install | Create etcd user.
  user: name={{ etcd_user }} group=users shell=/bin/bash
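To verify the install tasks on a target host (assuming the default software_install_path and a shell that re-reads /etc/profile):
```
ls -l /usr/local/etcd/etcd        # symlink to etcd-v3.3.22-linux-amd64
source /etc/profile
etcd --version
etcdctl version                   # ETCDCTL_API=3 is exported via /etc/profile
```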
# Installation and configuration entry point
# more etcd/tasks/main.yml
---
- import_tasks: install.yml
- import_tasks: configure.yml

# Template configuration
etcd]# more etcd/templates/etcd.conf.j2
# This is the configuration file for the etcd server.

# Human-readable name for this member.
name: '{{ etcd_name }}'

# Path to the data directory.
data-dir: {{ etcd_datadir }}

# Path to the dedicated wal directory.
wal-dir: {{ etcd_wardir }}

# Number of committed transactions to trigger a snapshot to disk.
snapshot-count: 10000

# Time (in milliseconds) of a heartbeat interval.
heartbeat-interval: 100

# Time (in milliseconds) for an election to timeout.
election-timeout: 1000

# Raise alarms when backend size exceeds the given quota. 0 means use the
# default quota.
quota-backend-bytes: 0

# List of comma separated URLs to listen on for peer traffic.
listen-peer-urls: {{ etcd_listen_peer_urls }}

# List of comma separated URLs to listen on for client traffic.
listen-client-urls: {{ etcd_listen_client_urls }}

# Maximum number of snapshot files to retain (0 is unlimited).
max-snapshots: 5

# Maximum number of wal files to retain (0 is unlimited).
max-wals: 5

# Comma-separated white list of origins for CORS (cross-origin resource sharing).
cors:

# List of this member's peer URLs to advertise to the rest of the cluster.
# The URLs needed to be a comma-separated list.
initial-advertise-peer-urls: {{ etcd_initial_advertise_peer_urls }}

# List of this member's client URLs to advertise to the public.
# The URLs needed to be a comma-separated list.
advertise-client-urls: {{ etcd_advertise_client_urls }}

{% if etcd_discovery %}
# Discovery URL used to bootstrap the cluster.
discovery: {{ etcd_discovery }}

# Valid values include 'exit', 'proxy'
discovery-fallback: 'proxy'

# HTTP proxy to use for traffic to discovery service.
discovery-proxy:

# DNS domain used to bootstrap initial cluster.
discovery-srv:
{% endif %}

{% if etcd_initial_cluster %}
# Initial cluster configuration for bootstrapping.
initial-cluster: {{ etcd_initial_cluster }}

# Initial cluster token for the etcd cluster during bootstrap.
initial-cluster-token: '{{ etcd_initial_cluster_token }}'

# auth token
auth-token: 'jwt,pub-key={{ etcd_jwtkey }}/jwt_RS256.pub,priv-key={{ etcd_jwtkey }}/jwt_RS256,sign-method=RS256'

# Initial cluster state ('new' or 'existing').
initial-cluster-state: 'new'
{% endif %}

# Reject reconfiguration requests that would cause quorum loss.
strict-reconfig-check: false

# Accept etcd V2 client requests
enable-v2: true

{% if etcd_proxy %}
# Valid values include 'on', 'readonly', 'off'
proxy: 'on'

# Time (in milliseconds) an endpoint will be held in a failed state.
proxy-failure-wait: 5000

# Time (in milliseconds) of the endpoints refresh interval.
proxy-refresh-interval: 30000

# Time (in milliseconds) for a dial to timeout.
proxy-dial-timeout: 1000

# Time (in milliseconds) for a write to timeout.
proxy-write-timeout: 5000

# Time (in milliseconds) for a read to timeout.
proxy-read-timeout: 0
{% endif %}

client-transport-security:
{% if etcd_trusted_ca_file %}
  # DEPRECATED: Path to the client server TLS CA file.
  ca-file: {{ etcd_trusted_ca_file }}
{% endif %}
{% if etcd_cert_file %}
  # Path to the client server TLS cert file.
  cert-file: {{ etcd_cert_file }}
{% endif %}
{% if etcd_key_file %}
  # Path to the client server TLS key file.
  key-file: {{ etcd_key_file }}
{% endif %}
{% if etcd_trusted_ca_file and etcd_cert_file and etcd_key_file %}
  # Enable client cert authentication.
  client-cert-auth: true
{% endif %}
{% if etcd_trusted_ca_file %}
  # Path to the client server TLS trusted CA key file.
  trusted-ca-file: {{ etcd_trusted_ca_file }}
{% endif %}
{% if etcd_auto_tls %}
  # Client TLS using generated certificates
  auto-tls: {{ etcd_auto_tls }}
{% endif %}

peer-transport-security:
{% if etcd_peer_trusted_ca_file %}
  # DEPRECATED: Path to the peer server TLS CA file.
  ca-file: {{ etcd_peer_trusted_ca_file }}
{% endif %}
{% if etcd_peer_cert_file %}
  # Path to the peer server TLS cert file.
  cert-file: {{ etcd_peer_cert_file }}
{% endif %}
{% if etcd_peer_key_file %}
  # Path to the peer server TLS key file.
  key-file: {{ etcd_peer_key_file }}
{% endif %}
{% if etcd_peer_trusted_ca_file and etcd_peer_key_file and etcd_peer_cert_file %}
  # Enable peer client cert authentication.
  client-cert-auth: true
{% endif %}
{% if etcd_peer_trusted_ca_file %}
  # Path to the peer server TLS trusted CA key file.
  trusted-ca-file: {{ etcd_peer_trusted_ca_file }}
{% endif %}
{% if etcd_auto_tls %}
  # Peer TLS using generated certificates.
  auto-tls: {{ etcd_auto_tls }}
{% endif %}

# Enable debug-level logging for etcd.
debug: {{ etcd_debug }}

# Specify a particular log level for each etcd package (eg: 'etcdmain=CRITICAL,etcdserver=DEBUG').
log-package-levels:

# Force to create a new one member cluster.
force-new-cluster: {{ etcd_force_new_cluster }}
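The rendered file is plain YAML, so a quick syntax check on a target host catches Jinja mistakes early; a sketch, assuming Python with PyYAML is available on the host:
```
python -c "import yaml; yaml.safe_load(open('/data/etcd_data/conf/etcd.conf')); print('etcd.conf: OK')"
```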
# systemd service unit
etcd]# more etcd/templates/etcd.service.j2
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory={{ etcd_datadir }}
User={{ etcd_user }}
TimeoutStartSec=10
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS={{ etcd_gomaxprocs }} {{ software_install_path }}/etcd/etcd --config-file={{ etcd_confdir }}/etcd.conf"
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
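Once the unit file is deployed (the service name is plain "etcd" when etcd_port is left at the default 2379), the usual systemd checks apply on the target host:
```
systemctl daemon-reload
systemctl cat etcd
systemctl status etcd --no-pager
journalctl -u etcd -n 20 --no-pager
```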
3. Entry playbook
ansible]# more etcd_cluster.yml
## Example: ansible-playbook --ask-vault-pass -i online/yt etcd_cluster.yml
#
# Note: the value of etcd_name must match the member name used in etcd_initial_cluster.
# e.g. etcd_initial_cluster: "custorm-etcd01=http://x.x.x.x:2380,custorm-etcd02=http://x.x.x.x:2380,custorm-etcd03=http://x.x.x.x:2380"
#      etcd_name: "custorm-etcd01"
- hosts: szlocal_etcd01
  vars:
    - etcd_initial_cluster: "szlocal_etcd01=http://10.10.18.214:2380,szlocal_etcd02=http://192.168.11.36:2380,szlocal_etcd03=http://10.10.17.14:2380"
  roles:
    - { role: etcd, etcd_name: "szlocal_etcd01" }

- hosts: szlocal_etcd02
  vars:
    - etcd_initial_cluster: "szlocal_etcd01=http://10.10.18.214:2380,szlocal_etcd02=http://192.168.11.36:2380,szlocal_etcd03=http://10.10.17.14:2380"
  roles:
    - { role: etcd, etcd_name: "szlocal_etcd02" }

- hosts: szlocal_etcd03
  vars:
    - etcd_initial_cluster: "szlocal_etcd01=http://10.10.18.214:2380,szlocal_etcd02=http://192.168.11.36:2380,szlocal_etcd03=http://10.10.17.14:2380"
  roles:
    - { role: etcd, etcd_name: "szlocal_etcd03" }
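Before a real run it is worth validating the playbook against the test inventory shown below; note that --check is only a rough dry run here, since the download and unarchive tasks do not behave fully in check mode:
```
ansible-playbook --syntax-check -i etcd_test_hosts etcd_cluster.yml
ansible-playbook -i etcd_test_hosts etcd_cluster.yml --check --diff
```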
# Host inventory
ansible]# more etcd_test_hosts
[szlocal_etcd]
# test_etcd
szlocal_etcd01 ansible_host=10.10.18.214 ansible_port=2016 ansible_user=root ansible_ssh_pass='pass'
szlocal_etcd02 ansible_host=192.168.11.36 ansible_port=2016 ansible_user=root ansible_ssh_pass='pass'
szlocal_etcd03 ansible_host=10.10.17.14 ansible_port=2016 ansible_user=root ansible_ssh_pass='pass'
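The role builds every URL from ansible_default_ipv4.address, so it pays to confirm connectivity and that fact before running the play:
```
ansible -i etcd_test_hosts szlocal_etcd -m ping
ansible -i etcd_test_hosts szlocal_etcd -m setup -a 'filter=ansible_default_ipv4'
```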
4. Common etcd commands
# Check etcd cluster health
```
export ETCDCTL_API=3

# with authentication
etcdctl --endpoints='http://172.18.44.39:2379,http://172.18.54.153:2379,http://172.18.54.154:2379' --user=root:111 endpoint health

# without authentication
etcdctl --endpoints='http://172.25.130.150:2379,http://172.25.130.153:2379,http://172.25.130.154:2379' endpoint health
etcdctl --endpoints='http://172.30.0.7:2379,http://172.30.0.8:2379,http://172.30.0.11:2379' endpoint health
```

# Enable user authentication (only needs to be run on any one member of the cluster)
Create the root user:
```
etcdctl user add root
etcdctl --endpoints=http://127.0.0.1:2379 user add root    # you will be prompted for a password
```
List users:
```
# without authentication
etcdctl user list
# with authentication
etcdctl --user root:123456 user list
```

# Grant roles to users
```
etcdctl user grant-role root root
etcdctl user grant-role core_srv_user root
etcdctl --endpoints=http://127.0.0.1:2379 --user=root:'123456' user add core_srv_user
```

# Authentication is disabled by default, so it must be enabled explicitly
```
etcdctl auth enable
```

# Test writing, reading and deleting a key
```
etcdctl --endpoints=http://127.0.0.1:2379 --user=root:123456 put /test 'test'
etcdctl --endpoints=http://127.0.0.1:2379 get /test
etcdctl --endpoints=http://127.0.0.1:2379 del /test
```
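Two more status commands that are handy alongside endpoint health (same ETCDCTL_API=3 assumption; the password is the example one used above, and once auth enable has been run the --user flag is needed on both):
```
etcdctl --endpoints=http://127.0.0.1:2379 member list
etcdctl --endpoints=http://127.0.0.1:2379 --user=root:123456 endpoint status --write-out=table
```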
# Install etcd with the playbook
ansible-playbook --ask-vault-pass -i online/yt etcd_cluster.yml
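To re-run the playbook against only one of the plays' hosts (for example when reworking a single member), the same command can be limited:
```
ansible-playbook --ask-vault-pass -i online/yt etcd_cluster.yml --limit szlocal_etcd03
```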
Key generation:
# gen key pair for RS256
openssl genrsa -out jwt_RS256 4096
openssl rsa -in jwt_RS256 -pubout > jwt_RS256.pub
etcd server flags
--auth-token=jwt,pub-key=/srv/jwt_RS256.pub,priv-key=/srv/jwt_RS256,sign-method=RS256
https://github.com/etcd-io/etcd/issues/10144