controller node  2C6G  ens33: 192.168.223.201
                       ens34: 192.168.209.201
compute node     1C2G  ens33: 192.168.223.210
                       ens34: 192.168.209.210
CPU virtualization support (VT-x/AMD-V) must be enabled on the compute node
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-8.repo
yum makecache
yum install -y vim net-tools
systemctl stop firewalld.service && systemctl disable firewalld.service
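# The hostname "controller" is used throughout; if DNS does not resolve it,
# add host entries on every node (the "compute" hostname below is an
# assumption, adjust to your actual hostnames):
cat >> /etc/hosts <<EOF
192.168.223.201 controller
192.168.223.210 compute
EOF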
# Disable swap
swapoff -a && sed -ri '/ swap / s/^(.*)$/#\1/g' /etc/fstab
# Disable SELinux
setenforce 0 && sed -i 's/^SELINUX=.*$/SELINUX=disabled/' /etc/selinux/config
# Configure time synchronization (chrony)
vim /etc/chrony.conf
# controller node:
server ntp6.aliyun.com iburst
allow 192.168.0.0/16
# other nodes:
server controller iburst
# all nodes:
systemctl restart chronyd && systemctl enable chronyd
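# Optional check: confirm each node is actually syncing from its configured
# source:
chronyc sources -v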
# Install the OpenStack repository and related packages
yum install centos-release-openstack-ussuri -y
yum config-manager --set-enabled powertools  # the repo ID is "PowerTools" on CentOS 8.2 and earlier
dnf install https://www.rdoproject.org/repos/rdo-release.el8.rpm -y
yum upgrade -y && yum install python3-openstackclient openstack-selinux -y
# controller node
# Install the database (MariaDB)
yum install mariadb mariadb-server python3-PyMySQL -y
cat <<EOF> /etc/my.cnf.d/openstack.cnf
[mysqld]
bind-address = 192.168.223.201
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
EOF
systemctl enable mariadb.service && systemctl start mariadb.service
# Secure the database installation (sets the root password, removes test data)
mysql_secure_installation
# Install the message queue (RabbitMQ)
yum install rabbitmq-server -y
systemctl enable rabbitmq-server.service && systemctl start rabbitmq-server.service
rabbitmqctl add_user openstack openstack
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
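# Optional check: the openstack user and its permissions should now be listed:
rabbitmqctl list_users
rabbitmqctl list_permissions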
# Install the memcached cache
yum install memcached python3-memcached -y
vim /etc/sysconfig/memcached
OPTIONS="-l 127.0.0.1,::1,controller"
systemctl enable memcached.service && systemctl start memcached.service
# Install etcd
yum install etcd -y
cp /etc/etcd/etcd.conf /etc/etcd/etcd.conf.bak
cat <<EOF> /etc/etcd/etcd.conf
#[Member]
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://192.168.223.201:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.223.201:2379"
ETCD_NAME="controller"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.223.201:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.223.201:2379"
ETCD_INITIAL_CLUSTER="controller=http://192.168.223.201:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
systemctl enable etcd && systemctl start etcd
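# Optional check (assumes the etcd v3 API): query health over the client URL
# configured above:
ETCDCTL_API=3 etcdctl --endpoints=http://192.168.223.201:2379 endpoint health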
# Create the service databases and grant privileges
mysql
CREATE DATABASE keystone;
CREATE DATABASE glance;
CREATE DATABASE placement;
CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;
CREATE DATABASE neutron;
CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY '123456';
FLUSH PRIVILEGES;
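# Optional check: each service account should be able to log in with the
# password granted above, e.g. for keystone:
mysql -h controller -u keystone -p123456 -e 'SHOW DATABASES;'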
# Install the Keystone identity service
yum install openstack-keystone httpd python3-mod_wsgi -y
cp /etc/keystone/keystone.conf /etc/keystone/keystone.conf.bak
grep -Ev '^$|#' /etc/keystone/keystone.conf.bak > /etc/keystone/keystone.conf
vim /etc/keystone/keystone.conf
[database]
connection = mysql+pymysql://keystone:123456@controller/keystone
[token]
provider = fernet
su -s /bin/sh -c "keystone-manage db_sync" keystone
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
keystone-manage bootstrap --bootstrap-password 123456 \
  --bootstrap-admin-url http://controller:5000/v3/ \
  --bootstrap-internal-url http://controller:5000/v3/ \
  --bootstrap-public-url http://controller:5000/v3/ \
  --bootstrap-region-id RegionOne
vim /etc/httpd/conf/httpd.conf
ServerName controller
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
systemctl enable httpd.service && systemctl start httpd.service
cat <<EOF> /root/admin-openrc
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=123456
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
EOF
. admin-openrc
openstack project create --domain default --description "Service Project" service
# Verify the Keystone installation
openstack token issue
# Install the Glance image service
openstack user create --domain default --password-prompt glance
openstack role add --project service --user glance admin
openstack service create --name glance --description "OpenStack Image" image
openstack endpoint create --region RegionOne image public http://controller:9292
openstack endpoint create --region RegionOne image internal http://controller:9292
openstack endpoint create --region RegionOne image admin http://controller:9292
yum install openstack-glance -y
cp /etc/glance/glance-api.conf /etc/glance/glance-api.conf.bak
grep -Ev '^$|#' /etc/glance/glance-api.conf.bak > /etc/glance/glance-api.conf
vim /etc/glance/glance-api.conf
[database]
connection = mysql+pymysql://glance:123456@controller/glance
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = 123456
[paste_deploy]
flavor = keystone
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
su -s /bin/sh -c "glance-manage db_sync" glance
systemctl enable openstack-glance-api.service && systemctl start openstack-glance-api.service
wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
glance image-create --name "cirros" \
  --file cirros-0.4.0-x86_64-disk.img \
  --disk-format qcow2 --container-format bare \
  --visibility=public
glance image-list
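# Optional check: the image should also exist as a file in the filesystem
# store configured above:
ls -lh /var/lib/glance/images/
openstack image list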
# Install the Placement service
openstack user create --domain default --password-prompt placement
openstack role add --project service --user placement admin
openstack service create --name placement --description "Placement API" placement
openstack endpoint create --region RegionOne placement public http://controller:8778
openstack endpoint create --region RegionOne placement internal http://controller:8778
openstack endpoint create --region RegionOne placement admin http://controller:8778
yum install openstack-placement-api -y
cp /etc/placement/placement.conf /etc/placement/placement.conf.bak
grep -Ev '^$|#' /etc/placement/placement.conf.bak > /etc/placement/placement.conf
vim /etc/placement/placement.conf
[placement_database]
connection = mysql+pymysql://placement:123456@controller/placement
[api]
auth_strategy = keystone
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = 123456
su -s /bin/sh -c "placement-manage db sync" placement
systemctl restart httpd
# Verify the Placement service
placement-status upgrade check
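# Optional check: the Placement API should answer on port 8778 with a JSON
# version document:
curl http://controller:8778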
# Install the Nova compute service
# controller node
openstack user create --domain default --password-prompt nova
openstack role add --project service --user nova admin
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
yum install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler -y
cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
grep -Ev '^$|#' /etc/nova/nova.conf.bak > /etc/nova/nova.conf
vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:openstack@controller:5672/
my_ip = 192.168.223.201
[api_database]
connection = mysql+pymysql://nova:123456@controller/nova_api
[database]
connection = mysql+pymysql://nova:123456@controller/nova
[api]
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = 123456
[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = 123456
# Grant access to the Placement WSGI scripts (commonly added inside the <VirtualHost> block):
vim /etc/httpd/conf.d/00-placement-api.conf
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
systemctl restart httpd
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova
#Verify nova cell0 and cell1 are registered correctly
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
systemctl enable openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl start openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
# compute node
yum install openstack-nova-compute -y
cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
grep -Ev '^$|#' /etc/nova/nova.conf.bak > /etc/nova/nova.conf
vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:openstack@controller
my_ip = 192.168.223.210
[api]
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = 123456
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = 123456
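# If the compute node has no hardware virtualization support (the count below
# is 0), set virt_type = qemu in a [libvirt] section of /etc/nova/nova.conf so
# instances fall back to full emulation:
egrep -c '(vmx|svm)' /proc/cpuinfo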
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service
# Run on the controller node: confirm the compute service is up
openstack compute service list --service nova-compute
# Register the compute node in the cell database
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
# Periodically auto-discover new compute nodes
vim /etc/nova/nova.conf
[scheduler]
discover_hosts_in_cells_interval = 300
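# Restart the scheduler so the new interval takes effect:
systemctl restart openstack-nova-scheduler.service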
# Verify
openstack compute service list
openstack catalog list
nova-status upgrade check
# Install the Neutron networking service
# controller node
openstack user create --domain default --password-prompt neutron
openstack role add --project service --user neutron admin
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --region RegionOne network public http://controller:9696
openstack endpoint create --region RegionOne network internal http://controller:9696
openstack endpoint create --region RegionOne network admin http://controller:9696
# Configure networking
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables -y
cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
grep -Ev '^$|#' /etc/neutron/neutron.conf.bak > /etc/neutron/neutron.conf
vim /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true
transport_url = rabbit://openstack:openstack@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[database]
connection = mysql+pymysql://neutron:123456@controller/neutron
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = 123456
[nova]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = 123456
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
cp /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.bak
grep -Ev '^$|#' /etc/neutron/plugins/ml2/ml2_conf.ini.bak > /etc/neutron/plugins/ml2/ml2_conf.ini
vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[ml2_type_vxlan]
vni_ranges = 1:1000
[securitygroup]
enable_ipset = true
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
grep -Ev '^$|#' /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:ens34
[vxlan]
enable_vxlan = true
local_ip = 192.168.209.201
l2_population = true
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
cat >> /etc/sysctl.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
modprobe br_netfilter
sysctl -p /etc/sysctl.conf
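# Optional: persist the module load across reboots (systemd-modules-load is
# standard on CentOS 8):
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf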
vim /etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = linuxbridge
vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
vim /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = 123456
vim /etc/nova/nova.conf
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = 123456
service_metadata_proxy = true
metadata_proxy_shared_secret = 123456
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf
--config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
systemctl restart openstack-nova-api.service
systemctl enable neutron-server.service \
  neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
  neutron-metadata-agent.service
systemctl start neutron-server.service \
  neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
  neutron-metadata-agent.service
systemctl enable neutron-l3-agent.service
systemctl start neutron-l3-agent.service
# Network service on the compute node
yum install openstack-neutron-linuxbridge ebtables ipset -y
cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
grep -Ev '^$|#' /etc/neutron/neutron.conf.bak > /etc/neutron/neutron.conf
vim /etc/neutron/neutron.conf
[DEFAULT]
transport_url = rabbit://openstack:openstack@controller
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = 123456
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
grep -Ev '^$|#' /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:ens34
[vxlan]
enable_vxlan = true
local_ip = 192.168.209.210
l2_population = true
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
cat >> /etc/sysctl.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
modprobe br_netfilter
sysctl -p /etc/sysctl.conf
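# As on the controller, optionally persist the module load across reboots:
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf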
vim /etc/nova/nova.conf
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = 123456
systemctl restart openstack-nova-compute.service
systemctl enable neutron-linuxbridge-agent.service
systemctl start neutron-linuxbridge-agent.service
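# Verify on the controller: all agents, including this node's linuxbridge
# agent, should report alive:
openstack network agent list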
# Install the Dashboard (Horizon) on the controller node
yum install openstack-dashboard -y
vim /etc/openstack-dashboard/local_settings
OPENSTACK_HOST = "controller"
WEBROOT='/dashboard/'
ALLOWED_HOSTS = ['*']
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'controller:11211',
}
}
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_API_VERSIONS = {
"identity": 3,
"image": 2,
"volume": 3,
}
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
TIME_ZONE = "Asia/Shanghai"
vim /etc/httpd/conf.d/openstack-dashboard.conf
WSGIApplicationGroup %{GLOBAL}
systemctl restart httpd.service memcached.service
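# Optional check: the dashboard should respond at the WEBROOT configured above:
curl -sL http://controller/dashboard/ | head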
# Install the Cinder block storage service
# controller node
openstack user create --domain default --password-prompt cinder
openstack role add --project service --user cinder admin
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
openstack endpoint create --region RegionOne volumev2 public http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 internal http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 admin http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 public http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 internal http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 admin http://controller:8776/v3/%\(project_id\)s
yum install openstack-cinder -y
cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
grep -Ev '^$|#' /etc/cinder/cinder.conf.bak > /etc/cinder/cinder.conf
vim /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:openstack@controller
auth_strategy = keystone
my_ip = 192.168.223.201
[database]
connection = mysql+pymysql://cinder:123456@controller/cinder
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = 123456
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
su -s /bin/sh -c "cinder-manage db sync" cinder
vim /etc/nova/nova.conf
[cinder]
os_region_name = RegionOne
systemctl restart openstack-nova-api.service
systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
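# Optional check: cinder-scheduler should be listed as up:
openstack volume service list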
# compute node
yum install lvm2 device-mapper-persistent-data -y
# Note: the two commands below fail on CentOS 8 (lvmetad was removed from lvm2); they can be skipped
systemctl enable lvm2-lvmetad.service
systemctl start lvm2-lvmetad.service
pvcreate /dev/sdb
vgcreate cinder-volumes /dev/sdb
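# Optional check: confirm the physical volume and volume group exist:
pvs /dev/sdb
vgs cinder-volumes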
vim /etc/lvm/lvm.conf
devices {
...
filter = [ "a/sdb/", "r/.*/" ]
}
yum install openstack-cinder targetcli python3-keystone -y
cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
grep -Ev '^$|#' /etc/cinder/cinder.conf.bak > /etc/cinder/cinder.conf
vim /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:openstack@controller
auth_strategy = keystone
my_ip = 192.168.223.210
enabled_backends = lvm
glance_api_servers = http://controller:9292
[database]
connection = mysql+pymysql://cinder:123456@controller/cinder
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = 123456
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
target_protocol = iscsi
target_helper = lioadm
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
systemctl enable openstack-cinder-volume.service target.service
systemctl start openstack-cinder-volume.service target.service
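# Final check from the controller: cinder-volume on the compute node should be
# up, and a small test volume should reach "available":
openstack volume service list
openstack volume create --size 1 test-volume
openstack volume list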