1. Using Ceph as the Glance backend
1.1 Create a pool for storing images
[root@serverc ~]# ceph osd pool create images 128 128
pool 'images' created
[root@serverc ~]# ceph osd pool application enable images rbd
enabled application 'rbd' on pool 'images'
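Optionally confirm the pool exists and carries the rbd application tag (an extra check, not part of the original flow; output varies by cluster):
[root@serverc ~]# ceph osd pool ls detail | grep images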
1.2 Create the client.glance account and authorize it
[root@serverc ~]# ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children,allow rwx pool=images' -o /etc/ceph/ceph.client.glance.keyring
[root@serverc ~]# ll /etc/ceph/ceph.client.glance.keyring
-rw-r--r-- 1 root root 64 Mar 31 10:33 /etc/ceph/ceph.client.glance.keyring
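The granted capabilities can be reviewed at any time (an optional check I'm adding, not captured in the original lab):
[root@serverc ~]# ceph auth get client.glance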
1.3 Install the Ceph client on the Glance server
[root@serverb ~]# yum -y install ceph-common
Copy ceph.conf and ceph.client.glance.keyring from the Ceph server to the Glance server:
[root@serverc ceph]# scp -r /etc/ceph/ceph.conf /etc/ceph/ceph.client.glance.keyring serverb:/etc/ceph/
ceph.conf                     100% 1262   2.5MB/s   00:00
ceph.client.glance.keyring    100%   64 153.1KB/s   00:00
1.4 Adjust permissions on the client so the glance service user can read the keyring
[root@serverb ~]# chown glance.glance /etc/ceph/ceph.client.glance.keyring
1.5 Modify the configuration files
Edit /etc/ceph/ceph.conf on the client and point the glance user at its keyring:
[root@serverb ~]# vim /etc/ceph/ceph.conf
[client.glance]
keyring = /etc/ceph/ceph.client.glance.keyring
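Before touching glance itself, it is worth a sanity check that the glance user can reach the cluster (the same kind of check that section 2.6 runs for cinder; output will vary):
[root@serverb ~]# ceph -s --id glance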
Edit /etc/glance/glance-api.conf:
[glance_store]
stores = rbd
default_store = rbd
filesystem_store_datadir = /var/lib/glance/images/
rbd_store_chunk_size = 8
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
os_region_name=RegionOne
[root@serverb ~]# grep -Ev "^$|^[#;]" /etc/glance/glance-api.conf
[DEFAULT]
bind_host = 0.0.0.0
bind_port = 9292
workers = 2
image_cache_dir = /var/lib/glance/image-cache
registry_host = 0.0.0.0
debug = False
log_file = /var/log/glance/api.log
log_dir = /var/log/glance
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://glance:27c082e7c4a9413c@172.25.250.11/glance
[glance_store]
stores = rbd
default_store = rbd
filesystem_store_datadir = /var/lib/glance/images/
rbd_store_chunk_size = 8
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
os_region_name=RegionOne
[image_format]
[keystone_authtoken]
auth_uri = http://172.25.250.11:5000/v2.0
auth_type = password
project_name=services
username=glance
password=99b29d9142514f0f
auth_url=http://172.25.250.11:35357
[matchmaker_redis]
[oslo_concurrency]
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
policy_file = /etc/glance/policy.json
[paste_deploy]
flavor = keystone
[profiler]
[store_type_location_strategy]
[task]
[taskflow_executor]
Restart glance-api:
[root@serverb ~]# systemctl restart openstack-glance-api
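If the restart went cleanly, the API should be listening again on the configured port (9292, per bind_port in the config above); a quick check I'm adding, assuming iproute2 is installed:
[root@serverb ~]# ss -ntlp | grep 9292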
1.6 Verification
Download a test image:
[root@foundation ~]# wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
[root@foundation ~]# scp cirros-0.4.0-x86_64-disk.img root@serverb:/tmp/
[root@serverb ~]# cd /tmp/
[root@serverb tmp]# ll
-rw-r--r--  1 root root 12716032 Mar 31 10:39 cirros-0.4.0-x86_64-disk.img
-rw-r--r--. 1 root root      173 Mar 30 17:54 rht
-rw-r--r--. 1 root root      404 Mar 30 17:54 rht-vm-hosts
-rw-r--r--. 1 root root      180 Mar 30 17:54 rht-wks
drwx------  3 root root       17 Mar 30 17:54 systemd-private-2bf1332dd8ae4d5f95d9f9a64e0ef5ee-chronyd.service-I1ANDV
drwx------  3 root root       17 Mar 30 17:54 systemd-private-2bf1332dd8ae4d5f95d9f9a64e0ef5ee-epmd@0.0.0.0.service-0il3SD
drwx------  3 root root       17 Mar 30 17:54 systemd-private-2bf1332dd8ae4d5f95d9f9a64e0ef5ee-httpd.service-mWaw6A
drwx------  3 root root       17 Mar 30 17:54 systemd-private-2bf1332dd8ae4d5f95d9f9a64e0ef5ee-mariadb.service-xt5VbD
drwx------  3 root root       17 Mar 30 18:29 systemd-private-2bf1332dd8ae4d5f95d9f9a64e0ef5ee-openstack-glance-api.service-RVKYpk
drwx------  3 root root       17 Mar 30 17:54 systemd-private-2bf1332dd8ae4d5f95d9f9a64e0ef5ee-openstack-glance-registry.service-Bc5DYB
[root@serverb tmp]# glance image-list
You must provide a username via either --os-username or env[OS_USERNAME]
[root@serverb ~]# source keystonerc_admin
[root@serverb ~(keystone_admin)]# glance image-list
+----+------+
| ID | Name |
+----+------+
+----+------+
[root@serverb ~(keystone_admin)]# glance image-create --name cirros --file /tmp/cirros-0.4.0-x86_64-disk.img --disk-format qcow2 --container-format bare --progress
[=============================>] 100%
+------------------+--------------------------------------+
| Property         | Value                                |
+------------------+--------------------------------------+
| checksum         | 443b7623e27ecf03dc9e01ee93f67afe     |
| container_format | bare                                 |
| created_at       | 2019-03-30T10:31:44Z                 |
| disk_format      | qcow2                                |
| id               | 79cfc319-f60a-45d4-834f-b70dc20c7975 |
| min_disk         | 0                                    |
| min_ram          | 0                                    |
| name             | cirros                               |
| owner            | 79cf145d371e48ef96f608cbf85d1788     |
| protected        | False                                |
| size             | 12716032                             |
| status           | active                               |
| tags             | []                                   |
| updated_at       | 2019-03-30T10:31:47Z                 |
| virtual_size     | None                                 |
| visibility       | private                              |
+------------------+--------------------------------------+
[root@serverb ~(keystone_admin)]# glance image-create --name cirros --file /tmp/cirros-0.4.0-x86_64-disk.img --disk-format qcow2 --container-format bare --progress
[=============================>] 100%
+------------------+--------------------------------------+
| Property         | Value                                |
+------------------+--------------------------------------+
| checksum         | 443b7623e27ecf03dc9e01ee93f67afe     |
| container_format | bare                                 |
| created_at       | 2019-03-30T01:10:49Z                 |
| disk_format      | qcow2                                |
| id               | ab67abe6-7d65-407f-88e9-7b46d873b477 |
| min_disk         | 0                                    |
| min_ram          | 0                                    |
| name             | cirros                               |
| owner            | 79cf145d371e48ef96f608cbf85d1788     |
| protected        | False                                |
| size             | 12716032                             |
| status           | active                               |
| tags             | []                                   |
| updated_at       | 2019-03-30T01:10:49Z                 |
| virtual_size     | None                                 |
| visibility       | private                              |
+------------------+--------------------------------------+
[root@serverb ~(keystone_admin)]# glance image-list
+--------------------------------------+--------+
| ID                                   | Name   |
+--------------------------------------+--------+
| 79cfc319-f60a-45d4-834f-b70dc20c7975 | cirros |
| ab67abe6-7d65-407f-88e9-7b46d873b477 | cirros |
+--------------------------------------+--------+
1.7 Delete an image (the repeated upload above produced two images both named cirros, so the delete has to reference the ID)
[root@serverb tmp(keystone_admin)]# glance image-delete ab67abe6-7d65-407f-88e9-7b46d873b477
[root@serverb ~(keystone_admin)]# glance image-list
+--------------------------------------+--------+
| ID                                   | Name   |
+--------------------------------------+--------+
| 79cfc319-f60a-45d4-834f-b70dc20c7975 | cirros |
+--------------------------------------+--------+
[root@serverc ~]# rados -p images ls
rbd_object_map.105f76fe073c.0000000000000004
rbd_directory
rbd_data.105f76fe073c.0000000000000001
rbd_info
rbd_id.79cfc319-f60a-45d4-834f-b70dc20c7975
rbd_object_map.105f76fe073c
rbd_data.105f76fe073c.0000000000000000
rbd_header.105f76fe073c
[root@serverc ~]# rbd ls images
79cfc319-f60a-45d4-834f-b70dc20c7975
[root@serverc ~]# rbd info images/79cfc319-f60a-45d4-834f-b70dc20c7975
rbd image '79cfc319-f60a-45d4-834f-b70dc20c7975':
    size 12418 kB in 2 objects
    order 23 (8192 kB objects)
    block_name_prefix: rbd_data.105f76fe073c
    format: 2
    features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
    flags:
    create_timestamp: Sun Mar 31 10:40:38 2019
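A quick consistency check on these numbers: order 23 means 2^23-byte (8192 kB) objects, which matches rbd_store_chunk_size = 8 in glance-api.conf, and the 12716032-byte (12418 kB) image therefore needs ceil(12418/8192) = 2 data objects, exactly the two rbd_data.105f76fe073c.* objects in the rados listing above.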
[root@serverb ~(keystone_admin)]# nova flavor-list
+----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
| ID | Name      | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
+----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
| 1  | m1.tiny   | 512       | 1    | 0         |      | 1     | 1.0         | True      |
| 2  | m1.small  | 2048      | 20   | 0         |      | 1     | 1.0         | True      |
| 3  | m1.medium | 4096      | 40   | 0         |      | 2     | 1.0         | True      |
| 4  | m1.large  | 8192      | 80   | 0         |      | 4     | 1.0         | True      |
| 5  | m1.xlarge | 16384     | 160  | 0         |      | 8     | 1.0         | True      |
+----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
1.8 Boot a Nova instance from the uploaded image
# nova boot --flavor 1 --image <image id> <instance name>
[root@serverb ~(keystone_admin)]# nova boot --flavor 1 --image 3d80ba00-b4c7-4f3c-98b8-17d9fd140216 vm1
+--------------------------------------+-----------------------------------------------+
| Property                             | Value                                         |
+--------------------------------------+-----------------------------------------------+
| OS-DCF:diskConfig                    | MANUAL                                        |
| OS-EXT-AZ:availability_zone          |                                               |
| OS-EXT-SRV-ATTR:host                 | -                                             |
| OS-EXT-SRV-ATTR:hostname             | vm1                                           |
| OS-EXT-SRV-ATTR:hypervisor_hostname  | -                                             |
| OS-EXT-SRV-ATTR:instance_name        |                                               |
| OS-EXT-SRV-ATTR:kernel_id            |                                               |
| OS-EXT-SRV-ATTR:launch_index         | 0                                             |
| OS-EXT-SRV-ATTR:ramdisk_id           |                                               |
| OS-EXT-SRV-ATTR:reservation_id       | r-7ygb36rz                                    |
| OS-EXT-SRV-ATTR:root_device_name     | -                                             |
| OS-EXT-SRV-ATTR:user_data            | -                                             |
| OS-EXT-STS:power_state               | 0                                             |
| OS-EXT-STS:task_state                | scheduling                                    |
| OS-EXT-STS:vm_state                  | building                                      |
| OS-SRV-USG:launched_at               | -                                             |
| OS-SRV-USG:terminated_at             | -                                             |
| accessIPv4                           |                                               |
| accessIPv6                           |                                               |
| adminPass                            | 3j2dpZjCXZn8                                  |
| config_drive                         |                                               |
| created                              | 2019-03-29T12:09:30Z                          |
| description                          | -                                             |
| flavor                               | m1.tiny (1)                                   |
| hostId                               |                                               |
| host_status                          |                                               |
| id                                   | dec39eb4-75f5-47eb-b335-1e2b1833253d          |
| image                                | cirros (3d80ba00-b4c7-4f3c-98b8-17d9fd140216) |
| key_name                             | -                                             |
| locked                               | False                                         |
| metadata                             | {}                                            |
| name                                 | vm1                                           |
| os-extended-volumes:volumes_attached | []                                            |
| progress                             | 0                                             |
| security_groups                      | default                                       |
| status                               | BUILD                                         |
| tags                                 | []                                            |
| tenant_id                            | 79cf145d371e48ef96f608cbf85d1788              |
| updated                              | 2019-03-29T12:09:30Z                          |
| user_id                              | 8e0be34493e04722ba03ab30fbbf3bf8              |
+--------------------------------------+-----------------------------------------------+
[root@serverb ~(keystone_admin)]# nova list
+--------------------------------------+------+--------+------------+-------------+----------------------------+
| ID                                   | Name | Status | Task State | Power State | Networks                   |
+--------------------------------------+------+--------+------------+-------------+----------------------------+
| dec39eb4-75f5-47eb-b335-1e2b1833253d | vm1  | ERROR  | -          | NOSTATE     | novanetwork=192.168.32.255 |
+--------------------------------------+------+--------+------------+-------------+----------------------------+
The instance ends up in the ERROR state; some OpenStack configuration still needs to be completed, and I may update this post with the fix later.
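As a hedged starting point for that follow-up (log paths assume a standard Packstack-style install; these commands are illustrative, not output captured from this lab):
[root@serverb ~(keystone_admin)]# nova show vm1 | grep -A2 fault    # the fault field usually names the failing step
[root@serverb ~(keystone_admin)]# grep -i error /var/log/nova/nova-conductor.log | tail -5
[root@serverb ~(keystone_admin)]# grep -i error /var/log/nova/nova-compute.log | tail -5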
2. Using Ceph as the Cinder backend
[root@serverb ~(keystone_admin)]# cinder list
+----+--------+------+------+-------------+----------+-------------+
| ID | Status | Name | Size | Volume Type | Bootable | Attached to |
+----+--------+------+------+-------------+----------+-------------+
+----+--------+------+------+-------------+----------+-------------+
2.1 Create an RBD pool for Cinder as well
[root@serverc ~]# ceph osd pool create volumes 64 64
pool 'volumes' created
[root@serverc ~]# ceph osd pool application enable volumes rbd
enabled application 'rbd' on pool 'volumes'
2.2 Authorize the client.cinder user
[root@serverc ~]# ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images, allow rwx pool=volumes' -o /etc/ceph/ceph.client.cinder.keyring
[root@serverc ~]# ceph auth get-key client.cinder -o /etc/ceph/temp.client.cinder.key
[root@serverc ~]# cat /etc/ceph/temp.client.cinder.key
AQA5KaBcszk/JxAAPdymqbMzqrfhZ+GyqZgUvg==
[root@serverc ~]# scp -r /etc/ceph/ceph.client.glance.keyring /etc/ceph/ceph.client.cinder.keyring serverb:/etc/ceph/
root@serverb's password:
ceph.client.glance.keyring    100%   64 185.6KB/s   00:00
ceph.client.cinder.keyring    100%   64  89.5KB/s   00:00
2.3 Install the Ceph client on the Cinder server
[root@serverb ~]# yum -y install ceph-common
[root@serverb tmp]# chown cinder.cinder -R /etc/ceph/ceph.client.cinder.keyring
2.4 Edit /etc/ceph/ceph.conf on the client
[root@serverb tmp]# vim /etc/ceph/ceph.conf
[client.cinder]
keyring = /etc/ceph/ceph.client.cinder.keyring
2.5 Generate a libvirt secret
[root@serverb tmp(keystone_admin)]# uuidgen
ade72e47-ce6f-4f44-a97d-d7dff6aef99c
[root@serverb tmp(keystone_admin)]# vim /etc/ceph/secret.xml
<secret ephemeral="no" private="no">
  <uuid>ade72e47-ce6f-4f44-a97d-d7dff6aef99c</uuid>
  <usage type="ceph">
    <name>client.cinder secret</name>
  </usage>
</secret>
[root@serverb tmp(keystone_admin)]# virsh secret-define --file /etc/ceph/secret.xml
Secret ade72e47-ce6f-4f44-a97d-d7dff6aef99c created
[root@serverb tmp(keystone_admin)]# virsh secret-list
UUID                                   Usage
--------------------------------------------------------------------------------
ade72e47-ce6f-4f44-a97d-d7dff6aef99c   ceph client.cinder secret
2.6 Load the key into the secret
[root@serverb tmp(keystone_admin)]# virsh secret-set-value --secret ade72e47-ce6f-4f44-a97d-d7dff6aef99c --base64 $(cat /etc/ceph/temp.client.cinder.key)
Secret value set
[root@serverb tmp(keystone_admin)]# virsh secret-list
UUID                                   Usage
--------------------------------------------------------------------------------
ade72e47-ce6f-4f44-a97d-d7dff6aef99c   ceph client.cinder secret
[root@serverb tmp(keystone_admin)]# ceph -s --id cinder
  cluster:
    id:     70ec7a0b-7b4d-4c4d-8705-3eb5ce3e8e50
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum serverc,serverd,servere
    mgr: servere(active), standbys: serverc, serverd
    osd: 9 osds: 9 up, 9 in

  data:
    pools:   2 pools, 192 pgs
    objects: 8 objects, 12418 kB
    usage:   1007 MB used, 133 GB / 134 GB avail
    pgs:     192 active+clean
2.7 Edit /etc/cinder/cinder.conf
[root@serverb tmp(keystone_admin)]# vim /etc/cinder/cinder.conf
[DEFAULT]
enabled_backends = rbd2
default_volume_type = rbd2
glance_api_version = 2

[rbd2]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_user = cinder
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
# NOTE: rbd_secret_uuid must match the UUID of the libvirt secret defined in
# section 2.5 (ade72e47-ce6f-4f44-a97d-d7dff6aef99c in this walkthrough)
rbd_secret_uuid = f50719e8-e5b7-404e-980a-c80254e4541c
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
# volume_backend_name is optional; if set, it must match the type key in section 2.8
#volume_backend_name = rbd2
2.8 Create a Cinder volume type for the backend
[root@serverb tmp(keystone_admin)]# cinder type-create rbd2
+--------------------------------------+------+-------------+-----------+
| ID                                   | Name | Description | Is_Public |
+--------------------------------------+------+-------------+-----------+
| c92590e9-33f8-4152-8853-945dc3eb4548 | rbd2 | -           | True      |
+--------------------------------------+------+-------------+-----------+
[root@serverb tmp(keystone_admin)]# cinder type-key rbd2 set volume_backend_name=rbd2
[root@serverb tmp(keystone_admin)]# systemctl restart openstack-cinder-volume
[root@serverb tmp(keystone_admin)]# systemctl restart openstack-cinder-api
[root@serverb tmp(keystone_admin)]# systemctl restart openstack-cinder-scheduler
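After the restarts, it is worth confirming that the volume service registered the new backend (a check I'm adding; output not captured here, but the Host column should show something like serverb@rbd2 with State "up"):
[root@serverb tmp(keystone_admin)]# cinder service-list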
2.9 Verification
[root@serverb tmp(keystone_admin)]# cinder create --name new-volume --display-name 'ceph storage' 2 --volume_type rbd2
+--------------------------------+--------------------------------------+
| Property                       | Value                                |
+--------------------------------+--------------------------------------+
| attachments                    | []                                   |
| availability_zone              | nova                                 |
| bootable                       | false                                |
| consistencygroup_id            | None                                 |
| created_at                     | 2019-03-31T03:01:34.000000           |
| description                    | None                                 |
| encrypted                      | False                                |
| id                             | 5aa151ad-978c-40b3-bca9-ead7c34358ff |
| metadata                       | {}                                   |
| migration_status               | None                                 |
| multiattach                    | False                                |
| name                           | ceph storage                         |
| os-vol-host-attr:host          | None                                 |
| os-vol-mig-status-attr:migstat | None                                 |
| os-vol-mig-status-attr:name_id | None                                 |
| os-vol-tenant-attr:tenant_id   | 79cf145d371e48ef96f608cbf85d1788     |
| replication_status             | disabled                             |
| size                           | 2                                    |
| snapshot_id                    | None                                 |
| source_volid                   | None                                 |
| status                         | creating                             |
| updated_at                     | None                                 |
| user_id                        | 8e0be34493e04722ba03ab30fbbf3bf8     |
| volume_type                    | rbd2                                 |
+--------------------------------+--------------------------------------+
[root@serverb tmp(keystone_admin)]# cinder list
+--------------------------------------+-----------+--------------+------+-------------+----------+-------------+
| ID                                   | Status    | Name         | Size | Volume Type | Bootable | Attached to |
+--------------------------------------+-----------+--------------+------+-------------+----------+-------------+
| 5aa151ad-978c-40b3-bca9-ead7c34358ff | available | ceph storage | 2    | rbd2        | false    |             |
+--------------------------------------+-----------+--------------+------+-------------+----------+-------------+
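On the Ceph side, the cinder RBD driver names backing images volume-<id>, so the new volume should also be visible in the pool (the expected entry is shown as a comment; I did not capture this output in the lab):
[root@serverc ~]# rbd ls volumes
# expected entry: volume-5aa151ad-978c-40b3-bca9-ead7c34358ff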
3. Authenticating the RADOS Gateway against Keystone
3.1 Configure the RADOS Gateway server
[root@serverc ~]# yum install -y ceph-radosgw
[root@serverc ~]# ceph auth get-or-create client.rgw.serverc mon 'allow rwx' osd 'allow rwx' -o /etc/ceph/ceph.client.rgw.serverc.keyring
Add an initial [client.rgw.serverc] section to ceph.conf (the full version, including the Keystone settings, is shown in section 3.4):
[root@serverc ~]# vim /etc/ceph/ceph.conf
[root@serverc ~]# systemctl restart ceph-radosgw@rgw.serverc
[root@serverc ~]# ps -ef|grep rados
root 46163 1 0 11:19 ? 00:00:00 /usr/bin/radosgw -f --cluster ceph --name client.rgw.serverc --setuser ceph --setgroup ceph
3.2 Create the service and endpoint in Keystone
[root@serverb tmp(keystone_admin)]# openstack service create --description "Swift Service" --name swift object-store
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Swift Service                    |
| enabled     | True                             |
| id          | 1dd0d40cd61d4bed870cc3c302a001da |
| name        | swift                            |
| type        | object-store                     |
+-------------+----------------------------------+
[root@serverb tmp(keystone_admin)]# openstack endpoint create --region RegionOne --publicurl "http://serverc.lab.example.com/swift/v1" --adminurl "http://serverc.lab.example.com/swift/v1" --internalurl "http://serverc.lab.example.com/swift/v1" swift
+--------------+-----------------------------------------+
| Field        | Value                                   |
+--------------+-----------------------------------------+
| adminurl     | http://serverc.lab.example.com/swift/v1 |
| id           | 47f906c29a904571a44dcd99ea27561c        |
| internalurl  | http://serverc.lab.example.com/swift/v1 |
| publicurl    | http://serverc.lab.example.com/swift/v1 |
| region       | RegionOne                               |
| service_id   | 1dd0d40cd61d4bed870cc3c302a001da        |
| service_name | swift                                   |
| service_type | object-store                            |
+--------------+-----------------------------------------+
[root@serverb tmp(keystone_admin)]# openstack service list
+----------------------------------+----------+--------------+
| ID | Name | Type |
+----------------------------------+----------+--------------+
| 1dd0d40cd61d4bed870cc3c302a001da | swift | object-store |
| 26a3d56178cd4da2bca93e775ce4efac | cinderv3 | volumev3 |
| 834ee6fe73b2425fb5bb667ccdfdf6a7 | cinderv2 | volumev2 |
| 9581f6be4b4e4112bdb8d1cb8ef2794b | keystone | identity |
| a43b4be139364c4fbf9555e12eeabfed | glance | image |
| a63dad7778b744bfbc263dd73caf0fdb | cinder | volume |
| f3f2b987cdc14d7996bacbd13d3301e1 | nova | compute |
+----------------------------------+----------+--------------+
[root@serverb tmp(keystone_admin)]# openstack service show swift
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Swift Service                    |
| enabled     | True                             |
| id          | 1dd0d40cd61d4bed870cc3c302a001da |
| name        | swift                            |
| type        | object-store                     |
+-------------+----------------------------------+
[root@serverb tmp(keystone_admin)]# openstack endpoint show swift
+--------------+-----------------------------------------+
| Field        | Value                                   |
+--------------+-----------------------------------------+
| adminurl     | http://serverc.lab.example.com/swift/v1 |
| enabled      | True                                    |
| id           | 47f906c29a904571a44dcd99ea27561c        |
| internalurl  | http://serverc.lab.example.com/swift/v1 |
| publicurl    | http://serverc.lab.example.com/swift/v1 |
| region       | RegionOne                               |
| service_id   | 1dd0d40cd61d4bed870cc3c302a001da        |
| service_name | swift                                   |
| service_type | object-store                            |
+--------------+-----------------------------------------+
3.3 Obtain the Keystone admin token
[root@serverb tmp(keystone_admin)]# cat /etc/keystone/keystone.conf |grep admin_token
# value is ignored and the `admin_token` middleware is effectively disabled.
# However, to completely disable `admin_token` in production (highly
# `AdminTokenAuthMiddleware` (the `admin_token_auth` filter) from your paste
#admin_token = <None>
admin_token = fb032ccf285a432b81c6fe347be8a07d
3.4 Edit /etc/ceph/ceph.conf
The rgw_keystone_* settings below let the gateway validate Swift users against Keystone, using the admin token obtained in 3.3:
[root@serverc ~]# vim /etc/ceph/ceph.conf
[client.rgw.serverc]
host = serverc
keyring = /etc/ceph/ceph.client.rgw.serverc.keyring
rgw_frontends = civetweb port=80 num_threads=100
log = /var/log/ceph/$cluster.$name.log
rgw_dns_name = serverc.lab.example.com
rgw_keystone_url = http://serverb.lab.example.com:5000
rgw_keystone_admin_token = fb032ccf285a432b81c6fe347be8a07d
rgw_keystone_accepted_roles = admin member swiftoperator
rgw_keystone_token_cache_size = 200
rgw_keystone_revocation_interval = 300
rgw_keystone_verify_ssl = false
[root@serverc ~]# systemctl restart ceph-radosgw@rgw.serverc
[root@serverc ~]# ps -ef|grep rados
ceph 46492 1 2 11:45 ? 00:00:00 /usr/bin/radosgw -f --cluster ceph --name client.rgw.serverc --setuser ceph --setgroup ceph
[root@serverb tmp(keystone_admin)]# ps -ef |grep keystone
keystone  1109   987  0 09:05 ?   00:00:01 keystone-admin -DFOREGROUND
keystone  1110   987  0 09:05 ?   00:00:01 keystone-admin -DFOREGROUND
keystone  1111   987  0 09:05 ?   00:00:02 keystone-main  -DFOREGROUND
keystone  1121   987  0 09:05 ?   00:00:02 keystone-main  -DFOREGROUND
[root@serverb tmp(keystone_admin)]# netstat -ntlp |grep 987
tcp6       0      0 :::80       :::*      LISTEN      987/httpd
tcp6       0      0 :::35357    :::*      LISTEN      987/httpd
tcp6       0      0 :::5000     :::*      LISTEN      987/httpd
3.5 Verify from the client
[root@serverb tmp(keystone_admin)]# swift list
[root@serverb tmp(keystone_admin)]# swift post testbucket
[root@serverb tmp(keystone_admin)]# swift list
testbucket
[root@serverc ~]# ceph osd pool ls
images
volumes
.rgw.root
default.rgw.control
default.rgw.meta
default.rgw.log
default.rgw.buckets.index
[root@serverc ~]# rados -p default.rgw.buckets.index ls
.dir.ce5b2073-728f-42d5-8fac-b2e0aa2a41a3.4333.1
[root@serverb tmp(keystone_admin)]# swift upload testbucket /etc/ceph/secret.xml
etc/ceph/secret.xml
The default.rgw.buckets.data pool, absent from the pool listing above, is created automatically on the first object upload, and it now holds the object:
[root@serverc ~]# rados -p default.rgw.buckets.data ls
ce5b2073-728f-42d5-8fac-b2e0aa2a41a3.4333.1_etc/ceph/secret.xml
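As an extra round-trip check beyond the original steps, the object can be fetched back through the Swift API (the -o flag writes it to a local file):
[root@serverb tmp(keystone_admin)]# swift download testbucket etc/ceph/secret.xml -o /tmp/secret.xml.bak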
Author's note: the material in this post comes mainly from teacher Yan Wei of Yutian Education; I performed and verified all of the operations myself. If you would like to repost it, please first obtain permission from Yutian Education (http://www.yutianedu.com/) or from Mr. Yan himself (https://www.cnblogs.com/breezey/). Thank you!