Analysis based on libvirt + the iSCSI protocol
1. The client request is received and routed to VolumeAttachmentController
The corresponding RESTful request is:
DELETE /servers/{server_id}/os-volume_attachments/{volume_id}
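As a quick reference, here is a minimal sketch of issuing this request directly against the compute API with the requests library; the endpoint URL, token and UUIDs below are placeholders, not values taken from a real environment:

import requests

# Placeholder values for illustration only.
NOVA_ENDPOINT = "http://controller:8774/v2.1"
TOKEN = "<keystone-token>"
SERVER_ID = "11111111-2222-3333-4444-555555555555"
VOLUME_ID = "66666666-7777-8888-9999-000000000000"

# DELETE /servers/{server_id}/os-volume_attachments/{volume_id}
url = "%s/servers/%s/os-volume_attachments/%s" % (
    NOVA_ENDPOINT, SERVER_ID, VOLUME_ID)
resp = requests.delete(url, headers={"X-Auth-Token": TOKEN})

# nova-api answers 202 Accepted; the detach itself runs asynchronously.
print(resp.status_code)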
The nova-api entry point that handles this request is nova.api.openstack.compute.volumes.VolumeAttachmentController.delete
nova/api/openstack/compute/volumes.py

from nova.compute import api as compute

class VolumeAttachmentController(wsgi.Controller):
    def __init__(self):
        self.compute_api = compute.API()
        self.volume_api = cinder.API()
        super(VolumeAttachmentController, self).__init__()

    @wsgi.response(202)
    @wsgi.expected_errors((400, 403, 404, 409))
    def delete(self, req, server_id, id):
        """Detach a volume from an instance."""
        context = req.environ['nova.context']
        context.can(va_policies.POLICY_ROOT % 'delete')
        volume_id = id

        # Look up the Instance object by the server uuid.
        instance = common.get_instance(self.compute_api, context, server_id,
                                       expected_attrs=['device_metadata'])
        if instance.vm_state in (vm_states.SHELVED,
                                 vm_states.SHELVED_OFFLOADED):
            _check_request_version(req, '2.20', 'detach_volume',
                                   server_id, instance.vm_state)
        try:
            # Call cinderclient to fetch the volume object by its uuid.
            volume = self.volume_api.get(context, volume_id)
        except exception.VolumeNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())

        try:
            # Fetch the row in the block_device_mapping table that maps
            # this volume to this instance.
            bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
                    context, volume_id, instance.uuid)
        except exception.VolumeBDMNotFound:
            msg = (_("Instance %(instance)s is not attached "
                     "to volume %(volume)s") %
                   {'instance': server_id, 'volume': volume_id})
            raise exc.HTTPNotFound(explanation=msg)

        if bdm.is_root:
            msg = _("Cannot detach a root device volume")
            raise exc.HTTPBadRequest(explanation=msg)

        try:
            # s1: hand the detach off to the compute API layer (detailed below).
            self.compute_api.detach_volume(context, instance, volume)
        .....
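To make the bdm lookup above concrete, here is a simplified, illustrative view of the kind of record BlockDeviceMapping.get_by_volume_and_instance returns for an attached iSCSI volume; the field values are made up, and connection_info is actually stored as a JSON string in the database:

# Illustrative only: simplified block_device_mapping entry for an iSCSI volume.
bdm_example = {
    'instance_uuid': '11111111-2222-3333-4444-555555555555',
    'volume_id': '66666666-7777-8888-9999-000000000000',
    'source_type': 'volume',
    'destination_type': 'volume',
    'device_name': '/dev/vdb',
    'boot_index': None,        # not the root disk, so the is_root check passes
    'connection_info': {
        'driver_volume_type': 'iscsi',   # used later to pick the volume driver
        'data': {
            'target_iqn': 'iqn.2010-10.org.openstack:volume-<uuid>',
            'target_portal': '192.168.0.10:3260',
            'target_lun': 1,
        },
    },
    'deleted': 0,
}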
s1 nova-api processing, in detail
The nova-api service invokes the interface that the nova-compute service exposes to other components:
nova.compute.api.API.detach_volume
nova/compute/api.py

class API(base.Base):
    def detach_volume(self, context, instance, volume):
        """Detach a volume from an instance."""
        if instance.vm_state == vm_states.SHELVED_OFFLOADED:
            self._detach_volume_shelved_offloaded(context, instance, volume)
        else:
            # The normal (non-shelved) case takes this branch.
            self._detach_volume(context, instance, volume)

    def _detach_volume(self, context, instance, volume):
        """Detach volume from instance.

        This method is separated to make it easier for cells version
        to override.
        """
        try:
            # Call cinderclient to set the volume status to 'detaching'
            # in the cinder database.
            self.volume_api.begin_detaching(context, volume['id'])
        except exception.InvalidInput as exc:
            raise exception.InvalidVolume(reason=exc.format_message())

        attachments = volume.get('attachments', {})
        attachment_id = None
        if attachments and instance.uuid in attachments:
            attachment_id = attachments[instance.uuid]['attachment_id']

        # Record an instance action for this operation.
        self._record_action_start(
            context, instance, instance_actions.DETACH_VOLUME)
        # Send the detach-volume RPC request through the nova-compute
        # RPC client.
        self.compute_rpcapi.detach_volume(context, instance=instance,
                                          volume_id=volume['id'],
                                          attachment_id=attachment_id)
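compute_rpcapi.detach_volume is an asynchronous cast to the nova-compute service on the instance's host. Below is a minimal sketch of that pattern with oslo.messaging, assuming an already configured transport; the topic and server routing mirror the general approach, but the code is illustrative and not nova's actual rpcapi module:

import oslo_messaging
from oslo_config import cfg


def cast_detach_volume(ctxt, host, instance, volume_id, attachment_id=None):
    """Illustrative sketch of the kind of cast compute_rpcapi.detach_volume performs."""
    transport = oslo_messaging.get_rpc_transport(cfg.CONF)
    target = oslo_messaging.Target(topic='compute')
    client = oslo_messaging.RPCClient(transport, target)

    # Route the message to the compute node hosting the instance, then cast:
    # the caller returns immediately and ComputeManager.detach_volume runs there.
    cctxt = client.prepare(server=host)
    cctxt.cast(ctxt, 'detach_volume', instance=instance,
               volume_id=volume_id, attachment_id=attachment_id)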
2. The nova-compute service receives the detach-volume RPC request and handles it; the entry point is:
nova/compute/manager.py

class ComputeManager(manager.Manager):
    def detach_volume(self, context, volume_id, instance, attachment_id):
        """Detach a volume from an instance.

        :param context: security context
        :param volume_id: the volume id
        :param instance: the Instance object to detach the volume from
        :param attachment_id: The volume attachment_id for the given instance
                              and volume.
        """
        @utils.synchronized(instance.uuid)
        def do_detach_volume(context, volume_id, instance, attachment_id):
            bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
                    context, volume_id, instance.uuid)
            self._detach_volume(context, bdm, instance,
                                attachment_id=attachment_id)

        # The actual work is done by _detach_volume.
        do_detach_volume(context, volume_id, instance, attachment_id)

    def _detach_volume(self, context, bdm, instance, destroy_bdm=True,
                       attachment_id=None):
        """Detach a volume from an instance."""
        volume_id = bdm.volume_id
        compute_utils.notify_about_volume_attach_detach(
            context, instance, self.host,
            action=fields.NotificationAction.VOLUME_DETACH,
            phase=fields.NotificationPhase.START,
            volume_id=volume_id)
        self._notify_volume_usage_detach(context, instance, bdm)

        LOG.info('Detaching volume %(volume_id)s',
                 {'volume_id': volume_id}, instance=instance)

        # Get the BDM driver; here it is nova.virt.block_device.DriverVolumeBlockDevice.
        driver_bdm = driver_block_device.convert_volume(bdm)
        # s1: call the BDM driver to detach the volume;
        # self.driver is nova.virt.libvirt.driver.LibvirtDriver.
        driver_bdm.detach(context, instance, self.volume_api, self.driver,
                          attachment_id=attachment_id, destroy_bdm=destroy_bdm)

        info = dict(volume_id=volume_id)
        self._notify_about_instance_usage(
            context, instance, "volume.detach", extra_usage_info=info)
        compute_utils.notify_about_volume_attach_detach(
            context, instance, self.host,
            action=fields.NotificationAction.VOLUME_DETACH,
            phase=fields.NotificationPhase.END,
            volume_id=volume_id)

        if 'tag' in bdm and bdm.tag:
            self._delete_disk_metadata(instance, bdm)
        if destroy_bdm:
            # Mark the BDM row for this volume as deleted in the nova database.
            bdm.destroy()
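The @utils.synchronized(instance.uuid) decorator serializes concurrent attach/detach operations on the same instance. A minimal sketch of the same per-instance locking pattern using oslo.concurrency directly (the function below is illustrative, not nova code):

from oslo_concurrency import lockutils


def do_detach_for_instance(instance_uuid, volume_id):
    # One lock per instance uuid: two detach (or attach) requests for the same
    # instance cannot run at the same time, while different instances proceed
    # in parallel.
    @lockutils.synchronized(instance_uuid)
    def _locked():
        print('detaching volume %s from instance %s' % (volume_id, instance_uuid))

    _locked()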
s1 The BDM driver detach operation
nova/virt/block_device.py

class DriverVolumeBlockDevice(DriverBlockDevice):
    def detach(self, context, instance, volume_api, virt_driver,
               attachment_id=None, destroy_bdm=False):
        volume = self._get_volume(context, volume_api, self.volume_id)
        if volume.get('shared_targets', False):
            # Lock the detach call using the provided service_uuid.
            @utils.synchronized(volume['service_uuid'])
            def _do_locked_detach(*args, **_kwargs):
                self._do_detach(*args, **_kwargs)

            _do_locked_detach(context, instance, volume_api, virt_driver,
                              attachment_id, destroy_bdm)
        else:
            # We don't need to (or don't know if we need to) lock.
            # In this analysis the code took this branch.
            self._do_detach(context, instance, volume_api, virt_driver,
                            attachment_id, destroy_bdm)

    def _do_detach(self, context, instance, volume_api, virt_driver,
                   attachment_id=None, destroy_bdm=False):
        """Private method that actually does the detach.

        This is separate from the detach() method so the caller can
        optionally lock this call.
        """
        volume_id = self.volume_id

        # Only attempt to detach and disconnect from the volume if the
        # instance is currently associated with the local compute host.
        if CONF.host == instance.host:
            # s1.1: detach the device from the guest (see below).
            self.driver_detach(context, instance, volume_api, virt_driver)
        elif not destroy_bdm:
            LOG.debug("Skipping driver_detach during remote rebuild.",
                      instance=instance)
        elif destroy_bdm:
            LOG.error("Unable to call for a driver detach of volume "
                      "%(vol_id)s due to the instance being "
                      "registered to the remote host %(inst_host)s.",
                      {'vol_id': volume_id, 'inst_host': instance.host},
                      instance=instance)

        # NOTE(jdg): For now we need to actually inspect the bdm for an
        # attachment_id as opposed to relying on what may have been passed
        # in, we want to force usage of the old detach flow for now and only
        # use the new flow when we explicitly used it for the attach.
        if not self['attachment_id']:
            connector = virt_driver.get_volume_connector(instance)
            connection_info = self['connection_info']
            if connection_info and not destroy_bdm and (
               connector.get('host') != instance.host):
                # If the volume is attached to another host (evacuate) then
                # this connector is for the wrong host. Use the connector that
                # was stored in connection_info instead (if we have one, and it
                # is for the expected host).
                stashed_connector = connection_info.get('connector')
                if not stashed_connector:
                    # Volume was attached before we began stashing connectors
                    LOG.warning("Host mismatch detected, but stashed "
                                "volume connector not found. Instance host is "
                                "%(ihost)s, but volume connector host is "
                                "%(chost)s.",
                                {'ihost': instance.host,
                                 'chost': connector.get('host')})
                elif stashed_connector.get('host') != instance.host:
                    # Unexpected error. The stashed connector is also not
                    # matching the needed instance host.
                    LOG.error("Host mismatch detected in stashed volume "
                              "connector. Will use local volume connector. "
                              "Instance host is %(ihost)s. Local volume "
                              "connector host is %(chost)s. Stashed volume "
                              "connector host is %(schost)s.",
                              {'ihost': instance.host,
                               'chost': connector.get('host'),
                               'schost': stashed_connector.get('host')})
                else:
                    # Fix found. Use stashed connector.
                    LOG.debug("Host mismatch detected. Found usable stashed "
                              "volume connector. Instance host is %(ihost)s. "
                              "Local volume connector host was %(chost)s. "
                              "Stashed volume connector host is %(schost)s.",
                              {'ihost': instance.host,
                               'chost': connector.get('host'),
                               'schost': stashed_connector.get('host')})
                    connector = stashed_connector

            # s1.2: call cinderclient to send an os-terminate_connection request;
            # cinder tears down the backend export/mapping for this host.
            volume_api.terminate_connection(context, volume_id, connector)
            # s1.3: call cinderclient to send an os-detach request, which updates
            # the volume status in the cinder database.
            volume_api.detach(context.elevated(), volume_id, instance.uuid,
                              attachment_id)
        else:
            volume_api.attachment_delete(context, self['attachment_id'])

s1.1 Detaching the device from the guest: self.driver_detach(context, instance, volume_api, virt_driver)

nova/virt/block_device.py

class DriverVolumeBlockDevice(DriverBlockDevice):
    def driver_detach(self, context, instance, volume_api, virt_driver):
        # virt_driver here is nova.virt.libvirt.driver.LibvirtDriver.
        connection_info = self['connection_info']
        mp = self['mount_device']
        volume_id = self.volume_id

        LOG.info('Attempting to driver detach volume %(volume_id)s from '
                 'mountpoint %(mp)s', {'volume_id': volume_id, 'mp': mp},
                 instance=instance)
        try:
            if not virt_driver.instance_exists(instance):
                LOG.warning('Detaching volume from unknown instance',
                            instance=instance)

            encryption = encryptors.get_encryption_metadata(
                context, volume_api, volume_id, connection_info)
            # s1.1.1: call the actual libvirt driver to do the detach.
            virt_driver.detach_volume(context, connection_info, instance, mp,
                                      encryption=encryption)
        except exception.DiskNotFound as err:
            LOG.warning('Ignoring DiskNotFound exception while '
                        'detaching volume %(volume_id)s from '
                        '%(mp)s : %(err)s',
                        {'volume_id': volume_id, 'mp': mp, 'err': err},
                        instance=instance)
        except exception.DeviceDetachFailed as err:
            with excutils.save_and_reraise_exception():
                LOG.warning('Guest refused to detach volume %(vol)s',
                            {'vol': volume_id}, instance=instance)
                volume_api.roll_detaching(context, volume_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception('Failed to detach volume '
                              '%(volume_id)s from %(mp)s',
                              {'volume_id': volume_id, 'mp': mp},
                              instance=instance)
                volume_api.roll_detaching(context, volume_id)

s1.1.1 Details

nova/virt/libvirt/driver.py

class LibvirtDriver(driver.ComputeDriver):
    def detach_volume(self, context, connection_info, instance, mountpoint,
                      encryption=None):
        disk_dev = mountpoint.rpartition("/")[2]
        try:
            # Get the guest (libvirt domain) object for this instance.
            guest = self._host.get_guest(instance)

            state = guest.get_power_state(self._host)
            live = state in (power_state.RUNNING, power_state.PAUSED)
            # s1: remove the attached volume's device from the guest XML.
            wait_for_detach = guest.detach_device_with_retry(guest.get_disk,
                                                             disk_dev,
                                                             live=live)
            wait_for_detach()
            # s2: perform the iSCSI logout on the compute host.
            self._disconnect_volume(context, connection_info, instance,
                                    encryption=encryption)

s2 iSCSI logout on the compute host

nova/virt/libvirt/driver.py

class LibvirtDriver(driver.ComputeDriver):
    def _disconnect_volume(self, context, connection_info, instance,
                           encryption=None):
        self._detach_encryptor(context, connection_info,
                               encryption=encryption)
        if self._should_disconnect_target(context, connection_info, instance):
            vol_driver = self._get_volume_driver(connection_info)
            vol_driver.disconnect_volume(connection_info, instance)
        else:
            LOG.info("Detected multiple connections on this host for "
                     "volume: %s, skipping target disconnect.",
                     driver_block_device.get_volume_id(connection_info),
                     instance=instance)

The volume driver is selected from the driver_volume_type stored in the connection_info of the block_device_mapping. Since the iSCSI protocol is used here, LibvirtISCSIVolumeDriver is chosen, so vol_driver.disconnect_volume resolves to nova.virt.libvirt.volume.iscsi.LibvirtISCSIVolumeDriver.disconnect_volume.