"Fossies" - the Fresh Open Source Software Archive

Member "nova-22.0.1/nova/virt/libvirt/guest.py" (19 Nov 2020, 41365 Bytes) of package /linux/misc/openstack/nova-22.0.1.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) Python source code syntax highlighting (style: standard) with prefixed line numbers. Alternatively you can here view or download the uninterpreted source code file. For more information about "guest.py" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 22.0.0_vs_22.0.1.

    1 # Copyright 2010 United States Government as represented by the
    2 # Administrator of the National Aeronautics and Space Administration.
    3 # All Rights Reserved.
    4 # Copyright (c) 2010 Citrix Systems, Inc.
    5 # Copyright (c) 2011 Piston Cloud Computing, Inc
    6 # Copyright (c) 2012 University Of Minho
    7 # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
    8 # Copyright (c) 2015 Red Hat, Inc
    9 #
   10 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
   11 #    not use this file except in compliance with the License. You may obtain
   12 #    a copy of the License at
   13 #
   14 #         http://www.apache.org/licenses/LICENSE-2.0
   15 #
   16 #    Unless required by applicable law or agreed to in writing, software
   17 #    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   18 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   19 #    License for the specific language governing permissions and limitations
   20 #    under the License.
   21 
   22 """
   23 Manages information about the guest.
   24 
This class encapsulates a libvirt domain and provides certain
higher-level APIs around the raw libvirt API. These APIs are
then used by all the other libvirt related classes.
   28 """
   29 
   30 import time
   31 
   32 from lxml import etree
   33 from oslo_log import log as logging
   34 from oslo_service import loopingcall
   35 from oslo_utils import encodeutils
   36 from oslo_utils import excutils
   37 from oslo_utils import importutils
   38 
   39 from nova.compute import power_state
   40 from nova import exception
   41 from nova.i18n import _
   42 from nova.virt import hardware
   43 from nova.virt.libvirt import config as vconfig
   44 
# The libvirt python binding is imported lazily (see Guest.__init__) so this
# module can be imported even on systems without the bindings installed.
libvirt = None

LOG = logging.getLogger(__name__)

# Mirror of libvirt's virDomainState enum values, duplicated here so they can
# be referenced without the libvirt module being importable.
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_PMSUSPENDED = 7

# Translation table from libvirt domain states to nova power states.
LIBVIRT_POWER_STATE = {
    VIR_DOMAIN_NOSTATE: power_state.NOSTATE,
    VIR_DOMAIN_RUNNING: power_state.RUNNING,
    # The DOMAIN_BLOCKED state is only valid in Xen.  It means that
    # the VM is running and the vCPU is idle. So, we map it to RUNNING
    VIR_DOMAIN_BLOCKED: power_state.RUNNING,
    VIR_DOMAIN_PAUSED: power_state.PAUSED,
    # The libvirt API doc says that DOMAIN_SHUTDOWN means the domain
    # is being shut down. So technically the domain is still
    # running. SHUTOFF is the real powered off state.  But we will map
    # both to SHUTDOWN anyway.
    # http://libvirt.org/html/libvirt-libvirt.html
    VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN,
    VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN,
    VIR_DOMAIN_CRASHED: power_state.CRASHED,
    VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
}

# https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainBlockJobType
VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN = 0
VIR_DOMAIN_BLOCK_JOB_TYPE_PULL = 1
VIR_DOMAIN_BLOCK_JOB_TYPE_COPY = 2
VIR_DOMAIN_BLOCK_JOB_TYPE_COMMIT = 3
VIR_DOMAIN_BLOCK_JOB_TYPE_ACTIVE_COMMIT = 4
VIR_DOMAIN_BLOCK_JOB_TYPE_BACKUP = 5
VIR_DOMAIN_BLOCK_JOB_TYPE_LAST = 6

# Human-readable names for the block job type codes above.
LIBVIRT_BLOCK_JOB_TYPE = {
    VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN: 'UNKNOWN',
    VIR_DOMAIN_BLOCK_JOB_TYPE_PULL: 'PULL',
    VIR_DOMAIN_BLOCK_JOB_TYPE_COPY: 'COPY',
    VIR_DOMAIN_BLOCK_JOB_TYPE_COMMIT: 'COMMIT',
    VIR_DOMAIN_BLOCK_JOB_TYPE_ACTIVE_COMMIT: 'ACTIVE_COMMIT',
    VIR_DOMAIN_BLOCK_JOB_TYPE_BACKUP: 'BACKUP',
    VIR_DOMAIN_BLOCK_JOB_TYPE_LAST: 'LAST',
}
   94 
   95 
class Guest(object):

    def __init__(self, domain):
        """Wrap a libvirt domain object.

        :param domain: the libvirt domain object to wrap (presumably a
            libvirt.virDomain instance -- confirm against callers)
        """
        # Import libvirt lazily and cache it in the module-level global so
        # this module remains importable on hosts without the bindings.
        global libvirt
        if libvirt is None:
            libvirt = importutils.import_module('libvirt')

        self._domain = domain
  105 
  106     def __repr__(self):
  107         return "<Guest %(id)d %(name)s %(uuid)s>" % {
  108             'id': self.id,
  109             'name': self.name,
  110             'uuid': self.uuid
  111         }
  112 
  113     @property
  114     def id(self):
  115         return self._domain.ID()
  116 
  117     @property
  118     def uuid(self):
  119         return self._domain.UUIDString()
  120 
  121     @property
  122     def name(self):
  123         return self._domain.name()
  124 
  125     @property
  126     def _encoded_xml(self):
  127         return encodeutils.safe_decode(self._domain.XMLDesc(0))
  128 
    @classmethod
    def create(cls, xml, host):
        """Create a new Guest

        :param xml: XML definition of the domain to create (str or bytes)
        :param host: host.Host connection to define the guest on

        :returns guest.Guest: Guest ready to be launched
        """
        try:
            # The decode is kept inside the try block so that a decode
            # failure is also logged before being re-raised.
            if isinstance(xml, bytes):
                xml = xml.decode('utf-8')
            guest = host.write_instance_config(xml)
        except Exception:
            # Log the failing XML for diagnosis, then re-raise the original
            # exception unchanged.
            with excutils.save_and_reraise_exception():
                LOG.error('Error defining a guest with XML: %s',
                          encodeutils.safe_decode(xml))
        return guest
  147 
  148     def launch(self, pause=False):
  149         """Starts a created guest.
  150 
  151         :param pause: Indicates whether to start and pause the guest
  152         """
  153         flags = pause and libvirt.VIR_DOMAIN_START_PAUSED or 0
  154         try:
  155             return self._domain.createWithFlags(flags)
  156         except Exception:
  157             with excutils.save_and_reraise_exception():
  158                 LOG.error('Error launching a defined domain '
  159                           'with XML: %s',
  160                           self._encoded_xml, errors='ignore')
  161 
  162     def poweroff(self):
  163         """Stops a running guest."""
  164         self._domain.destroy()
  165 
    def sync_guest_time(self):
        """Try to set VM time to the current value.  This is typically useful
        when clock wasn't running on the VM for some time (e.g. during
        suspension or migration), especially if the time delay exceeds NTP
        tolerance.

        It is not guaranteed that the time is actually set (it depends on guest
        environment, especially QEMU agent presence) or that the set time is
        very precise (NTP in the guest should take care of it if needed).
        """
        # Split the host wall-clock time into the seconds/nanoseconds pair
        # expected by virDomainSetTime.
        t = time.time()
        seconds = int(t)
        nseconds = int((t - seconds) * 10 ** 9)
        try:
            self._domain.setTime(time={'seconds': seconds,
                                       'nseconds': nseconds})
        except libvirt.libvirtError as e:
            # Expected agent-related failures are logged at debug level;
            # anything else is unexpected and logged as a warning.
            code = e.get_error_code()
            if code == libvirt.VIR_ERR_AGENT_UNRESPONSIVE:
                LOG.debug('Failed to set time: QEMU agent unresponsive',
                          instance_uuid=self.uuid)
            elif code == libvirt.VIR_ERR_OPERATION_UNSUPPORTED:
                LOG.debug('Failed to set time: not supported',
                          instance_uuid=self.uuid)
            elif code == libvirt.VIR_ERR_ARGUMENT_UNSUPPORTED:
                LOG.debug('Failed to set time: agent not configured',
                          instance_uuid=self.uuid)
            else:
                LOG.warning('Failed to set time: %(reason)s',
                            {'reason': e}, instance_uuid=self.uuid)
        except Exception as ex:
            # The highest priority is not to let this method crash and thus
            # disrupt its caller in any way.  So we swallow this error here,
            # to be absolutely safe.
            LOG.debug('Failed to set time: %(reason)s',
                      {'reason': ex}, instance_uuid=self.uuid)
        else:
            LOG.debug('Time updated to: %d.%09d', seconds, nseconds,
                      instance_uuid=self.uuid)
  205 
  206     def inject_nmi(self):
  207         """Injects an NMI to a guest."""
  208         self._domain.injectNMI()
  209 
  210     def resume(self):
  211         """Resumes a paused guest."""
  212         self._domain.resume()
  213 
  214     def get_interfaces(self):
  215         """Returns a list of all network interfaces for this domain."""
  216         doc = None
  217 
  218         try:
  219             doc = etree.fromstring(self._encoded_xml)
  220         except Exception:
  221             return []
  222 
  223         interfaces = []
  224 
  225         nodes = doc.findall('./devices/interface/target')
  226         for target in nodes:
  227             interfaces.append(target.get('dev'))
  228 
  229         return interfaces
  230 
  231     def get_interface_by_cfg(self, cfg):
  232         """Lookup a full LibvirtConfigGuestDevice with
  233         LibvirtConfigGuesDevice generated
  234         by nova.virt.libvirt.vif.get_config.
  235 
  236         :param cfg: config object that represents the guest interface.
  237         :type cfg: a subtype of LibvirtConfigGuestDevice object
  238         :returns: nova.virt.libvirt.config.LibvirtConfigGuestDevice instance
  239             if found, else None
  240         """
  241 
  242         if cfg:
  243             interfaces = self.get_all_devices(type(cfg))
  244             for interface in interfaces:
  245                 # NOTE(leehom) LibvirtConfigGuest get from domain and
  246                 # LibvirtConfigGuest generated by
  247                 # nova.virt.libvirt.vif.get_config must be identical.
  248                 # NOTE(gibi): LibvirtConfigGuest subtypes does a custom
  249                 # equality check based on available information on nova side
  250                 if cfg == interface:
  251                     return interface
  252 
  253     def get_vcpus_info(self):
  254         """Returns virtual cpus information of guest.
  255 
  256         :returns: guest.VCPUInfo
  257         """
  258         vcpus = self._domain.vcpus()
  259         for vcpu in vcpus[0]:
  260             yield VCPUInfo(
  261                 id=vcpu[0], cpu=vcpu[3], state=vcpu[1], time=vcpu[2])
  262 
    def delete_configuration(self, support_uefi=False):
        """Undefines a domain from hypervisor.

        :param support_uefi: when True also request removal of the domain's
            NVRAM file via VIR_DOMAIN_UNDEFINE_NVRAM
        """
        try:
            flags = libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE
            if support_uefi:
                flags |= libvirt.VIR_DOMAIN_UNDEFINE_NVRAM
            self._domain.undefineFlags(flags)
        except libvirt.libvirtError:
            # Fall back to a plain undefine if the flags variant failed.
            LOG.debug("Error from libvirt during undefineFlags for guest "
                      "%d. Retrying with undefine", self.id)
            self._domain.undefine()
        except AttributeError:
            # Older versions of libvirt don't support undefine flags,
            # trying to remove managed image
            try:
                if self._domain.hasManagedSaveImage(0):
                    self._domain.managedSaveRemove(0)
            except AttributeError:
                # hasManagedSaveImage itself may be missing on very old
                # bindings; best effort only.
                pass
            self._domain.undefine()
  283 
  284     def has_persistent_configuration(self):
  285         """Whether domain config is persistently stored on the host."""
  286         return self._domain.isPersistent()
  287 
  288     def attach_device(self, conf, persistent=False, live=False):
  289         """Attaches device to the guest.
  290 
  291         :param conf: A LibvirtConfigObject of the device to attach
  292         :param persistent: A bool to indicate whether the change is
  293                            persistent or not
  294         :param live: A bool to indicate whether it affect the guest
  295                      in running state
  296         """
  297         flags = persistent and libvirt.VIR_DOMAIN_AFFECT_CONFIG or 0
  298         flags |= live and libvirt.VIR_DOMAIN_AFFECT_LIVE or 0
  299 
  300         device_xml = conf.to_xml()
  301         if isinstance(device_xml, bytes):
  302             device_xml = device_xml.decode('utf-8')
  303 
  304         LOG.debug("attach device xml: %s", device_xml)
  305         self._domain.attachDeviceFlags(device_xml, flags=flags)
  306 
  307     def get_config(self):
  308         """Returns the config instance for a guest
  309 
  310         :returns: LibvirtConfigGuest instance
  311         """
  312         config = vconfig.LibvirtConfigGuest()
  313         config.parse_str(self._domain.XMLDesc(0))
  314         return config
  315 
  316     def get_disk(self, device):
  317         """Returns the disk mounted at device
  318 
  319         :returns LivirtConfigGuestDisk: mounted at device or None
  320         """
  321         try:
  322             doc = etree.fromstring(self._domain.XMLDesc(0))
  323         except Exception:
  324             return None
  325 
  326         # FIXME(lyarwood): Workaround for the device being either a target dev
  327         # when called via swap_volume or source file when called via
  328         # live_snapshot. This should be removed once both are refactored to use
  329         # only the target dev of the device.
  330         node = doc.find("./devices/disk/target[@dev='%s'].." % device)
  331         if node is None:
  332             node = doc.find("./devices/disk/source[@file='%s'].." % device)
  333 
  334         if node is not None:
  335             conf = vconfig.LibvirtConfigGuestDisk()
  336             conf.parse_dom(node)
  337             return conf
  338 
  339     def get_all_disks(self):
  340         """Returns all the disks for a guest
  341 
  342         :returns: a list of LibvirtConfigGuestDisk instances
  343         """
  344 
  345         return self.get_all_devices(vconfig.LibvirtConfigGuestDisk)
  346 
  347     def get_all_devices(self, devtype=None):
  348         """Returns all devices for a guest
  349 
  350         :param devtype: a LibvirtConfigGuestDevice subclass class
  351 
  352         :returns: a list of LibvirtConfigGuestDevice instances
  353         """
  354 
  355         try:
  356             config = vconfig.LibvirtConfigGuest()
  357             config.parse_str(
  358                 self._domain.XMLDesc(0))
  359         except Exception:
  360             return []
  361 
  362         devs = []
  363         for dev in config.devices:
  364             if (devtype is None or
  365                 isinstance(dev, devtype)):
  366                 devs.append(dev)
  367         return devs
  368 
    def detach_device_with_retry(self, get_device_conf_func, device, live,
                                 max_retry_count=7, inc_sleep_time=10,
                                 max_sleep_time=60,
                                 alternative_device_name=None,
                                 supports_device_missing_error_code=False):
        """Detaches a device from the guest. After the initial detach request,
        a function is returned which can be used to ensure the device is
        successfully removed from the guest domain (retrying the removal as
        necessary).

        :param get_device_conf_func: function which takes device as a parameter
                                     and returns the configuration for device
        :param device: device to detach
        :param live: bool to indicate whether it affects the guest in running
                     state
        :param max_retry_count: number of times the returned function will
                                retry a detach before failing
        :param inc_sleep_time: incremental time to sleep in seconds between
                               detach retries
        :param max_sleep_time: max sleep time in seconds beyond which the sleep
                               time will not be incremented using param
                               inc_sleep_time. On reaching this threshold,
                               max_sleep_time will be used as the sleep time.
        :param alternative_device_name: This is an alternative identifier for
            the device if device is not an ID, used solely for error messages.
        :param supports_device_missing_error_code: does the installed version
                                                   of libvirt provide the
                                                   VIR_ERR_DEVICE_MISSING error
                                                   code.
        """
        alternative_device_name = alternative_device_name or device
        # Error codes older libvirt versions raise when asked to detach a
        # device that is not present.
        unplug_libvirt_error_codes = set([
            libvirt.VIR_ERR_OPERATION_FAILED,
            libvirt.VIR_ERR_INTERNAL_ERROR
        ])

        def _try_detach_device(conf, persistent=False, live=False):
            # Raise DeviceNotFound if the device isn't found during detach
            try:
                self.detach_device(conf, persistent=persistent, live=live)
                if get_device_conf_func(device) is None:
                    LOG.debug('Successfully detached device %s from guest. '
                              'Persistent? %s. Live? %s',
                              device, persistent, live)

            except libvirt.libvirtError as ex:
                # reraise=False lets us decide below whether to convert the
                # error to DeviceNotFound or re-raise the original.
                with excutils.save_and_reraise_exception(reraise=False) as ctx:
                    errcode = ex.get_error_code()
                    # TODO(lyarwood): Remove libvirt.VIR_ERR_OPERATION_FAILED
                    # and libvirt.VIR_ERR_INTERNAL_ERROR once
                    # MIN_LIBVIRT_VERSION is >= 4.1.0
                    if supports_device_missing_error_code:
                        unplug_libvirt_error_codes.add(
                            libvirt.VIR_ERR_DEVICE_MISSING)
                    if errcode in unplug_libvirt_error_codes:
                        # TODO(lyarwood): Remove the following error message
                        # check once we only care about VIR_ERR_DEVICE_MISSING
                        errmsg = ex.get_error_message()
                        if 'not found' in errmsg:
                            # This will be raised if the live domain
                            # detach fails because the device is not found
                            raise exception.DeviceNotFound(
                                device=alternative_device_name)
                    # TODO(lyarwood): Remove libvirt.VIR_ERR_INVALID_ARG once
                    # MIN_LIBVIRT_VERSION is >= 4.1.0
                    elif errcode == libvirt.VIR_ERR_INVALID_ARG:
                        errmsg = ex.get_error_message()
                        if 'no target device' in errmsg:
                            # This will be raised if the persistent domain
                            # detach fails because the device is not found
                            raise exception.DeviceNotFound(
                                device=alternative_device_name)
                    # Re-raise the original exception if we're not raising
                    # DeviceNotFound instead. This will avoid logging of a
                    # "Original exception being dropped" traceback.
                    ctx.reraise = True

        conf = get_device_conf_func(device)
        if conf is None:
            raise exception.DeviceNotFound(device=alternative_device_name)

        persistent = self.has_persistent_configuration()

        LOG.debug('Attempting initial detach for device %s',
                  alternative_device_name)
        try:
            _try_detach_device(conf, persistent, live)
        except exception.DeviceNotFound:
            # NOTE(melwitt): There are effectively two configs for an instance.
            # The persistent config (affects instance upon next boot) and the
            # live config (affects running instance). When we detach a device,
            # we need to detach it from both configs if the instance has a
            # persistent config and a live config. If we tried to detach the
            # device with persistent=True and live=True and it was not found,
            # we should still try to detach from the live config, so continue.
            if persistent and live:
                pass
            else:
                raise
        LOG.debug('Start retrying detach until device %s is gone.',
                  alternative_device_name)

        @loopingcall.RetryDecorator(max_retry_count=max_retry_count,
                                    inc_sleep_time=inc_sleep_time,
                                    max_sleep_time=max_sleep_time,
                                    exceptions=exception.DeviceDetachFailed)
        def _do_wait_and_retry_detach():
            config = get_device_conf_func(device)
            if config is not None:
                # Device is already detached from persistent config
                # and only the live config needs to be updated.
                _try_detach_device(config, persistent=False, live=live)

                # Raising DeviceDetachFailed triggers the RetryDecorator to
                # sleep and retry until max_retry_count is exhausted.
                reason = _("Unable to detach the device from the live config.")
                raise exception.DeviceDetachFailed(
                    device=alternative_device_name, reason=reason)

        return _do_wait_and_retry_detach
  487 
  488     def detach_device(self, conf, persistent=False, live=False):
  489         """Detaches device to the guest.
  490 
  491         :param conf: A LibvirtConfigObject of the device to detach
  492         :param persistent: A bool to indicate whether the change is
  493                            persistent or not
  494         :param live: A bool to indicate whether it affect the guest
  495                      in running state
  496         """
  497         flags = persistent and libvirt.VIR_DOMAIN_AFFECT_CONFIG or 0
  498         flags |= live and libvirt.VIR_DOMAIN_AFFECT_LIVE or 0
  499 
  500         device_xml = conf.to_xml()
  501         if isinstance(device_xml, bytes):
  502             device_xml = device_xml.decode('utf-8')
  503 
  504         LOG.debug("detach device xml: %s", device_xml)
  505         self._domain.detachDeviceFlags(device_xml, flags=flags)
  506 
  507     def get_xml_desc(self, dump_inactive=False, dump_sensitive=False,
  508                      dump_migratable=False):
  509         """Returns xml description of guest.
  510 
  511         :param dump_inactive: Dump inactive domain information
  512         :param dump_sensitive: Dump security sensitive information
  513         :param dump_migratable: Dump XML suitable for migration
  514 
  515         :returns string: XML description of the guest
  516         """
  517         flags = dump_inactive and libvirt.VIR_DOMAIN_XML_INACTIVE or 0
  518         flags |= dump_sensitive and libvirt.VIR_DOMAIN_XML_SECURE or 0
  519         flags |= dump_migratable and libvirt.VIR_DOMAIN_XML_MIGRATABLE or 0
  520         return self._domain.XMLDesc(flags=flags)
  521 
  522     def save_memory_state(self):
  523         """Saves the domain's memory state. Requires running domain.
  524 
  525         raises: raises libvirtError on error
  526         """
  527         self._domain.managedSave(0)
  528 
  529     def get_block_device(self, disk):
  530         """Returns a block device wrapper for disk."""
  531         return BlockDevice(self, disk)
  532 
  533     def set_user_password(self, user, new_pass):
  534         """Configures a new user password."""
  535         self._domain.setUserPassword(user, new_pass, 0)
  536 
  537     def _get_domain_info(self):
  538         """Returns information on Guest.
  539 
  540         :returns list: [state, maxMem, memory, nrVirtCpu, cpuTime]
  541         """
  542         return self._domain.info()
  543 
  544     def get_info(self, host):
  545         """Retrieve information from libvirt for a specific instance name.
  546 
  547         If a libvirt error is encountered during lookup, we might raise a
  548         NotFound exception or Error exception depending on how severe the
  549         libvirt error is.
  550 
  551         :returns hardware.InstanceInfo:
  552         """
  553         try:
  554             dom_info = self._get_domain_info()
  555         except libvirt.libvirtError as ex:
  556             error_code = ex.get_error_code()
  557             if error_code == libvirt.VIR_ERR_NO_DOMAIN:
  558                 raise exception.InstanceNotFound(instance_id=self.uuid)
  559 
  560             msg = (_('Error from libvirt while getting domain info for '
  561                      '%(instance_name)s: [Error Code %(error_code)s] %(ex)s') %
  562                    {'instance_name': self.name,
  563                     'error_code': error_code,
  564                     'ex': ex})
  565             raise exception.InternalError(msg)
  566 
  567         return hardware.InstanceInfo(
  568             state=LIBVIRT_POWER_STATE[dom_info[0]],
  569             internal_id=self.id)
  570 
  571     def get_power_state(self, host):
  572         return self.get_info(host).state
  573 
  574     def is_active(self):
  575         "Determines whether guest is currently running."
  576         return self._domain.isActive()
  577 
  578     def freeze_filesystems(self):
  579         """Freeze filesystems within guest."""
  580         self._domain.fsFreeze()
  581 
  582     def thaw_filesystems(self):
  583         """Thaw filesystems within guest."""
  584         self._domain.fsThaw()
  585 
  586     def snapshot(self, conf, no_metadata=False,
  587                  disk_only=False, reuse_ext=False, quiesce=False):
  588         """Creates a guest snapshot.
  589 
  590         :param conf: libvirt.LibvirtConfigGuestSnapshotDisk
  591         :param no_metadata: Make snapshot without remembering it
  592         :param disk_only: Disk snapshot, no system checkpoint
  593         :param reuse_ext: Reuse any existing external files
  594         :param quiesce: Use QGA to quiece all mounted file systems
  595         """
  596         flags = no_metadata and (
  597             libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA or 0)
  598         flags |= disk_only and (
  599             libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY or 0)
  600         flags |= reuse_ext and (
  601             libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT or 0)
  602         flags |= quiesce and libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE or 0
  603 
  604         device_xml = conf.to_xml()
  605         if isinstance(device_xml, bytes):
  606             device_xml = device_xml.decode('utf-8')
  607 
  608         self._domain.snapshotCreateXML(device_xml, flags=flags)
  609 
  610     def shutdown(self):
  611         """Shutdown guest"""
  612         self._domain.shutdown()
  613 
  614     def pause(self):
  615         """Suspends an active guest
  616 
  617         Process is frozen without further access to CPU resources and
  618         I/O but the memory used by the domain at the hypervisor level
  619         will stay allocated.
  620 
  621         See method "resume()" to reactive guest.
  622         """
  623         self._domain.suspend()
  624 
    def migrate(self, destination, migrate_uri=None, migrate_disks=None,
                destination_xml=None, flags=0, bandwidth=0):
        """Migrate guest object from its current host to the destination

        :param destination: URI of host destination where guest will be migrate
        :param migrate_uri: URI for invoking the migration
        :param migrate_disks: List of disks to be migrated
        :param destination_xml: The guest XML to be used on the target host
        :param flags: May be one of more of the following:
           VIR_MIGRATE_LIVE Do not pause the VM during migration
           VIR_MIGRATE_PEER2PEER Direct connection between source &
                                 destination hosts
           VIR_MIGRATE_TUNNELLED Tunnel migration data over the
                                 libvirt RPC channel
           VIR_MIGRATE_PERSIST_DEST If the migration is successful,
                                    persist the domain on the
                                    destination host.
           VIR_MIGRATE_UNDEFINE_SOURCE If the migration is successful,
                                       undefine the domain on the
                                       source host.
           VIR_MIGRATE_NON_SHARED_INC Migration with non-shared
                                      storage with incremental disk
                                      copy
           VIR_MIGRATE_AUTO_CONVERGE Slow down domain to make sure it does
                                     not change its memory faster than a
                                     hypervisor can transfer the changed
                                     memory to the destination host
           VIR_MIGRATE_POSTCOPY Tell libvirt to enable post-copy migration
           VIR_MIGRATE_TLS Use QEMU-native TLS
        :param bandwidth: The maximum bandwidth in MiB/s
        """
        params = {}
        # In migrateToURI3 these parameters are extracted from the
        # `params` dict
        params['bandwidth'] = bandwidth

        if destination_xml:
            params['destination_xml'] = destination_xml
            # Same XML is used for the persistent definition on the target
            # (libvirt's VIR_MIGRATE_PARAM_PERSIST_XML parameter).
            params['persistent_xml'] = destination_xml
        if migrate_disks:
            params['migrate_disks'] = migrate_disks
        if migrate_uri:
            params['migrate_uri'] = migrate_uri

        # Due to a quirk in the libvirt python bindings,
        # VIR_MIGRATE_NON_SHARED_INC with an empty migrate_disks is
        # interpreted as "block migrate all writable disks" rather than
        # "don't block migrate any disks". This includes attached
        # volumes, which will potentially corrupt data on those
        # volumes. Consequently we need to explicitly unset
        # VIR_MIGRATE_NON_SHARED_INC if there are no disks to be block
        # migrated.
        if (flags & libvirt.VIR_MIGRATE_NON_SHARED_INC != 0 and
                not params.get('migrate_disks')):
            flags &= ~libvirt.VIR_MIGRATE_NON_SHARED_INC

        self._domain.migrateToURI3(
            destination, params=params, flags=flags)
  683 
  684     def abort_job(self):
  685         """Requests to abort current background job"""
  686         self._domain.abortJob()
  687 
  688     def migrate_configure_max_downtime(self, mstime):
  689         """Sets maximum time for which domain is allowed to be paused
  690 
  691         :param mstime: Downtime in milliseconds.
  692         """
  693         self._domain.migrateSetMaxDowntime(mstime)
  694 
  695     def migrate_start_postcopy(self):
  696         """Switch running live migration to post-copy mode"""
  697         self._domain.migrateStartPostCopy()
  698 
    def get_job_info(self):
        """Get job info for the domain

        Query the libvirt job info for the domain (ie progress
        of migration, or snapshot operation)

        Prefers the modern virDomainGetJobStats API; if either the remote
        libvirtd or the local python binding lacks it, falls back to the
        older virDomainGetJobInfo and caches that decision process-wide in
        JobInfo._have_job_stats so later calls skip the failing path.

        :returns: a JobInfo of guest
        """
        if JobInfo._have_job_stats:
            try:
                stats = self._domain.jobStats()
                return JobInfo(**stats)
            except libvirt.libvirtError as ex:
                if ex.get_error_code() == libvirt.VIR_ERR_NO_SUPPORT:
                    # Remote libvirt doesn't support new API
                    LOG.debug("Missing remote virDomainGetJobStats: %s", ex)
                    JobInfo._have_job_stats = False
                    return JobInfo._get_job_stats_compat(self._domain)
                elif ex.get_error_code() in (
                        libvirt.VIR_ERR_NO_DOMAIN,
                        libvirt.VIR_ERR_OPERATION_INVALID):
                    # Transient guest finished migration, so it has gone
                    # away completely
                    LOG.debug("Domain has shutdown/gone away: %s", ex)
                    return JobInfo(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
                else:
                    LOG.debug("Failed to get job stats: %s", ex)
                    raise
            except AttributeError as ex:
                # Local python binding doesn't support new API
                LOG.debug("Missing local virDomainGetJobStats: %s", ex)
                JobInfo._have_job_stats = False
                return JobInfo._get_job_stats_compat(self._domain)
        else:
            return JobInfo._get_job_stats_compat(self._domain)
  734 
  735 
  736 class BlockDevice(object):
  737     """Wrapper around block device API"""
  738 
  739     REBASE_DEFAULT_BANDWIDTH = 0  # in MiB/s - 0 unlimited
  740     COMMIT_DEFAULT_BANDWIDTH = 0  # in MiB/s - 0 unlimited
  741 
  742     def __init__(self, guest, disk):
  743         self._guest = guest
  744         self._disk = disk
  745 
  746     def abort_job(self, async_=False, pivot=False):
  747         """Request to cancel a live block device job
  748 
  749         :param async_: Cancel the block device job (e.g. 'copy' or
  750                        'commit'), and return as soon as possible, without
  751                        waiting for job completion
  752         :param pivot: Pivot to the destination image when ending a
  753                       'copy' or "active commit" (meaning: merging the
  754                       contents of current active disk into its backing
  755                       file) job
  756         """
  757         flags = async_ and libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC or 0
  758         flags |= pivot and libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT or 0
  759         self._guest._domain.blockJobAbort(self._disk, flags=flags)
  760 
  761     def get_job_info(self):
  762         """Returns information about job currently running
  763 
  764         :returns: BlockDeviceJobInfo, or None if no job exists
  765         :raises: libvirt.libvirtError on error fetching block job info
  766         """
  767 
  768         # libvirt's blockJobInfo() raises libvirt.libvirtError if there was an
  769         # error. It returns {} if the job no longer exists, or a fully
  770         # populated dict if the job exists.
  771         status = self._guest._domain.blockJobInfo(self._disk, flags=0)
  772 
  773         # The job no longer exists
  774         if not status:
  775             return None
  776 
  777         return BlockDeviceJobInfo(
  778             job=status['type'],
  779             bandwidth=status['bandwidth'],
  780             cur=status['cur'],
  781             end=status['end'])
  782 
  783     def copy(self, dest_xml, shallow=False, reuse_ext=False, transient=False):
  784         """Copy the guest-visible contents into a new disk
  785 
  786         http://libvirt.org/html/libvirt-libvirt-domain.html#virDomainBlockCopy
  787 
  788         :param: dest_xml: XML describing the destination disk to copy to
  789         :param: shallow: Limit copy to top of source backing chain
  790         :param: reuse_ext: Reuse existing external file for a copy
  791         :param: transient: Don't force usage of recoverable job for the copy
  792                            operation
  793          """
  794         flags = shallow and libvirt.VIR_DOMAIN_BLOCK_COPY_SHALLOW or 0
  795         flags |= reuse_ext and libvirt.VIR_DOMAIN_BLOCK_COPY_REUSE_EXT or 0
  796         flags |= transient and libvirt.VIR_DOMAIN_BLOCK_COPY_TRANSIENT_JOB or 0
  797         return self._guest._domain.blockCopy(self._disk, dest_xml, flags=flags)
  798 
  799     def rebase(self, base, shallow=False, reuse_ext=False,
  800                copy=False, relative=False, copy_dev=False):
  801         """Copy data from backing chain into a new disk
  802 
  803         This copies data from backing file(s) into overlay(s), giving
  804         control over several aspects like what part of a disk image
  805         chain to be copied, whether to reuse an existing destination
  806         file, etc.  And updates the backing file to the new disk
  807 
  808         :param shallow: Limit copy to top of the source backing chain
  809         :param reuse_ext: Reuse an existing external file that was
  810                           pre-created
  811         :param copy: Start a copy job
  812         :param relative: Keep backing chain referenced using relative names
  813         :param copy_dev: Treat the destination as type="block"
  814         """
  815         flags = shallow and libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW or 0
  816         flags |= reuse_ext and libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT or 0
  817         flags |= copy and libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY or 0
  818         flags |= copy_dev and libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY_DEV or 0
  819         flags |= relative and libvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE or 0
  820         return self._guest._domain.blockRebase(
  821             self._disk, base, self.REBASE_DEFAULT_BANDWIDTH, flags=flags)
  822 
  823     def commit(self, base, top, relative=False):
  824         """Merge data from overlays into backing file
  825 
  826         This live merges (or "commits") contents from backing files into
  827         overlays, thus reducing the length of a disk image chain.
  828 
  829         :param relative: Keep backing chain referenced using relative names
  830         """
  831         flags = relative and libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE or 0
  832         return self._guest._domain.blockCommit(
  833             self._disk, base, top, self.COMMIT_DEFAULT_BANDWIDTH, flags=flags)
  834 
  835     def resize(self, size):
  836         """Resize block device to the given size in bytes.
  837 
  838         This resizes the block device within the instance to the given size.
  839 
  840         :param size: The size to resize the device to in bytes.
  841         """
  842         flags = libvirt.VIR_DOMAIN_BLOCK_RESIZE_BYTES
  843         self._guest._domain.blockResize(self._disk, size, flags=flags)
  844 
  845     def is_job_complete(self):
  846         """Return True if the job is complete, False otherwise
  847 
  848         :returns: True if the job is complete, False otherwise
  849         :raises: libvirt.libvirtError on error fetching block job info
  850         """
  851         # NOTE(mdbooth): This method polls for block job completion. It returns
  852         # true if either we get a status which indicates completion, or there
  853         # is no longer a record of the job. Ideally this method and its
  854         # callers would be rewritten to consume libvirt events from the job.
  855         # This would provide a couple of advantages. Firstly, as it would no
  856         # longer be polling it would notice completion immediately rather than
  857         # at the next 0.5s check, and would also consume fewer resources.
  858         # Secondly, with the current method we only know that 'no job'
  859         # indicates completion. It does not necessarily indicate successful
  860         # completion: the job could have failed, or been cancelled. When
  861         # polling for block job info we have no way to detect this, so we
  862         # assume success.
  863 
  864         status = self.get_job_info()
  865 
  866         # If the job no longer exists, it is because it has completed
  867         # NOTE(mdbooth): See comment above: it may not have succeeded.
  868         if status is None:
  869             return True
  870 
  871         # Track blockjob progress in DEBUG, helpful when reviewing failures.
  872         job_type = LIBVIRT_BLOCK_JOB_TYPE.get(
  873             status.job, f"Unknown to Nova ({status.job})")
  874         LOG.debug("%(job_type)s block job progress, current cursor: %(cur)s "
  875                   "final cursor: %(end)s",
  876                   {'job_type': job_type, 'cur': status.cur, 'end': status.end})
  877 
  878         # NOTE(lyarwood): Use the mirror element to determine if we can pivot
  879         # to the new disk once blockjobinfo reports progress as complete.
  880         if status.cur == status.end:
  881             disk = self._guest.get_disk(self._disk)
  882             if disk and disk.mirror:
  883                 return disk.mirror.ready == 'yes'
  884 
  885         return False
  886 
  887     def blockStats(self):
  888         """Extracts block device statistics for a domain"""
  889         return self._guest._domain.blockStats(self._disk)
  890 
  891 
  892 class VCPUInfo(object):
  893     def __init__(self, id, cpu, state, time):
  894         """Structure for information about guest vcpus.
  895 
  896         :param id: The virtual cpu number
  897         :param cpu: The host cpu currently associated
  898         :param state: The running state of the vcpu (0 offline, 1 running, 2
  899                       blocked on resource)
  900         :param time: The cpu time used in nanoseconds
  901         """
  902         self.id = id
  903         self.cpu = cpu
  904         self.state = state
  905         self.time = time
  906 
  907 
  908 class BlockDeviceJobInfo(object):
  909     def __init__(self, job, bandwidth, cur, end):
  910         """Structure for information about running job.
  911 
  912         :param job: The running job (0 placeholder, 1 pull,
  913                       2 copy, 3 commit, 4 active commit)
  914         :param bandwidth: Used in MiB/s
  915         :param cur: Indicates the position between 0 and 'end'
  916         :param end: Indicates the position for this operation
  917         """
  918         self.job = job
  919         self.bandwidth = bandwidth
  920         self.cur = cur
  921         self.end = end
  922 
  923 
  924 class JobInfo(object):
  925     """Information about libvirt background jobs
  926 
  927     This class encapsulates information about libvirt
  928     background jobs. It provides a mapping from either
  929     the old virDomainGetJobInfo API which returned a
  930     fixed list of fields, or the modern virDomainGetJobStats
  931     which returns an extendable dict of fields.
  932     """
  933 
  934     _have_job_stats = True
  935 
  936     def __init__(self, **kwargs):
  937 
  938         self.type = kwargs.get("type", libvirt.VIR_DOMAIN_JOB_NONE)
  939         self.time_elapsed = kwargs.get("time_elapsed", 0)
  940         self.time_remaining = kwargs.get("time_remaining", 0)
  941         self.downtime = kwargs.get("downtime", 0)
  942         self.setup_time = kwargs.get("setup_time", 0)
  943         self.data_total = kwargs.get("data_total", 0)
  944         self.data_processed = kwargs.get("data_processed", 0)
  945         self.data_remaining = kwargs.get("data_remaining", 0)
  946         self.memory_total = kwargs.get("memory_total", 0)
  947         self.memory_processed = kwargs.get("memory_processed", 0)
  948         self.memory_remaining = kwargs.get("memory_remaining", 0)
  949         self.memory_iteration = kwargs.get("memory_iteration", 0)
  950         self.memory_constant = kwargs.get("memory_constant", 0)
  951         self.memory_normal = kwargs.get("memory_normal", 0)
  952         self.memory_normal_bytes = kwargs.get("memory_normal_bytes", 0)
  953         self.memory_bps = kwargs.get("memory_bps", 0)
  954         self.disk_total = kwargs.get("disk_total", 0)
  955         self.disk_processed = kwargs.get("disk_processed", 0)
  956         self.disk_remaining = kwargs.get("disk_remaining", 0)
  957         self.disk_bps = kwargs.get("disk_bps", 0)
  958         self.comp_cache = kwargs.get("compression_cache", 0)
  959         self.comp_bytes = kwargs.get("compression_bytes", 0)
  960         self.comp_pages = kwargs.get("compression_pages", 0)
  961         self.comp_cache_misses = kwargs.get("compression_cache_misses", 0)
  962         self.comp_overflow = kwargs.get("compression_overflow", 0)
  963 
  964     @classmethod
  965     def _get_job_stats_compat(cls, dom):
  966         # Make the old virDomainGetJobInfo method look similar to the
  967         # modern virDomainGetJobStats method
  968         try:
  969             info = dom.jobInfo()
  970         except libvirt.libvirtError as ex:
  971             # When migration of a transient guest completes, the guest
  972             # goes away so we'll see NO_DOMAIN error code
  973             #
  974             # When migration of a persistent guest completes, the guest
  975             # merely shuts off, but libvirt unhelpfully raises an
  976             # OPERATION_INVALID error code
  977             #
  978             # Lets pretend both of these mean success
  979             if ex.get_error_code() in (libvirt.VIR_ERR_NO_DOMAIN,
  980                                        libvirt.VIR_ERR_OPERATION_INVALID):
  981                 LOG.debug("Domain has shutdown/gone away: %s", ex)
  982                 return cls(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
  983             else:
  984                 LOG.debug("Failed to get job info: %s", ex)
  985                 raise
  986 
  987         return cls(
  988             type=info[0],
  989             time_elapsed=info[1],
  990             time_remaining=info[2],
  991             data_total=info[3],
  992             data_processed=info[4],
  993             data_remaining=info[5],
  994             memory_total=info[6],
  995             memory_processed=info[7],
  996             memory_remaining=info[8],
  997             disk_total=info[9],
  998             disk_processed=info[10],
  999             disk_remaining=info[11])