"Fossies" - the Fresh Open Source Software Archive

Member "nova-22.0.1/nova/tests/unit/virt/libvirt/test_driver.py" (19 Nov 2020, 1273687 Bytes) of package /linux/misc/openstack/nova-22.0.1.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) Python source code syntax highlighting (style: standard) with prefixed line numbers. Alternatively you can here view or download the uninterpreted source code file. See also the latest Fossies "Diffs" side-by-side code changes report for "test_driver.py": 22.0.0_vs_22.0.1.

#    Copyright 2010 OpenStack Foundation
#    Copyright 2012 University Of Minho
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import binascii
from collections import defaultdict
from collections import deque
from collections import OrderedDict
import contextlib
import copy
import datetime
import errno
import glob
import io
import os
import random
import re
import shutil
import signal
import sys
import testtools
import threading
import time
import unittest

from castellan import key_manager
import ddt
import eventlet
from eventlet import greenthread
import fixtures
from lxml import etree
import mock
from os_brick import encryptors
from os_brick import exception as brick_exception
from os_brick.initiator import connector
import os_resource_classes as orc
import os_traits as ot
import os_vif
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import fileutils
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import strutils
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_utils import versionutils
import six
from six.moves import range

from nova.api.metadata import base as instance_metadata
from nova.compute import manager
from nova.compute import power_state
from nova.compute import provider_tree
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
from nova import context
from nova.db import api as db
from nova.db import constants as db_const
from nova import exception
from nova.network import model as network_model
from nova import objects
from nova.objects import block_device as block_device_obj
from nova.objects import fields
from nova.objects import migrate_data as migrate_data_obj
from nova.objects import virtual_interface as obj_vif
from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
import nova.privsep.fs
import nova.privsep.libvirt
from nova.storage import rbd_utils
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_diagnostics
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake as fake_image
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_diagnostics
from nova.tests.unit.objects import test_pci_device
from nova.tests.unit.objects import test_vcpu_model
from nova.tests.unit import utils as test_utils
from nova.tests.unit.virt.libvirt import fake_imagebackend
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.virt import fake
from nova.virt import hardware
from nova.virt.image import model as imgmodel
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import designer
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
from nova.virt.libvirt.host import SEV_KERNEL_PARAM_FILE
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import migration as libvirt_migrate
from nova.virt.libvirt.storage import dmcrypt
from nova.virt.libvirt.storage import lvm
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt import vif as libvirt_vif
from nova.virt.libvirt.volume import fs as fs_drivers
from nova.virt.libvirt.volume import volume as volume_drivers


CONF = nova.conf.CONF

_fake_network_info = fake_network.fake_get_instance_nw_info

_fake_NodeDevXml = {
    "pci_0000_04_00_3": """
        <device>
        <name>pci_0000_04_00_3</name>
        <parent>pci_0000_00_01_1</parent>
        <driver>
            <name>igb</name>
        </driver>
        <capability type='pci'>
            <domain>0</domain>
            <bus>4</bus>
            <slot>0</slot>
            <function>3</function>
            <product id='0x1521'>I350 Gigabit Network Connection</product>
            <vendor id='0x8086'>Intel Corporation</vendor>
            <capability type='virt_functions'>
              <address domain='0x0000' bus='0x04' slot='0x10' function='0x3'/>
              <address domain='0x0000' bus='0x04' slot='0x10' function='0x7'/>
              <address domain='0x0000' bus='0x04' slot='0x11' function='0x3'/>
              <address domain='0x0000' bus='0x04' slot='0x11' function='0x7'/>
            </capability>
        </capability>
      </device>""",
    "pci_0000_04_10_7": """
      <device>
         <name>pci_0000_04_10_7</name>
         <parent>pci_0000_04_00_3</parent>
         <driver>
         <name>igbvf</name>
         </driver>
         <capability type='pci'>
          <domain>0</domain>
          <bus>4</bus>
          <slot>16</slot>
          <function>7</function>
          <product id='0x1520'>I350 Ethernet Controller Virtual Function
            </product>
          <vendor id='0x8086'>Intel Corporation</vendor>
          <capability type='phys_function'>
             <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
          </capability>
          <capability type='virt_functions'>
          </capability>
        </capability>
    </device>""",
    "pci_0000_04_11_7": """
      <device>
         <name>pci_0000_04_11_7</name>
         <parent>pci_0000_04_00_3</parent>
         <driver>
         <name>igbvf</name>
         </driver>
         <capability type='pci'>
          <domain>0</domain>
          <bus>4</bus>
          <slot>17</slot>
          <function>7</function>
          <product id='0x1520'>I350 Ethernet Controller Virtual Function
            </product>
          <vendor id='0x8086'>Intel Corporation</vendor>
          <numa node='0'/>
          <capability type='phys_function'>
             <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
          </capability>
          <capability type='virt_functions'>
          </capability>
        </capability>
    </device>""",
    "pci_0000_04_00_1": """
    <device>
      <name>pci_0000_04_00_1</name>
      <path>/sys/devices/pci0000:00/0000:00:02.0/0000:04:00.1</path>
      <parent>pci_0000_00_02_0</parent>
      <driver>
        <name>mlx5_core</name>
      </driver>
      <capability type='pci'>
        <domain>0</domain>
        <bus>4</bus>
        <slot>0</slot>
        <function>1</function>
        <product id='0x1013'>MT27700 Family [ConnectX-4]</product>
        <vendor id='0x15b3'>Mellanox Technologies</vendor>
        <iommuGroup number='15'>
          <address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
          <address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/>
        </iommuGroup>
        <numa node='0'/>
        <pci-express>
          <link validity='cap' port='0' speed='8' width='16'/>
          <link validity='sta' speed='8' width='16'/>
        </pci-express>
      </capability>
    </device>""",
    # libvirt >= 1.3.0 nodedev-dumpxml
    "pci_0000_03_00_0": """
    <device>
        <name>pci_0000_03_00_0</name>
        <path>/sys/devices/pci0000:00/0000:00:02.0/0000:03:00.0</path>
        <parent>pci_0000_00_02_0</parent>
        <driver>
        <name>mlx5_core</name>
        </driver>
        <capability type='pci'>
        <domain>0</domain>
        <bus>3</bus>
        <slot>0</slot>
        <function>0</function>
        <product id='0x1013'>MT27700 Family [ConnectX-4]</product>
        <vendor id='0x15b3'>Mellanox Technologies</vendor>
        <capability type='virt_functions' maxCount='16'>
          <address domain='0x0000' bus='0x03' slot='0x00' function='0x2'/>
          <address domain='0x0000' bus='0x03' slot='0x00' function='0x3'/>
          <address domain='0x0000' bus='0x03' slot='0x00' function='0x4'/>
          <address domain='0x0000' bus='0x03' slot='0x00' function='0x5'/>
        </capability>
        <iommuGroup number='15'>
          <address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
          <address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/>
        </iommuGroup>
        <numa node='0'/>
        <pci-express>
          <link validity='cap' port='0' speed='8' width='16'/>
          <link validity='sta' speed='8' width='16'/>
        </pci-express>
      </capability>
    </device>""",
    "pci_0000_03_00_1": """
    <device>
      <name>pci_0000_03_00_1</name>
      <path>/sys/devices/pci0000:00/0000:00:02.0/0000:03:00.1</path>
      <parent>pci_0000_00_02_0</parent>
      <driver>
        <name>mlx5_core</name>
      </driver>
      <capability type='pci'>
        <domain>0</domain>
        <bus>3</bus>
        <slot>0</slot>
        <function>1</function>
        <product id='0x1013'>MT27700 Family [ConnectX-4]</product>
        <vendor id='0x15b3'>Mellanox Technologies</vendor>
        <capability type='virt_functions' maxCount='16'/>
        <iommuGroup number='15'>
          <address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
          <address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/>
        </iommuGroup>
        <numa node='0'/>
        <pci-express>
          <link validity='cap' port='0' speed='8' width='16'/>
          <link validity='sta' speed='8' width='16'/>
        </pci-express>
      </capability>
    </device>""",
    "net_enp2s1_02_9a_a1_37_be_54": """
    <device>
      <name>net_enp2s1_02_9a_a1_37_be_54</name>
      <path>/sys/devices/pci0000:00/0000:04:00.3/0000:04:10.7/net/enp2s1</path>
      <parent>pci_0000_04_10_7</parent>
      <capability type='net'>
        <interface>enp2s1</interface>
        <address>02:9a:a1:37:be:54</address>
        <link state='down'/>
        <feature name='rx'/>
        <feature name='tx'/>
        <feature name='sg'/>
        <feature name='tso'/>
        <feature name='gso'/>
        <feature name='gro'/>
        <feature name='rxvlan'/>
        <feature name='txvlan'/>
        <capability type='80203'/>
      </capability>
    </device>""",
    "net_enp2s2_02_9a_a1_37_be_54": """
    <device>
      <name>net_enp2s2_02_9a_a1_37_be_54</name>
      <path>/sys/devices/pci0000:00/0000:00:02.0/0000:02:02.0/net/enp2s2</path>
      <parent>pci_0000_04_11_7</parent>
      <capability type='net'>
        <interface>enp2s2</interface>
        <address>02:9a:a1:37:be:54</address>
        <link state='down'/>
        <feature name='rx'/>
        <feature name='tx'/>
        <feature name='sg'/>
        <feature name='tso'/>
        <feature name='gso'/>
        <feature name='gro'/>
        <feature name='rxvlan'/>
        <feature name='txvlan'/>
        <capability type='80203'/>
      </capability>
    </device>""",
    "pci_0000_06_00_0": """
    <device>
      <name>pci_0000_06_00_0</name>
      <path>/sys/devices/pci0000:00/0000:00:06.0</path>
      <parent></parent>
      <driver>
        <name>nvidia</name>
      </driver>
      <capability type="pci">
        <domain>0</domain>
        <bus>10</bus>
        <slot>1</slot>
        <function>5</function>
        <product id="0x0FFE">GRID M60-0B</product>
        <vendor id="0x10DE">Nvidia</vendor>
        <numa node="8"/>
        <capability type='mdev_types'>
          <type id='nvidia-11'>
            <name>GRID M60-0B</name>
            <deviceAPI>vfio-pci</deviceAPI>
            <availableInstances>16</availableInstances>
          </type>
        </capability>
      </capability>
    </device>""",
    "mdev_4b20d080_1b54_4048_85b3_a6a62d165c01": """
    <device>
      <name>mdev_4b20d080_1b54_4048_85b3_a6a62d165c01</name>
      <path>/sys/devices/pci0000:00/0000:00:02.0/4b20d080-1b54-4048-85b3-a6a62d165c01</path>
      <parent>pci_0000_00_02_0</parent>
      <driver>
        <name>vfio_mdev</name>
      </driver>
      <capability type='mdev'>
        <type id='nvidia-11'/>
        <iommuGroup number='12'/>
      </capability>
    </device>
    """,
    }

_fake_NodeDevXml_parents = {
    name: etree.fromstring(xml).find("parent").text
    for name, xml in _fake_NodeDevXml.items()
}

_fake_NodeDevXml_children = defaultdict(list)
for key, val in _fake_NodeDevXml_parents.items():
    _fake_NodeDevXml_children[val].append(key)
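# For illustration: the two dicts above invert the <parent> relationships in
# the fixture XML, so _fake_NodeDevXml_parents maps each device name to the
# text of its <parent> element, and _fake_NodeDevXml_children maps, e.g.,
# "pci_0000_04_00_3" to its two virtual functions, "pci_0000_04_10_7" and
# "pci_0000_04_11_7".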

_fake_cpu_info = {
    "arch": "test_arch",
    "model": "test_model",
    "vendor": "test_vendor",
    "topology": {
        "sockets": 1,
        "cores": 8,
        "threads": 16
    },
    "features": ["feature1", "feature2"]
}

_fake_cpu_info_aarch64 = {
    "arch": fields.Architecture.AARCH64,
    "model": "test_model",
    "vendor": "test_vendor",
    "topology": {
        "sockets": 1,
        "cores": 8,
        "threads": 16
    },
    "features": ["feature1", "feature2"]
}

eph_default_ext = utils.get_hash_str(nova.privsep.fs._DEFAULT_FILE_SYSTEM)[:7]

_fake_qemu64_cpu_feature = """
<cpu mode='custom' match='exact'>
  <model fallback='forbid'>qemu64</model>
  <feature policy='require' name='svm'/>
  <feature policy='require' name='lm'/>
  <feature policy='require' name='nx'/>
  <feature policy='require' name='syscall'/>
  <feature policy='require' name='cx16'/>
  <feature policy='require' name='pni'/>
  <feature policy='require' name='sse2'/>
  <feature policy='require' name='sse'/>
  <feature policy='require' name='fxsr'/>
  <feature policy='require' name='mmx'/>
  <feature policy='require' name='clflush'/>
  <feature policy='require' name='pse36'/>
  <feature policy='require' name='pat'/>
  <feature policy='require' name='cmov'/>
  <feature policy='require' name='mca'/>
  <feature policy='require' name='pge'/>
  <feature policy='require' name='mtrr'/>
  <feature policy='require' name='sep'/>
  <feature policy='require' name='apic'/>
  <feature policy='require' name='cx8'/>
  <feature policy='require' name='mce'/>
  <feature policy='require' name='pae'/>
  <feature policy='require' name='msr'/>
  <feature policy='require' name='tsc'/>
  <feature policy='require' name='pse'/>
  <feature policy='require' name='de'/>
  <feature policy='require' name='fpu'/>
</cpu>
"""

_fake_sandy_bridge_cpu_feature = """<cpu mode='custom' match='exact'>
  <model fallback='forbid'>SandyBridge</model>
  <feature policy='require' name='aes'/>
  <feature policy='require' name='apic'/>
  <feature policy='require' name='avx'/>
  <feature policy='require' name='clflush'/>
  <feature policy='require' name='cmov'/>
  <feature policy='require' name='cx16'/>
  <feature policy='require' name='cx8'/>
  <feature policy='require' name='de'/>
  <feature policy='require' name='fpu'/>
  <feature policy='require' name='fxsr'/>
  <feature policy='require' name='lahf_lm'/>
  <feature policy='require' name='lm'/>
  <feature policy='require' name='mca'/>
  <feature policy='require' name='mce'/>
  <feature policy='require' name='mmx'/>
  <feature policy='require' name='msr'/>
  <feature policy='require' name='mtrr'/>
  <feature policy='require' name='nx'/>
  <feature policy='require' name='pae'/>
  <feature policy='require' name='pat'/>
  <feature policy='require' name='pclmuldq'/>
  <feature policy='require' name='pge'/>
  <feature policy='require' name='pni'/>
  <feature policy='require' name='popcnt'/>
  <feature policy='require' name='pse'/>
  <feature policy='require' name='pse36'/>
  <feature policy='require' name='rdtscp'/>
  <feature policy='require' name='sep'/>
  <feature policy='require' name='sse'/>
  <feature policy='require' name='sse2'/>
  <feature policy='require' name='sse4.1'/>
  <feature policy='require' name='sse4.2'/>
  <feature policy='require' name='ssse3'/>
  <feature policy='require' name='syscall'/>
  <feature policy='require' name='tsc'/>
  <feature policy='require' name='tsc-deadline'/>
  <feature policy='require' name='x2apic'/>
  <feature policy='require' name='xsave'/>
</cpu>
"""

_fake_broadwell_cpu_feature = """
<cpu mode='custom' match='exact'>
  <model fallback='forbid'>Broadwell-noTSX</model>
  <vendor>Intel</vendor>
  <feature policy='require' name='smap'/>
  <feature policy='require' name='adx'/>
  <feature policy='require' name='rdseed'/>
  <feature policy='require' name='invpcid'/>
  <feature policy='require' name='erms'/>
  <feature policy='require' name='bmi2'/>
  <feature policy='require' name='smep'/>
  <feature policy='require' name='avx2'/>
  <feature policy='require' name='bmi1'/>
  <feature policy='require' name='fsgsbase'/>
  <feature policy='require' name='3dnowprefetch'/>
  <feature policy='require' name='lahf_lm'/>
  <feature policy='require' name='lm'/>
  <feature policy='require' name='rdtscp'/>
  <feature policy='require' name='nx'/>
  <feature policy='require' name='syscall'/>
  <feature policy='require' name='avx'/>
  <feature policy='require' name='xsave'/>
  <feature policy='require' name='aes'/>
  <feature policy='require' name='tsc-deadline'/>
  <feature policy='require' name='popcnt'/>
  <feature policy='require' name='movbe'/>
  <feature policy='require' name='x2apic'/>
  <feature policy='require' name='sse4.2'/>
  <feature policy='require' name='sse4.1'/>
  <feature policy='require' name='pcid'/>
  <feature policy='require' name='cx16'/>
  <feature policy='require' name='fma'/>
  <feature policy='require' name='ssse3'/>
  <feature policy='require' name='pclmuldq'/>
  <feature policy='require' name='pni'/>
  <feature policy='require' name='sse2'/>
  <feature policy='require' name='sse'/>
  <feature policy='require' name='fxsr'/>
  <feature policy='require' name='mmx'/>
  <feature policy='require' name='clflush'/>
  <feature policy='require' name='pse36'/>
  <feature policy='require' name='pat'/>
  <feature policy='require' name='cmov'/>
  <feature policy='require' name='mca'/>
  <feature policy='require' name='pge'/>
  <feature policy='require' name='mtrr'/>
  <feature policy='require' name='sep'/>
  <feature policy='require' name='apic'/>
  <feature policy='require' name='cx8'/>
  <feature policy='require' name='mce'/>
  <feature policy='require' name='pae'/>
  <feature policy='require' name='msr'/>
  <feature policy='require' name='tsc'/>
  <feature policy='require' name='pse'/>
  <feature policy='require' name='de'/>
  <feature policy='require' name='fpu'/>
</cpu>
"""


def eph_name(size):
    return ('ephemeral_%(size)s_%(ext)s' %
            {'size': size, 'ext': eph_default_ext})

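# For reference, eph_name() simply appends the default-filesystem hash suffix
# computed above, i.e. eph_name(20) == 'ephemeral_20_' + eph_default_ext;
# LibvirtConnTestCase._EPHEMERAL_20_DEFAULT further below is built the same
# way.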

def fake_disk_info_byname(instance, type='qcow2'):
    """Return instance_disk_info corresponding accurately to the properties of
    the given Instance object. The info is returned as an OrderedDict of
    name->disk_info for each disk.

    :param instance: The instance we're generating fake disk_info for.
    :param type: libvirt's disk type.
    :return: disk_info
    :rtype: OrderedDict
    """
    instance_dir = os.path.join(CONF.instances_path, instance.uuid)

    def instance_path(name):
        return os.path.join(instance_dir, name)

    disk_info = OrderedDict()

    # root disk
    if (instance.image_ref is not None and
            instance.image_ref != uuids.fake_volume_backed_image_ref):
        cache_name = imagecache.get_cache_fname(instance.image_ref)
        disk_info['disk'] = {
            'type': type,
            'path': instance_path('disk'),
            'virt_disk_size': instance.flavor.root_gb * units.Gi,
            'backing_file': cache_name,
            'disk_size': instance.flavor.root_gb * units.Gi,
            'over_committed_disk_size': 0}

    swap_mb = instance.flavor.swap
    if swap_mb > 0:
        disk_info['disk.swap'] = {
            'type': type,
            'path': instance_path('disk.swap'),
            'virt_disk_size': swap_mb * units.Mi,
            'backing_file': 'swap_%s' % swap_mb,
            'disk_size': swap_mb * units.Mi,
            'over_committed_disk_size': 0}

    eph_gb = instance.flavor.ephemeral_gb
    if eph_gb > 0:
        disk_info['disk.local'] = {
            'type': type,
            'path': instance_path('disk.local'),
            'virt_disk_size': eph_gb * units.Gi,
            'backing_file': eph_name(eph_gb),
            'disk_size': eph_gb * units.Gi,
            'over_committed_disk_size': 0}

    if instance.config_drive:
        disk_info['disk.config'] = {
            'type': 'raw',
            'path': instance_path('disk.config'),
            'virt_disk_size': 1024,
            'backing_file': '',
            'disk_size': 1024,
            'over_committed_disk_size': 0}

    return disk_info

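# A rough sketch of the result, for illustration only, assuming the defaults
# from _create_test_instance() further below (root_gb=10, ephemeral_gb=20,
# swap=0, no config drive):
#
#   fake_disk_info_byname(instance) ~= OrderedDict([
#       ('disk', {'type': 'qcow2', 'virt_disk_size': 10 * units.Gi, ...}),
#       ('disk.local', {'type': 'qcow2', 'virt_disk_size': 20 * units.Gi,
#                       'backing_file': eph_name(20), ...}),
#   ])
#
# i.e. 'disk.swap' and 'disk.config' only appear when the flavor has swap and
# the instance has a config drive.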

def fake_diagnostics_object(with_cpus=False, with_disks=False, with_nic=False):
    diag_dict = {'config_drive': False,
                 'driver': 'libvirt',
                 'hypervisor': 'kvm',
                 'hypervisor_os': 'linux',
                 'memory_details': {'maximum': 2048, 'used': 1234},
                 'state': 'running',
                 'uptime': 10}

    if with_cpus:
        diag_dict['cpu_details'] = []
        for id, t in enumerate([15340000000, 1640000000,
                                3040000000, 1420000000]):
            diag_dict['cpu_details'].append({'id': id, 'time': t})

    if with_disks:
        diag_dict['disk_details'] = []
        for i in range(2):
            diag_dict['disk_details'].append(
                {'read_bytes': 688640,
                 'read_requests': 169,
                 'write_bytes': 0,
                 'write_requests': 0,
                 'errors_count': 1})

    if with_nic:
        diag_dict['nic_details'] = [
            {'mac_address': '52:54:00:a4:38:38',
             'rx_drop': 0,
             'rx_errors': 0,
             'rx_octets': 4408,
             'rx_packets': 82,
             'tx_drop': 0,
             'tx_errors': 0,
             'tx_octets': 0,
             'tx_packets': 0}]

    return fake_diagnostics.fake_diagnostics_obj(**diag_dict)


def fake_disk_info_json(instance, type='qcow2'):
    """Return fake instance_disk_info corresponding accurately to the
    properties of the given Instance object.

    :param instance: The instance we're generating fake disk_info for.
    :param type: libvirt's disk type.
    :return: JSON representation of instance_disk_info for all disks.
    :rtype: str
    """
    disk_info = fake_disk_info_byname(instance, type)
    return jsonutils.dumps(disk_info.values())

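# In other words, fake_disk_info_json() is just the JSON-serialised values of
# fake_disk_info_byname(); for the default test instance sketched above this
# would be a JSON list with one entry per disk (root and ephemeral).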

def get_injection_info(network_info=None, admin_pass=None, files=None):
    return libvirt_driver.InjectionInfo(
        network_info=network_info, admin_pass=admin_pass, files=files)


def _concurrency(signal, wait, done, target, is_block_dev=False):
    signal.send()
    wait.wait()
    done.send()

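# _concurrency() is the image-fetch callable handed to the backend's cache()
# method by CacheConcurrencyTestCase below: each greenthread announces that it
# has started (signal.send()), blocks until the test releases it (wait.wait())
# and then reports completion (done.send()), which lets the tests observe
# whether two cache() calls serialise on the same filename.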

class FakeVirtDomain(object):

    def __init__(self, fake_xml=None, uuidstr=None, id=None, name=None,
                 info=None):
        if uuidstr is None:
            uuidstr = uuids.fake
        self.uuidstr = uuidstr
        self.id = id
        self.domname = name
        self._info = info or (
            [power_state.RUNNING, 2048 * units.Mi,
             1234 * units.Mi, None, None])
        if fake_xml:
            self._fake_dom_xml = fake_xml
        else:
            self._fake_dom_xml = """
                <domain type='kvm'>
                    <name>testinstance1</name>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                    </devices>
                </domain>
            """

    def name(self):
        if self.domname is None:
            return "fake-domain %s" % self
        else:
            return self.domname

    def ID(self):
        return self.id

    def info(self):
        return self._info

    def create(self):
        pass

    def managedSave(self, *args):
        pass

    def createWithFlags(self, launch_flags):
        pass

    def XMLDesc(self, flags):
        return self._fake_dom_xml

    def UUIDString(self):
        return self.uuidstr

    def attachDeviceFlags(self, xml, flags):
        pass

    def attachDevice(self, xml):
        pass

    def detachDeviceFlags(self, xml, flags):
        pass

    def snapshotCreateXML(self, xml, flags):
        pass

    def blockCommit(self, disk, base, top, bandwidth=0, flags=0):
        pass

    def blockRebase(self, disk, base, bandwidth=0, flags=0):
        pass

    def blockJobInfo(self, path, flags):
        pass

    def blockJobAbort(self, path, flags):
        pass

    def resume(self):
        pass

    def destroy(self):
        pass

    def fsFreeze(self, disks=None, flags=0):
        pass

    def fsThaw(self, disks=None, flags=0):
        pass

    def isActive(self):
        return True

    def isPersistent(self):
        return True

    def undefine(self):
        return True

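# FakeVirtDomain above is a minimal stand-in for a libvirt virDomain: most
# methods are no-ops, XMLDesc() returns whatever XML was passed to the
# constructor (or the canned KVM domain), and info() defaults to a RUNNING
# state tuple. A typical (illustrative) use:
#
#   dom = FakeVirtDomain(fake_xml="<domain/>")
#   dom.XMLDesc(0)      # -> "<domain/>"
#   dom.info()[0]       # -> power_state.RUNNING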

class CacheConcurrencyTestCase(test.NoDBTestCase):
    def setUp(self):
        super(CacheConcurrencyTestCase, self).setUp()

        self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)

        # utils.synchronized() will create the lock_path for us if it
        # doesn't already exist. It will also delete it when it's done,
        # which can cause race conditions with the multiple threads we
        # use for tests. So, create the path here so utils.synchronized()
        # won't delete it out from under one of the threads.
        self.lock_path = os.path.join(CONF.instances_path, 'locks')
        fileutils.ensure_tree(self.lock_path)

        def fake_exists(fname):
            basedir = os.path.join(CONF.instances_path,
                                   CONF.image_cache.subdirectory_name)
            if fname == basedir or fname == self.lock_path:
                return True
            return False

        self.stub_out('os.path.exists', fake_exists)
        self.stub_out('oslo_concurrency.processutils.execute',
                      lambda *a, **kw: None)
        self.stub_out('nova.virt.disk.api.extend',
                      lambda image, size, use_cow=False: None)

    def _fake_instance(self, uuid):
        return objects.Instance(id=1, uuid=uuid)

    def test_same_fname_concurrency(self):
        # Ensures that the same fname cache runs sequentially.
        uuid = uuids.fake

        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.by_name(self._fake_instance(uuid),
                                              'name').cache,
                _concurrency, 'fname', None,
                signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()

        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.by_name(self._fake_instance(uuid),
                                              'name').cache,
                _concurrency, 'fname', None,
                signal=sig2, wait=wait2, done=done2)

        wait2.send()
        eventlet.sleep(0)
        try:
            self.assertFalse(done2.ready())
        finally:
            wait1.send()
        done1.wait()
        eventlet.sleep(0)
        self.assertTrue(done2.ready())
        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()

    def test_different_fname_concurrency(self):
        # Ensures that two different fname caches are concurrent.
        uuid = uuids.fake

        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.by_name(self._fake_instance(uuid),
                                              'name').cache,
                _concurrency, 'fname2', None,
                signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()

        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.by_name(self._fake_instance(uuid),
                                              'name').cache,
                _concurrency, 'fname1', None,
                signal=sig2, wait=wait2, done=done2)
        eventlet.sleep(0)
        # Wait for thread 2 to start.
        sig2.wait()

        wait2.send()
        tries = 0
        while not done2.ready() and tries < 10:
            eventlet.sleep(0)
            tries += 1
        try:
            self.assertTrue(done2.ready())
        finally:
            wait1.send()
            eventlet.sleep(0)
        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()


class FakeInvalidVolumeDriver(object):
    def __init__(self, *args, **kwargs):
        raise brick_exception.InvalidConnectorProtocol('oops!')


class FakeConfigGuestDisk(object):
    def __init__(self, *args, **kwargs):
        self.source_type = None
        self.driver_cache = None


class FakeConfigGuest(object):
    def __init__(self, *args, **kwargs):
        self.driver_cache = None


class FakeNodeDevice(object):
    def __init__(self, fakexml):
        self.xml = fakexml

    def XMLDesc(self, flags):
        return self.xml


def _create_test_instance():
    flavor = objects.Flavor(memory_mb=2048,
                            swap=0,
                            vcpu_weight=None,
                            root_gb=10,
                            id=2,
                            name=u'm1.small',
                            ephemeral_gb=20,
                            rxtx_factor=1.0,
                            flavorid=u'1',
                            vcpus=2,
                            extra_specs={})
    return {
        'id': 1,
        'uuid': uuids.instance,
        'memory_kb': '1024000',
        'basepath': '/some/path',
        'bridge_name': 'br100',
        'display_name': "Acme webserver",
        'vcpus': 2,
        'project_id': 'fake',
        'bridge': 'br101',
        'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
        'root_gb': 10,
        'ephemeral_gb': 20,
        'instance_type_id': '5',  # m1.small
        'extra_specs': {},
        'system_metadata': {
            'image_disk_format': 'raw'
        },
        'flavor': flavor,
        'new_flavor': None,
        'old_flavor': None,
        'pci_devices': objects.PciDeviceList(),
        'numa_topology': None,
        'config_drive': None,
        'vm_mode': None,
        'kernel_id': None,
        'ramdisk_id': None,
        'os_type': 'linux',
        'user_id': '838a72b0-0d54-4827-8fd6-fb1227633ceb',
        'ephemeral_key_uuid': None,
        'vcpu_model': None,
        'host': 'fake-host',
        'node': 'fake-node',
        'task_state': None,
        'vm_state': None,
        'trusted_certs': None,
        'resources': None,
        'migration_context': None,
    }

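# NOTE: _create_test_instance() above returns a plain dict (plus a real
# objects.Flavor). The test cases below presumably expand it into an
# objects.Instance, e.g. objects.Instance(**_create_test_instance()), before
# handing it to the driver (an assumption for illustration only).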

@ddt.ddt
class LibvirtConnTestCase(test.NoDBTestCase,
                          test_diagnostics.DiagnosticsComparisonMixin):

    REQUIRES_LOCKING = True

    _EPHEMERAL_20_DEFAULT = eph_name(20)

    def setUp(self):
        super(LibvirtConnTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.get_admin_context()
        temp_dir = self.useFixture(fixtures.TempDir()).path
        self.flags(instances_path=temp_dir)
        self.flags(snapshots_directory=temp_dir, group='libvirt')

        self.flags(sysinfo_serial="hardware", group="libvirt")

        # normally loaded during nova-compute startup
        os_vif.initialize()

        self.stub_out('nova.virt.disk.api.extend',
                      lambda image, size, use_cow=False: None)

        self.stub_out('nova.virt.libvirt.imagebackend.Image.'
                      'resolve_driver_format',
                      imagebackend.Image._get_driver_format)

        self.stub_out('nova.compute.utils.get_machine_ips', lambda: [])

        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.test_instance = _create_test_instance()
        self.test_image_meta = {
            "disk_format": "raw",
        }
        self.image_service = self.useFixture(nova_fixtures.GlanceFixture(self))
        self.device_xml_tmpl = """
        <domain type='kvm'>
          <devices>
            <disk type='block' device='disk'>
              <driver name='qemu' type='raw' cache='none'/>
              <source dev='{device_path}'/>
              <target bus='virtio' dev='vdb'/>
              <serial>58a84f6d-3f0c-4e19-a0af-eb657b790657</serial>
              <address type='pci' domain='0x0' bus='0x0' slot='0x04' \
              function='0x0'/>
            </disk>
          </devices>
        </domain>
        """

    def relpath(self, path):
        return os.path.relpath(path, CONF.instances_path)

    def tearDown(self):
        super(LibvirtConnTestCase, self).tearDown()

    def test_driver_capabilities(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertTrue(drvr.capabilities['has_imagecache'],
                        'Driver capabilities for \'has_imagecache\' '
                        'is invalid')
        self.assertTrue(drvr.capabilities['supports_evacuate'],
                        'Driver capabilities for \'supports_evacuate\' '
                        'is invalid')
        self.assertFalse(drvr.capabilities['supports_migrate_to_same_host'],
                         'Driver capabilities for '
                         '\'supports_migrate_to_same_host\' is invalid')
        self.assertTrue(drvr.capabilities['supports_attach_interface'],
                        'Driver capabilities for '
                        '\'supports_attach_interface\' '
                        'is invalid')
        self.assertTrue(drvr.capabilities['supports_extend_volume'],
                        'Driver capabilities for '
                        '\'supports_extend_volume\' '
                        'is invalid')
        self.assertTrue(drvr.capabilities['supports_trusted_certs'],
                        'Driver capabilities for '
                        '\'supports_trusted_certs\' '
                        'is invalid')
        self.assertTrue(drvr.capabilities['supports_image_type_qcow2'],
                        'Driver capabilities for '
                        '\'supports_image_type_qcow2\' '
                        'is invalid')
        self.assertFalse(drvr.capabilities['supports_image_type_ploop'],
                         'Driver capabilities for '
                         '\'supports_image_type_ploop\' '
                         'is invalid')
        self.assertFalse(
            drvr.capabilities['supports_vtpm'],
            "Driver capabilities for 'supports_vtpm' is invalid",
        )

    def test_driver_capabilities_qcow2_with_rbd(self):
        self.flags(images_type='rbd', group='libvirt')
        self.flags(force_raw_images=False)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertFalse(drvr.capabilities['supports_image_type_qcow2'],
                         'Driver capabilities for '
                         '\'supports_image_type_qcow2\' '
                         'is invalid when \'images_type=rbd\'')

        self.flags(images_type='rbd', group='libvirt')
        self.flags(force_raw_images=True)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertTrue(drvr.capabilities['supports_image_type_qcow2'])

    def test_driver_capabilities_qcow2_with_lvm(self):
        self.flags(images_type='lvm', group='libvirt')
        self.flags(force_raw_images=False)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertFalse(drvr.capabilities['supports_image_type_qcow2'],
                         'Driver capabilities for '
                         '\'supports_image_type_qcow2\' '
                         'is invalid when \'images_type=lvm\'')

        self.flags(images_type='lvm', group='libvirt')
        self.flags(force_raw_images=True)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertTrue(drvr.capabilities['supports_image_type_qcow2'])

    def test_driver_capabilities_ploop_with_virtuozzo(self):
        self.flags(virt_type='kvm', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertFalse(drvr.capabilities['supports_image_type_ploop'],
                         'Driver capabilities for '
                         '\'supports_image_type_ploop\' '
                         'is invalid when virt_type=kvm')

        self.flags(virt_type='parallels', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertTrue(drvr.capabilities['supports_image_type_ploop'])

    def test_driver_capabilities_vtpm(self):
        self.flags(swtpm_enabled=True, group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertTrue(
            drvr.capabilities['supports_vtpm'],
            "Driver capabilities for 'supports_vtpm' is invalid when "
            "'swtpm_enabled=True'"
        )

    def test_driver_raises_on_non_linux_platform(self):
        with utils.temporary_mutation(sys, platform='darwin'):
            self.assertRaises(
                exception.InternalError, libvirt_driver.LibvirtDriver,
                fake.FakeVirtAPI(), False)

    def create_fake_libvirt_mock(self, **kwargs):
        """Defining mocks for LibvirtDriver (libvirt is not used)."""

        # A fake libvirt.virConnect
        class FakeLibvirtDriver(object):
            def defineXML(self, xml):
                return FakeVirtDomain()

        # Creating mocks
        fake = FakeLibvirtDriver()
        # Customizing above fake if necessary
        for key, val in kwargs.items():
            fake.__setattr__(key, val)

        self.stub_out('nova.virt.libvirt.driver.LibvirtDriver._conn', fake)
        self.stub_out('nova.virt.libvirt.host.Host.get_connection',
                      lambda x: fake)

    def fake_lookup(self, instance_name):
        return FakeVirtDomain()

    def fake_execute(self, *args, **kwargs):
        open(args[-1], "a").close()

    def _create_service(self, **kwargs):
        service_ref = {'host': kwargs.get('host', 'dummy'),
                       'disabled': kwargs.get('disabled', False),
                       'binary': 'nova-compute',
                       'topic': 'compute',
                       'report_count': 0}

        return objects.Service(**service_ref)

    def _get_pause_flag(self, drvr, network_info, power_on=True,
                          vifs_already_plugged=False):
        timeout = CONF.vif_plugging_timeout

        events = []
        if (
            CONF.libvirt.virt_type in ('kvm', 'qemu') and
            not vifs_already_plugged and power_on and timeout
        ):
            events = drvr._get_neutron_events(network_info)

        return bool(events)
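    # NOTE: _get_pause_flag() above mirrors the driver's decision to launch a
    # guest paused while waiting for Neutron "VIF plugged" events: events are
    # only expected for kvm/qemu, when powering the guest on, when the VIFs
    # are not already plugged and when vif_plugging_timeout is non-zero.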
 1149 
 1150     def test_public_api_signatures(self):
 1151         baseinst = driver.ComputeDriver(None)
 1152         inst = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1153         self.assertPublicAPISignatures(baseinst, inst)
 1154 
 1155     def test_legacy_block_device_info(self):
 1156         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1157         self.assertFalse(drvr.need_legacy_block_device_info)
 1158 
 1159     @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_cpu_traits')
 1160     @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_storage_bus_traits')
 1161     @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_video_model_traits')
 1162     @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_vif_model_traits')
 1163     def test_static_traits(
 1164         self, mock_vif_traits, mock_video_traits, mock_storage_traits,
 1165         mock_cpu_traits,
 1166     ):
 1167         """Ensure driver capabilities are correctly retrieved and cached."""
 1168 
 1169         # we don't mock out calls to os_traits intentionally, so we need to
 1170         # return valid traits here
 1171         mock_cpu_traits.return_value = {'HW_CPU_HYPERTHREADING': True}
 1172         mock_storage_traits.return_value = {'COMPUTE_STORAGE_BUS_VIRTIO': True}
 1173         mock_video_traits.return_value = {'COMPUTE_GRAPHICS_MODEL_VGA': True}
 1174         mock_vif_traits.return_value = {'COMPUTE_NET_VIF_MODEL_VIRTIO': True}
 1175 
 1176         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
 1177         expected = {
 1178             'HW_CPU_HYPERTHREADING': True,
 1179             'COMPUTE_STORAGE_BUS_VIRTIO': True,
 1180             'COMPUTE_GRAPHICS_MODEL_VGA': True,
 1181             'COMPUTE_NET_VIF_MODEL_VIRTIO': True,
 1182             'COMPUTE_SECURITY_TPM_1_2': False,
 1183             'COMPUTE_SECURITY_TPM_2_0': False,
 1184         }
 1185 
 1186         static_traits = drvr.static_traits
 1187 
 1188         # check that results are as expected and the individual helper
 1189         # functions were called once each
 1190         self.assertEqual(expected, static_traits)
 1191         for mock_traits in (
 1192             mock_vif_traits, mock_video_traits, mock_storage_traits,
 1193             mock_cpu_traits,
 1194         ):
 1195             mock_traits.assert_called_once_with()
 1196             mock_traits.reset_mock()
 1197 
 1198         static_traits = drvr.static_traits
 1199 
 1200         # now check that the results are still as expected but the helpers
 1201         # weren't called since the value was cached
 1202         self.assertEqual(expected, static_traits)
 1203         for mock_traits in (
 1204             mock_vif_traits, mock_video_traits, mock_storage_traits,
 1205             mock_cpu_traits,
 1206         ):
 1207             mock_traits.assert_not_called()
 1208 
 1209     @mock.patch.object(libvirt_driver.LOG, 'debug')
 1210     @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_cpu_traits')
 1211     @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_storage_bus_traits')
 1212     @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_video_model_traits')
 1213     @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_vif_model_traits')
 1214     def test_static_traits__invalid_trait(
 1215         self, mock_vif_traits, mock_video_traits, mock_storage_traits,
 1216         mock_cpu_traits, mock_log,
 1217     ):
 1218         """Ensure driver capabilities are correctly retrieved and cached."""
 1219         mock_cpu_traits.return_value = {'foo': True}
 1220         mock_storage_traits.return_value = {'bar': True}
 1221         mock_video_traits.return_value = {'baz': True}
 1222         mock_vif_traits.return_value = {'COMPUTE_NET_VIF_MODEL_VIRTIO': True}
 1223 
 1224         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
 1225         expected = {
 1226             'COMPUTE_NET_VIF_MODEL_VIRTIO': True,
 1227             'COMPUTE_SECURITY_TPM_1_2': False,
 1228             'COMPUTE_SECURITY_TPM_2_0': False,
 1229         }
 1230 
 1231         static_traits = drvr.static_traits
 1232 
 1233         self.assertEqual(expected, static_traits)
 1234         mock_log.assert_has_calls([
 1235             mock.call("Trait '%s' is not valid; ignoring.", "foo"),
 1236             mock.call("Trait '%s' is not valid; ignoring.", "bar"),
 1237             mock.call("Trait '%s' is not valid; ignoring.", "baz"),
 1238         ],
 1239         any_order=True)
 1240 
 1241     @mock.patch.object(host.Host, "has_min_version")
 1242     def test_min_version_start_ok(self, mock_version):
 1243         mock_version.return_value = True
 1244         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1245         drvr.init_host("dummyhost")
 1246 
 1247     @mock.patch.object(host.Host, "has_min_version")
 1248     def test_min_version_start_abort(self, mock_version):
 1249         mock_version.return_value = False
 1250         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1251         self.assertRaises(exception.NovaException,
 1252                           drvr.init_host,
 1253                           "dummyhost")
 1254 
 1255     @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
 1256                        return_value=versionutils.convert_version_to_int(
 1257                             libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) - 1)
 1258     @mock.patch.object(libvirt_driver.LOG, 'warning')
 1259     def test_next_min_version_deprecation_warning(self, mock_warning,
 1260                                                   mock_get_libversion):
 1261         # Skip test if there's no currently planned new min version
 1262         if (versionutils.convert_version_to_int(
 1263                 libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) ==
 1264             versionutils.convert_version_to_int(
 1265                 libvirt_driver.MIN_LIBVIRT_VERSION)):
 1266             self.skipTest("NEXT_MIN_LIBVIRT_VERSION == MIN_LIBVIRT_VERSION")
 1267 
 1268         # Test that a warning is logged if the libvirt version is less than
 1269         # the next required minimum version.
 1270         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1271         drvr.init_host("dummyhost")
 1272         # assert that the next min version is in a warning message
 1273         expected_arg = {'version': versionutils.convert_version_to_str(
 1274             versionutils.convert_version_to_int(
 1275                 libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))}
 1276         version_arg_found = False
 1277         for call in mock_warning.call_args_list:
 1278             if call[0][1] == expected_arg:
 1279                 version_arg_found = True
 1280                 break
 1281         self.assertTrue(version_arg_found)
 1282 
 1283     @mock.patch.object(fakelibvirt.Connection, 'getVersion',
 1284                        return_value=versionutils.convert_version_to_int(
 1285                             libvirt_driver.NEXT_MIN_QEMU_VERSION) - 1)
 1286     @mock.patch.object(libvirt_driver.LOG, 'warning')
 1287     def test_next_min_qemu_version_deprecation_warning(self, mock_warning,
 1288                                                        mock_get_libversion):
 1289         # Skip test if there's no currently planned new min version
 1290         if (versionutils.convert_version_to_int(
 1291                 libvirt_driver.NEXT_MIN_QEMU_VERSION) ==
 1292             versionutils.convert_version_to_int(
 1293                 libvirt_driver.MIN_QEMU_VERSION)):
 1294             self.skipTest("NEXT_MIN_QEMU_VERSION == MIN_QEMU_VERSION")
 1295 
 1296         # Test that a warning is logged if the libvirt version is less than
 1297         # the next required minimum version.
 1298         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1299         drvr.init_host("dummyhost")
 1300         # assert that the next min version is in a warning message
 1301         expected_arg = {'version': versionutils.convert_version_to_str(
 1302             versionutils.convert_version_to_int(
 1303                 libvirt_driver.NEXT_MIN_QEMU_VERSION))}
 1304         version_arg_found = False
 1305         for call in mock_warning.call_args_list:
 1306             if call[0][1] == expected_arg:
 1307                 version_arg_found = True
 1308                 break
 1309         self.assertTrue(version_arg_found)
 1310 
 1311     @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
 1312                        return_value=versionutils.convert_version_to_int(
 1313                             libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))
 1314     @mock.patch.object(libvirt_driver.LOG, 'warning')
 1315     def test_next_min_version_ok(self, mock_warning, mock_get_libversion):
 1316         # Skip test if there's no currently planned new min version
 1317 
 1318         if (versionutils.convert_version_to_int(
 1319                 libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) ==
 1320             versionutils.convert_version_to_int(
 1321                 libvirt_driver.MIN_LIBVIRT_VERSION)):
 1322             self.skipTest("NEXT_MIN_LIBVIRT_VERSION == MIN_LIBVIRT_VERSION")
 1323 
 1324         # Test that a warning is not logged if the libvirt version is greater
 1325         # than or equal to NEXT_MIN_LIBVIRT_VERSION.
 1326         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1327         drvr.init_host("dummyhost")
 1328         # assert that the next min version is not in any warning message
 1329         expected_arg = {'version': versionutils.convert_version_to_str(
 1330             versionutils.convert_version_to_int(
 1331                 libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))}
 1332         version_arg_found = False
 1333         for call in mock_warning.call_args_list:
 1334             if call[0][1] == expected_arg:
 1335                 version_arg_found = True
 1336                 break
 1337         self.assertFalse(version_arg_found)
 1338 
 1339     @mock.patch.object(fakelibvirt.Connection, 'getVersion',
 1340                        return_value=versionutils.convert_version_to_int(
 1341                             libvirt_driver.NEXT_MIN_QEMU_VERSION))
 1342     @mock.patch.object(libvirt_driver.LOG, 'warning')
 1343     def test_next_min_qemu_version_ok(self, mock_warning, mock_get_libversion):
 1344         # Skip test if there's no currently planned new min version
 1345 
 1346         if (versionutils.convert_version_to_int(
 1347                 libvirt_driver.NEXT_MIN_QEMU_VERSION) ==
 1348             versionutils.convert_version_to_int(
 1349                 libvirt_driver.MIN_QEMU_VERSION)):
 1350             self.skipTest("NEXT_MIN_QEMU_VERSION == MIN_QEMU_VERSION")
 1351 
 1352         # Test that a warning is not logged if the QEMU version is greater
 1353         # than or equal to NEXT_MIN_QEMU_VERSION.
 1354         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1355         drvr.init_host("dummyhost")
 1356         # assert that the next min version is not in any warning message
 1357         expected_arg = {'version': versionutils.convert_version_to_str(
 1358             versionutils.convert_version_to_int(
 1359                 libvirt_driver.NEXT_MIN_QEMU_VERSION))}
 1360         version_arg_found = False
 1361         for call in mock_warning.call_args_list:
 1362             if call[0][1] == expected_arg:
 1363                 version_arg_found = True
 1364                 break
 1365         self.assertFalse(version_arg_found)
 1366 
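          # The next two tests only verify that init_host() completes without
          # raising when the host architecture reported by the fakelibvirt
          # connection is PPC64 or S390X.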
 1367     @mock.patch.object(fields.Architecture, "from_host",
 1368                        return_value=fields.Architecture.PPC64)
 1369     def test_min_version_ppc_ok(self, mock_arch):
 1370         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1371         drvr.init_host("dummyhost")
 1372 
 1373     @mock.patch.object(fields.Architecture, "from_host",
 1374                        return_value=fields.Architecture.S390X)
 1375     def test_min_version_s390_ok(self, mock_arch):
 1376         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1377         drvr.init_host("dummyhost")
 1378 
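          # The file-backed memory tests below exercise
          # _check_file_backed_memory_support(): with '[libvirt]
          # file_backed_memory' set, a ram_allocation_ratio of 1.0 passes
          # while 1.5 is rejected, a total smaller than
          # reserved_host_memory_mb is rejected, and reserving any memory at
          # all results in a warning.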
 1379     def test_file_backed_memory_support_called(self):
 1380         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1381         with mock.patch.object(drvr,
 1382                 '_check_file_backed_memory_support') as mock_check_fb_support:
 1383             drvr.init_host("dummyhost")
 1384             self.assertTrue(mock_check_fb_support.called)
 1385 
 1386     def test_min_version_file_backed_ok(self):
 1387         self.flags(file_backed_memory=1024, group='libvirt')
 1388         self.flags(ram_allocation_ratio=1.0)
 1389         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1390         drvr._check_file_backed_memory_support()
 1391 
 1392     def test_min_version_file_backed_bad_ram_allocation_ratio(self):
 1393         self.flags(file_backed_memory=1024, group="libvirt")
 1394         self.flags(ram_allocation_ratio=1.5)
 1395         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1396         self.assertRaises(exception.InternalError,
 1397                           drvr._check_file_backed_memory_support)
 1398 
 1399     def test__check_file_backed_memory_support__total_lt_reserved(self):
 1400         """Ensure an error is raised if total memory < reserved.
 1401 
 1402         Placement won't allow $resource.total < $resource.reserved, so we need
 1403         to catch this early.
 1404         """
 1405         self.flags(file_backed_memory=1024, group='libvirt')
 1406         self.flags(ram_allocation_ratio=1.0, reserved_host_memory_mb=4096)
 1407         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1408         self.assertRaises(
 1409             exception.InternalError, drvr._check_file_backed_memory_support,
 1410         )
 1411 
 1412     @mock.patch.object(libvirt_driver.LOG, 'warning')
 1413     def test__check_file_backed_memory_support__has_reserved(self, mock_log):
 1414         """Ensure a warning is issued if memory is reserved.
 1415 
 1416         It doesn't make sense to "reserve" memory when file-backed memory is in
 1417         use, so we warn about this configuration to avoid confusion.
 1418         """
 1419         self.flags(file_backed_memory=8192, group='libvirt')
 1420         self.flags(ram_allocation_ratio=1.0)
 1421         # we don't need to configure '[DEFAULT] reserved_host_memory_mb' since
 1422         # it defaults to 512 (MB)
 1423         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1424         drvr._check_file_backed_memory_support()
 1425         mock_log.assert_called_once()
 1426         self.assertIn(
 1427             "Reserving memory via '[DEFAULT] reserved_host_memory_mb' is not "
 1428             "compatible",
 1429             six.text_type(mock_log.call_args[0]),
 1430         )
 1431 
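          # The _check_cpu_compatibility tests below rely on
          # Connection.compareCPU being mocked: a side_effect of (2, 0) makes
          # the second comparison report the custom model or extra flag as
          # incompatible (libvirt's VIR_CPU_COMPARE_INCOMPATIBLE is 0), so
          # init_host is expected to raise InvalidCPUInfo. An empty cpu_models
          # list, or cpu_mode='none' combined with cpu_models, is rejected as
          # Invalid.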
 1432     def test__check_cpu_compatibility_start_ok(self):
 1433         self.flags(cpu_mode="custom",
 1434                    cpu_models=["Penryn"],
 1435                    group="libvirt")
 1436         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1437         drvr.init_host("dummyhost")
 1438 
 1439     def test__check_cpu_compatibility_none_models(self):
 1440         self.flags(cpu_mode="custom",
 1441                    cpu_models=[],
 1442                    group="libvirt")
 1443         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1444         self.assertRaises(exception.Invalid, drvr.init_host, "dummyhost")
 1445 
 1446     def test__check_cpu_compatibility_none_mode(self):
 1447         self.flags(cpu_mode="none",
 1448                    cpu_models=["Penryn"],
 1449                    group="libvirt")
 1450         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1451         self.assertRaises(exception.Invalid, drvr.init_host, "dummyhost")
 1452 
 1453     @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
 1454     def test__check_cpu_compatibility_advance_model(self, mocked_compare):
 1455         mocked_compare.side_effect = (2, 0)
 1456         self.flags(cpu_mode="custom",
 1457                    cpu_models=["qemu64", "Broadwell-noTSX"],
 1458                    group="libvirt")
 1459         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1460         self.assertRaises(exception.InvalidCPUInfo,
 1461                           drvr.init_host, "dummyhost")
 1462 
 1463     def test__check_cpu_compatibility_with_flag(self):
 1464         self.flags(cpu_mode="custom",
 1465                    cpu_models=["Penryn"],
 1466                    cpu_model_extra_flags=["aes"],
 1467                    group="libvirt")
 1468         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1469         drvr.init_host("dummyhost")
 1470 
 1471     @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
 1472     def test__check_cpu_compatibility_advance_flag(self, mocked_compare):
 1473         mocked_compare.side_effect = (2, 0)
 1474         self.flags(cpu_mode="custom",
 1475                    cpu_models=["qemu64"],
 1476                    cpu_model_extra_flags=["avx", "avx2"],
 1477                    group="libvirt")
 1478         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1479         self.assertRaises(exception.InvalidCPUInfo,
 1480                           drvr.init_host, "dummyhost")
 1481 
 1482     @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
 1483     def test__check_cpu_compatibility_wrong_flag(self, mocked_compare):
 1484         mocked_compare.side_effect = (2, 0)
 1485         self.flags(cpu_mode="custom",
 1486                    cpu_models=["Broadwell-noTSX"],
 1487                    cpu_model_extra_flags=["a v x"],
 1488                    group="libvirt")
 1489         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1490         self.assertRaises(exception.InvalidCPUInfo,
 1491                           drvr.init_host, "dummyhost")
 1492 
 1493     def test__check_cpu_compatibility_invalid_virt_type(self):
 1494         """Test getting CPU traits when using a virt_type that doesn't support
 1495         the feature, only kvm and qemu supports reporting CPU traits.
 1496         """
 1497         self.flags(cpu_mode='custom',
 1498                    cpu_models=['IvyBridge'],
 1499                    virt_type='lxc',
 1500                    group='libvirt')
 1501         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1502         self.assertRaises(exception.Invalid, drvr.init_host, "dummyhost")
 1503 
 1504     def test__check_cpu_compatibility_aarch64_qemu_custom_start_OK(self):
 1505         """Test getting CPU traits when using a virt_type that doesn't support
 1506         the feature, only kvm and qemu supports reporting CPU traits.
 1507         """
 1508         self.flags(cpu_mode='custom',
 1509                    cpu_models=['max'],
 1510                    virt_type='qemu',
 1511                    group='libvirt')
 1512         caps = vconfig.LibvirtConfigCaps()
 1513         caps.host = vconfig.LibvirtConfigCapsHost()
 1514         caps.host.cpu = vconfig.LibvirtConfigCPU()
 1515         caps.host.cpu.arch = fields.Architecture.AARCH64
 1516         with mock.patch.object(host.Host, "get_capabilities",
 1517                                return_value=caps):
 1518             drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1519             drvr.init_host("dummyhost")
 1520 
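          # The vTPM tests below exercise _check_vtpm_support(): with
          # '[libvirt] swtpm_enabled' set, init_host requires a 'qemu' or
          # 'kvm' virt_type, the 'swtpm' and 'swtpm_setup' binaries on PATH,
          # existing swtpm_user/swtpm_group accounts and, per the final test,
          # libvirt >= 5.6.0.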
 1521     def test__check_vtpm_support_non_qemu(self):
 1522         """Test checking for vTPM support when we're not using QEMU or KVM."""
 1523         self.flags(swtpm_enabled=True, virt_type='lxc', group='libvirt')
 1524         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1525         exc = self.assertRaises(exception.InvalidConfiguration,
 1526                                 drvr.init_host, 'dummyhost')
 1527         self.assertIn("vTPM support requires '[libvirt] virt_type' of 'qemu' "
 1528                       "or 'kvm'; found 'lxc'.", six.text_type(exc))
 1529 
 1530     @mock.patch.object(host.Host, 'has_min_version', return_value=True)
 1531     @mock.patch('shutil.which')
 1532     def test__check_vtpm_support_missing_exe(self, mock_which, mock_version):
 1533         """Test checking for vTPM support when the swtpm binaries are
 1534         missing.
 1535         """
 1536         self.flags(swtpm_enabled=True, virt_type='kvm', group='libvirt')
 1537         mock_which.return_value = False
 1538 
 1539         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1540         exc = self.assertRaises(exception.InvalidConfiguration,
 1541                                 drvr.init_host, "dummyhost")
 1542         self.assertIn(
 1543             "vTPM support is configured but the 'swtpm' and 'swtpm_setup' "
 1544             "binaries could not be found on PATH.",
 1545             str(exc),
 1546         )
 1547 
 1548         mock_which.assert_has_calls(
 1549             [mock.call('swtpm_setup'), mock.call('swtpm')],
 1550         )
 1551 
 1552     @mock.patch.object(host.Host, 'has_min_version', return_value=True)
 1553     @mock.patch('shutil.which')
 1554     @mock.patch('pwd.getpwnam')
 1555     def test__check_vtpm_support_invalid_user(
 1556         self, mock_getpwnam, mock_which, mock_version,
 1557     ):
 1558         """Test checking for vTPM support when the configured user is
 1559         invalid.
 1560         """
 1561         self.flags(
 1562             swtpm_user='lionel', swtpm_enabled=True, virt_type='kvm',
 1563             group='libvirt')
 1564         mock_which.return_value = True
 1565         mock_getpwnam.side_effect = KeyError
 1566 
 1567         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1568         exc = self.assertRaises(
 1569             exception.InvalidConfiguration,
 1570             drvr.init_host, "dummyhost")
 1571 
 1572         self.assertIn(
 1573             "The user configured in '[libvirt] swtpm_user' does not exist "
 1574             "on this host; expected 'lionel'.",
 1575             str(exc),
 1576         )
 1577         mock_getpwnam.assert_called_with('lionel')
 1578 
 1579     @mock.patch.object(host.Host, 'has_min_version', return_value=True)
 1580     @mock.patch('shutil.which')
 1581     @mock.patch('pwd.getpwnam')
 1582     @mock.patch('grp.getgrnam')
 1583     def test__check_vtpm_support_invalid_group(
 1584         self, mock_getgrnam, mock_getpwnam, mock_which, mock_version,
 1585     ):
 1586         """Test checking for vTPM support when the configured group is
 1587         invalid.
 1588         """
 1589         self.flags(
 1590             swtpm_group='admins', swtpm_enabled=True, virt_type='kvm',
 1591             group='libvirt')
 1592         mock_which.return_value = True
 1593         mock_getgrnam.side_effect = KeyError
 1594 
 1595         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1596         exc = self.assertRaises(
 1597             exception.InvalidConfiguration,
 1598             drvr.init_host, "dummyhost")
 1599 
 1600         self.assertIn(
 1601             "The group configured in '[libvirt] swtpm_group' does not exist "
 1602             "on this host; expected 'admins'.",
 1603             str(exc),
 1604         )
 1605         mock_getgrnam.assert_called_with('admins')
 1606 
 1607     @mock.patch.object(host.Host, 'has_min_version')
 1608     @mock.patch('shutil.which')
 1609     @mock.patch('pwd.getpwnam')
 1610     @mock.patch('grp.getgrnam')
 1611     def test__check_vtpm_support(
 1612         self, mock_getgrnam, mock_getpwnam, mock_which, mock_version,
 1613     ):
 1614         """Test checking for vTPM support when everything is configured
 1615         correctly.
 1616         """
 1617         self.flags(swtpm_enabled=True, virt_type='kvm', group='libvirt')
 1618         mock_version.return_value = True
 1619 
 1620         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1621         drvr.init_host('dummyhost')
 1622 
 1623         mock_which.assert_has_calls(
 1624             [mock.call('swtpm_setup'), mock.call().__bool__()],
 1625         )
 1626         mock_version.assert_called_with(lv_ver=(5, 6, 0))
 1627 
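          # The _check_cpu_set_configuration tests below cover the config
          # rules exercised here: '[compute] cpu_shared_set' and
          # 'cpu_dedicated_set' must not overlap, 'reserved_host_cpus' cannot
          # be combined with the newer options, the deprecated 'vcpu_pin_set'
          # cannot coexist with 'cpu_dedicated_set', and combining it with
          # 'cpu_shared_set' only produces a warning.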
 1628     @mock.patch.object(libvirt_driver.LOG, 'warning')
 1629     def test_check_cpu_set_configuration__no_configuration(self, mock_log):
 1630         """Test that configuring no CPU option results no errors or logs.
 1631         """
 1632         self.flags(vcpu_pin_set=None, reserved_host_cpus=None)
 1633         self.flags(cpu_shared_set=None, cpu_dedicated_set=None,
 1634                    group='compute')
 1635 
 1636         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1637         drvr._check_cpu_set_configuration()
 1638 
 1639         mock_log.assert_not_called()
 1640 
 1641     def test_check_cpu_set_configuration__cpu_shared_set_cpu_dedicated_set(
 1642             self):
 1643         """Test that configuring 'cpu_shared_set' and 'cpu_dedicated_set' such
 1644         that they overlap (are not disjoint) results in an error stating that
 1645         this is not allowed.
 1646         """
 1647         self.flags(vcpu_pin_set=None, reserved_host_cpus=None)
 1648         self.flags(cpu_shared_set='0-3', cpu_dedicated_set='3-5',
 1649                    group='compute')
 1650 
 1651         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1652 
 1653         self.assertRaises(exception.InvalidConfiguration,
 1654                           drvr._check_cpu_set_configuration)
 1655 
 1656     def test_check_cpu_set_configuration__reserved_host_cpus_cpu_shared_set(
 1657             self):
 1658         """Test that configuring 'reserved_host_cpus' with one of the new
 1659         options, in this case '[compute] cpu_shared_set', results in an error
 1660         stating that this is not allowed.
 1661         """
 1662         self.flags(vcpu_pin_set=None, reserved_host_cpus=1)
 1663         self.flags(cpu_shared_set='1-10', cpu_dedicated_set=None,
 1664                    group='compute')
 1665 
 1666         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1667 
 1668         ex = self.assertRaises(exception.InvalidConfiguration,
 1669                                drvr._check_cpu_set_configuration)
 1670         self.assertIn("The 'reserved_host_cpus' config option cannot be "
 1671                       "defined alongside ", six.text_type(ex))
 1672 
 1673     @mock.patch.object(libvirt_driver.LOG, 'warning')
 1674     def test_check_cpu_set_configuration__vcpu_pin_set(self, mock_log):
 1675         """Test that configuring only 'vcpu_pin_set' results in a warning that
 1676         the option is being used for VCPU inventory but this is deprecated
 1677         behavior.
 1678         """
 1679         self.flags(vcpu_pin_set='0-3', reserved_host_cpus=None)
 1680         self.flags(cpu_shared_set=None, cpu_dedicated_set=None,
 1681                    group='compute')
 1682 
 1683         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1684         drvr._check_cpu_set_configuration()
 1685 
 1686         mock_log.assert_called_once()
 1687         self.assertIn("When defined, 'vcpu_pin_set' will be used to calculate "
 1688                       "'VCPU' inventory and schedule instances that have "
 1689                       "'VCPU' allocations.",
 1690                       six.text_type(mock_log.call_args[0]))
 1691 
 1692     @mock.patch.object(libvirt_driver.LOG, 'warning')
 1693     def test_check_cpu_set_configuration__vcpu_pin_set_cpu_shared_set(
 1694             self, mock_log):
 1695         """Test that configuring both 'vcpu_pin_set' and 'cpu_shared_set'
 1696         results in a warning that 'cpu_shared_set' is being ignored for
 1697         calculating VCPU inventory.
 1698         """
 1699         self.flags(vcpu_pin_set='0-3', reserved_host_cpus=None)
 1700         self.flags(cpu_shared_set='4-5', cpu_dedicated_set=None,
 1701                    group='compute')
 1702 
 1703         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1704         drvr._check_cpu_set_configuration()
 1705 
 1706         mock_log.assert_called_once()
 1707         self.assertIn("The '[compute] cpu_shared_set' and 'vcpu_pin_set' "
 1708                       "config options have both been defined.",
 1709                       six.text_type(mock_log.call_args[0]))
 1710 
 1711     def test_check_cpu_set_configuration__vcpu_pin_set_cpu_dedicated_set(
 1712             self):
 1713         """Test that configuring both 'vcpu_pin_set' and 'cpu_dedicated_set'
 1714         results in an error stating that the two options cannot co-exist.
 1715         """
 1716         self.flags(vcpu_pin_set='0-3', reserved_host_cpus=None)
 1717         self.flags(cpu_shared_set=None, cpu_dedicated_set='4-5',
 1718                    group='compute')
 1719 
 1720         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1721 
 1722         ex = self.assertRaises(exception.InvalidConfiguration,
 1723                                drvr._check_cpu_set_configuration)
 1724         self.assertIn("The 'vcpu_pin_set' config option has been deprecated "
 1725                       "and cannot be defined alongside '[compute] "
 1726                       "cpu_dedicated_set'.", six.text_type(ex))
 1727 
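          # _do_test_parse_migration_flags() is a helper for the migration
          # flag tests: expected values are bitwise ORs of libvirt
          # VIR_MIGRATE_* constants, e.g. the default live-migration set is
          #   UNDEFINE_SOURCE | PERSIST_DEST | PEER2PEER | LIVE
          # while block migration additionally carries NON_SHARED_INC. The
          # '[libvirt]' options toggled below (tunnelled, native TLS,
          # post-copy, auto-converge) each add their flag, with post-copy
          # taking precedence when auto-converge is also enabled.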
 1728     def _do_test_parse_migration_flags(self, lm_expected=None,
 1729                                        bm_expected=None):
 1730         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1731         drvr._parse_migration_flags()
 1732 
 1733         if lm_expected is not None:
 1734             self.assertEqual(lm_expected, drvr._live_migration_flags)
 1735         if bm_expected is not None:
 1736             self.assertEqual(bm_expected, drvr._block_migration_flags)
 1737 
 1738     def test_parse_live_migration_flags_default(self):
 1739         self._do_test_parse_migration_flags(
 1740             lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
 1741                          libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
 1742                          libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
 1743                          libvirt_driver.libvirt.VIR_MIGRATE_LIVE))
 1744 
 1745     def test_parse_live_migration_flags(self):
 1746         self._do_test_parse_migration_flags(
 1747             lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
 1748                          libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
 1749                          libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
 1750                          libvirt_driver.libvirt.VIR_MIGRATE_LIVE))
 1751 
 1752     def test_parse_block_migration_flags_default(self):
 1753         self._do_test_parse_migration_flags(
 1754             bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
 1755                          libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
 1756                          libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
 1757                          libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
 1758                          libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
 1759 
 1760     def test_parse_block_migration_flags(self):
 1761         self._do_test_parse_migration_flags(
 1762             bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
 1763                          libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
 1764                          libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
 1765                          libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
 1766                          libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
 1767 
 1768     def test_parse_migration_flags_p2p_xen(self):
 1769         self.flags(virt_type='xen', group='libvirt')
 1770         self._do_test_parse_migration_flags(
 1771             lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
 1772                          libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
 1773                          libvirt_driver.libvirt.VIR_MIGRATE_LIVE),
 1774             bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
 1775                          libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
 1776                          libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
 1777                          libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
 1778 
 1779     def test_live_migration_tunnelled_true(self):
 1780         self.flags(live_migration_tunnelled=True, group='libvirt')
 1781         self._do_test_parse_migration_flags(
 1782             lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
 1783                          libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
 1784                          libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
 1785                          libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
 1786                          libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED),
 1787             bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
 1788                          libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
 1789                          libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
 1790                          libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
 1791                          libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
 1792                          libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED))
 1793 
 1794     @mock.patch.object(host.Host, 'has_min_version', return_value=True)
 1795     def test_live_migration_with_native_tls(self, host):
 1796         self.flags(live_migration_with_native_tls=True, group='libvirt')
 1797         self._do_test_parse_migration_flags(
 1798             lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
 1799                          libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
 1800                          libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
 1801                          libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
 1802                          libvirt_driver.libvirt.VIR_MIGRATE_TLS),
 1803             bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
 1804                          libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
 1805                          libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
 1806                          libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
 1807                          libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
 1808                          libvirt_driver.libvirt.VIR_MIGRATE_TLS))
 1809 
 1810     def test_live_migration_permit_postcopy_true(self):
 1811         self.flags(live_migration_permit_post_copy=True, group='libvirt')
 1812         self._do_test_parse_migration_flags(
 1813             lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
 1814                          libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
 1815                          libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
 1816                          libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
 1817                          libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY),
 1818             bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
 1819                          libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
 1820                          libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
 1821                          libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
 1822                          libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
 1823                          libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY))
 1824 
 1825     def test_live_migration_permit_auto_converge_true(self):
 1826         self.flags(live_migration_permit_auto_converge=True, group='libvirt')
 1827         self._do_test_parse_migration_flags(
 1828             lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
 1829                          libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
 1830                          libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
 1831                          libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
 1832                          libvirt_driver.libvirt.VIR_MIGRATE_AUTO_CONVERGE),
 1833             bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
 1834                          libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
 1835                          libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
 1836                          libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
 1837                          libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
 1838                          libvirt_driver.libvirt.VIR_MIGRATE_AUTO_CONVERGE))
 1839 
 1840     def test_live_migration_permit_auto_converge_and_post_copy_true(self):
 1841         self.flags(live_migration_permit_auto_converge=True, group='libvirt')
 1842         self.flags(live_migration_permit_post_copy=True, group='libvirt')
 1843         self._do_test_parse_migration_flags(
 1844             lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
 1845                          libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
 1846                          libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
 1847                          libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
 1848                          libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY),
 1849             bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
 1850                          libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
 1851                          libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
 1852                          libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
 1853                          libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
 1854                          libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY))
 1855 
 1856     def test_live_migration_permit_postcopy_false(self):
 1857         self._do_test_parse_migration_flags(
 1858             lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
 1859                          libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
 1860                          libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
 1861                          libvirt_driver.libvirt.VIR_MIGRATE_LIVE),
 1862             bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
 1863                          libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
 1864                          libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
 1865                          libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
 1866                          libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
 1867 
 1868     def test_live_migration_permit_autoconverge_false(self):
 1869         self._do_test_parse_migration_flags(
 1870             lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
 1871                          libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
 1872                          libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
 1873                          libvirt_driver.libvirt.VIR_MIGRATE_LIVE),
 1874             bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
 1875                          libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
 1876                          libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
 1877                          libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
 1878                          libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
 1879 
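          # The set_admin_password tests below exercise the QEMU guest agent
          # path: the image must advertise hw_qemu_guest_agent, the guest
          # user defaults to 'root' ('Administrator' for Windows guests)
          # unless the image sets os_admin_user, and failures surface as
          # SetAdminPasswdNotSupported, QemuGuestAgentNotEnabled,
          # NotImplementedError or a generic NovaException depending on the
          # error. As the parallels test shows, that virt_type does not
          # require the guest agent image property.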
 1880     @mock.patch('nova.utils.get_image_from_system_metadata')
 1881     @mock.patch.object(host.Host,
 1882                        'has_min_version', return_value=True)
 1883     @mock.patch('nova.virt.libvirt.host.Host.get_guest')
 1884     def test_set_admin_password(self, mock_get_guest, ver, mock_image):
 1885         self.flags(virt_type='kvm', group='libvirt')
 1886         instance = objects.Instance(**self.test_instance)
 1887         mock_image.return_value = {"properties": {
 1888             "hw_qemu_guest_agent": "yes"}}
 1889         mock_guest = mock.Mock(spec=libvirt_guest.Guest)
 1890         mock_get_guest.return_value = mock_guest
 1891 
 1892         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1893         drvr.set_admin_password(instance, "123")
 1894 
 1895         mock_guest.set_user_password.assert_called_once_with("root", "123")
 1896 
 1897     @mock.patch('nova.objects.Instance.save')
 1898     @mock.patch('oslo_serialization.base64.encode_as_text')
 1899     @mock.patch('nova.api.metadata.password.convert_password')
 1900     @mock.patch('nova.crypto.ssh_encrypt_text')
 1901     @mock.patch('nova.utils.get_image_from_system_metadata')
 1902     @mock.patch.object(host.Host,
 1903                        'has_min_version', return_value=True)
 1904     @mock.patch('nova.virt.libvirt.host.Host.get_guest')
 1905     def test_set_admin_password_saves_sysmeta(self, mock_get_guest,
 1906                                               ver, mock_image, mock_encrypt,
 1907                                               mock_convert, mock_encode,
 1908                                               mock_save):
 1909         self.flags(virt_type='kvm', group='libvirt')
 1910         instance = objects.Instance(**self.test_instance)
 1911         # Password will only be saved in sysmeta if the key_data is present
 1912         instance.key_data = 'ssh-rsa ABCFEFG'
 1913         mock_image.return_value = {"properties": {
 1914             "hw_qemu_guest_agent": "yes"}}
 1915         mock_guest = mock.Mock(spec=libvirt_guest.Guest)
 1916         mock_get_guest.return_value = mock_guest
 1917         mock_convert.return_value = {'password_0': 'converted-password'}
 1918 
 1919         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1920         drvr.set_admin_password(instance, "123")
 1921 
 1922         mock_guest.set_user_password.assert_called_once_with("root", "123")
 1923         mock_encrypt.assert_called_once_with(instance.key_data, '123')
 1924         mock_encode.assert_called_once_with(mock_encrypt.return_value)
 1925         mock_convert.assert_called_once_with(None, mock_encode.return_value)
 1926         self.assertEqual('converted-password',
 1927                          instance.system_metadata['password_0'])
 1928         mock_save.assert_called_once_with()
 1929 
 1930     @mock.patch('nova.virt.libvirt.host.Host.get_guest')
 1931     def test_set_admin_password_parallels(self, mock_get_guest):
 1932         self.flags(virt_type='parallels', group='libvirt')
 1933         instance = objects.Instance(**self.test_instance)
 1934         mock_guest = mock.Mock(spec=libvirt_guest.Guest)
 1935         mock_get_guest.return_value = mock_guest
 1936 
 1937         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1938         drvr.set_admin_password(instance, "123")
 1939 
 1940         mock_guest.set_user_password.assert_called_once_with("root", "123")
 1941 
 1942     @mock.patch('nova.utils.get_image_from_system_metadata')
 1943     @mock.patch.object(host.Host,
 1944                        'has_min_version', return_value=True)
 1945     @mock.patch('nova.virt.libvirt.host.Host.get_guest')
 1946     def test_set_admin_password_windows(self, mock_get_guest, ver, mock_image):
 1947         self.flags(virt_type='kvm', group='libvirt')
 1948         instance = objects.Instance(**self.test_instance)
 1949         instance.os_type = "windows"
 1950         mock_image.return_value = {"properties": {
 1951             "hw_qemu_guest_agent": "yes"}}
 1952         mock_guest = mock.Mock(spec=libvirt_guest.Guest)
 1953         mock_get_guest.return_value = mock_guest
 1954 
 1955         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1956         drvr.set_admin_password(instance, "123")
 1957 
 1958         mock_guest.set_user_password.assert_called_once_with(
 1959             "Administrator", "123")
 1960 
 1961     @mock.patch('nova.utils.get_image_from_system_metadata')
 1962     @mock.patch.object(host.Host,
 1963                        'has_min_version', return_value=True)
 1964     @mock.patch('nova.virt.libvirt.host.Host.get_guest')
 1965     def test_set_admin_password_image(self, mock_get_guest, ver, mock_image):
 1966         self.flags(virt_type='kvm', group='libvirt')
 1967         instance = objects.Instance(**self.test_instance)
 1968         mock_image.return_value = {"properties": {
 1969             "hw_qemu_guest_agent": "yes",
 1970             "os_admin_user": "foo"
 1971         }}
 1972         mock_guest = mock.Mock(spec=libvirt_guest.Guest)
 1973         mock_get_guest.return_value = mock_guest
 1974 
 1975         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1976         drvr.set_admin_password(instance, "123")
 1977 
 1978         mock_guest.set_user_password.assert_called_once_with("foo", "123")
 1979 
 1980     @mock.patch('nova.utils.get_image_from_system_metadata')
 1981     @mock.patch.object(host.Host,
 1982                        'has_min_version', return_value=True)
 1983     def test_set_admin_password_bad_hyp(self, mock_svc, mock_image):
 1984         self.flags(virt_type='lxc', group='libvirt')
 1985         instance = objects.Instance(**self.test_instance)
 1986         mock_image.return_value = {"properties": {
 1987             "hw_qemu_guest_agent": "yes"}}
 1988         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1989         self.assertRaises(exception.SetAdminPasswdNotSupported,
 1990                           drvr.set_admin_password, instance, "123")
 1991 
 1992     @mock.patch.object(host.Host,
 1993                        'has_min_version', return_value=True)
 1994     def test_set_admin_password_guest_agent_not_running(self, mock_svc):
 1995         self.flags(virt_type='kvm', group='libvirt')
 1996         instance = objects.Instance(**self.test_instance)
 1997         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 1998         self.assertRaises(exception.QemuGuestAgentNotEnabled,
 1999                           drvr.set_admin_password, instance, "123")
 2000 
 2001     @mock.patch('nova.utils.get_image_from_system_metadata')
 2002     @mock.patch.object(host.Host,
 2003                        'has_min_version', return_value=True)
 2004     @mock.patch('nova.virt.libvirt.host.Host.get_guest')
 2005     def test_set_admin_password_error(self, mock_get_guest, ver, mock_image):
 2006         self.flags(virt_type='kvm', group='libvirt')
 2007         instance = objects.Instance(**self.test_instance)
 2008         mock_image.return_value = {"properties": {
 2009             "hw_qemu_guest_agent": "yes"}}
 2010         mock_guest = mock.Mock(spec=libvirt_guest.Guest)
 2011         mock_guest.set_user_password.side_effect = (
 2012             fakelibvirt.libvirtError("error"))
 2013         mock_get_guest.return_value = mock_guest
 2014 
 2015         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2016         with mock.patch.object(
 2017                 drvr, '_save_instance_password_if_sshkey_present') as save_p:
 2018             self.assertRaises(exception.NovaException,
 2019                               drvr.set_admin_password, instance, "123")
 2020             save_p.assert_not_called()
 2021 
 2022     @mock.patch('nova.utils.get_image_from_system_metadata')
 2023     @mock.patch.object(host.Host,
 2024                        'has_min_version', return_value=True)
 2025     @mock.patch('nova.virt.libvirt.host.Host.get_guest')
 2026     def test_set_admin_password_error_with_unicode(
 2027             self, mock_get_guest, ver, mock_image):
 2028         self.flags(virt_type='kvm', group='libvirt')
 2029         instance = objects.Instance(**self.test_instance)
 2030         mock_image.return_value = {"properties": {
 2031             "hw_qemu_guest_agent": "yes"}}
 2032         mock_guest = mock.Mock(spec=libvirt_guest.Guest)
 2033         mock_guest.set_user_password.side_effect = (
 2034                 fakelibvirt.libvirtError(
 2035                     b"failed: \xe9\x94\x99\xe8\xaf\xaf\xe3\x80\x82"))
 2036         mock_get_guest.return_value = mock_guest
 2037 
 2038         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2039         self.assertRaises(exception.NovaException,
 2040                           drvr.set_admin_password, instance, "123")
 2041 
 2042     @mock.patch('nova.utils.get_image_from_system_metadata')
 2043     @mock.patch.object(host.Host,
 2044                        'has_min_version', return_value=True)
 2045     @mock.patch('nova.virt.libvirt.host.Host.get_guest')
 2046     def test_set_admin_password_not_implemented(
 2047             self, mock_get_guest, ver, mock_image):
 2048         self.flags(virt_type='kvm', group='libvirt')
 2049         instance = objects.Instance(**self.test_instance)
 2050         mock_image.return_value = {"properties": {
 2051             "hw_qemu_guest_agent": "yes"}}
 2052         mock_guest = mock.Mock(spec=libvirt_guest.Guest)
 2053         not_implemented = fakelibvirt.make_libvirtError(
 2054                 fakelibvirt.libvirtError,
 2055                 "Guest agent disappeared while executing command",
 2056                 error_code=fakelibvirt.VIR_ERR_AGENT_UNRESPONSIVE)
 2057         mock_guest.set_user_password.side_effect = not_implemented
 2058         mock_get_guest.return_value = mock_guest
 2059 
 2060         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2061         self.assertRaises(NotImplementedError,
 2062                           drvr.set_admin_password, instance, "123")
 2063 
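          # The _set_host_enabled tests below cover the enable/disable
          # transitions: disabling an enabled host saves the service record
          # and updates the compute provider status, while the other
          # combinations are no-ops here; in particular a host disabled
          # without an "AUTO:" disabled_reason is not re-enabled.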
 2064     @mock.patch.object(objects.Service, 'save')
 2065     @mock.patch.object(objects.Service, 'get_by_compute_host')
 2066     def test_set_host_enabled_with_disable(self, mock_svc, mock_save):
 2067         # Tests disabling an enabled host.
 2068         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2069         svc = self._create_service(host='fake-mini')
 2070         mock_svc.return_value = svc
 2071         with mock.patch.object(
 2072                 drvr, '_update_compute_provider_status') as ucps:
 2073             drvr._set_host_enabled(False)
 2074             ucps.assert_called_once_with(
 2075                 test.MatchType(context.RequestContext), svc)
 2076         self.assertTrue(svc.disabled)
 2077         mock_save.assert_called_once_with()
 2078 
 2079     @mock.patch.object(objects.Service, 'save')
 2080     @mock.patch.object(objects.Service, 'get_by_compute_host')
 2081     def test_set_host_enabled_with_enable(self, mock_svc, mock_save):
 2082         # Tests enabling a disabled host.
 2083         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2084         svc = self._create_service(disabled=True, host='fake-mini')
 2085         mock_svc.return_value = svc
 2086         with mock.patch.object(
 2087                 drvr, '_update_compute_provider_status') as ucps:
 2088             drvr._set_host_enabled(True)
 2089             ucps.assert_not_called()
 2090         # since disabled_reason is not set and not prefixed with "AUTO:",
 2091         # service should not be enabled.
 2092         mock_save.assert_not_called()
 2093         self.assertTrue(svc.disabled)
 2094 
 2095     @mock.patch.object(objects.Service, 'save')
 2096     @mock.patch.object(objects.Service, 'get_by_compute_host')
 2097     def test_set_host_enabled_with_enable_state_enabled(self, mock_svc,
 2098                                                         mock_save):
 2099         # Tests enabling an enabled host.
 2100         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2101         svc = self._create_service(disabled=False, host='fake-mini')
 2102         mock_svc.return_value = svc
 2103         with mock.patch.object(
 2104                 drvr, '_update_compute_provider_status') as ucps:
 2105             drvr._set_host_enabled(True)
 2106             ucps.assert_not_called()
 2107         self.assertFalse(svc.disabled)
 2108         mock_save.assert_not_called()
 2109 
 2110     @mock.patch.object(objects.Service, 'save')
 2111     @mock.patch.object(objects.Service, 'get_by_compute_host')
 2112     def test_set_host_enabled_with_disable_state_disabled(self, mock_svc,
 2113                                                           mock_save):
 2114         # Tests disabling a disabled host.
 2115         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2116         svc = self._create_service(disabled=True, host='fake-mini')
 2117         mock_svc.return_value = svc
 2118         with mock.patch.object(
 2119                 drvr, '_update_compute_provider_status') as ucps:
 2120             drvr._set_host_enabled(False)
 2121             ucps.assert_not_called()
 2122         mock_save.assert_not_called()
 2123         self.assertTrue(svc.disabled)
 2124 
 2125     def test_set_host_enabled_swallows_exceptions(self):
 2126         # Tests that set_host_enabled will swallow exceptions coming from the
 2127         # db_api code so they don't break anything calling it, e.g. the
 2128         # _get_new_connection method.
 2129         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2130         with mock.patch.object(db, 'service_get_by_compute_host') as db_mock:
 2131             # Make db.service_get_by_compute_host raise NovaException; this
 2132             # is more robust than just raising ComputeHostNotFound.
 2133             db_mock.side_effect = exception.NovaException
 2134             drvr._set_host_enabled(False)
 2135 
 2136     def test_update_compute_provider_status(self):
 2137         """Tests happy path of calling _update_compute_provider_status"""
 2138         virtapi = mock.Mock()
 2139         drvr = libvirt_driver.LibvirtDriver(virtapi, read_only=True)
 2140         ctxt = context.get_admin_context()
 2141         service = self._create_service()
 2142         service.compute_node = objects.ComputeNode(uuid=uuids.rp_uuid)
 2143         drvr._update_compute_provider_status(ctxt, service)
 2144         virtapi.update_compute_provider_status.assert_called_once_with(
 2145             ctxt, uuids.rp_uuid, enabled=not service.disabled)
 2146 
 2147     def test_update_compute_provider_status_swallows_exceptions(self):
 2148         """Tests error path handling in _update_compute_provider_status"""
 2149         # First we'll make Service.compute_node loading raise an exception
 2150         # by not setting the field; it cannot be lazy-loaded from an orphaned
 2151         # Service object.
 2152         virtapi = mock.Mock()
 2153         drvr = libvirt_driver.LibvirtDriver(virtapi, read_only=True)
 2154         ctxt = context.get_admin_context()
 2155         service = self._create_service(host='fake-host', disabled=True)
 2156         drvr._update_compute_provider_status(ctxt, service)
 2157         virtapi.update_compute_provider_status.assert_not_called()
 2158         self.assertIn('An error occurred while updating compute node resource '
 2159                       'provider status to "disabled" for provider: fake-host',
 2160                       self.stdlog.logger.output)
 2161 
 2162         # Now fix Service.compute_node loading but make the VirtAPI call fail.
 2163         service.compute_node = objects.ComputeNode(uuid=uuids.rp_uuid)
 2164         service.disabled = False  # make sure the log message logic works
 2165         error = exception.TraitRetrievalFailed(error='oops')
 2166         virtapi.update_compute_provider_status.side_effect = error
 2167         drvr._update_compute_provider_status(ctxt, service)
 2168         virtapi.update_compute_provider_status.assert_called_once_with(
 2169             ctxt, uuids.rp_uuid, enabled=True)
 2170         log_output = self.stdlog.logger.output
 2171         self.assertIn('An error occurred while updating compute node resource '
 2172                       'provider status to "enabled" for provider: %s' %
 2173                       uuids.rp_uuid, log_output)
 2174         # The error should have been logged as well.
 2175         self.assertIn(six.text_type(error), log_output)
 2176 
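          # _prepare_pci_devices_for_use() looks up each PCI device via
          # nodeDeviceLookupByName() and detaches it from the host; the
          # exception test below shows a libvirtError from dettach() being
          # translated into PciDevicePrepareFailed.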
 2177     @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName")
 2178     def test_prepare_pci_device(self, mock_lookup):
 2179 
 2180         pci_devices = [dict(hypervisor_name='xxx')]
 2181 
 2182         self.flags(virt_type='xen', group='libvirt')
 2183 
 2184         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2185         conn = drvr._host.get_connection()
 2186 
 2187         mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn)
 2188         drvr._prepare_pci_devices_for_use(pci_devices)
 2189 
 2190     @mock.patch('nova.context.get_admin_context')
 2191     @mock.patch('nova.compute.utils.notify_about_libvirt_connect_error')
 2192     def test_versioned_notification(self, mock_notify, mock_get):
 2193         mock_get.return_value = self.context
 2194 
 2195         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2196 
 2197         fake_error = fakelibvirt.make_libvirtError(
 2198             fakelibvirt.libvirtError, "Failed to connect to host",
 2199             error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)
 2200 
 2201         with mock.patch('nova.virt.libvirt.host.Host._get_connection',
 2202                         side_effect=fake_error):
 2203             self.assertRaises(exception.HypervisorUnavailable,
 2204                               drvr._host.get_connection)
 2205         mock_get.assert_called_once_with()
 2206         mock_notify.assert_called_once_with(self.context, ip=CONF.my_ip,
 2207                                             exception=fake_error)
 2208 
 2209     @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName")
 2210     @mock.patch.object(fakelibvirt.virNodeDevice, "dettach")
 2211     def test_prepare_pci_device_exception(self, mock_detach, mock_lookup):
 2212 
 2213         pci_devices = [dict(hypervisor_name='xxx',
 2214                             id='id1',
 2215                             instance_uuid='uuid')]
 2216 
 2217         self.flags(virt_type='xen', group='libvirt')
 2218         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2219         conn = drvr._host.get_connection()
 2220 
 2221         mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn)
 2222         mock_detach.side_effect = fakelibvirt.libvirtError("xxxx")
 2223 
 2224         self.assertRaises(exception.PciDevicePrepareFailed,
 2225                           drvr._prepare_pci_devices_for_use, pci_devices)
 2226 
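          # test_device_metadata builds a Guest from the domain XML below and
          # checks that _build_device_metadata() matches the tagged block
          # device mappings and virtual interfaces to the guest's devices,
          # producing metadata objects such as objects.DiskMetadata with a
          # bus class (e.g. objects.SCSIDeviceBus) derived from the device
          # address in the XML.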
 2227     @mock.patch.object(host.Host, "has_min_version", return_value=False)
 2228     def test_device_metadata(self, mock_version):
 2229         xml = """
 2230         <domain>
 2231           <name>dummy</name>
 2232           <uuid>32dfcb37-5af1-552b-357c-be8c3aa38310</uuid>
 2233             <memory>1048576</memory>
 2234               <vcpu>1</vcpu>
 2235             <os>
 2236                 <type arch='x86_64' machine='pc-i440fx-2.4'>hvm</type>
 2237             </os>
 2238           <devices>
 2239             <disk type='block' device='disk'>
 2240               <driver name='qemu' type='qcow2'/>
 2241               <source dev='/dev/mapper/generic'/>
 2242               <target dev='sda' bus='scsi'/>
 2243              <address type='drive' controller='0' bus='0' target='0' unit='0'/>
 2244             </disk>
 2245             <disk type='block' device='disk'>
 2246               <driver name='qemu' type='qcow2'/>
 2247               <source dev='/dev/mapper/generic-1'/>
 2248               <target dev='hda' bus='ide'/>
 2249              <address type='drive' controller='0' bus='1' target='0' unit='0'/>
 2250             </disk>
 2251             <disk type='block' device='disk'>
 2252               <driver name='qemu' type='qcow2'/>
 2253               <source dev='/dev/mapper/generic-2'/>
 2254               <target dev='hdb' bus='ide'/>
 2255              <address type='drive' controller='0' bus='1' target='1' unit='1'/>
 2256             </disk>
 2257             <disk type='block' device='disk'>
 2258               <driver name='qemu' type='qcow2'/>
 2259               <source dev='/dev/mapper/aa1'/>
 2260               <target dev='sdb' bus='usb'/>
 2261             </disk>
 2262             <disk type='block' device='disk'>
 2263               <driver name='qemu' type='qcow2'/>
 2264               <source dev='/var/lib/libvirt/images/centos'/>
 2265               <backingStore/>
 2266               <target dev='vda' bus='virtio'/>
 2267               <boot order='1'/>
 2268               <alias name='virtio-disk0'/>
 2269               <address type='pci' domain='0x0000' bus='0x00' slot='0x09'
 2270               function='0x0'/>
 2271             </disk>
 2272             <disk type='file' device='disk'>
 2273                 <driver name='qemu' type='qcow2' cache='none'/>
 2274                 <source file='/var/lib/libvirt/images/generic.qcow2'/>
 2275                 <target dev='vdb' bus='virtio'/>
 2276                 <address type='virtio-mmio'/>
 2277             </disk>
 2278             <disk type='file' device='disk'>
 2279                 <driver name='qemu' type='qcow2'/>
 2280                 <source file='/var/lib/libvirt/images/test.qcow2'/>
 2281                 <backingStore/>
 2282                 <target dev='vdc' bus='virtio'/>
 2283                 <alias name='virtio-disk1'/>
 2284                 <address type='ccw' cssid='0xfe' ssid='0x0' devno='0x0000'/>
 2285             </disk>
 2286             <interface type='network'>
 2287               <mac address='52:54:00:f6:35:8f'/>
 2288               <source network='default'/>
 2289               <model type='virtio'/>
 2290               <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
 2291               function='0x0'/>
 2292             </interface>
 2293             <interface type='network'>
 2294               <mac address='51:5a:2c:a4:5e:1b'/>
 2295               <source network='default'/>
 2296               <model type='virtio'/>
 2297               <address type='pci' domain='0x0000' bus='0x00' slot='0x04'
 2298               function='0x1'/>
 2299             </interface>
 2300             <interface type='network'>
 2301               <mac address='fa:16:3e:d1:28:e4'/>
 2302               <source network='default'/>
 2303               <model type='virtio'/>
 2304               <address type='virtio-mmio'/>
 2305             </interface>
 2306             <interface type='network'>
 2307                 <mac address='52:54:00:14:6f:50'/>
 2308                 <source network='default' bridge='virbr0'/>
 2309                 <target dev='vnet0'/>
 2310                 <model type='virtio'/>
 2311                 <alias name='net0'/>
 2312                 <address type='ccw' cssid='0xfe' ssid='0x0' devno='0x0001'/>
 2313             </interface>
 2314             <hostdev mode="subsystem" type="pci" managed="yes">
 2315                 <source>
 2316                     <address bus="0x06" domain="0x0000" function="0x1"
 2317                     slot="0x00"/>
 2318                 </source>
 2319             </hostdev>
 2320           </devices>
 2321         </domain>"""
 2322 
 2323         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2324         dom = fakelibvirt.Domain(drvr._get_connection(), xml, False)
 2325         guest = libvirt_guest.Guest(dom)
 2326 
 2327         instance_ref = objects.Instance(**self.test_instance)
 2328         bdms = block_device_obj.block_device_make_list_from_dicts(
 2329             self.context, [
 2330                 fake_block_device.FakeDbBlockDeviceDict(
 2331                     {'id': 1,
 2332                      'source_type': 'volume', 'destination_type': 'volume',
 2333                      'device_name': '/dev/sda', 'tag': "db",
 2334                      'volume_id': uuids.volume_1}),
 2335                 fake_block_device.FakeDbBlockDeviceDict(
 2336                     {'id': 2,
 2337                      'source_type': 'volume', 'destination_type': 'volume',
 2338                      'device_name': '/dev/hda', 'tag': "nfvfunc1",
 2339                      'volume_id': uuids.volume_2}),
 2340                 fake_block_device.FakeDbBlockDeviceDict(
 2341                     {'id': 3,
 2342                      'source_type': 'volume', 'destination_type': 'volume',
 2343                      'device_name': '/dev/sdb', 'tag': "nfvfunc2",
 2344                      'volume_id': uuids.volume_3}),
 2345                 fake_block_device.FakeDbBlockDeviceDict(
 2346                     {'id': 4,
 2347                      'source_type': 'volume', 'destination_type': 'volume',
 2348                      'device_name': '/dev/hdb',
 2349                      'volume_id': uuids.volume_4}),
 2350                 fake_block_device.FakeDbBlockDeviceDict(
 2351                     {'id': 5,
 2352                      'source_type': 'volume', 'destination_type': 'volume',
 2353                      'device_name': '/dev/vda', 'tag': "nfvfunc3",
 2354                      'volume_id': uuids.volume_5}),
 2355                 fake_block_device.FakeDbBlockDeviceDict(
 2356                     {'id': 6,
 2357                      'source_type': 'volume', 'destination_type': 'volume',
 2358                      'device_name': '/dev/vdb', 'tag': "nfvfunc4",
 2359                      'volume_id': uuids.volume_6}),
 2360                 fake_block_device.FakeDbBlockDeviceDict(
 2361                     {'id': 7,
 2362                      'source_type': 'volume', 'destination_type': 'volume',
 2363                      'device_name': '/dev/vdc', 'tag': "nfvfunc5",
 2364                      'volume_id': uuids.volume_7}),
 2365             ]
 2366         )
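              # Virtual interfaces for the instance; vif1 is left untagged and
              # vif4 (tag 'pf_tag') stands in for the PCI hostdev, with its MAC
              # supplied by the mocked get_mac_by_pci_address below.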
 2367         vif = obj_vif.VirtualInterface(context=self.context)
 2368         vif.address = '52:54:00:f6:35:8f'
 2369         vif.network_id = 123
 2370         vif.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310'
 2371         vif.uuid = '12ec4b21-ef22-6c21-534b-ba3e3ab3a311'
 2372         vif.tag = 'mytag1'
 2373 
 2374         vif1 = obj_vif.VirtualInterface(context=self.context)
 2375         vif1.address = '51:5a:2c:a4:5e:1b'
 2376         vif1.network_id = 123
 2377         vif1.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310'
 2378         vif1.uuid = 'abec4b21-ef22-6c21-534b-ba3e3ab3a312'
 2379 
 2380         vif2 = obj_vif.VirtualInterface(context=self.context)
 2381         vif2.address = 'fa:16:3e:d1:28:e4'
 2382         vif2.network_id = 123
 2383         vif2.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310'
 2384         vif2.uuid = '645686e4-7086-4eab-8c2f-c41f017a1b16'
 2385         vif2.tag = 'mytag2'
 2386 
 2387         vif3 = obj_vif.VirtualInterface(context=self.context)
 2388         vif3.address = '52:54:00:14:6f:50'
 2389         vif3.network_id = 123
 2390         vif3.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310'
 2391         vif3.uuid = '99cc3604-782d-4a32-a27c-bc33ac56ce86'
 2392         vif3.tag = 'mytag3'
 2393 
 2394         vif4 = obj_vif.VirtualInterface(context=self.context)
 2395         vif4.address = 'da:d1:f2:91:95:c1'
 2396         vif4.tag = 'pf_tag'
 2397 
 2398         vifs = [vif, vif1, vif2, vif3, vif4]
 2399 
 2400         network_info = _fake_network_info(self)
 2401         network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT_PHYSICAL
 2402         network_info[0]['address'] = "51:5a:2c:a4:5e:1b"
 2403         network_info[0]['details'] = dict(vlan='2145')
 2404         network_info[0]['profile'] = dict(trusted='true')
 2405         instance_ref.info_cache = objects.InstanceInfoCache(
 2406             network_info=network_info)
 2407 
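              # Stub out the VIF and BDM lookups, the libvirt guest and its
              # XML, and the PF MAC lookup so that _build_device_metadata works
              # entirely from the fixtures defined above.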
 2408         with test.nested(
 2409             mock.patch('nova.objects.VirtualInterfaceList'
 2410                        '.get_by_instance_uuid', return_value=vifs),
 2411             mock.patch('nova.objects.BlockDeviceMappingList'
 2412                        '.get_by_instance_uuid', return_value=bdms),
 2413             mock.patch('nova.virt.libvirt.host.Host.get_guest',
 2414                        return_value=guest),
 2415             mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc',
 2416                               return_value=xml),
 2417             mock.patch.object(pci_utils, 'get_mac_by_pci_address',
 2418                               return_value='da:d1:f2:91:95:c1')):
 2419             metadata_obj = drvr._build_device_metadata(self.context,
 2420                                                        instance_ref)
 2421             metadata = metadata_obj.devices
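                  # Six tagged disks (volume_4 is untagged and skipped) plus
                  # five network interfaces.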
 2422             self.assertEqual(11, len(metadata))
 2423             self.assertIsInstance(metadata[0],
 2424                                   objects.DiskMetadata)
 2425             self.assertIsInstance(metadata[0].bus,
 2426                                   objects.SCSIDeviceBus)
 2427             self.assertEqual(['db'], metadata[0].tags)
 2428             self.assertEqual(uuids.volume_1, metadata[0].serial)
 2429             self.assertFalse(metadata[0].bus.obj_attr_is_set('address'))
 2430             self.assertEqual(['nfvfunc1'], metadata[1].tags)
 2431             self.assertEqual(uuids.volume_2, metadata[1].serial)
 2432             self.assertIsInstance(metadata[1],
 2433                                   objects.DiskMetadata)
 2434             self.assertIsInstance(metadata[1].bus,
 2435                                   objects.IDEDeviceBus)
 2436             self.assertEqual(['nfvfunc1'], metadata[1].tags)
 2437             self.assertFalse(metadata[1].bus.obj_attr_is_set('address'))
 2438             self.assertIsInstance(metadata[2],
 2439                                   objects.DiskMetadata)
 2440             self.assertIsInstance(metadata[2].bus,
 2441                                   objects.USBDeviceBus)
 2442             self.assertEqual(['nfvfunc2'], metadata[2].tags)
 2443             self.assertEqual(uuids.volume_3, metadata[2].serial)
 2444             self.assertFalse(metadata[2].bus.obj_attr_is_set('address'))
 2445             self.assertIsInstance(metadata[3],
 2446                                   objects.DiskMetadata)
 2447             self.assertIsInstance(metadata[3].bus,
 2448                                   objects.PCIDeviceBus)
 2449             self.assertEqual(['nfvfunc3'], metadata[3].tags)
 2450             # NOTE(artom) We're not checking volume 4 because it's not tagged
 2451             # and only tagged devices appear in the metadata
 2452             self.assertEqual(uuids.volume_5, metadata[3].serial)
 2453             self.assertEqual('0000:00:09.0', metadata[3].bus.address)
 2454             self.assertIsInstance(metadata[4],
 2455                                   objects.DiskMetadata)
 2456             self.assertEqual(['nfvfunc4'], metadata[4].tags)
 2457             self.assertEqual(uuids.volume_6, metadata[4].serial)
 2458             self.assertIsInstance(metadata[5],
 2459                                   objects.DiskMetadata)
 2460             self.assertEqual(['nfvfunc5'], metadata[5].tags)
 2461             self.assertEqual(uuids.volume_7, metadata[5].serial)
 2462             self.assertIsInstance(metadata[6],
 2463                                   objects.NetworkInterfaceMetadata)
 2464             self.assertIsInstance(metadata[6].bus,
 2465                                   objects.PCIDeviceBus)
 2466             self.assertEqual(['mytag1'], metadata[6].tags)
 2467             self.assertEqual('0000:00:03.0', metadata[6].bus.address)
 2468             self.assertFalse(metadata[6].vf_trusted)
 2469 
 2470             # Make sure the interface with a VLAN is exposed in the metadata
 2471             self.assertIsInstance(metadata[7],
 2472                                   objects.NetworkInterfaceMetadata)
 2473             self.assertEqual('51:5a:2c:a4:5e:1b', metadata[7].mac)
 2474             self.assertEqual(2145, metadata[7].vlan)
 2475             self.assertTrue(metadata[7].vf_trusted)
 2476             self.assertIsInstance(metadata[8],
 2477                                   objects.NetworkInterfaceMetadata)
 2478             self.assertEqual(['mytag2'], metadata[8].tags)
 2479             self.assertFalse(metadata[8].vf_trusted)
 2480             self.assertIsInstance(metadata[9],
 2481                                   objects.NetworkInterfaceMetadata)
 2482             self.assertEqual(['mytag3'], metadata[9].tags)
 2483             self.assertFalse(metadata[9].vf_trusted)
 2484             self.assertIsInstance(metadata[10],
 2485                                   objects.NetworkInterfaceMetadata)
 2486             self.assertEqual(['pf_tag'], metadata[10].tags)
 2487             self.assertEqual('da:d1:f2:91:95:c1', metadata[10].mac)
 2488             self.assertEqual('0000:06:00.1', metadata[10].bus.address)
 2489 
 2490     @mock.patch.object(host.Host, 'get_connection')
 2491     @mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc')
 2492     def test_detach_pci_devices(self, mocked_get_xml_desc, mock_conn):
 2493 
 2494         fake_domXML1_with_pci = (
 2495             """<domain> <devices>
 2496             <disk type='file' device='disk'>
 2497             <driver name='qemu' type='qcow2' cache='none'/>
 2498             <source file='xxx'/>
 2499             <target dev='vda' bus='virtio'/>
 2500             <alias name='virtio-disk0'/>
 2501             <address type='pci' domain='0x0000' bus='0x00'
 2502             slot='0x04' function='0x0'/>
 2503             </disk>
 2504             <hostdev mode="subsystem" type="pci" managed="yes">
 2505             <source>
 2506             <address function="0x1" slot="0x10" domain="0x0001"
 2507              bus="0x04"/>
 2508             </source>
 2509             </hostdev></devices></domain>""")
 2510 
 2511         fake_domXML1_without_pci = (
 2512             """<domain> <devices>
 2513             <disk type='file' device='disk'>
 2514             <driver name='qemu' type='qcow2' cache='none'/>
 2515             <source file='xxx'/>
 2516             <target dev='vda' bus='virtio'/>
 2517             <alias name='virtio-disk0'/>
 2518             <address type='pci' domain='0x0001' bus='0x00'
 2519             slot='0x04' function='0x0'/>
 2520             </disk></devices></domain>""")
 2521 
 2522         pci_device_info = {'compute_node_id': 1,
 2523                            'instance_uuid': 'uuid',
 2524                            'address': '0001:04:10.1'}
 2525         pci_device = objects.PciDevice(**pci_device_info)
 2526         pci_devices = [pci_device]
 2527         mocked_get_xml_desc.return_value = fake_domXML1_without_pci
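              # The re-read guest XML no longer contains the hostdev, so the
              # detach below is expected to complete without raising.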
 2528 
 2529         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2530         dom = fakelibvirt.Domain(
 2531             drvr._get_connection(), fake_domXML1_with_pci, False)
 2532         guest = libvirt_guest.Guest(dom)
 2533         drvr._detach_pci_devices(guest, pci_devices)
 2534 
 2535     @mock.patch.object(host.Host, 'get_connection')
 2536     @mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc')
 2537     def test_detach_pci_devices_timeout(self, mocked_get_xml_desc, mock_conn):
 2538 
 2539         fake_domXML1_with_pci = (
 2540             """<domain> <devices>
 2541             <disk type='file' device='disk'>
 2542             <driver name='qemu' type='qcow2' cache='none'/>
 2543             <source file='xxx'/>
 2544             <target dev='vda' bus='virtio'/>
 2545             <alias name='virtio-disk0'/>
 2546             <address type='pci' domain='0x0000' bus='0x00'
 2547             slot='0x04' function='0x0'/>
 2548             </disk>
 2549             <hostdev mode="subsystem" type="pci" managed="yes">
 2550             <source>
 2551             <address function="0x1" slot="0x10" domain="0x0001"
 2552              bus="0x04"/>
 2553             </source>
 2554             </hostdev></devices></domain>""")
 2555 
 2556         pci_device_info = {'compute_node_id': 1,
 2557                            'instance_uuid': 'uuid',
 2558                            'address': '0001:04:10.1'}
 2559         pci_device = objects.PciDevice(**pci_device_info)
 2560         pci_devices = [pci_device]
 2561         mocked_get_xml_desc.return_value = fake_domXML1_with_pci
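              # The guest XML keeps reporting the hostdev, so the detach below
              # is expected to fail with PciDeviceDetachFailed.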
 2562 
 2563         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2564         dom = fakelibvirt.Domain(
 2565             drvr._get_connection(), fake_domXML1_with_pci, False)
 2566         guest = libvirt_guest.Guest(dom)
 2567         self.assertRaises(exception.PciDeviceDetachFailed,
 2568                           drvr._detach_pci_devices, guest, pci_devices)
 2569 
 2570     @mock.patch.object(connector, 'get_connector_properties')
 2571     def test_get_connector(self, fake_get_connector):
 2572         initiator = 'fake.initiator.iqn'
 2573         ip = 'fakeip'
 2574         host = 'fakehost'
 2575         wwpns = ['100010604b019419']
 2576         wwnns = ['200010604b019419']
 2577         self.flags(my_ip=ip)
 2578         self.flags(host=host)
 2579 
 2580         expected = {
 2581             'ip': ip,
 2582             'initiator': initiator,
 2583             'host': host,
 2584             'wwpns': wwpns,
 2585             'wwnns': wwnns
 2586         }
 2587         volume = {
 2588             'id': 'fake'
 2589         }
 2590 
 2591         # TODO(walter-boring) add the fake in os-brick
 2592         fake_get_connector.return_value = expected
 2593         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2594         result = drvr.get_volume_connector(volume)
 2595         self.assertThat(expected, matchers.DictMatches(result))
 2596 
 2597     @mock.patch.object(connector, 'get_connector_properties')
 2598     def test_get_connector_storage_ip(self, fake_get_connector):
 2599         ip = '100.100.100.100'
 2600         storage_ip = '101.101.101.101'
 2601         self.flags(my_block_storage_ip=storage_ip, my_ip=ip)
 2602         volume = {
 2603             'id': 'fake'
 2604         }
 2605         expected = {
 2606             'ip': storage_ip
 2607         }
 2608         # TODO(walter-boring) add the fake in os-brick
 2609         fake_get_connector.return_value = expected
 2610         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2611         result = drvr.get_volume_connector(volume)
 2612         self.assertEqual(storage_ip, result['ip'])
 2613 
 2614     def test_lifecycle_event_registration(self):
 2615         calls = []
 2616 
 2617         def fake_registerErrorHandler(*args, **kwargs):
 2618             calls.append('fake_registerErrorHandler')
 2619 
 2620         def fake_get_host_capabilities(**args):
 2621             cpu = vconfig.LibvirtConfigGuestCPU()
 2622             cpu.arch = fields.Architecture.ARMV7
 2623 
 2624             caps = vconfig.LibvirtConfigCaps()
 2625             caps.host = vconfig.LibvirtConfigCapsHost()
 2626             caps.host.cpu = cpu
 2627             calls.append('fake_get_host_capabilities')
 2628             return caps
 2629 
 2630         @mock.patch.object(fakelibvirt, 'registerErrorHandler',
 2631                            side_effect=fake_registerErrorHandler)
 2632         @mock.patch.object(host.Host, "get_capabilities",
 2633                             side_effect=fake_get_host_capabilities)
 2634         def test_init_host(get_host_capabilities, register_error_handler):
 2635             drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2636             drvr.init_host("test_host")
 2637 
 2638         test_init_host()
 2639         # NOTE(dkliban): Will fail if get_host_capabilities is called before
 2640         # registerErrorHandler
 2641         self.assertEqual(['fake_registerErrorHandler',
 2642                           'fake_get_host_capabilities'], calls)
 2643 
 2644     def test_sanitize_log_to_xml(self):
 2645         # set up fake data
 2646         data = {'auth_password': 'scrubme'}
 2647         bdm = [{'connection_info': {'data': data}}]
 2648         bdi = {'block_device_mapping': bdm}
 2649 
 2650         # Tests that the parameters to the _get_guest_xml method
 2651         # are sanitized for passwords when logged.
 2652         def fake_debug(*args, **kwargs):
 2653             if 'auth_password' in args[0]:
 2654                 self.assertNotIn('scrubme', args[0])
 2655 
 2656         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2657         conf = mock.Mock()
 2658         with test.nested(
 2659             mock.patch.object(libvirt_driver.LOG, 'debug',
 2660                               side_effect=fake_debug),
 2661             mock.patch.object(drvr, '_get_guest_config', return_value=conf)
 2662         ) as (
 2663             debug_mock, conf_mock
 2664         ):
 2665             drvr._get_guest_xml(self.context, self.test_instance,
 2666                                 network_info={}, disk_info={},
 2667                                 image_meta={}, block_device_info=bdi)
 2668             # We don't care what the log message is; we just want to make sure
 2669             # our stub method is called, which asserts the password is scrubbed.
 2670             self.assertTrue(debug_mock.called)
 2671 
 2672     @mock.patch.object(time, "time")
 2673     def test_get_guest_config(self, time_mock):
 2674         time_mock.return_value = 1234567.89
 2675 
 2676         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2677 
 2678         test_instance = copy.deepcopy(self.test_instance)
 2679         test_instance["display_name"] = "purple tomatoes"
 2680         test_instance['system_metadata']['owner_project_name'] = 'sweetshop'
 2681         test_instance['system_metadata']['owner_user_name'] = 'cupcake'
 2682 
 2683         ctxt = context.RequestContext(project_id=123,
 2684                                       project_name="aubergine",
 2685                                       user_id=456,
 2686                                       user_name="pie")
 2687 
 2688         flavor = objects.Flavor(name='m1.small',
 2689                                 memory_mb=6,
 2690                                 vcpus=28,
 2691                                 root_gb=496,
 2692                                 ephemeral_gb=8128,
 2693                                 swap=33550336,
 2694                                 extra_specs={})
 2695         instance_ref = objects.Instance(**test_instance)
 2696         instance_ref.flavor = flavor
 2697         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 2698 
 2699         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
 2700                                             instance_ref,
 2701                                             image_meta)
 2702 
 2703         cfg = drvr._get_guest_config(instance_ref,
 2704                                      _fake_network_info(self),
 2705                                      image_meta, disk_info,
 2706                                      context=ctxt)
 2707 
 2708         self.assertEqual(cfg.uuid, instance_ref["uuid"])
 2709         self.assertEqual(2, len(cfg.features))
 2710         self.assertIsInstance(cfg.features[0],
 2711                               vconfig.LibvirtConfigGuestFeatureACPI)
 2712         self.assertIsInstance(cfg.features[1],
 2713                               vconfig.LibvirtConfigGuestFeatureAPIC)
 2714         self.assertEqual(cfg.memory, 6 * units.Ki)
 2715         self.assertEqual(cfg.vcpus, 28)
 2716         self.assertEqual(cfg.os_type, fields.VMMode.HVM)
 2717         self.assertEqual(cfg.os_boot_dev, ["hd"])
 2718         self.assertIsNone(cfg.os_root)
 2719         self.assertEqual(len(cfg.devices), 10)
 2720         self.assertIsInstance(cfg.devices[0],
 2721                               vconfig.LibvirtConfigGuestDisk)
 2722         self.assertIsInstance(cfg.devices[1],
 2723                               vconfig.LibvirtConfigGuestDisk)
 2724         self.assertIsInstance(cfg.devices[2],
 2725                               vconfig.LibvirtConfigGuestDisk)
 2726         self.assertIsInstance(cfg.devices[3],
 2727                               vconfig.LibvirtConfigGuestInterface)
 2728         self.assertIsInstance(cfg.devices[4],
 2729                               vconfig.LibvirtConfigGuestSerial)
 2730         self.assertIsInstance(cfg.devices[5],
 2731                               vconfig.LibvirtConfigGuestInput)
 2732         self.assertIsInstance(cfg.devices[6],
 2733                               vconfig.LibvirtConfigGuestGraphics)
 2734         self.assertIsInstance(cfg.devices[7],
 2735                               vconfig.LibvirtConfigGuestVideo)
 2736         self.assertIsInstance(cfg.devices[8],
 2737                               vconfig.LibvirtConfigGuestRng)
 2738         self.assertIsInstance(cfg.devices[9],
 2739                               vconfig.LibvirtConfigMemoryBalloon)
 2740         self.assertEqual(len(cfg.metadata), 1)
 2741         self.assertIsInstance(cfg.metadata[0],
 2742                               vconfig.LibvirtConfigGuestMetaNovaInstance)
 2743         self.assertEqual(version.version_string_with_package(),
 2744                          cfg.metadata[0].package)
 2745         self.assertEqual("purple tomatoes",
 2746                          cfg.metadata[0].name)
 2747         self.assertEqual(1234567.89,
 2748                          cfg.metadata[0].creationTime)
 2749         self.assertEqual("image",
 2750                          cfg.metadata[0].roottype)
 2751         self.assertEqual(str(instance_ref["image_ref"]),
 2752                          cfg.metadata[0].rootid)
 2753 
 2754         self.assertIsInstance(cfg.metadata[0].owner,
 2755                               vconfig.LibvirtConfigGuestMetaNovaOwner)
 2756         self.assertEqual("838a72b0-0d54-4827-8fd6-fb1227633ceb",
 2757                          cfg.metadata[0].owner.userid)
 2758         self.assertEqual("cupcake",
 2759                          cfg.metadata[0].owner.username)
 2760         self.assertEqual("fake",
 2761                          cfg.metadata[0].owner.projectid)
 2762         self.assertEqual("sweetshop",
 2763                          cfg.metadata[0].owner.projectname)
 2764 
 2765         self.assertIsInstance(cfg.metadata[0].flavor,
 2766                               vconfig.LibvirtConfigGuestMetaNovaFlavor)
 2767         self.assertEqual("m1.small",
 2768                          cfg.metadata[0].flavor.name)
 2769         self.assertEqual(6,
 2770                          cfg.metadata[0].flavor.memory)
 2771         self.assertEqual(28,
 2772                          cfg.metadata[0].flavor.vcpus)
 2773         self.assertEqual(496,
 2774                          cfg.metadata[0].flavor.disk)
 2775         self.assertEqual(8128,
 2776                          cfg.metadata[0].flavor.ephemeral)
 2777         self.assertEqual(33550336,
 2778                          cfg.metadata[0].flavor.swap)
 2779 
 2780     def test_get_guest_config_q35(self):
 2781         self.flags(virt_type="kvm",
 2782                    group='libvirt')
 2783 
 2784         TEST_AMOUNT_OF_PCIE_SLOTS = 8
 2785         CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
 2786                 group='libvirt')
 2787 
 2788         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2789         instance_ref = objects.Instance(**self.test_instance)
 2790         image_meta = objects.ImageMeta.from_dict({
 2791             "disk_format": "raw",
 2792             "properties": {"hw_machine_type":
 2793                            "pc-q35-test"}})
 2794 
 2795         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
 2796                                             instance_ref,
 2797                                             image_meta)
 2798 
 2799         cfg = drvr._get_guest_config(instance_ref,
 2800                                      _fake_network_info(self),
 2801                                      image_meta, disk_info)
 2802 
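              # Count pcie-root-port controllers in the generated config;
              # devices without root_name/model raise AttributeError and are
              # skipped.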
 2803         num_ports = 0
 2804         for device in cfg.devices:
 2805             try:
 2806                 if (device.root_name == 'controller' and
 2807                         device.model == 'pcie-root-port'):
 2808                     num_ports += 1
 2809             except AttributeError:
 2810                 pass
 2811 
 2812         self.assertEqual(TEST_AMOUNT_OF_PCIE_SLOTS, num_ports)
 2813 
 2814     def test_get_guest_config_pcie_i440fx(self):
 2815         self.flags(virt_type="kvm",
 2816                    group='libvirt')
 2817 
 2818         TEST_AMOUNT_OF_PCIE_SLOTS = 8
 2819         CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
 2820                 group='libvirt')
 2821 
 2822         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2823         instance_ref = objects.Instance(**self.test_instance)
 2824         image_meta = objects.ImageMeta.from_dict({
 2825             "disk_format": "raw",
 2826             "properties": {"hw_machine_type":
 2827                            "pc-i440fx-test"}})
 2828 
 2829         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
 2830                                             instance_ref,
 2831                                             image_meta)
 2832 
 2833         cfg = drvr._get_guest_config(instance_ref,
 2834                                      _fake_network_info(self),
 2835                                      image_meta, disk_info)
 2836 
 2837         num_ports = 0
 2838         for device in cfg.devices:
 2839             try:
 2840                 if (device.root_name == 'controller' and
 2841                         device.model == 'pcie-root-port'):
 2842                     num_ports += 1
 2843             except AttributeError:
 2844                 pass
 2845 
 2846         # i440fx is not a PCIe machine, so there should be no PCIe ports
 2847         self.assertEqual(0, num_ports)
 2848 
 2849     def test_get_guest_config_missing_ownership_info(self):
 2850         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2851 
 2852         test_instance = copy.deepcopy(self.test_instance)
 2853 
 2854         ctxt = context.RequestContext(project_id=123,
 2855                                       project_name="aubergine",
 2856                                       user_id=456,
 2857                                       user_name="pie")
 2858 
 2859         flavor = objects.Flavor(name='m1.small',
 2860                                 memory_mb=6,
 2861                                 vcpus=28,
 2862                                 root_gb=496,
 2863                                 ephemeral_gb=8128,
 2864                                 swap=33550336,
 2865                                 extra_specs={})
 2866         instance_ref = objects.Instance(**test_instance)
 2867         instance_ref.flavor = flavor
 2868         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 2869 
 2870         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
 2871                                             instance_ref,
 2872                                             image_meta)
 2873 
 2874         cfg = drvr._get_guest_config(instance_ref,
 2875                                      _fake_network_info(self),
 2876                                      image_meta, disk_info,
 2877                                      context=ctxt)
 2878         self.assertEqual("N/A",
 2879                          cfg.metadata[0].owner.username)
 2880         self.assertEqual("N/A",
 2881                          cfg.metadata[0].owner.projectname)
 2882 
 2883     def test_get_guest_config_lxc(self):
 2884         self.flags(virt_type='lxc', group='libvirt')
 2885         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2886         instance_ref = objects.Instance(**self.test_instance)
 2887         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 2888         cfg = drvr._get_guest_config(instance_ref,
 2889                                      _fake_network_info(self),
 2890                                      image_meta, {'mapping': {}})
 2891         self.assertEqual(instance_ref["uuid"], cfg.uuid)
 2892         self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory)
 2893         self.assertEqual(instance_ref.flavor.vcpus, cfg.vcpus)
 2894         self.assertEqual(fields.VMMode.EXE, cfg.os_type)
 2895         self.assertEqual("/sbin/init", cfg.os_init_path)
 2896         self.assertEqual("console=tty0 console=ttyS0 console=hvc0",
 2897                          cfg.os_cmdline)
 2898         self.assertEqual("OpenStack Nova", cfg.os_init_env['product_name'])
 2899         self.assertIsNone(cfg.os_root)
 2900         self.assertEqual(3, len(cfg.devices))
 2901         self.assertIsInstance(cfg.devices[0],
 2902                               vconfig.LibvirtConfigGuestFilesys)
 2903         self.assertIsInstance(cfg.devices[1],
 2904                               vconfig.LibvirtConfigGuestInterface)
 2905         self.assertIsInstance(cfg.devices[2],
 2906                               vconfig.LibvirtConfigGuestConsole)
 2907 
 2908     def test_get_guest_config_lxc_with_id_maps(self):
 2909         self.flags(virt_type='lxc', group='libvirt')
 2910         self.flags(uid_maps=['0:1000:100'], group='libvirt')
 2911         self.flags(gid_maps=['0:1000:100'], group='libvirt')
 2912         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2913         instance_ref = objects.Instance(**self.test_instance)
 2914         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 2915         cfg = drvr._get_guest_config(instance_ref,
 2916                                      _fake_network_info(self),
 2917                                      image_meta, {'mapping': {}})
 2918         self.assertEqual(instance_ref["uuid"], cfg.uuid)
 2919         self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory)
 2920         self.assertEqual(instance_ref.vcpus, cfg.vcpus)
 2921         self.assertEqual(fields.VMMode.EXE, cfg.os_type)
 2922         self.assertEqual("/sbin/init", cfg.os_init_path)
 2923         self.assertEqual("console=tty0 console=ttyS0 console=hvc0",
 2924                          cfg.os_cmdline)
 2925         self.assertIsNone(cfg.os_root)
 2926         self.assertEqual(3, len(cfg.devices))
 2927         self.assertIsInstance(cfg.devices[0],
 2928                               vconfig.LibvirtConfigGuestFilesys)
 2929         self.assertIsInstance(cfg.devices[1],
 2930                               vconfig.LibvirtConfigGuestInterface)
 2931         self.assertIsInstance(cfg.devices[2],
 2932                               vconfig.LibvirtConfigGuestConsole)
 2933         self.assertEqual(len(cfg.idmaps), 2)
 2934         self.assertIsInstance(cfg.idmaps[0],
 2935                               vconfig.LibvirtConfigGuestUIDMap)
 2936         self.assertIsInstance(cfg.idmaps[1],
 2937                               vconfig.LibvirtConfigGuestGIDMap)
 2938 
 2939     def test_post_claim_migrate_data(self):
 2940         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2941         instance = objects.Instance(**self.test_instance)
 2942         md = objects.LibvirtLiveMigrateData()
 2943         claim = mock.Mock(autospec=True)
 2944         claimed_numa_topology = objects.InstanceNUMATopology()
 2945         claim.claimed_numa_topology = claimed_numa_topology
 2946         claim.instance_type = instance.flavor
 2947         numa_info = objects.LibvirtLiveMigrateNUMAInfo()
 2948         with test.nested(
 2949             mock.patch.object(drvr, '_get_live_migrate_numa_info',
 2950                               return_value=numa_info),
 2951             mock.patch('nova.objects.Instance.image_meta',
 2952                        new_callable=mock.PropertyMock,
 2953                        return_value='fake-image-meta')
 2954         ) as (mock_get_lm_numa_info, mock_image_meta):
 2955             claim.image_meta = instance.image_meta
 2956             post_claim_md = drvr.post_claim_migrate_data(
 2957                 self.context, instance, md, claim)
 2958             self.assertEqual(post_claim_md.dst_numa_info, numa_info)
 2959             mock_get_lm_numa_info.assert_called_with(
 2960                 claimed_numa_topology, instance.flavor, 'fake-image-meta')
 2961 
 2962     @mock.patch.object(hardware, 'get_vcpu_pin_set', new=mock.Mock())
 2963     def test_get_live_migrate_numa_info(self):
 2964         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 2965 
 2966         vcpupin1 = vconfig.LibvirtConfigGuestCPUTuneVCPUPin()
 2967         vcpupin1.id = 0
 2968         vcpupin1.cpuset = set([0, 1])
 2969         vcpupin2 = vconfig.LibvirtConfigGuestCPUTuneVCPUPin()
 2970         vcpupin2.id = 1
 2971         vcpupin2.cpuset = set([2, 3])
 2972         emulatorpin = vconfig.LibvirtConfigGuestCPUTuneEmulatorPin()
 2973         emulatorpin.cpuset = set([4, 5])
 2974         guest_cpu_tune = vconfig.LibvirtConfigGuestCPUTune()
 2975         guest_cpu_tune.vcpupin = [vcpupin1, vcpupin2]
 2976         guest_cpu_tune.emulatorpin = emulatorpin
 2977         guest_cpu_tune.vcpusched = [
 2978             vconfig.LibvirtConfigGuestCPUTuneVCPUSched()]
 2979         guest_cpu_tune.vcpusched[0].vcpus = set([6, 7])
 2980         guest_cpu_tune.vcpusched[0].priority = 8
 2981 
 2982         memnode1 = vconfig.LibvirtConfigGuestNUMATuneMemNode()
 2983         memnode1.cellid = 2
 2984         memnode1.nodeset = [6, 7]
 2985         memnode2 = vconfig.LibvirtConfigGuestNUMATuneMemNode()
 2986         memnode2.cellid = 3
 2987         memnode2.nodeset = [8, 9]
 2988         guest_numa_tune = vconfig.LibvirtConfigGuestNUMATune()
 2989         guest_numa_tune.memnodes = [memnode1, memnode2]
 2990 
 2991         expected_numa_info = objects.LibvirtLiveMigrateNUMAInfo(
 2992             cpu_pins={'0': set([0, 1]), '1': set([2, 3])},
 2993             cell_pins={'2': set([6, 7]), '3': set([8, 9])},
 2994             emulator_pins=set([4, 5]),
 2995             sched_vcpus=set([7, 6]),
 2996             sched_priority=8)
 2997 
 2998         # NOTE(artom) This is a
 2999         # (cpu_set, guest_cpu_tune, guest_cpu_numa, guest_numa_tune)
 3000         # tuple. See _get_guest_numa_config() docstring for full documentation.
 3001         # _get_live_migrate_numa_info() only cares about guest_cpu_tune for CPU
 3002         # pinning and emulator thread pinning, and guest_numa_tune for cell
 3003         # pinning, so only those two are included in the tuple.
 3004         guest_numa_config = (None, guest_cpu_tune, None, guest_numa_tune)
 3005 
 3006         with mock.patch.object(drvr, '_get_guest_numa_config',
 3007                                return_value=guest_numa_config):
 3008             self.assertEqual(
 3009                 expected_numa_info.obj_to_primitive(),
 3010                 drvr._get_live_migrate_numa_info(
 3011                     'fake-instance-numa-topology',
 3012                     'fake-flavor', 'fake-image-meta').obj_to_primitive())
 3013 
 3014     @mock.patch.object(hardware, 'get_vcpu_pin_set')
 3015     def test_get_live_migrate_numa_info_empty(self, _):
 3016         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 3017         guest_numa_config = (None, None, None, None)
 3018         with mock.patch.object(drvr, '_get_guest_numa_config',
 3019                                return_value=guest_numa_config):
 3020             self.assertEqual(
 3021                 objects.LibvirtLiveMigrateNUMAInfo().obj_to_primitive(),
 3022                 drvr._get_live_migrate_numa_info(
 3023                     'fake-instance-numa-topology',
 3024                     'fake-flavor', 'fake-image-meta').obj_to_primitive())
 3025 
 3026     @mock.patch.object(
 3027         host.Host, "is_cpu_control_policy_capable", return_value=True)
 3028     def test_get_guest_config_numa_host_instance_fits(self, is_able):
 3029         self.flags(cpu_shared_set=None, cpu_dedicated_set=None,
 3030                    group='compute')
 3031         instance_ref = objects.Instance(**self.test_instance)
 3032         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 3033         flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
 3034                                 ephemeral_gb=8128, swap=33550336, name='fake',
 3035                                 extra_specs={})
 3036         instance_ref.flavor = flavor
 3037 
 3038         caps = vconfig.LibvirtConfigCaps()
 3039         caps.host = vconfig.LibvirtConfigCapsHost()
 3040         caps.host.cpu = vconfig.LibvirtConfigCPU()
 3041         caps.host.cpu.arch = fields.Architecture.X86_64
 3042         caps.host.topology = fakelibvirt.NUMATopology()
 3043 
 3044         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 3045         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
 3046                                             instance_ref,
 3047                                             image_meta)
 3048 
 3049         with test.nested(
 3050                 mock.patch.object(host.Host, 'has_min_version',
 3051                                   return_value=True),
 3052                 mock.patch.object(host.Host, "get_capabilities",
 3053                                   return_value=caps),
 3054                 mock.patch.object(host.Host, 'get_online_cpus',
 3055                                   return_value=set([0, 1])),
 3056                 ):
 3057             cfg = drvr._get_guest_config(instance_ref, [],
 3058                                          image_meta, disk_info)
 3059             self.assertIsNone(cfg.cpuset)
 3060             self.assertEqual(0, len(cfg.cputune.vcpupin))
 3061             self.assertIsNone(cfg.cpu.numa)
 3062 
 3063     @mock.patch('nova.privsep.utils.supports_direct_io',
 3064                 new=mock.Mock(return_value=True))
 3065     @mock.patch.object(
 3066         host.Host, "is_cpu_control_policy_capable", return_value=True)
 3067     def test_get_guest_config_numa_host_instance_no_fit(self, is_able):
 3068         instance_ref = objects.Instance(**self.test_instance)
 3069         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 3070         flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
 3071                                 ephemeral_gb=8128, swap=33550336, name='fake',
 3072                                 extra_specs={})
 3073         instance_ref.flavor = flavor
 3074 
 3075         caps = vconfig.LibvirtConfigCaps()
 3076         caps.host = vconfig.LibvirtConfigCapsHost()
 3077         caps.host.cpu = vconfig.LibvirtConfigCPU()
 3078         caps.host.cpu.arch = fields.Architecture.X86_64
 3079         caps.host.topology = fakelibvirt.NUMATopology()
 3080 
 3081         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 3082         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
 3083                                             instance_ref,
 3084                                             image_meta)
 3085 
 3086         with test.nested(
 3087                 mock.patch.object(host.Host, "get_capabilities",
 3088                                   return_value=caps),
 3089                 mock.patch.object(random, 'choice'),
 3090                 mock.patch.object(drvr, '_has_numa_support',
 3091                                   return_value=False)
 3092             ) as (_, choice_mock, _):
 3093             cfg = drvr._get_guest_config(instance_ref, [],
 3094                                          image_meta, disk_info)
 3095             self.assertFalse(choice_mock.called)
 3096             self.assertIsNone(cfg.cpuset)
 3097             self.assertEqual(0, len(cfg.cputune.vcpupin))
 3098             self.assertIsNone(cfg.cpu.numa)
 3099 
 3100     def _test_get_guest_memory_backing_config(
 3101             self, host_topology, inst_topology, numatune):
 3102         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 3103         flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
 3104                                 ephemeral_gb=8128, swap=33550336, name='fake',
 3105                                 extra_specs={})
 3106         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 3107         with mock.patch.object(
 3108                 drvr, "_get_host_numa_topology",
 3109                 return_value=host_topology):
 3110             return drvr._get_guest_memory_backing_config(
 3111                 inst_topology, numatune, flavor, image_meta)
 3112 
 3113     @mock.patch.object(host.Host,
 3114                        'has_min_version', return_value=True)
 3115     def test_get_guest_memory_backing_config_large_success(self, mock_version):
 3116         host_topology = objects.NUMATopology(cells=[
 3117             objects.NUMACell(
 3118                 id=3,
 3119                 cpuset=set([1]),
 3120                 pcpuset=set(),
 3121                 siblings=[set([1])],
 3122                 memory=1024,
 3123                 mempages=[
 3124                     objects.NUMAPagesTopology(size_kb=4, total=2000, used=0),
 3125                     objects.NUMAPagesTopology(size_kb=2048, total=512, used=0),
 3126                     objects.NUMAPagesTopology(size_kb=1048576, total=0,
 3127                                               used=0),
 3128                 ])])
 3129         inst_topology = objects.InstanceNUMATopology(cells=[
 3130             objects.InstanceNUMACell(
 3131                 id=3, cpuset=set([0, 1]), pcpuset=set(), memory=1024,
 3132                 pagesize=2048),
 3133         ])
 3134 
 3135         numa_tune = vconfig.LibvirtConfigGuestNUMATune()
 3136         numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
 3137         numa_tune.memnodes[0].cellid = 0
 3138         numa_tune.memnodes[0].nodeset = [3]
 3139 
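              # Guest cell 0 is pinned to host node 3, which offers 2048 KiB
              # pages, so a single hugepages element is expected.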
 3140         result = self._test_get_guest_memory_backing_config(
 3141             host_topology, inst_topology, numa_tune)
 3142         self.assertEqual(1, len(result.hugepages))
 3143         self.assertEqual(2048, result.hugepages[0].size_kb)
 3144         self.assertEqual([0], result.hugepages[0].nodeset)
 3145 
 3146     @mock.patch.object(host.Host,
 3147                        'has_min_version', return_value=True)
 3148     def test_get_guest_memory_backing_config_smallest(self, mock_version):
 3149         host_topology = objects.NUMATopology(cells=[
 3150             objects.NUMACell(
 3151                 id=3,
 3152                 cpuset=set([1]),
 3153                 pcpuset=set(),
 3154                 siblings=[set([1])],
 3155                 memory=1024,
 3156                 mempages=[
 3157                     objects.NUMAPagesTopology(size_kb=4, total=2000, used=0),
 3158                     objects.NUMAPagesTopology(size_kb=2048, total=512, used=0),
 3159                     objects.NUMAPagesTopology(size_kb=1048576, total=0,
 3160                                               used=0),
 3161                 ])])
 3162         inst_topology = objects.InstanceNUMATopology(cells=[
 3163             objects.InstanceNUMACell(
 3164                 id=3, cpuset=set([0, 1]), pcpuset=set(), memory=1024,
 3165                 pagesize=4),
 3166         ])
 3167 
 3168         numa_tune = vconfig.LibvirtConfigGuestNUMATune()
 3169         numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
 3170         numa_tune.memnodes[0].cellid = 0
 3171         numa_tune.memnodes[0].nodeset = [3]
 3172 
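              # The requested 4 KiB page size is the smallest available, so no
              # explicit memory backing config is expected.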
 3173         result = self._test_get_guest_memory_backing_config(
 3174             host_topology, inst_topology, numa_tune)
 3175         self.assertIsNone(result)
 3176 
 3177     def test_get_guest_memory_backing_config_realtime(self):
 3178         extra_specs = {
 3179             "hw:cpu_realtime": "yes",
 3180             "hw:cpu_policy": "dedicated"
 3181         }
 3182         flavor = objects.Flavor(name='m1.small',
 3183                                 memory_mb=6,
 3184                                 vcpus=28,
 3185                                 root_gb=496,
 3186                                 ephemeral_gb=8128,
 3187                                 swap=33550336,
 3188                                 extra_specs=extra_specs)
 3189         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 3190         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 3191         membacking = drvr._get_guest_memory_backing_config(
 3192             None, None, flavor, image_meta)
 3193         self.assertTrue(membacking.locked)
 3194         self.assertFalse(membacking.sharedpages)
 3195 
 3196     def test_get_guest_memory_backing_config_realtime_invalid_share(self):
 3197         """Test behavior when there is no pool of shared CPUs on which to place
 3198         the emulator threads, isolating them from the instance CPU processes.
 3199         """
 3200         extra_specs = {
 3201             "hw:cpu_realtime": "yes",
 3202             "hw:cpu_policy": "dedicated",
 3203             "hw:emulator_threads_policy": "share",
 3204         }
 3205         flavor = objects.Flavor(
 3206             name='m1.small',
 3207             memory_mb=6,
 3208             vcpus=28,
 3209             root_gb=496,
 3210             ephemeral_gb=8128,
 3211             swap=33550336,
 3212             extra_specs=extra_specs)
 3213         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 3214         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 3215 
 3216         # this should fail because there is nowhere to place the emulator
 3217         # threads
 3218         self.assertRaises(
 3219             exception.RealtimeMaskNotFoundOrInvalid,
 3220             drvr._get_guest_memory_backing_config,
 3221             None, None, flavor, image_meta,
 3222         )
 3223 
 3224     def _test_sev_enabled(self, expected=None, host_sev_enabled=False,
 3225                           enc_extra_spec=None, enc_image_prop=None,
 3226                           hw_machine_type=None, hw_firmware_type=None):
 3227         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 3228         drvr._host._supports_amd_sev = host_sev_enabled
 3229 
 3230         extra_specs = {}
 3231         if enc_extra_spec is not None:
 3232             extra_specs['hw:mem_encryption'] = enc_extra_spec
 3233         flavor = objects.Flavor(name='m1.fake')
 3234         flavor.extra_specs = extra_specs
 3235 
 3236         image_props = {}
 3237         image_props['hw_architecture'] = fields.Architecture.X86_64
 3238         if hw_machine_type is not None:
 3239             image_props['hw_machine_type'] = hw_machine_type
 3240         if hw_firmware_type is not None:
 3241             image_props['hw_firmware_type'] = hw_firmware_type
 3242         if enc_image_prop is not None:
 3243             image_props['hw_mem_encryption'] = enc_image_prop
 3244 
 3245         image_meta = fake_image.fake_image_obj(
 3246             {'id': '150d530b-1c57-4367-b754-1f1b5237923d'},
 3247             {}, image_props)
 3248 
 3249         enabled = drvr._sev_enabled(flavor, image_meta)
 3250 
 3251         if expected is None:
 3252             self.fail("_test_sev_enabled called without an expected "
 3253                       "return value. Maybe you expected an exception?")
 3254 
 3255         self.assertEqual(expected, enabled)
 3256 
 3257     def test_sev_enabled_no_host_support(self):
 3258         self._test_sev_enabled(False)
 3259 
 3260     def test_sev_enabled_host_support_no_flavor_image(self):
 3261         self._test_sev_enabled(False, host_sev_enabled=True)
 3262 
 3263     def test_sev_enabled_no_host_support_flavor_requested(self):
 3264         self._test_sev_enabled(False, enc_extra_spec=True)
 3265 
 3266     def test_sev_enabled_no_host_support_image_requested(self):
 3267         self._test_sev_enabled(False, enc_image_prop=True)
 3268 
 3269     def test_sev_enabled_host_support_flavor_requested(self):
 3270         self._test_sev_enabled(True, host_sev_enabled=True,
 3271                                enc_extra_spec=True, hw_firmware_type='uefi',
 3272                                hw_machine_type='q35')
 3273 
 3274     def test_sev_enabled_host_support_image_requested(self):
 3275         self._test_sev_enabled(True, host_sev_enabled=True,
 3276                                enc_image_prop=True, hw_firmware_type='uefi',
 3277                                hw_machine_type='q35')
 3278 
 3279     # The cases where the flavor and image requests contradict each other
 3280     # are already covered by test_hardware.MemEncryptionConflictTestCase
 3281     # so we don't need to test them in great detail here.
 3282     def test_sev_enabled_host_extra_spec_image_conflict(self):
 3283         exc = self.assertRaises(exception.FlavorImageConflict,
 3284                                 self._test_sev_enabled,
 3285                                 host_sev_enabled=True, enc_extra_spec=False,
 3286                                 enc_image_prop=True)
 3287         self.assertEqual(
 3288             "Flavor m1.fake has hw:mem_encryption extra spec explicitly set "
 3289             "to False, conflicting with image fake_image which has "
 3290             "hw_mem_encryption property explicitly set to True", str(exc))
 3291 
 3292     def test_sev_enabled_host_extra_spec_no_uefi(self):
 3293         exc = self.assertRaises(exception.FlavorImageConflict,
 3294                                 self._test_sev_enabled,
 3295                                 host_sev_enabled=True, enc_extra_spec=True)
 3296         self.assertEqual(
 3297             "Memory encryption requested by hw:mem_encryption extra spec in "
 3298             "m1.fake flavor but image fake_image doesn't have "
 3299             "'hw_firmware_type' property set to 'uefi'", str(exc))
 3300 
 3301     def test_sev_enabled_host_extra_spec_no_machine_type(self):
 3302         exc = self.assertRaises(exception.InvalidMachineType,
 3303                                 self._test_sev_enabled,
 3304                                 host_sev_enabled=True, enc_extra_spec=True,
 3305                                 hw_firmware_type='uefi')
 3306         self.assertEqual(
 3307             "Machine type 'pc' is not compatible with image fake_image "
 3308             "(150d530b-1c57-4367-b754-1f1b5237923d): q35 type is required "
 3309             "for SEV to work", str(exc))
 3310 
 3311     def test_sev_enabled_host_extra_spec_pc(self):
 3312         exc = self.assertRaises(exception.InvalidMachineType,
 3313                                 self._test_sev_enabled,
 3314                                 host_sev_enabled=True, enc_extra_spec=True,
 3315                                 hw_firmware_type='uefi', hw_machine_type='pc')
 3316         self.assertEqual(
 3317             "Machine type 'pc' is not compatible with image fake_image "
 3318             "(150d530b-1c57-4367-b754-1f1b5237923d): q35 type is required "
 3319             "for SEV to work", str(exc))
 3320 
 3321     def _setup_fake_domain_caps(self, fake_domain_caps):
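              # Advertise an SEV feature only for the x86_64/q35 combination.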
 3322         sev_feature = vconfig.LibvirtConfigDomainCapsFeatureSev()
 3323         sev_feature.cbitpos = 47
 3324         sev_feature.reduced_phys_bits = 1
 3325         domain_caps = vconfig.LibvirtConfigDomainCaps()
 3326         domain_caps._features = vconfig.LibvirtConfigDomainCapsFeatures()
 3327         domain_caps._features.features = [sev_feature]
 3328         fake_domain_caps.return_value = defaultdict(
 3329             dict, {'x86_64': {'q35': domain_caps}})
 3330 
 3331     @mock.patch.object(host.Host, 'get_domain_capabilities')
 3332     def test_find_sev_feature_missing_arch(self, fake_domain_caps):
 3333         self._setup_fake_domain_caps(fake_domain_caps)
 3334         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 3335         self.assertIsNone(drvr._find_sev_feature('arm1', 'q35'))
 3336 
 3337     @mock.patch.object(host.Host, 'get_domain_capabilities')
 3338     def test_find_sev_feature_missing_mach_type(self, fake_domain_caps):
 3339         self._setup_fake_domain_caps(fake_domain_caps)
 3340         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 3341         self.assertIsNone(drvr._find_sev_feature('x86_64', 'g3beige'))
 3342 
 3343     @mock.patch.object(host.Host, 'get_domain_capabilities')
 3344     def test_find_sev_feature(self, fake_domain_caps):
 3345         self._setup_fake_domain_caps(fake_domain_caps)
 3346         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 3347         feature = drvr._find_sev_feature('x86_64', 'q35')
 3348         self.assertIsInstance(feature,
 3349                               vconfig.LibvirtConfigDomainCapsFeatureSev)
 3350         self.assertEqual(47, feature.cbitpos)
 3351         self.assertEqual(1, feature.reduced_phys_bits)
 3352 
 3353     @mock.patch.object(libvirt_driver.LibvirtDriver,
 3354                        "_has_uefi_support", new=mock.Mock(return_value=True))
 3355     def _setup_sev_guest(self, extra_image_properties=None):
 3356         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 3357         drvr._host._supports_amd_sev = True
 3358 
 3359         ctxt = context.RequestContext(project_id=123,
 3360                                       project_name="aubergine",
 3361                                       user_id=456,
 3362                                       user_name="pie")
 3363 
 3364         extra_specs = {
 3365             "hw:mem_encryption": True,
 3366         }
 3367         flavor = objects.Flavor(name='m1.small',
 3368                                 memory_mb=6,
 3369                                 vcpus=28,
 3370                                 root_gb=496,
 3371                                 ephemeral_gb=8128,
 3372                                 swap=33550336,
 3373                                 extra_specs=extra_specs)
 3374 
 3375         instance_ref = objects.Instance(**self.test_instance)
 3376         instance_ref.flavor = flavor
 3377         image_meta_properties = {
 3378             'hw_firmware_type': 'uefi',
 3379             'hw_machine_type': 'q35'}
 3380         if extra_image_properties:
 3381             image_meta_properties.update(extra_image_properties)
 3382         image_meta = objects.ImageMeta.from_dict({
 3383             'id': 'd9c6aeee-8258-4bdb-bca4-39940461b182',
 3384             'name': 'fakeimage',
 3385             'disk_format': 'raw',
 3386             'properties': image_meta_properties})
 3387 
 3388         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
 3389                                             instance_ref,
 3390                                             image_meta)
 3391 
 3392         return drvr._get_guest_config(instance_ref,
 3393                                       _fake_network_info(self),
 3394                                       image_meta, disk_info,
 3395                                       context=ctxt)
 3396 
 3397     def test_get_guest_config_sev_no_feature(self):
 3398         self.assertRaises(exception.MissingDomainCapabilityFeatureException,
 3399                           self._setup_sev_guest)
 3400 
 3401     @mock.patch.object(host.Host, 'get_domain_capabilities')
 3402     @mock.patch.object(designer, 'set_driver_iommu_for_sev')
 3403     def test_get_guest_config_sev(self, mock_designer, fake_domain_caps):
 3404         self._setup_fake_domain_caps(fake_domain_caps)
 3405         cfg = self._setup_sev_guest()
 3406 
 3407         # SEV-related tag should be set
 3408         self.assertIsInstance(cfg.launch_security,
 3409                               vconfig.LibvirtConfigGuestSEVLaunchSecurity)
 3410         self.assertIsInstance(cfg.membacking,
 3411                               vconfig.LibvirtConfigGuestMemoryBacking)
 3412         self.assertTrue(cfg.membacking.locked)
 3413 
 3414         mock_designer.assert_called_once_with(cfg)
 3415 
 3416     def test_get_guest_memory_backing_config_file_backed(self):
 3417         self.flags(file_backed_memory=1024, group="libvirt")
 3418 
 3419         result = self._test_get_guest_memory_backing_config(
 3420             None, None, None
 3421         )
 3422         self.assertTrue(result.sharedaccess)
 3423         self.assertTrue(result.filesource)
 3424         self.assertTrue(result.allocateimmediate)
 3425 
 3426     @mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
 3427     def test_get_guest_memory_backing_config_file_backed_discard(self,
 3428             mock_lib_version):
 3429         self.flags(file_backed_memory=1024, group='libvirt')
 3430 
 3431         mock_lib_version.return_value = versionutils.convert_version_to_int(
 3432             libvirt_driver.MIN_LIBVIRT_FILE_BACKED_DISCARD_VERSION)
 3433 
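              # Exactly the minimum libvirt version, so discard should be
              # enabled.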
 3434         result = self._test_get_guest_memory_backing_config(
 3435             None, None, None
 3436         )
 3437         self.assertTrue(result.discard)
 3438 
 3439     @mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
 3440     def test_get_guest_memory_backing_config_file_backed_discard_libvirt(self,
 3441             mock_lib_version):
 3442         self.flags(file_backed_memory=1024, group='libvirt')
 3443 
 3444         mock_lib_version.return_value = versionutils.convert_version_to_int(
 3445             libvirt_driver.MIN_LIBVIRT_FILE_BACKED_DISCARD_VERSION) - 1
 3446 
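              # One version below the minimum, so discard should stay
              # disabled.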
 3447         result = self._test_get_guest_memory_backing_config(
 3448             None, None, None
 3449         )
 3450         self.assertFalse(result.discard)
 3451 
 3452     def test_get_guest_memory_backing_config_file_backed_hugepages(self):
 3453         self.flags(file_backed_memory=1024, group="libvirt")
 3454         host_topology = objects.NUMATopology(cells=[
 3455             objects.NUMACell(
 3456                 id=3,
 3457                 cpuset=set([1]),
 3458                 pcpuset=set(),
 3459                 siblings=[set([1])],
 3460                 memory=1024,
 3461                 mempages=[
 3462                     objects.NUMAPagesTopology(size_kb=4, total=2000, used=0),
 3463                     objects.NUMAPagesTopology(size_kb=2048, total=512, used=0),
 3464                     objects.NUMAPagesTopology(size_kb=1048576, total=0,
 3465                                               used=0),
 3466                 ])])
 3467         inst_topology = objects.InstanceNUMATopology(cells=[
 3468             objects.InstanceNUMACell(
 3469                 id=3, cpuset=set([0, 1]), pcpuset=set(), memory=1024,
 3470                 pagesize=2048),
 3471         ])
 3472 
 3473         numa_tune = vconfig.LibvirtConfigGuestNUMATune()
 3474         numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
 3475         numa_tune.memnodes[0].cellid = 0
 3476         numa_tune.memnodes[0].nodeset = [3]
 3477 
 3478         self.assertRaises(exception.MemoryPagesUnsupported,
 3479                           self._test_get_guest_memory_backing_config,
 3480                           host_topology, inst_topology, numa_tune)
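              # File-backed memory and explicit hugepages are mutually
              # exclusive memory backings, hence MemoryPagesUnsupported.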
 3481 
 3482     @mock.patch.object(
 3483         host.Host, "is_cpu_control_policy_capable", return_value=True)
 3484     def test_get_guest_config_numa_host_instance_pci_no_numa_info(
 3485             self, is_able):
 3486         self.flags(cpu_shared_set='3', cpu_dedicated_set=None,
 3487                    group='compute')
 3488 
 3489         instance_ref = objects.Instance(**self.test_instance)
 3490         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 3491         flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
 3492                                 ephemeral_gb=8128, swap=33550336, name='fake',
 3493                                 extra_specs={})
 3494         instance_ref.flavor = flavor
 3495 
 3496         caps = vconfig.LibvirtConfigCaps()
 3497         caps.host = vconfig.LibvirtConfigCapsHost()
 3498         caps.host.cpu = vconfig.LibvirtConfigCPU()
 3499         caps.host.cpu.arch = fields.Architecture.X86_64
 3500         caps.host.topology = fakelibvirt.NUMATopology()
 3501 
 3502         conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 3503         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
 3504                                             instance_ref,
 3505                                             image_meta)
 3506         pci_device_info = dict(test_pci_device.fake_db_dev)
 3507         pci_device_info.update(compute_node_id=1,
 3508                                label='fake',
 3509                                status=fields.PciDeviceStatus.AVAILABLE,
 3510                                address='0000:00:00.1',
 3511                                instance_uuid=None,
 3512                                request_id=None,
 3513                                extra_info={},
 3514                                numa_node=None)
 3515         pci_device = objects.PciDevice(**pci_device_info)
 3516 
 3517         with test.nested(
 3518                 mock.patch.object(host.Host, 'has_min_version',
 3519                                   return_value=True),
 3520                 mock.patch.object(host.Host, "get_capabilities",
 3521                                   return_value=caps),
 3522                 mock.patch.object(host.Host, 'get_online_cpus',
 3523                                   return_value=set([3])),
 3524                 mock.patch.object(pci_manager, "get_instance_pci_devs",
 3525                                   return_value=[pci_device])):
 3526             cfg = conn._get_guest_config(instance_ref, [],
 3527                                          image_meta, disk_info)
 3528             self.assertEqual(set([3]), cfg.cpuset)
 3529             self.assertEqual(0, len(cfg.cputune.vcpupin))
 3530             self.assertIsNone(cfg.cpu.numa)
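                  # Without a guest NUMA topology, the shared CPU set simply
                  # becomes the domain-level cpuset, roughly
                  #     <vcpu placement='static' cpuset='3'>2</vcpu>
                  # instead of per-vCPU <vcpupin> elements.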
 3531 
 3532     @mock.patch('nova.privsep.utils.supports_direct_io',
 3533                 new=mock.Mock(return_value=True))
 3534     @mock.patch.object(
 3535         host.Host, "is_cpu_control_policy_capable", return_value=True)
 3536     def test_get_guest_config_numa_host_instance_2pci_no_fit(self, is_able):
 3537         self.flags(cpu_shared_set='3', cpu_dedicated_set=None,
 3538                    group='compute')
 3539         instance_ref = objects.Instance(**self.test_instance)
 3540         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 3541         flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
 3542                                 ephemeral_gb=8128, swap=33550336, name='fake',
 3543                                 extra_specs={})
 3544         instance_ref.flavor = flavor
 3545 
 3546         caps = vconfig.LibvirtConfigCaps()
 3547         caps.host = vconfig.LibvirtConfigCapsHost()
 3548         caps.host.cpu = vconfig.LibvirtConfigCPU()
 3549         caps.host.cpu.arch = fields.Architecture.X86_64
 3550         caps.host.topology = fakelibvirt.NUMATopology()
 3551 
 3552         conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 3553         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
 3554                                             instance_ref,
 3555                                             image_meta)
 3556         pci_device_info = dict(test_pci_device.fake_db_dev)
 3557         pci_device_info.update(compute_node_id=1,
 3558                                label='fake',
 3559                                status=fields.PciDeviceStatus.AVAILABLE,
 3560                                address='0000:00:00.1',
 3561                                instance_uuid=None,
 3562                                request_id=None,
 3563                                extra_info={},
 3564                                numa_node=1)
 3565         pci_device = objects.PciDevice(**pci_device_info)
 3566         pci_device_info.update(numa_node=0, address='0000:00:00.2')
 3567         pci_device2 = objects.PciDevice(**pci_device_info)
 3568         with test.nested(
 3569                 mock.patch.object(
 3570                     host.Host, "get_capabilities", return_value=caps),
 3571                 mock.patch.object(host.Host, 'get_online_cpus',
 3572                                   return_value=set([3])),
 3573                 mock.patch.object(random, 'choice'),
 3574                 mock.patch.object(pci_manager, "get_instance_pci_devs",
 3575                                   return_value=[pci_device, pci_device2]),
 3576                 mock.patch.object(conn, '_has_numa_support',
 3577                                   return_value=False)
 3578             ) as (_, _, choice_mock, pci_mock, _):
 3579             cfg = conn._get_guest_config(instance_ref, [],
 3580                                          image_meta, disk_info)
 3581             self.assertFalse(choice_mock.called)
 3582             self.assertEqual(set([3]), cfg.cpuset)
 3583             self.assertEqual(0, len(cfg.cputune.vcpupin))
 3584             self.assertIsNone(cfg.cpu.numa)
 3585 
 3586     @mock.patch.object(fakelibvirt.Connection, 'getType')
 3587     @mock.patch.object(fakelibvirt.Connection, 'getVersion')
 3588     @mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
 3589     @mock.patch.object(host.Host, 'get_capabilities')
 3590     @mock.patch.object(libvirt_driver.LibvirtDriver, '_set_host_enabled')
 3591     def _test_get_guest_config_numa_unsupported(self, fake_lib_version,
 3592                                                 fake_version, fake_type,
 3593                                                 fake_arch, exception_class,
 3594                                                 pagesize, mock_host,
 3595                                                 mock_caps, mock_lib_version,
 3596                                                 mock_version, mock_type):
 3597         instance_topology = objects.InstanceNUMATopology(cells=[
 3598             objects.InstanceNUMACell(
 3599                 id=0, cpuset=set([0]), pcpuset=set(),
 3600                 memory=1024, pagesize=pagesize),
 3601         ])
 3602         instance_ref = objects.Instance(**self.test_instance)
 3603         instance_ref.numa_topology = instance_topology
 3604         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 3605         flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
 3606                                 ephemeral_gb=8128, swap=33550336, name='fake',
 3607                                 extra_specs={})
 3608         instance_ref.flavor = flavor
 3609 
 3610         caps = vconfig.LibvirtConfigCaps()
 3611         caps.host = vconfig.LibvirtConfigCapsHost()
 3612         caps.host.cpu = vconfig.LibvirtConfigCPU()
 3613         caps.host.cpu.arch = fake_arch
 3614         caps.host.topology = fakelibvirt.NUMATopology()
 3615 
 3616         mock_type.return_value = fake_type
 3617         mock_version.return_value = fake_version
 3618         mock_lib_version.return_value = fake_lib_version
 3619         mock_caps.return_value = caps
 3620 
 3621         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 3622         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
 3623                                             instance_ref,
 3624                                             image_meta)
 3625 
 3626         self.assertRaises(exception_class,
 3627                           drvr._get_guest_config,
 3628                           instance_ref, [],
 3629                           image_meta, disk_info)
 3630 
 3631     def test_get_guest_config_numa_other_arch_qemu(self):
 3632         self.flags(virt_type='kvm', group='libvirt')
 3633 
 3634         self._test_get_guest_config_numa_unsupported(
 3635             versionutils.convert_version_to_int(
 3636                 libvirt_driver.MIN_LIBVIRT_VERSION),
 3637             versionutils.convert_version_to_int(
 3638                 libvirt_driver.MIN_QEMU_VERSION),
 3639             host.HV_DRIVER_QEMU,
 3640             fields.Architecture.S390,
 3641             exception.NUMATopologyUnsupported,
 3642             None)
 3643 
 3644     def test_get_guest_config_numa_xen(self):
 3645         self.flags(virt_type='xen', group='libvirt')
 3646         self._test_get_guest_config_numa_unsupported(
 3647             versionutils.convert_version_to_int(
 3648                 libvirt_driver.MIN_LIBVIRT_VERSION),
 3649             versionutils.convert_version_to_int((4, 5, 0)),
 3650             'XEN',
 3651             fields.Architecture.X86_64,
 3652             exception.NUMATopologyUnsupported,
 3653             None)
 3654 
 3655     @mock.patch.object(
 3656         host.Host, "is_cpu_control_policy_capable", return_value=True)
 3657     def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(
 3658             self, is_able):
 3659         self.flags(cpu_shared_set='2-3', cpu_dedicated_set=None,
 3660                    group='compute')
 3661 
 3662         instance_ref = objects.Instance(**self.test_instance)
 3663         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 3664         flavor = objects.Flavor(memory_mb=1024, vcpus=2, root_gb=496,
 3665                                 ephemeral_gb=8128, swap=33550336, name='fake',
 3666                                 extra_specs={})
 3667         instance_ref.flavor = flavor
 3668 
 3669         caps = vconfig.LibvirtConfigCaps()
 3670         caps.host = vconfig.LibvirtConfigCapsHost()
 3671         caps.host.cpu = vconfig.LibvirtConfigCPU()
 3672         caps.host.cpu.arch = fields.Architecture.X86_64
 3673         caps.host.topology = fakelibvirt.NUMATopology(kb_mem=4194304)
 3674 
 3675         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 3676         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
 3677                                             instance_ref,
 3678                                             image_meta)
 3679 
 3680         with test.nested(
 3681                 mock.patch.object(host.Host, 'has_min_version',
 3682                                   return_value=True),
 3683                 mock.patch.object(host.Host, 'get_capabilities',
 3684                                   return_value=caps),
 3685                 mock.patch.object(host.Host, 'get_online_cpus',
 3686                                   return_value=set([2, 3])),
 3687                 ):
 3688             cfg = drvr._get_guest_config(instance_ref, [],
 3689                                          image_meta, disk_info)
 3690             # NOTE(ndipanov): we make sure that pin_set was taken into account
 3691             # when choosing viable cells
 3692             self.assertEqual(set([2, 3]), cfg.cpuset)
 3693             self.assertEqual(0, len(cfg.cputune.vcpupin))
 3694             self.assertIsNone(cfg.cpu.numa)
 3695 
 3696     @mock.patch.object(
 3697         host.Host, "is_cpu_control_policy_capable", return_value=True)
 3698     def test_get_guest_config_non_numa_host_instance_topo(self, is_able):
 3699         instance_topology = objects.InstanceNUMATopology(cells=[
 3700             objects.InstanceNUMACell(
 3701                 id=0, cpuset=set([0]), pcpuset=set(), memory=1024),
 3702             objects.InstanceNUMACell(
 3703                 id=1, cpuset=set([2]), pcpuset=set(), memory=1024),
 3704         ])
 3705         instance_ref = objects.Instance(**self.test_instance)
 3706         instance_ref.numa_topology = instance_topology
 3707         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 3708         flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
 3709                                 ephemeral_gb=8128, swap=33550336, name='fake',
 3710                                 extra_specs={})
 3711         instance_ref.flavor = flavor
 3712 
 3713         caps = vconfig.LibvirtConfigCaps()
 3714         caps.host = vconfig.LibvirtConfigCapsHost()
 3715         caps.host.cpu = vconfig.LibvirtConfigCPU()
 3716         caps.host.cpu.arch = fields.Architecture.X86_64
 3717         caps.host.topology = None
 3718 
 3719         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 3720         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
 3721                                             instance_ref,
 3722                                             image_meta)
 3723 
 3724         with test.nested(
 3725                 mock.patch.object(
 3726                     objects.InstanceNUMATopology, "get_by_instance_uuid",
 3727                     return_value=instance_topology),
 3728                 mock.patch.object(host.Host, 'has_min_version',
 3729                                   return_value=True),
 3730                 mock.patch.object(host.Host, "get_capabilities",
 3731                                   return_value=caps)):
 3732             cfg = drvr._get_guest_config(instance_ref, [],
 3733                                          image_meta, disk_info)
 3734             self.assertIsNone(cfg.cpuset)
 3735             self.assertEqual(0, len(cfg.cputune.vcpupin))
 3736             self.assertIsNone(cfg.numatune)
 3737             self.assertIsNotNone(cfg.cpu.numa)
 3738             for instance_cell, numa_cfg_cell in zip(
 3739                     instance_topology.cells, cfg.cpu.numa.cells):
 3740                 self.assertEqual(instance_cell.id, numa_cfg_cell.id)
 3741                 self.assertEqual(instance_cell.total_cpus, numa_cfg_cell.cpus)
 3742                 self.assertEqual(instance_cell.memory * units.Ki,
 3743                                  numa_cfg_cell.memory)
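                  # So even without host NUMA data the guest still gets a
                  # virtual NUMA topology (<cpu><numa><cell .../></numa></cpu>
                  # elements), but no <numatune> placement is generated.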
 3744 
 3745     @mock.patch.object(
 3746         host.Host, "is_cpu_control_policy_capable", return_value=True)
 3747     def test_get_guest_config_numa_host_instance_topo(self, is_able):
 3748         self.flags(cpu_shared_set='0-5', cpu_dedicated_set=None,
 3749                    group='compute')
 3750 
 3751         instance_topology = objects.InstanceNUMATopology(cells=[
 3752             objects.InstanceNUMACell(
 3753                 id=1, cpuset=set([0, 1]), pcpuset=set(), memory=1024,
 3754                 pagesize=None),
 3755             objects.InstanceNUMACell(
 3756                 id=2, cpuset=set([2, 3]), pcpuset=set(), memory=1024,
 3757                 pagesize=None),
 3758         ])
 3759         instance_ref = objects.Instance(**self.test_instance)
 3760         instance_ref.numa_topology = instance_topology
 3761         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 3762         flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
 3763                                 ephemeral_gb=8128, swap=33550336, name='fake',
 3764                                 extra_specs={})
 3765         instance_ref.flavor = flavor
 3766 
 3767         caps = vconfig.LibvirtConfigCaps()
 3768         caps.host = vconfig.LibvirtConfigCapsHost()
 3769         caps.host.cpu = vconfig.LibvirtConfigCPU()
 3770         caps.host.cpu.arch = fields.Architecture.X86_64
 3771         caps.host.topology = fakelibvirt.NUMATopology()
 3772 
 3773         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 3774         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
 3775                                             instance_ref,
 3776                                             image_meta)
 3777 
 3778         with test.nested(
 3779                 mock.patch.object(
 3780                     objects.InstanceNUMATopology, "get_by_instance_uuid",
 3781                     return_value=instance_topology),
 3782                 mock.patch.object(host.Host, 'has_min_version',
 3783                                   return_value=True),
 3784                 mock.patch.object(host.Host, "get_capabilities",
 3785                                   return_value=caps),
 3786                 mock.patch.object(host.Host, 'get_online_cpus',
 3787                                   return_value=set(range(8))),
 3788                 ):
 3789             cfg = drvr._get_guest_config(instance_ref, [],
 3790                                          image_meta, disk_info)
 3791             self.assertIsNone(cfg.cpuset)
 3792             # Test that the pinning is correct and limited to the allowed CPUs
 3793             self.assertEqual(0, cfg.cputune.vcpupin[0].id)
 3794             self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[0].cpuset)
 3795             self.assertEqual(1, cfg.cputune.vcpupin[1].id)
 3796             self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[1].cpuset)
 3797             self.assertEqual(2, cfg.cputune.vcpupin[2].id)
 3798             self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[2].cpuset)
 3799             self.assertEqual(3, cfg.cputune.vcpupin[3].id)
 3800             self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[3].cpuset)
 3801             self.assertIsNotNone(cfg.cpu.numa)
 3802 
 3803             self.assertIsInstance(cfg.cputune.emulatorpin,
 3804                                   vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
 3805             self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset)
 3806 
 3807             for instance_cell, numa_cfg_cell, index in zip(
 3808                     instance_topology.cells,
 3809                     cfg.cpu.numa.cells,
 3810                     range(len(instance_topology.cells))):
 3811                 self.assertEqual(index, numa_cfg_cell.id)
 3812                 self.assertEqual(instance_cell.total_cpus, numa_cfg_cell.cpus)
 3813                 self.assertEqual(instance_cell.memory * units.Ki,
 3814                                  numa_cfg_cell.memory)
 3815 
 3816             allnodes = [cell.id for cell in instance_topology.cells]
 3817             self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
 3818             self.assertEqual("strict", cfg.numatune.memory.mode)
 3819 
 3820             for instance_cell, memnode, index in zip(
 3821                     instance_topology.cells,
 3822                     cfg.numatune.memnodes,
 3823                     range(len(instance_topology.cells))):
 3824                 self.assertEqual(index, memnode.cellid)
 3825                 self.assertEqual([instance_cell.id], memnode.nodeset)
 3826                 self.assertEqual("strict", memnode.mode)
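                  # Taken together, the assertions above describe guest XML of
                  # roughly this shape (exact values depend on the fake host):
                  #     <cputune>
                  #       <vcpupin vcpu='0' cpuset='2-3'/> ...
                  #       <emulatorpin cpuset='2-5'/>
                  #     </cputune>
                  #     <numatune>
                  #       <memory mode='strict' nodeset='1-2'/>
                  #       <memnode cellid='0' mode='strict' nodeset='1'/> ...
                  #     </numatune>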
 3827 
 3828     def test_get_guest_config_numa_host_instance_topo_reordered(self):
 3829         instance_topology = objects.InstanceNUMATopology(cells=[
 3830             objects.InstanceNUMACell(
 3831                 id=3, cpuset=set([0, 1]), pcpuset=set(), memory=1024),
 3832             objects.InstanceNUMACell(
 3833                 id=0, cpuset=set([2, 3]), pcpuset=set(), memory=1024),
 3834         ])
 3835         instance_ref = objects.Instance(**self.test_instance)
 3836         instance_ref.numa_topology = instance_topology
 3837         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 3838         flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
 3839                                 ephemeral_gb=8128, swap=33550336, name='fake',
 3840                                 extra_specs={})
 3841         instance_ref.flavor = flavor
 3842 
 3843         caps = vconfig.LibvirtConfigCaps()
 3844         caps.host = vconfig.LibvirtConfigCapsHost()
 3845         caps.host.cpu = vconfig.LibvirtConfigCPU()
 3846         caps.host.cpu.arch = fields.Architecture.X86_64
 3847         caps.host.topology = fakelibvirt.NUMATopology()
 3848 
 3849         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 3850         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
 3851                                             instance_ref,
 3852                                             image_meta)
 3853 
 3854         with test.nested(
 3855                 mock.patch.object(
 3856                     objects.InstanceNUMATopology, "get_by_instance_uuid",
 3857                     return_value=instance_topology),
 3858                 mock.patch.object(host.Host, 'has_min_version',
 3859                                   return_value=True),
 3860                 mock.patch.object(host.Host, "get_capabilities",
 3861                                   return_value=caps),
 3862                 mock.patch.object(host.Host, 'get_online_cpus',
 3863                                   return_value=set(range(8))),
 3864                 ):
 3865             cfg = drvr._get_guest_config(instance_ref, [],
 3866                                          image_meta, disk_info)
 3867             self.assertIsNone(cfg.cpuset)
 3868             # Test that the pinning is correct and limited to the allowed CPUs
 3869             self.assertEqual(0, cfg.cputune.vcpupin[0].id)
 3870             self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[0].cpuset)
 3871             self.assertEqual(1, cfg.cputune.vcpupin[1].id)
 3872             self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[1].cpuset)
 3873             self.assertEqual(2, cfg.cputune.vcpupin[2].id)
 3874             self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[2].cpuset)
 3875             self.assertEqual(3, cfg.cputune.vcpupin[3].id)
 3876             self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[3].cpuset)
 3877             self.assertIsNotNone(cfg.cpu.numa)
 3878 
 3879             self.assertIsInstance(cfg.cputune.emulatorpin,
 3880                                   vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
 3881             self.assertEqual(set([0, 1, 6, 7]), cfg.cputune.emulatorpin.cpuset)
 3882 
 3883             for index, (instance_cell, numa_cfg_cell) in enumerate(zip(
 3884                     instance_topology.cells,
 3885                     cfg.cpu.numa.cells)):
 3886                 self.assertEqual(index, numa_cfg_cell.id)
 3887                 self.assertEqual(instance_cell.total_cpus, numa_cfg_cell.cpus)
 3888                 self.assertEqual(instance_cell.memory * units.Ki,
 3889                                  numa_cfg_cell.memory)
 3890                 self.assertIsNone(numa_cfg_cell.memAccess)
 3891 
 3892             allnodes = set([cell.id for cell in instance_topology.cells])
 3893             self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset))
 3894             self.assertEqual("strict", cfg.numatune.memory.mode)
 3895 
 3896             for index, (instance_cell, memnode) in enumerate(zip(
 3897                     instance_topology.cells,
 3898                     cfg.numatune.memnodes)):
 3899                 self.assertEqual(index, memnode.cellid)
 3900                 self.assertEqual([instance_cell.id], memnode.nodeset)
 3901                 self.assertEqual("strict", memnode.mode)
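                  # Note that the guest NUMA cell ids are always renumbered
                  # 0..N-1 in the generated XML, while each <memnode> nodeset
                  # still refers to the host cell backing that instance cell
                  # (here cells 3 and 0).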
 3902 
 3903     def test_get_guest_config_numa_host_instance_topo_cpu_pinning(self):
 3904         instance_topology = objects.InstanceNUMATopology(cells=[
 3905             objects.InstanceNUMACell(
 3906                 id=1, cpuset=set(), pcpuset=set([0, 1]), memory=1024,
 3907                 cpu_pinning={0: 24, 1: 25}),
 3908             objects.InstanceNUMACell(
 3909                 id=0, cpuset=set(), pcpuset=set([2, 3]), memory=1024,
 3910                 cpu_pinning={2: 0, 3: 1}),
 3911         ])
 3912         instance_ref = objects.Instance(**self.test_instance)
 3913         instance_ref.numa_topology = instance_topology
 3914         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 3915         flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
 3916                                 ephemeral_gb=8128, swap=33550336, name='fake',
 3917                                 extra_specs={})
 3918         instance_ref.flavor = flavor
 3919 
 3920         caps = vconfig.LibvirtConfigCaps()
 3921         caps.host = vconfig.LibvirtConfigCapsHost()
 3922         caps.host.cpu = vconfig.LibvirtConfigCPU()
 3923         caps.host.cpu.arch = fields.Architecture.X86_64
 3924         caps.host.topology = fakelibvirt.NUMATopology(
 3925             cpu_nodes=4, cpu_sockets=1, cpu_cores=4, cpu_threads=2)
 3926 
 3927         conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 3928         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
 3929                                             instance_ref,
 3930                                             image_meta)
 3931 
 3932         with test.nested(
 3933                 mock.patch.object(
 3934                     objects.InstanceNUMATopology, "get_by_instance_uuid",
 3935                     return_value=instance_topology),
 3936                 mock.patch.object(host.Host, 'has_min_version',
 3937                                   return_value=True),
 3938                 mock.patch.object(host.Host, "get_capabilities",
 3939                                   return_value=caps),
 3940                 mock.patch.object(host.Host, 'get_online_cpus',
 3941                                   return_value=set(range(32))),
 3942                 ):
 3943             cfg = conn._get_guest_config(instance_ref, [],
 3944                                          image_meta, disk_info)
 3945             self.assertIsNone(cfg.cpuset)
 3946             # Test that the pinning is correct and limited to the allowed CPUs
 3947             self.assertEqual(0, cfg.cputune.vcpupin[0].id)
 3948             self.assertEqual(set([24]), cfg.cputune.vcpupin[0].cpuset)
 3949             self.assertEqual(1, cfg.cputune.vcpupin[1].id)
 3950             self.assertEqual(set([25]), cfg.cputune.vcpupin[1].cpuset)
 3951             self.assertEqual(2, cfg.cputune.vcpupin[2].id)
 3952             self.assertEqual(set([0]), cfg.cputune.vcpupin[2].cpuset)
 3953             self.assertEqual(3, cfg.cputune.vcpupin[3].id)
 3954             self.assertEqual(set([1]), cfg.cputune.vcpupin[3].cpuset)
 3955             self.assertIsNotNone(cfg.cpu.numa)
 3956 
 3957             # Emulator must be pinned to union of cfg.cputune.vcpupin[*].cpuset
 3958             self.assertIsInstance(cfg.cputune.emulatorpin,
 3959                                   vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
 3960             self.assertEqual(set([0, 1, 24, 25]),
 3961                              cfg.cputune.emulatorpin.cpuset)
 3962 
 3963             for i, (instance_cell, numa_cfg_cell) in enumerate(zip(
 3964                     instance_topology.cells, cfg.cpu.numa.cells)):
 3965                 self.assertEqual(i, numa_cfg_cell.id)
 3966                 self.assertEqual(instance_cell.total_cpus, numa_cfg_cell.cpus)
 3967                 self.assertEqual(instance_cell.memory * units.Ki,
 3968                                  numa_cfg_cell.memory)
 3969                 self.assertIsNone(numa_cfg_cell.memAccess)
 3970 
 3971             allnodes = set([cell.id for cell in instance_topology.cells])
 3972             self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset))
 3973             self.assertEqual("strict", cfg.numatune.memory.mode)
 3974 
 3975             for i, (instance_cell, memnode) in enumerate(zip(
 3976                     instance_topology.cells, cfg.numatune.memnodes)):
 3977                 self.assertEqual(i, memnode.cellid)
 3978                 self.assertEqual([instance_cell.id], memnode.nodeset)
 3979                 self.assertEqual("strict", memnode.mode)
 3980 
 3981     def test_get_guest_config_numa_host_instance_cpu_mixed(self):
 3982         """Test creating a mixed instance libvirt configuration which has
 3983         the default emulator thread policy, and verify the NUMA topology
 3984         related settings.
 3985         """
 3986         self.flags(cpu_shared_set='2-5,8-29',
 3987                    cpu_dedicated_set='6,7,30,31',
 3988                    group='compute')
 3989 
 3990         instance_topology = objects.InstanceNUMATopology(cells=[
 3991             objects.InstanceNUMACell(
 3992                 id=3, cpuset=set([0, 1]), pcpuset=set([2, 3]), memory=1024,
 3993                 cpu_pinning={2: 30, 3: 31}
 3994             ),
 3995             objects.InstanceNUMACell(
 3996                 id=0, cpuset=set([4, 5, 6]), pcpuset=set([7]), memory=1024,
 3997                 cpu_pinning={7: 6}
 3998             ),
 3999         ])
 4000         instance_ref = objects.Instance(**self.test_instance)
 4001         instance_ref.numa_topology = instance_topology
 4002         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 4003         flavor = objects.Flavor(memory_mb=2048, vcpus=8, root_gb=496,
 4004                                 ephemeral_gb=8128, swap=33550336, name='fake',
 4005                                 extra_specs={})
 4006         instance_ref.flavor = flavor
 4007 
 4008         caps = vconfig.LibvirtConfigCaps()
 4009         caps.host = vconfig.LibvirtConfigCapsHost()
 4010         caps.host.cpu = vconfig.LibvirtConfigCPU()
 4011         caps.host.cpu.arch = fields.Architecture.X86_64
 4012         caps.host.topology = fakelibvirt.NUMATopology(
 4013             cpu_nodes=4, cpu_sockets=1, cpu_cores=4, cpu_threads=2)
 4014         conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 4015         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
 4016                                             instance_ref,
 4017                                             image_meta)
 4018 
 4019         with test.nested(
 4020             mock.patch.object(
 4021                 objects.InstanceNUMATopology, "get_by_instance_uuid",
 4022                 return_value=instance_topology),
 4023             mock.patch.object(host.Host, 'has_min_version', return_value=True),
 4024             mock.patch.object(host.Host, "get_capabilities",
 4025                               return_value=caps),
 4026             mock.patch.object(host.Host, 'get_online_cpus',
 4027                               return_value=set(range(32)))
 4028         ):
 4029             cfg = conn._get_guest_config(instance_ref, [],
 4030                                          image_meta, disk_info)
 4031             self.assertIsNone(cfg.cpuset)
 4032 
 4033             # NOTE(huaqiang): For a mixed instance, the pinned and
 4034             # unpinned CPUs that belong to the same instance NUMA cell
 4035             # are expected to land on the same host NUMA cell: each
 4036             # pinned instance CPU is mapped 1:1 to a dedicated host CPU,
 4037             # while each unpinned CPU floats over that host NUMA cell's
 4038             # shared CPU list. The host NUMA cell's dedicated and shared
 4039             # CPU lists are calculated from a combination of
 4040             # '[compute]cpu_dedicated_set', '[compute]cpu_shared_set',
 4041             # the host NUMA topology and the online CPUs.
 4042             #
 4043             # The first instance NUMA cell fits into the fourth host
 4044             # NUMA cell because both have 'id' 3. Instance CPUs 0 and 1
 4045             # are unpinned, so check that each of them floats over that
 4046             # host NUMA cell's shared CPUs, which are CPUs 24-29.
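                  # As a worked example with the fake topology used here (four
                  # host NUMA cells of 8 CPUs each, so cell 0 = CPUs 0-7 and
                  # cell 3 = CPUs 24-31):
                  #     shared(cell 0)    = {2-5,8-29}  & {0-7}   = {2-5}
                  #     dedicated(cell 0) = {6,7,30,31} & {0-7}   = {6,7}
                  #     shared(cell 3)    = {2-5,8-29}  & {24-31} = {24-29}
                  #     dedicated(cell 3) = {6,7,30,31} & {24-31} = {30,31}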
 4047             self.assertEqual(0, cfg.cputune.vcpupin[0].id)
 4048             self.assertEqual(set(range(24, 30)), cfg.cputune.vcpupin[0].cpuset)
 4049             self.assertEqual(1, cfg.cputune.vcpupin[1].id)
 4050             self.assertEqual(set(range(24, 30)), cfg.cputune.vcpupin[1].cpuset)
 4051             # Check each of the instance NUMA cell's pinned CPUs is pinned to a
 4052             # dedicated CPU from the fourth host NUMA cell.
 4053             self.assertEqual(2, cfg.cputune.vcpupin[2].id)
 4054             self.assertEqual(set([30]), cfg.cputune.vcpupin[2].cpuset)
 4055             self.assertEqual(3, cfg.cputune.vcpupin[3].id)
 4056             self.assertEqual(set([31]), cfg.cputune.vcpupin[3].cpuset)
 4057 
 4058             # Instance CPUs 4-7 belong to the second instance NUMA cell,
 4059             # which fits into host NUMA cell 0. CPUs 4-6 are unpinned, so
 4060             # each of them floats over that cell's shared CPU set, 2-5.
 4061             self.assertEqual(4, cfg.cputune.vcpupin[4].id)
 4062             self.assertEqual(set(range(2, 6)), cfg.cputune.vcpupin[4].cpuset)
 4063             self.assertEqual(5, cfg.cputune.vcpupin[5].id)
 4064             self.assertEqual(set(range(2, 6)), cfg.cputune.vcpupin[5].cpuset)
 4065             self.assertEqual(6, cfg.cputune.vcpupin[6].id)
 4066             self.assertEqual(set(range(2, 6)), cfg.cputune.vcpupin[6].cpuset)
 4067             # Instance CPU 7 is pinned to the host NUMA cell's dedicated CPU 6.
 4068             self.assertEqual(set([6]), cfg.cputune.vcpupin[7].cpuset)
 4069             self.assertIsNotNone(cfg.cpu.numa)
 4070 
 4071             # Check that the emulator thread is pinned to the union of
 4072             # cfg.cputune.vcpupin[*].cpuset
 4073             self.assertIsInstance(cfg.cputune.emulatorpin,
 4074                                   vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
 4075             self.assertEqual(
 4076                 set([2, 3, 4, 5, 6, 24, 25, 26, 27, 28, 29, 30, 31]),
 4077                 cfg.cputune.emulatorpin.cpuset)
 4078 
 4079             for i, (instance_cell, numa_cfg_cell) in enumerate(
 4080                 zip(instance_topology.cells, cfg.cpu.numa.cells)
 4081             ):
 4082                 self.assertEqual(i, numa_cfg_cell.id)
 4083                 self.assertEqual(instance_cell.total_cpus, numa_cfg_cell.cpus)
 4084                 self.assertEqual(instance_cell.memory * units.Ki,
 4085                                  numa_cfg_cell.memory)
 4086                 self.assertIsNone(numa_cfg_cell.memAccess)
 4087 
 4088             allnodes = set([cell.id for cell in instance_topology.cells])
 4089             self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset))
 4090             self.assertEqual("strict", cfg.numatune.memory.mode)
 4091 
 4092             for i, (instance_cell, memnode) in enumerate(
 4093                 zip(instance_topology.cells, cfg.numatune.memnodes)
 4094             ):
 4095                 self.assertEqual(i, memnode.cellid)
 4096                 self.assertEqual([instance_cell.id], memnode.nodeset)
 4097 
 4098     def test_get_guest_config_numa_host_instance_cpu_mixed_isolated_emu(self):
 4099         """Test creating a mixed instance libvirt configuration which
 4100         has an ISOLATE emulator thread policy, and verify the NUMA
 4101         topology related settings.
 4102         """
 4103         self.flags(cpu_shared_set='2-5,8-29',
 4104                    cpu_dedicated_set='6,7,30,31',
 4105                    group='compute')
 4106         instance_topology = objects.InstanceNUMATopology(
 4107             emulator_threads_policy=fields.CPUEmulatorThreadsPolicy.ISOLATE,
 4108             cells=[objects.InstanceNUMACell(
 4109                 id=0, cpuset=set([0, 1]), pcpuset=set([2]), memory=1024,
 4110                 cpu_pinning={2: 6},
 4111                 cpuset_reserved=set([7]))])
 4112         instance_ref = objects.Instance(**self.test_instance)
 4113         instance_ref.numa_topology = instance_topology
 4114         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 4115         flavor = objects.Flavor(memory_mb=2048, vcpus=8, root_gb=496,
 4116                                 ephemeral_gb=8128, swap=33550336, name='fake',
 4117                                 extra_specs={})
 4118         instance_ref.flavor = flavor
 4119 
 4120         caps = vconfig.LibvirtConfigCaps()
 4121         caps.host = vconfig.LibvirtConfigCapsHost()
 4122         caps.host.cpu = vconfig.LibvirtConfigCPU()
 4123         caps.host.cpu.arch = fields.Architecture.X86_64
 4124         caps.host.topology = fakelibvirt.NUMATopology(
 4125             cpu_nodes=4, cpu_sockets=1, cpu_cores=4, cpu_threads=2)
 4126 
 4127         conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 4128         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
 4129                                             instance_ref,
 4130                                             image_meta)
 4131 
 4132         with test.nested(
 4133             mock.patch.object(
 4134                 objects.InstanceNUMATopology, "get_by_instance_uuid",
 4135                 return_value=instance_topology),
 4136             mock.patch.object(host.Host, 'has_min_version',
 4137                               return_value=True),
 4138             mock.patch.object(host.Host, "get_capabilities",
 4139                               return_value=caps),
 4140             mock.patch.object(host.Host, 'get_online_cpus',
 4141                               return_value=set(range(32))),
 4142         ):
 4143             cfg = conn._get_guest_config(instance_ref, [],
 4144                                          image_meta, disk_info)
 4145             self.assertIsNone(cfg.cpuset)
 4146             # NOTE(huaqiang): The instance NUMA cell fits into the first
 4147             # host NUMA cell; the two objects are matched on their 'id'
 4148             # fields. CPUs 2-5 form that host NUMA cell's shared CPU set;
 4149             # check that every unpinned instance CPU floats over it.
 4150             self.assertEqual(0, cfg.cputune.vcpupin[0].id)
 4151             self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.vcpupin[0].cpuset)
 4152             self.assertEqual(1, cfg.cputune.vcpupin[1].id)
 4153             self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.vcpupin[1].cpuset)
 4154             # Check that instance CPU 2, a pinned CPU, is pinned to a
 4155             # dedicated CPU of the host's first NUMA cell.
 4156             self.assertEqual(2, cfg.cputune.vcpupin[2].id)
 4157             self.assertEqual(set([6]), cfg.cputune.vcpupin[2].cpuset)
 4158             self.assertIsNotNone(cfg.cpu.numa)
 4159 
 4160             # With an ISOLATE policy, the emulator thread will be pinned
 4161             # to the reserved host CPU.
 4162             self.assertIsInstance(cfg.cputune.emulatorpin,
 4163                                   vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
 4164             self.assertEqual(set([7]), cfg.cputune.emulatorpin.cpuset)
 4165 
 4166             for i, (instance_cell, numa_cfg_cell) in enumerate(
 4167                 zip(instance_topology.cells, cfg.cpu.numa.cells)
 4168             ):
 4169                 self.assertEqual(i, numa_cfg_cell.id)
 4170                 self.assertEqual(instance_cell.total_cpus, numa_cfg_cell.cpus)
 4171                 self.assertEqual(instance_cell.memory * units.Ki,
 4172                                  numa_cfg_cell.memory)
 4173                 self.assertIsNone(numa_cfg_cell.memAccess)
 4174 
 4175             allnodes = set([cell.id for cell in instance_topology.cells])
 4176             self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset))
 4177             self.assertEqual("strict", cfg.numatune.memory.mode)
 4178 
 4179             for i, (instance_cell, memnode) in enumerate(
 4180                 zip(instance_topology.cells, cfg.numatune.memnodes)
 4181             ):
 4182                 self.assertEqual(i, memnode.cellid)
 4183                 self.assertEqual([instance_cell.id], memnode.nodeset)
 4184 
 4185     def test_get_guest_config_numa_host_instance_cpu_mixed_realtime(self):
 4186         """Test creating a mixed instance libvirt configuration, as
 4187         requested through the 'hw:cpu_realtime_mask' and 'hw:cpu_realtime'
 4188         extra specs, and verify the NUMA topology and real-time settings.
 4189         """
 4190         self.flags(cpu_shared_set='2-5,8-29',
 4191                    cpu_dedicated_set='6,7,30,31',
 4192                    group='compute')
 4193 
 4194         instance_topology = objects.InstanceNUMATopology(
 4195             cells=[
 4196                 objects.InstanceNUMACell(
 4197                     id=0, cpuset=set([2]), pcpuset=set([0, 1]),
 4198                     cpu_pinning={0: 6, 1: 7},
 4199                     memory=1024, pagesize=2048),
 4200                 objects.InstanceNUMACell(
 4201                     id=3, cpuset=set([3]), pcpuset=set([4, 5]),
 4202                     cpu_pinning={4: 30, 5: 31},
 4203                     memory=1024, pagesize=2048)])
 4204         instance_ref = objects.Instance(**self.test_instance)
 4205         instance_ref.numa_topology = instance_topology
 4206         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 4207         # NOTE(huaqiang): The libvirt driver takes the real-time CPU list
 4208         # from the flavor extra spec 'hw:cpu_realtime_mask'. For a mixed
 4209         # instance with real-time CPUs, the dedicated CPUs and real-time
 4210         # CPUs are the same CPU set; this is checked in the API layer.
 4211         flavor = objects.Flavor(
 4212             vcpus=6, memory_mb=2048, root_gb=496,
 4213             ephemeral_gb=8128, swap=33550336, name='fake',
 4214             extra_specs={
 4215                 "hw:numa_nodes": "2",
 4216                 "hw:cpu_realtime": "yes",
 4217                 "hw:cpu_policy": "mixed",
 4218                 "hw:cpu_realtime_mask": "^2-3"
 4219             })
 4220         instance_ref.flavor = flavor
 4221 
 4222         caps = vconfig.LibvirtConfigCaps()
 4223         caps.host = vconfig.LibvirtConfigCapsHost()
 4224         caps.host.cpu = vconfig.LibvirtConfigCPU()
 4225         caps.host.cpu.arch = fields.Architecture.X86_64
 4226         caps.host.topology = fakelibvirt.NUMATopology(
 4227             cpu_nodes=4, cpu_sockets=1, cpu_cores=4, cpu_threads=2)
 4228         for i, cell in enumerate(caps.host.topology.cells):
 4229             cell.mempages = fakelibvirt.create_mempages(
 4230                 [(4, 1024 * i), (2048, i)])
 4231 
 4232         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 4233         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
 4234                                             instance_ref,
 4235                                             image_meta)
 4236 
 4237         with test.nested(
 4238             mock.patch.object(
 4239                 objects.InstanceNUMATopology, "get_by_instance_uuid",
 4240                 return_value=instance_topology),
 4241             mock.patch.object(host.Host, 'has_min_version', return_value=True),
 4242             mock.patch.object(host.Host, "get_capabilities",
 4243                               return_value=caps),
 4244             mock.patch.object(host.Host, 'get_online_cpus',
 4245                               return_value=set(range(32))),
 4246         ):
 4247             cfg = drvr._get_guest_config(instance_ref, [],
 4248                                          image_meta, disk_info)
 4249 
 4250             for instance_cell, numa_cfg_cell, index in zip(
 4251                 instance_topology.cells,
 4252                 cfg.cpu.numa.cells,
 4253                 range(len(instance_topology.cells))
 4254             ):
 4255                 self.assertEqual(index, numa_cfg_cell.id)
 4256                 self.assertEqual(instance_cell.total_cpus, numa_cfg_cell.cpus)
 4257                 self.assertEqual(instance_cell.memory * units.Ki,
 4258                                  numa_cfg_cell.memory)
 4259                 self.assertEqual("shared", numa_cfg_cell.memAccess)
 4260 
 4261             allnodes = [cell.id for cell in instance_topology.cells]
 4262             self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
 4263             self.assertEqual("strict", cfg.numatune.memory.mode)
 4264 
 4265             for instance_cell, memnode, index in zip(
 4266                 instance_topology.cells,
 4267                 cfg.numatune.memnodes,
 4268                 range(len(instance_topology.cells))
 4269             ):
 4270                 self.assertEqual(index, memnode.cellid)
 4271                 self.assertEqual([instance_cell.id], memnode.nodeset)
 4272                 self.assertEqual("strict", memnode.mode)
 4273 
 4274             # NOTE(huaqiang): The first instance NUMA cell fits into the
 4275             # first host NUMA cell. In this host NUMA cell, CPUs 2-5 form
 4276             # the shared CPU set and CPUs 6-7 are the dedicated CPUs.
 4277             #
 4278             # Check that instance CPUs 0 and 1 are pinned 1:1 to the host
 4279             # NUMA cell's dedicated CPUs.
 4280             self.assertEqual(set([6]), cfg.cputune.vcpupin[0].cpuset)
 4281             self.assertEqual(set([7]), cfg.cputune.vcpupin[1].cpuset)
 4282             # Check that CPU 2, an unpinned CPU, floats over this host
 4283             # NUMA cell's shared CPU set.
 4284             self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.vcpupin[2].cpuset)
 4285 
 4286             # The second instance NUMA cell fits into the fourth host
 4287             # NUMA cell because they share the same 'id'. Host CPUs 24-29
 4288             # form the shared CPU set; host CPUs 30-31 are dedicated CPUs.
 4289             #
 4290             # Check that CPU 3 floats over the shared CPU set.
 4291             self.assertEqual(set([24, 25, 26, 27, 28, 29]),
 4292                              cfg.cputune.vcpupin[3].cpuset)
 4293             # Check that CPUs 4 and 5 are pinned to host dedicated CPUs.
 4294             self.assertEqual(set([30]), cfg.cputune.vcpupin[4].cpuset)
 4295             self.assertEqual(set([31]), cfg.cputune.vcpupin[5].cpuset)
 4296 
 4297             # Check that the dedicated (real-time) host CPUs are excluded
 4298             # from the host CPU list the emulator floats over.
 4299             self.assertEqual(set([2, 3, 4, 5, 24, 25, 26, 27, 28, 29]),
 4300                              cfg.cputune.emulatorpin.cpuset)
 4301 
 4302             # Check the real-time scheduler is set, and all real-time CPUs are
 4303             # in the vcpusched[0].vcpus list. In nova, the real-time scheduler
 4304             # is always set to 'fifo', and there is always only one element in
 4305             # cfg.cputune.vcpusched.
 4306             self.assertEqual(1, len(cfg.cputune.vcpusched))
 4307             self.assertEqual("fifo", cfg.cputune.vcpusched[0].scheduler)
 4308             self.assertEqual(set([0, 1, 4, 5]), cfg.cputune.vcpusched[0].vcpus)
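                  # In domain XML terms this corresponds roughly to
                  #     <vcpusched vcpus='0-1,4-5' scheduler='fifo' .../>
                  # (the scheduler priority is not asserted here).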
 4309 
 4310     def test_get_guest_config_numa_host_mempages_shared(self):
 4311         self.flags(cpu_shared_set='2-5', cpu_dedicated_set=None,
 4312                    group='compute')
 4313 
 4314         instance_topology = objects.InstanceNUMATopology(cells=[
 4315             objects.InstanceNUMACell(
 4316                 id=1, cpuset=set([0, 1]), pcpuset=set(), memory=1024,
 4317                 pagesize=2048),
 4318             objects.InstanceNUMACell(
 4319                 id=2, cpuset=set([2, 3]), pcpuset=set(), memory=1024,
 4320                 pagesize=2048),
 4321         ])
 4322         instance_ref = objects.Instance(**self.test_instance)
 4323         instance_ref.numa_topology = instance_topology
 4324         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 4325         flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
 4326                                 ephemeral_gb=8128, swap=33550336, name='fake',
 4327                                 extra_specs={})
 4328         instance_ref.flavor = flavor
 4329 
 4330         caps = vconfig.LibvirtConfigCaps()
 4331         caps.host = vconfig.LibvirtConfigCapsHost()
 4332         caps.host.cpu = vconfig.LibvirtConfigCPU()
 4333         caps.host.cpu.arch = fields.Architecture.X86_64
 4334         caps.host.topology = fakelibvirt.NUMATopology()
 4335         for i, cell in enumerate(caps.host.topology.cells):
 4336             cell.mempages = fakelibvirt.create_mempages(
 4337                 [(4, 1024 * i), (2048, i)])
 4338 
 4339         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 4340         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
 4341                                             instance_ref,
 4342                                             image_meta)
 4343 
 4344         with test.nested(
 4345                 mock.patch.object(
 4346                     objects.InstanceNUMATopology, "get_by_instance_uuid",
 4347                     return_value=instance_topology),
 4348                 mock.patch.object(host.Host, 'has_min_version',
 4349                                   return_value=True),
 4350                 mock.patch.object(host.Host, "get_capabilities",
 4351                                   return_value=caps),
 4352                 mock.patch.object(host.Host, 'get_online_cpus',
 4353                                   return_value=set([2, 3, 4, 5])),
 4354                 ):
 4355             cfg = drvr._get_guest_config(instance_ref, [],
 4356                                          image_meta, disk_info)
 4357 
 4358             for instance_cell, numa_cfg_cell, index in zip(
 4359                     instance_topology.cells,
 4360                     cfg.cpu.numa.cells,
 4361                     range(len(instance_topology.cells))):
 4362                 self.assertEqual(index, numa_cfg_cell.id)
 4363                 self.assertEqual(instance_cell.total_cpus, numa_cfg_cell.cpus)
 4364                 self.assertEqual(instance_cell.memory * units.Ki,
 4365                                  numa_cfg_cell.memory)
 4366                 self.assertEqual("shared", numa_cfg_cell.memAccess)
 4367 
 4368             allnodes = [cell.id for cell in instance_topology.cells]
 4369             self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
 4370             self.assertEqual("strict", cfg.numatune.memory.mode)
 4371 
 4372             for instance_cell, memnode, index in zip(
 4373                     instance_topology.cells,
 4374                     cfg.numatune.memnodes,
 4375                     range(len(instance_topology.cells))):
 4376                 self.assertEqual(index, memnode.cellid)
 4377                 self.assertEqual([instance_cell.id], memnode.nodeset)
 4378                 self.assertEqual("strict", memnode.mode)
 4379 
 4380             self.assertEqual(0, len(cfg.cputune.vcpusched))
 4381             self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset)
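                  # The 2048 KiB page size requested above shows up in the
                  # guest XML roughly as
                  #     <memoryBacking>
                  #       <hugepages><page size='2048' unit='KiB' .../></hugepages>
                  #     </memoryBacking>
                  # while memAccess='shared' on each <cell> is what the
                  # "shared" assertions above check.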
 4382 
 4383     def test_get_guest_config_numa_host_instance_cpu_pinning_realtime(self):
 4384         self.flags(cpu_shared_set=None, cpu_dedicated_set='4-7',
 4385                    group='compute')
 4386 
 4387         instance_topology = objects.InstanceNUMATopology(cells=[
 4388             objects.InstanceNUMACell(
 4389                 id=2, cpuset=set(), pcpuset=set([0, 1]),
 4390                 cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
 4391                 cpu_pinning={0: 4, 1: 5},
 4392                 memory=1024, pagesize=2048),
 4393             objects.InstanceNUMACell(
 4394                 id=3, cpuset=set(), pcpuset=set([2, 3]),
 4395                 cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
 4396                 cpu_pinning={2: 6, 3: 7},
 4397                 memory=1024, pagesize=2048),
 4398         ])
 4399         instance_ref = objects.Instance(**self.test_instance)
 4400         instance_ref.numa_topology = instance_topology
 4401         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 4402         flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
 4403                                 ephemeral_gb=8128, swap=33550336, name='fake',
 4404                                 extra_specs={
 4405                                     "hw:numa_nodes": "2",
 4406                                     "hw:cpu_realtime": "yes",
 4407                                     "hw:cpu_policy": "dedicated",
 4408                                     "hw:cpu_realtime_mask": "^0-1"
 4409                                 })
 4410         instance_ref.flavor = flavor
 4411 
 4412         caps = vconfig.LibvirtConfigCaps()
 4413         caps.host = vconfig.LibvirtConfigCapsHost()
 4414         caps.host.cpu = vconfig.LibvirtConfigCPU()
 4415         caps.host.cpu.arch = fields.Architecture.X86_64
 4416         caps.host.topology = fakelibvirt.NUMATopology()
 4417         for i, cell in enumerate(caps.host.topology.cells):
 4418             cell.mempages = fakelibvirt.create_mempages(
 4419                 [(4, 1024 * i), (2048, i)])
 4420 
 4421         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 4422         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
 4423                                             instance_ref,
 4424                                             image_meta)
 4425 
 4426         with test.nested(
 4427                 mock.patch.object(
 4428                     objects.InstanceNUMATopology, "get_by_instance_uuid",
 4429                     return_value=instance_topology),
 4430                 mock.patch.object(host.Host, 'has_min_version',
 4431                                   return_value=True),
 4432                 mock.patch.object(host.Host, "get_capabilities",
 4433                                   return_value=caps),
 4434                 mock.patch.object(host.Host, 'get_online_cpus',
 4435                                   return_value=set(range(8))),
 4436                 ):
 4437             cfg = drvr._get_guest_config(instance_ref, [],
 4438                                          image_meta, disk_info)
 4439 
 4440             for instance_cell, numa_cfg_cell, index in zip(
 4441                     instance_topology.cells,
 4442                     cfg.cpu.numa.cells,
 4443                     range(len(instance_topology.cells))):
 4444                 self.assertEqual(index, numa_cfg_cell.id)
 4445                 self.assertEqual(instance_cell.total_cpus, numa_cfg_cell.cpus)
 4446                 self.assertEqual(instance_cell.memory * units.Ki,
 4447                                  numa_cfg_cell.memory)
 4448                 self.assertEqual("shared", numa_cfg_cell.memAccess)
 4449 
 4450             allnodes = [cell.id for cell in instance_topology.cells]
 4451             self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
 4452             self.assertEqual("strict", cfg.numatune.memory.mode)
 4453 
 4454             for instance_cell, memnode, index in zip(
 4455                     instance_topology.cells,
 4456                     cfg.numatune.memnodes,
 4457                     range(len(instance_topology.cells))):
 4458                 self.assertEqual(index, memnode.cellid)
 4459                 self.assertEqual([instance_cell.id], memnode.nodeset)
 4460                 self.assertEqual("strict", memnode.mode)
 4461 
 4462             self.assertEqual(1, len(cfg.cputune.vcpusched))
 4463             self.assertEqual("fifo", cfg.cputune.vcpusched[0].scheduler)
 4464 
 4465             self.assertEqual(set([4]), cfg.cputune.vcpupin[0].cpuset)
 4466             self.assertEqual(set([5]), cfg.cputune.vcpupin[1].cpuset)
 4467             self.assertEqual(set([6]), cfg.cputune.vcpupin[2].cpuset)
 4468             self.assertEqual(set([7]), cfg.cputune.vcpupin[3].cpuset)
 4469 
 4470             # We ensure that the emulator threads are pinned to host
 4471             # CPUs 4-5, which host the "normal" (non-realtime) vCPUs 0-1
 4472             self.assertEqual(set([4, 5]), cfg.cputune.emulatorpin.cpuset)
 4473 
 4474             # We ensure that the realtime vCPUs are 2-3, which are
 4475             # pinned to host CPUs 6 and 7
 4476             self.assertEqual(set([2, 3]), cfg.cputune.vcpusched[0].vcpus)
 4477 
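          # Editor's sketch (hypothetical helper, not Nova code): one way to
          # expand an exclusion-style realtime mask such as the "^0-1" used
          # above. Every vCPU not listed after the "^" is treated as
          # realtime, so for 4 vCPUs and "^0-1" this yields {2, 3}, matching
          # the assertion in the previous test. Masks containing inclusive
          # ranges are not handled by this sketch.
          @staticmethod
          def _example_realtime_vcpus(total_vcpus, mask):
              excluded = set()
              for part in mask.lstrip('^').split(','):
                  if '-' in part:
                      start, end = part.split('-')
                      excluded.update(range(int(start), int(end) + 1))
                  else:
                      excluded.add(int(part))
              return set(range(total_vcpus)) - excluded
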
 4478     def test_get_guest_config_numa_host_instance_isolated_emulthreads(self):
 4479         self.flags(cpu_shared_set=None, cpu_dedicated_set='4-8',
 4480                    group='compute')
 4481 
 4482         instance_topology = objects.InstanceNUMATopology(
 4483             emulator_threads_policy=(
 4484                 fields.CPUEmulatorThreadsPolicy.ISOLATE),
 4485             cells=[
 4486                 objects.InstanceNUMACell(
 4487                     id=0, cpuset=set(), pcpuset=set([0, 1]),
 4488                     memory=1024, pagesize=2048,
 4489                     cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
 4490                     cpu_pinning={0: 4, 1: 5},
 4491                     cpuset_reserved=set([6])),
 4492                 objects.InstanceNUMACell(
 4493                     id=1, cpuset=set(), pcpuset=set([2, 3]),
 4494                     memory=1024, pagesize=2048,
 4495                     cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
 4496                     cpu_pinning={2: 7, 3: 8}),
 4497             ])
 4498 
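              # With the ISOLATE emulator-threads policy, cell 0 reserves
              # host CPU 6 (cpuset_reserved) exclusively for the emulator
              # threads, while the dedicated vCPUs are pinned 1:1 to host
              # CPUs 4, 5, 7 and 8.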
 4499         instance_ref = objects.Instance(**self.test_instance)
 4500         instance_ref.numa_topology = instance_topology
 4501         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
 4502 
 4503         caps = vconfig.LibvirtConfigCaps()
 4504         caps.host = vconfig.LibvirtConfigCapsHost()
 4505         caps.host.cpu = vconfig.LibvirtConfigCPU()
 4506         caps.host.cpu.arch = fields.Architecture.X86_64
 4507         caps.host.topology = fakelibvirt.NUMATopology()
 4508 
 4509         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 4510         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
 4511                                             instance_ref, image_meta)
 4512 
 4513         with test.nested(
 4514                 mock.patch.object(
 4515                     objects.InstanceNUMATopology, "get_by_instance_uuid",
 4516                     return_value=instance_topology),
 4517                 mock.patch.object(host.Host, 'has_min_version',
 4518                                   return_value=True),
 4519                 mock.patch.object(host.Host, "get_capabilities",
 4520                                   return_value=caps),
 4521                 mock.patch.object(host.Host, 'get_online_cpus',
 4522                                   return_value=set(range(10))),
 4523                 ):
 4524             cfg = drvr._get_guest_config(instance_ref, [],
 4525                                          image_meta, disk_info)
 4526 
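                  # The emulator threads must be pinned to the reserved host
                  # CPU 6, while each dedicated vCPU keeps its 1:1 pin from
                  # the cpu_pinning mapping above.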
 4527             self.assertEqual(set([6]), cfg.cputune.emulatorpin.cpuset)
 4528             self.assertEqual(set([4]), cfg.cputune.vcpupin[0].cpuset)
 4529             self.assertEqual(set([5]), cfg.cputune.vcpupin[1].cpuset)
 4530             self.assertEqual(set([7]), cfg.cputune.vcpupin[2].cpuset)
 4531             self.assertEqual(set([8]), cfg.cputune.vcpupin[3].cpuset)
 4532 
 4533     def test_get_guest_config_numa_host_instance_shared_emulthreads_err(
 4534             self):
 4535         self.flags(cpu_shared_set='48-50', cpu_dedicated_set='4-8',
 4536                    group='compute')
 4537 
 4538         instance_topology = objects.InstanceNUMATopology(
 4539             emulator_threads_policy=(
 4540                 fields.CPUEmulatorThreadsPolicy.SHARE),
 4541             cells=[
 4542                 objects.InstanceNUMACell(
 4543