"Fossies" - the Fresh Open Source Software Archive

Member "nova-22.0.1/nova/tests/functional/libvirt/test_pci_sriov_servers.py" (19 Nov 2020, 31506 Bytes) of package /linux/misc/openstack/nova-22.0.1.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) Python source code syntax highlighting (style: standard) with prefixed line numbers. Alternatively you can here view or download the uninterpreted source code file. See also the latest Fossies "Diffs" side-by-side code changes report for "test_pci_sriov_servers.py": 22.0.0_vs_22.0.1.

# Copyright (C) 2016 Red Hat, Inc
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import copy
import ddt
import fixtures
import mock

from lxml import etree
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils

import nova
from nova import context
from nova import objects
from nova.objects import fields
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional.libvirt import base
from nova.tests.unit import fake_notifier
from nova.tests.unit.virt.libvirt import fakelibvirt

CONF = cfg.CONF
LOG = logging.getLogger(__name__)


class _PCIServersTestBase(base.ServersTestBase):

    ADDITIONAL_FILTERS = ['NUMATopologyFilter', 'PciPassthroughFilter']

    def setUp(self):
        self.flags(passthrough_whitelist=self.PCI_PASSTHROUGH_WHITELIST,
                   alias=self.PCI_ALIAS,
                   group='pci')

        super(_PCIServersTestBase, self).setUp()

        # Mock the 'PciPassthroughFilter' filter, as most tests need to
        # inspect this
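        # The mock wraps the real host_passes() via mock.Mock(wraps=...), so
        # scheduling behaviour is unchanged while tests can still assert
        # whether (and how often) the filter was invoked.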
   53         host_manager = self.scheduler.manager.driver.host_manager
   54         pci_filter_class = host_manager.filter_cls_map['PciPassthroughFilter']
   55         host_pass_mock = mock.Mock(wraps=pci_filter_class().host_passes)
   56         self.mock_filter = self.useFixture(fixtures.MockPatch(
   57             'nova.scheduler.filters.pci_passthrough_filter'
   58             '.PciPassthroughFilter.host_passes',
   59             side_effect=host_pass_mock)).mock
   60 
   61 
   62 class SRIOVServersTest(_PCIServersTestBase):
   63 
   64     microversion = '2.48'
   65 
   66     VFS_ALIAS_NAME = 'vfs'
   67     PFS_ALIAS_NAME = 'pfs'
   68 
   69     PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(x) for x in (
   70         {
   71             'vendor_id': fakelibvirt.PCI_VEND_ID,
   72             'product_id': fakelibvirt.PF_PROD_ID,
   73             'physical_network': 'physnet4',
   74         },
   75         {
   76             'vendor_id': fakelibvirt.PCI_VEND_ID,
   77             'product_id': fakelibvirt.VF_PROD_ID,
   78             'physical_network': 'physnet4',
   79         },
   80     )]
   81     # PFs will be removed from pools unless they are specifically
   82     # requested, so we explicitly request them with the 'device_type'
   83     # attribute
   84     PCI_ALIAS = [jsonutils.dumps(x) for x in (
   85         {
   86             'vendor_id': fakelibvirt.PCI_VEND_ID,
   87             'product_id': fakelibvirt.PF_PROD_ID,
   88             'device_type': fields.PciDeviceType.SRIOV_PF,
   89             'name': PFS_ALIAS_NAME,
   90         },
   91         {
   92             'vendor_id': fakelibvirt.PCI_VEND_ID,
   93             'product_id': fakelibvirt.VF_PROD_ID,
   94             'name': VFS_ALIAS_NAME,
   95         },
   96     )]
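    # For reference, the equivalent operator configuration in nova.conf would
    # look roughly like the following (a hedged sketch: the VF pair
    # '8086:1515' is confirmed by the binding:profile assertion later in this
    # file, while the PF product ID '1528' is assumed from fakelibvirt's fake
    # Intel X540 devices):
    #
    #   [pci]
    #   passthrough_whitelist = {"vendor_id": "8086", "product_id": "1528",
    #                            "physical_network": "physnet4"}
    #   passthrough_whitelist = {"vendor_id": "8086", "product_id": "1515",
    #                            "physical_network": "physnet4"}
    #   alias = {"vendor_id": "8086", "product_id": "1528",
    #            "device_type": "type-PF", "name": "pfs"}
    #   alias = {"vendor_id": "8086", "product_id": "1515", "name": "vfs"}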

    def setUp(self):
        super().setUp()

        # The ultimate base class, _IntegratedTestBase, uses NeutronFixture,
        # but we need a somewhat smarter neutron fixture for these tests.
        # Applying the new fixture here means that we re-stub what the
        # previous neutron fixture already stubbed.
        self.neutron = self.useFixture(base.LibvirtNeutronFixture(self))

    def _disable_sriov_in_pf(self, pci_info):
        # Find the PF, regenerate its XML without the 'virt_functions'
        # capability, and collect all of its VFs for deletion
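        # For context, a PF's libvirt node-device XML advertises its VFs via
        # a <capability type='virt_functions'> element, while each VF points
        # back at its parent through <capability type='phys_function'>; a
        # sketch of the assumed shape being stripped here:
        #
        #   <capability type='virt_functions'>
        #     <address domain='0x0000' bus='0x81' slot='0x00' function='0x1'/>
        #   </capability>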
        vfs_to_delete = []

        for device_name, device in pci_info.devices.items():
            if 'virt_functions' in device.pci_device:
                device.generate_xml(skip_capability=True)
            elif 'phys_function' in device.pci_device:
                vfs_to_delete.append(device_name)

        for device in vfs_to_delete:
            del pci_info.devices[device]

    def test_create_server_with_VF(self):
        """Create a server with an SR-IOV VF-type PCI device."""

        pci_info = fakelibvirt.HostPCIDevicesInfo()
        self.start_compute(pci_info=pci_info)

        # create a server
        extra_spec = {"pci_passthrough:alias": "%s:1" % self.VFS_ALIAS_NAME}
        flavor_id = self._create_flavor(extra_spec=extra_spec)
        self._create_server(flavor_id=flavor_id, networks='none')

        # ensure the filter was called
        self.assertTrue(self.mock_filter.called)

    def test_create_server_with_PF(self):
        """Create a server with an SR-IOV PF-type PCI device."""

        pci_info = fakelibvirt.HostPCIDevicesInfo()
        self.start_compute(pci_info=pci_info)

        # create a server
        extra_spec = {"pci_passthrough:alias": "%s:1" % self.PFS_ALIAS_NAME}
        flavor_id = self._create_flavor(extra_spec=extra_spec)
        self._create_server(flavor_id=flavor_id, networks='none')

        # ensure the filter was called
        self.assertTrue(self.mock_filter.called)

    def test_create_server_with_PF_no_VF(self):
        """Create a server with a PF and ensure the VFs are then reserved."""

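        # When a PF is claimed, nova's PCI tracker marks the PF's child VFs
        # unavailable as well (and, conversely, claiming a VF reserves its
        # parent PF), which is the behaviour this test and the next exercise.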
        pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=4)
        self.start_compute(pci_info=pci_info)

        # create a server using the PF
        extra_spec_pfs = {"pci_passthrough:alias": f"{self.PFS_ALIAS_NAME}:1"}
        flavor_id_pfs = self._create_flavor(extra_spec=extra_spec_pfs)
        self._create_server(flavor_id=flavor_id_pfs, networks='none')

        # now attempt to build another server, this time using a VF; this
        # should fail because the parent PF is already consumed by an instance
        extra_spec_vfs = {"pci_passthrough:alias": f"{self.VFS_ALIAS_NAME}:1"}
        flavor_id_vfs = self._create_flavor(extra_spec=extra_spec_vfs)
        self._create_server(
            flavor_id=flavor_id_vfs, networks='none', expected_state='ERROR',
        )

    def test_create_server_with_VF_no_PF(self):
        """Create a server with a VF and ensure the PF is then reserved."""

        pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=4)
        self.start_compute(pci_info=pci_info)

        # create a server using the VF
        extra_spec_vfs = {'pci_passthrough:alias': f'{self.VFS_ALIAS_NAME}:1'}
        flavor_id_vfs = self._create_flavor(extra_spec=extra_spec_vfs)
        self._create_server(flavor_id=flavor_id_vfs, networks='none')

        # now attempt to build another server, this time using the PF; this
        # should fail because one of its VFs is already consumed by an
        # instance
        extra_spec_pfs = {'pci_passthrough:alias': f'{self.PFS_ALIAS_NAME}:1'}
        flavor_id_pfs = self._create_flavor(extra_spec=extra_spec_pfs)
        self._create_server(
            flavor_id=flavor_id_pfs, networks='none', expected_state='ERROR',
        )

    def test_create_server_with_neutron(self):
        """Create an instance using a neutron-provisioned SR-IOV VIF."""

        pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=2)

        orig_create = nova.virt.libvirt.guest.Guest.create

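        # fake_create below asserts that the generated guest XML wires the VF
        # into the instance as an SR-IOV <interface>; a sketch of the element
        # shape being inspected (assumed, based on libvirt's hostdev-style
        # interface format):
        #
        #   <interface type='hostdev' managed='yes'>
        #     <source>
        #       <address type='pci' domain='0x0000' bus='0x81'
        #                slot='0x00' function='0x2'/>
        #     </source>
        #   </interface>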
        def fake_create(cls, xml, host):
            tree = etree.fromstring(xml)
            elem = tree.find('./devices/interface/source/address')

            # compare address
            expected = ('0x81', '0x00', '0x2')
            actual = (
                elem.get('bus'), elem.get('slot'), elem.get('function'),
            )
            self.assertEqual(expected, actual)

            return orig_create(xml, host)

        self.stub_out(
            'nova.virt.libvirt.guest.Guest.create',
            fake_create,
        )

        self.start_compute(pci_info=pci_info)

        # create the port
        self.neutron.create_port({'port': self.neutron.network_4_port_1})

        # ensure the binding details are currently unset
        port = self.neutron.show_port(
            base.LibvirtNeutronFixture.network_4_port_1['id'],
        )['port']
        self.assertNotIn('binding:profile', port)

        # create a server using the VF via neutron
        flavor_id = self._create_flavor()
        self._create_server(
            flavor_id=flavor_id,
            networks=[
                {'port': base.LibvirtNeutronFixture.network_4_port_1['id']},
            ],
        )

        # ensure the binding details sent to "neutron" were correct
        port = self.neutron.show_port(
            base.LibvirtNeutronFixture.network_4_port_1['id'],
        )['port']
        self.assertIn('binding:profile', port)
        self.assertEqual(
            {
                'pci_vendor_info': '8086:1515',
                'pci_slot': '0000:81:00.2',
                'physical_network': 'physnet4',
            },
            port['binding:profile'],
        )

    def test_get_server_diagnostics_server_with_VF(self):
        """Ensure server diagnostics include info on VF-type PCI devices."""

        pci_info = fakelibvirt.HostPCIDevicesInfo()
        self.start_compute(pci_info=pci_info)

        # create the SR-IOV port
        self.neutron.create_port({'port': self.neutron.network_4_port_1})

        # create a server using the VF and multiple networks
        extra_spec = {'pci_passthrough:alias': f'{self.VFS_ALIAS_NAME}:1'}
        flavor_id = self._create_flavor(extra_spec=extra_spec)
        server = self._create_server(
            flavor_id=flavor_id,
            networks=[
                {'uuid': base.LibvirtNeutronFixture.network_1['id']},
                {'port': base.LibvirtNeutronFixture.network_4_port_1['id']},
            ],
        )

        # now check the server diagnostics to ensure the VF-type PCI device is
        # attached
        diagnostics = self.admin_api.get_server_diagnostics(
            server['id']
        )

        self.assertEqual(
            base.LibvirtNeutronFixture.network_1_port_2['mac_address'],
            diagnostics['nic_details'][0]['mac_address'],
        )
        self.assertIsNotNone(diagnostics['nic_details'][0]['tx_packets'])

        self.assertEqual(
            base.LibvirtNeutronFixture.network_4_port_1['mac_address'],
            diagnostics['nic_details'][1]['mac_address'],
        )
        self.assertIsNone(diagnostics['nic_details'][1]['tx_packets'])
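        # tx_packets is None for the second NIC because a device passed
        # through to the guest bypasses the hypervisor, which therefore has no
        # traffic counters for it; only the para-virtualised NIC on the first
        # network reports statistics (a reading of the fixture's behaviour,
        # not a hard API guarantee).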

    def test_create_server_after_change_in_nonsriov_pf_to_sriov_pf(self):
        # Starts a compute with a PF that is not configured with SR-IOV
        # capabilities, updates the PF with the SR-IOV capability, and
        # restarts the compute service. It then starts a VM with an SR-IOV
        # port; the VM should reach the ACTIVE state with the port attached.

        # To emulate the device type changing, we first create a
        # HostPCIDevicesInfo object with PFs and VFs. Then we make a copy
        # and remove the VFs and the virt_function capability. This is
        # done to ensure the physical function product id is the same in
        # both versions.
        pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=1)
        pci_info_no_sriov = copy.deepcopy(pci_info)

        # Disable SR-IOV capabilities in the PF and delete the VFs
        self._disable_sriov_in_pf(pci_info_no_sriov)

        fake_connection = self._get_connection(pci_info=pci_info_no_sriov,
                                               hostname='test_compute0')
        self.mock_conn.return_value = fake_connection

        self.compute = self.start_service('compute', host='test_compute0')

        ctxt = context.get_admin_context()
        pci_devices = objects.PciDeviceList.get_by_compute_node(
            ctxt,
            objects.ComputeNode.get_by_nodename(
                ctxt, 'test_compute0',
            ).id,
        )
        self.assertEqual(1, len(pci_devices))
        self.assertEqual('type-PCI', pci_devices[0].dev_type)
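        # 'type-PCI' is the dev_type of a plain, non-SR-IOV device
        # (fields.PciDeviceType.STANDARD); 'type-PF' and 'type-VF' below map
        # to fields.PciDeviceType.SRIOV_PF and SRIOV_VF respectively.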

        # Update connection with original pci info with sriov PFs
        fake_connection = self._get_connection(pci_info=pci_info,
                                               hostname='test_compute0')
        self.mock_conn.return_value = fake_connection

        # Restart the compute service
        self.restart_compute_service(self.compute)

        # Verify that the PCI devices are now of type type-PF or type-VF
        pci_devices = objects.PciDeviceList.get_by_compute_node(
            ctxt,
            objects.ComputeNode.get_by_nodename(
                ctxt, 'test_compute0',
            ).id,
        )
        for pci_device in pci_devices:
            self.assertIn(pci_device.dev_type, ['type-PF', 'type-VF'])

        # create the port
        self.neutron.create_port({'port': self.neutron.network_4_port_1})

        # create a server using the VF via neutron
        flavor_id = self._create_flavor()
        self._create_server(
            flavor_id=flavor_id,
            networks=[
                {'port': base.LibvirtNeutronFixture.network_4_port_1['id']},
            ],
        )


class SRIOVAttachDetachTest(_PCIServersTestBase):
    # no need for aliases as these tests will request SR-IOV via neutron
    PCI_ALIAS = []

    PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(x) for x in (
        {
            'vendor_id': fakelibvirt.PCI_VEND_ID,
            'product_id': fakelibvirt.PF_PROD_ID,
            'physical_network': 'physnet2',
        },
        {
            'vendor_id': fakelibvirt.PCI_VEND_ID,
            'product_id': fakelibvirt.VF_PROD_ID,
            'physical_network': 'physnet2',
        },
    )]

    def setUp(self):
        super().setUp()

        self.neutron = self.useFixture(nova_fixtures.NeutronFixture(self))

        # Add extra ports and the related network to the neutron fixture
        # specifically for these tests. This cannot be done globally in the
        # fixture's init as it would add a second network, which makes tests
        # based on auto-allocation fail due to ambiguous networks.
        self.neutron._networks[
            self.neutron.network_2['id']] = self.neutron.network_2
        self.neutron._subnets[
            self.neutron.subnet_2['id']] = self.neutron.subnet_2
        for port in [self.neutron.sriov_port, self.neutron.sriov_port2,
                     self.neutron.sriov_pf_port, self.neutron.sriov_pf_port2,
                     self.neutron.macvtap_port, self.neutron.macvtap_port2]:
            self.neutron._ports[port['id']] = copy.deepcopy(port)

    def _get_attached_port_ids(self, instance_uuid):
        return [
            attachment['port_id']
            for attachment in self.api.get_port_interfaces(instance_uuid)]

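    # The two helpers below block on the 'instance.interface_*.end' versioned
    # notifications because the compute-side work continues after the API
    # call returns; without the wait, the assertions could race the
    # attach/detach operation.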
    def _detach_port(self, instance_uuid, port_id):
        self.api.detach_interface(instance_uuid, port_id)
        fake_notifier.wait_for_versioned_notifications(
            'instance.interface_detach.end')

    def _attach_port(self, instance_uuid, port_id):
        self.api.attach_interface(
            instance_uuid,
            {'interfaceAttachment': {'port_id': port_id}})
        fake_notifier.wait_for_versioned_notifications(
            'instance.interface_attach.end')

    def _test_detach_attach(self, first_port_id, second_port_id):
        # This test takes two ports that require a PCI claim and starts a
        # compute with one PF and one connected VF. It then starts a VM with
        # the first port, detaches the port, and re-attaches it; these
        # operations are expected to succeed. Finally, it tries to attach the
        # second port and asserts that this fails, as there is no free PCI
        # device left on the host.
        host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
                                         cpu_cores=2, cpu_threads=2,
                                         kB_mem=15740000)
        pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=1)
        fake_connection = self._get_connection(host_info, pci_info)
        self.mock_conn.return_value = fake_connection

        self.compute = self.start_service('compute', host='test_compute0')

        # Create server with a port
        server = self._create_server(networks=[{'port': first_port_id}])

        updated_port = self.neutron.show_port(first_port_id)['port']
        self.assertEqual('test_compute0', updated_port['binding:host_id'])
        self.assertIn(first_port_id, self._get_attached_port_ids(server['id']))

        self._detach_port(server['id'], first_port_id)

        updated_port = self.neutron.show_port(first_port_id)['port']
        self.assertIsNone(updated_port['binding:host_id'])
        self.assertNotIn(
            first_port_id,
            self._get_attached_port_ids(server['id']))

        # Attach back the port
        self._attach_port(server['id'], first_port_id)

        updated_port = self.neutron.show_port(first_port_id)['port']
        self.assertEqual('test_compute0', updated_port['binding:host_id'])
        self.assertIn(first_port_id, self._get_attached_port_ids(server['id']))

        # Try to attach the second port but no free PCI device left
        ex = self.assertRaises(
            client.OpenStackApiException, self._attach_port, server['id'],
            second_port_id)

        self.assertEqual(400, ex.response.status_code)
        self.assertIn('Failed to claim PCI device', str(ex))
        attached_ports = self._get_attached_port_ids(server['id'])
        self.assertIn(first_port_id, attached_ports)
        self.assertNotIn(second_port_id, attached_ports)

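    # The three tests below cover the SR-IOV vnic_types: 'direct' ports
    # consume a VF passed through to the guest, 'macvtap' ports consume a VF
    # attached via a macvtap device, and 'direct-physical' ports consume an
    # entire PF.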
    def test_detach_attach_direct(self):
        self._test_detach_attach(
            self.neutron.sriov_port['id'], self.neutron.sriov_port2['id'])

    def test_detach_macvtap(self):
        self._test_detach_attach(
            self.neutron.macvtap_port['id'],
            self.neutron.macvtap_port2['id'])

    def test_detach_direct_physical(self):
        self._test_detach_attach(
            self.neutron.sriov_pf_port['id'],
            self.neutron.sriov_pf_port2['id'])


class PCIServersTest(_PCIServersTestBase):

    ADMIN_API = True
    microversion = 'latest'

    ALIAS_NAME = 'a1'
    PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(
        {
            'vendor_id': fakelibvirt.PCI_VEND_ID,
            'product_id': fakelibvirt.PCI_PROD_ID,
        }
    )]
    PCI_ALIAS = [jsonutils.dumps(
        {
            'vendor_id': fakelibvirt.PCI_VEND_ID,
            'product_id': fakelibvirt.PCI_PROD_ID,
            'name': ALIAS_NAME,
        }
    )]

    def test_create_server_with_pci_dev_and_numa(self):
        """Verifies that an instance can be booted with cpu pinning and with an
           assigned pci device.
        """

        self.flags(cpu_dedicated_set='0-7', group='compute')

        pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=1)
        self.start_compute(pci_info=pci_info)

        # create a flavor
        extra_spec = {
            'hw:cpu_policy': 'dedicated',
            'pci_passthrough:alias': '%s:1' % self.ALIAS_NAME,
        }
        flavor_id = self._create_flavor(extra_spec=extra_spec)

        self._create_server(flavor_id=flavor_id, networks='none')

    def test_create_server_with_pci_dev_and_numa_fails(self):
        """This test ensures that it is not possible to allocate CPU and
           memory resources from one NUMA node and a PCI device from another.
        """

        self.flags(cpu_dedicated_set='0-7', group='compute')

        pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=0)
        self.start_compute(pci_info=pci_info)

        # boot one instance with no PCI device to "fill up" NUMA node 0
        extra_spec = {'hw:cpu_policy': 'dedicated'}
        flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec)
        self._create_server(flavor_id=flavor_id, networks='none')

        # now boot one with a PCI device, which should fail to boot
        extra_spec['pci_passthrough:alias'] = '%s:1' % self.ALIAS_NAME
        flavor_id = self._create_flavor(extra_spec=extra_spec)
        self._create_server(
            flavor_id=flavor_id, networks='none', expected_state='ERROR')

    def _confirm_resize(self, server, host='host1'):
        # NOTE(sbauza): Unfortunately, _cleanup_resize() in libvirt checks the
        # 'host' option to know the source hostname, but since we have a
        # global CONF the value will be the hostname of the last compute
        # service that was created, so we need to change it here.
        # TODO(sbauza): Remove the below once we stop using CONF.host in
        # libvirt and instead look at the compute host value.
        orig_host = CONF.host
        self.flags(host=host)
        super()._confirm_resize(server)
        self.flags(host=orig_host)

    def assertPCIDeviceCounts(self, hostname, total, free):
        """Ensure $hostname has $total devices, $free of which are free."""
        ctxt = context.get_admin_context()
        devices = objects.PciDeviceList.get_by_compute_node(
            ctxt, objects.ComputeNode.get_by_nodename(ctxt, hostname).id,
        )
        self.assertEqual(total, len(devices))
        self.assertEqual(free, len([d for d in devices if d.is_available()]))

    def test_cold_migrate_server_with_pci(self):

        host_devices = {}
        orig_create = nova.virt.libvirt.guest.Guest.create

        def fake_create(cls, xml, host):
            tree = etree.fromstring(xml)
            elem = tree.find('./devices/hostdev/source/address')

            hostname = host.get_hostname()
            address = (
                elem.get('bus'), elem.get('slot'), elem.get('function'),
            )
            if hostname in host_devices:
                self.assertNotIn(address, host_devices[hostname])
            else:
                host_devices[hostname] = []
            host_devices[host.get_hostname()].append(address)

            return orig_create(xml, host)

        self.stub_out(
            'nova.virt.libvirt.guest.Guest.create',
            fake_create,
        )

        # start two compute services
        for hostname in ('test_compute0', 'test_compute1'):
            pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=2)
            self.start_compute(hostname=hostname, pci_info=pci_info)

        # boot an instance with a PCI device on each host
        extra_spec = {
            'pci_passthrough:alias': '%s:1' % self.ALIAS_NAME,
        }
        flavor_id = self._create_flavor(extra_spec=extra_spec)

        server_a = self._create_server(
            flavor_id=flavor_id, networks='none', host='test_compute0')
        server_b = self._create_server(
            flavor_id=flavor_id, networks='none', host='test_compute1')

        # the instances should have landed on separate hosts; ensure both
        # hosts have one used PCI device and one free PCI device
        self.assertNotEqual(
            server_a['OS-EXT-SRV-ATTR:host'], server_b['OS-EXT-SRV-ATTR:host'],
        )
        for hostname in ('test_compute0', 'test_compute1'):
            self.assertPCIDeviceCounts(hostname, total=2, free=1)

        # TODO(stephenfin): The mock of 'migrate_disk_and_power_off' should
        # probably be less...dumb
        with mock.patch(
            'nova.virt.libvirt.driver.LibvirtDriver'
            '.migrate_disk_and_power_off', return_value='{}',
        ):
            # TODO(stephenfin): Use a helper
            self.api.post_server_action(server_a['id'], {'migrate': None})
            server_a = self._wait_for_state_change(server_a, 'VERIFY_RESIZE')

        # the instances should now be on the same host; ensure the source host
        # (test_compute0 initially) still has one used PCI device, while the
        # destination now has two used
        self.assertEqual(
            server_a['OS-EXT-SRV-ATTR:host'], server_b['OS-EXT-SRV-ATTR:host'],
        )
        self.assertPCIDeviceCounts('test_compute0', total=2, free=1)
        self.assertPCIDeviceCounts('test_compute1', total=2, free=0)

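        # the device claimed on the source is held until the resize is
        # confirmed, so that a revert could return the instance to
        # test_compute0 with its device intact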
        # now, confirm the migration and check our counts once again
        self._confirm_resize(server_a)

        self.assertPCIDeviceCounts('test_compute0', total=2, free=2)
        self.assertPCIDeviceCounts('test_compute1', total=2, free=0)


class PCIServersWithPreferredNUMATest(_PCIServersTestBase):

    ALIAS_NAME = 'a1'
    PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(
        {
            'vendor_id': fakelibvirt.PCI_VEND_ID,
            'product_id': fakelibvirt.PCI_PROD_ID,
        }
    )]
    PCI_ALIAS = [jsonutils.dumps(
        {
            'vendor_id': fakelibvirt.PCI_VEND_ID,
            'product_id': fakelibvirt.PCI_PROD_ID,
            'name': ALIAS_NAME,
            'device_type': fields.PciDeviceType.STANDARD,
            'numa_policy': fields.PCINUMAAffinityPolicy.PREFERRED,
        }
    )]
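    # The three PCI NUMA affinity policies differ in strictness: 'required'
    # insists the device share a NUMA node with the guest CPUs and memory,
    # 'legacy' (the default) also accepts devices that report no NUMA
    # affinity, and 'preferred' treats affinity as best-effort only.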
    expected_state = 'ACTIVE'

    def test_create_server_with_pci_dev_and_numa(self):
        """Validate behavior of 'preferred' PCI NUMA policy.

        This test ensures that it *is* possible to allocate CPU and memory
        resources from one NUMA node and a PCI device from another *if* PCI
        NUMA policies are in use.
        """

        self.flags(cpu_dedicated_set='0-7', group='compute')

        pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=0)
        self.start_compute(pci_info=pci_info)

        # boot one instance with no PCI device to "fill up" NUMA node 0
        extra_spec = {
            'hw:cpu_policy': 'dedicated',
        }
        flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec)
        self._create_server(flavor_id=flavor_id)

        # now boot one with a PCI device, which should succeed thanks to the
        # use of the PCI policy
        extra_spec['pci_passthrough:alias'] = '%s:1' % self.ALIAS_NAME
        flavor_id = self._create_flavor(extra_spec=extra_spec)
        self._create_server(
            flavor_id=flavor_id, expected_state=self.expected_state)


class PCIServersWithRequiredNUMATest(PCIServersWithPreferredNUMATest):

    ALIAS_NAME = 'a1'
    PCI_ALIAS = [jsonutils.dumps(
        {
            'vendor_id': fakelibvirt.PCI_VEND_ID,
            'product_id': fakelibvirt.PCI_PROD_ID,
            'name': ALIAS_NAME,
            'device_type': fields.PciDeviceType.STANDARD,
            'numa_policy': fields.PCINUMAAffinityPolicy.REQUIRED,
        }
    )]
    expected_state = 'ERROR'


@ddt.ddt
class PCIServersWithSRIOVAffinityPoliciesTest(_PCIServersTestBase):

    ALIAS_NAME = 'a1'
    PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(
        {
            'vendor_id': fakelibvirt.PCI_VEND_ID,
            'product_id': fakelibvirt.PCI_PROD_ID,
        }
    )]
    # we set the numa_policy to 'required' to ensure strict affinity
    # between PCI devices and the guest CPU and memory is enforced
    PCI_ALIAS = [jsonutils.dumps(
        {
            'vendor_id': fakelibvirt.PCI_VEND_ID,
            'product_id': fakelibvirt.PCI_PROD_ID,
            'name': ALIAS_NAME,
            'device_type': fields.PciDeviceType.STANDARD,
            'numa_policy': fields.PCINUMAAffinityPolicy.REQUIRED,
        }
    )]

    # NOTE(sean-k-mooney): I could just apply the ddt decorators
    # to this function for the most part, but I have chosen to
    # keep one top-level function per policy to make documenting
    # the test cases simpler.
    def _test_policy(self, pci_numa_node, status, policy):
        # only allow cpus on numa node 1 to be used for pinning
        self.flags(cpu_dedicated_set='4-7', group='compute')

        pci_info = fakelibvirt.HostPCIDevicesInfo(
            num_pci=1, numa_node=pci_numa_node)
        self.start_compute(pci_info=pci_info)

        # request cpu pinning to create a numa topology and allow the test to
        # force which numa node the vm would have to be pinned to.
        extra_spec = {
            'hw:cpu_policy': 'dedicated',
            'pci_passthrough:alias': '%s:1' % self.ALIAS_NAME,
            'hw:pci_numa_affinity_policy': policy
        }
        flavor_id = self._create_flavor(extra_spec=extra_spec)
        self._create_server(flavor_id=flavor_id, expected_state=status)

        if status == 'ACTIVE':
            self.assertTrue(self.mock_filter.called)
        else:
            # the PciPassthroughFilter should not have been called, since the
            # NUMATopologyFilter should have eliminated the host first
            self.assertFalse(self.mock_filter.called)

    @ddt.unpack  # unpacks each sub-tuple e.g. *(pci_numa_node, status)
    # the preferred policy should always pass regardless of numa affinity
    @ddt.data((-1, 'ACTIVE'), (0, 'ACTIVE'), (1, 'ACTIVE'))
    def test_create_server_with_sriov_numa_affinity_policy_preferred(
            self, pci_numa_node, status):
        """Validate behavior of 'preferred' PCI NUMA affinity policy.

        This test ensures that it *is* possible to allocate CPU and memory
        resources from one NUMA node and a PCI device from another *if*
        the SR-IOV NUMA affinity policy is set to preferred.
        """
        self._test_policy(pci_numa_node, status, 'preferred')

    @ddt.unpack  # unpacks each sub-tuple e.g. *(pci_numa_node, status)
    # the legacy policy allows a PCI device to be used if it has NUMA
    # affinity or if no NUMA info is available, so we set the NUMA
    # node for this device to -1, the sentinel value used by the
    # Linux kernel for a device with no NUMA affinity
    @ddt.data((-1, 'ACTIVE'), (0, 'ERROR'), (1, 'ACTIVE'))
    def test_create_server_with_sriov_numa_affinity_policy_legacy(
            self, pci_numa_node, status):
        """Validate behavior of 'legacy' PCI NUMA affinity policy.

        This test ensures that it *is* possible to allocate CPU and memory
        resources from one NUMA node and a PCI device from another *if*
        the SR-IOV NUMA affinity policy is set to legacy and the device
        does not report NUMA information.
        """
        self._test_policy(pci_numa_node, status, 'legacy')

    @ddt.unpack  # unpacks each sub-tuple e.g. *(pci_numa_node, status)
    # the required policy requires a PCI device to both report a NUMA node
    # and for the guest CPUs and RAM to be affinitized to the same NUMA
    # node, so only the case where the device is on node 1 (where the guest
    # is pinned) should pass
    @ddt.data((-1, 'ERROR'), (0, 'ERROR'), (1, 'ACTIVE'))
    def test_create_server_with_sriov_numa_affinity_policy_required(
            self, pci_numa_node, status):
        """Validate behavior of 'required' PCI NUMA affinity policy.

        This test ensures that it *is not* possible to allocate CPU and memory
        resources from one NUMA node and a PCI device from another *if*
        the SR-IOV NUMA affinity policy is set to required and the device
        does report NUMA information.
        """

        # we set the numa_affinity policy to preferred to allow the PCI device
        # to be selected from any numa node so we can prove the flavor
        # overrides the alias.
        alias = [jsonutils.dumps(
            {
                'vendor_id': fakelibvirt.PCI_VEND_ID,
                'product_id': fakelibvirt.PCI_PROD_ID,
                'name': self.ALIAS_NAME,
                'device_type': fields.PciDeviceType.STANDARD,
                'numa_policy': fields.PCINUMAAffinityPolicy.PREFERRED,
            }
        )]

        self.flags(passthrough_whitelist=self.PCI_PASSTHROUGH_WHITELIST,
                   alias=alias,
                   group='pci')

        self._test_policy(pci_numa_node, status, 'required')