"Fossies" - the Fresh Open Source Software Archive  

Source code changes of the file "salt/modules/vsphere.py" between
salt-3002.1.tar.gz and salt-3002.2.tar.gz

About: SaltStack is a systems management software for data center automation, cloud orchestration, server provisioning, configuration management and more. Community version.
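Editorial note: the bulk of this diff is a Python 3 clean-up of vsphere.py: auto-numbered str.format() placeholders replace the indexed "{0}" style, the six compatibility layer and the __future__ import are dropped, six.iteritems() becomes dict.items(), six.string_types becomes str, and six.reraise(*sys.exc_info()) becomes a bare raise. The sketch below is not part of the diff; it condenses those patterns into a minimal runnable example, and the data and helper names (details, reraise_demo) are made up for illustration.

# Minimal sketch of the modernization patterns applied in this release.
# All names and data here are illustrative only.

details = {"esxi-1.host.com": "ok", "esxi-2.host.com": "timeout"}

# salt-3002.1: "'{0}' proxy is not supported".format(proxytype)
# salt-3002.2: "'{}' proxy is not supported".format(proxytype)
print("'{}' proxy is not supported".format("fakeproxy"))

# salt-3002.1 iterated with six.iteritems() and checked six.string_types;
# salt-3002.2 uses plain dict iteration and the built-in str type.
for host_name, value in details.items():
    if isinstance(value, str):
        print("{}: {}".format(host_name, value))

def reraise_demo():
    # salt-3002.1 re-raised with six.reraise(*sys.exc_info());
    # under Python 3 a bare raise preserves the original traceback.
    try:
        1 / 0
    except ZeroDivisionError:
        raise

try:
    reraise_demo()
except ZeroDivisionError as err:
    print("re-raised: {}".format(err))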

vsphere.py  (salt-3002.1):vsphere.py  (salt-3002.2)
# -*- coding: utf-8 -*-
""" """
Manage VMware vCenter servers and ESXi hosts. Manage VMware vCenter servers and ESXi hosts.
.. versionadded:: 2015.8.4 .. versionadded:: 2015.8.4
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstaley.com> :codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstaley.com>
Dependencies Dependencies
============ ============
skipping to change at line 184 skipping to change at line 183
enabled: enabled:
True True
host_vnic: host_vnic:
vmk0 vmk0
ip: ip:
coredump-location.example.com coredump-location.example.com
port: port:
6500 6500
""" """
# Import Python Libs
from __future__ import absolute_import
import datetime import datetime
import logging import logging
import sys import sys
from functools import wraps from functools import wraps
import salt.utils.args import salt.utils.args
import salt.utils.dictupdate as dictupdate import salt.utils.dictupdate as dictupdate
import salt.utils.http import salt.utils.http
import salt.utils.path import salt.utils.path
import salt.utils.pbm import salt.utils.pbm
skipping to change at line 223 skipping to change at line 219
from salt.exceptions import ( from salt.exceptions import (
ArgumentValueError, ArgumentValueError,
CommandExecutionError, CommandExecutionError,
InvalidConfigError, InvalidConfigError,
InvalidEntityError, InvalidEntityError,
VMwareApiError, VMwareApiError,
VMwareObjectExistsError, VMwareObjectExistsError,
VMwareObjectRetrievalError, VMwareObjectRetrievalError,
VMwareSaltError, VMwareSaltError,
) )
# Import Salt Libs
from salt.ext import six
from salt.ext.six.moves import range, zip
from salt.utils.decorators import depends, ignores_kwargs from salt.utils.decorators import depends, ignores_kwargs
from salt.utils.dictdiffer import recursive_diff from salt.utils.dictdiffer import recursive_diff
from salt.utils.listdiffer import list_diff from salt.utils.listdiffer import list_diff
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
# Import Third Party Libs
try: try:
import jsonschema import jsonschema
HAS_JSONSCHEMA = True HAS_JSONSCHEMA = True
except ImportError: except ImportError:
HAS_JSONSCHEMA = False HAS_JSONSCHEMA = False
try: try:
# pylint: disable=no-name-in-module # pylint: disable=no-name-in-module
from pyVmomi import ( from pyVmomi import (
skipping to change at line 342 skipping to change at line 333
details = __salt__["esxi.get_details"]() details = __salt__["esxi.get_details"]()
elif proxytype == "esxcluster": elif proxytype == "esxcluster":
details = __salt__["esxcluster.get_details"]() details = __salt__["esxcluster.get_details"]()
elif proxytype == "esxdatacenter": elif proxytype == "esxdatacenter":
details = __salt__["esxdatacenter.get_details"]() details = __salt__["esxdatacenter.get_details"]()
elif proxytype == "vcenter": elif proxytype == "vcenter":
details = __salt__["vcenter.get_details"]() details = __salt__["vcenter.get_details"]()
elif proxytype == "esxvm": elif proxytype == "esxvm":
details = __salt__["esxvm.get_details"]() details = __salt__["esxvm.get_details"]()
else: else:
raise CommandExecutionError("'{0}' proxy is not supported" "".format(pro xytype)) raise CommandExecutionError("'{}' proxy is not supported" "".format(prox ytype))
return ( return (
details.get("vcenter") if "vcenter" in details else details.get("host"), details.get("vcenter") if "vcenter" in details else details.get("host"),
details.get("username"), details.get("username"),
details.get("password"), details.get("password"),
details.get("protocol"), details.get("protocol"),
details.get("port"), details.get("port"),
details.get("mechanism"), details.get("mechanism"),
details.get("principal"), details.get("principal"),
details.get("domain"), details.get("domain"),
) )
skipping to change at line 368 skipping to change at line 359
proxy_types: proxy_types:
Arbitrary list of strings with the supported types of proxies Arbitrary list of strings with the supported types of proxies
""" """
def _supports_proxies(fn): def _supports_proxies(fn):
@wraps(fn) @wraps(fn)
def __supports_proxies(*args, **kwargs): def __supports_proxies(*args, **kwargs):
proxy_type = get_proxy_type() proxy_type = get_proxy_type()
if proxy_type not in proxy_types: if proxy_type not in proxy_types:
raise CommandExecutionError( raise CommandExecutionError(
"'{0}' proxy is not supported by function {1}" "'{}' proxy is not supported by function {}"
"".format(proxy_type, fn.__name__) "".format(proxy_type, fn.__name__)
) )
return fn(*args, **salt.utils.args.clean_kwargs(**kwargs)) return fn(*args, **salt.utils.args.clean_kwargs(**kwargs))
return __supports_proxies return __supports_proxies
return _supports_proxies return _supports_proxies
def gets_service_instance_via_proxy(fn): def gets_service_instance_via_proxy(fn):
""" """
skipping to change at line 410 skipping to change at line 401
args_name, args_name,
kwargs_name, kwargs_name,
default_values, default_values,
) = salt.utils.args.get_function_argspec(fn) ) = salt.utils.args.get_function_argspec(fn)
default_values = default_values if default_values is not None else [] default_values = default_values if default_values is not None else []
@wraps(fn) @wraps(fn)
def _gets_service_instance_via_proxy(*args, **kwargs): def _gets_service_instance_via_proxy(*args, **kwargs):
if "service_instance" not in arg_names and not kwargs_name: if "service_instance" not in arg_names and not kwargs_name:
raise CommandExecutionError( raise CommandExecutionError(
"Function {0} must have either a 'service_instance', or a " "Function {} must have either a 'service_instance', or a "
"'**kwargs' type parameter".format(fn_name) "'**kwargs' type parameter".format(fn_name)
) )
connection_details = _get_proxy_connection_details() connection_details = _get_proxy_connection_details()
# Figure out how to pass in the connection value # Figure out how to pass in the connection value
local_service_instance = None local_service_instance = None
if "service_instance" in arg_names: if "service_instance" in arg_names:
idx = arg_names.index("service_instance") idx = arg_names.index("service_instance")
if idx >= len(arg_names) - len(default_values): if idx >= len(arg_names) - len(default_values):
# 'service_instance' has a default value: # 'service_instance' has a default value:
# we check if we need to instantiate it or # we check if we need to instantiate it or
skipping to change at line 468 skipping to change at line 459
ret = fn(*args, **salt.utils.args.clean_kwargs(**kwargs)) ret = fn(*args, **salt.utils.args.clean_kwargs(**kwargs))
# Disconnect if connected in the decorator # Disconnect if connected in the decorator
if local_service_instance: if local_service_instance:
salt.utils.vmware.disconnect(local_service_instance) salt.utils.vmware.disconnect(local_service_instance)
return ret return ret
except Exception as e: # pylint: disable=broad-except except Exception as e: # pylint: disable=broad-except
# Disconnect if connected in the decorator # Disconnect if connected in the decorator
if local_service_instance: if local_service_instance:
salt.utils.vmware.disconnect(local_service_instance) salt.utils.vmware.disconnect(local_service_instance)
# raise original exception and traceback # raise original exception and traceback
six.reraise(*sys.exc_info()) raise
return _gets_service_instance_via_proxy return _gets_service_instance_via_proxy
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@supports_proxies("esxi", "esxcluster", "esxdatacenter", "vcenter", "esxvm") @supports_proxies("esxi", "esxcluster", "esxdatacenter", "vcenter", "esxvm")
def get_service_instance_via_proxy(service_instance=None): def get_service_instance_via_proxy(service_instance=None):
""" """
Returns a service instance to the proxied endpoint (vCenter/ESXi host). Returns a service instance to the proxied endpoint (vCenter/ESXi host).
service_instance service_instance
skipping to change at line 754 skipping to change at line 745
salt '*' vsphere.coredump_network_enable my.esxi.host root bad-password True salt '*' vsphere.coredump_network_enable my.esxi.host root bad-password True
# Used for connecting to a vCenter Server # Used for connecting to a vCenter Server
salt '*' vsphere.coredump_network_enable my.vcenter.location root bad-password True \ salt '*' vsphere.coredump_network_enable my.vcenter.location root bad-password True \
esxi_hosts='[esxi-1.host.com, esxi-2.host.com]' esxi_hosts='[esxi-1.host.com, esxi-2.host.com]'
""" """
if enabled: if enabled:
enable_it = 1 enable_it = 1
else: else:
enable_it = 0 enable_it = 0
cmd = "system coredump network set -e {0}".format(enable_it) cmd = "system coredump network set -e {}".format(enable_it)
ret = {} ret = {}
if esxi_hosts: if esxi_hosts:
if not isinstance(esxi_hosts, list): if not isinstance(esxi_hosts, list):
raise CommandExecutionError("'esxi_hosts' must be a list.") raise CommandExecutionError("'esxi_hosts' must be a list.")
for esxi_host in esxi_hosts: for esxi_host in esxi_hosts:
response = salt.utils.vmware.esxcli( response = salt.utils.vmware.esxcli(
host, host,
username, username,
skipping to change at line 861 skipping to change at line 852
.. code-block:: bash .. code-block:: bash
# Used for ESXi host connection information # Used for ESXi host connection information
salt '*' vsphere.set_coredump_network_config my.esxi.host root bad-password 'dump_ip.host.com' salt '*' vsphere.set_coredump_network_config my.esxi.host root bad-password 'dump_ip.host.com'
# Used for connecting to a vCenter Server # Used for connecting to a vCenter Server
salt '*' vsphere.set_coredump_network_config my.vcenter.location root bad-password 'dump_ip.host.com' \ salt '*' vsphere.set_coredump_network_config my.vcenter.location root bad-password 'dump_ip.host.com' \
esxi_hosts='[esxi-1.host.com, esxi-2.host.com]' esxi_hosts='[esxi-1.host.com, esxi-2.host.com]'
""" """
cmd = "system coredump network set -v {0} -i {1} -o {2}".format( cmd = "system coredump network set -v {} -i {} -o {}".format(
host_vnic, dump_ip, dump_port host_vnic, dump_ip, dump_port
) )
ret = {} ret = {}
if esxi_hosts: if esxi_hosts:
if not isinstance(esxi_hosts, list): if not isinstance(esxi_hosts, list):
raise CommandExecutionError("'esxi_hosts' must be a list.") raise CommandExecutionError("'esxi_hosts' must be a list.")
for esxi_host in esxi_hosts: for esxi_host in esxi_hosts:
response = salt.utils.vmware.esxcli( response = salt.utils.vmware.esxcli(
host, host,
skipping to change at line 1069 skipping to change at line 1060
.. code-block:: bash .. code-block:: bash
# Used for ESXi host connection information # Used for ESXi host connection information
salt '*' vsphere.enable_firewall_ruleset my.esxi.host root bad-password True 'syslog' salt '*' vsphere.enable_firewall_ruleset my.esxi.host root bad-password True 'syslog'
# Used for connecting to a vCenter Server # Used for connecting to a vCenter Server
salt '*' vsphere.enable_firewall_ruleset my.vcenter.location root bad-password True 'syslog' \ salt '*' vsphere.enable_firewall_ruleset my.vcenter.location root bad-password True 'syslog' \
esxi_hosts='[esxi-1.host.com, esxi-2.host.com]' esxi_hosts='[esxi-1.host.com, esxi-2.host.com]'
""" """
cmd = "network firewall ruleset set --enabled {0} --ruleset-id={1}".format( cmd = "network firewall ruleset set --enabled {} --ruleset-id={}".format(
ruleset_enable, ruleset_name ruleset_enable, ruleset_name
) )
ret = {} ret = {}
if esxi_hosts: if esxi_hosts:
if not isinstance(esxi_hosts, list): if not isinstance(esxi_hosts, list):
raise CommandExecutionError("'esxi_hosts' must be a list.") raise CommandExecutionError("'esxi_hosts' must be a list.")
for esxi_host in esxi_hosts: for esxi_host in esxi_hosts:
response = salt.utils.vmware.esxcli( response = salt.utils.vmware.esxcli(
skipping to change at line 1613 skipping to change at line 1604
.. code-block:: bash .. code-block:: bash
salt '*' vsphere.upload_ssh_key my.esxi.host root bad-password ssh_key_file='/etc/salt/my_keys/my_key.pub' salt '*' vsphere.upload_ssh_key my.esxi.host root bad-password ssh_key_file='/etc/salt/my_keys/my_key.pub'
""" """
if protocol is None: if protocol is None:
protocol = "https" protocol = "https"
if port is None: if port is None:
port = 443 port = 443
url = "{0}://{1}:{2}/host/ssh_root_authorized_keys".format(protocol, host, p ort) url = "{}://{}:{}/host/ssh_root_authorized_keys".format(protocol, host, port )
ret = {} ret = {}
result = None result = None
try: try:
if ssh_key: if ssh_key:
result = salt.utils.http.query( result = salt.utils.http.query(
url, url,
status=True, status=True,
text=True, text=True,
method="PUT", method="PUT",
username=username, username=username,
skipping to change at line 1680 skipping to change at line 1671
.. code-block:: bash .. code-block:: bash
salt '*' vsphere.get_ssh_key my.esxi.host root bad-password certificate_verify=True salt '*' vsphere.get_ssh_key my.esxi.host root bad-password certificate_verify=True
""" """
if protocol is None: if protocol is None:
protocol = "https" protocol = "https"
if port is None: if port is None:
port = 443 port = 443
url = "{0}://{1}:{2}/host/ssh_root_authorized_keys".format(protocol, host, p ort) url = "{}://{}:{}/host/ssh_root_authorized_keys".format(protocol, host, port )
ret = {} ret = {}
try: try:
result = salt.utils.http.query( result = salt.utils.http.query(
url, url,
status=True, status=True,
text=True, text=True,
method="GET", method="GET",
username=username, username=username,
password=password, password=password,
verify_ssl=certificate_verify, verify_ssl=certificate_verify,
skipping to change at line 1907 skipping to change at line 1898
host_names = _check_hosts(service_instance, host, host_names) host_names = _check_hosts(service_instance, host, host_names)
ret = {} ret = {}
for host_name in host_names: for host_name in host_names:
# Check if the service_name provided is a valid one. # Check if the service_name provided is a valid one.
# If we don't have a valid service, return. The service will be invalid for all hosts. # If we don't have a valid service, return. The service will be invalid for all hosts.
if service_name not in valid_services: if service_name not in valid_services:
ret.update( ret.update(
{ {
host_name: { host_name: {
"Error": "{0} is not a valid service name.".format(servi ce_name) "Error": "{} is not a valid service name.".format(servic e_name)
} }
} }
) )
return ret return ret
host_ref = _get_host_ref(service_instance, host, host_name=host_name) host_ref = _get_host_ref(service_instance, host, host_name=host_name)
services = host_ref.configManager.serviceSystem.serviceInfo.service services = host_ref.configManager.serviceSystem.serviceInfo.service
# Don't require users to know that VMware lists the ssh service as TSM-SSH # Don't require users to know that VMware lists the ssh service as TSM-SSH
if service_name == "SSH" or service_name == "ssh": if service_name == "SSH" or service_name == "ssh":
skipping to change at line 1930 skipping to change at line 1921
temp_service_name = service_name temp_service_name = service_name
# Loop through services until we find a matching name # Loop through services until we find a matching name
for service in services: for service in services:
if service.key == temp_service_name: if service.key == temp_service_name:
ret.update({host_name: {service_name: service.policy}}) ret.update({host_name: {service_name: service.policy}})
# We've found a match - break out of the loop so we don't overwrite the # We've found a match - break out of the loop so we don't overwrite the
# Updated host_name value with an error message. # Updated host_name value with an error message.
break break
else: else:
msg = "Could not find service '{0}' for host '{1}'.".format( msg = "Could not find service '{}' for host '{}'.".format(
service_name, host_name service_name, host_name
) )
ret.update({host_name: {"Error": msg}}) ret.update({host_name: {"Error": msg}})
# If we made it this far, something else has gone wrong. # If we made it this far, something else has gone wrong.
if ret.get(host_name) is None: if ret.get(host_name) is None:
msg = "'vsphere.get_service_policy' failed for host {0}.".format(hos t_name) msg = "'vsphere.get_service_policy' failed for host {}.".format(host _name)
log.debug(msg) log.debug(msg)
ret.update({host_name: {"Error": msg}}) ret.update({host_name: {"Error": msg}})
return ret return ret
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@ignores_kwargs("credstore") @ignores_kwargs("credstore")
def get_service_running( def get_service_running(
host, username, password, service_name, protocol=None, port=None, host_names=None host, username, password, service_name, protocol=None, port=None, host_names=None
): ):
skipping to change at line 2033 skipping to change at line 2024
host_names = _check_hosts(service_instance, host, host_names) host_names = _check_hosts(service_instance, host, host_names)
ret = {} ret = {}
for host_name in host_names: for host_name in host_names:
# Check if the service_name provided is a valid one. # Check if the service_name provided is a valid one.
# If we don't have a valid service, return. The service will be invalid for all hosts. # If we don't have a valid service, return. The service will be invalid for all hosts.
if service_name not in valid_services: if service_name not in valid_services:
ret.update( ret.update(
{ {
host_name: { host_name: {
"Error": "{0} is not a valid service name.".format(servi ce_name) "Error": "{} is not a valid service name.".format(servic e_name)
} }
} }
) )
return ret return ret
host_ref = _get_host_ref(service_instance, host, host_name=host_name) host_ref = _get_host_ref(service_instance, host, host_name=host_name)
services = host_ref.configManager.serviceSystem.serviceInfo.service services = host_ref.configManager.serviceSystem.serviceInfo.service
# Don't require users to know that VMware lists the ssh service as TSM-SSH # Don't require users to know that VMware lists the ssh service as TSM-SSH
if service_name == "SSH" or service_name == "ssh": if service_name == "SSH" or service_name == "ssh":
skipping to change at line 2056 skipping to change at line 2047
temp_service_name = service_name temp_service_name = service_name
# Loop through services until we find a matching name # Loop through services until we find a matching name
for service in services: for service in services:
if service.key == temp_service_name: if service.key == temp_service_name:
ret.update({host_name: {service_name: service.running}}) ret.update({host_name: {service_name: service.running}})
# We've found a match - break out of the loop so we don't overwrite the # We've found a match - break out of the loop so we don't overwrite the
# Updated host_name value with an error message. # Updated host_name value with an error message.
break break
else: else:
msg = "Could not find service '{0}' for host '{1}'.".format( msg = "Could not find service '{}' for host '{}'.".format(
service_name, host_name service_name, host_name
) )
ret.update({host_name: {"Error": msg}}) ret.update({host_name: {"Error": msg}})
# If we made it this far, something else has gone wrong. # If we made it this far, something else has gone wrong.
if ret.get(host_name) is None: if ret.get(host_name) is None:
msg = "'vsphere.get_service_running' failed for host {0}.".format(ho st_name) msg = "'vsphere.get_service_running' failed for host {}.".format(hos t_name)
log.debug(msg) log.debug(msg)
ret.update({host_name: {"Error": msg}}) ret.update({host_name: {"Error": msg}})
return ret return ret
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@ignores_kwargs("credstore") @ignores_kwargs("credstore")
def get_vmotion_enabled( def get_vmotion_enabled(
host, username, password, protocol=None, port=None, host_names=None host, username, password, protocol=None, port=None, host_names=None
): ):
skipping to change at line 2188 skipping to change at line 2179
host=host, username=username, password=password, protocol=protocol, port=port host=host, username=username, password=password, protocol=protocol, port=port
) )
host_names = _check_hosts(service_instance, host, host_names) host_names = _check_hosts(service_instance, host, host_names)
ret = {} ret = {}
for host_name in host_names: for host_name in host_names:
host_ref = _get_host_ref(service_instance, host, host_name=host_name) host_ref = _get_host_ref(service_instance, host, host_name=host_name)
vsan_config = host_ref.config.vsanHostConfig vsan_config = host_ref.config.vsanHostConfig
# We must have a VSAN Config in place get information about VSAN state. # We must have a VSAN Config in place get information about VSAN state.
if vsan_config is None: if vsan_config is None:
msg = "VSAN System Config Manager is unset for host '{0}'.".format( msg = "VSAN System Config Manager is unset for host '{}'.".format(ho
host_name st_name)
)
log.debug(msg) log.debug(msg)
ret.update({host_name: {"Error": msg}}) ret.update({host_name: {"Error": msg}})
else: else:
ret.update({host_name: {"VSAN Enabled": vsan_config.enabled}}) ret.update({host_name: {"VSAN Enabled": vsan_config.enabled}})
return ret return ret
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@ignores_kwargs("credstore") @ignores_kwargs("credstore")
def get_vsan_eligible_disks( def get_vsan_eligible_disks(
skipping to change at line 2250 skipping to change at line 2239
salt '*' vsphere.get_vsan_eligible_disks my.vcenter.location root bad-password \ salt '*' vsphere.get_vsan_eligible_disks my.vcenter.location root bad-password \
host_names='[esxi-1.host.com, esxi-2.host.com]' host_names='[esxi-1.host.com, esxi-2.host.com]'
""" """
service_instance = salt.utils.vmware.get_service_instance( service_instance = salt.utils.vmware.get_service_instance(
host=host, username=username, password=password, protocol=protocol, port=port host=host, username=username, password=password, protocol=protocol, port=port
) )
host_names = _check_hosts(service_instance, host, host_names) host_names = _check_hosts(service_instance, host, host_names)
response = _get_vsan_eligible_disks(service_instance, host, host_names) response = _get_vsan_eligible_disks(service_instance, host, host_names)
ret = {} ret = {}
for host_name, value in six.iteritems(response): for host_name, value in response.items():
error = value.get("Error") error = value.get("Error")
if error: if error:
ret.update({host_name: {"Error": error}}) ret.update({host_name: {"Error": error}})
continue continue
disks = value.get("Eligible") disks = value.get("Eligible")
# If we have eligible disks, it will be a list of disk objects # If we have eligible disks, it will be a list of disk objects
if disks and isinstance(disks, list): if disks and isinstance(disks, list):
disk_names = [] disk_names = []
# We need to return ONLY the disk names, otherwise # We need to return ONLY the disk names, otherwise
skipping to change at line 2899 skipping to change at line 2888
# Get DateTimeConfig object from ntp_config # Get DateTimeConfig object from ntp_config
date_config = vim.HostDateTimeConfig(ntpConfig=ntp_config) date_config = vim.HostDateTimeConfig(ntpConfig=ntp_config)
host_names = _check_hosts(service_instance, host, host_names) host_names = _check_hosts(service_instance, host, host_names)
ret = {} ret = {}
for host_name in host_names: for host_name in host_names:
host_ref = _get_host_ref(service_instance, host, host_name=host_name) host_ref = _get_host_ref(service_instance, host, host_name=host_name)
date_time_manager = _get_date_time_mgr(host_ref) date_time_manager = _get_date_time_mgr(host_ref)
log.debug( log.debug(
"Configuring NTP Servers '{0}' for host '{1}'.".format( "Configuring NTP Servers '{}' for host '{}'.".format(ntp_servers, ho
ntp_servers, host_name st_name)
)
) )
try: try:
date_time_manager.UpdateDateTimeConfig(config=date_config) date_time_manager.UpdateDateTimeConfig(config=date_config)
except vim.fault.HostConfigFault as err: except vim.fault.HostConfigFault as err:
msg = "vsphere.ntp_configure_servers failed: {0}".format(err) msg = "vsphere.ntp_configure_servers failed: {}".format(err)
log.debug(msg) log.debug(msg)
ret.update({host_name: {"Error": msg}}) ret.update({host_name: {"Error": msg}})
continue continue
ret.update({host_name: {"NTP Servers": ntp_config}}) ret.update({host_name: {"NTP Servers": ntp_config}})
return ret return ret
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@ignores_kwargs("credstore") @ignores_kwargs("credstore")
def service_start( def service_start(
skipping to change at line 3011 skipping to change at line 2998
else: else:
temp_service_name = service_name temp_service_name = service_name
for host_name in host_names: for host_name in host_names:
# Check if the service_name provided is a valid one. # Check if the service_name provided is a valid one.
# If we don't have a valid service, return. The service will be invalid for all hosts. # If we don't have a valid service, return. The service will be invalid for all hosts.
if service_name not in valid_services: if service_name not in valid_services:
ret.update( ret.update(
{ {
host_name: { host_name: {
"Error": "{0} is not a valid service name.".format(servi ce_name) "Error": "{} is not a valid service name.".format(servic e_name)
} }
} }
) )
return ret return ret
host_ref = _get_host_ref(service_instance, host, host_name=host_name) host_ref = _get_host_ref(service_instance, host, host_name=host_name)
service_manager = _get_service_manager(host_ref) service_manager = _get_service_manager(host_ref)
log.debug("Starting the '{0}' service on {1}.".format(service_name, host _name)) log.debug("Starting the '{}' service on {}.".format(service_name, host_n ame))
# Start the service # Start the service
try: try:
service_manager.StartService(id=temp_service_name) service_manager.StartService(id=temp_service_name)
except vim.fault.HostConfigFault as err: except vim.fault.HostConfigFault as err:
msg = "'vsphere.service_start' failed for host {0}: {1}".format( msg = "'vsphere.service_start' failed for host {}: {}".format(
host_name, err host_name, err
) )
log.debug(msg) log.debug(msg)
ret.update({host_name: {"Error": msg}}) ret.update({host_name: {"Error": msg}})
continue continue
# Some services are restricted by the vSphere License Level. # Some services are restricted by the vSphere License Level.
except vim.fault.RestrictedVersion as err: except vim.fault.RestrictedVersion as err:
log.debug(err) log.debug(err)
ret.update({host_name: {"Error": err}}) ret.update({host_name: {"Error": err}})
continue continue
skipping to change at line 3137 skipping to change at line 3124
else: else:
temp_service_name = service_name temp_service_name = service_name
for host_name in host_names: for host_name in host_names:
# Check if the service_name provided is a valid one. # Check if the service_name provided is a valid one.
# If we don't have a valid service, return. The service will be invalid for all hosts. # If we don't have a valid service, return. The service will be invalid for all hosts.
if service_name not in valid_services: if service_name not in valid_services:
ret.update( ret.update(
{ {
host_name: { host_name: {
"Error": "{0} is not a valid service name.".format(servi ce_name) "Error": "{} is not a valid service name.".format(servic e_name)
} }
} }
) )
return ret return ret
host_ref = _get_host_ref(service_instance, host, host_name=host_name) host_ref = _get_host_ref(service_instance, host, host_name=host_name)
service_manager = _get_service_manager(host_ref) service_manager = _get_service_manager(host_ref)
log.debug("Stopping the '{0}' service on {1}.".format(service_name, host _name)) log.debug("Stopping the '{}' service on {}.".format(service_name, host_n ame))
# Stop the service. # Stop the service.
try: try:
service_manager.StopService(id=temp_service_name) service_manager.StopService(id=temp_service_name)
except vim.fault.HostConfigFault as err: except vim.fault.HostConfigFault as err:
msg = "'vsphere.service_stop' failed for host {0}: {1}".format( msg = "'vsphere.service_stop' failed for host {}: {}".format(host_na
host_name, err me, err)
)
log.debug(msg) log.debug(msg)
ret.update({host_name: {"Error": msg}}) ret.update({host_name: {"Error": msg}})
continue continue
# Some services are restricted by the vSphere License Level. # Some services are restricted by the vSphere License Level.
except vim.fault.RestrictedVersion as err: except vim.fault.RestrictedVersion as err:
log.debug(err) log.debug(err)
ret.update({host_name: {"Error": err}}) ret.update({host_name: {"Error": err}})
continue continue
ret.update({host_name: {"Service Stopped": True}}) ret.update({host_name: {"Service Stopped": True}})
skipping to change at line 3263 skipping to change at line 3248
else: else:
temp_service_name = service_name temp_service_name = service_name
for host_name in host_names: for host_name in host_names:
# Check if the service_name provided is a valid one. # Check if the service_name provided is a valid one.
# If we don't have a valid service, return. The service will be invalid for all hosts. # If we don't have a valid service, return. The service will be invalid for all hosts.
if service_name not in valid_services: if service_name not in valid_services:
ret.update( ret.update(
{ {
host_name: { host_name: {
"Error": "{0} is not a valid service name.".format(servi ce_name) "Error": "{} is not a valid service name.".format(servic e_name)
} }
} }
) )
return ret return ret
host_ref = _get_host_ref(service_instance, host, host_name=host_name) host_ref = _get_host_ref(service_instance, host, host_name=host_name)
service_manager = _get_service_manager(host_ref) service_manager = _get_service_manager(host_ref)
log.debug( log.debug("Restarting the '{}' service on {}.".format(service_name, host_name))
"Restarting the '{0}' service on {1}.".format(service_name, host_name)
)
# Restart the service. # Restart the service.
try: try:
service_manager.RestartService(id=temp_service_name) service_manager.RestartService(id=temp_service_name)
except vim.fault.HostConfigFault as err: except vim.fault.HostConfigFault as err:
msg = "'vsphere.service_restart' failed for host {0}: {1}".format( msg = "'vsphere.service_restart' failed for host {}: {}".format(
host_name, err host_name, err
) )
log.debug(msg) log.debug(msg)
ret.update({host_name: {"Error": msg}}) ret.update({host_name: {"Error": msg}})
continue continue
# Some services are restricted by the vSphere License Level. # Some services are restricted by the vSphere License Level.
except vim.fault.RestrictedVersion as err: except vim.fault.RestrictedVersion as err:
log.debug(err) log.debug(err)
ret.update({host_name: {"Error": err}}) ret.update({host_name: {"Error": err}})
continue continue
skipping to change at line 3395 skipping to change at line 3378
] ]
ret = {} ret = {}
for host_name in host_names: for host_name in host_names:
# Check if the service_name provided is a valid one. # Check if the service_name provided is a valid one.
# If we don't have a valid service, return. The service will be invalid for all hosts. # If we don't have a valid service, return. The service will be invalid for all hosts.
if service_name not in valid_services: if service_name not in valid_services:
ret.update( ret.update(
{ {
host_name: { host_name: {
"Error": "{0} is not a valid service name.".format(servi ce_name) "Error": "{} is not a valid service name.".format(servic e_name)
} }
} }
) )
return ret return ret
host_ref = _get_host_ref(service_instance, host, host_name=host_name) host_ref = _get_host_ref(service_instance, host, host_name=host_name)
service_manager = _get_service_manager(host_ref) service_manager = _get_service_manager(host_ref)
services = host_ref.configManager.serviceSystem.serviceInfo.service services = host_ref.configManager.serviceSystem.serviceInfo.service
# Services are stored in a general list - we need loop through the list and find # Services are stored in a general list - we need loop through the list and find
skipping to change at line 3424 skipping to change at line 3407
if service.key == "TSM-SSH": if service.key == "TSM-SSH":
service_key = "TSM-SSH" service_key = "TSM-SSH"
# If we have a service_key, we've found a match. Update the policy. # If we have a service_key, we've found a match. Update the policy.
if service_key: if service_key:
try: try:
service_manager.UpdateServicePolicy( service_manager.UpdateServicePolicy(
id=service_key, policy=service_policy id=service_key, policy=service_policy
) )
except vim.fault.NotFound: except vim.fault.NotFound:
msg = "The service name '{0}' was not found.".format(service _name) msg = "The service name '{}' was not found.".format(service_ name)
log.debug(msg) log.debug(msg)
ret.update({host_name: {"Error": msg}}) ret.update({host_name: {"Error": msg}})
continue continue
# Some services are restricted by the vSphere License Level. # Some services are restricted by the vSphere License Level.
except vim.fault.HostConfigFault as err: except vim.fault.HostConfigFault as err:
msg = "'vsphere.set_service_policy' failed for host {0}: {1} ".format( msg = "'vsphere.set_service_policy' failed for host {}: {}". format(
host_name, err host_name, err
) )
log.debug(msg) log.debug(msg)
ret.update({host_name: {"Error": msg}}) ret.update({host_name: {"Error": msg}})
continue continue
ret.update({host_name: True}) ret.update({host_name: True})
# If we made it this far, something else has gone wrong. # If we made it this far, something else has gone wrong.
if ret.get(host_name) is None: if ret.get(host_name) is None:
msg = "Could not find service '{0}' for host '{1}'.".format( msg = "Could not find service '{}' for host '{}'.".format(
service_name, host_name service_name, host_name
) )
log.debug(msg) log.debug(msg)
ret.update({host_name: {"Error": msg}}) ret.update({host_name: {"Error": msg}})
return ret return ret
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@ignores_kwargs("credstore") @ignores_kwargs("credstore")
def update_host_datetime( def update_host_datetime(
skipping to change at line 3506 skipping to change at line 3489
host=host, username=username, password=password, protocol=protocol, port=port host=host, username=username, password=password, protocol=protocol, port=port
) )
host_names = _check_hosts(service_instance, host, host_names) host_names = _check_hosts(service_instance, host, host_names)
ret = {} ret = {}
for host_name in host_names: for host_name in host_names:
host_ref = _get_host_ref(service_instance, host, host_name=host_name) host_ref = _get_host_ref(service_instance, host, host_name=host_name)
date_time_manager = _get_date_time_mgr(host_ref) date_time_manager = _get_date_time_mgr(host_ref)
try: try:
date_time_manager.UpdateDateTime(datetime.datetime.utcnow()) date_time_manager.UpdateDateTime(datetime.datetime.utcnow())
except vim.fault.HostConfigFault as err: except vim.fault.HostConfigFault as err:
msg = "'vsphere.update_date_time' failed for host {0}: {1}".format( msg = "'vsphere.update_date_time' failed for host {}: {}".format(
host_name, err host_name, err
) )
log.debug(msg) log.debug(msg)
ret.update({host_name: {"Error": msg}}) ret.update({host_name: {"Error": msg}})
continue continue
ret.update({host_name: {"Datetime Updated": True}}) ret.update({host_name: {"Datetime Updated": True}})
return ret return ret
skipping to change at line 3572 skipping to change at line 3555
user_account.id = username user_account.id = username
user_account.password = new_password user_account.password = new_password
# Update the password # Update the password
try: try:
account_manager.UpdateUser(user_account) account_manager.UpdateUser(user_account)
except vmodl.fault.SystemError as err: except vmodl.fault.SystemError as err:
raise CommandExecutionError(err.msg) raise CommandExecutionError(err.msg)
except vim.fault.UserNotFound: except vim.fault.UserNotFound:
raise CommandExecutionError( raise CommandExecutionError(
"'vsphere.update_host_password' failed for host {0}: " "'vsphere.update_host_password' failed for host {}: "
"User was not found.".format(host) "User was not found.".format(host)
) )
# If the username and password already exist, we don't need to do anything. # If the username and password already exist, we don't need to do anything.
except vim.fault.AlreadyExists: except vim.fault.AlreadyExists:
pass pass
return True return True
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@ignores_kwargs("credstore") @ignores_kwargs("credstore")
skipping to change at line 3639 skipping to change at line 3622
host_names = _check_hosts(service_instance, host, host_names) host_names = _check_hosts(service_instance, host, host_names)
ret = {} ret = {}
for host_name in host_names: for host_name in host_names:
host_ref = _get_host_ref(service_instance, host, host_name=host_name) host_ref = _get_host_ref(service_instance, host, host_name=host_name)
vmotion_system = host_ref.configManager.vmotionSystem vmotion_system = host_ref.configManager.vmotionSystem
# Disable VMotion for the host by removing the VNic selected to use for VMotion. # Disable VMotion for the host by removing the VNic selected to use for VMotion.
try: try:
vmotion_system.DeselectVnic() vmotion_system.DeselectVnic()
except vim.fault.HostConfigFault as err: except vim.fault.HostConfigFault as err:
msg = "vsphere.vmotion_disable failed: {0}".format(err) msg = "vsphere.vmotion_disable failed: {}".format(err)
log.debug(msg) log.debug(msg)
ret.update({host_name: {"Error": msg, "VMotion Disabled": False}}) ret.update({host_name: {"Error": msg, "VMotion Disabled": False}})
continue continue
ret.update({host_name: {"VMotion Disabled": True}}) ret.update({host_name: {"VMotion Disabled": True}})
return ret return ret
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@ignores_kwargs("credstore") @ignores_kwargs("credstore")
skipping to change at line 3710 skipping to change at line 3693
host_names = _check_hosts(service_instance, host, host_names) host_names = _check_hosts(service_instance, host, host_names)
ret = {} ret = {}
for host_name in host_names: for host_name in host_names:
host_ref = _get_host_ref(service_instance, host, host_name=host_name) host_ref = _get_host_ref(service_instance, host, host_name=host_name)
vmotion_system = host_ref.configManager.vmotionSystem vmotion_system = host_ref.configManager.vmotionSystem
# Enable VMotion for the host by setting the given device to provide the VNic to use for VMotion. # Enable VMotion for the host by setting the given device to provide the VNic to use for VMotion.
try: try:
vmotion_system.SelectVnic(device) vmotion_system.SelectVnic(device)
except vim.fault.HostConfigFault as err: except vim.fault.HostConfigFault as err:
msg = "vsphere.vmotion_disable failed: {0}".format(err) msg = "vsphere.vmotion_disable failed: {}".format(err)
log.debug(msg) log.debug(msg)
ret.update({host_name: {"Error": msg, "VMotion Enabled": False}}) ret.update({host_name: {"Error": msg, "VMotion Enabled": False}})
continue continue
ret.update({host_name: {"VMotion Enabled": True}}) ret.update({host_name: {"VMotion Enabled": True}})
return ret return ret
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@ignores_kwargs("credstore") @ignores_kwargs("credstore")
skipping to change at line 3770 skipping to change at line 3753
salt '*' vsphere.vsan_add_disks my.vcenter.location root bad-password \ salt '*' vsphere.vsan_add_disks my.vcenter.location root bad-password \
host_names='[esxi-1.host.com, esxi-2.host.com]' host_names='[esxi-1.host.com, esxi-2.host.com]'
""" """
service_instance = salt.utils.vmware.get_service_instance( service_instance = salt.utils.vmware.get_service_instance(
host=host, username=username, password=password, protocol=protocol, port=port host=host, username=username, password=password, protocol=protocol, port=port
) )
host_names = _check_hosts(service_instance, host, host_names) host_names = _check_hosts(service_instance, host, host_names)
response = _get_vsan_eligible_disks(service_instance, host, host_names) response = _get_vsan_eligible_disks(service_instance, host, host_names)
ret = {} ret = {}
for host_name, value in six.iteritems(response): for host_name, value in response.items():
host_ref = _get_host_ref(service_instance, host, host_name=host_name) host_ref = _get_host_ref(service_instance, host, host_name=host_name)
vsan_system = host_ref.configManager.vsanSystem vsan_system = host_ref.configManager.vsanSystem
# We must have a VSAN Config in place before we can manipulate it. # We must have a VSAN Config in place before we can manipulate it.
if vsan_system is None: if vsan_system is None:
msg = ( msg = (
"VSAN System Config Manager is unset for host '{0}'. " "VSAN System Config Manager is unset for host '{}'. "
"VSAN configuration cannot be changed without a configured " "VSAN configuration cannot be changed without a configured "
"VSAN System.".format(host_name) "VSAN System.".format(host_name)
) )
log.debug(msg) log.debug(msg)
ret.update({host_name: {"Error": msg}}) ret.update({host_name: {"Error": msg}})
else: else:
eligible = value.get("Eligible") eligible = value.get("Eligible")
error = value.get("Error") error = value.get("Error")
if eligible and isinstance(eligible, list): if eligible and isinstance(eligible, list):
skipping to change at line 3799 skipping to change at line 3782
try: try:
task = vsan_system.AddDisks(eligible) task = vsan_system.AddDisks(eligible)
salt.utils.vmware.wait_for_task( salt.utils.vmware.wait_for_task(
task, host_name, "Adding disks to VSAN", sleep_seconds=3 task, host_name, "Adding disks to VSAN", sleep_seconds=3
) )
except vim.fault.InsufficientDisks as err: except vim.fault.InsufficientDisks as err:
log.debug(err.msg) log.debug(err.msg)
ret.update({host_name: {"Error": err.msg}}) ret.update({host_name: {"Error": err.msg}})
continue continue
except Exception as err: # pylint: disable=broad-except except Exception as err: # pylint: disable=broad-except
msg = "'vsphere.vsan_add_disks' failed for host {0}: {1}".fo rmat( msg = "'vsphere.vsan_add_disks' failed for host {}: {}".form at(
host_name, err host_name, err
) )
log.debug(msg) log.debug(msg)
ret.update({host_name: {"Error": msg}}) ret.update({host_name: {"Error": msg}})
continue continue
log.debug( log.debug(
"Successfully added disks to the VSAN system for host '{0}'. ".format( "Successfully added disks to the VSAN system for host '{}'." .format(
host_name host_name
) )
) )
# We need to return ONLY the disk names, otherwise Message Pack can't deserialize the disk objects. # We need to return ONLY the disk names, otherwise Message Pack can't deserialize the disk objects.
disk_names = [] disk_names = []
for disk in eligible: for disk in eligible:
disk_names.append(disk.canonicalName) disk_names.append(disk.canonicalName)
ret.update({host_name: {"Disks Added": disk_names}}) ret.update({host_name: {"Disks Added": disk_names}})
elif eligible and isinstance(eligible, six.string_types): elif eligible and isinstance(eligible, str):
# If we have a string type in the eligible value, we don't # If we have a string type in the eligible value, we don't
# have any VSAN-eligible disks. Pull the message through. # have any VSAN-eligible disks. Pull the message through.
ret.update({host_name: {"Disks Added": eligible}}) ret.update({host_name: {"Disks Added": eligible}})
elif error: elif error:
# If we hit an error, populate the Error return dict for state functions. # If we hit an error, populate the Error return dict for state functions.
ret.update({host_name: {"Error": error}}) ret.update({host_name: {"Error": error}})
else: else:
# If we made it this far, we somehow have eligible disks, but they didn't # If we made it this far, we somehow have eligible disks, but they didn't
# match the disk list and just got an empty list of matching disks. # match the disk list and just got an empty list of matching disks.
ret.update( ret.update(
skipping to change at line 3895 skipping to change at line 3878
host_names = _check_hosts(service_instance, host, host_names) host_names = _check_hosts(service_instance, host, host_names)
ret = {} ret = {}
for host_name in host_names: for host_name in host_names:
host_ref = _get_host_ref(service_instance, host, host_name=host_name) host_ref = _get_host_ref(service_instance, host, host_name=host_name)
vsan_system = host_ref.configManager.vsanSystem vsan_system = host_ref.configManager.vsanSystem
# We must have a VSAN Config in place before we can manipulate it. # We must have a VSAN Config in place before we can manipulate it.
if vsan_system is None: if vsan_system is None:
msg = ( msg = (
"VSAN System Config Manager is unset for host '{0}'. " "VSAN System Config Manager is unset for host '{}'. "
"VSAN configuration cannot be changed without a configured " "VSAN configuration cannot be changed without a configured "
"VSAN System.".format(host_name) "VSAN System.".format(host_name)
) )
log.debug(msg) log.debug(msg)
ret.update({host_name: {"Error": msg}}) ret.update({host_name: {"Error": msg}})
else: else:
try: try:
# Disable vsan on the host # Disable vsan on the host
task = vsan_system.UpdateVsan_Task(vsan_config) task = vsan_system.UpdateVsan_Task(vsan_config)
salt.utils.vmware.wait_for_task( salt.utils.vmware.wait_for_task(
task, host_name, "Disabling VSAN", sleep_seconds=3 task, host_name, "Disabling VSAN", sleep_seconds=3
) )
except vmodl.fault.SystemError as err: except vmodl.fault.SystemError as err:
log.debug(err.msg) log.debug(err.msg)
ret.update({host_name: {"Error": err.msg}}) ret.update({host_name: {"Error": err.msg}})
continue continue
except Exception as err: # pylint: disable=broad-except except Exception as err: # pylint: disable=broad-except
msg = "'vsphere.vsan_disable' failed for host {0}: {1}".format( msg = "'vsphere.vsan_disable' failed for host {}: {}".format(
host_name, err host_name, err
) )
log.debug(msg) log.debug(msg)
ret.update({host_name: {"Error": msg}}) ret.update({host_name: {"Error": msg}})
continue continue
ret.update({host_name: {"VSAN Disabled": True}}) ret.update({host_name: {"VSAN Disabled": True}})
return ret return ret
skipping to change at line 3983 skipping to change at line 3966
host_names = _check_hosts(service_instance, host, host_names) host_names = _check_hosts(service_instance, host, host_names)
ret = {} ret = {}
for host_name in host_names: for host_name in host_names:
host_ref = _get_host_ref(service_instance, host, host_name=host_name) host_ref = _get_host_ref(service_instance, host, host_name=host_name)
vsan_system = host_ref.configManager.vsanSystem vsan_system = host_ref.configManager.vsanSystem
# We must have a VSAN Config in place before we can manipulate it. # We must have a VSAN Config in place before we can manipulate it.
if vsan_system is None: if vsan_system is None:
msg = ( msg = (
"VSAN System Config Manager is unset for host '{0}'. " "VSAN System Config Manager is unset for host '{}'. "
"VSAN configuration cannot be changed without a configured " "VSAN configuration cannot be changed without a configured "
"VSAN System.".format(host_name) "VSAN System.".format(host_name)
) )
log.debug(msg) log.debug(msg)
ret.update({host_name: {"Error": msg}}) ret.update({host_name: {"Error": msg}})
else: else:
try: try:
# Enable vsan on the host # Enable vsan on the host
task = vsan_system.UpdateVsan_Task(vsan_config) task = vsan_system.UpdateVsan_Task(vsan_config)
salt.utils.vmware.wait_for_task( salt.utils.vmware.wait_for_task(
task, host_name, "Enabling VSAN", sleep_seconds=3 task, host_name, "Enabling VSAN", sleep_seconds=3
) )
except vmodl.fault.SystemError as err: except vmodl.fault.SystemError as err:
log.debug(err.msg) log.debug(err.msg)
ret.update({host_name: {"Error": err.msg}}) ret.update({host_name: {"Error": err.msg}})
continue continue
except vim.fault.VsanFault as err: except vim.fault.VsanFault as err:
msg = "'vsphere.vsan_enable' failed for host {0}: {1}".format( msg = "'vsphere.vsan_enable' failed for host {}: {}".format(
host_name, err host_name, err
) )
log.debug(msg) log.debug(msg)
ret.update({host_name: {"Error": msg}}) ret.update({host_name: {"Error": msg}})
continue continue
ret.update({host_name: {"VSAN Enabled": True}}) ret.update({host_name: {"VSAN Enabled": True}})
return ret return ret
def _get_dvs_config_dict(dvs_name, dvs_config): def _get_dvs_config_dict(dvs_name, dvs_config):
""" """
Returns the dict representation of the DVS config Returns the dict representation of the DVS config
dvs_name dvs_name
The name of the DVS The name of the DVS
dvs_config dvs_config
The DVS config The DVS config
""" """
log.trace("Building the dict of the DVS '{0}' config".format(dvs_name)) log.trace("Building the dict of the DVS '{}' config".format(dvs_name))
conf_dict = { conf_dict = {
"name": dvs_name, "name": dvs_name,
"contact_email": dvs_config.contact.contact, "contact_email": dvs_config.contact.contact,
"contact_name": dvs_config.contact.name, "contact_name": dvs_config.contact.name,
"description": dvs_config.description, "description": dvs_config.description,
"lacp_api_version": dvs_config.lacpApiVersion, "lacp_api_version": dvs_config.lacpApiVersion,
"network_resource_control_version": dvs_config.networkResourceControlVer sion, "network_resource_control_version": dvs_config.networkResourceControlVer sion,
"network_resource_management_enabled": dvs_config.networkResourceManagem entEnabled, "network_resource_management_enabled": dvs_config.networkResourceManagem entEnabled,
"max_mtu": dvs_config.maxMtu, "max_mtu": dvs_config.maxMtu,
} }
skipping to change at line 4048 skipping to change at line 4031
""" """
Returns the dict representation of the DVS link discovery protocol Returns the dict representation of the DVS link discovery protocol
dvs_name dvs_name
The name of the DVS The name of the DVS
dvs_link_disc_protocl dvs_link_disc_protocl
The DVS link discovery protocol The DVS link discovery protocol
""" """
log.trace( log.trace(
"Building the dict of the DVS '{0}' link discovery " "protocol".format(d vs_name) "Building the dict of the DVS '{}' link discovery " "protocol".format(dv s_name)
) )
return { return {
"operation": dvs_link_disc_protocol.operation, "operation": dvs_link_disc_protocol.operation,
"protocol": dvs_link_disc_protocol.protocol, "protocol": dvs_link_disc_protocol.protocol,
} }
def _get_dvs_product_info(dvs_name, dvs_product_info): def _get_dvs_product_info(dvs_name, dvs_product_info):
""" """
Returns the dict representation of the DVS product_info Returns the dict representation of the DVS product_info
dvs_name dvs_name
The name of the DVS The name of the DVS
dvs_product_info dvs_product_info
The DVS product info The DVS product info
""" """
log.trace("Building the dict of the DVS '{0}' product " "info".format(dvs_na me)) log.trace("Building the dict of the DVS '{}' product " "info".format(dvs_nam e))
return { return {
"name": dvs_product_info.name, "name": dvs_product_info.name,
"vendor": dvs_product_info.vendor, "vendor": dvs_product_info.vendor,
"version": dvs_product_info.version, "version": dvs_product_info.version,
} }
def _get_dvs_capability(dvs_name, dvs_capability): def _get_dvs_capability(dvs_name, dvs_capability):
""" """
Returns the dict representation of the DVS product_info Returns the dict representation of the DVS product_info
dvs_name dvs_name
The name of the DVS The name of the DVS
dvs_capability dvs_capability
The DVS capability The DVS capability
""" """
log.trace("Building the dict of the DVS '{0}' capability" "".format(dvs_name )) log.trace("Building the dict of the DVS '{}' capability" "".format(dvs_name) )
return { return {
"operation_supported": dvs_capability.dvsOperationSupported, "operation_supported": dvs_capability.dvsOperationSupported,
"portgroup_operation_supported": dvs_capability.dvPortGroupOperationSupp orted, "portgroup_operation_supported": dvs_capability.dvPortGroupOperationSupp orted,
"port_operation_supported": dvs_capability.dvPortOperationSupported, "port_operation_supported": dvs_capability.dvPortOperationSupported,
} }
def _get_dvs_infrastructure_traffic_resources(dvs_name, dvs_infra_traffic_ress): def _get_dvs_infrastructure_traffic_resources(dvs_name, dvs_infra_traffic_ress):
""" """
Returns a list of dict representations of the DVS infrastructure traffic Returns a list of dict representations of the DVS infrastructure traffic
resource resource
dvs_name dvs_name
The name of the DVS The name of the DVS
dvs_infra_traffic_ress dvs_infra_traffic_ress
The DVS infrastructure traffic resources The DVS infrastructure traffic resources
""" """
log.trace( log.trace(
"Building the dicts of the DVS '{0}' infrastructure traffic " "Building the dicts of the DVS '{}' infrastructure traffic "
"resources".format(dvs_name) "resources".format(dvs_name)
) )
res_dicts = [] res_dicts = []
for res in dvs_infra_traffic_ress: for res in dvs_infra_traffic_ress:
res_dict = { res_dict = {
"key": res.key, "key": res.key,
"limit": res.allocationInfo.limit, "limit": res.allocationInfo.limit,
"reservation": res.allocationInfo.reservation, "reservation": res.allocationInfo.reservation,
} }
if res.allocationInfo.shares: if res.allocationInfo.shares:
skipping to change at line 4344 skipping to change at line 4327
Name of the DVS to be created. Name of the DVS to be created.
service_instance service_instance
Service instance (vim.ServiceInstance) of the vCenter. Service instance (vim.ServiceInstance) of the vCenter.
Default is None. Default is None.
.. code-block:: bash .. code-block:: bash
salt '*' vsphere.create_dvs dvs dict=$dvs_dict dvs_name=dvs_name salt '*' vsphere.create_dvs dvs dict=$dvs_dict dvs_name=dvs_name
""" """
log.trace("Creating dvs '{0}' with dict = {1}".format(dvs_name, dvs_dict)) log.trace("Creating dvs '{}' with dict = {}".format(dvs_name, dvs_dict))
proxy_type = get_proxy_type() proxy_type = get_proxy_type()
if proxy_type == "esxdatacenter": if proxy_type == "esxdatacenter":
datacenter = __salt__["esxdatacenter.get_details"]()["datacenter"] datacenter = __salt__["esxdatacenter.get_details"]()["datacenter"]
dc_ref = _get_proxy_target(service_instance) dc_ref = _get_proxy_target(service_instance)
elif proxy_type == "esxcluster": elif proxy_type == "esxcluster":
datacenter = __salt__["esxcluster.get_details"]()["datacenter"] datacenter = __salt__["esxcluster.get_details"]()["datacenter"]
dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
# Make the name of the DVS consistent with the call # Make the name of the DVS consistent with the call
dvs_dict["name"] = dvs_name dvs_dict["name"] = dvs_name
# Build the config spec from the input # Build the config spec from the input
skipping to change at line 4384 skipping to change at line 4367
_apply_dvs_infrastructure_traffic_resources( _apply_dvs_infrastructure_traffic_resources(
dvs_create_spec.configSpec.infrastructureTrafficResourceConfig, dvs_create_spec.configSpec.infrastructureTrafficResourceConfig,
dvs_dict["infrastructure_traffic_resource_pools"], dvs_dict["infrastructure_traffic_resource_pools"],
) )
log.trace("dvs_create_spec = {}".format(dvs_create_spec)) log.trace("dvs_create_spec = {}".format(dvs_create_spec))
salt.utils.vmware.create_dvs(dc_ref, dvs_name, dvs_create_spec) salt.utils.vmware.create_dvs(dc_ref, dvs_name, dvs_create_spec)
if "network_resource_management_enabled" in dvs_dict: if "network_resource_management_enabled" in dvs_dict:
dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs_name]) dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs_name])
if not dvs_refs: if not dvs_refs:
raise VMwareObjectRetrievalError( raise VMwareObjectRetrievalError(
"DVS '{0}' wasn't found in datacenter '{1}'" "DVS '{}' wasn't found in datacenter '{}'"
"".format(dvs_name, datacenter) "".format(dvs_name, datacenter)
) )
dvs_ref = dvs_refs[0] dvs_ref = dvs_refs[0]
salt.utils.vmware.set_dvs_network_resource_management_enabled( salt.utils.vmware.set_dvs_network_resource_management_enabled(
dvs_ref, dvs_dict["network_resource_management_enabled"] dvs_ref, dvs_dict["network_resource_management_enabled"]
) )
return True return True
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@supports_proxies("esxdatacenter", "esxcluster") @supports_proxies("esxdatacenter", "esxcluster")
skipping to change at line 4420 skipping to change at line 4403
service_instance service_instance
Service instance (vim.ServiceInstance) of the vCenter. Service instance (vim.ServiceInstance) of the vCenter.
Default is None. Default is None.
.. code-block:: bash .. code-block:: bash
salt '*' vsphere.update_dvs dvs_dict=$dvs_dict dvs=dvs1 salt '*' vsphere.update_dvs dvs_dict=$dvs_dict dvs=dvs1
""" """
# Remove ignored properties # Remove ignored properties
log.trace("Updating dvs '{0}' with dict = {1}".format(dvs, dvs_dict)) log.trace("Updating dvs '{}' with dict = {}".format(dvs, dvs_dict))
for prop in ["product_info", "capability", "uplink_names", "name"]: for prop in ["product_info", "capability", "uplink_names", "name"]:
if prop in dvs_dict: if prop in dvs_dict:
del dvs_dict[prop] del dvs_dict[prop]
proxy_type = get_proxy_type() proxy_type = get_proxy_type()
if proxy_type == "esxdatacenter": if proxy_type == "esxdatacenter":
datacenter = __salt__["esxdatacenter.get_details"]()["datacenter"] datacenter = __salt__["esxdatacenter.get_details"]()["datacenter"]
dc_ref = _get_proxy_target(service_instance) dc_ref = _get_proxy_target(service_instance)
elif proxy_type == "esxcluster": elif proxy_type == "esxcluster":
datacenter = __salt__["esxcluster.get_details"]()["datacenter"] datacenter = __salt__["esxcluster.get_details"]()["datacenter"]
dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs]) dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
if not dvs_refs: if not dvs_refs:
raise VMwareObjectRetrievalError( raise VMwareObjectRetrievalError(
"DVS '{0}' wasn't found in " "datacenter '{1}'" "".format(dvs, datac enter) "DVS '{}' wasn't found in " "datacenter '{}'" "".format(dvs, datacen ter)
) )
dvs_ref = dvs_refs[0] dvs_ref = dvs_refs[0]
# Build the config spec from the input # Build the config spec from the input
dvs_props = salt.utils.vmware.get_properties_of_managed_object( dvs_props = salt.utils.vmware.get_properties_of_managed_object(
dvs_ref, ["config", "capability"] dvs_ref, ["config", "capability"]
) )
dvs_config = vim.VMwareDVSConfigSpec() dvs_config = vim.VMwareDVSConfigSpec()
# Copy all of the properties in the config of the of the DVS to a # Copy all of the properties in the config of the of the DVS to a
# DvsConfigSpec # DvsConfigSpec
skipped_properties = ["host"] skipped_properties = ["host"]
skipping to change at line 4482 skipping to change at line 4465
def _get_dvportgroup_out_shaping(pg_name, pg_default_port_config): def _get_dvportgroup_out_shaping(pg_name, pg_default_port_config):
""" """
Returns the out shaping policy of a distributed virtual portgroup Returns the out shaping policy of a distributed virtual portgroup
pg_name pg_name
The name of the portgroup The name of the portgroup
pg_default_port_config pg_default_port_config
The default port config of the portgroup The default port config of the portgroup
""" """
log.trace("Retrieving portgroup's '{0}' out shaping " "config".format(pg_nam e)) log.trace("Retrieving portgroup's '{}' out shaping " "config".format(pg_name ))
out_shaping_policy = pg_default_port_config.outShapingPolicy out_shaping_policy = pg_default_port_config.outShapingPolicy
if not out_shaping_policy: if not out_shaping_policy:
return {} return {}
return { return {
"average_bandwidth": out_shaping_policy.averageBandwidth.value, "average_bandwidth": out_shaping_policy.averageBandwidth.value,
"burst_size": out_shaping_policy.burstSize.value, "burst_size": out_shaping_policy.burstSize.value,
"enabled": out_shaping_policy.enabled.value, "enabled": out_shaping_policy.enabled.value,
"peak_bandwidth": out_shaping_policy.peakBandwidth.value, "peak_bandwidth": out_shaping_policy.peakBandwidth.value,
} }
def _get_dvportgroup_security_policy(pg_name, pg_default_port_config): def _get_dvportgroup_security_policy(pg_name, pg_default_port_config):
""" """
Returns the security policy of a distributed virtual portgroup Returns the security policy of a distributed virtual portgroup
pg_name pg_name
The name of the portgroup The name of the portgroup
pg_default_port_config pg_default_port_config
The default port config of the portgroup The default port config of the portgroup
""" """
log.trace("Retrieving portgroup's '{0}' security policy " "config".format(pg _name)) log.trace("Retrieving portgroup's '{}' security policy " "config".format(pg_ name))
sec_policy = pg_default_port_config.securityPolicy sec_policy = pg_default_port_config.securityPolicy
if not sec_policy: if not sec_policy:
return {} return {}
return { return {
"allow_promiscuous": sec_policy.allowPromiscuous.value, "allow_promiscuous": sec_policy.allowPromiscuous.value,
"forged_transmits": sec_policy.forgedTransmits.value, "forged_transmits": sec_policy.forgedTransmits.value,
"mac_changes": sec_policy.macChanges.value, "mac_changes": sec_policy.macChanges.value,
} }
def _get_dvportgroup_teaming(pg_name, pg_default_port_config): def _get_dvportgroup_teaming(pg_name, pg_default_port_config):
""" """
Returns the teaming of a distributed virtual portgroup Returns the teaming of a distributed virtual portgroup
pg_name pg_name
The name of the portgroup The name of the portgroup
pg_default_port_config pg_default_port_config
The default port config of the portgroup The default port config of the portgroup
""" """
log.trace("Retrieving portgroup's '{0}' teaming" "config".format(pg_name)) log.trace("Retrieving portgroup's '{}' teaming" "config".format(pg_name))
teaming_policy = pg_default_port_config.uplinkTeamingPolicy teaming_policy = pg_default_port_config.uplinkTeamingPolicy
if not teaming_policy: if not teaming_policy:
return {} return {}
ret_dict = { ret_dict = {
"notify_switches": teaming_policy.notifySwitches.value, "notify_switches": teaming_policy.notifySwitches.value,
"policy": teaming_policy.policy.value, "policy": teaming_policy.policy.value,
"reverse_policy": teaming_policy.reversePolicy.value, "reverse_policy": teaming_policy.reversePolicy.value,
"rolling_order": teaming_policy.rollingOrder.value, "rolling_order": teaming_policy.rollingOrder.value,
} }
if teaming_policy.failureCriteria: if teaming_policy.failureCriteria:
skipping to change at line 4656 skipping to change at line 4639
if proxy_type == "esxdatacenter": if proxy_type == "esxdatacenter":
datacenter = __salt__["esxdatacenter.get_details"]()["datacenter"] datacenter = __salt__["esxdatacenter.get_details"]()["datacenter"]
dc_ref = _get_proxy_target(service_instance) dc_ref = _get_proxy_target(service_instance)
elif proxy_type == "esxcluster": elif proxy_type == "esxcluster":
datacenter = __salt__["esxcluster.get_details"]()["datacenter"] datacenter = __salt__["esxcluster.get_details"]()["datacenter"]
dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
if dvs: if dvs:
dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs]) dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
if not dvs_refs: if not dvs_refs:
raise VMwareObjectRetrievalError( raise VMwareObjectRetrievalError(
"DVS '{0}' was not " "retrieved".format(dvs) "DVS '{}' was not " "retrieved".format(dvs)
) )
dvs_ref = dvs_refs[0] dvs_ref = dvs_refs[0]
get_all_portgroups = True if not portgroup_names else False get_all_portgroups = True if not portgroup_names else False
for pg_ref in salt.utils.vmware.get_dvportgroups( for pg_ref in salt.utils.vmware.get_dvportgroups(
parent_ref=dvs_ref if dvs else dc_ref, parent_ref=dvs_ref if dvs else dc_ref,
portgroup_names=portgroup_names, portgroup_names=portgroup_names,
get_all_portgroups=get_all_portgroups, get_all_portgroups=get_all_portgroups,
): ):
ret_dict.append(_get_dvportgroup_dict(pg_ref)) ret_dict.append(_get_dvportgroup_dict(pg_ref))
skipping to change at line 4696 skipping to change at line 4679
""" """
proxy_type = get_proxy_type() proxy_type = get_proxy_type()
if proxy_type == "esxdatacenter": if proxy_type == "esxdatacenter":
datacenter = __salt__["esxdatacenter.get_details"]()["datacenter"] datacenter = __salt__["esxdatacenter.get_details"]()["datacenter"]
dc_ref = _get_proxy_target(service_instance) dc_ref = _get_proxy_target(service_instance)
elif proxy_type == "esxcluster": elif proxy_type == "esxcluster":
datacenter = __salt__["esxcluster.get_details"]()["datacenter"] datacenter = __salt__["esxcluster.get_details"]()["datacenter"]
dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs]) dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
if not dvs_refs: if not dvs_refs:
raise VMwareObjectRetrievalError("DVS '{0}' was not " "retrieved".format (dvs)) raise VMwareObjectRetrievalError("DVS '{}' was not " "retrieved".format( dvs))
uplink_pg_ref = salt.utils.vmware.get_uplink_dvportgroup(dvs_refs[0]) uplink_pg_ref = salt.utils.vmware.get_uplink_dvportgroup(dvs_refs[0])
return _get_dvportgroup_dict(uplink_pg_ref) return _get_dvportgroup_dict(uplink_pg_ref)
def _apply_dvportgroup_out_shaping(pg_name, out_shaping, out_shaping_conf): def _apply_dvportgroup_out_shaping(pg_name, out_shaping, out_shaping_conf):
""" """
Applies the values in out_shaping_conf to an out_shaping object Applies the values in out_shaping_conf to an out_shaping object
pg_name pg_name
The name of the portgroup The name of the portgroup
out_shaping out_shaping
The vim.DVSTrafficShapingPolicy to apply the config to The vim.DVSTrafficShapingPolicy to apply the config to
out_shaping_conf out_shaping_conf
The out shaping config The out shaping config
""" """
log.trace("Building portgroup's '{0}' out shaping " "policy".format(pg_name) ) log.trace("Building portgroup's '{}' out shaping " "policy".format(pg_name))
if out_shaping_conf.get("average_bandwidth"): if out_shaping_conf.get("average_bandwidth"):
out_shaping.averageBandwidth = vim.LongPolicy() out_shaping.averageBandwidth = vim.LongPolicy()
out_shaping.averageBandwidth.value = out_shaping_conf["average_bandwidth "] out_shaping.averageBandwidth.value = out_shaping_conf["average_bandwidth "]
if out_shaping_conf.get("burst_size"): if out_shaping_conf.get("burst_size"):
out_shaping.burstSize = vim.LongPolicy() out_shaping.burstSize = vim.LongPolicy()
out_shaping.burstSize.value = out_shaping_conf["burst_size"] out_shaping.burstSize.value = out_shaping_conf["burst_size"]
if "enabled" in out_shaping_conf: if "enabled" in out_shaping_conf:
out_shaping.enabled = vim.BoolPolicy() out_shaping.enabled = vim.BoolPolicy()
out_shaping.enabled.value = out_shaping_conf["enabled"] out_shaping.enabled.value = out_shaping_conf["enabled"]
if out_shaping_conf.get("peak_bandwidth"): if out_shaping_conf.get("peak_bandwidth"):
skipping to change at line 4740 skipping to change at line 4723
pg_name pg_name
The name of the portgroup The name of the portgroup
sec_policy sec_policy
The security policy object to apply the config to The security policy object to apply the config to
sec_policy_conf sec_policy_conf
The security policy config The security policy config
""" """
log.trace("Building portgroup's '{0}' security policy ".format(pg_name)) log.trace("Building portgroup's '{}' security policy ".format(pg_name))
if "allow_promiscuous" in sec_policy_conf: if "allow_promiscuous" in sec_policy_conf:
sec_policy.allowPromiscuous = vim.BoolPolicy() sec_policy.allowPromiscuous = vim.BoolPolicy()
sec_policy.allowPromiscuous.value = sec_policy_conf["allow_promiscuous"] sec_policy.allowPromiscuous.value = sec_policy_conf["allow_promiscuous"]
if "forged_transmits" in sec_policy_conf: if "forged_transmits" in sec_policy_conf:
sec_policy.forgedTransmits = vim.BoolPolicy() sec_policy.forgedTransmits = vim.BoolPolicy()
sec_policy.forgedTransmits.value = sec_policy_conf["forged_transmits"] sec_policy.forgedTransmits.value = sec_policy_conf["forged_transmits"]
if "mac_changes" in sec_policy_conf: if "mac_changes" in sec_policy_conf:
sec_policy.macChanges = vim.BoolPolicy() sec_policy.macChanges = vim.BoolPolicy()
sec_policy.macChanges.value = sec_policy_conf["mac_changes"] sec_policy.macChanges.value = sec_policy_conf["mac_changes"]
skipping to change at line 4764 skipping to change at line 4747
pg_name pg_name
The name of the portgroup The name of the portgroup
teaming teaming
The vim.VmwareUplinkPortTeamingPolicy to apply the config to The vim.VmwareUplinkPortTeamingPolicy to apply the config to
teaming_conf teaming_conf
The teaming config The teaming config
""" """
log.trace("Building portgroup's '{0}' teaming".format(pg_name)) log.trace("Building portgroup's '{}' teaming".format(pg_name))
if "notify_switches" in teaming_conf: if "notify_switches" in teaming_conf:
teaming.notifySwitches = vim.BoolPolicy() teaming.notifySwitches = vim.BoolPolicy()
teaming.notifySwitches.value = teaming_conf["notify_switches"] teaming.notifySwitches.value = teaming_conf["notify_switches"]
if "policy" in teaming_conf: if "policy" in teaming_conf:
teaming.policy = vim.StringPolicy() teaming.policy = vim.StringPolicy()
teaming.policy.value = teaming_conf["policy"] teaming.policy.value = teaming_conf["policy"]
if "reverse_policy" in teaming_conf: if "reverse_policy" in teaming_conf:
teaming.reversePolicy = vim.BoolPolicy() teaming.reversePolicy = vim.BoolPolicy()
teaming.reversePolicy.value = teaming_conf["reverse_policy"] teaming.reversePolicy.value = teaming_conf["reverse_policy"]
if "rolling_order" in teaming_conf: if "rolling_order" in teaming_conf:
skipping to change at line 4839 skipping to change at line 4822
pg_name pg_name
The name of the portgroup The name of the portgroup
pg_spec pg_spec
The vim.DVPortgroupConfigSpec to apply the config to The vim.DVPortgroupConfigSpec to apply the config to
pg_conf pg_conf
The portgroup config The portgroup config
""" """
log.trace("Building portgroup's '{0}' spec".format(pg_name)) log.trace("Building portgroup's '{}' spec".format(pg_name))
if "name" in pg_conf: if "name" in pg_conf:
pg_spec.name = pg_conf["name"] pg_spec.name = pg_conf["name"]
if "description" in pg_conf: if "description" in pg_conf:
pg_spec.description = pg_conf["description"] pg_spec.description = pg_conf["description"]
if "num_ports" in pg_conf: if "num_ports" in pg_conf:
pg_spec.numPorts = pg_conf["num_ports"] pg_spec.numPorts = pg_conf["num_ports"]
if "type" in pg_conf: if "type" in pg_conf:
pg_spec.type = pg_conf["type"] pg_spec.type = pg_conf["type"]
if not pg_spec.defaultPortConfig: if not pg_spec.defaultPortConfig:
skipping to change at line 4909 skipping to change at line 4892
service_instance service_instance
Service instance (vim.ServiceInstance) of the vCenter. Service instance (vim.ServiceInstance) of the vCenter.
Default is None. Default is None.
.. code-block:: bash .. code-block:: bash
salt '*' vsphere.create_dvportgroup portgroup_dict=<dict> salt '*' vsphere.create_dvportgroup portgroup_dict=<dict>
portgroup_name=pg1 dvs=dvs1 portgroup_name=pg1 dvs=dvs1
""" """
log.trace( log.trace(
"Creating portgroup'{0}' in dvs '{1}' " "Creating portgroup'{}' in dvs '{}' "
"with dict = {2}".format(portgroup_name, dvs, portgroup_dict) "with dict = {}".format(portgroup_name, dvs, portgroup_dict)
) )
proxy_type = get_proxy_type() proxy_type = get_proxy_type()
if proxy_type == "esxdatacenter": if proxy_type == "esxdatacenter":
datacenter = __salt__["esxdatacenter.get_details"]()["datacenter"] datacenter = __salt__["esxdatacenter.get_details"]()["datacenter"]
dc_ref = _get_proxy_target(service_instance) dc_ref = _get_proxy_target(service_instance)
elif proxy_type == "esxcluster": elif proxy_type == "esxcluster":
datacenter = __salt__["esxcluster.get_details"]()["datacenter"] datacenter = __salt__["esxcluster.get_details"]()["datacenter"]
dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs]) dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
if not dvs_refs: if not dvs_refs:
raise VMwareObjectRetrievalError("DVS '{0}' was not " "retrieved".format (dvs)) raise VMwareObjectRetrievalError("DVS '{}' was not " "retrieved".format( dvs))
# Make the name of the dvportgroup consistent with the parameter # Make the name of the dvportgroup consistent with the parameter
portgroup_dict["name"] = portgroup_name portgroup_dict["name"] = portgroup_name
spec = vim.DVPortgroupConfigSpec() spec = vim.DVPortgroupConfigSpec()
_apply_dvportgroup_config(portgroup_name, spec, portgroup_dict) _apply_dvportgroup_config(portgroup_name, spec, portgroup_dict)
salt.utils.vmware.create_dvportgroup(dvs_refs[0], spec) salt.utils.vmware.create_dvportgroup(dvs_refs[0], spec)
return True return True
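As a rough sketch, a portgroup_dict for create_dvportgroup could look like the following; the flat keys (description, num_ports, type) are the ones applied by _apply_dvportgroup_config above, while the nested section names are assumptions based on the shaping, security and teaming helpers.

.. code-block:: python

    # Hypothetical portgroup_dict; nested section names are assumed.
    portgroup_dict = {
        "description": "front-end portgroup",
        "num_ports": 128,
        "type": "earlyBinding",
        "security_policy": {
            "allow_promiscuous": False,
            "forged_transmits": False,
            "mac_changes": False,
        },
        "out_shaping": {"enabled": False},
        "teaming": {"notify_switches": True, "rolling_order": False},
    }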
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@supports_proxies("esxdatacenter", "esxcluster") @supports_proxies("esxdatacenter", "esxcluster")
@gets_service_instance_via_proxy @gets_service_instance_via_proxy
skipping to change at line 4959 skipping to change at line 4942
.. code-block:: bash .. code-block:: bash
salt '*' vsphere.update_dvportgroup portgroup_dict=<dict> salt '*' vsphere.update_dvportgroup portgroup_dict=<dict>
portgroup=pg1 portgroup=pg1
salt '*' vsphere.update_dvportgroup portgroup_dict=<dict> salt '*' vsphere.update_dvportgroup portgroup_dict=<dict>
portgroup=pg1 dvs=dvs1 portgroup=pg1 dvs=dvs1
""" """
log.trace( log.trace(
"Updating portgroup'{0}' in dvs '{1}' " "Updating portgroup'{}' in dvs '{}' "
"with dict = {2}".format(portgroup, dvs, portgroup_dict) "with dict = {}".format(portgroup, dvs, portgroup_dict)
) )
proxy_type = get_proxy_type() proxy_type = get_proxy_type()
if proxy_type == "esxdatacenter": if proxy_type == "esxdatacenter":
datacenter = __salt__["esxdatacenter.get_details"]()["datacenter"] datacenter = __salt__["esxdatacenter.get_details"]()["datacenter"]
dc_ref = _get_proxy_target(service_instance) dc_ref = _get_proxy_target(service_instance)
elif proxy_type == "esxcluster": elif proxy_type == "esxcluster":
datacenter = __salt__["esxcluster.get_details"]()["datacenter"] datacenter = __salt__["esxcluster.get_details"]()["datacenter"]
dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs]) dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
if not dvs_refs: if not dvs_refs:
raise VMwareObjectRetrievalError("DVS '{0}' was not " "retrieved".format (dvs)) raise VMwareObjectRetrievalError("DVS '{}' was not " "retrieved".format( dvs))
pg_refs = salt.utils.vmware.get_dvportgroups( pg_refs = salt.utils.vmware.get_dvportgroups(
dvs_refs[0], portgroup_names=[portgroup] dvs_refs[0], portgroup_names=[portgroup]
) )
if not pg_refs: if not pg_refs:
raise VMwareObjectRetrievalError( raise VMwareObjectRetrievalError(
"Portgroup '{0}' was not " "retrieved".format(portgroup) "Portgroup '{}' was not " "retrieved".format(portgroup)
) )
pg_props = salt.utils.vmware.get_properties_of_managed_object( pg_props = salt.utils.vmware.get_properties_of_managed_object(
pg_refs[0], ["config"] pg_refs[0], ["config"]
) )
spec = vim.DVPortgroupConfigSpec() spec = vim.DVPortgroupConfigSpec()
# Copy existing properties in spec # Copy existing properties in spec
for prop in [ for prop in [
"autoExpand", "autoExpand",
"configVersion", "configVersion",
"defaultPortConfig", "defaultPortConfig",
skipping to change at line 5023 skipping to change at line 5006
Name of the DVS containing the portgroups. Name of the DVS containing the portgroups.
service_instance service_instance
Service instance (vim.ServiceInstance) of the vCenter. Service instance (vim.ServiceInstance) of the vCenter.
Default is None. Default is None.
.. code-block:: bash .. code-block:: bash
salt '*' vsphere.remove_dvportgroup portgroup=pg1 dvs=dvs1 salt '*' vsphere.remove_dvportgroup portgroup=pg1 dvs=dvs1
""" """
log.trace("Removing portgroup'{0}' in dvs '{1}' " "".format(portgroup, dvs)) log.trace("Removing portgroup'{}' in dvs '{}' " "".format(portgroup, dvs))
proxy_type = get_proxy_type() proxy_type = get_proxy_type()
if proxy_type == "esxdatacenter": if proxy_type == "esxdatacenter":
datacenter = __salt__["esxdatacenter.get_details"]()["datacenter"] datacenter = __salt__["esxdatacenter.get_details"]()["datacenter"]
dc_ref = _get_proxy_target(service_instance) dc_ref = _get_proxy_target(service_instance)
elif proxy_type == "esxcluster": elif proxy_type == "esxcluster":
datacenter = __salt__["esxcluster.get_details"]()["datacenter"] datacenter = __salt__["esxcluster.get_details"]()["datacenter"]
dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs]) dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
if not dvs_refs: if not dvs_refs:
raise VMwareObjectRetrievalError("DVS '{0}' was not " "retrieved".format (dvs)) raise VMwareObjectRetrievalError("DVS '{}' was not " "retrieved".format( dvs))
pg_refs = salt.utils.vmware.get_dvportgroups( pg_refs = salt.utils.vmware.get_dvportgroups(
dvs_refs[0], portgroup_names=[portgroup] dvs_refs[0], portgroup_names=[portgroup]
) )
if not pg_refs: if not pg_refs:
raise VMwareObjectRetrievalError( raise VMwareObjectRetrievalError(
"Portgroup '{0}' was not " "retrieved".format(portgroup) "Portgroup '{}' was not " "retrieved".format(portgroup)
) )
salt.utils.vmware.remove_dvportgroup(pg_refs[0]) salt.utils.vmware.remove_dvportgroup(pg_refs[0])
return True return True
def _get_policy_dict(policy): def _get_policy_dict(policy):
"""Returns a dictionary representation of a policy""" """Returns a dictionary representation of a policy"""
profile_dict = { profile_dict = {
"name": policy.name, "name": policy.name,
"description": policy.description, "description": policy.description,
"resource_type": policy.resourceType.resourceType, "resource_type": policy.resourceType.resourceType,
skipping to change at line 5173 skipping to change at line 5156
""" """
profile_manager = salt.utils.pbm.get_profile_manager(service_instance) profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
ret_list = [ ret_list = [
_get_capability_definition_dict(c) _get_capability_definition_dict(c)
for c in salt.utils.pbm.get_capability_definitions(profile_manager) for c in salt.utils.pbm.get_capability_definitions(profile_manager)
] ]
return ret_list return ret_list
def _apply_policy_config(policy_spec, policy_dict): def _apply_policy_config(policy_spec, policy_dict):
"""Applies a policy dictionary to a policy spec""" """Applies a policy dictionary to a policy spec"""
log.trace("policy_dict = {0}".format(policy_dict)) log.trace("policy_dict = {}".format(policy_dict))
if policy_dict.get("name"): if policy_dict.get("name"):
policy_spec.name = policy_dict["name"] policy_spec.name = policy_dict["name"]
if policy_dict.get("description"): if policy_dict.get("description"):
policy_spec.description = policy_dict["description"] policy_spec.description = policy_dict["description"]
if policy_dict.get("subprofiles"): if policy_dict.get("subprofiles"):
# Incremental changes to subprofiles and capabilities are not # Incremental changes to subprofiles and capabilities are not
# supported because they would complicate updates too much # supported because they would complicate updates too much
# The whole configuration of all sub-profiles is expected and applied # The whole configuration of all sub-profiles is expected and applied
policy_spec.constraints = pbm.profile.SubProfileCapabilityConstraints() policy_spec.constraints = pbm.profile.SubProfileCapabilityConstraints()
subprofiles = [] subprofiles = []
skipping to change at line 5217 skipping to change at line 5200
constraint=[ constraint=[
pbm.capability.ConstraintInstance( pbm.capability.ConstraintInstance(
propertyInstance=[prop_inst_spec] propertyInstance=[prop_inst_spec]
) )
], ],
) )
cap_specs.append(cap_spec) cap_specs.append(cap_spec)
subprofile_spec.capability = cap_specs subprofile_spec.capability = cap_specs
subprofiles.append(subprofile_spec) subprofiles.append(subprofile_spec)
policy_spec.constraints.subProfiles = subprofiles policy_spec.constraints.subProfiles = subprofiles
log.trace("updated policy_spec = {0}".format(policy_spec)) log.trace("updated policy_spec = {}".format(policy_spec))
return policy_spec return policy_spec
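A hedged sketch of the policy_dict shape that _apply_policy_config expects; only name, description and subprofiles appear in the code above, and the per-capability field names below are assumptions.

.. code-block:: python

    # Hypothetical policy_dict for create/update_storage_policy.
    policy_dict = {
        "description": "Salt-managed storage policy",
        "subprofiles": [
            {
                "name": "Rule-Set 1",  # assumed field
                "capabilities": [
                    {
                        "namespace": "VSAN",                 # assumed
                        "id": "hostFailuresToTolerate",      # assumed
                        "setting": {"type": "scalar", "value": 1},  # assumed
                    }
                ],
            }
        ],
    }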
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@supports_proxies("esxdatacenter", "vcenter") @supports_proxies("esxdatacenter", "vcenter")
@gets_service_instance_via_proxy @gets_service_instance_via_proxy
def create_storage_policy(policy_name, policy_dict, service_instance=None): def create_storage_policy(policy_name, policy_dict, service_instance=None):
""" """
Creates a storage policy. Creates a storage policy.
Supported capability types: scalar, set, range. Supported capability types: scalar, set, range.
skipping to change at line 5248 skipping to change at line 5231
service_instance service_instance
Service instance (vim.ServiceInstance) of the vCenter. Service instance (vim.ServiceInstance) of the vCenter.
Default is None. Default is None.
.. code-block:: bash .. code-block:: bash
salt '*' vsphere.create_storage_policy policy_name='policy name' salt '*' vsphere.create_storage_policy policy_name='policy name'
policy_dict="$policy_dict" policy_dict="$policy_dict"
""" """
log.trace( log.trace(
"create storage policy '{0}', dict = {1}" "".format(policy_name, policy_ dict) "create storage policy '{}', dict = {}" "".format(policy_name, policy_di ct)
) )
profile_manager = salt.utils.pbm.get_profile_manager(service_instance) profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
policy_create_spec = pbm.profile.CapabilityBasedProfileCreateSpec() policy_create_spec = pbm.profile.CapabilityBasedProfileCreateSpec()
# Hardcode the storage profile resource type # Hardcode the storage profile resource type
policy_create_spec.resourceType = pbm.profile.ResourceType( policy_create_spec.resourceType = pbm.profile.ResourceType(
resourceType=pbm.profile.ResourceTypeEnum.STORAGE resourceType=pbm.profile.ResourceTypeEnum.STORAGE
) )
# Set name argument # Set name argument
policy_dict["name"] = policy_name policy_dict["name"] = policy_name
log.trace("Setting policy values in policy_update_spec") log.trace("Setting policy values in policy_update_spec")
skipping to change at line 5288 skipping to change at line 5271
service_instance service_instance
Service instance (vim.ServiceInstance) of the vCenter. Service instance (vim.ServiceInstance) of the vCenter.
Default is None. Default is None.
.. code-block:: bash .. code-block:: bash
salt '*' vsphere.update_storage_policy policy='policy name' salt '*' vsphere.update_storage_policy policy='policy name'
policy_dict="$policy_dict" policy_dict="$policy_dict"
""" """
log.trace("updating storage policy, dict = {0}".format(policy_dict)) log.trace("updating storage policy, dict = {}".format(policy_dict))
profile_manager = salt.utils.pbm.get_profile_manager(service_instance) profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
policies = salt.utils.pbm.get_storage_policies(profile_manager, [policy]) policies = salt.utils.pbm.get_storage_policies(profile_manager, [policy])
if not policies: if not policies:
raise VMwareObjectRetrievalError("Policy '{0}' was not found" "".format( policy)) raise VMwareObjectRetrievalError("Policy '{}' was not found" "".format(p olicy))
policy_ref = policies[0] policy_ref = policies[0]
policy_update_spec = pbm.profile.CapabilityBasedProfileUpdateSpec() policy_update_spec = pbm.profile.CapabilityBasedProfileUpdateSpec()
log.trace("Setting policy values in policy_update_spec") log.trace("Setting policy values in policy_update_spec")
for prop in ["description", "constraints"]: for prop in ["description", "constraints"]:
setattr(policy_update_spec, prop, getattr(policy_ref, prop)) setattr(policy_update_spec, prop, getattr(policy_ref, prop))
_apply_policy_config(policy_update_spec, policy_dict) _apply_policy_config(policy_update_spec, policy_dict)
salt.utils.pbm.update_storage_policy( salt.utils.pbm.update_storage_policy(
profile_manager, policy_ref, policy_update_spec profile_manager, policy_ref, policy_update_spec
) )
return {"update_storage_policy": True} return {"update_storage_policy": True}
skipping to change at line 5325 skipping to change at line 5308
service_instance service_instance
Service instance (vim.ServiceInstance) of the vCenter. Service instance (vim.ServiceInstance) of the vCenter.
Default is None. Default is None.
.. code-block:: bash .. code-block:: bash
salt '*' vsphere.list_default_storage_policy_of_datastore datastore=ds1 salt '*' vsphere.list_default_storage_policy_of_datastore datastore=ds1
""" """
log.trace( log.trace(
"Listing the default storage policy of datastore '{0}'" "".format(datast ore) "Listing the default storage policy of datastore '{}'" "".format(datasto re)
) )
# Find datastore # Find datastore
target_ref = _get_proxy_target(service_instance) target_ref = _get_proxy_target(service_instance)
ds_refs = salt.utils.vmware.get_datastores( ds_refs = salt.utils.vmware.get_datastores(
service_instance, target_ref, datastore_names=[datastore] service_instance, target_ref, datastore_names=[datastore]
) )
if not ds_refs: if not ds_refs:
raise VMwareObjectRetrievalError( raise VMwareObjectRetrievalError(
"Datastore '{0}' was not " "found".format(datastore) "Datastore '{}' was not " "found".format(datastore)
) )
profile_manager = salt.utils.pbm.get_profile_manager(service_instance) profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
policy = salt.utils.pbm.get_default_storage_policy_of_datastore( policy = salt.utils.pbm.get_default_storage_policy_of_datastore(
profile_manager, ds_refs[0] profile_manager, ds_refs[0]
) )
return _get_policy_dict(policy) return _get_policy_dict(policy)
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@supports_proxies("esxcluster", "esxdatacenter", "vcenter") @supports_proxies("esxcluster", "esxdatacenter", "vcenter")
@gets_service_instance_via_proxy @gets_service_instance_via_proxy
skipping to change at line 5368 skipping to change at line 5351
service_instance service_instance
Service instance (vim.ServiceInstance) of the vCenter. Service instance (vim.ServiceInstance) of the vCenter.
Default is None. Default is None.
.. code-block:: bash .. code-block:: bash
salt '*' vsphere.assign_storage_policy_to_datastore salt '*' vsphere.assign_storage_policy_to_datastore
policy='policy name' datastore=ds1 policy='policy name' datastore=ds1
""" """
log.trace("Assigning policy {0} to datastore {1}" "".format(policy, datastor e)) log.trace("Assigning policy {} to datastore {}" "".format(policy, datastore) )
profile_manager = salt.utils.pbm.get_profile_manager(service_instance) profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
# Find policy # Find policy
policies = salt.utils.pbm.get_storage_policies(profile_manager, [policy]) policies = salt.utils.pbm.get_storage_policies(profile_manager, [policy])
if not policies: if not policies:
raise VMwareObjectRetrievalError("Policy '{0}' was not found" "".format( policy)) raise VMwareObjectRetrievalError("Policy '{}' was not found" "".format(p olicy))
policy_ref = policies[0] policy_ref = policies[0]
# Find datastore # Find datastore
target_ref = _get_proxy_target(service_instance) target_ref = _get_proxy_target(service_instance)
ds_refs = salt.utils.vmware.get_datastores( ds_refs = salt.utils.vmware.get_datastores(
service_instance, target_ref, datastore_names=[datastore] service_instance, target_ref, datastore_names=[datastore]
) )
if not ds_refs: if not ds_refs:
raise VMwareObjectRetrievalError( raise VMwareObjectRetrievalError(
"Datastore '{0}' was not " "found".format(datastore) "Datastore '{}' was not " "found".format(datastore)
) )
ds_ref = ds_refs[0] ds_ref = ds_refs[0]
salt.utils.pbm.assign_default_storage_policy_to_datastore( salt.utils.pbm.assign_default_storage_policy_to_datastore(
profile_manager, policy_ref, ds_ref profile_manager, policy_ref, ds_ref
) )
return True return True
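Because the decorators resolve the vCenter connection from the proxy, calling this from a custom execution module or runner stays minimal; a sketch, assuming a policy named "gold" and a datastore "ds1".

.. code-block:: python

    # Sketch only: the names are placeholders and the service_instance
    # is supplied by gets_service_instance_via_proxy.
    __salt__["vsphere.assign_storage_policy_to_datastore"](
        policy="gold", datastore="ds1"
    )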
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@supports_proxies("esxdatacenter", "esxcluster", "vcenter", "esxvm") @supports_proxies("esxdatacenter", "esxcluster", "vcenter", "esxvm")
@gets_service_instance_via_proxy @gets_service_instance_via_proxy
skipping to change at line 5466 skipping to change at line 5449
a vim.ClusterComputeResource object. a vim.ClusterComputeResource object.
cluster_name cluster_name
Name of the cluster Name of the cluster
cluster_ref cluster_ref
Reference to the cluster Reference to the cluster
""" """
log.trace( log.trace(
"Building a dictionary representation of cluster " "'{0}'".format(cluste r_name) "Building a dictionary representation of cluster " "'{}'".format(cluster _name)
) )
props = salt.utils.vmware.get_properties_of_managed_object( props = salt.utils.vmware.get_properties_of_managed_object(
cluster_ref, properties=["configurationEx"] cluster_ref, properties=["configurationEx"]
) )
res = { res = {
"ha": {"enabled": props["configurationEx"].dasConfig.enabled}, "ha": {"enabled": props["configurationEx"].dasConfig.enabled},
"drs": {"enabled": props["configurationEx"].drsConfig.enabled}, "drs": {"enabled": props["configurationEx"].drsConfig.enabled},
} }
# Convert HA properties of interest # Convert HA properties of interest
ha_conf = props["configurationEx"].dasConfig ha_conf = props["configurationEx"].dasConfig
log.trace("ha_conf = {0}".format(ha_conf)) log.trace("ha_conf = {}".format(ha_conf))
res["ha"]["admission_control_enabled"] = ha_conf.admissionControlEnabled res["ha"]["admission_control_enabled"] = ha_conf.admissionControlEnabled
if ha_conf.admissionControlPolicy and isinstance( if ha_conf.admissionControlPolicy and isinstance(
ha_conf.admissionControlPolicy, ha_conf.admissionControlPolicy,
vim.ClusterFailoverResourcesAdmissionControlPolicy, vim.ClusterFailoverResourcesAdmissionControlPolicy,
): ):
pol = ha_conf.admissionControlPolicy pol = ha_conf.admissionControlPolicy
res["ha"]["admission_control_policy"] = { res["ha"]["admission_control_policy"] = {
"cpu_failover_percent": pol.cpuFailoverResourcesPercent, "cpu_failover_percent": pol.cpuFailoverResourcesPercent,
"memory_failover_percent": pol.memoryFailoverResourcesPercent, "memory_failover_percent": pol.memoryFailoverResourcesPercent,
} }
skipping to change at line 5504 skipping to change at line 5487
res["ha"]["hb_ds_candidate_policy"] = ha_conf.hBDatastoreCandidatePolicy res["ha"]["hb_ds_candidate_policy"] = ha_conf.hBDatastoreCandidatePolicy
if ha_conf.hostMonitoring: if ha_conf.hostMonitoring:
res["ha"]["host_monitoring"] = ha_conf.hostMonitoring res["ha"]["host_monitoring"] = ha_conf.hostMonitoring
if ha_conf.option: if ha_conf.option:
res["ha"]["options"] = [ res["ha"]["options"] = [
{"key": o.key, "value": o.value} for o in ha_conf.option {"key": o.key, "value": o.value} for o in ha_conf.option
] ]
res["ha"]["vm_monitoring"] = ha_conf.vmMonitoring res["ha"]["vm_monitoring"] = ha_conf.vmMonitoring
# Convert DRS properties # Convert DRS properties
drs_conf = props["configurationEx"].drsConfig drs_conf = props["configurationEx"].drsConfig
log.trace("drs_conf = {0}".format(drs_conf)) log.trace("drs_conf = {}".format(drs_conf))
res["drs"]["vmotion_rate"] = 6 - drs_conf.vmotionRate res["drs"]["vmotion_rate"] = 6 - drs_conf.vmotionRate
res["drs"]["default_vm_behavior"] = drs_conf.defaultVmBehavior res["drs"]["default_vm_behavior"] = drs_conf.defaultVmBehavior
# vm_swap_placement # vm_swap_placement
res["vm_swap_placement"] = props["configurationEx"].vmSwapPlacement res["vm_swap_placement"] = props["configurationEx"].vmSwapPlacement
# Convert VSAN properties # Convert VSAN properties
si = salt.utils.vmware.get_service_instance_from_managed_object(cluster_ref) si = salt.utils.vmware.get_service_instance_from_managed_object(cluster_ref)
if salt.utils.vsan.vsan_supported(si): if salt.utils.vsan.vsan_supported(si):
# XXX The correct way of retrieving the VSAN data (on the if branch) # XXX The correct way of retrieving the VSAN data (on the if branch)
# is not supported before 60u2 vcenter # is not supported before 60u2 vcenter
vcenter_info = salt.utils.vmware.get_service_info(si) vcenter_info = salt.utils.vmware.get_service_info(si)
if int(vcenter_info.build) >= 3634794: # 60u2 if int(vcenter_info.build) >= 3634794: # 60u2
# VSAN API is fully supported by the VC starting with 60u2 # VSAN API is fully supported by the VC starting with 60u2
vsan_conf = salt.utils.vsan.get_cluster_vsan_info(cluster_ref) vsan_conf = salt.utils.vsan.get_cluster_vsan_info(cluster_ref)
log.trace("vsan_conf = {0}".format(vsan_conf)) log.trace("vsan_conf = {}".format(vsan_conf))
res["vsan"] = { res["vsan"] = {
"enabled": vsan_conf.enabled, "enabled": vsan_conf.enabled,
"auto_claim_storage": vsan_conf.defaultConfig.autoClaimStorage, "auto_claim_storage": vsan_conf.defaultConfig.autoClaimStorage,
} }
if vsan_conf.dataEfficiencyConfig: if vsan_conf.dataEfficiencyConfig:
data_eff = vsan_conf.dataEfficiencyConfig data_eff = vsan_conf.dataEfficiencyConfig
res["vsan"].update( res["vsan"].update(
{ {
# We force compression_enabled to be True/False # We force compression_enabled to be True/False
"compression_enabled": data_eff.compressionEnabled or Fa lse, "compression_enabled": data_eff.compressionEnabled or Fa lse,
skipping to change at line 5584 skipping to change at line 5567
proxy_type = get_proxy_type() proxy_type = get_proxy_type()
if proxy_type == "esxdatacenter": if proxy_type == "esxdatacenter":
dc_ref = _get_proxy_target(service_instance) dc_ref = _get_proxy_target(service_instance)
if not cluster: if not cluster:
raise ArgumentValueError("'cluster' needs to be specified") raise ArgumentValueError("'cluster' needs to be specified")
cluster_ref = salt.utils.vmware.get_cluster(dc_ref, cluster) cluster_ref = salt.utils.vmware.get_cluster(dc_ref, cluster)
elif proxy_type == "esxcluster": elif proxy_type == "esxcluster":
cluster_ref = _get_proxy_target(service_instance) cluster_ref = _get_proxy_target(service_instance)
cluster = __salt__["esxcluster.get_details"]()["cluster"] cluster = __salt__["esxcluster.get_details"]()["cluster"]
log.trace( log.trace(
"Retrieving representation of cluster '{0}' in a " "Retrieving representation of cluster '{}' in a "
"{1} proxy".format(cluster, proxy_type) "{} proxy".format(cluster, proxy_type)
) )
return _get_cluster_dict(cluster, cluster_ref) return _get_cluster_dict(cluster, cluster_ref)
def _apply_cluster_dict(cluster_spec, cluster_dict, vsan_spec=None, vsan_61=True ): def _apply_cluster_dict(cluster_spec, cluster_dict, vsan_spec=None, vsan_61=True ):
""" """
Applies the values of cluster_dict dictionary to a cluster spec Applies the values of cluster_dict dictionary to a cluster spec
(vim.ClusterConfigSpecEx). (vim.ClusterConfigSpecEx).
All vsan values (cluster_dict['vsan']) will be applied to All vsan values (cluster_dict['vsan']) will be applied to
vsan_spec (vim.vsan.cluster.ConfigInfoEx). Can be omitted vsan_spec (vim.vsan.cluster.ConfigInfoEx). Can be omitted
if not required. if not required.
VSAN 6.1 config needs to be applied differently than the post VSAN 6.1 way. VSAN 6.1 config needs to be applied differently than the post VSAN 6.1 way.
The type of configuration desired is dictated by the flag vsan_61. The type of configuration desired is dictated by the flag vsan_61.
""" """
log.trace("Applying cluster dict {0}".format(cluster_dict)) log.trace("Applying cluster dict {}".format(cluster_dict))
if cluster_dict.get("ha"): if cluster_dict.get("ha"):
ha_dict = cluster_dict["ha"] ha_dict = cluster_dict["ha"]
if not cluster_spec.dasConfig: if not cluster_spec.dasConfig:
cluster_spec.dasConfig = vim.ClusterDasConfigInfo() cluster_spec.dasConfig = vim.ClusterDasConfigInfo()
das_config = cluster_spec.dasConfig das_config = cluster_spec.dasConfig
if "enabled" in ha_dict: if "enabled" in ha_dict:
das_config.enabled = ha_dict["enabled"] das_config.enabled = ha_dict["enabled"]
if ha_dict["enabled"]: if ha_dict["enabled"]:
# Default values when ha is enabled # Default values when ha is enabled
das_config.failoverLevel = 1 das_config.failoverLevel = 1
skipping to change at line 5711 skipping to change at line 5694
vsan_config = cluster_spec.vsanConfig vsan_config = cluster_spec.vsanConfig
if "enabled" in vsan_dict: if "enabled" in vsan_dict:
vsan_config.enabled = vsan_dict["enabled"] vsan_config.enabled = vsan_dict["enabled"]
if "auto_claim_storage" in vsan_dict: if "auto_claim_storage" in vsan_dict:
if not vsan_config.defaultConfig: if not vsan_config.defaultConfig:
vsan_config.defaultConfig = vim.VsanClusterConfigInfoHostDefaultInfo() vsan_config.defaultConfig = vim.VsanClusterConfigInfoHostDefaultInfo()
elif vsan_config.defaultConfig.uuid: elif vsan_config.defaultConfig.uuid:
# If this remains set it caused an error # If this remains set it caused an error
vsan_config.defaultConfig.uuid = None vsan_config.defaultConfig.uuid = None
vsan_config.defaultConfig.autoClaimStorage = vsan_dict["auto_claim_s torage"] vsan_config.defaultConfig.autoClaimStorage = vsan_dict["auto_claim_s torage"]
log.trace("cluster_spec = {0}".format(cluster_spec)) log.trace("cluster_spec = {}".format(cluster_spec))
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@depends(HAS_JSONSCHEMA) @depends(HAS_JSONSCHEMA)
@supports_proxies("esxcluster", "esxdatacenter") @supports_proxies("esxcluster", "esxdatacenter")
@gets_service_instance_via_proxy @gets_service_instance_via_proxy
def create_cluster(cluster_dict, datacenter=None, cluster=None, service_instance =None): def create_cluster(cluster_dict, datacenter=None, cluster=None, service_instance =None):
""" """
Creates a cluster. Creates a cluster.
Note: cluster_dict['name'] will be overridden by the cluster param value Note: cluster_dict['name'] will be overridden by the cluster param value
skipping to change at line 5903 skipping to change at line 5886
vsan_info.dataEfficiencyConfig = None vsan_info.dataEfficiencyConfig = None
else: else:
vsan_61 = True vsan_61 = True
_apply_cluster_dict(cluster_spec, cluster_dict, vsan_spec, vsan_61) _apply_cluster_dict(cluster_spec, cluster_dict, vsan_spec, vsan_61)
# We try to reconfigure vsan first as it fails if HA is enabled so the # We try to reconfigure vsan first as it fails if HA is enabled so the
# command will abort not having any side-effects # command will abort not having any side-effects
# also if HA was previously disabled it can be enabled automatically if # also if HA was previously disabled it can be enabled automatically if
# desired # desired
if vsan_spec: if vsan_spec:
log.trace("vsan_spec = {0}".format(vsan_spec)) log.trace("vsan_spec = {}".format(vsan_spec))
salt.utils.vsan.reconfigure_cluster_vsan(cluster_ref, vsan_spec) salt.utils.vsan.reconfigure_cluster_vsan(cluster_ref, vsan_spec)
# We need to retrieve again the properties and reapply them # We need to retrieve again the properties and reapply them
# As the VSAN configuration has changed # As the VSAN configuration has changed
cluster_spec = vim.ClusterConfigSpecEx() cluster_spec = vim.ClusterConfigSpecEx()
props = salt.utils.vmware.get_properties_of_managed_object( props = salt.utils.vmware.get_properties_of_managed_object(
cluster_ref, properties=["configurationEx"] cluster_ref, properties=["configurationEx"]
) )
# Copy elements we want to update to spec # Copy elements we want to update to spec
for p in ["dasConfig", "drsConfig"]: for p in ["dasConfig", "drsConfig"]:
skipping to change at line 5974 skipping to change at line 5957
# Default to getting all disks if no filtering is done # Default to getting all disks if no filtering is done
get_all_datastores = ( get_all_datastores = (
True True
if not (datastore_names or backing_disk_ids or backing_disk_scsi_addresses) if not (datastore_names or backing_disk_ids or backing_disk_scsi_addresses)
else False else False
) )
# Get the ids of the disks with the scsi addresses # Get the ids of the disks with the scsi addresses
if backing_disk_scsi_addresses: if backing_disk_scsi_addresses:
log.debug( log.debug(
"Retrieving disk ids for scsi addresses " "Retrieving disk ids for scsi addresses "
"'{0}'".format(backing_disk_scsi_addresses) "'{}'".format(backing_disk_scsi_addresses)
) )
disk_ids = [ disk_ids = [
d.canonicalName d.canonicalName
for d in salt.utils.vmware.get_disks( for d in salt.utils.vmware.get_disks(
target, scsi_addresses=backing_disk_scsi_addresses target, scsi_addresses=backing_disk_scsi_addresses
) )
] ]
log.debug("Found disk ids '{}'".format(disk_ids)) log.debug("Found disk ids '{}'".format(disk_ids))
backing_disk_ids = ( backing_disk_ids = (
backing_disk_ids.extend(disk_ids) if backing_disk_ids else disk_ids backing_disk_ids.extend(disk_ids) if backing_disk_ids else disk_ids
skipping to change at line 6079 skipping to change at line 6062
schema, schema,
) )
except jsonschema.exceptions.ValidationError as exc: except jsonschema.exceptions.ValidationError as exc:
raise ArgumentValueError(exc) raise ArgumentValueError(exc)
host_ref = _get_proxy_target(service_instance) host_ref = _get_proxy_target(service_instance)
hostname = __proxy__["esxi.get_details"]()["esxi_host"] hostname = __proxy__["esxi.get_details"]()["esxi_host"]
if safety_checks: if safety_checks:
disks = salt.utils.vmware.get_disks(host_ref, disk_ids=[disk_id]) disks = salt.utils.vmware.get_disks(host_ref, disk_ids=[disk_id])
if not disks: if not disks:
raise VMwareObjectRetrievalError( raise VMwareObjectRetrievalError(
"Disk '{0}' was not found in host '{1}'".format(disk_id, hostnam e) "Disk '{}' was not found in host '{}'".format(disk_id, hostname)
) )
ds_ref = salt.utils.vmware.create_vmfs_datastore( ds_ref = salt.utils.vmware.create_vmfs_datastore(
host_ref, datastore_name, disks[0], vmfs_major_version host_ref, datastore_name, disks[0], vmfs_major_version
) )
return True return True
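A sketch of driving this from Python on an esxi proxy minion; the disk id and datastore name are placeholders, and the disk id would normally come from vsphere.list_disks.

.. code-block:: python

    # Sketch only: placeholder values, not taken from this module.
    __salt__["vsphere.create_vmfs_datastore"](
        datastore_name="ds1",
        disk_id="naa.0000000000000000",
        vmfs_major_version=5,
    )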
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@supports_proxies("esxi", "esxcluster", "esxdatacenter") @supports_proxies("esxi", "esxcluster", "esxdatacenter")
@gets_service_instance_via_proxy @gets_service_instance_via_proxy
def rename_datastore(datastore_name, new_datastore_name, service_instance=None): def rename_datastore(datastore_name, new_datastore_name, service_instance=None):
skipping to change at line 6109 skipping to change at line 6092
service_instance service_instance
Service instance (vim.ServiceInstance) of the vCenter/ESXi host. Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
Default is None. Default is None.
.. code-block:: bash .. code-block:: bash
salt '*' vsphere.rename_datastore old_name new_name salt '*' vsphere.rename_datastore old_name new_name
""" """
# Argument validation # Argument validation
log.trace( log.trace(
"Renaming datastore {0} to {1}" "".format(datastore_name, new_datastore_ name) "Renaming datastore {} to {}" "".format(datastore_name, new_datastore_na me)
) )
target = _get_proxy_target(service_instance) target = _get_proxy_target(service_instance)
datastores = salt.utils.vmware.get_datastores( datastores = salt.utils.vmware.get_datastores(
service_instance, target, datastore_names=[datastore_name] service_instance, target, datastore_names=[datastore_name]
) )
if not datastores: if not datastores:
raise VMwareObjectRetrievalError( raise VMwareObjectRetrievalError(
"Datastore '{0}' was not found" "".format(datastore_name) "Datastore '{}' was not found" "".format(datastore_name)
) )
ds = datastores[0] ds = datastores[0]
salt.utils.vmware.rename_datastore(ds, new_datastore_name) salt.utils.vmware.rename_datastore(ds, new_datastore_name)
return True return True
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@supports_proxies("esxi", "esxcluster", "esxdatacenter") @supports_proxies("esxi", "esxcluster", "esxdatacenter")
@gets_service_instance_via_proxy @gets_service_instance_via_proxy
def remove_datastore(datastore, service_instance=None): def remove_datastore(datastore, service_instance=None):
""" """
skipping to change at line 6141 skipping to change at line 6124
Datastore name Datastore name
service_instance service_instance
Service instance (vim.ServiceInstance) of the vCenter/ESXi host. Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
Default is None. Default is None.
.. code-block:: bash .. code-block:: bash
salt '*' vsphere.remove_datastore ds_name salt '*' vsphere.remove_datastore ds_name
""" """
log.trace("Removing datastore '{0}'".format(datastore)) log.trace("Removing datastore '{}'".format(datastore))
target = _get_proxy_target(service_instance) target = _get_proxy_target(service_instance)
datastores = salt.utils.vmware.get_datastores( datastores = salt.utils.vmware.get_datastores(
service_instance, reference=target, datastore_names=[datastore] service_instance, reference=target, datastore_names=[datastore]
) )
if not datastores: if not datastores:
raise VMwareObjectRetrievalError( raise VMwareObjectRetrievalError(
"Datastore '{0}' was not found".format(datastore) "Datastore '{}' was not found".format(datastore)
) )
if len(datastores) > 1: if len(datastores) > 1:
raise VMwareObjectRetrievalError( raise VMwareObjectRetrievalError(
"Multiple datastores '{0}' were found".format(datastore) "Multiple datastores '{}' were found".format(datastore)
) )
salt.utils.vmware.remove_datastore(service_instance, datastores[0]) salt.utils.vmware.remove_datastore(service_instance, datastores[0])
return True return True
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@supports_proxies("esxcluster", "esxdatacenter") @supports_proxies("esxcluster", "esxdatacenter")
@gets_service_instance_via_proxy @gets_service_instance_via_proxy
def list_licenses(service_instance=None): def list_licenses(service_instance=None):
""" """
Lists all licenses on a vCenter. Lists all licenses on a vCenter.
skipping to change at line 6212 skipping to change at line 6195
performing the required task performing the required task
service_instance service_instance
Service instance (vim.ServiceInstance) of the vCenter/ESXi host. Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
Default is None. Default is None.
.. code-block:: bash .. code-block:: bash
salt '*' vsphere.add_license key=<license_key> desc='License desc' salt '*' vsphere.add_license key=<license_key> desc='License desc'
""" """
log.trace("Adding license '{0}'".format(key)) log.trace("Adding license '{}'".format(key))
salt.utils.vmware.add_license(service_instance, key, description) salt.utils.vmware.add_license(service_instance, key, description)
return True return True
def _get_entity(service_instance, entity): def _get_entity(service_instance, entity):
""" """
Returns the entity associated with the entity dict representation Returns the entity associated with the entity dict representation
Supported entities: cluster, vcenter Supported entities: cluster, vcenter
Expected entity format: Expected entity format:
skipping to change at line 6240 skipping to change at line 6223
vcenter: vcenter:
{'type': 'vcenter'} {'type': 'vcenter'}
service_instance service_instance
Service instance (vim.ServiceInstance) of the vCenter. Service instance (vim.ServiceInstance) of the vCenter.
entity entity
Entity dict in the format above Entity dict in the format above
""" """
log.trace("Retrieving entity: {0}".format(entity)) log.trace("Retrieving entity: {}".format(entity))
if entity["type"] == "cluster": if entity["type"] == "cluster":
dc_ref = salt.utils.vmware.get_datacenter( dc_ref = salt.utils.vmware.get_datacenter(
service_instance, entity["datacenter"] service_instance, entity["datacenter"]
) )
return salt.utils.vmware.get_cluster(dc_ref, entity["cluster"]) return salt.utils.vmware.get_cluster(dc_ref, entity["cluster"])
elif entity["type"] == "vcenter": elif entity["type"] == "vcenter":
return None return None
raise ArgumentValueError("Unsupported entity type '{0}'" "".format(entity["t ype"])) raise ArgumentValueError("Unsupported entity type '{}'" "".format(entity["ty pe"]))
def _validate_entity(entity): def _validate_entity(entity):
""" """
Validates the entity dict representation Validates the entity dict representation
entity entity
Dictionary representation of an entity. Dictionary representation of an entity.
See ``_get_entity`` docstrings for format. See ``_get_entity`` docstrings for format.
""" """
# Validate entity: # Validate entity:
if entity["type"] == "cluster": if entity["type"] == "cluster":
schema = ESXClusterEntitySchema.serialize() schema = ESXClusterEntitySchema.serialize()
elif entity["type"] == "vcenter": elif entity["type"] == "vcenter":
schema = VCenterEntitySchema.serialize() schema = VCenterEntitySchema.serialize()
else: else:
raise ArgumentValueError( raise ArgumentValueError(
"Unsupported entity type '{0}'" "".format(entity["type"]) "Unsupported entity type '{}'" "".format(entity["type"])
) )
try: try:
jsonschema.validate(entity, schema) jsonschema.validate(entity, schema)
except jsonschema.exceptions.ValidationError as exc: except jsonschema.exceptions.ValidationError as exc:
raise InvalidEntityError(exc) raise InvalidEntityError(exc)
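Concretely, the two entity shapes accepted by _get_entity and _validate_entity look like this (the datacenter and cluster names are placeholders):

.. code-block:: python

    cluster_entity = {"type": "cluster", "datacenter": "dc1", "cluster": "cl1"}
    vcenter_entity = {"type": "vcenter"}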
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@depends(HAS_JSONSCHEMA) @depends(HAS_JSONSCHEMA)
@supports_proxies("esxcluster", "esxdatacenter") @supports_proxies("esxcluster", "esxdatacenter")
@gets_service_instance_via_proxy @gets_service_instance_via_proxy
skipping to change at line 6303 skipping to change at line 6286
service_instance service_instance
Service instance (vim.ServiceInstance) of the vCenter/ESXi host. Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
Default is None. Default is None.
.. code-block:: bash .. code-block:: bash
salt '*' vsphere.list_assigned_licenses salt '*' vsphere.list_assigned_licenses
entity={type:cluster,datacenter:dc,cluster:cl} entity={type:cluster,datacenter:dc,cluster:cl}
entiy_display_name=cl entiy_display_name=cl
""" """
log.trace("Listing assigned licenses of entity {0}" "".format(entity)) log.trace("Listing assigned licenses of entity {}" "".format(entity))
_validate_entity(entity) _validate_entity(entity)
assigned_licenses = salt.utils.vmware.get_assigned_licenses( assigned_licenses = salt.utils.vmware.get_assigned_licenses(
service_instance, service_instance,
entity_ref=_get_entity(service_instance, entity), entity_ref=_get_entity(service_instance, entity),
entity_name=entity_display_name, entity_name=entity_display_name,
) )
return [ return [
{ {
skipping to change at line 6365 skipping to change at line 6348
service_instance service_instance
Service instance (vim.ServiceInstance) of the vCenter/ESXi host. Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
Default is None. Default is None.
.. code-block:: bash .. code-block:: bash
salt '*' vsphere.assign_license license_key=00000:00000 salt '*' vsphere.assign_license license_key=00000:00000
license name=test entity={type:cluster,datacenter:dc,cluster:cl} license name=test entity={type:cluster,datacenter:dc,cluster:cl}
""" """
log.trace("Assigning license {0} to entity {1}" "".format(license_key, entit y)) log.trace("Assigning license {} to entity {}" "".format(license_key, entity) )
_validate_entity(entity) _validate_entity(entity)
if safety_checks: if safety_checks:
licenses = salt.utils.vmware.get_licenses(service_instance) licenses = salt.utils.vmware.get_licenses(service_instance)
if not [l for l in licenses if l.licenseKey == license_key]: if not [l for l in licenses if l.licenseKey == license_key]:
raise VMwareObjectRetrievalError( raise VMwareObjectRetrievalError(
"License '{0}' wasn't found" "".format(license_name) "License '{}' wasn't found" "".format(license_name)
) )
salt.utils.vmware.assign_license( salt.utils.vmware.assign_license(
service_instance, service_instance,
license_key, license_key,
license_name, license_name,
entity_ref=_get_entity(service_instance, entity), entity_ref=_get_entity(service_instance, entity),
entity_name=entity_display_name, entity_name=entity_display_name,
) )
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
skipping to change at line 6464 skipping to change at line 6447
salt '*' vsphere.list_disks salt '*' vsphere.list_disks
salt '*' vsphere.list_disks disk_ids='[naa.00, naa.001]' salt '*' vsphere.list_disks disk_ids='[naa.00, naa.001]'
salt '*' vsphere.list_disks salt '*' vsphere.list_disks
scsi_addresses='[vmhba0:C0:T0:L0, vmhba1:C0:T0:L0]' scsi_addresses='[vmhba0:C0:T0:L0, vmhba1:C0:T0:L0]'
""" """
host_ref = _get_proxy_target(service_instance) host_ref = _get_proxy_target(service_instance)
hostname = __proxy__["esxi.get_details"]()["esxi_host"] hostname = __proxy__["esxi.get_details"]()["esxi_host"]
log.trace("Retrieving disks if host '{0}'".format(hostname)) log.trace("Retrieving disks if host '{}'".format(hostname))
log.trace("disk ids = {0}".format(disk_ids)) log.trace("disk ids = {}".format(disk_ids))
log.trace("scsi_addresses = {0}".format(scsi_addresses)) log.trace("scsi_addresses = {}".format(scsi_addresses))
# Default to getting all disks if no filtering is done # Default to getting all disks if no filtering is done
get_all_disks = True if not (disk_ids or scsi_addresses) else False get_all_disks = True if not (disk_ids or scsi_addresses) else False
ret_list = [] ret_list = []
scsi_address_to_lun = salt.utils.vmware.get_scsi_address_to_lun_map( scsi_address_to_lun = salt.utils.vmware.get_scsi_address_to_lun_map(
host_ref, hostname=hostname host_ref, hostname=hostname
) )
canonical_name_to_scsi_address = { canonical_name_to_scsi_address = {
lun.canonicalName: scsi_addr lun.canonicalName: scsi_addr for scsi_addr, lun in scsi_address_to_lun.items()
for scsi_addr, lun in six.iteritems(scsi_address_to_lun)
} }
for d in salt.utils.vmware.get_disks( for d in salt.utils.vmware.get_disks(
host_ref, disk_ids, scsi_addresses, get_all_disks host_ref, disk_ids, scsi_addresses, get_all_disks
): ):
ret_list.append( ret_list.append(
{ {
"id": d.canonicalName, "id": d.canonicalName,
"scsi_address": canonical_name_to_scsi_address[d.canonicalName], "scsi_address": canonical_name_to_scsi_address[d.canonicalName],
} }
) )
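The comprehension above also drops six.iteritems() in favour of dict.items(); a small sketch of the inversion it performs, using stand-in objects rather than pyVmomi LUNs:

.. code-block:: python

    class _Lun:
        # stand-in for the vim LUN objects returned by the SCSI address map
        def __init__(self, canonical_name):
            self.canonicalName = canonical_name

    scsi_address_to_lun = {"vmhba0:C0:T0:L0": _Lun("naa.000000000000001")}
    canonical_name_to_scsi_address = {
        lun.canonicalName: scsi_addr
        for scsi_addr, lun in scsi_address_to_lun.items()
    }
    assert canonical_name_to_scsi_address == {"naa.000000000000001": "vmhba0:C0:T0:L0"}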
skipping to change at line 6527 skipping to change at line 6509
if not disk_id and not scsi_address: if not disk_id and not scsi_address:
raise ArgumentValueError( raise ArgumentValueError(
"Either 'disk_id' or 'scsi_address' " "needs to be specified" "Either 'disk_id' or 'scsi_address' " "needs to be specified"
) )
host_ref = _get_proxy_target(service_instance) host_ref = _get_proxy_target(service_instance)
hostname = __proxy__["esxi.get_details"]()["esxi_host"] hostname = __proxy__["esxi.get_details"]()["esxi_host"]
if not disk_id: if not disk_id:
scsi_address_to_lun = salt.utils.vmware.get_scsi_address_to_lun_map(host_ref) scsi_address_to_lun = salt.utils.vmware.get_scsi_address_to_lun_map(host_ref)
if scsi_address not in scsi_address_to_lun: if scsi_address not in scsi_address_to_lun:
raise VMwareObjectRetrievalError( raise VMwareObjectRetrievalError(
"Scsi lun with address '{0}' was not found on host '{1}'" "Scsi lun with address '{}' was not found on host '{}'"
"".format(scsi_address, hostname) "".format(scsi_address, hostname)
) )
disk_id = scsi_address_to_lun[scsi_address].canonicalName disk_id = scsi_address_to_lun[scsi_address].canonicalName
log.trace( log.trace(
"[{0}] Got disk id '{1}' for scsi address '{2}'" "[{}] Got disk id '{}' for scsi address '{}'"
"".format(hostname, disk_id, scsi_address) "".format(hostname, disk_id, scsi_address)
) )
log.trace( log.trace(
"Erasing disk partitions on disk '{0}' in host '{1}'" "Erasing disk partitions on disk '{}' in host '{}'" "".format(disk_id, h
"".format(disk_id, hostname) ostname)
) )
salt.utils.vmware.erase_disk_partitions( salt.utils.vmware.erase_disk_partitions(
service_instance, host_ref, disk_id, hostname=hostname service_instance, host_ref, disk_id, hostname=hostname
) )
log.info( log.info(
"Erased disk partitions on disk '{0}' on host '{1}'" "Erased disk partitions on disk '{}' on host '{}'" "".format(disk_id, ho
"".format(disk_id, hostname) stname)
) )
return True return True
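erase_disk_partitions and list_disk_partitions share the same lookup step: when only a SCSI address is given, it is resolved to a canonical disk id through the host's address-to-LUN map, failing if the address is unknown. A hedged, stand-alone sketch of that step (names and data are illustrative, not Salt's API):

.. code-block:: python

    class _Lun:
        canonicalName = "naa.000000000000001"  # stand-in for a vim LUN object

    def resolve_disk_id(scsi_address_to_lun, scsi_address, hostname):
        # Mirror the guard used above: unknown addresses raise instead of
        # silently acting on the wrong device.
        if scsi_address not in scsi_address_to_lun:
            raise KeyError(
                "Scsi lun with address '{}' was not found on host '{}'".format(
                    scsi_address, hostname
                )
            )
        return scsi_address_to_lun[scsi_address].canonicalName

    assert (
        resolve_disk_id({"vmhba0:C0:T0:L0": _Lun()}, "vmhba0:C0:T0:L0", "esxi-01")
        == "naa.000000000000001"
    )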
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@supports_proxies("esxi") @supports_proxies("esxi")
@gets_service_instance_via_proxy @gets_service_instance_via_proxy
def list_disk_partitions(disk_id=None, scsi_address=None, service_instance=None) : def list_disk_partitions(disk_id=None, scsi_address=None, service_instance=None) :
""" """
Lists the partitions on a disk. Lists the partitions on a disk.
The disk can be specified either by the canonical name, or by the The disk can be specified either by the canonical name, or by the
skipping to change at line 6587 skipping to change at line 6567
if not disk_id and not scsi_address: if not disk_id and not scsi_address:
raise ArgumentValueError( raise ArgumentValueError(
"Either 'disk_id' or 'scsi_address' " "needs to be specified" "Either 'disk_id' or 'scsi_address' " "needs to be specified"
) )
host_ref = _get_proxy_target(service_instance) host_ref = _get_proxy_target(service_instance)
hostname = __proxy__["esxi.get_details"]()["esxi_host"] hostname = __proxy__["esxi.get_details"]()["esxi_host"]
if not disk_id: if not disk_id:
scsi_address_to_lun = salt.utils.vmware.get_scsi_address_to_lun_map(host _ref) scsi_address_to_lun = salt.utils.vmware.get_scsi_address_to_lun_map(host _ref)
if scsi_address not in scsi_address_to_lun: if scsi_address not in scsi_address_to_lun:
raise VMwareObjectRetrievalError( raise VMwareObjectRetrievalError(
"Scsi lun with address '{0}' was not found on host '{1}'" "Scsi lun with address '{}' was not found on host '{}'"
"".format(scsi_address, hostname) "".format(scsi_address, hostname)
) )
disk_id = scsi_address_to_lun[scsi_address].canonicalName disk_id = scsi_address_to_lun[scsi_address].canonicalName
log.trace( log.trace(
"[{0}] Got disk id '{1}' for scsi address '{2}'" "[{}] Got disk id '{}' for scsi address '{}'"
"".format(hostname, disk_id, scsi_address) "".format(hostname, disk_id, scsi_address)
) )
log.trace( log.trace(
"Listing disk partitions on disk '{0}' in host '{1}'" "Listing disk partitions on disk '{}' in host '{}'" "".format(disk_id, h
"".format(disk_id, hostname) ostname)
) )
partition_info = salt.utils.vmware.get_disk_partition_info(host_ref, disk_id) partition_info = salt.utils.vmware.get_disk_partition_info(host_ref, disk_id)
ret_list = [] ret_list = []
# NOTE: 1. The layout view has an extra 'None' partition for free space # NOTE: 1. The layout view has an extra 'None' partition for free space
# 2. The orders in the layout/partition views are not the same # 2. The orders in the layout/partition views are not the same
for part_spec in partition_info.spec.partition: for part_spec in partition_info.spec.partition:
part_layout = [ part_layout = [
p p
for p in partition_info.layout.partition for p in partition_info.layout.partition
if p.partition == part_spec.partition if p.partition == part_spec.partition
skipping to change at line 6652 skipping to change at line 6631
Default is None. Default is None.
.. code-block:: bash .. code-block:: bash
salt '*' vsphere.list_diskgroups salt '*' vsphere.list_diskgroups
salt '*' vsphere.list_diskgroups cache_disk_ids='[naa.000000000000001]' salt '*' vsphere.list_diskgroups cache_disk_ids='[naa.000000000000001]'
""" """
host_ref = _get_proxy_target(service_instance) host_ref = _get_proxy_target(service_instance)
hostname = __proxy__["esxi.get_details"]()["esxi_host"] hostname = __proxy__["esxi.get_details"]()["esxi_host"]
log.trace("Listing diskgroups in '{0}'".format(hostname)) log.trace("Listing diskgroups in '{}'".format(hostname))
get_all_diskgroups = True if not cache_disk_ids else False get_all_diskgroups = True if not cache_disk_ids else False
ret_list = [] ret_list = []
for dg in salt.utils.vmware.get_diskgroups( for dg in salt.utils.vmware.get_diskgroups(
host_ref, cache_disk_ids, get_all_diskgroups host_ref, cache_disk_ids, get_all_diskgroups
): ):
ret_list.append( ret_list.append(
{ {
"cache_disk": dg.ssd.canonicalName, "cache_disk": dg.ssd.canonicalName,
"capacity_disks": [d.canonicalName for d in dg.nonSsd], "capacity_disks": [d.canonicalName for d in dg.nonSsd],
} }
skipping to change at line 6717 skipping to change at line 6696
schema, schema,
) )
except jsonschema.exceptions.ValidationError as exc: except jsonschema.exceptions.ValidationError as exc:
raise ArgumentValueError(exc) raise ArgumentValueError(exc)
host_ref = _get_proxy_target(service_instance) host_ref = _get_proxy_target(service_instance)
hostname = __proxy__["esxi.get_details"]()["esxi_host"] hostname = __proxy__["esxi.get_details"]()["esxi_host"]
if safety_checks: if safety_checks:
diskgroups = salt.utils.vmware.get_diskgroups(host_ref, [cache_disk_id]) diskgroups = salt.utils.vmware.get_diskgroups(host_ref, [cache_disk_id])
if diskgroups: if diskgroups:
raise VMwareObjectExistsError( raise VMwareObjectExistsError(
"Diskgroup with cache disk id '{0}' already exists ESXi " "Diskgroup with cache disk id '{}' already exists ESXi "
"host '{1}'".format(cache_disk_id, hostname) "host '{}'".format(cache_disk_id, hostname)
) )
disk_ids = capacity_disk_ids[:] disk_ids = capacity_disk_ids[:]
disk_ids.insert(0, cache_disk_id) disk_ids.insert(0, cache_disk_id)
disks = salt.utils.vmware.get_disks(host_ref, disk_ids=disk_ids) disks = salt.utils.vmware.get_disks(host_ref, disk_ids=disk_ids)
for id in disk_ids: for id in disk_ids:
if not [d for d in disks if d.canonicalName == id]: if not [d for d in disks if d.canonicalName == id]:
raise VMwareObjectRetrievalError( raise VMwareObjectRetrievalError(
"No disk with id '{0}' was found in ESXi host '{1}'" "No disk with id '{}' was found in ESXi host '{}'"
"".format(id, hostname) "".format(id, hostname)
) )
cache_disk = [d for d in disks if d.canonicalName == cache_disk_id][0] cache_disk = [d for d in disks if d.canonicalName == cache_disk_id][0]
capacity_disks = [d for d in disks if d.canonicalName in capacity_disk_ids] capacity_disks = [d for d in disks if d.canonicalName in capacity_disk_ids]
vsan_disk_mgmt_system = salt.utils.vsan.get_vsan_disk_management_system( vsan_disk_mgmt_system = salt.utils.vsan.get_vsan_disk_management_system(
service_instance service_instance
) )
dg = salt.utils.vsan.create_diskgroup( dg = salt.utils.vsan.create_diskgroup(
service_instance, vsan_disk_mgmt_system, host_ref, cache_disk, capacity_disks service_instance, vsan_disk_mgmt_system, host_ref, cache_disk, capacity_disks
) )
skipping to change at line 6789 skipping to change at line 6768
) )
except jsonschema.exceptions.ValidationError as exc: except jsonschema.exceptions.ValidationError as exc:
raise ArgumentValueError(exc) raise ArgumentValueError(exc)
host_ref = _get_proxy_target(service_instance) host_ref = _get_proxy_target(service_instance)
hostname = __proxy__["esxi.get_details"]()["esxi_host"] hostname = __proxy__["esxi.get_details"]()["esxi_host"]
disks = salt.utils.vmware.get_disks(host_ref, disk_ids=capacity_disk_ids) disks = salt.utils.vmware.get_disks(host_ref, disk_ids=capacity_disk_ids)
if safety_checks: if safety_checks:
for id in capacity_disk_ids: for id in capacity_disk_ids:
if not [d for d in disks if d.canonicalName == id]: if not [d for d in disks if d.canonicalName == id]:
raise VMwareObjectRetrievalError( raise VMwareObjectRetrievalError(
"No disk with id '{0}' was found in ESXi host '{1}'" "No disk with id '{}' was found in ESXi host '{}'"
"".format(id, hostname) "".format(id, hostname)
) )
diskgroups = salt.utils.vmware.get_diskgroups( diskgroups = salt.utils.vmware.get_diskgroups(
host_ref, cache_disk_ids=[cache_disk_id] host_ref, cache_disk_ids=[cache_disk_id]
) )
if not diskgroups: if not diskgroups:
raise VMwareObjectRetrievalError( raise VMwareObjectRetrievalError(
"No diskgroup with cache disk id '{0}' was found in ESXi " "No diskgroup with cache disk id '{}' was found in ESXi "
"host '{1}'".format(cache_disk_id, hostname) "host '{}'".format(cache_disk_id, hostname)
) )
vsan_disk_mgmt_system = salt.utils.vsan.get_vsan_disk_management_system( vsan_disk_mgmt_system = salt.utils.vsan.get_vsan_disk_management_system(
service_instance service_instance
) )
salt.utils.vsan.add_capacity_to_diskgroup( salt.utils.vsan.add_capacity_to_diskgroup(
service_instance, vsan_disk_mgmt_system, host_ref, diskgroups[0], disks service_instance, vsan_disk_mgmt_system, host_ref, diskgroups[0], disks
) )
return True return True
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
skipping to change at line 6866 skipping to change at line 6845
) )
except jsonschema.exceptions.ValidationError as exc: except jsonschema.exceptions.ValidationError as exc:
raise ArgumentValueError(str(exc)) raise ArgumentValueError(str(exc))
host_ref = _get_proxy_target(service_instance) host_ref = _get_proxy_target(service_instance)
hostname = __proxy__["esxi.get_details"]()["esxi_host"] hostname = __proxy__["esxi.get_details"]()["esxi_host"]
disks = salt.utils.vmware.get_disks(host_ref, disk_ids=capacity_disk_ids) disks = salt.utils.vmware.get_disks(host_ref, disk_ids=capacity_disk_ids)
if safety_checks: if safety_checks:
for id in capacity_disk_ids: for id in capacity_disk_ids:
if not [d for d in disks if d.canonicalName == id]: if not [d for d in disks if d.canonicalName == id]:
raise VMwareObjectRetrievalError( raise VMwareObjectRetrievalError(
"No disk with id '{0}' was found in ESXi host '{1}'" "No disk with id '{}' was found in ESXi host '{}'"
"".format(id, hostname) "".format(id, hostname)
) )
diskgroups = salt.utils.vmware.get_diskgroups( diskgroups = salt.utils.vmware.get_diskgroups(
host_ref, cache_disk_ids=[cache_disk_id] host_ref, cache_disk_ids=[cache_disk_id]
) )
if not diskgroups: if not diskgroups:
raise VMwareObjectRetrievalError( raise VMwareObjectRetrievalError(
"No diskgroup with cache disk id '{0}' was found in ESXi " "No diskgroup with cache disk id '{}' was found in ESXi "
"host '{1}'".format(cache_disk_id, hostname) "host '{}'".format(cache_disk_id, hostname)
) )
log.trace("data_evacuation = {0}".format(data_evacuation)) log.trace("data_evacuation = {}".format(data_evacuation))
salt.utils.vsan.remove_capacity_from_diskgroup( salt.utils.vsan.remove_capacity_from_diskgroup(
service_instance, service_instance,
host_ref, host_ref,
diskgroups[0], diskgroups[0],
capacity_disks=[d for d in disks if d.canonicalName in capacity_disk_ids ], capacity_disks=[d for d in disks if d.canonicalName in capacity_disk_ids ],
data_evacuation=data_evacuation, data_evacuation=data_evacuation,
) )
return True return True
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
skipping to change at line 6917 skipping to change at line 6896
salt '*' vsphere.remove_diskgroup cache_disk_id='naa.000000000000001' salt '*' vsphere.remove_diskgroup cache_disk_id='naa.000000000000001'
""" """
log.trace("Validating diskgroup input") log.trace("Validating diskgroup input")
host_ref = _get_proxy_target(service_instance) host_ref = _get_proxy_target(service_instance)
hostname = __proxy__["esxi.get_details"]()["esxi_host"] hostname = __proxy__["esxi.get_details"]()["esxi_host"]
diskgroups = salt.utils.vmware.get_diskgroups( diskgroups = salt.utils.vmware.get_diskgroups(
host_ref, cache_disk_ids=[cache_disk_id] host_ref, cache_disk_ids=[cache_disk_id]
) )
if not diskgroups: if not diskgroups:
raise VMwareObjectRetrievalError( raise VMwareObjectRetrievalError(
"No diskgroup with cache disk id '{0}' was found in ESXi " "No diskgroup with cache disk id '{}' was found in ESXi "
"host '{1}'".format(cache_disk_id, hostname) "host '{}'".format(cache_disk_id, hostname)
) )
log.trace("data accessibility = {0}".format(data_accessibility)) log.trace("data accessibility = {}".format(data_accessibility))
salt.utils.vsan.remove_diskgroup( salt.utils.vsan.remove_diskgroup(
service_instance, host_ref, diskgroups[0], data_accessibility=data_acces sibility service_instance, host_ref, diskgroups[0], data_accessibility=data_acces sibility
) )
return True return True
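add_capacity_to_diskgroup, remove_capacity_from_diskgroup and remove_diskgroup all start from the same guard: find the diskgroup owning a given cache disk and raise if the host has none. A condensed sketch with plain dicts (the helper name is ours, not Salt's):

.. code-block:: python

    def find_diskgroup(diskgroups, cache_disk_id, hostname):
        matches = [dg for dg in diskgroups if dg["cache_disk"] == cache_disk_id]
        if not matches:
            raise LookupError(
                "No diskgroup with cache disk id '{}' was found in ESXi "
                "host '{}'".format(cache_disk_id, hostname)
            )
        return matches[0]

    groups = [{"cache_disk": "naa.001", "capacity_disks": ["naa.002"]}]
    assert find_diskgroup(groups, "naa.001", "esxi-01")["capacity_disks"] == ["naa.002"]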
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@supports_proxies("esxi") @supports_proxies("esxi")
@gets_service_instance_via_proxy @gets_service_instance_via_proxy
def get_host_cache(service_instance=None): def get_host_cache(service_instance=None):
""" """
skipping to change at line 6947 skipping to change at line 6926
.. code-block:: bash .. code-block:: bash
salt '*' vsphere.get_host_cache salt '*' vsphere.get_host_cache
""" """
# Default to getting all disks if no filtering is done # Default to getting all disks if no filtering is done
ret_dict = {} ret_dict = {}
host_ref = _get_proxy_target(service_instance) host_ref = _get_proxy_target(service_instance)
hostname = __proxy__["esxi.get_details"]()["esxi_host"] hostname = __proxy__["esxi.get_details"]()["esxi_host"]
hci = salt.utils.vmware.get_host_cache(host_ref) hci = salt.utils.vmware.get_host_cache(host_ref)
if not hci: if not hci:
log.debug("Host cache not configured on host '{0}'".format(hostname)) log.debug("Host cache not configured on host '{}'".format(hostname))
ret_dict["enabled"] = False ret_dict["enabled"] = False
return ret_dict return ret_dict
# TODO Support multiple host cache info objects (on multiple datastores) # TODO Support multiple host cache info objects (on multiple datastores)
return { return {
"enabled": True, "enabled": True,
"datastore": {"name": hci.key.name}, "datastore": {"name": hci.key.name},
"swap_size": "{}MiB".format(hci.swapSize), "swap_size": "{}MiB".format(hci.swapSize),
} }
skipping to change at line 7015 skipping to change at line 6994
ret_dict = {"enabled": False} ret_dict = {"enabled": False}
host_ref = _get_proxy_target(service_instance) host_ref = _get_proxy_target(service_instance)
hostname = __proxy__["esxi.get_details"]()["esxi_host"] hostname = __proxy__["esxi.get_details"]()["esxi_host"]
if datastore: if datastore:
ds_refs = salt.utils.vmware.get_datastores( ds_refs = salt.utils.vmware.get_datastores(
service_instance, host_ref, datastore_names=[datastore] service_instance, host_ref, datastore_names=[datastore]
) )
if not ds_refs: if not ds_refs:
raise VMwareObjectRetrievalError( raise VMwareObjectRetrievalError(
"Datastore '{0}' was not found on host " "Datastore '{}' was not found on host "
"'{1}'".format(datastore, hostname) "'{}'".format(datastore, hostname)
) )
ds_ref = ds_refs[0] ds_ref = ds_refs[0]
salt.utils.vmware.configure_host_cache(host_ref, ds_ref, swap_size_MiB) salt.utils.vmware.configure_host_cache(host_ref, ds_ref, swap_size_MiB)
return True return True
def _check_hosts(service_instance, host, host_names): def _check_hosts(service_instance, host, host_names):
""" """
Helper function that checks to see if the host provided is a vCenter Server or Helper function that checks to see if the host provided is a vCenter Server or
an ESXi host. If it's an ESXi host, returns a list of a single host_name. an ESXi host. If it's an ESXi host, returns a list of a single host_name.
skipping to change at line 7200 skipping to change at line 7179
'host_4': {'Eligible': []}} 'host_4': {'Eligible': []}}
""" """
ret = {} ret = {}
for host_name in host_names: for host_name in host_names:
# Get VSAN System Config Manager, if available. # Get VSAN System Config Manager, if available.
host_ref = _get_host_ref(service_instance, host, host_name=host_name) host_ref = _get_host_ref(service_instance, host, host_name=host_name)
vsan_system = host_ref.configManager.vsanSystem vsan_system = host_ref.configManager.vsanSystem
if vsan_system is None: if vsan_system is None:
msg = ( msg = (
"VSAN System Config Manager is unset for host '{0}'. " "VSAN System Config Manager is unset for host '{}'. "
"VSAN configuration cannot be changed without a configured " "VSAN configuration cannot be changed without a configured "
"VSAN System.".format(host_name) "VSAN System.".format(host_name)
) )
log.debug(msg) log.debug(msg)
ret.update({host_name: {"Error": msg}}) ret.update({host_name: {"Error": msg}})
continue continue
# Get all VSAN suitable disks for this host. # Get all VSAN suitable disks for this host.
suitable_disks = [] suitable_disks = []
query = vsan_system.QueryDisksForVsan() query = vsan_system.QueryDisksForVsan()
for item in query: for item in query:
if item.state == "eligible": if item.state == "eligible":
suitable_disks.append(item) suitable_disks.append(item)
# No suitable disks were found to add. Warn and move on. # No suitable disks were found to add. Warn and move on.
# This isn't an error as the state may run repeatedly after all eligible disks are added. # This isn't an error as the state may run repeatedly after all eligible disks are added.
if not suitable_disks: if not suitable_disks:
msg = "The host '{0}' does not have any VSAN eligible disks.".format ( msg = "The host '{}' does not have any VSAN eligible disks.".format(
host_name host_name
) )
log.warning(msg) log.warning(msg)
ret.update({host_name: {"Eligible": msg}}) ret.update({host_name: {"Eligible": msg}})
continue continue
# Get disks for host and combine into one list of Disk Objects # Get disks for host and combine into one list of Disk Objects
disks = _get_host_ssds(host_ref) + _get_host_non_ssds(host_ref) disks = _get_host_ssds(host_ref) + _get_host_non_ssds(host_ref)
# Get disks that are in both the disks list and suitable_disks lists. # Get disks that are in both the disks list and suitable_disks lists.
skipping to change at line 7304 skipping to change at line 7283
config_value, config_value,
protocol=None, protocol=None,
port=None, port=None,
reset_service=None, reset_service=None,
esxi_host=None, esxi_host=None,
credstore=None, credstore=None,
): ):
""" """
Helper function for set_syslog_config that sets the config and populates the return dictionary. Helper function for set_syslog_config that sets the config and populates the return dictionary.
""" """
cmd = "system syslog config set --{0} {1}".format(syslog_config, config_valu e) cmd = "system syslog config set --{} {}".format(syslog_config, config_value)
ret_dict = {} ret_dict = {}
valid_resets = [ valid_resets = [
"logdir", "logdir",
"loghost", "loghost",
"default-rotate", "default-rotate",
"default-size", "default-size",
"default-timeout", "default-timeout",
"logdir-unique", "logdir-unique",
] ]
if syslog_config not in valid_resets: if syslog_config not in valid_resets:
ret_dict.update( ret_dict.update(
{ {
"success": False, "success": False,
"message": "'{0}' is not a valid config variable.".format( "message": "'{}' is not a valid config variable.".format(syslog_
syslog_config config),
),
} }
) )
return ret_dict return ret_dict
response = salt.utils.vmware.esxcli( response = salt.utils.vmware.esxcli(
host, host,
username, username,
password, password,
cmd, cmd,
protocol=protocol, protocol=protocol,
skipping to change at line 7553 skipping to change at line 7530
""" """
ret = {} ret = {}
ret["success"] = True ret["success"] = True
ret["message"] = [] ret["message"] = []
service_instance = salt.utils.vmware.get_service_instance( service_instance = salt.utils.vmware.get_service_instance(
host=host, username=username, password=password, protocol=protocol, port=port host=host, username=username, password=password, protocol=protocol, port=port
) )
dvs = salt.utils.vmware._get_dvs(service_instance, dvs_name) dvs = salt.utils.vmware._get_dvs(service_instance, dvs_name)
if not dvs: if not dvs:
ret["message"].append( ret["message"].append(
"No Distributed Virtual Switch found with name {0}".format(dvs_name) "No Distributed Virtual Switch found with name {}".format(dvs_name)
) )
ret["success"] = False ret["success"] = False
target_portgroup = salt.utils.vmware._get_dvs_portgroup(dvs, target_portgroup_name) target_portgroup = salt.utils.vmware._get_dvs_portgroup(dvs, target_portgroup_name)
if not target_portgroup: if not target_portgroup:
ret["message"].append( ret["message"].append(
"No target portgroup found with name {0}".format(target_portgroup_na me) "No target portgroup found with name {}".format(target_portgroup_nam e)
) )
ret["success"] = False ret["success"] = False
uplink_portgroup = salt.utils.vmware._get_dvs_uplink_portgroup( uplink_portgroup = salt.utils.vmware._get_dvs_uplink_portgroup(
dvs, uplink_portgroup_name dvs, uplink_portgroup_name
) )
if not uplink_portgroup: if not uplink_portgroup:
ret["message"].append( ret["message"].append(
"No uplink portgroup found with name {0}".format(uplink_portgroup_na me) "No uplink portgroup found with name {}".format(uplink_portgroup_nam e)
) )
ret["success"] = False ret["success"] = False
if len(ret["message"]) > 0: if len(ret["message"]) > 0:
return ret return ret
dvs_uuid = dvs.config.uuid dvs_uuid = dvs.config.uuid
try: try:
host_names = _check_hosts(service_instance, host, host_names) host_names = _check_hosts(service_instance, host, host_names)
except CommandExecutionError as e: except CommandExecutionError as e:
ret["message"] = "Error retrieving hosts: {0}".format(e.msg) ret["message"] = "Error retrieving hosts: {}".format(e.msg)
return ret return ret
for host_name in host_names: for host_name in host_names:
ret[host_name] = {} ret[host_name] = {}
ret[host_name].update( ret[host_name].update(
{ {
"status": False, "status": False,
"uplink": uplink_portgroup_name, "uplink": uplink_portgroup_name,
"portgroup": target_portgroup_name, "portgroup": target_portgroup_name,
skipping to change at line 7608 skipping to change at line 7585
ret[host_name].update({"message": "Host {1} not found".format(host_n ame)}) ret[host_name].update({"message": "Host {1} not found".format(host_n ame)})
ret["success"] = False ret["success"] = False
continue continue
dvs_hostmember_config = vim.dvs.HostMember.ConfigInfo(host=host_ref) dvs_hostmember_config = vim.dvs.HostMember.ConfigInfo(host=host_ref)
dvs_hostmember = vim.dvs.HostMember(config=dvs_hostmember_config) dvs_hostmember = vim.dvs.HostMember(config=dvs_hostmember_config)
p_nics = salt.utils.vmware._get_pnics(host_ref) p_nics = salt.utils.vmware._get_pnics(host_ref)
p_nic = [x for x in p_nics if x.device == vmnic_name] p_nic = [x for x in p_nics if x.device == vmnic_name]
if len(p_nic) == 0: if len(p_nic) == 0:
ret[host_name].update( ret[host_name].update(
{"message": "Physical nic {0} not found".format(vmknic_name)} {"message": "Physical nic {} not found".format(vmknic_name)}
) )
ret["success"] = False ret["success"] = False
continue continue
v_nics = salt.utils.vmware._get_vnics(host_ref) v_nics = salt.utils.vmware._get_vnics(host_ref)
v_nic = [x for x in v_nics if x.device == vmknic_name] v_nic = [x for x in v_nics if x.device == vmknic_name]
if len(v_nic) == 0: if len(v_nic) == 0:
ret[host_name].update( ret[host_name].update(
{"message": "Virtual nic {0} not found".format(vmnic_name)} {"message": "Virtual nic {} not found".format(vmnic_name)}
) )
ret["success"] = False ret["success"] = False
continue continue
v_nic_mgr = salt.utils.vmware._get_vnic_manager(host_ref) v_nic_mgr = salt.utils.vmware._get_vnic_manager(host_ref)
if not v_nic_mgr: if not v_nic_mgr:
ret[host_name].update( ret[host_name].update(
{"message": "Unable to get the host's virtual nic manager."} {"message": "Unable to get the host's virtual nic manager."}
) )
ret["success"] = False ret["success"] = False
skipping to change at line 7711 skipping to change at line 7688
) )
try: try:
network_system.UpdateNetworkConfig( network_system.UpdateNetworkConfig(
changeMode="modify", config=host_network_config changeMode="modify", config=host_network_config
) )
ret[host_name].update({"status": True}) ret[host_name].update({"status": True})
except Exception as e: # pylint: disable=broad-except except Exception as e: # pylint: disable=broad-except
if hasattr(e, "msg"): if hasattr(e, "msg"):
ret[host_name].update( ret[host_name].update(
{"message": "Failed to migrate adapters ({0})".format(e.msg) } {"message": "Failed to migrate adapters ({})".format(e.msg)}
) )
continue continue
else: else:
raise raise
return ret return ret
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@supports_proxies("esxi", "esxcluster", "esxdatacenter", "vcenter") @supports_proxies("esxi", "esxcluster", "esxdatacenter", "vcenter")
def _get_proxy_target(service_instance): def _get_proxy_target(service_instance):
skipping to change at line 7782 skipping to change at line 7759
details = __proxy__["esxi.get_details"]() details = __proxy__["esxi.get_details"]()
if "vcenter" not in details: if "vcenter" not in details:
raise InvalidEntityError( raise InvalidEntityError(
"Proxies connected directly to ESXi " "hosts are not supported" "Proxies connected directly to ESXi " "hosts are not supported"
) )
references = salt.utils.vmware.get_hosts( references = salt.utils.vmware.get_hosts(
service_instance, host_names=details["esxi_host"] service_instance, host_names=details["esxi_host"]
) )
if not references: if not references:
raise VMwareObjectRetrievalError( raise VMwareObjectRetrievalError(
"ESXi host '{0}' was not found".format(details["esxi_host"]) "ESXi host '{}' was not found".format(details["esxi_host"])
) )
reference = references[0] reference = references[0]
log.trace("reference = {0}".format(reference)) log.trace("reference = {}".format(reference))
return reference return reference
def _get_esxdatacenter_proxy_details(): def _get_esxdatacenter_proxy_details():
""" """
Returns the running esxdatacenter's proxy details Returns the running esxdatacenter's proxy details
""" """
det = __salt__["esxdatacenter.get_details"]() det = __salt__["esxdatacenter.get_details"]()
return ( return (
det.get("vcenter"), det.get("vcenter"),
det.get("username"), det.get("username"),
skipping to change at line 7923 skipping to change at line 7900
if "cluster" in placement: if "cluster" in placement:
container_object = salt.utils.vmware.get_cluster( container_object = salt.utils.vmware.get_cluster(
datacenter_object, placement["cluster"] datacenter_object, placement["cluster"]
) )
else: else:
container_objects = salt.utils.vmware.get_hosts( container_objects = salt.utils.vmware.get_hosts(
service_instance, datacenter_name=datacenter, host_names=[placement["host"]] service_instance, datacenter_name=datacenter, host_names=[placement["host"]]
) )
if not container_objects: if not container_objects:
raise salt.exceptions.VMwareObjectRetrievalError( raise salt.exceptions.VMwareObjectRetrievalError(
"ESXi host named '{0}' wasn't " "found.".format(placement["host" ]) "ESXi host named '{}' wasn't " "found.".format(placement["host"] )
) )
container_object = container_objects[0] container_object = container_objects[0]
# list of vim.host.DatastoreBrowser.SearchResults objects # list of vim.host.DatastoreBrowser.SearchResults objects
files = salt.utils.vmware.get_datastore_files( files = salt.utils.vmware.get_datastore_files(
service_instance, directory, [datastore], container_object, browser_spec service_instance, directory, [datastore], container_object, browser_spec
) )
if files and len(files[0].file) > 1: if files and len(files[0].file) > 1:
raise salt.exceptions.VMwareMultipleObjectsError( raise salt.exceptions.VMwareMultipleObjectsError(
"Multiple configuration files found in " "the same virtual machine f older" "Multiple configuration files found in " "the same virtual machine f older"
skipping to change at line 7957 skipping to change at line 7934
config_spec config_spec
Configuration spec object Configuration spec object
operation operation
Defines the operation which should be used, Defines the operation which should be used,
the possible values: 'add' and 'edit'; the default value is 'add' the possible values: 'add' and 'edit'; the default value is 'add'
""" """
log.trace( log.trace(
"Configuring virtual machine hardware " "Configuring virtual machine hardware "
"version version={0}".format(hardware_version) "version version={}".format(hardware_version)
) )
if operation == "edit": if operation == "edit":
log.trace( log.trace(
"Scheduling hardware version " "upgrade to {0}".format(hardware_vers ion) "Scheduling hardware version " "upgrade to {}".format(hardware_versi on)
) )
scheduled_hardware_upgrade = vim.vm.ScheduledHardwareUpgradeInfo() scheduled_hardware_upgrade = vim.vm.ScheduledHardwareUpgradeInfo()
scheduled_hardware_upgrade.upgradePolicy = "always" scheduled_hardware_upgrade.upgradePolicy = "always"
scheduled_hardware_upgrade.versionKey = hardware_version scheduled_hardware_upgrade.versionKey = hardware_version
config_spec.scheduledHardwareUpgradeInfo = scheduled_hardware_upgrade config_spec.scheduledHardwareUpgradeInfo = scheduled_hardware_upgrade
elif operation == "add": elif operation == "add":
config_spec.version = str(hardware_version) config_spec.version = str(hardware_version)
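_apply_hardware_version branches on the operation: "add" writes the version straight into the spec, while "edit" records a scheduled upgrade instead. A simplified sketch in which a plain dict stands in for vim.vm.ConfigSpec:

.. code-block:: python

    def apply_hardware_version(hardware_version, config_spec, operation="add"):
        if operation == "edit":
            # editing schedules the upgrade rather than setting the version directly
            config_spec["scheduledHardwareUpgradeInfo"] = {
                "upgradePolicy": "always",
                "versionKey": hardware_version,
            }
        elif operation == "add":
            config_spec["version"] = str(hardware_version)
        return config_spec

    assert apply_hardware_version("vmx-14", {}, "add") == {"version": "vmx-14"}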
def _apply_cpu_config(config_spec, cpu_props): def _apply_cpu_config(config_spec, cpu_props):
""" """
Sets CPU core count to the given value Sets CPU core count to the given value
config_spec config_spec
vm.ConfigSpec object vm.ConfigSpec object
cpu_props cpu_props
CPU properties dict CPU properties dict
""" """
log.trace( log.trace(
"Configuring virtual machine CPU " "settings cpu_props={0}".format(cpu_p rops) "Configuring virtual machine CPU " "settings cpu_props={}".format(cpu_pr ops)
) )
if "count" in cpu_props: if "count" in cpu_props:
config_spec.numCPUs = int(cpu_props["count"]) config_spec.numCPUs = int(cpu_props["count"])
if "cores_per_socket" in cpu_props: if "cores_per_socket" in cpu_props:
config_spec.numCoresPerSocket = int(cpu_props["cores_per_socket"]) config_spec.numCoresPerSocket = int(cpu_props["cores_per_socket"])
if "nested" in cpu_props and cpu_props["nested"]: if "nested" in cpu_props and cpu_props["nested"]:
config_spec.nestedHVEnabled = cpu_props["nested"] # True config_spec.nestedHVEnabled = cpu_props["nested"] # True
if "hotadd" in cpu_props and cpu_props["hotadd"]: if "hotadd" in cpu_props and cpu_props["hotadd"]:
config_spec.cpuHotAddEnabled = cpu_props["hotadd"] # True config_spec.cpuHotAddEnabled = cpu_props["hotadd"] # True
if "hotremove" in cpu_props and cpu_props["hotremove"]: if "hotremove" in cpu_props and cpu_props["hotremove"]:
skipping to change at line 8004 skipping to change at line 7981
def _apply_memory_config(config_spec, memory): def _apply_memory_config(config_spec, memory):
""" """
Sets memory size to the given value Sets memory size to the given value
config_spec config_spec
vm.ConfigSpec object vm.ConfigSpec object
memory memory
Memory size and unit Memory size and unit
""" """
log.trace( log.trace("Configuring virtual machine memory " "settings memory={}".format(memory))
"Configuring virtual machine memory " "settings memory={0}".format(memory)
)
if "size" in memory and "unit" in memory: if "size" in memory and "unit" in memory:
try: try:
if memory["unit"].lower() == "kb": if memory["unit"].lower() == "kb":
memory_mb = memory["size"] / 1024 memory_mb = memory["size"] / 1024
elif memory["unit"].lower() == "mb": elif memory["unit"].lower() == "mb":
memory_mb = memory["size"] memory_mb = memory["size"]
elif memory["unit"].lower() == "gb": elif memory["unit"].lower() == "gb":
memory_mb = int(float(memory["size"]) * 1024) memory_mb = int(float(memory["size"]) * 1024)
except (TypeError, ValueError): except (TypeError, ValueError):
memory_mb = int(memory["size"]) memory_mb = int(memory["size"])
skipping to change at line 8058 skipping to change at line 8033
config_spec config_spec
vm.ConfigSpec object vm.ConfigSpec object
advanced_config advanced_config
config key value pairs config key value pairs
vm_extra_config vm_extra_config
Virtual machine vm_ref.config.extraConfig object Virtual machine vm_ref.config.extraConfig object
""" """
log.trace( log.trace(
"Configuring advanced configuration " "parameters {0}".format(advanced_c onfig) "Configuring advanced configuration " "parameters {}".format(advanced_co nfig)
) )
if isinstance(advanced_config, str): if isinstance(advanced_config, str):
raise salt.exceptions.ArgumentValueError( raise salt.exceptions.ArgumentValueError(
"The specified 'advanced_configs' configuration " "The specified 'advanced_configs' configuration "
"option cannot be parsed, please check the parameters" "option cannot be parsed, please check the parameters"
) )
for key, value in six.iteritems(advanced_config): for key, value in advanced_config.items():
if vm_extra_config: if vm_extra_config:
for option in vm_extra_config: for option in vm_extra_config:
if option.key == key and option.value == str(value): if option.key == key and option.value == str(value):
continue continue
else: else:
option = vim.option.OptionValue(key=key, value=value) option = vim.option.OptionValue(key=key, value=value)
config_spec.extraConfig.append(option) config_spec.extraConfig.append(option)
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@supports_proxies("esxvm", "esxcluster", "esxdatacenter") @supports_proxies("esxvm", "esxcluster", "esxdatacenter")
skipping to change at line 8130 skipping to change at line 8105
config_spec config_spec
vm.ConfigSpec object vm.ConfigSpec object
advanced_config advanced_config
List of advanced config keys to be deleted List of advanced config keys to be deleted
vm_extra_config vm_extra_config
Virtual machine vm_ref.config.extraConfig object Virtual machine vm_ref.config.extraConfig object
""" """
log.trace( log.trace(
"Removing advanced configuration " "parameters {0}".format(advanced_conf ig) "Removing advanced configuration " "parameters {}".format(advanced_confi g)
) )
if isinstance(advanced_config, str): if isinstance(advanced_config, str):
raise salt.exceptions.ArgumentValueError( raise salt.exceptions.ArgumentValueError(
"The specified 'advanced_configs' configuration " "The specified 'advanced_configs' configuration "
"option cannot be parsed, please check the parameters" "option cannot be parsed, please check the parameters"
) )
removed_configs = [] removed_configs = []
for key in advanced_config: for key in advanced_config:
for option in vm_extra_config: for option in vm_extra_config:
if option.key == key: if option.key == key:
skipping to change at line 8201 skipping to change at line 8176
List of SCSI Controller objects (old+newly created) List of SCSI Controller objects (old+newly created)
""" """
# list of new/old VirtualSCSIController objects, both new and old objects # list of new/old VirtualSCSIController objects, both new and old objects
# should contain a key attribute key should be a negative integer in case # should contain a key attribute key should be a negative integer in case
# of a new object # of a new object
keys = [ keys = [
ctrl.key for ctrl in scsi_ctrls if scsi_ctrls and ctrl.busNumber == bus_number ctrl.key for ctrl in scsi_ctrls if scsi_ctrls and ctrl.busNumber == bus_number
] ]
if not keys: if not keys:
raise salt.exceptions.VMwareVmCreationError( raise salt.exceptions.VMwareVmCreationError(
"SCSI controller number {0} doesn't exist".format(bus_number) "SCSI controller number {} doesn't exist".format(bus_number)
) )
return keys[0] return keys[0]
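_get_scsi_controller_key simply filters the known controllers by bus number and fails if nothing matches (the key can be negative for controllers created in the same request). A self-contained sketch with plain dicts instead of vim objects:

.. code-block:: python

    def get_scsi_controller_key(bus_number, scsi_ctrls):
        keys = [c["key"] for c in scsi_ctrls if c["bus_number"] == bus_number]
        if not keys:
            raise ValueError("SCSI controller number {} doesn't exist".format(bus_number))
        return keys[0]

    controllers = [{"key": -1000, "bus_number": 0}, {"key": 1001, "bus_number": 1}]
    assert get_scsi_controller_key(0, controllers) == -1000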
def _apply_hard_disk( def _apply_hard_disk(
unit_number, unit_number,
key, key,
operation, operation,
disk_label=None, disk_label=None,
size=None, size=None,
unit="GB", unit="GB",
skipping to change at line 8256 skipping to change at line 8231
eagerly_scrub eagerly_scrub
Boolean for eagerly scrubbing Boolean for eagerly scrubbing
datastore datastore
Datastore name where the disk will be located Datastore name where the disk will be located
filename filename
Full file name of the vm disk Full file name of the vm disk
""" """
log.trace( log.trace(
"Configuring hard disk {0} size={1}, unit={2}, " "Configuring hard disk {} size={}, unit={}, "
"controller_key={3}, thin_provision={4}, " "controller_key={}, thin_provision={}, "
"eagerly_scrub={5}, datastore={6}, " "eagerly_scrub={}, datastore={}, "
"filename={7}".format( "filename={}".format(
disk_label, disk_label,
size, size,
unit, unit,
controller_key, controller_key,
thin_provision, thin_provision,
eagerly_scrub, eagerly_scrub,
datastore, datastore,
filename, filename,
) )
) )
skipping to change at line 8291 skipping to change at line 8266
disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo () disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo ()
disk_spec.device.backing.diskMode = "persistent" disk_spec.device.backing.diskMode = "persistent"
if thin_provision is not None: if thin_provision is not None:
disk_spec.device.backing.thinProvisioned = thin_provision disk_spec.device.backing.thinProvisioned = thin_provision
if eagerly_scrub is not None and eagerly_scrub != "None": if eagerly_scrub is not None and eagerly_scrub != "None":
disk_spec.device.backing.eagerlyScrub = eagerly_scrub disk_spec.device.backing.eagerlyScrub = eagerly_scrub
if controller_key: if controller_key:
disk_spec.device.controllerKey = controller_key disk_spec.device.controllerKey = controller_key
if operation == "add": if operation == "add":
disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
disk_spec.device.backing.fileName = "[{0}] {1}".format( disk_spec.device.backing.fileName = "[{}] {}".format(
salt.utils.vmware.get_managed_object_name(datastore), filename salt.utils.vmware.get_managed_object_name(datastore), filename
) )
disk_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create disk_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
elif operation == "edit": elif operation == "edit":
disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
return disk_spec return disk_spec
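For an "add" operation the backing file name is composed as "[<datastore>] <path>", the same bracketed datastore form used elsewhere in this module (for example "[share] iso/disk.iso" in the CD/DVD docstring); a trivial illustration with made-up names:

.. code-block:: python

    datastore_name, filename = "myshare", "vm/mydisk.vmdk"  # illustrative values
    backing_file = "[{}] {}".format(datastore_name, filename)
    assert backing_file == "[myshare] vm/mydisk.vmdk"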
def _create_adapter_type(network_adapter, adapter_type, network_adapter_label="" ): def _create_adapter_type(network_adapter, adapter_type, network_adapter_label="" ):
""" """
Returns a vim.vm.device.VirtualEthernetCard object specifying a virtual Returns a vim.vm.device.VirtualEthernetCard object specifying a virtual
skipping to change at line 8315 skipping to change at line 8290
None or VirtualEthernet object None or VirtualEthernet object
adapter_type adapter_type
String, type of adapter String, type of adapter
network_adapter_label network_adapter_label
string, network adapter name string, network adapter name
""" """
log.trace( log.trace(
"Configuring virtual machine network " "Configuring virtual machine network "
"adapter adapter_type={0}".format(adapter_type) "adapter adapter_type={}".format(adapter_type)
) )
if adapter_type in ["vmxnet", "vmxnet2", "vmxnet3", "e1000", "e1000e"]: if adapter_type in ["vmxnet", "vmxnet2", "vmxnet3", "e1000", "e1000e"]:
edited_network_adapter = salt.utils.vmware.get_network_adapter_type( edited_network_adapter = salt.utils.vmware.get_network_adapter_type(
adapter_type adapter_type
) )
if isinstance(network_adapter, type(edited_network_adapter)): if isinstance(network_adapter, type(edited_network_adapter)):
edited_network_adapter = network_adapter edited_network_adapter = network_adapter
else: else:
if network_adapter: if network_adapter:
log.trace( log.trace(
"Changing type of '{0}' from" "Changing type of '{}' from"
" '{1}' to '{2}'".format( " '{}' to '{}'".format(
network_adapter.deviceInfo.label, network_adapter.deviceInfo.label,
type(network_adapter).__name__.rsplit(".", 1)[1][7:].lower(), type(network_adapter).__name__.rsplit(".", 1)[1][7:].lower(),
adapter_type, adapter_type,
) )
) )
else: else:
# If device is edited and type not specified or does not match, # If device is edited and type not specified or does not match,
# don't change adapter type # don't change adapter type
if network_adapter: if network_adapter:
if adapter_type: if adapter_type:
log.error( log.error(
"Cannot change type of '{0}' to '{1}'. " "Cannot change type of '{}' to '{}'. "
"Not changing type".format( "Not changing type".format(
network_adapter.deviceInfo.label, adapter_type network_adapter.deviceInfo.label, adapter_type
) )
) )
edited_network_adapter = network_adapter edited_network_adapter = network_adapter
else: else:
if not adapter_type: if not adapter_type:
log.trace( log.trace(
"The type of '{0}' has not been specified. " "The type of '{}' has not been specified. "
"Creating of default type 'vmxnet3'".format(network_adapter_ label) "Creating of default type 'vmxnet3'".format(network_adapter_ label)
) )
edited_network_adapter = vim.vm.device.VirtualVmxnet3() edited_network_adapter = vim.vm.device.VirtualVmxnet3()
return edited_network_adapter return edited_network_adapter
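Stripped of the pyVmomi classes, the decision _create_adapter_type makes is roughly: honour a recognised adapter_type, otherwise keep the existing adapter, and default brand-new adapters to vmxnet3. A simplified, string-based sketch of that fall-through (the function name is ours):

.. code-block:: python

    KNOWN_TYPES = {"vmxnet", "vmxnet2", "vmxnet3", "e1000", "e1000e"}

    def choose_adapter_type(existing_type, requested_type):
        if requested_type in KNOWN_TYPES:
            return requested_type
        if existing_type:
            # editing with an unknown requested type: keep the adapter as-is
            return existing_type
        # creating a new adapter without a usable type: default to vmxnet3
        return "vmxnet3"

    assert choose_adapter_type(None, "") == "vmxnet3"
    assert choose_adapter_type("e1000", "bogus") == "e1000"
    assert choose_adapter_type(None, "vmxnet3") == "vmxnet3"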
def _create_network_backing(network_name, switch_type, parent_ref): def _create_network_backing(network_name, switch_type, parent_ref):
""" """
Returns a vim.vm.device.VirtualDevice.BackingInfo object specifying a Returns a vim.vm.device.VirtualDevice.BackingInfo object specifying a
virtual ethernet card backing information virtual ethernet card backing information
network_name network_name
string, network name string, network name
switch_type switch_type
string, type of switch string, type of switch
parent_ref parent_ref
Parent reference to search for network Parent reference to search for network
""" """
log.trace( log.trace(
"Configuring virtual machine network backing network_name={0} " "Configuring virtual machine network backing network_name={} "
"switch_type={1} parent={2}".format( "switch_type={} parent={}".format(
network_name, network_name,
switch_type, switch_type,
salt.utils.vmware.get_managed_object_name(parent_ref), salt.utils.vmware.get_managed_object_name(parent_ref),
) )
) )
backing = {} backing = {}
if network_name: if network_name:
if switch_type == "standard": if switch_type == "standard":
networks = salt.utils.vmware.get_networks( networks = salt.utils.vmware.get_networks(
parent_ref, network_names=[network_name] parent_ref, network_names=[network_name]
) )
if not networks: if not networks:
raise salt.exceptions.VMwareObjectRetrievalError( raise salt.exceptions.VMwareObjectRetrievalError(
"The network '{0}' could not be " "retrieved.".format(networ k_name) "The network '{}' could not be " "retrieved.".format(network _name)
) )
network_ref = networks[0] network_ref = networks[0]
backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo() backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
backing.deviceName = network_name backing.deviceName = network_name
backing.network = network_ref backing.network = network_ref
elif switch_type == "distributed": elif switch_type == "distributed":
networks = salt.utils.vmware.get_dvportgroups( networks = salt.utils.vmware.get_dvportgroups(
parent_ref, portgroup_names=[network_name] parent_ref, portgroup_names=[network_name]
) )
if not networks: if not networks:
raise salt.exceptions.VMwareObjectRetrievalError( raise salt.exceptions.VMwareObjectRetrievalError(
"The port group '{0}' could not be " "The port group '{}' could not be "
"retrieved.".format(network_name) "retrieved.".format(network_name)
) )
network_ref = networks[0] network_ref = networks[0]
dvs_port_connection = vim.dvs.PortConnection( dvs_port_connection = vim.dvs.PortConnection(
portgroupKey=network_ref.key, portgroupKey=network_ref.key,
switchUuid=network_ref.config.distributedVirtualSwitch.uuid, switchUuid=network_ref.config.distributedVirtualSwitch.uuid,
) )
backing = ( backing = (
vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo() vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
) )
skipping to change at line 8456 skipping to change at line 8431
mac mac
MAC address of the network adapter MAC address of the network adapter
parent parent
Parent object reference Parent object reference
""" """
adapter_type = adapter_type.strip().lower() adapter_type = adapter_type.strip().lower()
switch_type = switch_type.strip().lower() switch_type = switch_type.strip().lower()
log.trace( log.trace(
"Configuring virtual machine network adapter " "Configuring virtual machine network adapter "
"network_adapter_label={0} network_name={1} " "network_adapter_label={} network_name={} "
"adapter_type={2} switch_type={3} mac={4}".format( "adapter_type={} switch_type={} mac={}".format(
network_adapter_label, network_name, adapter_type, switch_type, mac network_adapter_label, network_name, adapter_type, switch_type, mac
) )
) )
network_spec = vim.vm.device.VirtualDeviceSpec() network_spec = vim.vm.device.VirtualDeviceSpec()
network_spec.device = _create_adapter_type( network_spec.device = _create_adapter_type(
network_spec.device, adapter_type, network_adapter_label=network_adapter_label network_spec.device, adapter_type, network_adapter_label=network_adapter_label
) )
network_spec.device.deviceInfo = vim.Description() network_spec.device.deviceInfo = vim.Description()
if operation == "add": if operation == "add":
network_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add network_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
skipping to change at line 8527 skipping to change at line 8502
the possible values: 'add' and 'edit'; the default value is 'add' the possible values: 'add' and 'edit'; the default value is 'add'
.. code-block:: bash .. code-block:: bash
scsi: scsi:
adapter: 'SCSI controller 0' adapter: 'SCSI controller 0'
type: paravirtual or lsilogic or lsilogic_sas type: paravirtual or lsilogic or lsilogic_sas
bus_sharing: 'no_sharing' or 'virtual_sharing' or 'physical_sharing' bus_sharing: 'no_sharing' or 'virtual_sharing' or 'physical_sharing'
""" """
log.trace( log.trace(
"Configuring scsi controller adapter={0} adapter_type={1} " "Configuring scsi controller adapter={} adapter_type={} "
"bus_sharing={2} key={3} bus_number={4}".format( "bus_sharing={} key={} bus_number={}".format(
adapter, adapter_type, bus_sharing, key, bus_number adapter, adapter_type, bus_sharing, key, bus_number
) )
) )
scsi_spec = vim.vm.device.VirtualDeviceSpec() scsi_spec = vim.vm.device.VirtualDeviceSpec()
if adapter_type == "lsilogic": if adapter_type == "lsilogic":
summary = "LSI Logic" summary = "LSI Logic"
scsi_spec.device = vim.vm.device.VirtualLsiLogicController() scsi_spec.device = vim.vm.device.VirtualLsiLogicController()
elif adapter_type == "lsilogic_sas": elif adapter_type == "lsilogic_sas":
summary = "LSI Logic Sas" summary = "LSI Logic Sas"
scsi_spec.device = vim.vm.device.VirtualLsiLogicSASController() scsi_spec.device = vim.vm.device.VirtualLsiLogicSASController()
skipping to change at line 8584 skipping to change at line 8559
Returns a list of vim.vm.device.VirtualDeviceSpec objects representing Returns a list of vim.vm.device.VirtualDeviceSpec objects representing
IDE controllers IDE controllers
ide_controllers ide_controllers
IDE properties IDE properties
""" """
ide_ctrls = [] ide_ctrls = []
keys = range(-200, -250, -1) keys = range(-200, -250, -1)
if ide_controllers: if ide_controllers:
devs = [ide["adapter"] for ide in ide_controllers] devs = [ide["adapter"] for ide in ide_controllers]
log.trace("Creating IDE controllers {0}".format(devs)) log.trace("Creating IDE controllers {}".format(devs))
for ide, key in zip(ide_controllers, keys): for ide, key in zip(ide_controllers, keys):
ide_ctrls.append( ide_ctrls.append(
_apply_ide_controller_config(ide["adapter"], "add", key, abs(key + 200)) _apply_ide_controller_config(ide["adapter"], "add", key, abs(key + 200))
) )
return ide_ctrls return ide_ctrls
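The helper above assigns each new IDE controller a temporary negative key from range(-200, -250, -1) and derives the bus number as abs(key + 200); a short illustration with made-up adapter labels:

.. code-block:: python

    ide_controllers = [{"adapter": "IDE 0"}, {"adapter": "IDE 1"}]
    keys = range(-200, -250, -1)
    pairs = [
        (ide["adapter"], key, abs(key + 200))
        for ide, key in zip(ide_controllers, keys)
    ]
    assert pairs == [("IDE 0", -200, 0), ("IDE 1", -201, 1)]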
def _apply_ide_controller_config(ide_controller_label, operation, key, bus_numbe r=0): def _apply_ide_controller_config(ide_controller_label, operation, key, bus_numbe r=0):
""" """
Returns a vim.vm.device.VirtualDeviceSpec object specifying to add/edit an Returns a vim.vm.device.VirtualDeviceSpec object specifying to add/edit an
IDE controller IDE controller
skipping to change at line 8610 skipping to change at line 8585
Type of operation: add or edit Type of operation: add or edit
key key
Unique key of the device Unique key of the device
bus_number bus_number
Device bus number property Device bus number property
""" """
log.trace( log.trace(
"Configuring IDE controller " "Configuring IDE controller "
"ide_controller_label={0}".format(ide_controller_label) "ide_controller_label={}".format(ide_controller_label)
) )
ide_spec = vim.vm.device.VirtualDeviceSpec() ide_spec = vim.vm.device.VirtualDeviceSpec()
ide_spec.device = vim.vm.device.VirtualIDEController() ide_spec.device = vim.vm.device.VirtualIDEController()
if operation == "add": if operation == "add":
ide_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add ide_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
if operation == "edit": if operation == "edit":
ide_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit ide_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
ide_spec.device.key = key ide_spec.device.key = key
ide_spec.device.busNumber = bus_number ide_spec.device.busNumber = bus_number
if ide_controller_label: if ide_controller_label:
skipping to change at line 8638 skipping to change at line 8613
Returns a list of vim.vm.device.VirtualDeviceSpec objects representing Returns a list of vim.vm.device.VirtualDeviceSpec objects representing
SATA controllers SATA controllers
sata_controllers sata_controllers
SATA properties SATA properties
""" """
sata_ctrls = [] sata_ctrls = []
keys = range(-15000, -15050, -1) keys = range(-15000, -15050, -1)
if sata_controllers: if sata_controllers:
devs = [sata["adapter"] for sata in sata_controllers] devs = [sata["adapter"] for sata in sata_controllers]
log.trace("Creating SATA controllers {0}".format(devs)) log.trace("Creating SATA controllers {}".format(devs))
for sata, key in zip(sata_controllers, keys): for sata, key in zip(sata_controllers, keys):
sata_ctrls.append( sata_ctrls.append(
_apply_sata_controller_config( _apply_sata_controller_config(
sata["adapter"], "add", key, sata["bus_number"] sata["adapter"], "add", key, sata["bus_number"]
) )
) )
return sata_ctrls return sata_ctrls
def _apply_sata_controller_config(sata_controller_label, operation, key, bus_num ber=0): def _apply_sata_controller_config(sata_controller_label, operation, key, bus_num ber=0):
""" """
skipping to change at line 8666 skipping to change at line 8641
Type of operation: add or edit Type of operation: add or edit
key key
Unique key of the device Unique key of the device
bus_number bus_number
Device bus number property Device bus number property
""" """
log.trace( log.trace(
"Configuring SATA controller " "Configuring SATA controller "
"sata_controller_label={0}".format(sata_controller_label) "sata_controller_label={}".format(sata_controller_label)
) )
sata_spec = vim.vm.device.VirtualDeviceSpec() sata_spec = vim.vm.device.VirtualDeviceSpec()
sata_spec.device = vim.vm.device.VirtualAHCIController() sata_spec.device = vim.vm.device.VirtualAHCIController()
if operation == "add": if operation == "add":
sata_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add sata_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
elif operation == "edit": elif operation == "edit":
sata_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit sata_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
sata_spec.device.key = key sata_spec.device.key = key
sata_spec.device.controllerKey = 100 sata_spec.device.controllerKey = 100
sata_spec.device.busNumber = bus_number sata_spec.device.busNumber = bus_number
skipping to change at line 8739 skipping to change at line 8714
device_type: datastore_iso_file or client_device device_type: datastore_iso_file or client_device
client_device: client_device:
mode: atapi or passthrough mode: atapi or passthrough
datastore_iso_file: datastore_iso_file:
path: "[share] iso/disk.iso" path: "[share] iso/disk.iso"
connectable: connectable:
start_connected: True start_connected: True
allow_guest_control: allow_guest_control:
""" """
log.trace( log.trace(
"Configuring CD/DVD drive drive_label={0} " "Configuring CD/DVD drive drive_label={} "
"device_type={1} client_device={2} " "device_type={} client_device={} "
"datastore_iso_file={3}".format( "datastore_iso_file={}".format(
drive_label, device_type, client_device, datastore_iso_file drive_label, device_type, client_device, datastore_iso_file
) )
) )
drive_spec = vim.vm.device.VirtualDeviceSpec() drive_spec = vim.vm.device.VirtualDeviceSpec()
drive_spec.device = vim.vm.device.VirtualCdrom() drive_spec.device = vim.vm.device.VirtualCdrom()
drive_spec.device.deviceInfo = vim.Description() drive_spec.device.deviceInfo = vim.Description()
if operation == "add": if operation == "add":
drive_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add drive_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
elif operation == "edit": elif operation == "edit":
drive_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit drive_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
skipping to change at line 8763 skipping to change at line 8738
drive_spec.device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo() drive_spec.device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo()
drive_spec.device.backing.fileName = datastore_iso_file["path"] drive_spec.device.backing.fileName = datastore_iso_file["path"]
datastore = datastore_iso_file["path"].partition("[")[-1].rpartition("]" )[0] datastore = datastore_iso_file["path"].partition("[")[-1].rpartition("]" )[0]
datastore_object = salt.utils.vmware.get_datastores( datastore_object = salt.utils.vmware.get_datastores(
salt.utils.vmware.get_service_instance_from_managed_object(parent_ref), salt.utils.vmware.get_service_instance_from_managed_object(parent_ref),
parent_ref, parent_ref,
datastore_names=[datastore], datastore_names=[datastore],
)[0] )[0]
if datastore_object: if datastore_object:
drive_spec.device.backing.datastore = datastore_object drive_spec.device.backing.datastore = datastore_object
drive_spec.device.deviceInfo.summary = "{0}".format(datastore_iso_file["path"]) drive_spec.device.deviceInfo.summary = "{}".format(datastore_iso_file["path"])
elif device_type == "client_device": elif device_type == "client_device":
if client_device["mode"] == "passthrough": if client_device["mode"] == "passthrough":
drive_spec.device.backing = ( drive_spec.device.backing = (
vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo() vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
) )
elif client_device["mode"] == "atapi": elif client_device["mode"] == "atapi":
drive_spec.device.backing = ( drive_spec.device.backing = (
vim.vm.device.VirtualCdrom.RemoteAtapiBackingInfo() vim.vm.device.VirtualCdrom.RemoteAtapiBackingInfo()
) )
drive_spec.device.key = key drive_spec.device.key = key
skipping to change at line 8847 skipping to change at line 8822
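# Illustrative sketch (hypothetical values, not from the Salt source): a
# cd_drives entry of the shape described in the docstring above, plus the
# datastore-name extraction the drive helper performs on the ISO path.
example_cd_drive = {
    "adapter": "CD/DVD drive 1",                      # hypothetical drive label
    "device_type": "datastore_iso_file",
    "datastore_iso_file": {"path": "[share] iso/disk.iso"},
    "connectable": {"start_connected": True, "allow_guest_control": True},
}
iso_path = example_cd_drive["datastore_iso_file"]["path"]
# partition/rpartition recover the datastore name between the brackets
datastore_name = iso_path.partition("[")[-1].rpartition("]")[0]
assert datastore_name == "share"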
type: uri type: uri
uri: 'telnet://something:port' uri: 'telnet://something:port'
direction: <client|server> direction: <client|server>
filename: 'service_uri' filename: 'service_uri'
connectable: connectable:
allow_guest_control: True allow_guest_control: True
start_connected: True start_connected: True
yield: False yield: False
""" """
log.trace( log.trace(
"Creating serial port adapter={0} type={1} connectable={2} " "Creating serial port adapter={} type={} connectable={} "
"yield={3}".format( "yield={}".format(
serial_device_spec["adapter"], serial_device_spec["adapter"],
serial_device_spec["type"], serial_device_spec["type"],
serial_device_spec["connectable"], serial_device_spec["connectable"],
serial_device_spec["yield"], serial_device_spec["yield"],
) )
) )
device_spec = vim.vm.device.VirtualDeviceSpec() device_spec = vim.vm.device.VirtualDeviceSpec()
device_spec.device = vim.vm.device.VirtualSerialPort() device_spec.device = vim.vm.device.VirtualSerialPort()
if operation == "add": if operation == "add":
device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
skipping to change at line 8928 skipping to change at line 8903
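# Illustrative sketch (hypothetical values, not from the Salt source): a
# serial_device_spec dictionary of the shape logged by the serial-port helper
# above; "yield" typically controls the port's yield-on-poll behaviour.
example_serial_spec = {
    "adapter": "Serial port 1",
    "type": "uri",
    "uri": "telnet://console.example.com:2001",       # hypothetical endpoint
    "direction": "server",
    "filename": "service_uri",
    "connectable": {"allow_guest_control": True, "start_connected": True},
    "yield": False,
}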
controller: 'SCSI controller 0' controller: 'SCSI controller 0'
thin_provision: False thin_provision: False
eagerly_scrub: False eagerly_scrub: False
datastore: 'myshare' datastore: 'myshare'
filename: 'vm/mydisk.vmdk' filename: 'vm/mydisk.vmdk'
""" """
disk_specs = [] disk_specs = []
keys = range(-2000, -2050, -1) keys = range(-2000, -2050, -1)
if disks: if disks:
devs = [disk["adapter"] for disk in disks] devs = [disk["adapter"] for disk in disks]
log.trace("Creating disks {0}".format(devs)) log.trace("Creating disks {}".format(devs))
for disk, key in zip(disks, keys): for disk, key in zip(disks, keys):
# create the disk # create the disk
filename, datastore, datastore_ref = None, None, None filename, datastore, datastore_ref = None, None, None
size = float(disk["size"]) size = float(disk["size"])
# when creating both SCSI controller and Hard disk at the same time # when creating both SCSI controller and Hard disk at the same time
# we need the randomly assigned (temporary) key of the newly created # we need the randomly assigned (temporary) key of the newly created
# SCSI controller # SCSI controller
controller_key = 1000 # Default is the first SCSI controller controller_key = 1000 # Default is the first SCSI controller
if "address" in disk: # 0:0 if "address" in disk: # 0:0
controller_bus_number, unit_number = disk["address"].split(":") controller_bus_number, unit_number = disk["address"].split(":")
skipping to change at line 8952 skipping to change at line 8927
controller_bus_number, scsi_ctrls=scsi_controllers controller_bus_number, scsi_ctrls=scsi_controllers
) )
elif "controller" in disk: elif "controller" in disk:
for contr in scsi_controllers: for contr in scsi_controllers:
if contr["label"] == disk["controller"]: if contr["label"] == disk["controller"]:
controller_key = contr["key"] controller_key = contr["key"]
break break
else: else:
raise salt.exceptions.VMwareObjectNotFoundError( raise salt.exceptions.VMwareObjectNotFoundError(
"The given controller does not exist: " "The given controller does not exist: "
"{0}".format(disk["controller"]) "{}".format(disk["controller"])
) )
if "datastore" in disk: if "datastore" in disk:
datastore_ref = salt.utils.vmware.get_datastores( datastore_ref = salt.utils.vmware.get_datastores(
service_instance, parent, datastore_names=[disk["datastore"]] service_instance, parent, datastore_names=[disk["datastore"]]
)[0] )[0]
datastore = disk["datastore"] datastore = disk["datastore"]
if "filename" in disk: if "filename" in disk:
filename = disk["filename"] filename = disk["filename"]
# XOR filename, datastore # XOR filename, datastore
if (not filename and datastore) or (filename and not datastore): if (not filename and datastore) or (filename and not datastore):
raise salt.exceptions.ArgumentValueError( raise salt.exceptions.ArgumentValueError(
"You must specify both filename and datastore attributes" "You must specify both filename and datastore attributes"
" to place your disk to a specific datastore " " to place your disk to a specific datastore "
"{0}, {1}".format(datastore, filename) "{}, {}".format(datastore, filename)
) )
disk_spec = _apply_hard_disk( disk_spec = _apply_hard_disk(
unit_number, unit_number,
key, key,
disk_label=disk["adapter"], disk_label=disk["adapter"],
size=size, size=size,
unit=disk["unit"], unit=disk["unit"],
controller_key=controller_key, controller_key=controller_key,
operation="add", operation="add",
thin_provision=disk["thin_provision"], thin_provision=disk["thin_provision"],
skipping to change at line 8997 skipping to change at line 8972
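# Illustrative sketch (hypothetical values, not from the Salt source): a disk
# definition of the shape consumed by the disk-creation loop above, together
# with the address parsing and the filename/datastore pairing rule it enforces.
example_disk = {
    "adapter": "Hard disk 1",
    "size": 16,                    # interpreted via float(disk["size"])
    "unit": "GB",
    "address": "0:0",              # controller bus number : unit number
    "thin_provision": True,
    "eagerly_scrub": False,
    "datastore": "myshare",
    "filename": "vm/mydisk.vmdk",
}
controller_bus_number, unit_number = example_disk["address"].split(":")
# filename and datastore must be given together (the "XOR" check above)
filename = example_disk.get("filename")
datastore = example_disk.get("datastore")
if (not filename and datastore) or (filename and not datastore):
    raise ValueError("filename and datastore must be specified together")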
Returns a list of vim.vm.device.VirtualDeviceSpec objects representing Returns a list of vim.vm.device.VirtualDeviceSpec objects representing
SCSI controllers SCSI controllers
scsi_devices: scsi_devices:
List of SCSI device properties List of SCSI device properties
""" """
keys = range(-1000, -1050, -1) keys = range(-1000, -1050, -1)
scsi_specs = [] scsi_specs = []
if scsi_devices: if scsi_devices:
devs = [scsi["adapter"] for scsi in scsi_devices] devs = [scsi["adapter"] for scsi in scsi_devices]
log.trace("Creating SCSI devices {0}".format(devs)) log.trace("Creating SCSI devices {}".format(devs))
# unitNumber for disk attachment, 0:0 1st 0 is the controller busNumber, # unitNumber for disk attachment, 0:0 1st 0 is the controller busNumber,
# 2nd is the unitNumber # 2nd is the unitNumber
for (key, scsi_controller) in zip(keys, scsi_devices): for (key, scsi_controller) in zip(keys, scsi_devices):
# create the SCSI controller # create the SCSI controller
scsi_spec = _apply_scsi_controller( scsi_spec = _apply_scsi_controller(
scsi_controller["adapter"], scsi_controller["adapter"],
scsi_controller["type"], scsi_controller["type"],
scsi_controller["bus_sharing"], scsi_controller["bus_sharing"],
key, key,
scsi_controller["bus_number"], scsi_controller["bus_number"],
skipping to change at line 9038 skipping to change at line 9013
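# Illustrative sketch (hypothetical values, not from the Salt source): a SCSI
# controller definition of the shape iterated over above; the negative keys
# from range(-1000, -1050, -1) act as temporary device keys until vCenter
# assigns real ones.
example_scsi_controller = {
    "adapter": "SCSI controller 0",
    "type": "paravirtual",          # e.g. lsilogic, lsilogic_sas, paravirtual
    "bus_sharing": "no_sharing",
    "bus_number": 0,
}
temporary_keys = list(range(-1000, -1050, -1))
assert temporary_keys[0] == -1000 and len(temporary_keys) == 50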
name: vlan100 name: vlan100
switch_type: distributed or standard switch_type: distributed or standard
adapter_type: vmxnet3 or vmxnet, vmxnet2, vmxnet3, e1000, e1000e adapter_type: vmxnet3 or vmxnet, vmxnet2, vmxnet3, e1000, e1000e
mac: '00:11:22:33:44:55' mac: '00:11:22:33:44:55'
""" """
network_specs = [] network_specs = []
nics_settings = [] nics_settings = []
keys = range(-4000, -4050, -1) keys = range(-4000, -4050, -1)
if network_interfaces: if network_interfaces:
devs = [inter["adapter"] for inter in network_interfaces] devs = [inter["adapter"] for inter in network_interfaces]
log.trace("Creating network interfaces {0}".format(devs)) log.trace("Creating network interfaces {}".format(devs))
for interface, key in zip(network_interfaces, keys): for interface, key in zip(network_interfaces, keys):
network_spec = _apply_network_adapter_config( network_spec = _apply_network_adapter_config(
key, key,
interface["name"], interface["name"],
interface["adapter_type"], interface["adapter_type"],
interface["switch_type"], interface["switch_type"],
network_adapter_label=interface["adapter"], network_adapter_label=interface["adapter"],
operation="add", operation="add",
connectable=interface["connectable"] connectable=interface["connectable"]
if "connectable" in interface if "connectable" in interface
skipping to change at line 9077 skipping to change at line 9052
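# Illustrative sketch (hypothetical values, not from the Salt source): a
# network interface definition of the shape handled by the adapter-creation
# loop above.
example_interface = {
    "adapter": "Network adapter 1",
    "name": "vlan100",
    "switch_type": "distributed",          # or "standard"
    "adapter_type": "vmxnet3",             # vmxnet, vmxnet2, vmxnet3, e1000, e1000e
    "mac": "00:11:22:33:44:55",
    "connectable": {"start_connected": True, "allow_guest_control": True},
}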
Returns a list of vim.vm.device.VirtualDeviceSpec objects representing the Returns a list of vim.vm.device.VirtualDeviceSpec objects representing the
serial ports to be created for a virtual machine serial ports to be created for a virtual machine
serial_ports serial_ports
Serial port properties Serial port properties
""" """
ports = [] ports = []
keys = range(-9000, -9050, -1) keys = range(-9000, -9050, -1)
if serial_ports: if serial_ports:
devs = [serial["adapter"] for serial in serial_ports] devs = [serial["adapter"] for serial in serial_ports]
log.trace("Creating serial ports {0}".format(devs)) log.trace("Creating serial ports {}".format(devs))
for port, key in zip(serial_ports, keys): for port, key in zip(serial_ports, keys):
serial_port_device = _apply_serial_port(port, key, "add") serial_port_device = _apply_serial_port(port, key, "add")
ports.append(serial_port_device) ports.append(serial_port_device)
return ports return ports
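# Illustrative sketch (hypothetical data, not from the Salt source) of the
# key-assignment pattern used by the _create_* helpers above: each new device
# is paired with a unique temporary negative key; zip() stops at the shorter
# sequence, so at most 50 devices receive keys from the range.
serial_ports = [{"adapter": "Serial port 1"}, {"adapter": "Serial port 2"}]
keys = range(-9000, -9050, -1)
pairs = list(zip(serial_ports, keys))
assert [key for _, key in pairs] == [-9000, -9001]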
def _create_cd_drives(cd_drives, controllers=None, parent_ref=None): def _create_cd_drives(cd_drives, controllers=None, parent_ref=None):
""" """
Returns a list of vim.vm.device.VirtualDeviceSpec objects representing the Returns a list of vim.vm.device.VirtualDeviceSpec objects representing the
CD/DVD drives to be created for a virtual machine CD/DVD drives to be created for a virtual machine
skipping to change at line 9101 skipping to change at line 9076
controllers controllers
CD/DVD drive controllers (IDE, SATA) CD/DVD drive controllers (IDE, SATA)
parent_ref parent_ref
Parent object reference Parent object reference
""" """
cd_drive_specs = [] cd_drive_specs = []
keys = range(-3000, -3050, -1) keys = range(-3000, -3050, -1)
if cd_drives: if cd_drives:
devs = [dvd["adapter"] for dvd in cd_drives] devs = [dvd["adapter"] for dvd in cd_drives]
log.trace("Creating cd/dvd drives {0}".format(devs)) log.trace("Creating cd/dvd drives {}".format(devs))
for drive, key in zip(cd_drives, keys): for drive, key in zip(cd_drives, keys):
# if a controller is not available/cannot be created we should use the # if a controller is not available/cannot be created we should use the
# one which is available by default, this is 'IDE 0' # one which is available by default, this is 'IDE 0'
controller_key = 200 controller_key = 200
if controllers: if controllers:
controller = _get_device_by_label(controllers, drive["controller"]) controller = _get_device_by_label(controllers, drive["controller"])
controller_key = controller.key controller_key = controller.key
cd_drive_specs.append( cd_drive_specs.append(
_apply_cd_drive( _apply_cd_drive(
drive["adapter"], drive["adapter"],
skipping to change at line 9147 skipping to change at line 9122
list of vim.vm.device.VirtualDevice objects list of vim.vm.device.VirtualDevice objects
key key
Unique key of device Unique key of device
""" """
device_keys = [d for d in devices if d.key == key] device_keys = [d for d in devices if d.key == key]
if device_keys: if device_keys:
return device_keys[0] return device_keys[0]
else: else:
raise salt.exceptions.VMwareObjectNotFoundError( raise salt.exceptions.VMwareObjectNotFoundError(
"Virtual machine device with unique key " "{0} does not exist".forma t(key) "Virtual machine device with unique key " "{} does not exist".format (key)
) )
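# Illustrative sketch (hypothetical data, not from the Salt source): the
# first-match semantics of the key lookup above, demonstrated with a minimal
# stand-in object instead of a real vim.vm.device.VirtualDevice.
from types import SimpleNamespace

devices = [SimpleNamespace(key=200), SimpleNamespace(key=1000)]
matches = [d for d in devices if d.key == 1000]
device = matches[0] if matches else None   # the helper raises if nothing matches
assert device is not None and device.key == 1000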
def _get_device_by_label(devices, label): def _get_device_by_label(devices, label):
""" """
Returns the device with the given label, raises error if the device is Returns the device with the given label, raises error if the device is
not found. not found.
devices devices
list of vim.vm.device.VirtualDevice objects list of vim.vm.device.VirtualDevice objects
key key
Unique key of device Unique key of device
""" """
device_labels = [d for d in devices if d.deviceInfo.label == label] device_labels = [d for d in devices if d.deviceInfo.label == label]
if device_labels: if device_labels:
return device_labels[0] return device_labels[0]
else: else:
raise salt.exceptions.VMwareObjectNotFoundError( raise salt.exceptions.VMwareObjectNotFoundError(
"Virtual machine device with " "label {0} does not exist".format(lab el) "Virtual machine device with " "label {} does not exist".format(labe l)
) )
def _convert_units(devices): def _convert_units(devices):
""" """
Updates the size and unit dictionary values with the new unit values Updates the size and unit dictionary values with the new unit values
devices devices
List of device data objects List of device data objects
""" """
if devices: if devices:
skipping to change at line 9270 skipping to change at line 9245
) )
# The adapter name shouldn't be changed # The adapter name shouldn't be changed
interface_diffs.remove_diff(diff_key="adapter") interface_diffs.remove_diff(diff_key="adapter")
if interface_diffs.diffs: if interface_diffs.diffs:
diffs["interfaces"] = interface_diffs diffs["interfaces"] = interface_diffs
# For general items where the identification can be done by adapter # For general items where the identification can be done by adapter
for key in keys: for key in keys:
if key not in current_config or key not in new_config: if key not in current_config or key not in new_config:
raise ValueError( raise ValueError(
"A general device {0} configuration was " "A general device {} configuration was "
"not supplied or it was not retrieved from " "not supplied or it was not retrieved from "
"remote configuration".format(key) "remote configuration".format(key)
) )
device_diffs = list_diff(current_config[key], new_config[key], "adapter") device_diffs = list_diff(current_config[key], new_config[key], "adapter")
if device_diffs.diffs: if device_diffs.diffs:
diffs[key] = device_diffs diffs[key] = device_diffs
return diffs return diffs
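# Illustrative sketch (hypothetical data, not from the Salt source): for the
# "general" device categories the comparison above matches current and new
# entries on their "adapter" label and records a per-category diff only when
# something actually changed.
current_config = {"disks": [{"adapter": "Hard disk 1", "size": 16, "unit": "GB"}]}
new_config = {"disks": [{"adapter": "Hard disk 1", "size": 32, "unit": "GB"}]}
key = "disks"
if key not in current_config or key not in new_config:
    raise ValueError("A general device {} configuration was not supplied".format(key))
# with Salt importable, list_diff(current_config[key], new_config[key], "adapter")
# would report the size change for 'Hard disk 1' here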
@gets_service_instance_via_proxy @gets_service_instance_via_proxy
skipping to change at line 9527 skipping to change at line 9502
The controller property cannot be updated, because controller address The controller property cannot be updated, because controller address
identifies the disk by the unit and bus number properties. identifies the disk by the unit and bus number properties.
disks_diffs disks_diffs
List of old and new disk properties, the properties are dictionary List of old and new disk properties, the properties are dictionary
objects objects
""" """
disk_changes = [] disk_changes = []
if disks_old_new: if disks_old_new:
devs = [disk["old"]["address"] for disk in disks_old_new] devs = [disk["old"]["address"] for disk in disks_old_new]
log.trace("Updating disks {0}".format(devs)) log.trace("Updating disks {}".format(devs))
for item in disks_old_new: for item in disks_old_new:
current_disk = item["old"] current_disk = item["old"]
next_disk = item["new"] next_disk = item["new"]
difference = recursive_diff(current_disk, next_disk) difference = recursive_diff(current_disk, next_disk)
difference.ignore_unset_values = False difference.ignore_unset_values = False
if difference.changed(): if difference.changed():
if next_disk["size"] < current_disk["size"]: if next_disk["size"] < current_disk["size"]:
raise salt.exceptions.VMwareSaltError( raise salt.exceptions.VMwareSaltError(
"Disk cannot be downsized size={0} unit={1} " "Disk cannot be downsized size={} unit={} "
"controller_key={2} " "controller_key={} "
"unit_number={3}".format( "unit_number={}".format(
next_disk["size"], next_disk["size"],
next_disk["unit"], next_disk["unit"],
current_disk["controller_key"], current_disk["controller_key"],
current_disk["unit_number"], current_disk["unit_number"],
) )
) )
log.trace( log.trace(
"Virtual machine disk will be updated " "Virtual machine disk will be updated "
"size={0} unit={1} controller_key={2} " "size={} unit={} controller_key={} "
"unit_number={3}".format( "unit_number={}".format(
next_disk["size"], next_disk["size"],
next_disk["unit"], next_disk["unit"],
current_disk["controller_key"], current_disk["controller_key"],
current_disk["unit_number"], current_disk["unit_number"],
) )
) )
device_config_spec = _apply_hard_disk( device_config_spec = _apply_hard_disk(
current_disk["unit_number"], current_disk["unit_number"],
current_disk["key"], current_disk["key"],
"edit", "edit",
skipping to change at line 9581 skipping to change at line 9556
""" """
Returns a list of vim.vm.device.VirtualDeviceSpec specifying the scsi Returns a list of vim.vm.device.VirtualDeviceSpec specifying the scsi
properties as input the old and new configs are defined in a dictionary. properties as input the old and new configs are defined in a dictionary.
scsi_diffs scsi_diffs
List of old and new scsi properties List of old and new scsi properties
""" """
device_config_specs = [] device_config_specs = []
if scsis_old_new: if scsis_old_new:
devs = [scsi["old"]["adapter"] for scsi in scsis_old_new] devs = [scsi["old"]["adapter"] for scsi in scsis_old_new]
log.trace("Updating SCSI controllers {0}".format(devs)) log.trace("Updating SCSI controllers {}".format(devs))
for item in scsis_old_new: for item in scsis_old_new:
next_scsi = item["new"] next_scsi = item["new"]
current_scsi = item["old"] current_scsi = item["old"]
difference = recursive_diff(current_scsi, next_scsi) difference = recursive_diff(current_scsi, next_scsi)
difference.ignore_unset_values = False difference.ignore_unset_values = False
if difference.changed(): if difference.changed():
log.trace( log.trace(
"Virtual machine scsi device will be updated " "Virtual machine scsi device will be updated "
"key={0} bus_number={1} type={2} " "key={} bus_number={} type={} "
"bus_sharing={3}".format( "bus_sharing={}".format(
current_scsi["key"], current_scsi["key"],
current_scsi["bus_number"], current_scsi["bus_number"],
next_scsi["type"], next_scsi["type"],
next_scsi["bus_sharing"], next_scsi["bus_sharing"],
) )
) )
# The sharedBus property is not optional # The sharedBus property is not optional
# The type can only be updated if we delete the original # The type can only be updated if we delete the original
# controller, create a new one with the properties and then # controller, create a new one with the properties and then
# attach the disk object to the newly created controller, even # attach the disk object to the newly created controller, even
skipping to change at line 9656 skipping to change at line 9631
interface_old_new interface_old_new
Dictionary with old and new keys which contains the current and the Dictionary with old and new keys which contains the current and the
next config for a network device next config for a network device
parent parent
Parent managed object reference Parent managed object reference
""" """
network_changes = [] network_changes = []
if interface_old_new: if interface_old_new:
devs = [inter["old"]["mac"] for inter in interface_old_new] devs = [inter["old"]["mac"] for inter in interface_old_new]
log.trace("Updating network interfaces {0}".format(devs)) log.trace("Updating network interfaces {}".format(devs))
for item in interface_old_new: for item in interface_old_new:
current_interface = item["old"] current_interface = item["old"]
next_interface = item["new"] next_interface = item["new"]
difference = recursive_diff(current_interface, next_interface) difference = recursive_diff(current_interface, next_interface)
difference.ignore_unset_values = False difference.ignore_unset_values = False
if difference.changed(): if difference.changed():
log.trace( log.trace(
"Virtual machine network adapter will be updated " "Virtual machine network adapter will be updated "
"switch_type={0} name={1} adapter_type={2} " "switch_type={} name={} adapter_type={} "
"mac={3}".format( "mac={}".format(
next_interface["switch_type"], next_interface["switch_type"],
next_interface["name"], next_interface["name"],
current_interface["adapter_type"], current_interface["adapter_type"],
current_interface["mac"], current_interface["mac"],
) )
) )
device_config_spec = _apply_network_adapter_config( device_config_spec = _apply_network_adapter_config(
current_interface["key"], current_interface["key"],
next_interface["name"], next_interface["name"],
current_interface["adapter_type"], current_interface["adapter_type"],
skipping to change at line 9697 skipping to change at line 9672
Returns a list of vim.vm.device.VirtualDeviceSpec specifying to edit a Returns a list of vim.vm.device.VirtualDeviceSpec specifying to edit a
deployed serial port configuration to the new given config deployed serial port configuration to the new given config
serial_old_new serial_old_new
Dictionary with old and new keys which contains the current and the Dictionary with old and new keys which contains the current and the
next config for a serial port device next config for a serial port device
""" """
serial_changes = [] serial_changes = []
if serial_old_new: if serial_old_new:
devs = [serial["old"]["adapter"] for serial in serial_old_new] devs = [serial["old"]["adapter"] for serial in serial_old_new]
log.trace("Updating serial ports {0}".format(devs)) log.trace("Updating serial ports {}".format(devs))
for item in serial_old_new: for item in serial_old_new:
current_serial = item["old"] current_serial = item["old"]
next_serial = item["new"] next_serial = item["new"]
difference = recursive_diff(current_serial, next_serial) difference = recursive_diff(current_serial, next_serial)
difference.ignore_unset_values = False difference.ignore_unset_values = False
if difference.changed(): if difference.changed():
serial_changes.append( serial_changes.append(
_apply_serial_port(next_serial, current_serial["key"], "edit") _apply_serial_port(next_serial, current_serial["key"], "edit")
) )
return serial_changes return serial_changes
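# Illustrative sketch (hypothetical data, not from the Salt source): the
# old/new pairing consumed by the _update_* helpers above. Only pairs whose
# diff reports a change produce an "edit" device spec; identical pairs are
# skipped.
serial_old_new = [
    {
        "old": {"adapter": "Serial port 1", "yield": False},
        "new": {"adapter": "Serial port 1", "yield": True},
    },
    {
        "old": {"adapter": "Serial port 2", "yield": False},
        "new": {"adapter": "Serial port 2", "yield": False},   # unchanged, no spec
    },
]
changed = [item for item in serial_old_new if item["old"] != item["new"]]
assert len(changed) == 1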
skipping to change at line 9727 skipping to change at line 9702
controllers controllers
Controller device list Controller device list
parent parent
Managed object reference of the parent object Managed object reference of the parent object
""" """
cd_changes = [] cd_changes = []
if drives_old_new: if drives_old_new:
devs = [drive["old"]["adapter"] for drive in drives_old_new] devs = [drive["old"]["adapter"] for drive in drives_old_new]
log.trace("Updating cd/dvd drives {0}".format(devs)) log.trace("Updating cd/dvd drives {}".format(devs))
for item in drives_old_new: for item in drives_old_new:
current_drive = item["old"] current_drive = item["old"]
new_drive = item["new"] new_drive = item["new"]
difference = recursive_diff(current_drive, new_drive) difference = recursive_diff(current_drive, new_drive)
difference.ignore_unset_values = False difference.ignore_unset_values = False
if difference.changed(): if difference.changed():
if controllers: if controllers:
controller = _get_device_by_label( controller = _get_device_by_label(
controllers, new_drive["controller"] controllers, new_drive["controller"]
) )
skipping to change at line 9768 skipping to change at line 9743
return cd_changes return cd_changes
def _delete_device(device): def _delete_device(device):
""" """
Returns a vim.vm.device.VirtualDeviceSpec specifying to remove a virtual Returns a vim.vm.device.VirtualDeviceSpec specifying to remove a virtual
machine device machine device
device device
Device data type object Device data type object
""" """
log.trace("Deleting device with type {0}".format(type(device))) log.trace("Deleting device with type {}".format(type(device)))
device_spec = vim.vm.device.VirtualDeviceSpec() device_spec = vim.vm.device.VirtualDeviceSpec()
device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
device_spec.device = device device_spec.device = device
return device_spec return device_spec
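# Illustrative sketch (not from the Salt source): building a removal spec the
# way the delete helper above does, guarded so the snippet still runs without
# pyVmomi installed. Applying such a spec requires a live vCenter connection.
try:
    from pyVmomi import vim  # pylint: disable=no-name-in-module

    spec = vim.vm.device.VirtualDeviceSpec()
    spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
    # spec.device would be set to an existing vim.vm.device.VirtualDevice
except ImportError:
    spec = None  # pyVmomi not available in this environment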
def _get_client(server, username, password): def _get_client(server, username, password):
""" """
Establish client through proxy or with user provided credentials. Establish client through proxy or with user provided credentials.
:param basestring server: :param basestring server:
skipping to change at line 10389 skipping to change at line 10364
config_spec.guestId = image config_spec.guestId = image
config_spec.files = vim.vm.FileInfo() config_spec.files = vim.vm.FileInfo()
# For VSAN disks we need to specify a different vm path name, the vm file # For VSAN disks we need to specify a different vm path name, the vm file
# full path cannot be used # full path cannot be used
datastore_object = salt.utils.vmware.get_datastores( datastore_object = salt.utils.vmware.get_datastores(
service_instance, placement_object, datastore_names=[datastore] service_instance, placement_object, datastore_names=[datastore]
)[0] )[0]
if not datastore_object: if not datastore_object:
raise salt.exceptions.ArgumentValueError( raise salt.exceptions.ArgumentValueError(
"Specified datastore: '{0}' does not exist.".format(datastore) "Specified datastore: '{}' does not exist.".format(datastore)
) )
try: try:
ds_summary = salt.utils.vmware.get_properties_of_managed_object( ds_summary = salt.utils.vmware.get_properties_of_managed_object(
datastore_object, "summary.type" datastore_object, "summary.type"
) )
if "summary.type" in ds_summary and ds_summary["summary.type"] == "vsan" : if "summary.type" in ds_summary and ds_summary["summary.type"] == "vsan" :
log.trace( log.trace(
"The vmPathName should be the datastore " "The vmPathName should be the datastore "
"name if the datastore type is vsan" "name if the datastore type is vsan"
) )
config_spec.files.vmPathName = "[{0}]".format(datastore) config_spec.files.vmPathName = "[{}]".format(datastore)
else: else:
config_spec.files.vmPathName = "[{0}] {1}/{1}.vmx".format( config_spec.files.vmPathName = "[{0}] {1}/{1}.vmx".format(
datastore, vm_name datastore, vm_name
) )
except salt.exceptions.VMwareApiError: except salt.exceptions.VMwareApiError:
config_spec.files.vmPathName = "[{0}] {1}/{1}.vmx".format(datastore, vm_name) config_spec.files.vmPathName = "[{0}] {1}/{1}.vmx".format(datastore, vm_name)
cd_controllers = [] cd_controllers = []
if version: if version:
_apply_hardware_version(version, config_spec, "add") _apply_hardware_version(version, config_spec, "add")
skipping to change at line 10654 skipping to change at line 10629
diffs["cd_drives"].added, diffs["cd_drives"].added,
controllers=controllers, controllers=controllers,
parent_ref=datacenter_ref, parent_ref=datacenter_ref,
) )
) )
config_spec.deviceChange.extend(cd_changes) config_spec.deviceChange.extend(cd_changes)
if difference_keys: if difference_keys:
salt.utils.vmware.update_vm(vm_ref, config_spec) salt.utils.vmware.update_vm(vm_ref, config_spec)
changes = {} changes = {}
for key, properties in six.iteritems(diffs): for key, properties in diffs.items():
# We can't display object, although we will need them for delete # We can't display object, although we will need them for delete
# and update actions, we will need to delete these before we summarize # and update actions, we will need to delete these before we summarize
# the changes for the users # the changes for the users
if isinstance(properties, salt.utils.listdiffer.ListDictDiffer): if isinstance(properties, salt.utils.listdiffer.ListDictDiffer):
properties.remove_diff(diff_key="object", diff_list="intersect") properties.remove_diff(diff_key="object", diff_list="intersect")
properties.remove_diff(diff_key="key", diff_list="intersect") properties.remove_diff(diff_key="key", diff_list="intersect")
properties.remove_diff(diff_key="object", diff_list="removed") properties.remove_diff(diff_key="object", diff_list="removed")
properties.remove_diff(diff_key="key", diff_list="removed") properties.remove_diff(diff_key="key", diff_list="removed")
changes[key] = properties.diffs changes[key] = properties.diffs
skipping to change at line 10693 skipping to change at line 10668
vmx_path: vmx_path:
Full path to the vmx file, datastore name should be included Full path to the vmx file, datastore name should be included
service_instance service_instance
Service instance (vim.ServiceInstance) of the vCenter. Service instance (vim.ServiceInstance) of the vCenter.
Default is None. Default is None.
""" """
log.trace( log.trace(
"Registering virtual machine with properties " "Registering virtual machine with properties "
"datacenter={0}, placement={1}, " "datacenter={}, placement={}, "
"vmx_path={2}".format(datacenter, placement, vmx_path) "vmx_path={}".format(datacenter, placement, vmx_path)
) )
datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter) datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
if "cluster" in placement: if "cluster" in placement:
cluster_obj = salt.utils.vmware.get_cluster( cluster_obj = salt.utils.vmware.get_cluster(
datacenter_object, placement["cluster"] datacenter_object, placement["cluster"]
) )
cluster_props = salt.utils.vmware.get_properties_of_managed_object( cluster_props = salt.utils.vmware.get_properties_of_managed_object(
cluster_obj, properties=["resourcePool"] cluster_obj, properties=["resourcePool"]
) )
if "resourcePool" in cluster_props: if "resourcePool" in cluster_props:
skipping to change at line 10717 skipping to change at line 10692
raise salt.exceptions.VMwareObjectRetrievalError( raise salt.exceptions.VMwareObjectRetrievalError(
"The cluster's resource pool object could not be retrieved." "The cluster's resource pool object could not be retrieved."
) )
salt.utils.vmware.register_vm(datacenter_object, name, vmx_path, resourcepool) salt.utils.vmware.register_vm(datacenter_object, name, vmx_path, resourcepool)
elif "host" in placement: elif "host" in placement:
hosts = salt.utils.vmware.get_hosts( hosts = salt.utils.vmware.get_hosts(
service_instance, datacenter_name=datacenter, host_names=[placement["host"]] service_instance, datacenter_name=datacenter, host_names=[placement["host"]]
) )
if not hosts: if not hosts:
raise salt.exceptions.VMwareObjectRetrievalError( raise salt.exceptions.VMwareObjectRetrievalError(
"ESXi host named '{0}' wasn't found.".format(placement["host"]) "ESXi host named '{}' wasn't found.".format(placement["host"])
) )
host_obj = hosts[0] host_obj = hosts[0]
host_props = salt.utils.vmware.get_properties_of_managed_object( host_props = salt.utils.vmware.get_properties_of_managed_object(
host_obj, properties=["parent"] host_obj, properties=["parent"]
) )
if "parent" in host_props: if "parent" in host_props:
host_parent = host_props["parent"] host_parent = host_props["parent"]
parent = salt.utils.vmware.get_properties_of_managed_object( parent = salt.utils.vmware.get_properties_of_managed_object(
host_parent, properties=["parent"] host_parent, properties=["parent"]
) )
skipping to change at line 10769 skipping to change at line 10744
service_instance service_instance
Service instance (vim.ServiceInstance) of the vCenter. Service instance (vim.ServiceInstance) of the vCenter.
Default is None. Default is None.
.. code-block:: bash .. code-block:: bash
salt '*' vsphere.power_on_vm name=my_vm salt '*' vsphere.power_on_vm name=my_vm
""" """
log.trace("Powering on virtual machine {0}".format(name)) log.trace("Powering on virtual machine {}".format(name))
vm_properties = ["name", "summary.runtime.powerState"] vm_properties = ["name", "summary.runtime.powerState"]
virtual_machine = salt.utils.vmware.get_vm_by_property( virtual_machine = salt.utils.vmware.get_vm_by_property(
service_instance, name, datacenter=datacenter, vm_properties=vm_properties service_instance, name, datacenter=datacenter, vm_properties=vm_properties
) )
if virtual_machine["summary.runtime.powerState"] == "poweredOn": if virtual_machine["summary.runtime.powerState"] == "poweredOn":
result = { result = {
"comment": "Virtual machine is already powered on", "comment": "Virtual machine is already powered on",
"changes": {"power_on": True}, "changes": {"power_on": True},
} }
return result return result
skipping to change at line 10809 skipping to change at line 10784
service_instance service_instance
Service instance (vim.ServiceInstance) of the vCenter. Service instance (vim.ServiceInstance) of the vCenter.
Default is None. Default is None.
.. code-block:: bash .. code-block:: bash
salt '*' vsphere.power_off_vm name=my_vm salt '*' vsphere.power_off_vm name=my_vm
""" """
log.trace("Powering off virtual machine {0}".format(name)) log.trace("Powering off virtual machine {}".format(name))
vm_properties = ["name", "summary.runtime.powerState"] vm_properties = ["name", "summary.runtime.powerState"]
virtual_machine = salt.utils.vmware.get_vm_by_property( virtual_machine = salt.utils.vmware.get_vm_by_property(
service_instance, name, datacenter=datacenter, vm_properties=vm_properties service_instance, name, datacenter=datacenter, vm_properties=vm_properties
) )
if virtual_machine["summary.runtime.powerState"] == "poweredOff": if virtual_machine["summary.runtime.powerState"] == "poweredOff":
result = { result = {
"comment": "Virtual machine is already powered off", "comment": "Virtual machine is already powered off",
"changes": {"power_off": True}, "changes": {"power_off": True},
} }
return result return result
skipping to change at line 10864 skipping to change at line 10839
results["powered_off"] = True results["powered_off"] = True
vm_ref = salt.utils.vmware.get_mor_by_property( vm_ref = salt.utils.vmware.get_mor_by_property(
service_instance, service_instance,
vim.VirtualMachine, vim.VirtualMachine,
name, name,
property_name="name", property_name="name",
container_ref=placement_object, container_ref=placement_object,
) )
if not vm_ref: if not vm_ref:
raise salt.exceptions.VMwareObjectRetrievalError( raise salt.exceptions.VMwareObjectRetrievalError(
"The virtual machine object {0} in datacenter " "The virtual machine object {} in datacenter "
"{1} was not found".format(name, datacenter) "{} was not found".format(name, datacenter)
) )
return results, vm_ref return results, vm_ref
@depends(HAS_PYVMOMI) @depends(HAS_PYVMOMI)
@supports_proxies("esxvm", "esxcluster", "esxdatacenter") @supports_proxies("esxvm", "esxcluster", "esxdatacenter")
@gets_service_instance_via_proxy @gets_service_instance_via_proxy
def delete_vm(name, datacenter, placement=None, power_off=False, service_instance=None): def delete_vm(name, datacenter, placement=None, power_off=False, service_instance=None):
""" """
Deletes a virtual machine defined by name and placement Deletes a virtual machine defined by name and placement
 End of changes. 196 change blocks. 
246 lines changed or deleted, 229 lines changed or added