Merge pull request #30361 from cro/esxi-proxy2

Flip the sense of the test for proxymodule imports, add more fns for esxi proxy
Nicole Thomas 2016-01-14 13:54:08 -07:00
commit 40594efc0b
8 changed files with 230 additions and 25 deletions
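The first hunk below flips the loader's proxymodule test from a blacklist to a whitelist. A minimal sketch of just that predicate (a hypothetical distillation for illustration, not the loader's actual API; the real check lives in LazyLoader and also consults each module's __proxyenabled__ list):

def _needs_proxyenabled_filter(tag):
    # Old behaviour: filter every module type except an exempt list.
    #     return tag not in ['render', 'states', 'utils']
    # New behaviour: only grains and proxy modules are filtered against
    # __proxyenabled__; other module types load normally on a proxy minion.
    return tag in ['grains', 'proxy']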

salt/grains/fx2.py View File

@@ -40,21 +40,25 @@ def _find_credentials():
    if 'fallback_admin_username' in __pillar__.get('proxy'):
        usernames.append(__pillar__['proxy'].get('fallback_admin_username'))
-    for u in usernames:
-        for p in __pillar__['proxy']['passwords']:
-            r = salt.modules.dracr.get_chassis_name(host=__pillar__['proxy']['host'],
-                                                    admin_username=u,
-                                                    admin_password=p)
+    for user in usernames:
+        for pwd in __pillar__['proxy']['passwords']:
+            r = __salt__['dracr.get_chassis_name'](host=__pillar__['proxy']['host'],
+                                                   admin_username=user,
+                                                   admin_password=pwd)
            # Retcode will be present if the chassis_name call failed
            try:
                if r.get('retcode', None) is None:
-                    return (u, p)
+                    __opts__['proxy']['admin_username'] = user
+                    __opts__['proxy']['admin_password'] = pwd
+                    return (user, pwd)
            except AttributeError:
                # Then the above was a string, and we can return the username
                # and password
-                return (u, p)
+                __opts__['proxy']['admin_username'] = user
+                __opts__['proxy']['admin_password'] = pwd
+                return (user, pwd)
-    logger.debug('grains fx2._find_credentials found no valid credentials, using Dell default')
+    logger.debug('grains fx2.find_credentials found no valid credentials, using Dell default')
    return ('root', 'calvin')
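For context, a hypothetical pillar this loop walks (the key names follow the checks above and the usual fx2 proxy layout; every value here is a placeholder). Each candidate username is tried against each password until dracr.get_chassis_name returns without a retcode:

# Hypothetical fx2 proxy pillar, expressed as the dict __pillar__ would hold.
__pillar__ = {
    'proxy': {
        'proxytype': 'fx2',
        'host': '192.0.2.1',                       # CMC address (placeholder)
        'admin_username': 'root',
        'fallback_admin_username': 'admin',        # optional, per the check above
        'passwords': ['first-guess', 'second-guess'],  # tried in order
    }
}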

salt/loader.py View File

@@ -1224,7 +1224,7 @@ class LazyLoader(salt.utils.lazy.LazyDict):
        #
        # Render modules and state modules are OK though
        if 'proxy' in self.opts:
-            if self.tag not in ['render', 'states', 'utils']:
+            if self.tag in ['grains', 'proxy']:
                if not hasattr(mod, '__proxyenabled__') or \
                        (self.opts['proxy']['proxytype'] not in mod.__proxyenabled__ and
                        '*' not in mod.__proxyenabled__):
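With the whitelist in place, whether a grains or proxy module loads on a proxy minion is governed entirely by its __proxyenabled__ attribute. A hypothetical grains module opting in might look like this (module and function names are invented for illustration):

# my_device_grains.py -- hypothetical grains module for proxy minions.
__proxyenabled__ = ['fx2', 'esxi']   # or ['*'] to load for any proxytype

def location():
    # Grains functions return a dict that is merged into the minion's grains.
    return {'location': 'rack-4'}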

salt/minion.py View File

@@ -1374,6 +1374,12 @@ class Minion(MinionBase):
        log.debug('Refreshing modules. Notify={0}'.format(notify))
        if hasattr(self, 'proxy'):
            self.functions, self.returners, _ = self._load_modules(force_refresh, notify=notify, proxy=self.proxy)
+            # Proxies have a chicken-and-egg problem. Usually we load grains early
+            # in the setup process, but we can't load grains for proxies until
+            # we talk to the device we are proxying for. So force a grains
+            # sync here.
+            self.functions['saltutil.sync_grains'](saltenv='base')
        else:
            self.functions, self.returners, _ = self._load_modules(force_refresh, notify=notify)

@@ -2583,6 +2589,7 @@ class ProxyMinion(Minion):
        self.functions.pack['__proxy__'] = self.proxy
        self.proxy.pack['__salt__'] = self.functions
        self.proxy.pack['__ret__'] = self.returners
+        self.proxy.pack['__pillar__'] = self.opts['pillar']

        if ('{0}.init'.format(fq_proxyname) not in self.proxy
                or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):

@@ -2597,9 +2604,8 @@ class ProxyMinion(Minion):
        # Proxies have a chicken-and-egg problem. Usually we load grains early
        # in the setup process, but we can't load grains for proxies until
        # we talk to the device we are proxying for. So reload the grains
-        # functions here, and then force a grains sync.
+        # functions here, and then force a grains sync in modules_refresh
        self.opts['grains'] = salt.loader.grains(self.opts, force_refresh=True)
-        self.functions['saltutil.sync_grains'](saltenv='base')

        # Check config 'add_proxymodule_to_opts' Remove this in Boron.
        if self.opts['add_proxymodule_to_opts']:
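With __pillar__ now packed into the proxymodule alongside __salt__ and __ret__, proxymodule functions can read connection details straight from pillar. A hedged sketch of a hypothetical proxymodule relying on that injection (the dunders are supplied by the pack calls above, not imported; the Dell defaults mirror the fallback in the grains file):

def init(opts):
    # __pillar__ and __salt__ are injected into the proxymodule's namespace.
    host = __pillar__['proxy']['host']
    return __salt__['dracr.get_chassis_name'](host=host,
                                              admin_username='root',
                                              admin_password='calvin')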

salt/modules/cmdmod.py View File

@@ -36,6 +36,7 @@ try:
except ImportError:
    pass

+__proxyenabled__ = ['*']

# Define the module's virtual name
__virtualname__ = 'cmd'
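Declaring __proxyenabled__ = ['*'] marks the cmd module as safe for every proxytype, so proxy-side code can keep shelling out on the proxy host. The dracr module in the next file fetches cmd.run_all in exactly that spirit; a hedged sketch of the pattern inside a hypothetical proxy-side module (the command is a placeholder, and __salt__ is injected by the loader):

def chassis_info():
    # Runs on the proxy host itself, not on the managed device.
    result = __salt__['cmd.run_all']('racadm getsysinfo', python_shell=False)
    if result['retcode'] == 0:
        return result['stdout']
    return None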

salt/modules/dracr.py View File

@@ -23,6 +23,8 @@ from salt.ext.six.moves import map

log = logging.getLogger(__name__)

+__proxyenabled__ = ['fx2']
+
try:
    run_all = __salt__['cmd.run_all']
except NameError:

@@ -1319,6 +1321,7 @@ def get_general(cfg_sec, cfg_var, host=None,

def bare_rac_cmd(cmd, host=None,
                 admin_username=None, admin_password=None):
    ret = __execute_ret('{0}'.format(cmd),
                        host=host,
                        admin_username=admin_username,

salt/modules/vsphere.py View File

@@ -180,6 +180,7 @@ except ImportError:

log = logging.getLogger(__name__)

__virtualname__ = 'vsphere'
+__proxyenabled__ = ['esxi']


def __virtual__():
@@ -3224,6 +3225,19 @@ def vsan_enable(host, username, password, protocol=None, port=None, host_names=N
    return ret


+def get_dvs_portgroup_assignment(service_instance, host, dvs_name,
+                                 portgroup_name, host_names):
+    '''
+    :param service_instance: Reference to the vSphere server
+    :param dvs_name: Name of the DVS
+    :param portgroup_name: Portgroup to examine
+    :param host_names: Names of hosts
+    :return:
+    '''
+    host_refs = _check_hosts(service_instance, host, host_names)
+
+
def _check_hosts(service_instance, host, host_names):
    '''
    Helper function that checks to see if the host provided is a vCenter Server or
@@ -3519,3 +3533,109 @@ def _set_syslog_config_helper(host, username, password, syslog_config, config_va
        ret_dict.update({'syslog_restart': {'success': response['retcode'] == 0}})

    return ret_dict
+
+
+def add_host_to_dvs(host, username, password, vmknic_name, vmnic_name,
+                    dvs_name, portgroup_name, protocol=None, port=None,
+                    host_names=None):
+    '''
+    Adds an ESXi host to a vSphere Distributed Virtual Switch
+
+    DOES NOT migrate the ESXi's physical and virtual NICs to the switch (yet)
+    (please don't remove the commented code)
+    '''
+    service_instance = salt.utils.vmware.get_service_instance(host=host,
+                                                              username=username,
+                                                              password=password,
+                                                              protocol=protocol,
+                                                              port=port)
+    dvs = salt.utils.vmware._get_dvs(service_instance, dvs_name)
+    target_portgroup = salt.utils.vmware._get_dvs_portgroup(dvs,
+                                                            portgroup_name)
+    uplink_portgroup = salt.utils.vmware._get_dvs_uplink_portgroup(dvs,
+                                                                   'DSwitch-DVUplinks-34')
+    dvs_uuid = dvs.config.uuid
+    host_names = _check_hosts(service_instance, host, host_names)
+    ret = {}
+    for host_name in host_names:
+        # try:
+        ret[host_name] = {}
+        ret[host_name].update({'status': False,
+                               'portgroup': portgroup_name,
+                               'dvs': dvs_name})
+        host_ref = _get_host_ref(service_instance, host, host_name)
+        dvs_hostmember_config = vim.dvs.HostMember.ConfigInfo(
+            host=host_ref
+        )
+        dvs_hostmember = vim.dvs.HostMember(
+            config=dvs_hostmember_config
+        )
+        p_nics = salt.utils.vmware._get_pnics(host_ref)
+        p_nic = [x for x in p_nics if x.device == 'vmnic0']
+        v_nics = salt.utils.vmware._get_vnics(host_ref)
+        v_nic = [x for x in v_nics if x.device == vmknic_name]
+        v_nic_mgr = salt.utils.vmware._get_vnic_manager(host_ref)
+        dvs_pnic_spec = vim.dvs.HostMember.PnicSpec(
+            pnicDevice=vmnic_name,
+            uplinkPortgroupKey=uplink_portgroup.key
+        )
+        pnic_backing = vim.dvs.HostMember.PnicBacking(
+            pnicSpec=[dvs_pnic_spec]
+        )
+        dvs_hostmember_config_spec = vim.dvs.HostMember.ConfigSpec(
+            host=host_ref,
+            operation='add',
+        )
+        dvs_config = vim.DVSConfigSpec(
+            configVersion=dvs.config.configVersion,
+            host=[dvs_hostmember_config_spec])
+        task = dvs.ReconfigureDvs_Task(spec=dvs_config)
+        salt.utils.vmware.wait_for_task(task, host_name,
+                                        'Adding host to the DVS',
+                                        sleep_seconds=3)
+        ret[host_name].update({'status': True})
+    return ret
+    # # host_network_config.proxySwitch[0].spec.backing.pnicSpec = dvs_pnic_spec
+    #
+    # network_system = host_ref.configManager.networkSystem
+    #
+    # host_network_config = network_system.networkConfig
+    #
+    #
+    # orig_portgroup = network_system.networkInfo.portgroup[0]
+    # host_portgroup_config = []
+    # host_portgroup_config.append(vim.HostPortGroupConfig(
+    #     changeOperation='remove',
+    #     spec=orig_portgroup.spec
+    # ))
+    # # host_portgroup_config.append(vim.HostPortGroupConfig(
+    # #     changeOperation='add',
+    # #     spec=target_portgroup
+    # #
+    # # ))
+    # dvs_port_connection = vim.DistributedVirtualSwitchPortConnection(
+    #     portgroupKey=target_portgroup.key, switchUuid=dvs_uuid
+    # )
+    # host_vnic_spec = vim.HostVirtualNicSpec(
+    #     distributedVirtualPort=dvs_port_connection
+    # )
+    # host_vnic_config = vim.HostVirtualNicConfig(
+    #     changeOperation='add',
+    #     device=vmknic_name,
+    #     portgroup=target_portgroup.key,
+    #     spec=host_vnic_spec
+    # )
+    # host_network_config.proxySwitch[0].spec.backing = pnic_backing
+    # host_network_config.portgroup = host_portgroup_config
+    # host_network_config.vnic = [host_vnic_config]
+    # # host_network_config = vim.HostNetworkConfig(
+    # #     portgroup=[host_portgroup_config],
+    # #     vnic=[host_vnic_config]
+    # # )
+    # network_system.UpdateNetworkConfig(changeMode='modify',
+    #                                    config=host_network_config)
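A hedged usage sketch for the new function (every value is a placeholder; this call is not part of the PR). Note two hardcoded assumptions in this version: the uplink portgroup name 'DSwitch-DVUplinks-34' and the physical NIC filter 'vmnic0', so the function only fits a DVS laid out that way:

# Hypothetical direct invocation; in practice this runs via the minion, where
# salt.utils.vmware and pyVmomi (vim) are importable.
import salt.modules.vsphere as vsphere

result = vsphere.add_host_to_dvs(host='vcenter.example.com',
                                 username='administrator@vsphere.local',
                                 password='example-password',
                                 vmknic_name='vmk0',
                                 vmnic_name='vmnic0',
                                 dvs_name='DSwitch',
                                 portgroup_name='DPortGroup',
                                 host_names=['esxi01.example.com'])
# Expected shape on success:
# {'esxi01.example.com': {'status': True, 'portgroup': 'DPortGroup',
#                         'dvs': 'DSwitch'}}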

salt/proxy/fx2.py View File

@@ -290,27 +290,27 @@ def find_credentials():
    if 'fallback_admin_username' in __pillar__.get('proxy'):
        usernames.append(__pillar__['proxy'].get('fallback_admin_username'))
-    for u in usernames:
-        for p in __pillar__['proxy']['passwords']:
+    for user in usernames:
+        for pwd in __pillar__['proxy']['passwords']:
            r = __salt__['dracr.get_chassis_name'](host=__pillar__['proxy']['host'],
-                                                   admin_username=u,
-                                                   admin_password=p)
+                                                   admin_username=user,
+                                                   admin_password=pwd)
            # Retcode will be present if the chassis_name call failed
            try:
                if r.get('retcode', None) is None:
-                    DETAILS['admin_username'] = u
-                    DETAILS['admin_password'] = p
-                    __opts__['proxy']['admin_username'] = u
-                    __opts__['proxy']['admin_password'] = p
-                    return (u, p)
+                    DETAILS['admin_username'] = user
+                    DETAILS['admin_password'] = pwd
+                    __opts__['proxy']['admin_username'] = user
+                    __opts__['proxy']['admin_password'] = pwd
+                    return (user, pwd)
            except AttributeError:
                # Then the above was a string, and we can return the username
                # and password
-                DETAILS['admin_username'] = u
-                DETAILS['admin_password'] = p
-                __opts__['proxy']['admin_username'] = u
-                __opts__['proxy']['admin_password'] = p
-                return (u, p)
+                DETAILS['admin_username'] = user
+                DETAILS['admin_password'] = pwd
+                __opts__['proxy']['admin_username'] = user
+                __opts__['proxy']['admin_password'] = pwd
+                return (user, pwd)
    log.debug('proxy fx2.find_credentials found no valid credentials, using Dell default')
    return ('root', 'calvin')

salt/utils/vmware.py View File

@@ -236,6 +236,77 @@ def get_service_instance(host, username, password, protocol=None, port=None):

    return service_instance


+def _get_dvs(service_instance, dvs_name):
+    '''
+    Return a reference to a Distributed Virtual Switch object.
+
+    :param service_instance: PyVmomi service instance
+    :param dvs_name: Name of DVS to return
+    :return: A PyVmomi DVS object
+    '''
+    switches = list_dvs(service_instance)
+    if dvs_name in switches:
+        inventory = get_inventory(service_instance)
+        container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
+        for item in container.view:
+            if item.name == dvs_name:
+                return item
+    return None
+
+
+def _get_pnics(host_reference):
+    '''
+    Helper function that returns a list of PhysicalNics and their information.
+    '''
+    return host_reference.config.network.pnic
+
+
+def _get_vnics(host_reference):
+    '''
+    Helper function that returns a list of VirtualNics and their information.
+    '''
+    return host_reference.config.network.vnic
+
+
+def _get_vnic_manager(host_reference):
+    '''
+    Helper function that returns a list of Virtual NicManagers
+    and their information.
+    '''
+    return host_reference.configManager.virtualNicManager
+
+
+def _get_dvs_portgroup(dvs, portgroup_name):
+    '''
+    Return a portgroup object corresponding to the portgroup name on the dvs
+
+    :param dvs: DVS object
+    :param portgroup_name: Name of portgroup to return
+    :return: Portgroup object
+    '''
+    for portgroup in dvs.portgroup:
+        if portgroup.name == portgroup_name:
+            return portgroup
+    return None
+
+
+def _get_dvs_uplink_portgroup(dvs, portgroup_name):
+    '''
+    Return a portgroup object corresponding to the portgroup name on the dvs
+
+    :param dvs: DVS object
+    :param portgroup_name: Name of portgroup to return
+    :return: Portgroup object
+    '''
+    for portgroup in dvs.portgroup:
+        if portgroup.name == portgroup_name:
+            return portgroup
+    return None
+
+
def get_inventory(service_instance):
    '''
    Return the inventory of a Service Instance Object.
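Taken together, the new private helpers compose with the existing public ones. A hedged sketch (placeholder names and credentials; assumes pyVmomi connectivity to a live vCenter):

# Hypothetical composition of the helpers added above.
import salt.utils.vmware

si = salt.utils.vmware.get_service_instance(host='vcenter.example.com',
                                            username='admin',
                                            password='example-password')
dvs = salt.utils.vmware._get_dvs(si, 'DSwitch')
if dvs is not None:
    uplink_pg = salt.utils.vmware._get_dvs_uplink_portgroup(dvs, 'DSwitch-DVUplinks-34')
    workload_pg = salt.utils.vmware._get_dvs_portgroup(dvs, 'DPortGroup')

As written, _get_dvs_portgroup and _get_dvs_uplink_portgroup are byte-for-byte identical; presumably the uplink variant exists so its lookup can diverge later without changing callers.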